/* neobbo/src/scheduler/sched.c — per-CPU round-robin thread scheduler */
#include <assert.h>
#include <kprint.h>
#include <stdint.h>
#include <neobbo.h>
#include <smp.h>
#include <error.h>
#include <string.h>
#include <mm/kmalloc.h>
#include <mm/slab.h>
#include <scheduler/sched.h>
// Assembly routine: saves the current register context into *old and
// restores (and resumes) the context pointed to by new.
extern void switch_context(struct context **old, struct context *new);
extern void sched_enter(void *a);
// Slab cache for struct thread allocations — NOTE(review): initialized
// elsewhere; this file assumes it is valid before alloc_thread() runs.
struct ma_cache *thread_cache;
// Idle thread, allocated once on the BSP in scheduler_init().
struct thread *idle;
// Time-slice length in microseconds — NOTE(review): not referenced in this
// file; presumably consumed by the timer-interrupt path — confirm.
#define QUANTUM_US 10000
// Next PID to hand out; monotonically increasing. NOTE(review): incremented
// without atomics — verify this is safe if APs also create threads.
int next_pid = 1;
/* Idle loop run when this CPU's run queue is empty. Never returns; the
 * scheduler switches away from it when a runnable thread appears. */
void idle_task(){
	for(;;){
#ifdef __x86_64__
		// PAUSE is x86-only; guard it like the asm("cli") sites in this file
		__builtin_ia32_pause();
#endif
	}
}
/* Demo thread entry point: prints a greeting, then returns into
 * thread_exit() via the return address pushed by alloc_thread(). */
void test_task(){
	kprintf("Hello world from scheduled task!\n");
}
/* Second demo thread entry point; exits immediately after printing. */
void best_task(){
	kprintf("Hello world I am best\n");
}
void thread_exit(){
#ifdef __x86_64__
asm("cli");
#endif
cpu_state *cpu = get_cpu_struct();
struct thread *p = cpu->current_process;
if(p == p->next && p->prev == p){
// If this is the only thread in the queue then set cpu->head to NULL, so scheduler knows to idle
cpu->head = NULL;
}else{
// Remove process from circular linked list
p->next->prev = p->prev;
p->prev->next = p->next;
cpu->head = p->next;
}
p->state = ZOMBIE;
// Switch to scheduler
yield();
}
/* Setup a process structure */
/* Allocate and initialize a thread structure plus its 8-page kernel stack.
 * The stack is seeded with thread_exit as the return address, followed by a
 * zeroed-out register context, so the first switch_context() into the thread
 * "returns" into its entry point and, when that returns, into thread_exit().
 * Returns NULL if either allocation fails (callers assert on this). */
struct thread *alloc_thread(void){
	struct thread *t = ma_cache_alloc(thread_cache, 0);
	if(t == NULL)
		return NULL; // check before memset — previously this wrote through NULL
	memset(t, 0, sizeof(struct thread));
	t->state = READY;
	t->kstack = kzalloc(8 * 4096);
	if(t->kstack == NULL){
		// Undo the slab allocation so a failed thread leaks nothing
		ma_cache_dealloc(t);
		return NULL;
	}
	t->pid = next_pid++;
	// Start at the top of the stack (stacks grow down)
	uint64_t *sp = (uint64_t*)((uint64_t)t->kstack + 8 * 4096);
	// Push the exit function (the thread returns to it when it finishes)
	*--sp = (uint64_t)thread_exit;
	// Reserve space for the saved register context
	sp -= sizeof(struct context)/sizeof(uint64_t);
	t->context = (struct context*)sp;
	t->context->rbp = (uint64_t)sp;
	return t;
}
/* Create a thread that begins execution at `entry` and insert it into this
 * CPU's circular run queue. The new thread becomes the queue head.
 * Returns the new thread; asserts (panics) if allocation fails. */
struct thread *add_thread(uint64_t *entry){
	struct thread *t = alloc_thread();
	assert(t != NULL && "Thread allocation failed!");
	struct cpu_state *cpu = get_cpu_struct();
	if(cpu->base == NULL){
		// Empty queue: t is the sole node, linked to itself
		t->next = t;
		t->prev = t;
		cpu->base = t;
		cpu->head = t;
	} else {
		// Splice t in between the current head and base, then make it head
		struct thread *head = cpu->head;
		struct thread *base = cpu->base;
		t->prev = head;
		t->next = base;
		head->next = t;
		base->prev = t;
		cpu->head = t;
	}
	t->context->rip = (uint64_t)entry;
	return t;
}
[[noreturn]] void sched(){
cpu_state *cpu = get_cpu_struct();
cpu->scheduler_initialized = true; // Allow us to get interrupts that schedule us
for(;;){
#ifdef __x86_64__
asm("cli"); // we sti at the end of switch_context
#endif
struct thread *prev = cpu->current_process;
if(cpu->head == NULL){
cpu->current_process = idle;
}else{
cpu->current_process = cpu->head;
cpu->head = cpu->head->next;
}
if(prev->state == ZOMBIE){
ma_cache_dealloc(prev);
}else{
prev->state = READY;
}
cpu->current_process->state = RUNNING;
switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
}
}
/* One-time per-CPU scheduler bring-up: allocates the idle thread (on the
 * BSP only), installs it as the current process, and enters the scheduler
 * loop. Does not return on success; returns early if already initialized. */
void scheduler_init(){
	assert(get_cpu_struct_initialized() && "CPU struct not initialized!");
	cpu_state *cpu = get_cpu_struct();
	if(cpu->current_process != NULL){
		kprintf("scheduler on CPU {d} already initialized!\n", cpu->id);
		return;
	}
	if(cpu->id == get_kinfo()->bsp_id){
		idle = alloc_thread();
		// Check BEFORE dereferencing — previously the assert ran after
		// idle->context->rip had already been written through a possible NULL
		assert(idle != NULL && "Failed to allocate idle task!");
		idle->context->rip = (uint64_t)idle_task;
	}
	// NOTE(review): APs share the BSP's idle thread (and its stack); if an AP
	// reaches this before the BSP, idle is still NULL — verify init ordering.
	cpu->current_process = idle;
	kprintf("scheduler on CPU {d} initialized!\n", cpu->id); // \n added for consistency with the other log lines
	sched();
	for(;;); // unreachable: sched() is [[noreturn]]
}
void yield(){
#ifdef __x86_64__
asm("cli");
#endif
switch_context(&get_cpu_struct()->current_process->context, get_cpu_struct()->scheduler_context);
}