Work on scheduler

ssimnb 2026-02-26 08:22:29 +01:00
parent e72c8fe6fd
commit 184f1a60de
9 changed files with 67 additions and 41 deletions

View file

@@ -20,7 +20,8 @@ typedef struct cpu_state {
 }cpu_state;
 void smp_init();
-cpu_state *get_cpu_struct();
+cpu_state *get_current_cpu_state();
+cpu_state *get_cpu_state(int);
 uint64_t get_cpu_count();
 void bsp_early_init();
 bool get_cpu_struct_initialized();
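For orientation, the old single accessor is split into a "current CPU" lookup and a by-index lookup. A minimal usage sketch, assuming only the declarations above (nothing here is part of the commit itself):

    #include <smp.h>

    void example(void){
        cpu_state *me   = get_current_cpu_state(); /* the CPU executing this code */
        cpu_state *cpu0 = get_cpu_state(0);        /* some CPU, looked up by index */
        (void)me; (void)cpu0;
    }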

View file

@@ -47,10 +47,10 @@ uint32_t lapic_read_reg(uint32_t reg){
 }
 void apic_sleep(uint64_t ms){
-    uint64_t lapic_timer_ticks = get_cpu_struct()->lapic_timer_ticks;
-    uint64_t curcnt = get_cpu_struct()->lapic_timer_ticks;
+    uint64_t lapic_timer_ticks = get_current_cpu_struct()->lapic_timer_ticks;
+    uint64_t curcnt = get_current_cpu_struct()->lapic_timer_ticks;
     while (lapic_timer_ticks - curcnt < ms) {
-        lapic_timer_ticks = get_cpu_struct()->lapic_timer_ticks;
+        lapic_timer_ticks = get_current_cpu_struct()->lapic_timer_ticks;
     }
 }
@@ -129,7 +129,7 @@ void ap_apic_init(){
 void apic_timer_handler(){
     if(get_cpu_struct_initialized()){
-        get_cpu_struct()->lapic_timer_ticks++;
+        get_current_cpu_struct()->lapic_timer_ticks++;
     }
     lapic_write_reg(LAPIC_EOI_REG, 0);

View file

@@ -241,7 +241,7 @@ void interrupt_handler(interrupt_frame *r){
     }
     if(r->int_no == 69 && get_cpu_struct_initialized()
-       && get_cpu_struct()->scheduler_initialized){
+       && get_current_cpu_struct()->scheduler_initialized){
         yield();
         return;
     }

View file

@@ -37,6 +37,7 @@ void initialize_kinfo(){
     kinfo.cmdline = executable_cmdline_request.response->cmdline;
     kinfo.cpu_count = smp_request.response->cpu_count;
+    kinfo.boot_timestamp = date_at_boot_request.response->timestamp;
 #ifdef __x86_64__
     kinfo.bsp_id = smp_request.response->bsp_lapic_id;
 #endif

View file

@@ -29,7 +29,7 @@ struct mutex *init_mutex(){
 kstatus try_mutex(struct mutex *mut){
     if(!atomic_flag_test_and_set_explicit(&mut->lock, memory_order_acquire)){
-        mut->holder = get_cpu_struct()->current_process;
+        mut->holder = get_current_cpu_struct()->current_process;
         mut->locked = true;
         return KERNEL_MUTEX_ACQUIRED;
     }
@@ -40,7 +40,7 @@ kstatus try_mutex(struct mutex *mut){
 kstatus acquire_mutex(struct mutex *mut){
-    if(get_cpu_struct()->current_process == mut->holder){
+    if(get_current_cpu_struct()->current_process == mut->holder){
         klog(__func__, "Holder attempted to acquire mutex");
         return KERNEL_STATUS_ERROR;
     }
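The holder check above is what rejects re-entry by the thread that already owns the mutex. A minimal caller-side sketch, assuming only the API visible in this file; the release path is not part of this diff, so it is elided:

    struct mutex *m = init_mutex();

    if(try_mutex(m) != KERNEL_MUTEX_ACQUIRED){
        /* Contended: acquire_mutex() refuses re-entry by the current holder
           and otherwise waits for the mutex to become free. */
        acquire_mutex(m);
    }
    /* ... critical section ... (unlock call not shown in this diff) */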

View file

@@ -1,6 +1,7 @@
 #include <assert.h>
 #include <kprint.h>
+#include <stdatomic.h>
 #include <stdint.h>
 #include <neobbo.h>
 #include <smp.h>
@@ -9,6 +10,7 @@
 #include <mm/kmalloc.h>
 #include <mm/slab.h>
 #include <scheduler/sched.h>
+#include <lock.h>
 extern void switch_context(struct context **old, struct context *new);
 extern void sched_enter(void *a);
@@ -42,7 +44,7 @@ void thread_exit(){
     asm("cli");
 #endif
-    cpu_state *cpu = get_cpu_struct();
+    cpu_state *cpu = get_current_cpu_state();
     struct thread *p = cpu->current_process;
     if(p == p->next && p->prev == p){
@@ -70,11 +72,8 @@ struct thread *alloc_thread(void){
     t->kstack = kzalloc(8 * 4096);
-    t->pid = next_pid++;
     uint64_t *sp = (uint64_t*)((uint64_t)t->kstack + 8 * 4096);
     // Push the exit function (the thread will return to it when it finishes)
     *--sp = (uint64_t)thread_exit;
@@ -93,9 +92,9 @@ struct thread *add_thread(uint64_t *entry){
     assert(t != NULL && "Thread allocation failed!");
-    struct cpu_state *cpu = get_cpu_struct();
-    struct thread *head = get_cpu_struct()->head;
-    struct thread *base = get_cpu_struct()->base;
+    struct cpu_state *cpu = get_current_cpu_state();
+    struct thread *head = get_current_cpu_state()->head;
+    struct thread *base = get_current_cpu_state()->base;
     // Manage circley linked list
     if(base == NULL){
@@ -113,12 +112,25 @@ struct thread *add_thread(uint64_t *entry){
     t->context->rip = (uint64_t)entry;
+    t->pid = next_pid++;
     return t;
 }
+// This function determines to which CPU a new thread should be added
+kstatus spawn_thread(struct thread *thread){
+    uint64_t cpu_count = get_kinfo()->cpu_count;
+    uint16_t i = 0;
+    for(i = 0; i < cpu_count; i++){
+        if(get_cpu_state(i)->scheduler_initialized){
+        }
+    }
+}
 [[noreturn]] void sched(){
-    cpu_state *cpu = get_cpu_struct();
+    cpu_state *cpu = get_current_cpu_struct();
     cpu->scheduler_initialized = true; // Allow us to get interrupts that schedule us
     for(;;){
 #ifdef __x86_64__
@@ -127,8 +139,12 @@ struct thread *add_thread(uint64_t *entry){
         struct thread *prev = cpu->current_process;
+        if(prev == NULL && cpu->head == NULL){
+            continue;
+        }
         if(cpu->head == NULL){
-            cpu->current_process = idle;
+            continue;
         }else{
             cpu->current_process = cpu->head;
             cpu->head = cpu->head->next;
@@ -141,14 +157,16 @@ struct thread *add_thread(uint64_t *entry){
         }
         cpu->current_process->state = RUNNING;
-        switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
+        switch_context(&(get_current_cpu_struct()->scheduler_context), get_current_cpu_struct()->current_process->context);
     }
 }
+static atomic_flag scheduler_init_lock = ATOMIC_FLAG_INIT;
 void scheduler_init(){
+    acquire_spinlock(&scheduler_init_lock);
     assert(get_cpu_struct_initialized() && "CPU struct not initialized!");
-    cpu_state *cpu = get_cpu_struct();
+    cpu_state *cpu = get_current_cpu_struct();
     if(cpu->current_process != NULL){
         kprintf("scheduler on CPU {d} already initialized!\n", cpu->id);
@@ -161,9 +179,11 @@ void scheduler_init(){
         assert(idle != NULL && "Failed to allocate idle task!");
     }
-    cpu->current_process = idle;
-    kprintf("scheduler on CPU {d} initialized!", cpu->id);
+    //cpu->current_process = idle;
+    kprintf("scheduler on CPU {d} initialized!\n", cpu->id);
+    free_spinlock(&scheduler_init_lock);
     sched();
@@ -175,5 +195,5 @@ void yield(){
 #ifdef __x86_64__
     asm("cli");
 #endif
-    switch_context(&get_cpu_struct()->current_process->context, get_cpu_struct()->scheduler_context);
+    switch_context(&get_current_cpu_struct()->current_process->context, get_current_cpu_struct()->scheduler_context);
 }

View file

@@ -19,17 +19,17 @@ extern void s_load_idt();
 extern void s_load_gdt();
 extern volatile struct limine_mp_request smp_request;
+static cpu_state *cpus;
+static cpu_state bsp_cpu;
 /* Returns the CPU structure for this particular CPU */
-cpu_state *get_cpu_struct(){
+cpu_state *get_current_cpu_struct(){
     return (cpu_state*)rdmsr(GSBASE);
 }
-uint64_t get_cpu_count(){
-    if(smp_request.response != NULL){
-        return smp_request.response->cpu_count;
-    }
-    return 0;
+cpu_state *get_cpu_struct(int id){
+    return &cpus[id];
 }
 bool get_cpu_struct_initialized(){
@@ -59,41 +59,45 @@ void ap_init(struct limine_mp_info *smp_info){
     asm volatile(
         "movq %%cr3, %%rax\n\
         movq %%rax, %%cr3\n"
         : : : "rax"
     );
-    cpu_state *cpu_struct = (cpu_state*)kzalloc(sizeof(cpu_state));
-    cpu_struct->id = smp_info->lapic_id;
-    wrmsr(KERNELGSBASE, (uint64_t)cpu_struct);
-    wrmsr(GSBASE, (uint64_t)cpu_struct);
+    cpu_state cpu_struct = cpus[smp_info->lapic_id];
+    cpu_struct.id = smp_info->lapic_id;
+    wrmsr(KERNELGSBASE, (uint64_t)&cpu_struct);
+    wrmsr(GSBASE, (uint64_t)&cpu_struct);
     /* Initialize APIC & APIC timer */
     ap_apic_init();
     free_spinlock(&ap_init_lock);
-    for(;;);
+    while(!bsp_cpu.scheduler_initialized){
+        __builtin_ia32_pause();
+    }
     scheduler_init();
 }
-static cpu_state bsp_cpu;
 void smp_init(){
     struct limine_mp_response *smp_response = smp_request.response;
     kprintf("smp: {d} CPUs\n", smp_response->cpu_count);
-    for(uint64_t i = 0; i < smp_response->cpu_count; i++){
+    cpus = (cpu_state *)kzalloc(sizeof(cpu_state) * smp_response->cpu_count);
+    for(uint64_t i = 1; i < smp_response->cpu_count; i++){
         /* Pointer to smp_info is passed in RDI by Limine, so no need to pass any arguments here */
         smp_response->cpus[i]->goto_address = &ap_init;
     }
-    bsp_cpu.scheduler_context = (struct context*)kmalloc(sizeof(struct context));
+    bsp_cpu.scheduler_context = (struct context*)kzalloc(sizeof(struct context));
+    cpus[bsp_cpu.id] = bsp_cpu;
     /* If one of the APs has halted, then halt the BSP */
     extern bool kernel_killed;
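For context, per-CPU state now lives in the shared cpus[] array and each core reaches its own entry through GSBASE. In the hunk above, cpu_struct is a stack local copied out of cpus[], so GSBASE ends up pointing into ap_init's stack frame. A pointer-based variant, as a sketch and an assumption rather than part of this commit (it keeps GSBASE aimed at the long-lived array slot):

    /* Sketch: point GSBASE at the CPU's slot in cpus[] instead of a stack copy,
       so get_current_cpu_struct() stays valid after ap_init() returns.
       Assumes LAPIC IDs index cpus[] densely. */
    cpu_state *cpu_struct = &cpus[smp_info->lapic_id];
    cpu_struct->id = smp_info->lapic_id;
    wrmsr(KERNELGSBASE, (uint64_t)cpu_struct);
    wrmsr(GSBASE, (uint64_t)cpu_struct);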

View file

@@ -17,7 +17,7 @@ uint64_t get_timestamp_us(){
     }
     if(get_cpu_struct_initialized()){
-        return get_cpu_struct()->lapic_timer_ticks * 1000ULL;
+        return get_current_cpu_struct()->lapic_timer_ticks * 1000ULL;
     }
 #endif
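Unit note: this conversion assumes one LAPIC timer tick per millisecond (consistent with apic_sleep above, which compares tick deltas directly against ms), so microseconds are ticks * 1000; e.g. 250 ticks -> 250 * 1000 = 250 000 us (0.25 s).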

View file

@@ -206,7 +206,7 @@ void uacpi_kernel_free_event(uacpi_handle handle){
 }
 uacpi_thread_id uacpi_kernel_get_thread_id(void){
-    return get_cpu_struct();
+    return get_current_cpu_struct();
 }
 uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 t){