diff --git a/include/smp.h b/include/smp.h
index fb57db6..340b5b8 100644
--- a/include/smp.h
+++ b/include/smp.h
@@ -20,7 +20,8 @@ typedef struct cpu_state {
 }cpu_state;
 
 void smp_init();
-cpu_state *get_cpu_struct();
+cpu_state *get_current_cpu_state();
+cpu_state *get_cpu_state(int);
 uint64_t get_cpu_count();
 void bsp_early_init();
 bool get_cpu_struct_initialized();
diff --git a/src/arch/amd64/hal/apic.c b/src/arch/amd64/hal/apic.c
index cb8b41b..3e7c6d1 100644
--- a/src/arch/amd64/hal/apic.c
+++ b/src/arch/amd64/hal/apic.c
@@ -47,10 +47,10 @@ uint32_t lapic_read_reg(uint32_t reg){
 }
 
 void apic_sleep(uint64_t ms){
-    uint64_t lapic_timer_ticks = get_cpu_struct()->lapic_timer_ticks;
-    uint64_t curcnt = get_cpu_struct()->lapic_timer_ticks;
+    uint64_t lapic_timer_ticks = get_current_cpu_state()->lapic_timer_ticks;
+    uint64_t curcnt = get_current_cpu_state()->lapic_timer_ticks;
 
     while (lapic_timer_ticks - curcnt < ms) {
-        lapic_timer_ticks = get_cpu_struct()->lapic_timer_ticks;
+        lapic_timer_ticks = get_current_cpu_state()->lapic_timer_ticks;
     }
 }
@@ -129,7 +129,7 @@ void ap_apic_init(){
 void apic_timer_handler(){
     if(get_cpu_struct_initialized()){
-        get_cpu_struct()->lapic_timer_ticks++;
+        get_current_cpu_state()->lapic_timer_ticks++;
     }
 
     lapic_write_reg(LAPIC_EOI_REG, 0);
diff --git a/src/arch/amd64/hal/idt.c b/src/arch/amd64/hal/idt.c
index a5d64ae..c1a9fcd 100644
--- a/src/arch/amd64/hal/idt.c
+++ b/src/arch/amd64/hal/idt.c
@@ -241,7 +241,7 @@ void interrupt_handler(interrupt_frame *r){
     }
 
     if(r->int_no == 69 && get_cpu_struct_initialized()
-       && get_cpu_struct()->scheduler_initialized){
+       && get_current_cpu_state()->scheduler_initialized){
         yield();
         return;
     }
diff --git a/src/kinfo.c b/src/kinfo.c
index ef1bbf8..3d776c1 100644
--- a/src/kinfo.c
+++ b/src/kinfo.c
@@ -37,6 +37,7 @@ void initialize_kinfo(){
     kinfo.cmdline = executable_cmdline_request.response->cmdline;
     kinfo.cpu_count = smp_request.response->cpu_count;
     kinfo.boot_timestamp = 
date_at_boot_request.response->timestamp;
+    #ifdef __x86_64__
     kinfo.bsp_id = smp_request.response->bsp_lapic_id;
     #endif
diff --git a/src/lib/lock.c b/src/lib/lock.c
index 6cc0c88..83b76dc 100644
--- a/src/lib/lock.c
+++ b/src/lib/lock.c
@@ -29,7 +29,7 @@ struct mutex *init_mutex(){
 
 kstatus try_mutex(struct mutex *mut){
     if(!atomic_flag_test_and_set_explicit(&mut->lock, memory_order_acquire)){
-        mut->holder = get_cpu_struct()->current_process;
+        mut->holder = get_current_cpu_state()->current_process;
         mut->locked = true;
         return KERNEL_MUTEX_ACQUIRED;
     }
@@ -40,7 +40,7 @@ kstatus try_mutex(struct mutex *mut){
 
 kstatus acquire_mutex(struct mutex *mut){
-    if(get_cpu_struct()->current_process == mut->holder){
+    if(get_current_cpu_state()->current_process == mut->holder){
         klog(__func__, "Holder attempted to acquire mutex");
         return KERNEL_STATUS_ERROR;
     }
diff --git a/src/scheduler/sched.c b/src/scheduler/sched.c
index 55354af..4e4202b 100644
--- a/src/scheduler/sched.c
+++ b/src/scheduler/sched.c
@@ -1,6 +1,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -9,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 extern void switch_context(struct context **old, struct context *new);
 extern void sched_enter(void *a);
@@ -42,7 +44,7 @@ void thread_exit(){
     asm("cli");
 #endif
 
-    cpu_state *cpu = get_cpu_struct();
+    cpu_state *cpu = get_current_cpu_state();
     struct thread *p = cpu->current_process;
 
     if(p == p->next && p->prev == p){
@@ -70,10 +72,7 @@ struct thread *alloc_thread(void){
 
     t->kstack = kzalloc(8 * 4096);
 
-    t->pid = next_pid++;
-
     uint64_t *sp = (uint64_t*)((uint64_t)t->kstack + 8 * 4096);
-
     // Push the exit function (the thread will return to it when it finishes)
     *--sp = (uint64_t)thread_exit;
@@ -93,9 +92,9 @@ struct thread *add_thread(uint64_t *entry){
 
     assert(t != NULL && "Thread allocation failed!");
 
-    struct cpu_state *cpu = get_cpu_struct();
-    struct thread *head = get_cpu_struct()->head;
-    struct thread *base = get_cpu_struct()->base;
+    struct cpu_state *cpu 
= get_current_cpu_state();
+    struct thread *head = get_current_cpu_state()->head;
+    struct thread *base = get_current_cpu_state()->base;
 
     // Manage circley linked list
     if(base == NULL){
@@ -113,12 +112,26 @@ struct thread *add_thread(uint64_t *entry){
 
     t->context->rip = (uint64_t)entry;
 
+    t->pid = next_pid++;
+
     return t;
 }
 
+// This function determines to which CPU a new thread should be added
+kstatus spawn_thread(struct thread *thread){
+    (void)thread; /* TODO: enqueue on the chosen CPU once cross-CPU run queues exist */
+    uint64_t cpu_count = get_kinfo()->cpu_count;
+    for(uint16_t i = 0; i < cpu_count; i++){
+        if(get_cpu_state(i)->scheduler_initialized){
+            break; /* candidate CPU found; enqueueing not yet implemented */
+        }
+    }
+    return KERNEL_STATUS_ERROR; /* not yet implemented */
+}
+
 [[noreturn]] void sched(){
-    cpu_state *cpu = get_cpu_struct();
+    cpu_state *cpu = get_current_cpu_state();
     cpu->scheduler_initialized = true; // Allow us to get interrupts that schedule us
 
     for(;;){
 #ifdef __x86_64__
@@ -127,8 +139,12 @@ struct thread *add_thread(uint64_t *entry){
 
         struct thread *prev = cpu->current_process;
 
+        if(prev == NULL && cpu->head == NULL){
+            continue; /* NOTE(review): busy-spin; confirm interrupts get re-enabled inside the #ifdef block above, or this hangs with IF=0 */
+        }
+
         if(cpu->head == NULL){
-            cpu->current_process = idle;
+            continue;
         }else{
             cpu->current_process = cpu->head;
             cpu->head = cpu->head->next;
@@ -141,14 +157,16 @@ struct thread *add_thread(uint64_t *entry){
         }
 
         cpu->current_process->state = RUNNING;
-        switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
+        switch_context(&(get_current_cpu_state()->scheduler_context), get_current_cpu_state()->current_process->context);
     }
 }
 
+static atomic_flag scheduler_init_lock = ATOMIC_FLAG_INIT;
 void scheduler_init(){
+    acquire_spinlock(&scheduler_init_lock);
     assert(get_cpu_struct_initialized() && "CPU struct not initialized!");
 
-    cpu_state *cpu = get_cpu_struct();
+    cpu_state *cpu = get_current_cpu_state();
 
     if(cpu->current_process != NULL){
         kprintf("scheduler on CPU {d} already initialized!\n", cpu->id);
@@ -161,9 +179,11 @@ void scheduler_init(){
         assert(idle != NULL && "Failed to allocate idle task!");
     }
 
-    cpu->current_process = idle;
+    //cpu->current_process = idle;
 
-    
kprintf("scheduler on CPU {d} initialized!", cpu->id);
+    kprintf("scheduler on CPU {d} initialized!\n", cpu->id);
+
+    free_spinlock(&scheduler_init_lock);
 
     sched();
@@ -175,5 +195,5 @@ void yield(){
 #ifdef __x86_64__
     asm("cli");
 #endif
-    switch_context(&get_cpu_struct()->current_process->context, get_cpu_struct()->scheduler_context);
+    switch_context(&get_current_cpu_state()->current_process->context, get_current_cpu_state()->scheduler_context);
 }
\ No newline at end of file
diff --git a/src/smp.c b/src/smp.c
index adab05e..9c8efe3 100644
--- a/src/smp.c
+++ b/src/smp.c
@@ -19,17 +19,17 @@ extern void s_load_idt();
 extern void s_load_gdt();
 
 extern volatile struct limine_mp_request smp_request;
 
+static cpu_state *cpus;
+
+static cpu_state bsp_cpu;
+
 /* Returns the CPU structure for this particular CPU */
-cpu_state *get_cpu_struct(){
+cpu_state *get_current_cpu_state(){
     return (cpu_state*)rdmsr(GSBASE);
 }
 
-uint64_t get_cpu_count(){
-    if(smp_request.response != NULL){
-        return smp_request.response->cpu_count;
-    }
-
-    return 0;
+/* Returns the CPU structure for the CPU with the given id */
+cpu_state *get_cpu_state(int id){
+    return &cpus[id];
 }
 
 bool get_cpu_struct_initialized(){
@@ -59,41 +59,48 @@ void ap_init(struct limine_mp_info *smp_info){
     asm volatile(
        "movq %%cr3, %%rax\n\
-         movq %%rax, %%cr3\n"
+        movq %%rax, %%cr3\n"
        :
        :
        : "rax"
    );
 
-    cpu_state *cpu_struct = (cpu_state*)kzalloc(sizeof(cpu_state));
+    /* Point GSBASE at the per-CPU slot itself: a stack-local copy would
+       leave GSBASE dangling once ap_init's frame is reused, and updates
+       would never be visible through get_cpu_state(). */
+    cpu_state *cpu_struct = &cpus[smp_info->lapic_id]; /* NOTE(review): assumes LAPIC IDs are < cpu_count -- verify */
 
-    cpu_struct->id = smp_info->lapic_id;
+    cpu_struct->id = smp_info->lapic_id;
 
-    wrmsr(KERNELGSBASE, (uint64_t)cpu_struct);
-    wrmsr(GSBASE, (uint64_t)cpu_struct);
+    wrmsr(KERNELGSBASE, (uint64_t)cpu_struct);
+    wrmsr(GSBASE, (uint64_t)cpu_struct);
 
     /* Initialize APIC & APIC timer */
     ap_apic_init();
 
     free_spinlock(&ap_init_lock);
 
-    for(;;);
+    while(!bsp_cpu.scheduler_initialized){ /* NOTE(review): flag should be volatile/atomic, or this load may be hoisted out of the loop */
+        __builtin_ia32_pause();
+    }
 
     scheduler_init();
 }
 
-static cpu_state bsp_cpu;
-
 void smp_init(){
     struct limine_mp_response *smp_response = smp_request.response;
 
     kprintf("smp: {d} CPUs\n", 
smp_response->cpu_count);
 
-    for(uint64_t i = 0; i < smp_response->cpu_count; i++){
+    cpus = (cpu_state *)kzalloc(sizeof(cpu_state) * smp_response->cpu_count);
+
+    /* NOTE(review): starting at 1 assumes cpus[0] in the Limine MP response
+       is the BSP -- confirm against the Limine protocol before relying on it */
+    for(uint64_t i = 1; i < smp_response->cpu_count; i++){
         /* Pointer to smp_info is passed in RDI by Limine, so no need to pass any arguments here */
         smp_response->cpus[i]->goto_address = &ap_init;
     }
 
-    bsp_cpu.scheduler_context = (struct context*)kmalloc(sizeof(struct context));
+    bsp_cpu.scheduler_context = (struct context*)kzalloc(sizeof(struct context));
+
+    cpus[bsp_cpu.id] = bsp_cpu; /* NOTE(review): one-time snapshot; bsp_cpu and cpus[bsp_cpu.id] diverge afterwards -- consider pointing the BSP's GSBASE at cpus[bsp_cpu.id] instead */
 
     /* If one of the APs has halted, then halt the BSP */
     extern bool kernel_killed;
diff --git a/src/sys/time.c b/src/sys/time.c
index 008a538..ee90bbe 100644
--- a/src/sys/time.c
+++ b/src/sys/time.c
@@ -17,7 +17,7 @@ uint64_t get_timestamp_us(){
     }
 
     if(get_cpu_struct_initialized()){
-        return get_cpu_struct()->lapic_timer_ticks * 1000ULL;
+        return get_current_cpu_state()->lapic_timer_ticks * 1000ULL;
     }
 #endif
diff --git a/uacpi_kernel_api.c b/uacpi_kernel_api.c
index e900cbd..874a79c 100644
--- a/uacpi_kernel_api.c
+++ b/uacpi_kernel_api.c
@@ -206,7 +206,7 @@ void uacpi_kernel_free_event(uacpi_handle handle){
 }
 
 uacpi_thread_id uacpi_kernel_get_thread_id(void){
-    return get_cpu_struct();
+    return get_current_cpu_state(); /* NOTE(review): this is a per-CPU id, not per-thread; two threads on one CPU share it -- consider current_process */
 }
 
 uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 t){