diff --git a/include/scheduler/sched.h b/include/scheduler/sched.h index 2ccd202..607f367 100644 --- a/include/scheduler/sched.h +++ b/include/scheduler/sched.h @@ -19,6 +19,7 @@ struct thread { struct thread *prev; uint64_t *mem; uint64_t *kstack; + uint64_t *entry; proc_state state; uint16_t pid; struct context *context; diff --git a/include/smp.h b/include/smp.h index 340b5b8..36d3fa0 100644 --- a/include/smp.h +++ b/include/smp.h @@ -24,5 +24,6 @@ cpu_state *get_current_cpu_state(); cpu_state *get_cpu_state(int); uint64_t get_cpu_count(); void bsp_early_init(); -bool get_cpu_struct_initialized(); +cpu_state *get_bsp_cpu_state(); +bool get_cpu_state_initialized(); diff --git a/src/arch/amd64/hal/apic.c b/src/arch/amd64/hal/apic.c index 3e7c6d1..34dff79 100644 --- a/src/arch/amd64/hal/apic.c +++ b/src/arch/amd64/hal/apic.c @@ -47,10 +47,10 @@ uint32_t lapic_read_reg(uint32_t reg){ } void apic_sleep(uint64_t ms){ - uint64_t lapic_timer_ticks = get_current_cpu_struct()->lapic_timer_ticks; - uint64_t curcnt = get_current_cpu_struct()->lapic_timer_ticks; + uint64_t lapic_timer_ticks = get_current_cpu_state()->lapic_timer_ticks; + uint64_t curcnt = get_current_cpu_state()->lapic_timer_ticks; while (lapic_timer_ticks - curcnt < ms) { - lapic_timer_ticks = get_current_cpu_struct()->lapic_timer_ticks; + lapic_timer_ticks = get_current_cpu_state()->lapic_timer_ticks; } } @@ -88,7 +88,7 @@ void lapic_timer_init(int us){ void apic_init(void){ asm("cli"); - lapic_address = rdmsr(IA32_APIC_BASE_MSR) + get_kinfo()->hhdmoffset; + lapic_address = (rdmsr(IA32_APIC_BASE_MSR) & 0xFFFFF000) + get_kinfo()->hhdmoffset; lapic_ao_t *lapic_ao = (lapic_ao_t*) find_ics(0x5); // Local APIC Address Override @@ -107,7 +107,7 @@ void apic_init(void){ ioapic_init(); /* Start the timers for calibration of the APIC timer */ - timer_init(); + timer_init(); /* Start the APIC timer with 1us timer */ lapic_timer_init(1); @@ -128,8 +128,10 @@ void ap_apic_init(){ void apic_timer_handler(){ - 
if(get_cpu_struct_initialized()){ - get_current_cpu_struct()->lapic_timer_ticks++; + if(get_cpu_state_initialized()){ + get_current_cpu_state()->lapic_timer_ticks++; + }else{ + get_bsp_cpu_state()->lapic_timer_ticks++; } lapic_write_reg(LAPIC_EOI_REG, 0); diff --git a/src/arch/amd64/hal/idt.c b/src/arch/amd64/hal/idt.c index c1a9fcd..c0f93a2 100644 --- a/src/arch/amd64/hal/idt.c +++ b/src/arch/amd64/hal/idt.c @@ -240,8 +240,8 @@ void interrupt_handler(interrupt_frame *r){ return; } - if(r->int_no == 69 && get_cpu_struct_initialized() - && get_current_cpu_struct()->scheduler_initialized){ + if(r->int_no == 69 && get_cpu_state_initialized() + && get_current_cpu_state()->scheduler_initialized){ yield(); return; } diff --git a/src/arch/amd64/hal/tsc.c b/src/arch/amd64/hal/tsc.c index 3a5015b..b68b2a4 100644 --- a/src/arch/amd64/hal/tsc.c +++ b/src/arch/amd64/hal/tsc.c @@ -39,13 +39,10 @@ kstatus tsc_init(){ return KERNEL_STATUS_ERROR; } - kprintf("tsc: Invariant TSC found\n"); - /* Get the core crystal clock so we can determine TSC speed */ __get_cpuid(0x15, &unused, &unused, &core_crystal_clock, &unused); if(core_crystal_clock != 0){ - kprintf("cpuid 15h supported!\n"); /* Make it so that it ticks every millisecond */ core_crystal_clock *= 1000; @@ -60,8 +57,6 @@ kstatus tsc_init(){ core_crystal_clock = read2 - read1; } - kprintf("Core crystal clock: {d}\n", core_crystal_clock); - enable_tsc(); return KERNEL_STATUS_SUCCESS; diff --git a/src/lib/kprint.c b/src/lib/kprint.c index cedbcfa..e0bdd04 100644 --- a/src/lib/kprint.c +++ b/src/lib/kprint.c @@ -34,8 +34,8 @@ void klog(const char *func, const char *msg, ...){ void print_timestamp(struct flanterm_context *ft_ctx){ uint64_t time = get_timestamp_us(); char str[100]; - uint64_t seconds = time / 1000000ULL; - uint64_t millis = (time % 1000000ULL) / 1000ULL; + uint64_t seconds = time / 1000000ULL; + uint64_t millis = (time % 1000000ULL) / 1000ULL; str[0] = '['; str[1] = '\0'; diff --git a/src/lib/lock.c
b/src/lib/lock.c index 83b76dc..41e65be 100644 --- a/src/lib/lock.c +++ b/src/lib/lock.c @@ -29,7 +29,7 @@ struct mutex *init_mutex(){ kstatus try_mutex(struct mutex *mut){ if(!atomic_flag_test_and_set_explicit(&mut->lock, memory_order_acquire)){ - mut->holder = get_current_cpu_struct()->current_process; + mut->holder = get_current_cpu_state()->current_process; mut->locked = true; return KERNEL_MUTEX_ACQUIRED; } @@ -40,7 +40,7 @@ kstatus try_mutex(struct mutex *mut){ kstatus acquire_mutex(struct mutex *mut){ - if(get_current_cpu_struct()->current_process == mut->holder){ + if(get_current_cpu_state()->current_process == mut->holder){ klog(__func__, "Holder attempted to acquire mutex"); return KERNEL_STATUS_ERROR; } diff --git a/src/main.c b/src/main.c index 98043ad..beaa7ee 100644 --- a/src/main.c +++ b/src/main.c @@ -85,7 +85,6 @@ void _start(void){ bsp_early_init(); - klog("apic", "Initalizing APIC"); apic_init(); @@ -108,7 +107,6 @@ void _start(void){ klog("pci", "Getting le pci"); pci_init(); - scheduler_init(); death: diff --git a/src/scheduler/sched.c b/src/scheduler/sched.c index 4e4202b..e64de21 100644 --- a/src/scheduler/sched.c +++ b/src/scheduler/sched.c @@ -45,6 +45,9 @@ void thread_exit(){ #endif cpu_state *cpu = get_current_cpu_state(); + + cpu->process_count--; + struct thread *p = cpu->current_process; if(p == p->next && p->prev == p){ @@ -64,7 +67,7 @@ void thread_exit(){ } /* Setup a process structure */ -struct thread *alloc_thread(void){ +struct thread *alloc_thread(uint64_t *entry){ struct thread *t = ma_cache_alloc(thread_cache, 0); memset(t, 0, sizeof(struct thread)); @@ -82,19 +85,28 @@ struct thread *alloc_thread(void){ t->context = (struct context*)sp; t->context->rbp = (uint64_t)sp; + + t->entry = entry; + t->context->rip = (uint64_t)entry; return t; } -struct thread *add_thread(uint64_t *entry){ - struct thread *t = alloc_thread(); +kstatus add_thread(struct thread *t, struct cpu_state *cpu){ - assert(t != NULL && "Thread allocation 
failed!"); + if(t == NULL){ + klog(__func__, "Thread is null!"); + return KERNEL_STATUS_ERROR; + } - struct cpu_state *cpu = get_current_cpu_state(); - struct thread *head = get_current_cpu_state()->head; - struct thread *base = get_current_cpu_state()->base; + if(cpu == NULL){ + klog(__func__, "CPU is null!"); + return KERNEL_STATUS_ERROR; + } + + struct thread *head = cpu->head; + struct thread *base = cpu->base; // Manage circley linked list if(base == NULL){ @@ -110,27 +122,38 @@ struct thread *add_thread(uint64_t *entry){ cpu->head = t; } - t->context->rip = (uint64_t)entry; - - t->pid = next_pid++; - - return t; + t->pid = next_pid++; + return KERNEL_STATUS_SUCCESS; } // This function determines to which CPU a new thread should be added kstatus spawn_thread(struct thread *thread){ uint64_t cpu_count = get_kinfo()->cpu_count; - uint16_t i = 0; - for(i = 0; i < cpu_count; i++){ - if(get_cpu_state(i)->scheduler_initialized){ + + uint64_t min = UINT64_MAX; + struct cpu_state *min_cpu = NULL; + + for(uint16_t i = 0; i < cpu_count; i++){ + struct cpu_state *cpu = get_cpu_state(i); + if(cpu->process_count <= min){ + min = cpu->process_count; + min_cpu = cpu; + } } + if(min_cpu == NULL){ + return KERNEL_STATUS_ERROR; + } + add_thread(thread, min_cpu); + + min_cpu->process_count++; + + return KERNEL_STATUS_SUCCESS; } - [[noreturn]] void sched(){ - cpu_state *cpu = get_current_cpu_struct(); + cpu_state *cpu = get_current_cpu_state(); cpu->scheduler_initialized = true; // Allow us to get interrupts that schedule us for(;;){ #ifdef __x86_64__ @@ -139,34 +161,35 @@ kstatus spawn_thread(struct thread *thread){ struct thread *prev = cpu->current_process; - if(prev == NULL && cpu->head == NULL){ - continue; - } - - if(cpu->head == NULL){ - continue; + if(prev == NULL){ + if(cpu->head == NULL){ + cpu->current_process = idle; + goto end; + }else{ + cpu->current_process = cpu->head; + cpu->head = cpu->head->next; + } }else{ - cpu->current_process = cpu->head; - cpu->head = cpu->head->next; - } - - 
if(prev->state == ZOMBIE){ - ma_cache_dealloc(prev); - }else{ - prev->state = READY; + if(prev->state == ZOMBIE){ + ma_cache_dealloc(prev); + }else{ + prev->state = READY; + } } + + end: cpu->current_process->state = RUNNING; - switch_context(&(get_current_cpu_struct()->scheduler_context), get_current_cpu_struct()->current_process->context); + switch_context(&(get_current_cpu_state()->scheduler_context), get_current_cpu_state()->current_process->context); } } static atomic_flag scheduler_init_lock = ATOMIC_FLAG_INIT; void scheduler_init(){ acquire_spinlock(&scheduler_init_lock); - assert(get_cpu_struct_initialized() && "CPU struct not initialized!"); + assert(get_cpu_state_initialized() && "CPU struct not initialized!"); - cpu_state *cpu = get_current_cpu_struct(); + cpu_state *cpu = get_current_cpu_state(); if(cpu->current_process != NULL){ kprintf("scheduler on CPU {d} already initialized!\n", cpu->id); @@ -174,17 +197,16 @@ void scheduler_init(){ } if(cpu->id == get_kinfo()->bsp_id){ - idle = alloc_thread(); - idle->context->rip = (uint64_t)idle_task; + idle = alloc_thread((uint64_t*)idle_task); assert(idle != NULL && "Failed to allocate idle task!"); } - //cpu->current_process = idle; - kprintf("scheduler on CPU {d} initialized!\n", cpu->id); free_spinlock(&scheduler_init_lock); + spawn_thread(alloc_thread((uint64_t*)best_task)); + sched(); for(;;); @@ -195,5 +217,5 @@ void yield(){ #ifdef __x86_64__ asm("cli"); #endif - switch_context(&get_current_cpu_struct()->current_process->context, get_current_cpu_struct()->scheduler_context); + switch_context(&get_current_cpu_state()->current_process->context, get_current_cpu_state()->scheduler_context); } \ No newline at end of file diff --git a/src/smp.c b/src/smp.c index 9c8efe3..8fbc0e6 100644 --- a/src/smp.c +++ b/src/smp.c @@ -24,15 +24,24 @@ static cpu_state *cpus; static cpu_state bsp_cpu; /* Returns the CPU structure for this particular CPU */ -cpu_state *get_current_cpu_struct(){ +cpu_state 
*get_current_cpu_state(){ return (cpu_state*)rdmsr(GSBASE); } -cpu_state *get_cpu_struct(int id){ +cpu_state *get_bsp_cpu_state(){ + return &bsp_cpu; +} + +cpu_state *get_cpu_state(int id){ + + if(id == 0){ + return &bsp_cpu; + } + return &cpus[id]; } -bool get_cpu_struct_initialized(){ +bool get_cpu_state_initialized(){ if(rdmsr(GSBASE) < get_kinfo()->hhdmoffset){ return false; } @@ -96,14 +105,12 @@ void smp_init(){ } bsp_cpu.scheduler_context = (struct context*)kzalloc(sizeof(struct context)); - - cpus[bsp_cpu.id] = bsp_cpu; + cpus[bsp_cpu.id] = bsp_cpu; /* If one of the APs has halted, then halt the BSP */ extern bool kernel_killed; - if(kernel_killed == true){ - kkill(); - } + + assert(!kernel_killed && "Some APs failed to init!"); } @@ -113,7 +120,8 @@ void bsp_early_init(){ struct limine_mp_response *smp_response = smp_request.response; - bsp_cpu.id = smp_response->cpus[0]->lapic_id; + bsp_cpu.id = smp_response->cpus[0]->lapic_id; + wrmsr(KERNELGSBASE, (uint64_t)&bsp_cpu); wrmsr(GSBASE, (uint64_t)&bsp_cpu); } \ No newline at end of file diff --git a/src/sys/time.c b/src/sys/time.c index ee90bbe..fdf20c5 100644 --- a/src/sys/time.c +++ b/src/sys/time.c @@ -16,8 +16,10 @@ uint64_t get_timestamp_us(){ return tsc; } - if(get_cpu_struct_initialized()){ - return get_current_cpu_struct()->lapic_timer_ticks * 1000ULL; + if(get_cpu_state_initialized()){ + return get_current_cpu_state()->lapic_timer_ticks * 1000ULL; + }else{ + return get_bsp_cpu_state()->lapic_timer_ticks * 1000ULL; } #endif diff --git a/uacpi_kernel_api.c b/uacpi_kernel_api.c index 874a79c..e2d4e8b 100644 --- a/uacpi_kernel_api.c +++ b/uacpi_kernel_api.c @@ -206,7 +206,7 @@ void uacpi_kernel_free_event(uacpi_handle handle){ } uacpi_thread_id uacpi_kernel_get_thread_id(void){ - return get_current_cpu_struct(); + return get_current_cpu_state(); } uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 t){