Almost finished scheduler, switched to circular linked list

This commit is contained in:
ssimnb 2026-02-09 06:33:12 +01:00
parent edfbfdad14
commit 4c7ecb4012
10 changed files with 124 additions and 89 deletions

View file

@ -126,14 +126,14 @@ void ap_apic_init(){
} }
/* LAPIC timer interrupt handler: per-CPU tick accounting.
 * Removed the commented-out debug print that was left in the diff. */
void apic_timer_handler(){
    if(get_cpu_struct_initialized()){
        get_cpu_struct()->lapic_timer_ticks++;
    }
    // Signal end-of-interrupt LAST, after all handler work is done
    // (the commit deliberately moved the EOI write below the tick update).
    lapic_write_reg(LAPIC_EOI_REG, 0);
}
void apic_send_ipi(uint8_t dest_field, uint8_t dest_shorthand, uint8_t trigger, uint8_t level, uint8_t status, uint8_t destination, uint8_t delivery_mode, uint8_t vector){ void apic_send_ipi(uint8_t dest_field, uint8_t dest_shorthand, uint8_t trigger, uint8_t level, uint8_t status, uint8_t destination, uint8_t delivery_mode, uint8_t vector){

View file

@@ -239,10 +239,7 @@ void interrupt_handler(interrupt_frame *r){
     if(r->int_no == 69 && get_cpu_struct_initialized()
        && get_cpu_struct()->scheduler_initialized){
-        if(get_cpu_struct()->current_process->state == RUNNING){
-            enter_scheduler();
-        }
+        yield();
     }
     if(r->int_no == 70){

View file

@ -65,8 +65,6 @@ void ap_init(struct limine_smp_info *smp_info){
memset(cpu_struct, 0, sizeof(cpu_state)); memset(cpu_struct, 0, sizeof(cpu_state));
cpu_struct->lapic_id = smp_info->lapic_id; cpu_struct->lapic_id = smp_info->lapic_id;
cpu_struct->scheduler_context = (context*)kmalloc(sizeof(context));
wrmsr(KERNELGSBASE, (uint64_t)cpu_struct); wrmsr(KERNELGSBASE, (uint64_t)cpu_struct);
wrmsr(GSBASE, (uint64_t)cpu_struct); wrmsr(GSBASE, (uint64_t)cpu_struct);
@ -102,7 +100,7 @@ void smp_init(){
cpu_state *cpu_struct = (cpu_state*)kmalloc(sizeof(cpu_state)); cpu_state *cpu_struct = (cpu_state*)kmalloc(sizeof(cpu_state));
cpu_struct->lapic_id = smp_response->cpus[0]->lapic_id; cpu_struct->lapic_id = smp_response->cpus[0]->lapic_id;
cpu_struct->scheduler_context = (context*)kmalloc(sizeof(context)); cpu_struct->scheduler_context = (struct context*)kmalloc(sizeof(struct context));
wrmsr(KERNELGSBASE, (uint64_t)cpu_struct); wrmsr(KERNELGSBASE, (uint64_t)cpu_struct);
wrmsr(GSBASE, (uint64_t)cpu_struct); wrmsr(GSBASE, (uint64_t)cpu_struct);

View file

@ -10,10 +10,12 @@
/* Per-CPU state, reachable via GSBASE/KERNELGSBASE (see smp.c). */
typedef struct cpu_state {
    uint32_t lapic_id;                  // Local APIC id of this core
    uint64_t lapic_timer_ticks;         // Incremented by apic_timer_handler
    struct thread *head;                // Most recently inserted thread
    struct thread *base;                // First thread of the circular run list
    struct thread *current_process;     // Thread currently running on this core
    uint16_t process_count;
    struct context *scheduler_context;  // Saved register state of sched()
    uint64_t *scheduler_stack;          // Backing stack for the scheduler context
    bool scheduler_initialized;         // True once timer-driven scheduling may run
}cpu_state;

View file

@@ -106,6 +106,8 @@ void _start(void){
     _kmalloc_init();
+    create_base_caches();
     klog("smp", "Starting APs");
     smp_init();

View file

@ -3,7 +3,7 @@
#include <kprint.h> #include <kprint.h>
#include <kmath.h> #include <kmath.h>
#include <string.h> #include <string.h>
struct ma_kcache *kmalloc_caches[14] = {0}; struct ma_cache *kmalloc_caches[14] = {0};
// Create various sizes of caches to be used by kmalloc // Create various sizes of caches to be used by kmalloc
void _kmalloc_init(void){ void _kmalloc_init(void){

View file

@ -6,11 +6,12 @@
#include "vmm.h" #include "vmm.h"
#include "page.h" #include "page.h"
#include "slab.h" #include "slab.h"
#include "../scheduler/sched.h"
#include <kprint.h> #include <kprint.h>
#include <neobbo.h> #include <neobbo.h>
#include <lock.h> #include <lock.h>
struct ma_kcache *caches = NULL; struct ma_cache *caches = NULL;
atomic_flag caches_lock = ATOMIC_FLAG_INIT; atomic_flag caches_lock = ATOMIC_FLAG_INIT;
@ -65,7 +66,7 @@ uint64_t *_ma_slab_get_free_obj(struct ma_slab *slab){
} }
kstatus _ma_alloc_slab(struct ma_kcache *kcache){ kstatus _ma_alloc_slab(struct ma_cache *kcache){
struct ma_slab *slab_structure = (struct ma_slab*)va_alloc_contigious_pages(1); struct ma_slab *slab_structure = (struct ma_slab*)va_alloc_contigious_pages(1);
memset(slab_structure, 0, PAGE_SIZE); memset(slab_structure, 0, PAGE_SIZE);
@ -133,7 +134,7 @@ kstatus _ma_alloc_slab(struct ma_kcache *kcache){
void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){ void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){
struct ma_kcache *cache = slab->cache; struct ma_cache *cache = slab->cache;
struct ma_slab *sb = 0; struct ma_slab *sb = 0;
switch (newstate) { switch (newstate) {
case FREE: case FREE:
@ -265,11 +266,11 @@ void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){
} }
struct ma_kcache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)){ struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)){
acquire_spinlock(&caches_lock); acquire_spinlock(&caches_lock);
struct ma_kcache *kcache = (struct ma_kcache*)va_alloc_contigious_pages(1); struct ma_cache *kcache = (struct ma_cache*)va_alloc_contigious_pages(1);
memset(kcache, 0, 4096); memset(kcache, 0, 4096);
memcpy(kcache->name, name, 16); memcpy(kcache->name, name, 16);
@ -293,7 +294,7 @@ struct ma_kcache *ma_cache_create(char *name, size_t size, uint32_t flags, void
} }
void *ma_cache_alloc(struct ma_kcache *kcache, uint32_t flags){ void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags){
acquire_spinlock(&kcache->lock); acquire_spinlock(&kcache->lock);
@ -339,7 +340,7 @@ void *ma_cache_alloc(struct ma_kcache *kcache, uint32_t flags){
} }
void cache_info(struct ma_kcache *cache){ void cache_info(struct ma_cache *cache){
kprintf("name: {s}\n", cache->name); kprintf("name: {s}\n", cache->name);
kprintf("objsize: {d}\n", cache->objsize); kprintf("objsize: {d}\n", cache->objsize);
kprintf("num: {d}\n", cache->num); kprintf("num: {d}\n", cache->num);
@ -454,3 +455,7 @@ kstatus ma_cache_dealloc(void *object){
return KERNEL_STATUS_SUCCESS; return KERNEL_STATUS_SUCCESS;
} }
extern struct ma_cache *thread_cache;
/* Create the statically known kernel object caches.
 * Called once from _start(), after _kmalloc_init() and before smp_init(). */
void create_base_caches(){
    thread_cache = ma_cache_create("thread", sizeof(struct thread), 0, NULL, NULL);
    if(thread_cache == NULL){
        // Without this cache alloc_thread() would dereference NULL later;
        // fail loudly here instead of silently.
        kprintf("create_base_caches: failed to create thread cache\n");
    }
}

View file

@ -14,7 +14,7 @@ struct ma_bufctl {
}; };
// ADD COLORING // ADD COLORING
struct ma_slab { struct ma_slab {
struct ma_kcache *cache; struct ma_cache *cache;
struct ma_slab *next; struct ma_slab *next;
struct ma_slab *prev; struct ma_slab *prev;
@ -32,12 +32,12 @@ struct ma_objref {
struct ma_objref *prev; struct ma_objref *prev;
void *addr; // Addr of the object void *addr; // Addr of the object
struct ma_slab *slab; // The slab which the obj belongs to struct ma_slab *slab; // The slab which the obj belongs to
struct ma_kcache *kcache; // The cache which the obj belongs to struct ma_cache *kcache; // The cache which the obj belongs to
}; };
struct ma_kcache { struct ma_cache {
struct ma_kcache *next; struct ma_cache *next;
struct ma_kcache *prev; struct ma_cache *prev;
uint32_t objsize; // Size of the object which the cache stores uint32_t objsize; // Size of the object which the cache stores
uint16_t flags; // Not useful yet uint16_t flags; // Not useful yet
@ -53,7 +53,8 @@ struct ma_kcache {
char name[KCACHE_NAME_LEN]; char name[KCACHE_NAME_LEN];
}; };
void *ma_cache_alloc(struct ma_kcache *kcache, uint32_t flags); void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags);
kstatus ma_cache_dealloc(void *object); kstatus ma_cache_dealloc(void *object);
struct ma_kcache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)); struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t));
void cache_info(struct ma_kcache *cache); void cache_info(struct ma_cache *cache);
void create_base_caches();

View file

@ -1,14 +1,16 @@
#include <kprint.h> #include <kprint.h>
#include <stdint.h> #include <stdint.h>
#include <string.h>
#include <neobbo.h> #include <neobbo.h>
#include "../hal/smp.h" #include "../hal/smp.h"
#include <error.h> #include <error.h>
#include <string.h>
#include "../mm/kmalloc.h" #include "../mm/kmalloc.h"
#include "../mm/slab.h"
#include "sched.h" #include "sched.h"
extern void switch_context(context **old, context *new); extern void switch_context(struct context **old, struct context *new);
struct ma_cache *thread_cache;
#define QUANTUM_US 10000 #define QUANTUM_US 10000
@ -21,45 +23,66 @@ void idle_task(){
/* Demo thread body: proves the scheduler reached this thread, then
 * returns (falling through to thread_exit via the stacked return address). */
void test_task(){
    kprintf("Hello world from scheduled task!\n");
}
/* Second demo thread body, used to observe round-robin interleaving. */
void best_task(){
    kprintf("Hello world I am best\n");
}
void thread_exit(){
struct thread *p = get_cpu_struct()->current_process;
// Remove process from circular linked list
p->prev->next = p->next;
ma_cache_dealloc(p); // KILL the thread
sched();
} }
/* Setup a process structure */ /* Setup a process structure */
proc *alloc_process(void){ struct thread *alloc_thread(void){
proc *proc_list = get_cpu_struct()->process_list; struct thread *head = get_cpu_struct()->head;
struct thread *base = get_cpu_struct()->base;
for(uint64_t i = 0; i < PROC_MAX; i++){ struct thread *t = ma_cache_alloc(thread_cache, 0);
if(proc_list[i].state == UNUSED){ memset(t, 0, sizeof(struct thread));
proc *p = &proc_list[i];
p->state = READY; if(base == NULL){
base = t;
p->kstack = kzalloc(8 * 4096); head = base;
}else{
p->pid = next_pid++; head->next = t;
t->prev = head;
uint64_t *sp = (uint64_t*)((uint64_t)p->kstack + 8 * 4096); t->next = base; // Circular linked list
head = t;
// Allocate space for context
sp -= sizeof(context)/sizeof(uint64_t);
p->context = (context*)sp;
p->context->rbp = (uint64_t)p->kstack;
return p;
}
} }
klog(__func__, "Couldnt find free process!!!!!!\n"); t->state = READY;
return NULL; t->kstack = kzalloc(8 * 4096);
t->pid = next_pid++;
uint64_t *sp = (uint64_t*)((uint64_t)t->kstack + 8 * 4096);
// Push the exit function (the thread will return to it when it finishes)
*--sp = (uint64_t)thread_exit;
// Allocate space for context
sp -= sizeof(struct context)/sizeof(uint64_t);
t->context = (struct context*)sp;
t->context->rbp = (uint64_t)sp;
return t;
} }
proc *add_process(uint64_t *entry){
proc *proc = alloc_process(); struct thread *add_process(uint64_t *entry){
struct thread *proc = alloc_thread();
if (proc == NULL) { if (proc == NULL) {
klog(__func__, "proc == null!"); klog(__func__, "proc == null!");
@ -73,55 +96,59 @@ proc *add_process(uint64_t *entry){
return proc; return proc;
} }
[[noreturn]] void sched(){
cpu_state *cpu = get_cpu_struct();
asm("cli");
struct thread *prev = cpu->current_process;
prev->state = READY;
cpu->current_process = prev->next;
cpu->current_process->state = RUNNING;
asm("sti");
switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
}
void scheduler_init(){ void scheduler_init(){
if(!get_cpu_struct_initialized()){ if(!get_cpu_struct_initialized()){
klog(__func__, "CPU struct not initialized!"); klog(__func__, "CPU struct not initialized!");
kkill(); kkill();
} }
cpu_state *state = get_cpu_struct(); cpu_state *cpu = get_cpu_struct();
if(state->current_process != NULL){ if(cpu->current_process != NULL){
kprintf("scheduler on CPU {d} already initialized!\n", state->lapic_id); kprintf("scheduler on CPU {d} already initialized!\n", cpu->lapic_id);
return; return;
} }
proc *proc_list = state->process_list; struct thread *idle = add_process((uint64_t*)idle_task);
memset(proc_list, 0, sizeof(proc) * 512);
proc *idle = add_process((uint64_t*)idle_task);
if(idle == NULL){ if(idle == NULL){
klog(__func__, "Failed to allocate idle task"); klog(__func__, "Failed to allocate idle task");
kkill(); kkill();
} }
state->current_process = idle; cpu->current_process = idle;
cpu->scheduler_stack = kzalloc(4096);
cpu->scheduler_context = (struct context*)((uint64_t)cpu->scheduler_stack + 4096);
cpu->scheduler_context->rbp = (uint64_t)cpu->scheduler_context;
cpu->scheduler_context->rip = (uint64_t)sched;
add_process((uint64_t*)test_task); add_process((uint64_t*)test_task);
add_process((uint64_t *)best_task); add_process((uint64_t *)best_task);
state->scheduler_initialized = true; // Initialize scheduler -> we will now get timer interrupts to switch us into sched()
cpu->scheduler_initialized = true;
int i = 0;
for(;;){
for(; i < PROC_MAX; i++){
if(proc_list[i].state == READY){
asm("sti");
proc *prev = state->current_process;
prev->state = READY;
state->current_process = &proc_list[i];
state->current_process->state = RUNNING;
switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
}
}
i = 0;
}
} }
void enter_scheduler(){
void yield(){
switch_context(&get_cpu_struct()->current_process->context, get_cpu_struct()->scheduler_context); switch_context(&get_cpu_struct()->current_process->context, get_cpu_struct()->scheduler_context);
} }

View file

@ -9,20 +9,23 @@ typedef enum proc_state {
UNUSED = 0 UNUSED = 0
}proc_state; }proc_state;
/* Saved register state for a context switch: the callee-saved registers
 * plus the return address, in EXACTLY the order switch_context (assembly)
 * pushes and pops them — do not reorder fields. */
struct context {
    uint64_t r15, r14, r13, r12, rbp, rbx, rip;
};
/* A kernel thread, linked into its CPU's circular run list. */
struct thread {
    struct thread *next;     // Next thread in the circular run list
    struct thread *prev;     // Previous thread in the list
    uint64_t *mem;
    uint64_t *kstack;        // Kernel stack base (kzalloc'd in alloc_thread)
    proc_state state;        // READY / RUNNING / UNUSED
    uint16_t pid;
    struct context *context; // Saved registers; lives near the top of kstack
};
void scheduler_init(); void scheduler_init();
void enter_scheduler(); [[noreturn]] void sched();
void yield();
#define PROC_MAX 512 // Max number of processes #define PROC_MAX 512 // Max number of processes