Fix slab allocator

ssimnb 2026-04-07 09:10:53 +02:00
parent 3b08a6ce99
commit 0db98c358e
8 changed files with 185 additions and 314 deletions

View file

@@ -4,6 +4,6 @@
 void _kmalloc_init(void);
-void *kmalloc(size_t size);
-void *kzalloc(size_t size);
+void *kmalloc(size_t size) __attribute__ ((alloc_size (1)));
+void *kzalloc(size_t size) __attribute__ ((alloc_size (1)));
 kstatus kfree(void *addr);
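
The alloc_size attribute tells GCC and Clang that argument 1 is the size of the returned buffer, so overflowing accesses can be diagnosed at compile time. A minimal userspace sketch of what that buys (my_alloc is a hypothetical stand-in for kmalloc):

#include <stdlib.h>
#include <string.h>

void *my_alloc(size_t size) __attribute__ ((alloc_size (1)));
void *my_alloc(size_t size) { return malloc(size); }

int main(void) {
    char *p = my_alloc(8);
    memset(p, 0, 16); // at -O2, -Wstringop-overflow can now flag this 16-byte write into an 8-byte allocation
    free(p);
    return 0;
}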

View file

@@ -43,11 +43,16 @@ struct ma_cache {
     uint16_t flags;      // Not useful yet
     uint32_t num;        // Number of objects per slab
     uint32_t slabsize;   // How many pages does a single slab take up. Useful for objects > PAGE_SIZE
+    uint16_t color;      // Max color of the slab
+    uint16_t color_next; // Next color to use
+    uint16_t alignment;  // Alignment required of the objects. Default is 8 bytes if 0 is passed
     struct ma_slab *slabs_free;
     struct ma_slab *slabs_partial;
     struct ma_slab *slabs_used;
+    void (*constructor)(void *, size_t);
+    void (*destructor)(void *, size_t);
     atomic_flag lock;
     char name[KCACHE_NAME_LEN];
@@ -55,6 +60,6 @@ struct ma_cache {
 void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags);
 kstatus ma_cache_dealloc(void *object);
-struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t));
+struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, uint16_t align, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t));
 void cache_info(struct ma_cache *cache);
 void create_base_caches();
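
A hedged usage sketch of the widened signature: struct inode and inode_ctor are hypothetical stand-ins; only ma_cache_create itself comes from the header above.

struct inode { uint64_t ino; uint32_t refcount; };

// Runs once per object when its slab is built, so objects come out of the cache pre-initialized
static void inode_ctor(void *obj, size_t size) {
    (void)size;
    struct inode *in = obj;
    in->ino = 0;
    in->refcount = 0;
}

static struct ma_cache *inode_cache;

void inode_cache_init(void) {
    // align == 0 falls back to the 8-byte default documented on the alignment field
    inode_cache = ma_cache_create("inode", sizeof(struct inode), 0, 0, inode_ctor, NULL);
}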

View file

@@ -27,6 +27,8 @@ typedef char link_symbol_ptr[];
 #define SIZE_IN_PAGES(size) size/PAGE_SIZE
+#define UNLIKELY(c) (__builtin_expect(c, 0))
 struct kernel_info *get_kinfo();
 void initialize_kinfo();
 void kkill(void); // phase this out in favor of assert
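
For reference, how the new hint is meant to be used: mark the error path cold so the compiler keeps the hot path on the straight-line fall-through. (consume_packet is a made-up example function; a common refinement of the macro itself is __builtin_expect(!!(c), 0), which collapses non-boolean conditions to 0/1.)

int consume_packet(void *buf) {
    if (UNLIKELY(buf == NULL)) {
        return -1; // cold path, laid out out-of-line
    }
    /* hot path continues here */
    return 0;
}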

View file

@@ -7,25 +7,25 @@ struct ma_cache *kmalloc_caches[14] = {0};
 // Create various sizes of caches to be used by kmalloc
 void _kmalloc_init(void){
-    kmalloc_caches[0] = ma_cache_create("kmalloc16", 16, 0, NULL, NULL);
-    kmalloc_caches[1] = ma_cache_create("kmalloc32", 32, 0, NULL, NULL);
-    kmalloc_caches[2] = ma_cache_create("kmalloc64", 64, 0, NULL, NULL);
-    kmalloc_caches[3] = ma_cache_create("kmalloc128", 128, 0, NULL, NULL);
-    kmalloc_caches[4] = ma_cache_create("kmalloc256", 256, 0, NULL, NULL);
-    kmalloc_caches[5] = ma_cache_create("kmalloc512", 512, 0, NULL, NULL);
-    kmalloc_caches[6] = ma_cache_create("kmalloc1K", 1024, 0, NULL, NULL);
-    kmalloc_caches[7] = ma_cache_create("kmalloc4K", 4096, 0, NULL, NULL);
-    kmalloc_caches[8] = ma_cache_create("kmalloc8K", 8192, 0, NULL, NULL);
-    kmalloc_caches[9] = ma_cache_create("kmalloc32K", 32768, 0, NULL, NULL);
-    kmalloc_caches[10] = ma_cache_create("kmalloc64K", 65536, 0, NULL, NULL);
-    kmalloc_caches[11] = ma_cache_create("kmalloc131K", 131072, 0, NULL, NULL);
-    kmalloc_caches[12] = ma_cache_create("kmalloc524K", 524288, 0, NULL, NULL);
-    kmalloc_caches[13] = ma_cache_create("kmalloc1M", 1048576, 0, NULL, NULL);
+    kmalloc_caches[0] = ma_cache_create("kmalloc16", 16, 0, 8, NULL, NULL);
+    kmalloc_caches[1] = ma_cache_create("kmalloc32", 32, 0, 8, NULL, NULL);
+    kmalloc_caches[2] = ma_cache_create("kmalloc64", 64, 0, 8, NULL, NULL);
+    kmalloc_caches[3] = ma_cache_create("kmalloc128", 128, 0, 8, NULL, NULL);
+    kmalloc_caches[4] = ma_cache_create("kmalloc256", 256, 0, 8, NULL, NULL);
+    kmalloc_caches[5] = ma_cache_create("kmalloc512", 512, 0, 8, NULL, NULL);
+    kmalloc_caches[6] = ma_cache_create("kmalloc1K", 1024, 0, 8, NULL, NULL);
+    kmalloc_caches[7] = ma_cache_create("kmalloc4K", 4096, 0, 8, NULL, NULL);
+    kmalloc_caches[8] = ma_cache_create("kmalloc8K", 8192, 0, 8, NULL, NULL);
+    kmalloc_caches[9] = ma_cache_create("kmalloc32K", 32768, 0, 8, NULL, NULL);
+    kmalloc_caches[10] = ma_cache_create("kmalloc64K", 65536, 0, 8, NULL, NULL);
+    kmalloc_caches[11] = ma_cache_create("kmalloc131K", 131072, 0, 8, NULL, NULL);
+    kmalloc_caches[12] = ma_cache_create("kmalloc524K", 524288, 0, 8, NULL, NULL);
+    kmalloc_caches[13] = ma_cache_create("kmalloc1M", 1048576, 0, 8, NULL, NULL);
 }
 size_t sizes[14] = {16, 32, 64, 128, 256, 512, 1024, 4096, 8192, 32768, 65536, 131072, 524288, 1048756};
-void *kmalloc(size_t size){
+__attribute__((malloc)) void *kmalloc(size_t size) {
     if(size > 1048576){
         klog(__func__, "Attempted to allocate more than max size (1M)");
         return NULL;
@@ -52,6 +52,7 @@ void *kzalloc(size_t size){
     if(addr == NULL){
         return NULL;
     }
+    kprintf("addr: 0x{x}, size: {d}", addr, size);
     memset(addr, 0, size);
     return addr;
 }
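
The lookup these caches and the sizes[] table exist for is not part of this hunk; a sketch of what kmalloc presumably does with them, assuming only what is shown above: walk the table, pick the first size class that fits, and allocate from the matching cache.

void *kmalloc_sketch(size_t size) {
    for (int i = 0; i < 14; i++) {
        if (size <= sizes[i]) {
            return ma_cache_alloc(kmalloc_caches[i], 0);
        }
    }
    return NULL; // sizes above 1M were already rejected earlier
}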

View file

@@ -26,8 +26,8 @@ atomic_flag pmm_lock = ATOMIC_FLAG_INIT;
 void pmm_free(uint64_t *addr){
     acquire_spinlock(&pmm_lock);
     uint64_t *virt_addr = (uint64_t*)((uint64_t)addr+get_kinfo()->hhdmoffset);
-    /* Make the given page point to the previous free page */
+    /* Make the given page point to the previous free page */
     *virt_addr = (uint64_t)free_list;
     /* Make the free_list point to the newly freed page */
@@ -66,13 +66,8 @@ void pmm_init(){
     for(uint64_t i = 0; i < memmap_response->entry_count; i++){
         switch (entries[i]->type) {
             case LIMINE_MEMMAP_USABLE:
-                //kprintf("usable: base: 0x{x}, length: 0x{xn}", entries[i]->base, entries[i]->length);
                 mem_size += entries[i]->length;
                 break;
-            default:
-                ;
-                //kprintf("base: 0x{x}, length: 0x{xn}", entries[i]->base, entries[i]->length);
         }
     }
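
pmm_free threads freed pages onto an intrusive singly linked free list: the first 8 bytes of the freed page store the previous head, exactly as the two comments above describe. A self-contained model of the push, with the matching pop an allocator would perform:

static uint64_t *free_head = NULL;

static void freelist_push(uint64_t *page_virt) {
    *page_virt = (uint64_t)free_head; // the page now points to the previous free page
    free_head = page_virt;            // the head now points to the newly freed page
}

static uint64_t *freelist_pop(void) {
    uint64_t *page = free_head;
    if (page != NULL)
        free_head = (uint64_t *)*page; // follow the stored link
    return page;
}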

View file

@@ -1,3 +1,4 @@
+#include <assert.h>
 #include <stdatomic.h>
 #include <stddef.h>
 #include <stdint.h>
@@ -22,6 +23,61 @@ enum SLAB_STATE {
     USED
 };
+void _unlink_slab(struct ma_slab *slab, struct ma_slab **list){
+    if(*list == slab){
+        *list = slab->next;
+        if (slab->next) slab->next->prev = NULL;
+    }else{
+        if(slab->next) slab->next->prev = slab->prev;
+        if(slab->prev) slab->prev->next = slab->next;
+    }
+    slab->next = NULL;
+    slab->prev = NULL;
+}
+
+void _link_slab(struct ma_slab *slab, struct ma_slab **list){
+    if(*list){
+        slab->next = *list;
+        (*list)->prev = slab;
+    }
+    *list = slab;
+}
+
+void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){
+    struct ma_cache *cache = slab->cache;
+    struct ma_slab **list;
+    struct ma_slab **new_list;
+    if(slab->refcount == 0){
+        list = &cache->slabs_free;
+    }else if(slab->refcount == slab->cache->num){
+        list = &cache->slabs_used;
+    }else{
+        list = &cache->slabs_partial;
+    }
+    switch (newstate) {
+        case FREE:
+            new_list = &cache->slabs_free;
+            break;
+        case PARTIAL:
+            new_list = &cache->slabs_partial;
+            break;
+        case USED:
+            new_list = &cache->slabs_used;
+            break;
+        default:
+            klog(__func__, "Unknown new state!");
+            return;
+    }
+    _unlink_slab(slab, list);
+    _link_slab(slab, new_list);
+}
 // Gets free object from slab and handles stuff
 uint64_t *_ma_slab_get_free_obj(struct ma_slab *slab){
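
These three helpers replace the goto-based mover deleted further down. The discipline is plain head insertion plus O(1) unlink; a self-contained model of the same shape:

struct node { struct node *next, *prev; };

static void list_push(struct node *n, struct node **head) {
    n->prev = NULL;
    n->next = *head;
    if (*head) (*head)->prev = n;
    *head = n;
}

static void list_unlink(struct node *n, struct node **head) {
    if (*head == n) *head = n->next;
    if (n->next) n->next->prev = n->prev;
    if (n->prev) n->prev->next = n->next;
    n->next = n->prev = NULL;
}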
@@ -35,29 +91,19 @@ uint64_t *_ma_slab_get_free_obj(struct ma_slab *slab){
         return NULL;
     }
-    if(slab->free->next != NULL){
-        slab->free = slab->free->next;
-    }else{
-        slab->free = NULL;
-    }
+    slab->free = slab->free->next;
+    // Move the slab from the free to the partial list on the first time it's allocated from
     if(slab->refcount == 0){
-        if(slab->prev != NULL){
-            slab->prev->next = slab->next;
-        }else{
-            slab->cache->slabs_free = NULL;
-        }
-        /* If there is a partial slab head then make it the head and do other stuff */
-        if(slab->cache->slabs_partial != NULL){
-            slab->cache->slabs_partial->next = slab;
-            slab->prev = slab->cache->slabs_partial;
-        }
-        slab->next = NULL;
-        slab->cache->slabs_partial = slab;
+        _unlink_slab(slab, &slab->cache->slabs_free);
+        if(slab->cache->num == 1){
+            /* If max object number in slab is 1, then move the slab directly to
+             * used list to prevent bug with _ma_move_slab()
+             */
+            _link_slab(slab, &slab->cache->slabs_used);
+        }else{
+            _link_slab(slab, &slab->cache->slabs_partial);
+        }
     }
     slab->refcount++;
@@ -72,24 +118,34 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
     // Put the addresses in the slab structure into the bufctls
     if(kcache->objsize >= 512){
-        /* Here we store the bufctls seperately from the actual objects
-         *
-         */
-        slab_structure->free = (struct ma_bufctl*)(va_alloc_contigious_pages(1)); // Store the bufctls off-page
-        memset(slab_structure->free, 0, 4096);
+        // Here we store the bufctls separately from the actual objects
+        slab_structure->free = va_alloc_contigious_pages(1); // Store the bufctls off-page
+        memset(slab_structure->free, 0, PAGE_SIZE);
         uint64_t slabsize = kcache->slabsize;
-        void *mstart = va_alloc_contigious_pages(kcache->slabsize);
+        void *mstart = va_alloc_contigious_pages(kcache->slabsize) + kcache->color_next;
+        struct page *page;
         for(size_t j = 0; j < kcache->slabsize; j++){
-            get_page((void*)((uint64_t)mstart + j*PAGE_SIZE))->slab = slab_structure;
-            get_page((void*)((uint64_t)mstart + j*PAGE_SIZE))->bufctls = slab_structure->free;
+            page = get_page((void*)((uint64_t)mstart + j*PAGE_SIZE));
+            page->slab = slab_structure;
+            page->bufctls = slab_structure->free;
         }
         for(size_t i = 0; i < (PAGE_SIZE * slabsize)/kcache->objsize; i++){
-            ((struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i)))->startaddr = (size_t*)((uint64_t)mstart + i * kcache->objsize);
-            ((struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i)))->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i+1));
+            struct ma_bufctl *current_obj = ((struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i)));
+            current_obj->startaddr = (size_t*)((uint64_t)mstart + i * kcache->objsize);
+            if(i+1 < kcache->num){
+                current_obj->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i+1));
+            }else{
+                current_obj->next = NULL;
+            }
+            if(kcache->constructor){
+                kcache->constructor(current_obj->startaddr, kcache->objsize);
+            }
         }
     }else{
@@ -99,7 +155,7 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
          * Their startaddr is the same as the address of the bufctl, since the objects act as the bufctls
          */
-        slab_structure->free = va_alloc_contigious_pages(kcache->slabsize);
+        slab_structure->free = va_alloc_contigious_pages(kcache->slabsize) + kcache->color_next;
         get_page(slab_structure->free)->slab = slab_structure;
         get_page(slab_structure->free)->bufctls = slab_structure->free;
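
The two branches of _ma_alloc_slab give two layouts, both findable from any object address because every slab page is stamped with its metadata. A hedged summary:

// objsize <  512: on-page bufctls — the free objects themselves form the freelist, so a
//                 slab holds (slabsize*PAGE_SIZE - sizeof(struct ma_slab)) / objsize objects
// objsize >= 512: off-page bufctls — a separate page holds the ma_bufctl array
struct ma_slab *slab_of(void *object) {
    return get_page(object)->slab; // works for both layouts; mirrors addr_to_bufctl below
}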
@@ -107,182 +163,68 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
         uint64_t size = (kcache->objsize >= sizeof(struct ma_bufctl)) ? kcache->objsize : sizeof(struct ma_bufctl);
         for(size_t i = 0; i < kcache->num; i++){
-            ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->startaddr = (size_t*)((uint64_t)slab_structure->free + i * size);
-            serial_kprintf("{s}: Adding 0x{x} to freelist\n", kcache->name, (size_t*)((uint64_t)slab_structure->free + i * size));
+            struct ma_bufctl *current_obj = ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i));
+            current_obj->startaddr = (size_t*)((uint64_t)slab_structure->free + i * size);
             if(i+1 < kcache->num){
-                ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + size*(i+1));
+                current_obj->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + size*(i+1));
             }else{
-                ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->next = NULL;
+                current_obj->next = NULL;
             }
+            if(kcache->constructor){
+                kcache->constructor(current_obj->startaddr, kcache->objsize);
+            }
         }
-        //memcpy(get_page(slab_structure->free)->bufctls, slab_structure->free, kcache->slabsize * PAGE_SIZE);
     }
-    //asm("int $1");
-    if(kcache->slabs_free == NULL){
-        kcache->slabs_free = slab_structure;
-    }else{
-        // Change head
-        kcache->slabs_free->next = slab_structure;
-        slab_structure->prev = kcache->slabs_free;
-        kcache->slabs_free = slab_structure;
-    }
+    kcache->color_next = (kcache->color_next + kcache->alignment > kcache->color) ? 0 : kcache->color_next + kcache->alignment;
+    _link_slab(slab_structure, &kcache->slabs_free);
     slab_structure->cache = kcache;
     return KERNEL_STATUS_SUCCESS;
 }
-// TODO: fix this complicated POS
-void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){
-    struct ma_cache *cache = slab->cache;
-    struct ma_slab *sb = 0;
-    switch (newstate) {
-        case FREE:
-            if(cache->slabs_partial != NULL){
-                sb = cache->slabs_partial;
-                while(sb != NULL){
-                    if(sb == slab){
-                        goto free_common;
-                    }
-                    sb = sb->prev;
-                }
-            }
-            if(cache->slabs_used != NULL){
-                sb = cache->slabs_used;
-                while(sb != NULL){
-                    if(sb == slab){
-                        goto free_common;
-                    }
-                    sb = sb->prev;
-                }
-            }
-            return;
-        case PARTIAL:
-            if(cache->slabs_free != NULL){
-                sb = cache->slabs_free;
-                while(sb != NULL){
-                    if(sb == slab){
-                        goto partial_common;
-                    }
-                    sb = sb->prev;
-                }
-            }
-            if(cache->slabs_used != NULL){
-                sb = cache->slabs_used;
-                while(sb != NULL){
-                    if(sb == slab){
-                        goto partial_common;
-                    }
-                    sb = sb->prev;
-                }
-            }
-            return;
-        case USED:
-            if(cache->slabs_free != NULL){
-                sb = cache->slabs_free;
-                while(sb != NULL){
-                    if(sb == slab){
-                        goto used_common;
-                    }
-                    sb = sb->prev;
-                }
-            }
-            if(cache->slabs_partial != NULL){
-                sb = cache->slabs_partial;
-                while(sb != NULL){
-                    if(sb == slab){
-                        goto used_common;
-                    }
-                    sb = sb->prev;
-                }
-            }
-            return;
-    }
-    free_common:
-    slab->next = NULL;
-    slab->prev = NULL;
-    // Preserve the linkage
-    if(sb->prev != NULL){
-        if(sb->next != NULL){
-            sb->next->prev = sb->prev;
-        }
-        sb->prev->next = sb->next;
-    }
-    if(cache->slabs_free != NULL){
-        cache->slabs_free->next = slab;
-        slab->prev = cache->slabs_free;
-    }
-    cache->slabs_free = slab;
-    return;
-    partial_common:
-    slab->next = NULL;
-    slab->prev = NULL;
-    if(sb->prev != NULL){
-        if(sb->next != NULL){
-            sb->next->prev = sb->prev;
-        }
-        sb->prev->next = sb->next;
-    }
-    if(cache->slabs_partial != NULL){
-        cache->slabs_partial->next = slab;
-        slab->prev = cache->slabs_partial;
-    }
-    cache->slabs_partial = slab;
-    return;
-    used_common:
-    slab->next = NULL;
-    slab->prev = NULL;
-    if(sb->prev != NULL){
-        if(sb->next != NULL){
-            sb->next->prev = sb->prev;
-        }
-        sb->prev->next = sb->next;
-    }
-    if(cache->slabs_used != NULL){
-        cache->slabs_used->next = slab;
-        slab->prev = cache->slabs_used;
-    }
-    cache->slabs_used = slab;
-    return;
-}
-struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)){
+struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, uint16_t align, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)){
     acquire_spinlock(&caches_lock);
     struct ma_cache *kcache = (struct ma_cache*)va_alloc_contigious_pages(1);
-    memset(kcache, 0, 4096);
+    assert(kcache && "kcache is null!");
+    memset(kcache, 0, PAGE_SIZE);
     memcpy(kcache->name, name, 16);
-    kcache->slabsize = (size / PAGE_SIZE) + 1;
-    kcache->num = (4096 * kcache->slabsize - sizeof(struct ma_slab)) / ((size >= sizeof(struct ma_bufctl)) ? size : sizeof(struct ma_bufctl)); // Calculate the number of buffers in this slab
-    kcache->objsize = size;
+    kcache->slabsize = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+    if(size >= 512){
+        kcache->num = (PAGE_SIZE * kcache->slabsize) / size; // Calculate the number of buffers in this slab
+    }else{
+        kcache->num = (PAGE_SIZE * kcache->slabsize - sizeof(struct ma_slab)) / ((size >= sizeof(struct ma_bufctl)) ? size : sizeof(struct ma_bufctl)); // Calculate the number of buffers in this slab
+    }
+    kcache->objsize = (size > 16) ? size : sizeof(struct ma_bufctl);
     memset(&kcache->lock, 0, sizeof(atomic_flag));
+    kcache->constructor = constructor;
+    kcache->destructor = destructor;
+    // Calculate color
+    kcache->alignment = (align == 0) ? 8 : align; // We default to 8-byte alignment (SDM Volume 1, Chapter 4.1.1); change this once we support more than x86
+    if(size >= 512){
+        kcache->color = (kcache->slabsize * PAGE_SIZE) % kcache->objsize; // This is going to be the max color we can use
+    }else{
+        kcache->color = (PAGE_SIZE - sizeof(struct ma_slab)) % kcache->objsize; // This is going to be the max color we can use
+    }
+    kcache->color_next = 0; // Advanced by += align, wrapping once it passes color
     _ma_alloc_slab(kcache);
     if(caches != NULL){
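
To make the new coloring fields concrete, worked numbers under an assumed sizeof(struct ma_slab) of 64 and a hypothetical 80-byte on-page cache: color = (4096 - 64) % 80 = 32, and with the default 8-byte alignment color_next staggers where successive slabs place their first object, so same-index objects in different slabs stop landing on the same cache lines.

uint16_t color = 32, alignment = 8, color_next = 0;
for (int i = 0; i < 6; i++) {
    // same update _ma_alloc_slab applies; yields 8, 16, 24, 32, then wraps to 0
    color_next = (color_next + alignment > color) ? 0 : color_next + alignment;
}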
@@ -300,43 +242,31 @@ struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (
 void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags){
-    kprintf("Allocating from: {s}\n", kcache->name);
     acquire_spinlock(&kcache->lock);
     struct ma_slab *slab = NULL;
-    if(kcache->slabs_free == NULL){
-        if(kcache->slabs_partial == NULL){
-            _ma_alloc_slab(kcache);
-            slab = kcache->slabs_free;
-        }else{
-            slab = kcache->slabs_partial;
-        }
-    }else{
-        slab = kcache->slabs_free;
-    }
+    if(kcache->slabs_partial == NULL){
+        if(kcache->slabs_free == NULL){
+            _ma_alloc_slab(kcache);
+        }
+        slab = kcache->slabs_free;
+    }else{
+        slab = kcache->slabs_partial;
+    }
     uint64_t *addr = _ma_slab_get_free_obj(slab);
     // If there's no free object then allocate new slab
     if(addr == NULL){
-        slab->free = NULL;
-        if(kcache->slabs_partial->prev != NULL){
-            kcache->slabs_partial = kcache->slabs_partial->prev;
-        }else{
-            kcache->slabs_partial = NULL;
-        }
-        if(kcache->slabs_used != NULL){
-            kcache->slabs_used->next = slab;
-            slab->prev = kcache->slabs_used;
-            kcache->slabs_used = slab;
-        }else{
-            kcache->slabs_used = slab;
-        }
+        if(kcache->num > 1){
+            _ma_move_slab(slab, USED);
+        }
         _ma_alloc_slab(kcache);
-        addr = _ma_slab_get_free_obj(kcache->slabs_free);
+        slab = kcache->slabs_free;
+        addr = _ma_slab_get_free_obj(slab);
     }
     free_spinlock(&kcache->lock);
@@ -345,65 +275,6 @@ void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags){
 }
-void cache_info(struct ma_cache *cache){
-    kprintf("name: {s}\n", cache->name);
-    kprintf("objsize: {d}\n", cache->objsize);
-    kprintf("num: {d}\n", cache->num);
-    kprintf("slabsize: {d}\n", cache->slabsize);
-    int slabsfreecnt = 0;
-    if(cache->slabs_free == NULL){
-        kprintf("slabsfree: 0\n");
-    }else{
-        if(cache->slabs_free->prev == NULL){
-            kprintf("slabsfree: 1\n");
-        }else{
-            struct ma_slab *slab = cache->slabs_free;
-            while(slab->prev != NULL){
-                slab = slab->prev;
-                slabsfreecnt++;
-            }
-            kprintf("slabsfree : {d}\n", slabsfreecnt);
-        }
-    }
-    int slabspartcnt = 0;
-    if(cache->slabs_partial == NULL){
-        kprintf("slabspartial: 0\n");
-    }else{
-        if(cache->slabs_partial->prev == NULL){
-            kprintf("slabspartial: 1\n");
-        }else{
-            struct ma_slab *slab = cache->slabs_partial;
-            while(slab->prev != NULL){
-                slab = slab->prev;
-                slabspartcnt++;
-            }
-            kprintf("slabspartial: {d}\n", slabspartcnt+1);
-        }
-    }
-    int slabsfullcnt = 0;
-    if(cache->slabs_used == NULL){
-        kprintf("slabsused: 0\n");
-    }else{
-        if(cache->slabs_used->prev == NULL){
-            kprintf("slabsused: 1\n");
-        }else{
-            struct ma_slab *slab = cache->slabs_used;
-            while(slab->prev != NULL){
-                slab = slab->prev;
-                slabsfullcnt++;
-            }
-            kprintf("slabsused : {d}\n", slabsfullcnt);
-        }
-    }
-}
 struct ma_bufctl *addr_to_bufctl(void *object){
     struct ma_slab *slab = get_page(object)->slab;
@@ -422,12 +293,7 @@ struct ma_bufctl *addr_to_bufctl(void *object){
     }
     for(size_t i = 0; i < slab->cache->num; i++){
-        if((bufs + i)->startaddr != 0){
-            kprintf("addr_to_bufctl: we're looking at 0x{x}\n", (bufs + i)->startaddr);
-        }
         if((bufs + i)->startaddr == object){
-            //kprintf("addr_to_bufctl: we're looking at 0x{x}\n", (bufs + i)->startaddr);
             return (bufs + i);
         }
     }
@@ -444,7 +310,7 @@ kstatus ma_cache_dealloc(void *object){
         return KERNEL_STATUS_ERROR;
     }
-    acquire_spinlock(&slab->lock);
+    acquire_spinlock(&slab->cache->lock);
     struct ma_bufctl *buf = addr_to_bufctl(object);
@@ -464,7 +330,7 @@ kstatus ma_cache_dealloc(void *object){
         _ma_move_slab(slab, FREE);
     }
-    free_spinlock(&slab->lock);
+    free_spinlock(&slab->cache->lock);
     return KERNEL_STATUS_SUCCESS;
@@ -472,6 +338,6 @@ kstatus ma_cache_dealloc(void *object){
 extern struct ma_cache *thread_cache;
 extern struct ma_cache *mutex_cache;
 void create_base_caches(){
-    thread_cache = ma_cache_create("thread", sizeof(struct thread), 0, NULL, NULL);
-    mutex_cache = ma_cache_create("mutex", sizeof(struct mutex), 0, NULL, NULL);
+    thread_cache = ma_cache_create("thread", sizeof(struct thread), 0, 0, NULL, NULL);
+    mutex_cache = ma_cache_create("mutex", sizeof(struct mutex), 0, 0, NULL, NULL);
 }
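
An end-to-end sketch of the API as it now stands, using the thread cache created above:

void example(void) {
    struct thread *t = ma_cache_alloc(thread_cache, 0);
    if (t == NULL)
        return;
    /* ... initialize and use the thread ... */
    ma_cache_dealloc(t); // finds the owning slab via get_page(t)->slab and now serializes on the cache-wide lock fixed above
}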

View file

@@ -339,7 +339,7 @@ void *va_alloc_contigious_pages(size_t pages){
 }
-/* Maps pages from phys_addr to phys_addr+size into the kernels address space */
+/* Maps pages from phys_addr to phys_addr+size into the kernels address space (phys_addr + hhdmoffset) */
 void kmap_pages(void *phys_addr, uint64_t size, uint64_t flags){
     for(uint64_t i = 0; i < size; i++){
         vmm_map_page(kernel_page_map, (uint64_t)phys_addr + get_kinfo()->hhdmoffset + (i * PAGE_SIZE), (uint64_t)phys_addr + (i * PAGE_SIZE), PTE_BIT_PRESENT | flags);
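
The clarified comment spells out the higher-half direct map convention: physical address P is visible at P + hhdmoffset. Two helpers that convention implies (hypothetical names, assuming only get_kinfo() as used above):

static inline void *phys_to_virt(uint64_t phys) {
    return (void *)(phys + get_kinfo()->hhdmoffset);
}

static inline uint64_t virt_to_phys(void *virt) {
    return (uint64_t)virt - get_kinfo()->hhdmoffset;
}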

View file

@@ -161,22 +161,24 @@ kstatus spawn_thread(struct thread *thread){
     struct thread *prev = cpu->current_process;
-    if(prev == NULL){
-        if(cpu->head == NULL){
-            cpu->current_process = idle;
-            goto end;
-        }else{
-            cpu->current_process = cpu->head;
-            cpu->head = cpu->head->next;
-        }
-    }else{
+    if (prev != NULL) {
         if (prev->state == ZOMBIE) {
             ma_cache_dealloc(prev);
-        }else{
+        }else if(prev->state == RUNNING){
             prev->state = READY;
-            cpu->current_process = prev->next;
-            goto end;
         }
     }
+    if(cpu->head != NULL){
+        cpu->current_process = cpu->head;
+        cpu->head = cpu->head->next;
+    } else {
+        cpu->current_process = idle;
+    }
 end:
     cpu->current_process->state = RUNNING;
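
The rewrite splits "retire prev" from "pick next". The retire rule it implements, as a hedged sketch (WAITING is a hypothetical extra state, included to show why the else-if matters: only a RUNNING thread should be requeued as READY):

switch (prev->state) {
    case ZOMBIE:  ma_cache_dealloc(prev); break; // reclaim finished threads
    case RUNNING: prev->state = READY;    break; // preempted: eligible to run again
    default:      break;                         // blocked states keep their state
}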
@@ -191,7 +193,7 @@ void scheduler_init(){
     cpu_state *cpu = get_current_cpu_state();
-    if(cpu->current_process != NULL){
+    if(UNLIKELY(cpu->current_process != NULL)){
         kprintf("scheduler on CPU {d} already initialized!\n", cpu->id);
         return;
     }