// neobbo/src/mm/slab.c
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "error.h"
#include "vmm.h"
#include "page.h"
#include "slab.h"
#include <kprint.h>
#include <neobbo.h>
#include <lock.h>
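// Global list of caches (newest cache at the head, linked through prev) and its lock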
struct ma_kcache *caches = NULL;
atomic_flag caches_lock = ATOMIC_FLAG_INIT;
enum SLAB_STATE {
    FREE = 0,
    PARTIAL,
    USED
};
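/* Each cache keeps three doubly linked slab lists (free, partial, used). The
 * slabs_* pointer is the most recently pushed slab; its next is NULL and the
 * rest of the list is reached by following prev. */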
// Pops a free object off the slab's free list; on the first allocation from a
// slab, moves it from the cache's free list to the partial list
uint64_t *_ma_slab_get_free_obj(struct ma_slab *slab){
    if(slab->free == NULL){
        return NULL;
    }
    uint64_t *addr = slab->free->startaddr;
    if(addr == NULL){
        return NULL;
    }
    slab->free = slab->free->next;
    // Move the slab from the free to the partial list the first time it's allocated from
    if(slab->refcount == 0){
        // Unlink from the free list, fixing up the head if this slab is the head
        if(slab->next != NULL){
            slab->next->prev = slab->prev;
        }else{
            slab->cache->slabs_free = slab->prev;
        }
        if(slab->prev != NULL){
            slab->prev->next = slab->next;
        }
        // Push onto the head of the partial list
        slab->prev = slab->cache->slabs_partial;
        if(slab->cache->slabs_partial != NULL){
            slab->cache->slabs_partial->next = slab;
        }
        slab->next = NULL;
        slab->cache->slabs_partial = slab;
    }
    slab->refcount++;
    return addr;
}
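// Allocates a new slab (slab header, bufctls and object memory) for kcache and
// pushes it onto the cache's free list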
kstatus _ma_alloc_slab(struct ma_kcache *kcache){
    struct ma_slab *slab_structure = (struct ma_slab*)va_alloc_contigious_pages(1);
    memset(slab_structure, 0, PAGE_SIZE);
    // Put the addresses of the objects into the bufctls
    if(kcache->objsize >= 512){
        // Store the bufctls off-page, on their own page
        slab_structure->free = (struct ma_bufctl*)va_alloc_contigious_pages(1);
        memset(slab_structure->free, 0, PAGE_SIZE);
        void *mstart = va_alloc_contigious_pages(kcache->slabsize);
        for(size_t j = 0; j < kcache->slabsize; j++){
            get_page((void*)((uint64_t)mstart + j*PAGE_SIZE))->slab = slab_structure;
            get_page((void*)((uint64_t)mstart + j*PAGE_SIZE))->bufctls = slab_structure->free;
        }
        // Only kcache->num objects are tracked per slab, so initialise exactly that many bufctls
        for(size_t i = 0; i < kcache->num; i++){
            struct ma_bufctl *buf = &slab_structure->free[i];
            buf->startaddr = (size_t*)((uint64_t)mstart + i * kcache->objsize);
            buf->next = (i + 1 < kcache->num) ? &slab_structure->free[i + 1] : NULL;
        }
    }else{
        /* In this case the objects act as bufctl structures. Small downside: there will always be a
         * maximum of 252 objects per slab, no matter the size of the object, since each object has to
         * be large enough to hold a bufctl structure (16 bytes).
         *
         * Their startaddr is the same as the address of the bufctl, since the objects act as the bufctls.
         */
        slab_structure->free = va_alloc_contigious_pages(kcache->slabsize);
        get_page(slab_structure->free)->slab = slab_structure;
        get_page(slab_structure->free)->bufctls = slab_structure->free;
        uint64_t size = (kcache->objsize >= sizeof(struct ma_bufctl)) ? kcache->objsize : sizeof(struct ma_bufctl);
        for(size_t i = 0; i < kcache->num; i++){
            struct ma_bufctl *buf = (struct ma_bufctl*)((uint64_t)slab_structure->free + size*i);
            buf->startaddr = (size_t*)((uint64_t)slab_structure->free + i * size);
            buf->next = (i + 1 < kcache->num) ? (struct ma_bufctl*)((uint64_t)slab_structure->free + size*(i+1)) : NULL;
        }
    }
    // Push the new slab onto the head of the cache's free list
    if(kcache->slabs_free != NULL){
        kcache->slabs_free->next = slab_structure;
        slab_structure->prev = kcache->slabs_free;
    }
    kcache->slabs_free = slab_structure;
    slab_structure->cache = kcache;
    return KERNEL_STATUS_SUCCESS;
}
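/* Moves a slab onto the cache list matching newstate. The slab is only moved if
 * it is currently on one of the other two lists; otherwise this is a no-op. */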
void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){
    struct ma_kcache *cache = slab->cache;
    struct ma_slab **dst_head;
    struct ma_slab **src_heads[2];
    switch (newstate) {
        case FREE:
            dst_head = &cache->slabs_free;
            src_heads[0] = &cache->slabs_partial;
            src_heads[1] = &cache->slabs_used;
            break;
        case PARTIAL:
            dst_head = &cache->slabs_partial;
            src_heads[0] = &cache->slabs_free;
            src_heads[1] = &cache->slabs_used;
            break;
        case USED:
            dst_head = &cache->slabs_used;
            src_heads[0] = &cache->slabs_free;
            src_heads[1] = &cache->slabs_partial;
            break;
        default:
            return;
    }
    // Find the list that currently holds the slab; lists are walked from the head via prev
    struct ma_slab **src_head = NULL;
    for(int i = 0; i < 2 && src_head == NULL; i++){
        for(struct ma_slab *sb = *src_heads[i]; sb != NULL; sb = sb->prev){
            if(sb == slab){
                src_head = src_heads[i];
                break;
            }
        }
    }
    if(src_head == NULL){
        return;
    }
    // Unlink the slab, preserving the linkage and fixing up the source head if needed
    if(slab->next != NULL){
        slab->next->prev = slab->prev;
    }else{
        *src_head = slab->prev;
    }
    if(slab->prev != NULL){
        slab->prev->next = slab->next;
    }
    // Push the slab onto the head of the destination list
    slab->prev = *dst_head;
    slab->next = NULL;
    if(*dst_head != NULL){
        (*dst_head)->next = slab;
    }
    *dst_head = slab;
}
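/* Creates a new object cache for objects of the given size, allocates its first
 * slab and links the cache into the global caches list. The flags, constructor
 * and destructor arguments are currently unused. */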
struct ma_kcache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)){
    acquire_spinlock(&caches_lock);
    struct ma_kcache *kcache = (struct ma_kcache*)va_alloc_contigious_pages(1);
    memset(kcache, 0, PAGE_SIZE);
    memcpy(kcache->name, name, 16); // Copies a fixed 16 bytes: name must point to a buffer of at least 16 bytes
    kcache->slabsize = (size / PAGE_SIZE) + 1;
    kcache->num = (PAGE_SIZE * kcache->slabsize - sizeof(struct ma_slab)) / ((size >= sizeof(struct ma_bufctl)) ? size : sizeof(struct ma_bufctl)); // Calculate the number of buffers in this slab
    kcache->objsize = size;
    memset(&kcache->lock, 0, sizeof(atomic_flag));
    _ma_alloc_slab(kcache);
    // Push the new cache onto the head of the global caches list
    if(caches != NULL){
        caches->next = kcache;
        kcache->prev = caches;
    }
    caches = kcache;
    free_spinlock(&caches_lock);
    return kcache;
}
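/* Allocates one object from the cache, growing the cache with a fresh slab when
 * neither a free nor a partial slab is available. The flags argument is
 * currently unused. */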
void *ma_cache_alloc(struct ma_kcache *kcache, uint32_t flags){
    acquire_spinlock(&kcache->lock);
    struct ma_slab *slab = NULL;
    // Pick a free slab if one exists, otherwise a partial slab, allocating a new slab when both lists are empty
    if(kcache->slabs_free == NULL){
        if(kcache->slabs_partial == NULL){
            _ma_alloc_slab(kcache);
            slab = kcache->slabs_free;
        }else{
            slab = kcache->slabs_partial;
        }
    }else{
        slab = kcache->slabs_free;
    }
    uint64_t *addr = _ma_slab_get_free_obj(slab);
    if(addr == NULL){
        // The chosen slab had no free objects left: retire it to the used list
        // and allocate a fresh slab to satisfy the request
        slab->free = NULL;
        _ma_move_slab(slab, USED);
        _ma_alloc_slab(kcache);
        addr = _ma_slab_get_free_obj(kcache->slabs_free);
    }
    free_spinlock(&kcache->lock);
    return addr;
}
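// Prints basic information about a cache and the length of each of its slab lists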
void cache_info(struct ma_kcache *cache){
    kprintf("name: {s}\n", cache->name);
    kprintf("objsize: {d}\n", cache->objsize);
    kprintf("num: {d}\n", cache->num);
    kprintf("slabsize: {d}\n", cache->slabsize);
    int slabsfreecnt = 0;
    for(struct ma_slab *slab = cache->slabs_free; slab != NULL; slab = slab->prev){
        slabsfreecnt++;
    }
    kprintf("slabsfree: {d}\n", slabsfreecnt);
    int slabspartcnt = 0;
    for(struct ma_slab *slab = cache->slabs_partial; slab != NULL; slab = slab->prev){
        slabspartcnt++;
    }
    kprintf("slabspartial: {d}\n", slabspartcnt);
    int slabsusedcnt = 0;
    for(struct ma_slab *slab = cache->slabs_used; slab != NULL; slab = slab->prev){
        slabsusedcnt++;
    }
    kprintf("slabsused: {d}\n", slabsusedcnt);
}
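/* Maps an object address back to its bufctl using the page metadata set up in
 * _ma_alloc_slab. Returns NULL if the address does not belong to a slab object. */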
struct ma_bufctl *addr_to_bufctl(void *object){
    struct ma_slab *slab = get_page(object)->slab;
    if(slab == NULL){
        return NULL;
    }
    struct ma_bufctl *bufs = get_page(object)->bufctls;
    if(bufs == NULL){
        return NULL;
    }
    if(slab->cache->objsize < 512){
        // Small objects embed their bufctl: the object itself is the bufctl.
        // Reject addresses that don't fall on an object boundary.
        uint64_t size = (slab->cache->objsize >= sizeof(struct ma_bufctl)) ? slab->cache->objsize : sizeof(struct ma_bufctl);
        if(((uint64_t)object - (uint64_t)bufs) % size != 0){
            return NULL;
        }
        return (struct ma_bufctl*)object;
    }
    // Large objects keep their bufctls on a separate page; find the matching one
    for(size_t i = 0; i < slab->cache->num; i++){
        if((bufs + i)->startaddr == object){
            return (bufs + i);
        }
    }
    return NULL;
}
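/* Returns an object to its slab's free list and moves the slab back to the
 * partial or free list when appropriate. Only the per-slab lock is taken here. */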
kstatus ma_cache_dealloc(void *object){
    struct ma_slab *slab = get_page(object)->slab;
    if(slab == NULL){
        klog(__func__, "slab == null");
        return KERNEL_STATUS_ERROR;
    }
    acquire_spinlock(&slab->lock);
    struct ma_bufctl *buf = addr_to_bufctl(object);
    if(buf == NULL){
        klog(__func__, "bufctl not found");
        free_spinlock(&slab->lock);
        return KERNEL_STATUS_ERROR;
    }
    // Restore startaddr: for embedded bufctls it was overwritten by the object's data
    buf->startaddr = (size_t*)object;
    buf->next = slab->free;
    slab->free = buf;
    slab->refcount--;
    if(slab->refcount == 0){
        _ma_move_slab(slab, FREE);
    }else if(slab->refcount == slab->cache->num - 1){
        _ma_move_slab(slab, PARTIAL);
    }
    free_spinlock(&slab->lock);
    return KERNEL_STATUS_SUCCESS;
}
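/* Example usage (a sketch, not part of the kernel): the cache name and object size
 * below are made up for illustration and error handling is omitted. The name buffer
 * is 16 bytes because ma_cache_create copies a fixed 16 bytes.
 *
 *   char name[16] = "demo";
 *   struct ma_kcache *demo_cache = ma_cache_create(name, 64, 0, NULL, NULL);
 *   void *obj = ma_cache_alloc(demo_cache, 0);
 *   // ... use the 64-byte object ...
 *   ma_cache_dealloc(obj);
 *   cache_info(demo_cache);
 */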