Minor changes to build system

This commit is contained in:
ssimnb 2026-02-17 10:50:52 +01:00
parent f478f8d38b
commit a7fd9ac224
16 changed files with 391 additions and 224 deletions

2
.gitignore vendored
View file

@ -2,4 +2,4 @@ build/
iso_root/
limine/
test/
include/uACPI
include/uacpi

View file

@ -3,12 +3,12 @@ CC = gcc
AS = nasm
LD = ld
SRC_DIR := src build/uACPI/source build
SRC_DIR := src build/flanterm
C_SOURCES := $(shell find $(SRC_DIR) -type f -name '*.c')
C_OBJECTS := $(patsubst %.c,$(BUILD_DIR)/%.o,$(C_SOURCES))
ASM_SOURCES := $(shell find $(SRC_DIR) -type f -name '*.asm')
ASM_OBJECTS := $(patsubst %.asm,$(BUILD_DIR)/%.o,$(ASM_SOURCES))
ASM_OBJECTS := $(patsubst %.asm,$(BUILD_DIR)/%asm.o,$(ASM_SOURCES))
CFLAGS += -Wall \
-Wextra \
@ -43,7 +43,7 @@ NASMFLAGS = -f elf64 -g -F dwarf
all: amd64
dependencies:
deps:
mkdir -p $(BUILD_DIR) || true
rm -rf build/limine
git clone https://github.com/limine-bootloader/limine.git --branch=v8.x-binary --depth=1 build/limine
@ -56,13 +56,13 @@ dependencies:
mkdir include/uACPI
cp -r build/uACPI/include/ include/
cp -r build/uACPI/include/* include/
$(BUILD_DIR)/%.o: %.c
mkdir -p $(dir $@)
$(CC) -c $< -o $@ $(CFLAGS)
$(BUILD_DIR)/%.o: %.asm
$(BUILD_DIR)/%asm.o: %.asm
mkdir -p $(dir $@)
$(AS) $< -o $@ $(NASMFLAGS)

View file

@ -5,6 +5,9 @@ typedef enum {
/* Success */
KERNEL_STATUS_SUCCESS,
KERNEL_MUTEX_ACQUIRED,
KERNEL_MUTEX_LOCKED,
/* General error */
KERNEL_STATUS_ERROR,
} kstatus;

View file

@ -1,9 +1,23 @@
#include <error.h>
#include <stdatomic.h>
#include <stdbool.h>
#ifndef SPINLOCK_H
#define SPINLOCK_H
/* Sleepless mutex built on an atomic flag; allocated from a slab cache
 * via init_mutex() (see lock.c). */
struct mutex {
atomic_flag lock; /* the actual lock word; set = held */
bool locked; /* mirror of lock state, written while holding the flag */
struct thread *holder; /* thread that currently owns the mutex, for recursion checks */
};
void acquire_spinlock(atomic_flag *lock);
void free_spinlock(atomic_flag *lock);
struct mutex *init_mutex();
kstatus acquire_mutex(struct mutex *mut);
void free_mutex(struct mutex *mut);
kstatus try_mutex(struct mutex *mut);
#endif

View file

@ -3,6 +3,7 @@
#pragma once
typedef enum proc_state {
ZOMBIE = 4,
RUNNING = 3,
READY = 2,
SLEEPING = 1,

3
include/sys/time.h Normal file
View file

@ -0,0 +1,3 @@
#pragma once
#include <stdint.h>
/* Nanoseconds since boot (TSC-derived; see kernel time.c). */
uint64_t get_timestamp_ns(void);
/* Block the caller for at least `ms` milliseconds. */
void sleep(int ms);

View file

@ -20,8 +20,3 @@ void timer_init(void){
calibration_timer = PMT;
}
}
void sleep(int ms){
/* Eventually fix this */
apic_sleep(ms);
}

View file

@ -22,5 +22,6 @@ switch_context:
pop r12
pop rbp
pop rbx
sti
ret

58
src/lib/lock.c Normal file
View file

@ -0,0 +1,58 @@
#include "arch/amd64/hal/smp.h"
#include "error.h"
#include <mm/slab.h>
#include <lock.h>
#include <stdatomic.h>
#include <kprint.h>
#include <string.h>
struct ma_cache *mutex_cache;
/* Spin until the flag is acquired; `pause` between attempts keeps the
 * busy-wait friendly to the core / hyperthread sibling. */
void acquire_spinlock(atomic_flag *lock){
    for(;;){
        if(!atomic_flag_test_and_set_explicit(lock, memory_order_acquire)){
            break;
        }
        asm volatile("pause");
    }
    atomic_thread_fence(memory_order_acquire);
}
/* Release a spinlock taken with acquire_spinlock(); the release fence
 * publishes all writes made inside the critical section. */
void free_spinlock(atomic_flag *lock){
atomic_flag_clear_explicit(lock, memory_order_release);
}
/* Allocate a zeroed mutex from the dedicated slab cache.
 * Returns NULL when the cache allocation fails.
 * Fix: the original memset() the result without a NULL check, which
 * would fault on allocation failure. */
struct mutex *init_mutex(){
    struct mutex *ret = ma_cache_alloc(mutex_cache, 0);
    if(ret == NULL){
        return NULL;
    }
    memset(ret, 0, sizeof(struct mutex));
    return ret;
}
/* Non-blocking acquire: returns KERNEL_MUTEX_ACQUIRED on success and
 * records the current thread as holder; KERNEL_MUTEX_LOCKED otherwise.
 * holder/locked are only written after winning the flag, so they are
 * protected by the lock itself. */
kstatus try_mutex(struct mutex *mut){
if(!atomic_flag_test_and_set_explicit(&mut->lock, memory_order_acquire)){
mut->holder = get_cpu_struct()->current_process;
mut->locked = true;
return KERNEL_MUTEX_ACQUIRED;
}
return KERNEL_MUTEX_LOCKED;
}
/* Acquire a mutex for the current thread.
 * Returns KERNEL_STATUS_ERROR if the caller already holds it,
 * KERNEL_MUTEX_ACQUIRED on success, KERNEL_MUTEX_LOCKED on contention.
 * Fix: the original fell off the end of this non-void function on the
 * contended path (undefined behavior); propagate try_mutex's status.
 * TODO: block or spin until the mutex becomes available. */
kstatus acquire_mutex(struct mutex *mut){
    if(get_cpu_struct()->current_process == mut->holder){
        klog(__func__, "Holder attempted to acquire mutex");
        return KERNEL_STATUS_ERROR;
    }
    return try_mutex(mut);
}

View file

@ -1,15 +0,0 @@
#include <lock.h>
#include <stdatomic.h>
#include <kprint.h>
void acquire_spinlock(atomic_flag *lock){
while(atomic_flag_test_and_set_explicit(lock, memory_order_acquire)){
asm volatile("nop");
}
atomic_thread_fence(memory_order_acquire);
}
void free_spinlock(atomic_flag *lock){
atomic_flag_clear_explicit(lock, memory_order_release);
}

View file

@ -72,6 +72,9 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
// Put the addresses in the slab structure into the bufctls
if(kcache->objsize >= 512){
/* Here we store the bufctls separately from the actual objects
*
*/
slab_structure->free = (struct ma_bufctl*)(va_alloc_contigious_pages(1)); // Store the bufctls off-page
memset(slab_structure->free, 0, 4096);
@ -93,7 +96,7 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
/* In this case the objects acts as bufctl structures. Small downside: there will always be a max of 252 objects per slab, no matter the size of the object, since
* they have to have enough space to store a bufctl structure (16 bytes).
*
* Their startaddr is the same as the address of the bufctl, since the objects act as the bufctls.
* Their startaddr is the same as the address of the bufctl, since the objects act as the bufctls
*/
slab_structure->free = va_alloc_contigious_pages(kcache->slabsize);
@ -105,6 +108,7 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
for(size_t i = 0; i < kcache->num; i++){
((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->startaddr = (size_t*)((uint64_t)slab_structure->free + i * size);
serial_kprintf("{s}: Adding 0x{x} to freelist\n", kcache->name, (size_t*)((uint64_t)slab_structure->free + i * size));
if(i+1 < kcache->num){
((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + size*(i+1));
}else{
@ -112,6 +116,8 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
}
}
//memcpy(get_page(slab_structure->free)->bufctls, slab_structure->free, kcache->slabsize * PAGE_SIZE);
}
//asm("int $1");
@ -130,10 +136,8 @@ kstatus _ma_alloc_slab(struct ma_cache *kcache){
return KERNEL_STATUS_SUCCESS;
}
// fix this complicated POS
// TODO: fix this complicated POS
void _ma_move_slab(struct ma_slab *slab, enum SLAB_STATE newstate){
struct ma_cache *cache = slab->cache;
struct ma_slab *sb = 0;
switch (newstate) {
@ -313,6 +317,7 @@ void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags){
uint64_t *addr = _ma_slab_get_free_obj(slab);
// If there's no free object then allocate new slab
if(addr == NULL){
slab->free = NULL;
@ -406,6 +411,10 @@ struct ma_bufctl *addr_to_bufctl(void *object){
return NULL;
}
if(slab->cache->objsize < 512){
return (struct ma_bufctl*)object;
}
struct ma_bufctl *bufs = get_page(object)->bufctls;
if(bufs == NULL){
@ -413,7 +422,12 @@ struct ma_bufctl *addr_to_bufctl(void *object){
}
for(size_t i = 0; i < slab->cache->num; i++){
if((bufs + i)->startaddr != 0){
kprintf("addr_to_bufctl: we're looking at 0x{x}\n", (bufs + i)->startaddr);
}
if((bufs + i)->startaddr == object){
//kprintf("addr_to_bufctl: we're looking at 0x{x}\n", (bufs + i)->startaddr);
return (bufs + i);
}
}
@ -456,6 +470,8 @@ kstatus ma_cache_dealloc(void *object){
}
extern struct ma_cache *thread_cache;
extern struct ma_cache *mutex_cache;
void create_base_caches(){
thread_cache = ma_cache_create("thread", sizeof(struct thread), 0, NULL, NULL);
mutex_cache = ma_cache_create("mutex", sizeof(struct mutex), 0, NULL, NULL);
}

View file

@ -304,7 +304,7 @@ int vmm_map_contigious_pages(uint64_t *page_map, uint64_t virt_addr, uint64_t ph
return 0;
}
#define VA_BASE 0x800815000
#define VA_BASE 0x900915000
uint64_t va_base = VA_BASE;
atomic_flag va_lock = ATOMIC_FLAG_INIT;

View file

@ -39,46 +39,31 @@ void thread_exit(){
cpu_state *cpu = get_cpu_struct();
struct thread *p = cpu->current_process;
kprintf("hi");
// Remove process from circular linked list
p->prev->next = p->next;
if(p == p->next){
cpu->current_process = idle;
}
// If this is the only thread in the queue then set cpu->head to NULL, so scheduler knows to idle
cpu->head = NULL;
}else{
// Remove process from circular linked list
p->next->prev = p->prev;
p->prev->next = p->next;
if(p == cpu->head){
if(p == cpu->head){
//cpu->head = p->next;
}
}
ma_cache_dealloc(p); // KILL the thread
p->state = ZOMBIE;
sched();
// Switch to scheduler
yield();
}
/* Setup a process structure */
struct thread *alloc_thread(void){
struct cpu_state *cpu = get_cpu_struct();
struct thread *head = get_cpu_struct()->head;
struct thread *base = get_cpu_struct()->base;
struct thread *t = ma_cache_alloc(thread_cache, 0);
memset(t, 0, sizeof(struct thread));
if(base == NULL){
t->next = t;
t->prev = t;
cpu->base = t;
cpu->head = t;
} else {
t->prev = head;
t->next = base;
head->next = t;
base->prev = t;
cpu->head = t;
}
t->state = READY;
t->kstack = kzalloc(8 * 4096);
@ -100,36 +85,61 @@ struct thread *alloc_thread(void){
}
struct thread *add_process(uint64_t *entry){
struct thread *proc = alloc_thread();
struct thread *add_thread(uint64_t *entry){
struct thread *t = alloc_thread();
if (proc == NULL) {
klog(__func__, "proc == null!");
kkill();
assert(t != NULL && "Thread allocation failed!");
struct cpu_state *cpu = get_cpu_struct();
struct thread *head = get_cpu_struct()->head;
struct thread *base = get_cpu_struct()->base;
// Maintain the circular doubly linked list of runnable threads
if(base == NULL){
t->next = t;
t->prev = t;
cpu->base = t;
cpu->head = t;
} else {
t->prev = head;
t->next = base;
head->next = t;
base->prev = t;
cpu->head = t;
}
proc->context->rip = (uint64_t)entry;
t->context->rip = (uint64_t)entry;
//kprintf("entry: 0x{xn}", entry);
return proc;
return t;
}
[[noreturn]] void sched(){
cpu_state *cpu = get_cpu_struct();
for(;;){
asm("cli"); // we sti at the end of switch_context
cpu_state *cpu = get_cpu_struct();
struct thread *prev = cpu->current_process;
asm("cli");
if(prev->state == ZOMBIE){
kprintf("we are freeing allocated thread 0x{x}\n", prev);
ma_cache_dealloc(prev);
}else{
prev->state = READY;
}
struct thread *prev = cpu->current_process;
prev->state = READY;
cpu->current_process = prev->next;
cpu->current_process->state = RUNNING;
asm("sti");
switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
if(cpu->head == NULL){
cpu->current_process = idle;
}else{
if(prev->next){
cpu->current_process = prev->next;
}else{
cpu->current_process = cpu->head;
}
}
cpu->current_process->state = RUNNING;
switch_context(&(get_cpu_struct()->scheduler_context), get_cpu_struct()->current_process->context);
}
}
void scheduler_init(){
@ -142,11 +152,11 @@ void scheduler_init(){
return;
}
idle = add_process((uint64_t*)idle_task);
idle = alloc_thread();
idle->context->rip = (uint64_t)idle;
assert(idle != NULL && "Failed to allocate idle task!");
cpu->current_process = idle;
cpu->scheduler_stack = kzalloc(4096);
@ -154,9 +164,9 @@ void scheduler_init(){
cpu->scheduler_context->rbp = (uint64_t)cpu->scheduler_context;
cpu->scheduler_context->rip = (uint64_t)sched;
add_process((uint64_t*)test_task);
add_thread((uint64_t*)test_task);
add_process((uint64_t *)best_task);
add_thread((uint64_t *)best_task);
// Initialize scheduler -> we will now get timer interrupts to switch us into sched()
cpu->scheduler_initialized = true;

View file

@ -2,7 +2,9 @@
#include <stdint.h>
#include <arch/amd64/hal/tsc.h>
uint64_t kernel_get_timestamp(){
#include <arch/amd64/hal/apic.h>
uint64_t get_timestamp_ns(){
uint64_t ret = 0;
uint64_t tsc = tsc_get_timestamp();
@ -14,3 +16,7 @@ uint64_t kernel_get_timestamp(){
return tsc;
}
/* Sleep for `ms` milliseconds using the APIC timer. */
void sleep(int ms){
apic_sleep(ms);
}

View file

@ -1,141 +0,0 @@
#include <uacpi/uacpi.h>
#include <sys/pci.h>
#include <mm/vmm.h>
#include <mm/kmalloc.h>
#include <limine.h>
#include <kprint.h>
#include <neobbo.h>
#include <io.h>
uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address){
extern struct limine_rsdp_request rsdp_request;
out_rsdp_address = (uacpi_phys_addr*)rsdp_request.response->address;
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_pci_device_open(uacpi_pci_address address, uacpi_handle *out_handle){
if(address.segment != 0){
klog(LOG_ERROR, __func__, "Multiple segments not implemented!");
return UACPI_STATUS_UNIMPLEMENTED;
}
l84_pci_function_return ret = check_device(address.bus, address.device);
return ret.func_addr[address.function];
}
void uacpi_kernel_pci_device_close(uacpi_handle handle){
return;
}
uacpi_status uacpi_kernel_pci_read(uacpi_handle device, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 *value){
if(byte_width == 1){
uint8_t read = *(uint8_t*)((uint64_t)device + offset);
*value = read;
}else if(byte_width == 2){
uint16_t read = *(uint16_t*)((uint64_t)device + offset);
*value = read;
}else if(byte_width == 4){
uint32_t read = *(uint32_t*)((uint64_t)device + offset);
*value = read;
}else{
return UACPI_STATUS_INTERNAL_ERROR;
}
return UACPI_STATUS_OK;
};
uacpi_status uacpi_kernel_pci_write(uacpi_handle device, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 value){
if(byte_width == 1){
*(uint8_t*)((uint64_t)device + offset) = value;
}else if(byte_width == 2){
*(uint16_t*)((uint64_t)device + offset) = value;
}else if(byte_width == 4){
*(uint32_t*)((uint64_t)device + offset) = value;
}else{
return UACPI_STATUS_INTERNAL_ERROR;
}
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_io_map(uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle){
return UACPI_STATUS_UNIMPLEMENTED;
}
void uacpi_kernel_io_unmap(uacpi_handle handle){
asm("nop");
}
uacpi_status uacpi_kernel_io_read(uacpi_handle handle, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 *value){
if(byte_width == 1){
*value = inb((uint16_t)offset);
}else if(byte_width == 2){
*value = inw((uint16_t)offset);
}else if(byte_width == 4){
*value = inl((uint16_t)offset);
}else{
return UACPI_STATUS_INTERNAL_ERROR;
}
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_io_write(uacpi_handle handle, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 value){
if(byte_width == 1){
outb((uint16_t)offset, value);
}else if(byte_width == 2){
outw((uint16_t)offset, value);
}else if(byte_width == 4){
outl((uint16_t)offset, value);
}else{
return UACPI_STATUS_INTERNAL_ERROR;
}
return UACPI_STATUS_OK;
}
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len){
kernel_map_pages((void*)addr, len, PTE_BIT_RW);
return (void*)addr;
}
void uacpi_kernel_unmap(void *addr, uacpi_size len){
kernel_unmap_pages(addr, len);
}
void *uacpi_kernel_alloc(uacpi_size size){
void *ret = kmalloc(size);
if(ret == NULL){
klog(LOG_ERROR, __func__, "OOM");
kkill();
}
return ret;
}
void uacpi_kernel_free(void *mem){
if(mem == NULL){
return;
}
kfree(mem);
}
void uacpi_kernel_log(uacpi_log_level level, const uacpi_char *str){
switch(level){
case UACPI_LOG_ERROR:
kprintf("uacpi: error: %s\n", str);
case UACPI_LOG_WARN:
kprintf("uacpi: warn: %s\n", str);
case UACPI_LOG_INFO:
kprintf("uacpi: info: %s\n", str);
default:
asm("nop");
}
}

216
uacpi_kernel_api.c Normal file
View file

@ -0,0 +1,216 @@
#include "arch/amd64/hal/smp.h"
#include <stdatomic.h>
#include <uacpi/kernel_api.h>
#include <uacpi/log.h>
#include <uacpi/status.h>
#include <uacpi/types.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <uacpi/uacpi.h>
#include <sys/pci.h>
#include <mm/vmm.h>
#include <mm/kmalloc.h>
#include <limine.h>
#include <kprint.h>
#include <neobbo.h>
#include <io.h>
#include <sys/time.h>
extern uint64_t hhdmoffset;
/* Report the RSDP physical address obtained from the Limine bootloader.
 * Fix: the original assigned to the local parameter instead of writing
 * through the out-pointer, so callers never received the address. */
uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address){
    extern struct limine_rsdp_request rsdp_request;
    assert(rsdp_request.response != NULL);
    *out_rsdp_address = (uacpi_phys_addr)(uintptr_t)rsdp_request.response->address;
    return UACPI_STATUS_OK;
}
/* Route uACPI log output to the kernel console with per-level coloring.
 * Fix: removed the unused local `char* level;`. */
void uacpi_kernel_log(uacpi_log_level l, const uacpi_char* str){
    switch(l){
        case UACPI_LOG_ERROR:
            kprintf("{k}[uACPI] {sk}\n", ANSI_COLOR_RED, str, ANSI_COLOR_RESET);
            break;
        case UACPI_LOG_WARN:
            kprintf("{k}[uACPI] {sk}\n", ANSI_COLOR_YELLOW, str, ANSI_COLOR_RESET);
            break;
        case UACPI_LOG_INFO:
            kprintf("{k}[uACPI]{k} {s}\n", ANSI_COLOR_MAGENTA, ANSI_COLOR_RESET, str);
            break;
        default:
            kprintf("[uACPI] {s}\n", str);
    }
}
/* Hand uACPI an owned copy of the PCI address; released in
 * uacpi_kernel_pci_device_close.
 * Fix: report allocation failure instead of memcpy-ing into NULL. */
uacpi_status uacpi_kernel_pci_device_open(uacpi_pci_address address, uacpi_handle *out_handle){
    *out_handle = kzalloc(sizeof(uacpi_pci_address));
    if(*out_handle == NULL){
        return UACPI_STATUS_OUT_OF_MEMORY;
    }
    memcpy(*out_handle, &address, sizeof(uacpi_pci_address));
    return UACPI_STATUS_OK;
}
/* Release the uacpi_pci_address copy made in _device_open.
 * Fix: the handle is allocated with kzalloc, so it must be released
 * with kfree — the original called the hosted free(). */
void uacpi_kernel_pci_device_close(uacpi_handle handle){
    kfree(handle);
}
/* PCI config-space reads (8/16/32-bit).
 * NOTE(review): `device` is the uacpi_pci_address copy allocated in
 * uacpi_kernel_pci_device_open, NOT a mapped config-space base, so
 * dereferencing device+offset reads the address struct / adjacent
 * memory rather than PCI config space. Also the write counterparts
 * add hhdmoffset while these reads do not — confirm the intended
 * addressing model (ECAM lookup is probably needed here). */
uacpi_status uacpi_kernel_pci_read8(uacpi_handle device, uacpi_size offset, uacpi_u8 *value){
*value = *(uint8_t*)((uint64_t)device + offset);
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_pci_read16(uacpi_handle device, uacpi_size offset, uacpi_u16 *value){
*value = *(uint16_t*)((uint64_t)device + offset);
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_pci_read32(uacpi_handle device, uacpi_size offset, uacpi_u32 *value){
*value = *(uint32_t*)((uint64_t)device + offset);
return UACPI_STATUS_OK;
}
/* PCI config-space writes (8/16/32-bit).
 * NOTE(review): these add hhdmoffset to the handle while the read
 * counterparts do not — one of the two families must be wrong. As in
 * the reads, `device` is the uacpi_pci_address copy from _device_open,
 * not a config-space base; confirm before relying on these. */
uacpi_status uacpi_kernel_pci_write8(uacpi_handle device, uacpi_size offset, uacpi_u8 value){
*(uint8_t*)((uint64_t)device + offset + hhdmoffset) = value;
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_pci_write16(uacpi_handle device, uacpi_size offset, uacpi_u16 value){
*(uint16_t*)((uint64_t)device + offset + hhdmoffset) = value;
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_pci_write32(uacpi_handle device, uacpi_size offset, uacpi_u32 value){
*(uint32_t*)((uint64_t)device + offset + hhdmoffset) = value;
return UACPI_STATUS_OK;
}
/* Port-I/O mapping is not implemented yet; uACPI is told so explicitly. */
uacpi_status uacpi_kernel_io_map(uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle){
return UACPI_STATUS_UNIMPLEMENTED;
}
/* Nothing to undo while io_map is unimplemented. */
void uacpi_kernel_io_unmap(uacpi_handle handle){
asm("nop");
}
/* Port I/O reads (8/16/32-bit): port = (uint16_t)handle + offset.
 * NOTE(review): io_map returns UNIMPLEMENTED, so the provenance of
 * `handle` is unclear; truncating it to uint16_t assumes it carries a
 * raw port base — TODO confirm once io_map is implemented. */
uacpi_status uacpi_kernel_io_read8(uacpi_handle handle, uacpi_size offset, uacpi_u8 *out_value){
*out_value = inb(((uint16_t)handle + offset));
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_io_read16(uacpi_handle handle, uacpi_size offset, uacpi_u16 *out_value){
*out_value = inw(((uint16_t)handle + offset));
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_io_read32(uacpi_handle handle, uacpi_size offset, uacpi_u32 *out_value){
*out_value = inl(((uint16_t)handle + offset));
return UACPI_STATUS_OK;
}
/* Port I/O writes (8/16-bit), mirroring the read path's port
 * computation: port = (uint16_t)handle + offset. */
uacpi_status uacpi_kernel_io_write8(uacpi_handle handle, uacpi_size offset, uacpi_u8 in_value){
outb(((uint16_t)handle + offset), in_value);
return UACPI_STATUS_OK;
}
uacpi_status uacpi_kernel_io_write16(uacpi_handle handle, uacpi_size offset, uacpi_u16 in_value){
outw(((uint16_t)handle + offset), in_value);
return UACPI_STATUS_OK;
}
/* 32-bit port write.
 * Fix: the original called outb (8-bit), silently dropping the upper
 * 24 bits; use outl to match uacpi_kernel_io_read32. */
uacpi_status uacpi_kernel_io_write32(uacpi_handle handle, uacpi_size offset, uacpi_u32 in_value){
    outl(((uint16_t)handle + offset), in_value);
    return UACPI_STATUS_OK;
}
/* NOTE(review): this byte_width-style entry point is from the old uACPI
 * kernel API and coexists here with the per-width io_write8/16/32
 * functions above; it also ignores `handle` and treats `offset` as the
 * absolute port. Likely dead code — confirm against the uACPI version
 * in use and remove if unreferenced. */
uacpi_status uacpi_kernel_io_write(uacpi_handle handle, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 value){
if(byte_width == 1){
outb((uint16_t)offset, value);
}else if(byte_width == 2){
outw((uint16_t)offset, value);
}else if(byte_width == 4){
outl((uint16_t)offset, value);
}else{
return UACPI_STATUS_INTERNAL_ERROR;
}
return UACPI_STATUS_OK;
}
/* Map a physical range for uACPI and return a virtual pointer into the
 * higher-half direct map.
 * Fix: `addr` already contains its sub-page offset, so the original
 * `addr + hhdmoffset + (addr % PAGE_SIZE)` counted the offset twice
 * for unaligned physical addresses; the HHDM is a plain offset map. */
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len){
    kmap_pages((void*)addr, len, 0);
    return (void*)((uintptr_t)addr + hhdmoffset);
}
/* Undo a uacpi_kernel_map mapping. */
void uacpi_kernel_unmap(void *addr, uacpi_size len){
kunmap_pages(addr, len);
}
/* Heap allocation for uACPI. Logs on failure and returns NULL so the
 * caller (uACPI) can handle out-of-memory itself. */
void *uacpi_kernel_alloc(uacpi_size size){
void *ret = kmalloc(size);
if(ret == NULL){
klog(__func__, "Unable to kmalloc!");
}
return ret;
}
/* Release memory obtained from uacpi_kernel_alloc; NULL is a no-op. */
void uacpi_kernel_free(void *mem){
    if(mem != NULL){
        kfree(mem);
    }
}
/* uACPI monotonic clock: nanoseconds since boot from the kernel timer. */
uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void){
return get_timestamp_ns();
}
/* Busy-wait for at least `usec` microseconds.
 * Fix: usec is a uacpi_u8 (max 255), so the original usec/1000 always
 * truncated to 0 and the stall never waited at all. Round up to the
 * millisecond sleep granularity we have.
 * TODO: implement a true microsecond-resolution busy-wait. */
void uacpi_kernel_stall(uacpi_u8 usec){
    sleep((usec + 999) / 1000);
}
/* Put the caller to sleep for `msec` milliseconds via the kernel timer. */
void uacpi_kernel_sleep(uacpi_u64 msec){
    sleep(msec);
}
uacpi_handle uacpi_kernel_create_mutex(){
return kmalloc(sizeof(atomic_flag));
}
/* Destroy a mutex created by uacpi_kernel_create_mutex.
 * Fix: the handle is allocated with kmalloc, so release it with kfree —
 * the original called the hosted free(). */
void uacpi_kernel_free_mutex(uacpi_handle handle){
    kfree(handle);
}
/* Create an event handle (a counter) for uACPI.
 * Fix: kmalloc leaves the counter uninitialized; zero-allocate so the
 * event starts with no pending signals. */
uacpi_handle uacpi_kernel_create_event(void){
    return kzalloc(sizeof(uint64_t));
}
/* Destroy an event created by uacpi_kernel_create_event. */
void uacpi_kernel_free_event(uacpi_handle handle){
kfree(handle);
}
/* NOTE(review): returns the per-CPU structure, so two threads scheduled
 * on the same CPU share an id. uACPI uses this for mutex recursion
 * detection — consider returning the current thread instead; confirm
 * against cpu_state layout. */
uacpi_thread_id uacpi_kernel_get_thread_id(void){
return get_cpu_struct();
}
/* Acquire a uACPI mutex (handle from uacpi_kernel_create_mutex).
 * Fix: the original only declared `flg` and then fell off the end of a
 * non-void function (UB); spin-acquire the flag and report success.
 * TODO: honor the timeout `t` (0xFFFF means wait forever). */
uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 t){
    (void)t;
    atomic_flag *flg = handle;
    while(atomic_flag_test_and_set_explicit(flg, memory_order_acquire)){
        asm volatile("pause");
    }
    return UACPI_STATUS_OK;
}