Initial commit

This commit is contained in:
bdbrd 2025-10-22 15:51:24 +02:00
commit cbc51f523e
125 changed files with 34817 additions and 0 deletions

View file

@@ -0,0 +1,353 @@
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/io.h>
#include <uacpi/kernel_api.h>
#include <uacpi/uacpi.h>
#define PCI_ROOT_PNP_ID "PNP0A03"
#define PCI_EXPRESS_ROOT_PNP_ID "PNP0A08"
/*
 * Walk up from a PCI_Config operation region node looking for the device
 * that matches one of the well-known PCI(e) host bridge PNP IDs.
 *
 * Returns the matching ancestor, or the region node itself if no PCI root
 * was found before reaching the namespace root (an error is traced in that
 * case).
 */
static uacpi_namespace_node *find_pci_root(uacpi_namespace_node *node)
{
    static const uacpi_char *pci_root_ids[] = {
        PCI_ROOT_PNP_ID,
        PCI_EXPRESS_ROOT_PNP_ID,
        UACPI_NULL
    };
    uacpi_namespace_node *it;

    for (it = node->parent; it != uacpi_namespace_root(); it = it->parent) {
        if (!uacpi_device_matches_pnp_id(it, pci_root_ids))
            continue;

        uacpi_trace(
            "found a PCI root node %.4s controlling region %.4s\n",
            it->name.text, node->name.text
        );
        return it;
    }

    uacpi_trace_region_error(
        node, "unable to find PCI root controlling",
        UACPI_STATUS_NOT_FOUND
    );
    return node;
}
/*
 * Attach handler for PCI_Config operation regions.
 *
 * Resolves the (segment, bus, device, function) address of the region by:
 * - locating the controlling PCI root bridge (for _SEG/_BBN),
 * - locating the nearest ancestor Device node (for _ADR),
 * then asks the kernel to open a handle to that PCI device, stored as the
 * region context.
 */
static uacpi_status pci_region_attach(uacpi_region_attach_data *data)
{
uacpi_namespace_node *node, *pci_root, *device;
uacpi_pci_address address = { 0 };
uacpi_u64 value;
uacpi_status ret;
node = data->region_node;
// NOTE: on failure find_pci_root returns 'node' itself, so the _SEG/_BBN
// evaluations below simply fail and the defaults (0) are kept.
pci_root = find_pci_root(node);
/*
 * Find the actual device object that is supposed to be controlling
 * this operation region.
 */
device = node;
while (device) {
uacpi_object_type type;
ret = uacpi_namespace_node_type(device, &type);
if (uacpi_unlikely_error(ret))
return ret;
if (type == UACPI_OBJECT_DEVICE)
break;
device = device->parent;
}
if (uacpi_unlikely(device == UACPI_NULL)) {
ret = UACPI_STATUS_NOT_FOUND;
uacpi_trace_region_error(
node, "unable to find device responsible for", ret
);
return ret;
}
// _ADR packs the device number in the high word, function in the low word
ret = uacpi_eval_simple_integer(device, "_ADR", &value);
if (ret == UACPI_STATUS_OK) {
address.function = (value >> 0) & 0xFF;
address.device = (value >> 16) & 0xFF;
}
// _SEG and _BBN are optional; missing objects leave segment/bus at 0
ret = uacpi_eval_simple_integer(pci_root, "_SEG", &value);
if (ret == UACPI_STATUS_OK)
address.segment = value;
ret = uacpi_eval_simple_integer(pci_root, "_BBN", &value);
if (ret == UACPI_STATUS_OK)
address.bus = value;
uacpi_trace(
"detected PCI device %.4s@%04X:%02X:%02X:%01X\n",
device->name.text, address.segment, address.bus,
address.device, address.function
);
return uacpi_kernel_pci_device_open(address, &data->out_region_context);
}
// Detach handler for PCI_Config regions: close the device handle opened at
// attach time.
static uacpi_status pci_region_detach(uacpi_region_detach_data *data)
{
    uacpi_handle dev = data->region_context;

    uacpi_kernel_pci_device_close(dev);
    return UACPI_STATUS_OK;
}
// Forward a PCI_Config region read/write to the kernel PCI accessors.
static uacpi_status pci_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    uacpi_handle device = data->region_context;

    if (op == UACPI_REGION_OP_READ)
        return uacpi_kernel_pci_read(
            device, data->offset, data->byte_width, &data->value
        );

    return uacpi_kernel_pci_write(
        device, data->offset, data->byte_width, data->value
    );
}
// Dispatch entry point for the PCI_Config address space handler.
static uacpi_status handle_pci_region(uacpi_region_op op, uacpi_handle op_data)
{
    if (op == UACPI_REGION_OP_ATTACH)
        return pci_region_attach(op_data);
    if (op == UACPI_REGION_OP_DETACH)
        return pci_region_detach(op_data);
    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return pci_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
// Per-region context for SystemMemory operation regions, created at attach
// and freed at detach.
struct memory_region_ctx {
uacpi_phys_addr phys; // physical base of the region
uacpi_u8 *virt; // kernel mapping covering the whole region
uacpi_size size; // region length in bytes
};
/*
 * Attach handler for SystemMemory operation regions: allocate a context and
 * map the entire region into the kernel's address space.
 *
 * Returns UACPI_STATUS_OUT_OF_MEMORY / UACPI_STATUS_MAPPING_FAILED on
 * failure; on success the context is stored in data->out_region_context.
 */
static uacpi_status memory_region_attach(uacpi_region_attach_data *data)
{
    struct memory_region_ctx *ctx;
    uacpi_object *region_obj;
    uacpi_operation_region *op_region;
    uacpi_status ret;

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (ctx == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    ret = uacpi_namespace_node_acquire_object_typed(
        data->region_node, UACPI_OBJECT_OPERATION_REGION_BIT, &region_obj
    );
    if (uacpi_unlikely_error(ret)) {
        // BUGFIX: the original leaked 'ctx' on this early-return path
        uacpi_free(ctx, sizeof(*ctx));
        return ret;
    }

    op_region = region_obj->op_region;
    ctx->size = op_region->length;

    // FIXME: this really shouldn't try to map everything at once
    ctx->phys = op_region->offset;
    ctx->virt = uacpi_kernel_map(ctx->phys, ctx->size);

    if (uacpi_unlikely(ctx->virt == UACPI_NULL)) {
        ret = UACPI_STATUS_MAPPING_FAILED;
        uacpi_trace_region_error(data->region_node, "unable to map", ret);
        uacpi_free(ctx, sizeof(*ctx));
        goto out;
    }

    data->out_region_context = ctx;
out:
    uacpi_namespace_node_release_object(region_obj);
    return ret;
}
// Detach handler for SystemMemory regions: undo the attach-time mapping and
// release the context.
static uacpi_status memory_region_detach(uacpi_region_detach_data *data)
{
    struct memory_region_ctx *region = data->region_context;

    uacpi_kernel_unmap(region->virt, region->size);
    uacpi_free(region, sizeof(*region));
    return UACPI_STATUS_OK;
}
// Per-region context for SystemIO operation regions.
struct io_region_ctx {
uacpi_io_addr base; // IO port base of the region (used to rebase offsets)
uacpi_handle handle; // kernel IO mapping handle
};
/*
 * Attach handler for SystemIO operation regions: allocate a context and map
 * the region's port range via the kernel.
 *
 * Fixes over the original:
 * - 'ctx' is no longer leaked when acquiring the region object fails;
 * - the region object is released with uacpi_namespace_node_release_object,
 *   the documented pair of uacpi_namespace_node_acquire_object_typed (this
 *   also matches memory_region_attach, which previously differed).
 */
static uacpi_status io_region_attach(uacpi_region_attach_data *data)
{
    struct io_region_ctx *ctx;
    uacpi_object *region_obj;
    uacpi_operation_region *op_region;
    uacpi_status ret;

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (ctx == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    ret = uacpi_namespace_node_acquire_object_typed(
        data->region_node, UACPI_OBJECT_OPERATION_REGION_BIT, &region_obj
    );
    if (uacpi_unlikely_error(ret)) {
        // BUGFIX: the original leaked 'ctx' on this early-return path
        uacpi_free(ctx, sizeof(*ctx));
        return ret;
    }

    op_region = region_obj->op_region;
    ctx->base = op_region->offset;

    ret = uacpi_kernel_io_map(ctx->base, op_region->length, &ctx->handle);
    if (uacpi_unlikely_error(ret)) {
        uacpi_trace_region_error(
            data->region_node, "unable to map an IO", ret
        );
        uacpi_free(ctx, sizeof(*ctx));
        goto out;
    }

    data->out_region_context = ctx;
out:
    uacpi_namespace_node_release_object(region_obj);
    return ret;
}
// Detach handler for SystemIO regions: unmap the port range and free the
// context.
static uacpi_status io_region_detach(uacpi_region_detach_data *data)
{
    struct io_region_ctx *region = data->region_context;

    uacpi_kernel_io_unmap(region->handle);
    uacpi_free(region, sizeof(*region));
    return UACPI_STATUS_OK;
}
// Perform a SystemMemory region access through the attach-time mapping.
static uacpi_status memory_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    struct memory_region_ctx *region = data->region_context;
    // Rebase the absolute physical address onto the virtual mapping
    uacpi_u8 *addr = region->virt + (data->address - region->phys);

    if (op == UACPI_REGION_OP_READ)
        return uacpi_system_memory_read(addr, data->byte_width, &data->value);

    return uacpi_system_memory_write(addr, data->byte_width, data->value);
}
// Dispatch entry point for the SystemMemory address space handler.
static uacpi_status handle_memory_region(uacpi_region_op op, uacpi_handle op_data)
{
    if (op == UACPI_REGION_OP_ATTACH)
        return memory_region_attach(op_data);
    if (op == UACPI_REGION_OP_DETACH)
        return memory_region_detach(op_data);
    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return memory_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
// Table-data regions address already-mapped table memory directly: the
// region offset is interpreted as a virtual address.
static uacpi_status table_data_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    void *ptr = UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)data->offset);

    if (op == UACPI_REGION_OP_READ)
        return uacpi_system_memory_read(ptr, data->byte_width, &data->value);

    return uacpi_system_memory_write(ptr, data->byte_width, data->value);
}
// Dispatch entry point for the table-data address space handler.
static uacpi_status handle_table_data_region(uacpi_region_op op, uacpi_handle op_data)
{
    // No per-region state is needed, so attach/detach are no-ops
    if (op == UACPI_REGION_OP_ATTACH || op == UACPI_REGION_OP_DETACH)
        return UACPI_STATUS_OK;
    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return table_data_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
// Perform a SystemIO region access, rebasing the absolute port offset onto
// the attach-time IO mapping.
static uacpi_status io_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    struct io_region_ctx *region = data->region_context;
    uacpi_size rel_offset = data->offset - region->base;
    uacpi_u8 bytes = data->byte_width;

    if (op == UACPI_REGION_OP_READ)
        return uacpi_kernel_io_read(
            region->handle, rel_offset, bytes, &data->value
        );

    return uacpi_kernel_io_write(region->handle, rel_offset, bytes, data->value);
}
// Dispatch entry point for the SystemIO address space handler.
static uacpi_status handle_io_region(uacpi_region_op op, uacpi_handle op_data)
{
    if (op == UACPI_REGION_OP_ATTACH)
        return io_region_attach(op_data);
    if (op == UACPI_REGION_OP_DETACH)
        return io_region_detach(op_data);
    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return io_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
/*
 * Register the built-in handlers for the four address spaces uACPI supports
 * out of the box (SystemMemory, SystemIO, PCI_Config, table data) on the
 * namespace root.
 */
void uacpi_install_default_address_space_handlers(void)
{
    uacpi_namespace_node *root = uacpi_namespace_root();

    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_SYSTEM_MEMORY, handle_memory_region,
        UACPI_NULL, UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );
    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_SYSTEM_IO, handle_io_region,
        UACPI_NULL, UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );
    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_PCI_CONFIG, handle_pci_region,
        UACPI_NULL, UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );
    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_TABLE_DATA, handle_table_data_region,
        UACPI_NULL, UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );
}

2424
src/uacpi/event.c Normal file

File diff suppressed because it is too large Load diff

21
src/uacpi/files.cmake Normal file
View file

@@ -0,0 +1,21 @@
# Translation units that make up the uACPI library build.
uacpi_add_sources(
tables.c
types.c
uacpi.c
utilities.c
interpreter.c
opcodes.c
namespace.c
stdlib.c
shareable.c
opregion.c
default_handlers.c
io.c
notify.c
sleep.c
registers.c
resources.c
event.c
mutex.c
osi.c
)

5822
src/uacpi/interpreter.c Normal file

File diff suppressed because it is too large Load diff

672
src/uacpi/io.c Normal file
View file

@@ -0,0 +1,672 @@
#include <uacpi/internal/io.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/namespace.h>
// Number of whole bytes needed to hold 'bit_length' bits (rounded up).
uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length)
{
    uacpi_size aligned_bits = UACPI_ALIGN_UP(bit_length, 8, uacpi_size);

    return aligned_bits / 8;
}
// Mask off the bits of data[offset] that lie past 'bit_length' bits, leaving
// the buffer untouched when the length is byte-aligned.
static void cut_misaligned_tail(
    uacpi_u8 *data, uacpi_size offset, uacpi_u32 bit_length
)
{
    uacpi_u8 tail_bits = bit_length & 7;

    if (tail_bits != 0)
        data[offset] &= ((1ull << tail_bits) - 1);
}
/*
 * A bit-granular view into a byte buffer: 'index' is the offset (in bits) of
 * the first bit of interest, 'length' the number of bits in the span.
 */
struct bit_span
{
union {
uacpi_u8 *data; // mutable view (destination spans)
const uacpi_u8 *const_data; // read-only view (source spans)
};
uacpi_u64 index;
uacpi_u64 length;
};
// Advance a span by up to 'bits' bits (clamped to its remaining length),
// returning how many bits were actually consumed.
static uacpi_size bit_span_offset(struct bit_span *span, uacpi_size bits)
{
    uacpi_size consumed = UACPI_MIN(span->length, bits);

    span->index += consumed;
    span->length -= consumed;
    return consumed;
}
/*
 * Copy dst->length bits from 'src' into 'dst', honoring the bit index of
 * both spans. Works one destination byte per iteration; each destination
 * byte may be assembled from two adjacent source bytes when the source is
 * misaligned. If the source runs out of bits first, zeroes are copied.
 * Destination bits outside the span are preserved via 'dst_mask'.
 */
static void bit_copy(struct bit_span *dst, struct bit_span *src)
{
uacpi_u8 src_shift, dst_shift, bits = 0;
uacpi_u16 dst_mask;
uacpi_u8 *dst_ptr, *src_ptr;
uacpi_u64 dst_count, src_count;
dst_ptr = dst->data + (dst->index / 8);
src_ptr = src->data + (src->index / 8);
dst_count = dst->length;
dst_shift = dst->index & 7;
src_count = src->length;
src_shift = src->index & 7;
while (dst_count)
{
bits = 0;
if (src_count) {
// Gather up to 8 source bits, possibly spanning two bytes
bits = *src_ptr >> src_shift;
if (src_shift && src_count > (uacpi_u32)(8 - src_shift))
bits |= *(src_ptr + 1) << (8 - src_shift);
if (src_count < 8) {
// Last partial source byte: mask off bits past the span
bits &= (1 << src_count) - 1;
src_count = 0;
} else {
src_count -= 8;
src_ptr++;
}
}
// Build a mask of destination bits we are allowed to modify
dst_mask = (dst_count < 8 ? (1 << dst_count) - 1 : 0xFF) << dst_shift;
*dst_ptr = (*dst_ptr & ~dst_mask) | ((bits << dst_shift) & dst_mask);
// Spill the bits that crossed into the next destination byte
if (dst_shift && dst_count > (uacpi_u32)(8 - dst_shift)) {
dst_mask >>= 8;
*(dst_ptr + 1) &= ~dst_mask;
*(dst_ptr + 1) |= (bits >> (8 - dst_shift)) & dst_mask;
}
dst_count = dst_count > 8 ? dst_count - 8 : 0;
++dst_ptr;
}
}
/*
 * Read a buffer field whose bit index is not byte-aligned by bit-copying
 * from the backing buffer into 'dst'. The destination span is sized to the
 * field length rounded up to whole bytes, so trailing destination bits are
 * zero-filled by bit_copy once the source span is exhausted.
 */
static void do_misaligned_buffer_read(
const uacpi_buffer_field *field, uacpi_u8 *dst
)
{
struct bit_span src_span = {
.index = field->bit_index,
.length = field->bit_length,
.const_data = field->backing->data,
};
struct bit_span dst_span = {
.data = dst,
};
dst_span.length = uacpi_round_up_bits_to_bytes(field->bit_length) * 8;
bit_copy(&dst_span, &src_span);
}
/*
 * Read a buffer field into 'dst'.
 *
 * Fast path: when the field starts on a byte boundary, a plain memcpy of the
 * rounded-up byte count is enough; only the last byte may contain extra bits
 * which are masked off. Otherwise fall back to a bitwise copy.
 * 'dst' must be able to hold round_up(bit_length / 8) bytes.
 */
void uacpi_read_buffer_field(
const uacpi_buffer_field *field, void *dst
)
{
if (!(field->bit_index & 7)) {
uacpi_u8 *src = field->backing->data;
uacpi_size count;
count = uacpi_round_up_bits_to_bytes(field->bit_length);
uacpi_memcpy(dst, src + (field->bit_index / 8), count);
cut_misaligned_tail(dst, count - 1, field->bit_length);
return;
}
do_misaligned_buffer_read(field, dst);
}
/*
 * Write a buffer field whose bit index is not byte-aligned: bit-copy 'size'
 * bytes worth of source bits into the field's backing buffer at the field's
 * bit position. Bits outside the field are preserved by bit_copy.
 */
static void do_write_misaligned_buffer_field(
    uacpi_buffer_field *field,
    const void *src, uacpi_size size
)
{
    struct bit_span from = {
        .length = size * 8,
        .const_data = src,
    };
    struct bit_span into = {
        .index = field->bit_index,
        .length = field->bit_length,
        .data = field->backing->data,
    };

    bit_copy(&into, &from);
}
/*
 * Write 'size' bytes from 'src' into a buffer field.
 *
 * Fast path: when the field is byte-aligned, memcpy_zerout copies the source
 * and zero-fills up to the field's byte length; the bits of the final byte
 * that lie past the field's bit length are then restored from their previous
 * value. The misaligned path does a full bitwise copy instead.
 */
void uacpi_write_buffer_field(
uacpi_buffer_field *field,
const void *src, uacpi_size size
)
{
if (!(field->bit_index & 7)) {
uacpi_u8 *dst, last_byte, tail_shift;
uacpi_size count;
dst = field->backing->data;
dst += field->bit_index / 8;
count = uacpi_round_up_bits_to_bytes(field->bit_length);
// Remember the old final byte so its out-of-field bits survive
last_byte = dst[count - 1];
tail_shift = field->bit_length & 7;
uacpi_memcpy_zerout(dst, src, count, size);
if (tail_shift) {
uacpi_u8 last_shift = 8 - tail_shift;
// Clear the written bits past the field, then restore the old ones
dst[count - 1] = dst[count - 1] << last_shift;
dst[count - 1] >>= last_shift;
dst[count - 1] |= (last_byte >> tail_shift) << tail_shift;
}
return;
}
do_write_misaligned_buffer_field(field, src, size);
}
/*
 * Perform one access-width-sized read or write of a field unit at the given
 * byte offset within its operation region.
 *
 * Handles the three field kinds:
 * - bank fields: the bank value is written to the bank-selection field first;
 * - index fields: the offset is written to the index field, then the data
 *   field is read/written (no opregion dispatch of its own);
 * - normal fields: dispatched directly to the region's handler.
 * If the field's AML LockRule requires it, the ACPI global lock is held for
 * the duration of the access.
 */
static uacpi_status access_field_unit(
uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op,
uacpi_u64 *in_out
)
{
uacpi_status ret = UACPI_STATUS_OK;
uacpi_namespace_node *region_node;
if (field->lock_rule) {
ret = uacpi_acquire_aml_mutex(
g_uacpi_rt_ctx.global_lock_mutex, 0xFFFF
);
if (uacpi_unlikely_error(ret))
return ret;
}
switch (field->kind) {
case UACPI_FIELD_UNIT_KIND_BANK:
// Select the bank before touching the banked region
ret = uacpi_write_field_unit(
field->bank_selection, &field->bank_value, sizeof(field->bank_value)
);
region_node = field->bank_region;
break;
case UACPI_FIELD_UNIT_KIND_NORMAL:
region_node = field->region;
break;
case UACPI_FIELD_UNIT_KIND_INDEX:
// Index/data pair: select the offset, then access the data field
ret = uacpi_write_field_unit(
field->index, &offset, sizeof(offset)
);
if (uacpi_unlikely_error(ret))
goto out;
switch (op) {
case UACPI_REGION_OP_READ:
ret = uacpi_read_field_unit(
field->data, in_out, field->access_width_bytes
);
break;
case UACPI_REGION_OP_WRITE:
ret = uacpi_write_field_unit(
field->data, in_out, field->access_width_bytes
);
break;
default:
ret = UACPI_STATUS_INVALID_ARGUMENT;
break;
}
// Index fields never dispatch to a region handler themselves
goto out;
default:
uacpi_error("invalid field unit kind %d\n", field->kind);
ret = UACPI_STATUS_INVALID_ARGUMENT;
}
if (uacpi_unlikely_error(ret))
goto out;
ret = uacpi_dispatch_opregion_io(
region_node, offset, field->access_width_bytes, op, in_out
);
out:
if (field->lock_rule)
uacpi_release_aml_mutex(g_uacpi_rt_ctx.global_lock_mutex);
return ret;
}
/*
 * Slow-path field unit read: the field either doesn't start on an access
 * boundary or spans multiple access units. Reads the region one access
 * width at a time and bit-copies the relevant slice of each read into the
 * destination buffer.
 */
static uacpi_status do_read_misaligned_field_unit(
uacpi_field_unit *field, uacpi_u8 *dst, uacpi_size size
)
{
uacpi_status ret;
uacpi_size reads_to_do;
uacpi_u64 out;
uacpi_u32 byte_offset = field->byte_offset;
uacpi_u32 bits_left = field->bit_length;
uacpi_u8 width_access_bits = field->access_width_bytes * 8;
// Source span aliases the temporary read buffer 'out'
struct bit_span src_span = {
.data = (uacpi_u8*)&out,
.index = field->bit_offset_within_first_byte,
};
struct bit_span dst_span = {
.data = dst,
.index = 0,
.length = size * 8
};
// Total number of access-width reads needed to cover the whole field
reads_to_do = UACPI_ALIGN_UP(
field->bit_offset_within_first_byte + field->bit_length,
width_access_bits,
uacpi_u32
);
reads_to_do /= width_access_bits;
while (reads_to_do-- > 0) {
src_span.length = UACPI_MIN(
bits_left, width_access_bits - src_span.index
);
ret = access_field_unit(
field, byte_offset, UACPI_REGION_OP_READ,
&out
);
if (uacpi_unlikely_error(ret))
return ret;
bit_copy(&dst_span, &src_span);
bits_left -= src_span.length;
// Only the first read starts at a non-zero bit offset
src_span.index = 0;
bit_span_offset(&dst_span, src_span.length);
byte_offset += field->access_width_bytes;
}
return UACPI_STATUS_OK;
}
/*
 * Read a field unit into 'dst' ('size' bytes). Results smaller than 'size'
 * are zero-extended; results larger than 'size' are truncated by
 * uacpi_memcpy_zerout.
 */
uacpi_status uacpi_read_field_unit(
uacpi_field_unit *field, void *dst, uacpi_size size
)
{
uacpi_status ret;
uacpi_u32 field_byte_length;
field_byte_length = uacpi_round_up_bits_to_bytes(field->bit_length);
/*
 * Very simple fast case:
 * - Bit offset within first byte is 0
 * AND
 * - Field size is <= access width
 */
if (field->bit_offset_within_first_byte == 0 &&
field_byte_length <= field->access_width_bytes)
{
uacpi_u64 out;
ret = access_field_unit(
field, field->byte_offset, UACPI_REGION_OP_READ, &out
);
if (uacpi_unlikely_error(ret))
return ret;
uacpi_memcpy_zerout(dst, &out, size, field_byte_length);
// Mask off any bits of the final byte past the field's bit length
if (size >= field_byte_length)
cut_misaligned_tail(dst, field_byte_length - 1, field->bit_length);
return UACPI_STATUS_OK;
}
// Slow case
return do_read_misaligned_field_unit(field, dst, size);
}
/*
 * Write 'size' bytes from 'src' into a field unit, one access width at a
 * time. For partial access units the field's AML update rule decides what
 * happens to the untouched bits: Preserve reads them back first,
 * WriteAsOnes fills with ones, WriteAsZeroes leaves the buffer zeroed.
 */
uacpi_status uacpi_write_field_unit(
uacpi_field_unit *field, const void *src, uacpi_size size
)
{
uacpi_status ret;
uacpi_u32 bits_left, byte_offset = field->byte_offset;
uacpi_u8 width_access_bits = field->access_width_bytes * 8;
uacpi_u64 in;
struct bit_span src_span = {
.const_data = src,
.index = 0,
.length = size * 8
};
// Destination span aliases the temporary write buffer 'in'
struct bit_span dst_span = {
.data = (uacpi_u8*)&in,
.index = field->bit_offset_within_first_byte,
};
bits_left = field->bit_length;
while (bits_left) {
in = 0;
dst_span.length = UACPI_MIN(
width_access_bits - dst_span.index, bits_left
);
// Partial access unit: fill the untouched bits per the update rule
if (dst_span.index != 0 || dst_span.length < width_access_bits) {
switch (field->update_rule) {
case UACPI_UPDATE_RULE_PRESERVE:
ret = access_field_unit(
field, byte_offset, UACPI_REGION_OP_READ, &in
);
if (uacpi_unlikely_error(ret))
return ret;
break;
case UACPI_UPDATE_RULE_WRITE_AS_ONES:
in = ~in;
break;
case UACPI_UPDATE_RULE_WRITE_AS_ZEROES:
break;
default:
uacpi_error("invalid field@%p update rule %d\n",
field, field->update_rule);
return UACPI_STATUS_INVALID_ARGUMENT;
}
}
bit_copy(&dst_span, &src_span);
bit_span_offset(&src_span, dst_span.length);
ret = access_field_unit(
field, byte_offset, UACPI_REGION_OP_WRITE, &in
);
if (uacpi_unlikely_error(ret))
return ret;
bits_left -= dst_span.length;
// Only the first access unit starts at a non-zero bit offset
dst_span.index = 0;
byte_offset += field->access_width_bytes;
}
return UACPI_STATUS_OK;
}
/*
 * Derive the access width (in bits) to use for a Generic Address Structure
 * register.
 */
static uacpi_u8 gas_get_access_bit_width(const struct acpi_gas *gas)
{
/*
 * Same algorithm as ACPICA.
 *
 * The reason we do this is apparently GAS bit offset being non-zero means
 * that it's an APEI register, as opposed to FADT, which needs special
 * handling. In the case of a FADT register we want to ignore the specified
 * access size.
 */
uacpi_u8 access_bit_width;
if (gas->register_bit_offset == 0 &&
UACPI_IS_POWER_OF_TWO(gas->register_bit_width, uacpi_u8) &&
UACPI_IS_ALIGNED(gas->register_bit_width, 8, uacpi_u8)) {
// FADT-style register: the bit width itself is a usable access size
access_bit_width = gas->register_bit_width;
} else if (gas->access_size) {
// Explicit access size: 1..4 maps to 8/16/32/64 bits
access_bit_width = gas->access_size * 8;
} else {
// No access size given: pick the smallest power-of-two window that
// covers the register, then shrink until aligned with the address
uacpi_u8 msb;
msb = uacpi_bit_scan_backward(
(gas->register_bit_offset + gas->register_bit_width) - 1
);
access_bit_width = 1 << msb;
if (access_bit_width <= 8) {
access_bit_width = 8;
} else {
/*
 * Keep backing off to previous power of two until we find one
 * that is aligned to the address specified in GAS.
 */
while (!UACPI_IS_ALIGNED(
gas->address, access_bit_width / 8, uacpi_u64
))
access_bit_width /= 2;
}
}
// Port IO is capped at 32-bit accesses, memory at 64-bit
return UACPI_MIN(
access_bit_width,
gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO ? 32 : 64
);
}
/*
 * Validate a Generic Address Structure and compute its access bit width.
 *
 * Rejects: NULL GAS, zero address (treated as "not present"), address
 * spaces other than SystemIO/SystemMemory, access sizes above QWORD, and
 * registers whose aligned total width exceeds 64 bits (the uacpi_u64 that
 * uacpi_gas_read/write operate on).
 */
static uacpi_status gas_validate(
const struct acpi_gas *gas, uacpi_u8 *access_bit_width
)
{
uacpi_size total_width;
if (uacpi_unlikely(gas == UACPI_NULL))
return UACPI_STATUS_INVALID_ARGUMENT;
if (!gas->address)
return UACPI_STATUS_NOT_FOUND;
if (gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_IO &&
gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY) {
uacpi_warn("unsupported GAS address space '%s' (%d)\n",
uacpi_address_space_to_string(gas->address_space_id),
gas->address_space_id);
return UACPI_STATUS_UNIMPLEMENTED;
}
if (gas->access_size > 4) {
uacpi_warn("unsupported GAS access size %d\n",
gas->access_size);
return UACPI_STATUS_UNIMPLEMENTED;
}
*access_bit_width = gas_get_access_bit_width(gas);
total_width = UACPI_ALIGN_UP(
gas->register_bit_offset + gas->register_bit_width,
*access_bit_width, uacpi_size
);
if (total_width > 64) {
uacpi_warn(
"GAS register total width is too large: %zu\n", total_width
);
return UACPI_STATUS_UNIMPLEMENTED;
}
return UACPI_STATUS_OK;
}
/*
* Apparently both reading and writing GAS works differently from operation
* region in that bit offsets are not respected when writing the data.
*
* Let's follow ACPICA's approach here so that we don't accidentally
* break any quirky hardware.
*/
/*
 * Read the register described by a Generic Address Structure into
 * *out_value.
 *
 * The register is read in access-width chunks; the register bit offset only
 * skips whole leading chunks and is not applied to the data itself (ACPICA
 * behavior, see the comment above).
 */
uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *out_value)
{
    uacpi_status ret;
    uacpi_u8 access_bit_width, access_byte_width;
    uacpi_u8 bit_offset, bits_left, index = 0;
    uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF;

    ret = gas_validate(gas, &access_bit_width);
    if (ret != UACPI_STATUS_OK)
        return ret;

    bit_offset = gas->register_bit_offset;
    bits_left = bit_offset + gas->register_bit_width;
    access_byte_width = access_bit_width / 8;

    if (access_byte_width < 8)
        mask = ~(mask << access_bit_width);

    *out_value = 0;
    while (bits_left) {
        if (bit_offset >= access_bit_width) {
            // This chunk lies entirely before the register, skip it
            data = 0;
            bit_offset -= access_bit_width;
        } else {
            uacpi_u64 address = gas->address + (index * access_byte_width);

            if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO) {
                ret = uacpi_system_io_read(address, access_byte_width, &data);
            } else {
                void *virt;

                virt = uacpi_kernel_map(address, access_byte_width);
                if (uacpi_unlikely(virt == UACPI_NULL))
                    return UACPI_STATUS_MAPPING_FAILED;

                ret = uacpi_system_memory_read(virt, access_byte_width, &data);

                // BUGFIX: unmap with the same byte length that was mapped
                // (the original passed access_bit_width here)
                uacpi_kernel_unmap(virt, access_byte_width);
            }
            if (uacpi_unlikely_error(ret))
                return ret;
        }

        *out_value |= (data & mask) << (index * access_bit_width);
        bits_left -= UACPI_MIN(bits_left, access_bit_width);
        ++index;
    }

    return UACPI_STATUS_OK;
}
/*
 * Write in_value to the register described by a Generic Address Structure.
 *
 * Mirrors uacpi_gas_read: the register is written in access-width chunks and
 * the bit offset only skips whole leading chunks.
 */
uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 in_value)
{
    uacpi_status ret;
    uacpi_u8 access_bit_width, access_byte_width;
    uacpi_u8 bit_offset, bits_left, index = 0;
    uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF;

    ret = gas_validate(gas, &access_bit_width);
    if (ret != UACPI_STATUS_OK)
        return ret;

    bit_offset = gas->register_bit_offset;
    bits_left = bit_offset + gas->register_bit_width;
    access_byte_width = access_bit_width / 8;

    if (access_byte_width < 8)
        mask = ~(mask << access_bit_width);

    while (bits_left) {
        data = (in_value >> (index * access_bit_width)) & mask;

        if (bit_offset >= access_bit_width) {
            // This chunk lies entirely before the register, skip it
            bit_offset -= access_bit_width;
        } else {
            uacpi_u64 address = gas->address + (index * access_byte_width);

            if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO) {
                ret = uacpi_system_io_write(address, access_byte_width, data);
            } else {
                void *virt;

                virt = uacpi_kernel_map(address, access_byte_width);
                if (uacpi_unlikely(virt == UACPI_NULL))
                    return UACPI_STATUS_MAPPING_FAILED;

                ret = uacpi_system_memory_write(virt, access_byte_width, data);

                // BUGFIX: unmap with the same byte length that was mapped
                // (the original passed access_bit_width here)
                uacpi_kernel_unmap(virt, access_byte_width);
            }
            if (uacpi_unlikely_error(ret))
                return ret;
        }

        bits_left -= UACPI_MIN(bits_left, access_bit_width);
        ++index;
    }

    return UACPI_STATUS_OK;
}
// One-shot port IO read: map the port range just for this access, read, and
// unmap again.
uacpi_status uacpi_system_io_read(
    uacpi_io_addr address, uacpi_u8 width, uacpi_u64 *out
)
{
    uacpi_handle io_handle;
    uacpi_status status;

    status = uacpi_kernel_io_map(address, width, &io_handle);
    if (uacpi_unlikely_error(status))
        return status;

    status = uacpi_kernel_io_read(io_handle, 0, width, out);
    uacpi_kernel_io_unmap(io_handle);

    return status;
}
// One-shot port IO write: map the port range just for this access, write,
// and unmap again.
uacpi_status uacpi_system_io_write(
    uacpi_io_addr address, uacpi_u8 width, uacpi_u64 in
)
{
    uacpi_handle io_handle;
    uacpi_status status;

    status = uacpi_kernel_io_map(address, width, &io_handle);
    if (uacpi_unlikely_error(status))
        return status;

    status = uacpi_kernel_io_write(io_handle, 0, width, in);
    uacpi_kernel_io_unmap(io_handle);

    return status;
}
/*
 * Read 'width' bytes (1/2/4/8) from mapped memory with a single volatile
 * access of exactly that width, so MMIO accesses are not widened, narrowed,
 * or elided by the compiler.
 */
uacpi_status uacpi_system_memory_read(void *ptr, uacpi_u8 width, uacpi_u64 *out)
{
    if (width == 1)
        *out = *(volatile uacpi_u8*)ptr;
    else if (width == 2)
        *out = *(volatile uacpi_u16*)ptr;
    else if (width == 4)
        *out = *(volatile uacpi_u32*)ptr;
    else if (width == 8)
        *out = *(volatile uacpi_u64*)ptr;
    else
        return UACPI_STATUS_INVALID_ARGUMENT;

    return UACPI_STATUS_OK;
}
/*
 * Write 'width' bytes (1/2/4/8) to mapped memory with a single volatile
 * access of exactly that width (see uacpi_system_memory_read).
 */
uacpi_status uacpi_system_memory_write(void *ptr, uacpi_u8 width, uacpi_u64 in)
{
    if (width == 1)
        *(volatile uacpi_u8*)ptr = in;
    else if (width == 2)
        *(volatile uacpi_u16*)ptr = in;
    else if (width == 4)
        *(volatile uacpi_u32*)ptr = in;
    else if (width == 8)
        *(volatile uacpi_u64*)ptr = in;
    else
        return UACPI_STATUS_INVALID_ARGUMENT;

    return UACPI_STATUS_OK;
}

392
src/uacpi/mutex.c Normal file
View file

@@ -0,0 +1,392 @@
#include <uacpi/platform/atomic.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/context.h>
#include <uacpi/kernel_api.h>
#include <uacpi/internal/namespace.h>
#ifndef UACPI_REDUCED_HARDWARE
#define GLOBAL_LOCK_PENDING (1 << 0)
#define GLOBAL_LOCK_OWNED_BIT 1
#define GLOBAL_LOCK_OWNED (1 << GLOBAL_LOCK_OWNED_BIT)
#define GLOBAL_LOCK_MASK 0b11u
/*
 * One attempt at the FACS global lock handshake: atomically set the owned
 * bit, and set the pending bit iff the lock was already owned by firmware.
 *
 * Returns UACPI_TRUE if the lock was acquired (it was not previously owned).
 */
static uacpi_bool try_acquire_global_lock_from_firmware(uacpi_u32 *lock)
{
uacpi_u32 value, new_value;
uacpi_bool was_owned;
value = *(volatile uacpi_u32*)lock;
do {
was_owned = (value & GLOBAL_LOCK_OWNED) >> GLOBAL_LOCK_OWNED_BIT;
// Clear both owned & pending bits.
new_value = value & ~GLOBAL_LOCK_MASK;
// Set owned unconditionally
new_value |= GLOBAL_LOCK_OWNED;
// Set pending iff the lock was owned at the time of reading
if (was_owned)
new_value |= GLOBAL_LOCK_PENDING;
// cmpxchg reloads 'value' on failure, so the loop recomputes from fresh state
} while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));
return !was_owned;
}
/*
 * Atomically clear the owned and pending bits of the FACS global lock.
 *
 * Returns UACPI_TRUE if the pending bit was set at release time, meaning
 * firmware is waiting and must be signalled via GBL_RLS.
 */
static uacpi_bool do_release_global_lock_to_firmware(uacpi_u32 *lock)
{
uacpi_u32 value, new_value;
value = *(volatile uacpi_u32*)lock;
do {
new_value = value & ~GLOBAL_LOCK_MASK;
} while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));
return value & GLOBAL_LOCK_PENDING;
}
/*
 * Acquire the hardware (FACS) global lock from firmware, retrying up to
 * 0xFFFF times. Between attempts the spinlock is dropped and we block on
 * the global lock event, which is signalled when firmware releases the lock
 * (the pending flag set below causes that notification).
 *
 * No-op when the platform has no global lock. Returns
 * UACPI_STATUS_HARDWARE_TIMEOUT if all attempts fail.
 */
static uacpi_status uacpi_acquire_global_lock_from_firmware(void)
{
uacpi_cpu_flags flags;
uacpi_u16 spins = 0;
uacpi_bool success;
if (!g_uacpi_rt_ctx.has_global_lock)
return UACPI_STATUS_OK;
flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
for (;;) {
spins++;
uacpi_trace(
"trying to acquire the global lock from firmware... (attempt %u)\n",
spins
);
success = try_acquire_global_lock_from_firmware(
&g_uacpi_rt_ctx.facs->global_lock
);
if (success)
break;
if (uacpi_unlikely(spins == 0xFFFF))
break;
// Tell the release path we're waiting before sleeping on the event
g_uacpi_rt_ctx.global_lock_pending = UACPI_TRUE;
uacpi_trace(
"global lock is owned by firmware, waiting for a release "
"notification...\n"
);
// Must drop the spinlock before a potentially-blocking wait
uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
uacpi_kernel_wait_for_event(g_uacpi_rt_ctx.global_lock_event, 0xFFFF);
flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
}
g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
if (uacpi_unlikely(!success)) {
uacpi_error("unable to acquire global lock after %u attempts\n", spins);
return UACPI_STATUS_HARDWARE_TIMEOUT;
}
uacpi_trace("global lock successfully acquired after %u attempt%s\n",
spins, spins > 1 ? "s" : "");
return UACPI_STATUS_OK;
}
/*
 * Hand the hardware (FACS) global lock back to firmware, writing GBL_RLS
 * when the pending bit indicates firmware is waiting for it. No-op when the
 * platform has no global lock.
 */
static void uacpi_release_global_lock_to_firmware(void)
{
    if (!g_uacpi_rt_ctx.has_global_lock)
        return;

    uacpi_trace("releasing the global lock to firmware...\n");

    if (!do_release_global_lock_to_firmware(&g_uacpi_rt_ctx.facs->global_lock))
        return;

    uacpi_trace("notifying firmware of the global lock release since the "
                "pending bit was set\n");
    uacpi_write_register_field(UACPI_REGISTER_FIELD_GBL_RLS, 1);
}
#endif
UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_acquire_global_lock_from_firmware(void)
)
UACPI_STUB_IF_REDUCED_HARDWARE(
void uacpi_release_global_lock_to_firmware(void)
)
/*
 * Acquire a kernel mutex with a timeout, logging any outcome other than
 * success or an expected timeout on a finite wait.
 */
uacpi_status uacpi_acquire_native_mutex_with_timeout(
    uacpi_handle mtx, uacpi_u16 timeout
)
{
    uacpi_status status;

    if (uacpi_unlikely(mtx == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    status = uacpi_kernel_acquire_mutex(mtx, timeout);
    if (uacpi_likely_success(status))
        return status;

    // A timeout on a finite wait is normal; anything else (including a
    // timeout on an infinite wait) is worth shouting about
    if (uacpi_unlikely(status != UACPI_STATUS_TIMEOUT || timeout == 0xFFFF)) {
        uacpi_error(
            "unexpected status %08X (%s) while acquiring %p (timeout=%04X)\n",
            status, uacpi_status_to_string(status), mtx, timeout
        );
    }

    return status;
}
/*
 * Public API: acquire the ACPI global lock (native mutex + firmware lock).
 * On success *out_seq receives a sequence number to pass to
 * uacpi_release_global_lock.
 */
uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq)
{
uacpi_status ret;
UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
if (uacpi_unlikely(out_seq == UACPI_NULL))
return UACPI_STATUS_INVALID_ARGUMENT;
ret = uacpi_acquire_native_mutex_with_timeout(
g_uacpi_rt_ctx.global_lock_mutex->handle, timeout
);
if (ret != UACPI_STATUS_OK)
return ret;
ret = uacpi_acquire_global_lock_from_firmware();
if (uacpi_unlikely_error(ret)) {
// Don't hold the native mutex if the firmware handshake failed
uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);
return ret;
}
if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_seq_num == 0xFFFFFFFF))
g_uacpi_rt_ctx.global_lock_seq_num = 0;
/*
 * NOTE(review): the caller receives the pre-increment value here, while
 * uacpi_release_global_lock compares the caller's seq against the
 * post-increment counter — as written a seq returned from this function
 * appears to never match at release time. Confirm the intended protocol.
 */
*out_seq = g_uacpi_rt_ctx.global_lock_seq_num++;
g_uacpi_rt_ctx.global_lock_acquired = UACPI_TRUE;
return UACPI_STATUS_OK;
}
/*
 * Public API: release the ACPI global lock previously acquired via
 * uacpi_acquire_global_lock, validating the caller's sequence number.
 */
uacpi_status uacpi_release_global_lock(uacpi_u32 seq)
{
UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
/*
 * NOTE(review): this compares against the post-increment counter, but the
 * acquire path hands out the pre-increment value — see the matching note
 * in uacpi_acquire_global_lock; confirm the intended seq protocol.
 */
if (uacpi_unlikely(!g_uacpi_rt_ctx.global_lock_acquired ||
seq != g_uacpi_rt_ctx.global_lock_seq_num))
return UACPI_STATUS_INVALID_ARGUMENT;
g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE;
// Release order: firmware lock first, then the native mutex
uacpi_release_global_lock_to_firmware();
uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);
return UACPI_STATUS_OK;
}
// Lock-free ownership check: compare the atomically-stored owner thread id
// against the calling thread's id.
uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex)
{
    uacpi_thread_id owner = UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner);

    return owner == uacpi_kernel_get_thread_id();
}
/*
 * Acquire an AML-level mutex with recursion support.
 *
 * Re-entry by the owning thread just bumps the depth (capped at 0xFFFF).
 * Otherwise the namespace write lock is dropped around the potentially
 * blocking native acquire and re-taken before returning. If this mutex is
 * the global lock mutex, the firmware half of the global lock is acquired
 * as well.
 */
uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout)
{
uacpi_thread_id this_id;
uacpi_status ret = UACPI_STATUS_OK;
this_id = uacpi_kernel_get_thread_id();
if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) {
if (uacpi_unlikely(mutex->depth == 0xFFFF)) {
uacpi_warn(
"failing an attempt to acquire mutex @%p, too many recursive "
"acquires\n", mutex
);
return UACPI_STATUS_DENIED;
}
mutex->depth++;
return ret;
}
// Blocking with the namespace lock held could deadlock other threads
uacpi_namespace_write_unlock();
ret = uacpi_acquire_native_mutex_with_timeout(mutex->handle, timeout);
if (ret != UACPI_STATUS_OK)
goto out;
if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) {
ret = uacpi_acquire_global_lock_from_firmware();
if (uacpi_unlikely_error(ret)) {
uacpi_release_native_mutex(mutex->handle);
goto out;
}
}
UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id);
mutex->depth = 1;
out:
// Always re-take the namespace lock, even on failure
uacpi_namespace_write_lock();
return ret;
}
/*
 * Release an AML-level mutex: recursion depth is decremented first, and the
 * underlying native mutex (plus the firmware global lock, if applicable) is
 * only released when the depth drops to zero.
 *
 * NOTE(review): ownership is not validated here — calling this from a
 * non-owner thread (or with depth already 0) would corrupt the depth
 * counter. Callers appear expected to guarantee ownership; confirm.
 */
uacpi_status uacpi_release_aml_mutex(uacpi_mutex *mutex)
{
if (mutex->depth-- > 1)
return UACPI_STATUS_OK;
if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle)
uacpi_release_global_lock_to_firmware();
UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE);
uacpi_release_native_mutex(mutex->handle);
return UACPI_STATUS_OK;
}
// Initialize a recursive lock: allocate the backing kernel mutex and reset
// ownership bookkeeping.
uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock)
{
    lock->mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    lock->owner = UACPI_THREAD_ID_NONE;
    lock->depth = 0;

    return UACPI_STATUS_OK;
}
// Tear down a recursive lock, warning (and forcibly resetting) if it is
// still held at deinit time.
uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock)
{
    if (uacpi_unlikely(lock->depth)) {
        uacpi_warn(
            "de-initializing active recursive lock %p with depth=%zu\n",
            lock, lock->depth
        );
        lock->depth = 0;
    }

    lock->owner = UACPI_THREAD_ID_NONE;

    if (lock->mutex != UACPI_NULL) {
        uacpi_kernel_free_mutex(lock->mutex);
        lock->mutex = UACPI_NULL;
    }

    return UACPI_STATUS_OK;
}
// Acquire a recursive lock: re-entry by the owner bumps the depth, any other
// thread blocks on the backing mutex and becomes the owner at depth 1.
uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock)
{
    uacpi_thread_id self = uacpi_kernel_get_thread_id();
    uacpi_status status = UACPI_STATUS_OK;

    if (UACPI_ATOMIC_LOAD_THREAD_ID(&lock->owner) == self) {
        lock->depth++;
        return status;
    }

    status = uacpi_acquire_native_mutex(lock->mutex);
    if (uacpi_unlikely_error(status))
        return status;

    UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, self);
    lock->depth = 1;
    return status;
}
/*
 * Release one level of a recursive lock; the backing mutex is only released
 * when the depth drops to zero.
 *
 * NOTE(review): like uacpi_release_aml_mutex, this does not verify that the
 * caller owns the lock before decrementing depth — callers appear expected
 * to guarantee that; confirm.
 */
uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock)
{
if (lock->depth-- > 1)
return UACPI_STATUS_OK;
UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, UACPI_THREAD_ID_NONE);
return uacpi_release_native_mutex(lock->mutex);
}
// Initialize a readers-writer lock built from two kernel mutexes (classic
// reader-count construction). Rolls back the first allocation on failure.
uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock)
{
    lock->read_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->read_mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    lock->write_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->write_mutex == UACPI_NULL)) {
        uacpi_kernel_free_mutex(lock->read_mutex);
        lock->read_mutex = UACPI_NULL;
        return UACPI_STATUS_OUT_OF_MEMORY;
    }

    lock->num_readers = 0;
    return UACPI_STATUS_OK;
}
// Tear down a readers-writer lock, warning (and resetting the count) if any
// readers are still registered.
uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock)
{
    if (uacpi_unlikely(lock->num_readers)) {
        uacpi_warn("de-initializing rw_lock %p with %zu active readers\n",
                   lock, lock->num_readers);
        lock->num_readers = 0;
    }

    if (lock->read_mutex != UACPI_NULL) {
        uacpi_kernel_free_mutex(lock->read_mutex);
        lock->read_mutex = UACPI_NULL;
    }
    if (lock->write_mutex != UACPI_NULL) {
        uacpi_kernel_free_mutex(lock->write_mutex);
        lock->write_mutex = UACPI_NULL;
    }

    return UACPI_STATUS_OK;
}
/*
 * Take the lock for reading: under the read_mutex, bump the reader count;
 * the first reader also takes write_mutex to block out writers. If the
 * first reader fails to take write_mutex, the count is reset so a later
 * reader retries.
 */
uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock)
{
uacpi_status ret;
ret = uacpi_acquire_native_mutex(lock->read_mutex);
if (uacpi_unlikely_error(ret))
return ret;
if (lock->num_readers++ == 0) {
ret = uacpi_acquire_native_mutex(lock->write_mutex);
if (uacpi_unlikely_error(ret))
lock->num_readers = 0;
}
// NOTE(review): this calls uacpi_kernel_release_mutex directly while the
// acquire side goes through the uacpi_acquire_native_mutex wrapper —
// presumably equivalent; confirm the wrapper adds nothing required here.
uacpi_kernel_release_mutex(lock->read_mutex);
return ret;
}
/*
 * Drop a read hold: under the read_mutex, decrement the reader count; the
 * last reader out releases write_mutex, letting writers in again.
 */
uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock)
{
uacpi_status ret;
ret = uacpi_acquire_native_mutex(lock->read_mutex);
if (uacpi_unlikely_error(ret))
return ret;
if (lock->num_readers-- == 1)
uacpi_release_native_mutex(lock->write_mutex);
uacpi_kernel_release_mutex(lock->read_mutex);
return ret;
}
// Take the lock for writing: writers simply contend on write_mutex, which
// readers hold collectively while any of them are active.
uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock)
{
    return uacpi_acquire_native_mutex(lock->write_mutex);
}
// Drop the write hold acquired via uacpi_rw_lock_write.
uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock)
{
    return uacpi_release_native_mutex(lock->write_mutex);
}

1020
src/uacpi/namespace.c Normal file

File diff suppressed because it is too large Load diff

251
src/uacpi/notify.c Normal file
View file

@ -0,0 +1,251 @@
#include <uacpi/internal/notify.h>
#include <uacpi/internal/shareable.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/kernel_api.h>
// Serializes notify dispatch and handler list install/uninstall
static uacpi_handle notify_mutex;

/*
 * Allocate the global notification mutex. Must succeed before any
 * notify API can be used.
 */
uacpi_status uacpi_initialize_notify(void)
{
    notify_mutex = uacpi_kernel_create_mutex();

    return notify_mutex == UACPI_NULL ?
        UACPI_STATUS_OUT_OF_MEMORY : UACPI_STATUS_OK;
}
void uacpi_deinitialize_notify(void)
{
if (notify_mutex != UACPI_NULL)
uacpi_kernel_free_mutex(notify_mutex);
notify_mutex = UACPI_NULL;
}
/*
 * State captured for one deferred Notify() dispatch. Holds a reference
 * to both the namespace node and its object for the lifetime of the
 * scheduled work item (see uacpi_notify_all).
 */
struct notification_ctx {
    uacpi_namespace_node *node;
    uacpi_u64 value;
    uacpi_object *node_object;
};

// Drop the node/object references held by the context and free it
static void free_notification_ctx(struct notification_ctx *ctx)
{
    uacpi_namespace_node_release_object(ctx->node_object);
    uacpi_namespace_node_unref(ctx->node);
    uacpi_free(ctx, sizeof(*ctx));
}
/*
 * Work-item callback: invoke every notify handler registered on the
 * target node, then every handler registered on the namespace root,
 * and finally release the context.
 */
static void do_notify(uacpi_handle opaque)
{
    struct notification_ctx *ctx = opaque;
    uacpi_device_notify_handler *handler;
    uacpi_bool did_notify_root = UACPI_FALSE;

    handler = ctx->node_object->handlers->notify_head;

    for (;;) {
        if (handler == UACPI_NULL) {
            // Node list exhausted: switch to the root's handlers exactly
            // once, then clean up and return.
            if (did_notify_root) {
                free_notification_ctx(ctx);
                return;
            }

            handler = g_uacpi_rt_ctx.root_object->handlers->notify_head;
            did_notify_root = UACPI_TRUE;
            continue;
        }

        handler->callback(handler->user_context, ctx->node, ctx->value);
        handler = handler->next;
    }
}
/*
 * Schedule asynchronous delivery of Notify(node, value) to every
 * handler registered on the node and on the namespace root.
 *
 * Returns UACPI_STATUS_INVALID_ARGUMENT if the node is not a device,
 * thermal zone or processor, and UACPI_STATUS_NO_HANDLER if nothing
 * is listening.
 */
uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value)
{
    uacpi_status ret;
    struct notification_ctx *ctx;
    uacpi_object *node_object;

    node_object = uacpi_namespace_node_get_object_typed(
        node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
        UACPI_OBJECT_PROCESSOR_BIT
    );
    if (uacpi_unlikely(node_object == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Nothing to do if neither the node nor the root has handlers
    if (node_object->handlers->notify_head == UACPI_NULL &&
        g_uacpi_rt_ctx.root_object->handlers->notify_head == UACPI_NULL) {
        ret = UACPI_STATUS_NO_HANDLER;
        goto out;
    }

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (uacpi_unlikely(ctx == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    ctx->node = node;
    // In case this node goes out of scope
    uacpi_shareable_ref(node);
    ctx->value = value;
    ctx->node_object = uacpi_namespace_node_get_object(node);
    uacpi_object_ref(ctx->node_object);

    ret = uacpi_kernel_schedule_work(UACPI_WORK_NOTIFICATION, do_notify, ctx);
    if (uacpi_unlikely_error(ret)) {
        uacpi_warn("unable to schedule notification work: %s\n",
                   uacpi_status_to_string(ret));
        // Scheduling failed: drop the references we just took
        free_notification_ctx(ctx);
    }

out:
    uacpi_release_native_mutex(notify_mutex);
    return ret;
}
// Find the list entry whose callback matches target_handler, or NULL
static uacpi_device_notify_handler *handler_container(
    uacpi_handlers *handlers, uacpi_notify_handler target_handler
)
{
    uacpi_device_notify_handler *cur;

    for (cur = handlers->notify_head; cur != UACPI_NULL; cur = cur->next) {
        if (cur->callback == target_handler)
            break;
    }

    return cur;
}
/*
 * Register a notify handler on a node (or on the namespace root).
 *
 * The handler is prepended to the node object's notify list. Returns
 * UACPI_STATUS_ALREADY_EXISTS if the same callback is already
 * installed on this node.
 */
uacpi_status uacpi_install_notify_handler(
    uacpi_namespace_node *node, uacpi_notify_handler handler,
    uacpi_handle handler_context
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_handlers *handlers;
    uacpi_device_notify_handler *new_handler;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (node == uacpi_namespace_root()) {
        obj = g_uacpi_rt_ctx.root_object;
    } else {
        ret = uacpi_namespace_node_acquire_object_typed(
            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
            UACPI_OBJECT_PROCESSOR_BIT, &obj
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        goto out_no_mutex;

    // Let any in-flight notifications drain before touching the list
    uacpi_kernel_wait_for_work_completion();

    handlers = obj->handlers;

    if (handler_container(handlers, handler) != UACPI_NULL) {
        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    new_handler = uacpi_kernel_alloc_zeroed(sizeof(*new_handler));
    if (uacpi_unlikely(new_handler == UACPI_NULL)) {
        /*
         * BUGFIX: this path used to 'return' directly, leaking the held
         * notify_mutex and (for non-root nodes) the acquired object
         * reference. Route through the shared cleanup instead.
         */
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    new_handler->callback = handler;
    new_handler->user_context = handler_context;
    new_handler->next = handlers->notify_head;

    handlers->notify_head = new_handler;

out:
    uacpi_release_native_mutex(notify_mutex);
out_no_mutex:
    if (node != uacpi_namespace_root())
        uacpi_object_unref(obj);

    return ret;
}
/*
 * Remove a previously installed notify handler from a node (or the
 * namespace root). Returns UACPI_STATUS_NOT_FOUND if the callback is
 * not registered on this node. The handler memory is freed only after
 * all locks are dropped and the list is consistent.
 */
uacpi_status uacpi_uninstall_notify_handler(
    uacpi_namespace_node *node, uacpi_notify_handler handler
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_handlers *handlers;
    uacpi_device_notify_handler *containing, *prev_handler;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (node == uacpi_namespace_root()) {
        obj = g_uacpi_rt_ctx.root_object;
    } else {
        ret = uacpi_namespace_node_acquire_object_typed(
            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
            UACPI_OBJECT_PROCESSOR_BIT, &obj
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        goto out_no_mutex;

    // Make sure no notification is mid-dispatch while we unlink
    uacpi_kernel_wait_for_work_completion();

    handlers = obj->handlers;

    containing = handler_container(handlers, handler);
    if (containing == UACPI_NULL) {
        ret = UACPI_STATUS_NOT_FOUND;
        goto out;
    }

    prev_handler = handlers->notify_head;

    // Are we the last linked handler?
    if (prev_handler == containing) {
        handlers->notify_head = containing->next;
        goto out;
    }

    // Nope, we're somewhere in the middle. Do a search.
    while (prev_handler) {
        if (prev_handler->next == containing) {
            prev_handler->next = containing->next;
            goto out;
        }

        prev_handler = prev_handler->next;
    }

out:
    uacpi_release_native_mutex(notify_mutex);
out_no_mutex:
    if (node != uacpi_namespace_root())
        uacpi_object_unref(obj);

    // Only freed on success; 'containing' is always set on that path
    if (uacpi_likely_success(ret))
        uacpi_free(containing, sizeof(*containing));

    return ret;
}

260
src/uacpi/opcodes.c Normal file
View file

@ -0,0 +1,260 @@
#include <uacpi/internal/opcodes.h>
/*
 * Table-entry generators consumed by the UACPI_ENUMERATE_* X-macros
 * below: UACPI_OP emits an entry with inline decode ops, while
 * UACPI_OUT_OF_LINE_OP points at a separately defined decode buffer.
 */
#define UACPI_OP(opname, opcode, ...) \
    { #opname, .decode_ops = __VA_ARGS__, .code = opcode },

#define UACPI_OUT_OF_LINE_OP(opname, opcode, out_of_line_buf, props) \
    {                                                                \
        .name = #opname,                                             \
        .indirect_decode_ops = out_of_line_buf,                      \
        .properties = props,                                         \
        .code = opcode,                                              \
    },

// One spec per single-byte AML opcode (0x00-0xFF)
static const struct uacpi_op_spec opcode_table[0x100] = {
    UACPI_ENUMERATE_OPCODES
};

// Extended opcodes, reached through ext_op_to_idx below
static const struct uacpi_op_spec ext_opcode_table[] = {
    UACPI_ENUMERATE_EXT_OPCODES
};

// Keep only the low byte of an extended (two-byte) opcode
#define _(op) (op & 0x00FF)

/*
 * Maps the low byte of an extended opcode to its index in
 * ext_opcode_table; bytes not listed here fall through to entry 0.
 */
static const uacpi_u8 ext_op_to_idx[0x100] = {
    [_(UACPI_AML_OP_MutexOp)] = 1, [_(UACPI_AML_OP_EventOp)] = 2,
    [_(UACPI_AML_OP_CondRefOfOp)] = 3, [_(UACPI_AML_OP_CreateFieldOp)] = 4,
    [_(UACPI_AML_OP_LoadTableOp)] = 5, [_(UACPI_AML_OP_LoadOp)] = 6,
    [_(UACPI_AML_OP_StallOp)] = 7, [_(UACPI_AML_OP_SleepOp)] = 8,
    [_(UACPI_AML_OP_AcquireOp)] = 9, [_(UACPI_AML_OP_SignalOp)] = 10,
    [_(UACPI_AML_OP_WaitOp)] = 11, [_(UACPI_AML_OP_ResetOp)] = 12,
    [_(UACPI_AML_OP_ReleaseOp)] = 13, [_(UACPI_AML_OP_FromBCDOp)] = 14,
    [_(UACPI_AML_OP_ToBCDOp)] = 15, [_(UACPI_AML_OP_RevisionOp)] = 16,
    [_(UACPI_AML_OP_DebugOp)] = 17, [_(UACPI_AML_OP_FatalOp)] = 18,
    [_(UACPI_AML_OP_TimerOp)] = 19, [_(UACPI_AML_OP_OpRegionOp)] = 20,
    [_(UACPI_AML_OP_FieldOp)] = 21, [_(UACPI_AML_OP_DeviceOp)] = 22,
    [_(UACPI_AML_OP_ProcessorOp)] = 23, [_(UACPI_AML_OP_PowerResOp)] = 24,
    [_(UACPI_AML_OP_ThermalZoneOp)] = 25, [_(UACPI_AML_OP_IndexFieldOp)] = 26,
    [_(UACPI_AML_OP_BankFieldOp)] = 27, [_(UACPI_AML_OP_DataRegionOp)] = 28,
};
/*
 * Look up the parse spec for an AML opcode. Values above 0xFF are
 * extended opcodes and are routed through ext_op_to_idx into
 * ext_opcode_table; everything else indexes opcode_table directly.
 */
const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op op)
{
    if (op <= 0xFF)
        return &opcode_table[op];

    return &ext_opcode_table[ext_op_to_idx[_(op)]];
}
/*
 * Shared decode-op tail used by the Field/BankField/IndexField decode
 * buffers: parses the FieldList elements one by one. parse_loop_pc is
 * the decode-op index to jump back to after each element, i.e. the
 * top of this loop in the including array.
 */
#define PARSE_FIELD_ELEMENTS(parse_loop_pc)                            \
    /* Parse every field element found inside */                       \
    UACPI_PARSE_OP_IF_HAS_DATA, 44,                                    \
        /* Look at the first byte */                                   \
        UACPI_PARSE_OP_LOAD_IMM, 1,                                    \
                                                                       \
        /* ReservedField := 0x00 PkgLength */                          \
        UACPI_PARSE_OP_IF_EQUALS, 0x00, 3,                             \
            UACPI_PARSE_OP_PKGLEN,                                     \
            UACPI_PARSE_OP_JMP, parse_loop_pc,                         \
                                                                       \
        /* AccessField := 0x01 AccessType AccessAttrib */              \
        UACPI_PARSE_OP_IF_EQUALS, 0x01, 6,                             \
            UACPI_PARSE_OP_LOAD_IMM, 1,                                \
            UACPI_PARSE_OP_LOAD_IMM, 1,                                \
            UACPI_PARSE_OP_JMP, parse_loop_pc,                         \
                                                                       \
        /* ConnectField := <0x02 NameString> | <0x02 BufferData> */    \
        UACPI_PARSE_OP_IF_EQUALS, 0x02, 5,                             \
            UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,                   \
            UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER,             \
            UACPI_PARSE_OP_JMP, parse_loop_pc,                         \
                                                                       \
        /* ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib \
         * AccessLength */                                             \
        UACPI_PARSE_OP_IF_EQUALS, 0x03, 8,                             \
            UACPI_PARSE_OP_LOAD_IMM, 1,                                \
            UACPI_PARSE_OP_LOAD_IMM, 1,                                \
            UACPI_PARSE_OP_LOAD_IMM, 1,                                \
            UACPI_PARSE_OP_JMP, parse_loop_pc,                         \
                                                                       \
        /* NamedField := NameSeg PkgLength */                          \
                                                                       \
        /*                                                             \
         * Discard the immediate, as it's the first byte of the        \
         * nameseg. We don't need it.                                  \
         */                                                            \
        UACPI_PARSE_OP_ITEM_POP,                                       \
        UACPI_PARSE_OP_AML_PC_DECREMENT,                               \
        UACPI_PARSE_OP_CREATE_NAMESTRING,                              \
        UACPI_PARSE_OP_PKGLEN,                                         \
        UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_FIELD_UNIT,    \
        UACPI_PARSE_OP_JMP, parse_loop_pc,                             \
                                                                       \
    UACPI_PARSE_OP_INVOKE_HANDLER,                                     \
    UACPI_PARSE_OP_END
/*
 * Field(): tracked PkgLength, the region's name, one immediate byte,
 * then the shared field-element loop restarting at decode-op index 4.
 */
uacpi_u8 uacpi_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(4),
};

/*
 * BankField(): two existing names (region + bank register), a bank
 * value operand, one immediate byte, loop restarts at index 6.
 */
uacpi_u8 uacpi_bank_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_OPERAND,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(6),
};

/*
 * IndexField(): two existing names (index + data fields), one
 * immediate byte, loop restarts at index 5.
 */
uacpi_u8 uacpi_index_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(5),
};
/*
 * Decode ops for the Load() operator: evaluates the source term arg,
 * dispatches the table load, and stores a true/false success object
 * into the target.
 */
uacpi_u8 uacpi_load_op_decode_ops[] = {
    // Storage for the scope pointer, this is left as 0 in case of errors
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
    UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
    UACPI_PARSE_OP_TARGET,

    /*
     * Invoke the handler here to initialize the table. If this fails, it's
     * expected to keep the item 0 as NULL, which is checked below to return
     * false to the caller of Load.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_IF_NULL, 0, 3,
        UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
        UACPI_PARSE_OP_JMP, 15,

    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,

    /*
     * Invoke the handler a second time to initialize any AML GPE handlers that
     * might've been loaded from this table.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_STORE_TO_TARGET, 3,
    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
    UACPI_PARSE_OP_END,
};
/*
 * Decode ops for the LoadTable() operator: consumes the string
 * arguments (tracked via an immediate countdown), dispatches the
 * table load, then optionally stores to a requested target.
 */
uacpi_u8 uacpi_load_table_op_decode_ops[] = {
    // Storage for the scope pointer, this is left as 0 in case of errors
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
    // Index of the table we are going to be loaded to unref it later
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    // Storage for the target pointer, this is left as 0 if none was requested
    UACPI_PARSE_OP_LOAD_ZERO_IMM,

    // Countdown of string arguments left to consume
    UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, 5,
    UACPI_PARSE_OP_IF_NOT_NULL, 4, 5,
        UACPI_PARSE_OP_STRING,
        UACPI_PARSE_OP_IMM_DECREMENT, 4,
        UACPI_PARSE_OP_JMP, 8,
    UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,

    /*
     * Invoke the handler here to initialize the table. If this fails, it's
     * expected to keep the item 0 as NULL, which is checked below to return
     * false to the caller of Load.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_IF_NULL, 0, 3,
        UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
        UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
        UACPI_PARSE_OP_END,

    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,

    /*
     * Invoke the handler a second time to block the store to target in case
     * the load above failed, as well as do any AML GPE handler initialization.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,

    // If we were given a target to store to, do the store
    UACPI_PARSE_OP_IF_NOT_NULL, 3, 3,
        UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 3, 10,

    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
    UACPI_PARSE_OP_END,
};
// Shorthand for building the name table below
#define POP(x) UACPI_PARSE_OP_##x

/*
 * Human-readable names for every parse op, used for debug tracing via
 * uacpi_parse_op_to_string(). Unlisted slots default to NULL.
 */
static
const uacpi_char *const pop_names[UACPI_PARSE_OP_MAX + 1] = {
    [POP(END)] = "<END-OF-OP>",
    [POP(SKIP_WITH_WARN_IF_NULL)] = "SKIP_WITH_WARN_IF_NULL",
    [POP(SIMPLE_NAME)] = "SIMPLE_NAME",
    [POP(SUPERNAME)] = "SUPERNAME",
    [POP(SUPERNAME_OR_UNRESOLVED)] = "SUPERNAME_OR_UNRESOLVED",
    [POP(TERM_ARG)] = "TERM_ARG",
    [POP(TERM_ARG_UNWRAP_INTERNAL)] = "TERM_ARG_UNWRAP_INTERNAL",
    [POP(TERM_ARG_OR_NAMED_OBJECT)] = "TERM_ARG_OR_NAMED_OBJECT",
    [POP(TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED)] = "TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED",
    [POP(OPERAND)] = "OPERAND",
    [POP(STRING)] = "STRING",
    [POP(COMPUTATIONAL_DATA)] = "COMPUTATIONAL_DATA",
    [POP(TARGET)] = "TARGET",
    [POP(PKGLEN)] = "PKGLEN",
    [POP(TRACKED_PKGLEN)] = "TRACKED_PKGLEN",
    [POP(CREATE_NAMESTRING)] = "CREATE_NAMESTRING",
    [POP(CREATE_NAMESTRING_OR_NULL_IF_LOAD)] = "CREATE_NAMESTRING_OR_NULL_IF_LOAD",
    [POP(EXISTING_NAMESTRING)] = "EXISTING_NAMESTRING",
    [POP(EXISTING_NAMESTRING_OR_NULL)] = "EXISTING_NAMESTRING_OR_NULL",
    [POP(EXISTING_NAMESTRING_OR_NULL_IF_LOAD)] = "EXISTING_NAMESTRING_OR_NULL_IF_LOAD",
    [POP(INVOKE_HANDLER)] = "INVOKE_HANDLER",
    [POP(OBJECT_ALLOC)] = "OBJECT_ALLOC",
    [POP(EMPTY_OBJECT_ALLOC)] = "EMPTY_OBJECT_ALLOC",
    [POP(OBJECT_CONVERT_TO_SHALLOW_COPY)] = "OBJECT_CONVERT_TO_SHALLOW_COPY",
    [POP(OBJECT_CONVERT_TO_DEEP_COPY)] = "OBJECT_CONVERT_TO_DEEP_COPY",
    [POP(OBJECT_ALLOC_TYPED)] = "OBJECT_ALLOC_TYPED",
    [POP(RECORD_AML_PC)] = "RECORD_AML_PC",
    [POP(LOAD_INLINE_IMM_AS_OBJECT)] = "LOAD_INLINE_IMM_AS_OBJECT",
    [POP(LOAD_INLINE_IMM)] = "LOAD_INLINE_IMM",
    [POP(LOAD_ZERO_IMM)] = "LOAD_ZERO_IMM",
    [POP(LOAD_IMM)] = "LOAD_IMM",
    [POP(LOAD_IMM_AS_OBJECT)] = "LOAD_IMM_AS_OBJECT",
    [POP(LOAD_FALSE_OBJECT)] = "LOAD_FALSE_OBJECT",
    [POP(LOAD_TRUE_OBJECT)] = "LOAD_TRUE_OBJECT",
    [POP(TRUNCATE_NUMBER)] = "TRUNCATE_NUMBER",
    [POP(TYPECHECK)] = "TYPECHECK",
    [POP(INSTALL_NAMESPACE_NODE)] = "INSTALL_NAMESPACE_NODE",
    [POP(OBJECT_TRANSFER_TO_PREV)] = "OBJECT_TRANSFER_TO_PREV",
    [POP(OBJECT_COPY_TO_PREV)] = "OBJECT_COPY_TO_PREV",
    [POP(STORE_TO_TARGET)] = "STORE_TO_TARGET",
    [POP(STORE_TO_TARGET_INDIRECT)] = "STORE_TO_TARGET_INDIRECT",
    [POP(UNREACHABLE)] = "UNREACHABLE",
    [POP(BAD_OPCODE)] = "BAD_OPCODE",
    [POP(AML_PC_DECREMENT)] = "AML_PC_DECREMENT",
    [POP(IMM_DECREMENT)] = "IMM_DECREMENT",
    [POP(ITEM_POP)] = "ITEM_POP",
    [POP(DISPATCH_METHOD_CALL)] = "DISPATCH_METHOD_CALL",
    [POP(DISPATCH_TABLE_LOAD)] = "DISPATCH_TABLE_LOAD",
    [POP(CONVERT_NAMESTRING)] = "CONVERT_NAMESTRING",
    [POP(IF_HAS_DATA)] = "IF_HAS_DATA",
    [POP(IF_NULL)] = "IF_NULL",
    [POP(IF_NOT_NULL)] = "IF_NOT_NULL",
    [POP(IF_EQUALS)] = "IF_EQUALS",
    [POP(JMP)] = "JMP",
};
// Debug helper: name of a parse op, "<INVALID-OP>" for out-of-range values
const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op)
{
    return uacpi_unlikely(op > UACPI_PARSE_OP_MAX) ?
        "<INVALID-OP>" : pop_names[op];
}

881
src/uacpi/opregion.c Normal file
View file

@ -0,0 +1,881 @@
#include <uacpi/kernel_api.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/interpreter.h>
// Guards all operation-region handler installation/removal and _REG runs
struct uacpi_recursive_lock g_opregion_lock;

// Set up the global opregion lock; called during subsystem init
uacpi_status uacpi_initialize_opregion(void)
{
    return uacpi_recursive_lock_init(&g_opregion_lock);
}

// Tear down the global opregion lock
void uacpi_deinitialize_opregion(void)
{
    uacpi_recursive_lock_deinit(&g_opregion_lock);
}
/*
 * Log an error about an operation region, including its absolute path
 * and address space name (or "<unknown>" if the node no longer holds
 * an operation region object).
 */
void uacpi_trace_region_error(
    uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret
)
{
    uacpi_object *region_obj;
    const uacpi_char *absolute_path;
    const uacpi_char *space_string = "<unknown>";

    absolute_path = uacpi_namespace_node_generate_absolute_path(node);

    region_obj = uacpi_namespace_node_get_object_typed(
        node, UACPI_OBJECT_OPERATION_REGION_BIT
    );
    if (uacpi_likely(region_obj != UACPI_NULL)) {
        space_string =
            uacpi_address_space_to_string(region_obj->op_region->space);
    }

    uacpi_error(
        "%s (%s) operation region %s: %s\n",
        message, space_string, absolute_path, uacpi_status_to_string(ret)
    );

    uacpi_free_dynamic_string(absolute_path);
}
// Compile-time switch: when defined, every region access is traced
#define UACPI_TRACE_REGION_IO

/*
 * Trace a single operation region read/write at UACPI_LOG_TRACE level.
 * All arguments are ignored when UACPI_TRACE_REGION_IO is not defined.
 */
void uacpi_trace_region_io(
    uacpi_namespace_node *node, uacpi_address_space space, uacpi_region_op op,
    uacpi_u64 offset, uacpi_u8 byte_size, uacpi_u64 ret
)
{
#ifdef UACPI_TRACE_REGION_IO
    const uacpi_char *path;
    const uacpi_char *type_str;

    // Skip the (relatively expensive) path generation when not tracing
    if (!uacpi_should_log(UACPI_LOG_TRACE))
        return;

    switch (op) {
    case UACPI_REGION_OP_READ:
        type_str = "read from";
        break;
    case UACPI_REGION_OP_WRITE:
        type_str = "write to";
        break;
    default:
        type_str = "<INVALID-OP>";
    }

    path = uacpi_namespace_node_generate_absolute_path(node);
    uacpi_trace(
        "%s [%s] (%d bytes) %s[0x%016"UACPI_PRIX64"] = 0x%"UACPI_PRIX64"\n",
        type_str, path, byte_size,
        uacpi_address_space_to_string(space),
        UACPI_FMT64(offset), UACPI_FMT64(ret)
    );
    uacpi_free_dynamic_string(path);
#else
    UACPI_UNUSED(op);
    UACPI_UNUSED(node);
    UACPI_UNUSED(offset);
    UACPI_UNUSED(byte_size);
    UACPI_UNUSED(ret);
#endif
}
/*
 * Whether regions in this address space participate in the _REG
 * connect/disconnect protocol. SystemMemory, SystemIO and TableData
 * regions are exempt.
 */
static uacpi_bool space_needs_reg(enum uacpi_address_space space)
{
    switch (space) {
    case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY:
    case UACPI_ADDRESS_SPACE_SYSTEM_IO:
    case UACPI_ADDRESS_SPACE_TABLE_DATA:
        return UACPI_FALSE;
    default:
        return UACPI_TRUE;
    }
}
/*
 * Evaluate the _REG method sitting next to this operation region (in
 * its parent scope), passing the region's address space id as Arg0 and
 * the connect/disconnect code as Arg1. A missing _REG yields the
 * resolver's NOT_FOUND status; execution failures are logged and
 * propagated.
 */
static uacpi_status region_run_reg(
    uacpi_namespace_node *node, uacpi_u8 connection_code
)
{
    uacpi_status ret;
    uacpi_namespace_node *reg_node;
    uacpi_object_array method_args;
    uacpi_object *reg_obj, *args[2];

    ret = uacpi_namespace_node_resolve(
        node->parent, "_REG", UACPI_SHOULD_LOCK_NO,
        UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_NO, &reg_node
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    reg_obj = uacpi_namespace_node_get_object_typed(
        reg_node, UACPI_OBJECT_METHOD_BIT
    );
    // _REG exists but isn't a method: quietly treat as nothing to run
    if (uacpi_unlikely(reg_obj == UACPI_NULL))
        return UACPI_STATUS_OK;

    args[0] = uacpi_create_object(UACPI_OBJECT_INTEGER);
    if (uacpi_unlikely(args[0] == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    args[1] = uacpi_create_object(UACPI_OBJECT_INTEGER);
    if (uacpi_unlikely(args[1] == UACPI_NULL)) {
        uacpi_object_unref(args[0]);
        return UACPI_STATUS_OUT_OF_MEMORY;
    }

    // Arg0 = address space, Arg1 = connection code
    args[0]->integer = uacpi_namespace_node_get_object(node)->op_region->space;
    args[1]->integer = connection_code;
    method_args.objects = args;
    method_args.count = 2;

    ret = uacpi_execute_control_method(
        reg_node, reg_obj->method, &method_args, UACPI_NULL
    );
    if (uacpi_unlikely_error(ret))
        uacpi_trace_region_error(node, "error during _REG execution for", ret);

    uacpi_object_unref(args[0]);
    uacpi_object_unref(args[1]);
    return ret;
}
/*
 * Fetch the address-space handler list attached to a node, or NULL if
 * the node's object cannot own handlers. The root node maps to the
 * runtime context's root object.
 */
uacpi_address_space_handlers *uacpi_node_get_address_space_handlers(
    uacpi_namespace_node *node
)
{
    uacpi_object *object;

    if (node == uacpi_namespace_root())
        return g_uacpi_rt_ctx.root_object->address_space_handlers;

    object = uacpi_namespace_node_get_object(node);
    if (uacpi_unlikely(object == UACPI_NULL))
        return UACPI_NULL;

    // Only device-like objects carry address space handlers
    if (object->type == UACPI_OBJECT_DEVICE ||
        object->type == UACPI_OBJECT_PROCESSOR ||
        object->type == UACPI_OBJECT_THERMAL_ZONE)
        return object->address_space_handlers;

    return UACPI_NULL;
}
// Locate the handler registered for 'space' in the list, or NULL
static uacpi_address_space_handler *find_handler(
    uacpi_address_space_handlers *handlers,
    enum uacpi_address_space space
)
{
    uacpi_address_space_handler *cur;

    for (cur = handlers->head; cur != UACPI_NULL; cur = cur->next) {
        if (cur->space == space)
            break;
    }

    return cur;
}
/*
 * Find the list node whose ->next points at 'region' within its
 * handler's region list. Returns 'region' itself when it is the list
 * head (no previous link), or NULL if it isn't linked at all.
 */
static uacpi_operation_region *find_previous_region_link(
    uacpi_operation_region *region
)
{
    uacpi_operation_region *iter = region->handler->regions;

    // Head of the list: signal "no previous" by returning the region
    if (iter == region)
        return region;

    while (iter != UACPI_NULL && iter->next != region)
        iter = iter->next;

    return iter;
}
/*
 * Run the region handler's ATTACH callback for a region node, once.
 * The namespace write lock is dropped around the callback (the object
 * is kept alive via an extra reference for that window), then retaken.
 *
 * Returns NO_HANDLER if no handler is installed, OK if already
 * attached or on success.
 */
uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node)
{
    uacpi_object *obj;
    uacpi_operation_region *region;
    uacpi_address_space_handler *handler;
    uacpi_status ret;
    uacpi_region_attach_data attach_data = { 0 };

    if (uacpi_namespace_node_is_dangling(node))
        return UACPI_STATUS_NAMESPACE_NODE_DANGLING;

    obj = uacpi_namespace_node_get_object_typed(
        node, UACPI_OBJECT_OPERATION_REGION_BIT
    );
    if (uacpi_unlikely(obj == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    region = obj->op_region;

    if (region->handler == UACPI_NULL)
        return UACPI_STATUS_NO_HANDLER;
    // Already attached, nothing to do
    if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED)
        return UACPI_STATUS_OK;

    handler = region->handler;
    attach_data.region_node = node;
    attach_data.handler_context = handler->user_context;

    // Keep the object alive while the namespace lock is dropped
    uacpi_object_ref(obj);
    uacpi_namespace_write_unlock();
    ret = handler->callback(UACPI_REGION_OP_ATTACH, &attach_data);
    uacpi_namespace_write_lock();

    if (uacpi_unlikely_error(ret)) {
        uacpi_trace_region_error(node, "failed to attach a handler to", ret);
        uacpi_object_unref(obj);
        return ret;
    }

    region->state_flags |= UACPI_OP_REGION_STATE_ATTACHED;
    region->user_context = attach_data.out_region_context;
    uacpi_object_unref(obj);
    return ret;
}
/*
 * Point a region node at a handler and link the region into the
 * handler's region list, taking a reference on the handler.
 */
static void region_install_handler(
    uacpi_namespace_node *node, uacpi_address_space_handler *handler
)
{
    uacpi_operation_region *region;

    region = uacpi_namespace_node_get_object(node)->op_region;

    uacpi_shareable_ref(handler);
    region->handler = handler;

    // Prepend to the handler's list of controlled regions
    region->next = handler->regions;
    handler->regions = region;
}
// Whether to also run _REG(disconnect) while uninstalling a handler
enum unreg {
    UNREG_NO = 0,
    UNREG_YES,
};

/*
 * Detach a region from its handler: unlink it from the handler's list,
 * run the DETACH callback if it was attached (with the namespace lock
 * dropped around the call), optionally run _REG(disconnect), and drop
 * the handler reference. No-op when the node has no region object or
 * no handler.
 */
static void region_uninstall_handler(
    uacpi_namespace_node *node, enum unreg unreg
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_address_space_handler *handler;
    uacpi_operation_region *region, *link;

    obj = uacpi_namespace_node_get_object_typed(
        node, UACPI_OBJECT_OPERATION_REGION_BIT
    );
    if (uacpi_unlikely(obj == UACPI_NULL))
        return;

    region = obj->op_region;

    handler = region->handler;
    if (handler == UACPI_NULL)
        return;

    link = find_previous_region_link(region);
    if (uacpi_unlikely(link == UACPI_NULL)) {
        // List corruption: log and still run the detach path below
        uacpi_error("operation region @%p not in the handler@%p list(?)\n",
                    region, handler);
        goto out;
    } else if (link == region) {
        // Region is the list head
        link = link->next;
        handler->regions = link;
    } else {
        link->next = region->next;
    }

out:
    if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED) {
        uacpi_region_detach_data detach_data = {
            .region_node = node,
            .region_context = region->user_context,
            .handler_context = handler->user_context,
        };

        // Keep the node alive while the namespace lock is dropped
        uacpi_shareable_ref(node);
        uacpi_namespace_write_unlock();

        ret = handler->callback(UACPI_REGION_OP_DETACH, &detach_data);

        uacpi_namespace_write_lock();
        uacpi_namespace_node_unref(node);

        if (uacpi_unlikely_error(ret)) {
            uacpi_trace_region_error(
                node, "error during handler detach for", ret
            );
        }
    }

    if ((region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED) &&
        unreg == UNREG_YES) {
        region_run_reg(node, ACPI_REG_DISCONNECT);
        region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED;
    }

    uacpi_address_space_handler_unref(region->handler);
    region->handler = UACPI_NULL;
    region->state_flags &= ~UACPI_OP_REGION_STATE_ATTACHED;
}
/*
 * Acquire g_opregion_lock while already holding the namespace write
 * lock, without violating lock ordering (opregion lock before
 * namespace lock, as the public API does).
 */
static uacpi_status upgrade_to_opregion_lock(void)
{
    uacpi_status ret;

    /*
     * Drop the namespace lock, and reacquire it after the opregion lock
     * so we keep the ordering with user API.
     */
    uacpi_namespace_write_unlock();
    ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
    uacpi_namespace_write_lock();

    return ret;
}
/*
 * Detach the handler (running _REG disconnect) from a single region
 * node. Caller holds the namespace write lock; the opregion lock is
 * taken via the lock-order-preserving upgrade helper.
 */
void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node)
{
    uacpi_status ret;

    ret = upgrade_to_opregion_lock();
    if (uacpi_unlikely_error(ret))
        return;

    region_uninstall_handler(node, UNREG_YES);

    uacpi_recursive_lock_release(&g_opregion_lock);
}
// Whether this handler was installed as a built-in default handler
uacpi_bool uacpi_address_space_handler_is_default(
    uacpi_address_space_handler *handler
)
{
    return (handler->flags & UACPI_ADDRESS_SPACE_HANDLER_DEFAULT) != 0;
}
// What do_install_or_uninstall_handler should do to matching regions
enum opregion_iter_action {
    OPREGION_ITER_ACTION_UNINSTALL,
    OPREGION_ITER_ACTION_INSTALL,
};

// Context threaded through the namespace walk below
struct opregion_iter_ctx {
    enum opregion_iter_action action;
    uacpi_address_space_handler *handler;
};
/*
 * Namespace-walk callback: (un)install ctx->handler on every operation
 * region of the matching address space. Subtrees owned by a device
 * that already has its own handler for this space are skipped entirely
 * (NEXT_PEER), so more-specific handlers take precedence.
 */
static uacpi_iteration_decision do_install_or_uninstall_handler(
    uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    struct opregion_iter_ctx *ctx = opaque;
    uacpi_address_space_handlers *handlers;
    uacpi_object *object;

    UACPI_UNUSED(depth);

    object = uacpi_namespace_node_get_object(node);
    if (object->type == UACPI_OBJECT_OPERATION_REGION) {
        uacpi_operation_region *region = object->op_region;

        if (region->space != ctx->handler->space)
            return UACPI_ITERATION_DECISION_CONTINUE;

        if (ctx->action == OPREGION_ITER_ACTION_INSTALL) {
            // Replace any previously installed handler (no _REG run)
            if (region->handler)
                region_uninstall_handler(node, UNREG_NO);

            region_install_handler(node, ctx->handler);
        } else {
            // Only uninstall regions that point at *this* handler
            if (uacpi_unlikely(region->handler != ctx->handler)) {
                uacpi_trace_region_error(
                    node, "handler mismatch for",
                    UACPI_STATUS_INTERNAL_ERROR
                );
                return UACPI_ITERATION_DECISION_CONTINUE;
            }

            region_uninstall_handler(node, UNREG_NO);
        }

        return UACPI_ITERATION_DECISION_CONTINUE;
    }

    handlers = uacpi_node_get_address_space_handlers(node);
    if (handlers == UACPI_NULL)
        return UACPI_ITERATION_DECISION_CONTINUE;

    // Device already has a handler for this space installed
    if (find_handler(handlers, ctx->handler->space) != UACPI_NULL)
        return UACPI_ITERATION_DECISION_NEXT_PEER;

    return UACPI_ITERATION_DECISION_CONTINUE;
}
// Context/counters for a bulk _REG connect/disconnect walk
struct reg_run_ctx {
    uacpi_u8 space;
    uacpi_u8 connection_code;
    uacpi_size reg_executed;
    uacpi_size reg_errors;
};

/*
 * Namespace-walk callback: run _REG(ctx->connection_code) on every
 * region of the matching space whose _REG state differs from the
 * desired one. A NOT_FOUND result (no _REG method) is not counted as
 * executed; other failures are counted in reg_errors.
 */
static uacpi_iteration_decision do_run_reg(
    void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    struct reg_run_ctx *ctx = opaque;
    uacpi_operation_region *region;
    uacpi_status ret;
    uacpi_bool was_regged;

    UACPI_UNUSED(depth);

    region = uacpi_namespace_node_get_object(node)->op_region;
    if (region->space != ctx->space)
        return UACPI_ITERATION_DECISION_CONTINUE;

    // Skip regions already in the desired connect/disconnect state
    was_regged = region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED;
    if (was_regged == (ctx->connection_code == ACPI_REG_CONNECT))
        return UACPI_ITERATION_DECISION_CONTINUE;

    ret = region_run_reg(node, ctx->connection_code);

    // Disconnect clears the flag regardless of the _REG outcome
    if (ctx->connection_code == ACPI_REG_DISCONNECT)
        region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED;

    if (ret == UACPI_STATUS_NOT_FOUND)
        return UACPI_ITERATION_DECISION_CONTINUE;

    if (ctx->connection_code == ACPI_REG_CONNECT)
        region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED;

    ctx->reg_executed++;

    if (uacpi_unlikely_error(ret)) {
        ctx->reg_errors++;
        return UACPI_ITERATION_DECISION_CONTINUE;
    }

    return UACPI_ITERATION_DECISION_CONTINUE;
}
/*
 * Run _REG(connect) or _REG(disconnect) for every operation region of
 * the given space below device_node, then log a summary. For connect,
 * requires a handler for that space to actually be installed.
 */
static uacpi_status reg_or_unreg_all_opregions(
    uacpi_namespace_node *device_node, enum uacpi_address_space space,
    uacpi_u8 connection_code
)
{
    uacpi_address_space_handlers *handlers;
    uacpi_bool is_connect;
    enum uacpi_permanent_only perm_only;
    struct reg_run_ctx ctx = {
        .space = space,
        .connection_code = connection_code,
    };

    handlers = uacpi_node_get_address_space_handlers(device_node);
    if (uacpi_unlikely(handlers == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    is_connect = connection_code == ACPI_REG_CONNECT;
    if (uacpi_unlikely(is_connect &&
                       find_handler(handlers, space) == UACPI_NULL))
        return UACPI_STATUS_NO_HANDLER;

    /*
     * We want to unreg non-permanent opregions as well, however,
     * registering them is handled separately and should not be
     * done by us.
     */
    perm_only = is_connect ? UACPI_PERMANENT_ONLY_YES : UACPI_PERMANENT_ONLY_NO;

    uacpi_namespace_do_for_each_child(
        device_node, do_run_reg, UACPI_NULL,
        UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY,
        UACPI_SHOULD_LOCK_NO, perm_only, &ctx
    );

    uacpi_trace(
        "%sactivated all '%s' opregions controlled by '%.4s', "
        "%zu _REG() calls (%zu errors)\n",
        connection_code == ACPI_REG_CONNECT ? "" : "de",
        uacpi_address_space_to_string(space),
        device_node->name.text, ctx.reg_executed, ctx.reg_errors
    );

    return UACPI_STATUS_OK;
}
/*
 * Get the handler list owned by a node, or NULL if the node's object
 * isn't a device/thermal-zone/processor. The root node maps to the
 * runtime context's root object.
 */
static uacpi_address_space_handlers *extract_handlers(
    uacpi_namespace_node *node
)
{
    uacpi_object *obj;

    if (node == uacpi_namespace_root())
        return g_uacpi_rt_ctx.root_object->address_space_handlers;

    obj = uacpi_namespace_node_get_object_typed(
        node,
        UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
        UACPI_OBJECT_PROCESSOR_BIT
    );

    return obj == UACPI_NULL ? UACPI_NULL : obj->address_space_handlers;
}
/*
 * Public API: run _REG(connect) for every region of 'space' under
 * device_node. No-op success for spaces exempt from _REG. Takes the
 * opregion lock before the namespace write lock (canonical order).
 */
uacpi_status uacpi_reg_all_opregions(
    uacpi_namespace_node *device_node,
    enum uacpi_address_space space
)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);

    if (!space_needs_reg(space))
        return UACPI_STATUS_OK;

    ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_namespace_write_lock();
    if (uacpi_unlikely_error(ret)) {
        uacpi_recursive_lock_release(&g_opregion_lock);
        return ret;
    }

    // Validate that the node can actually own handlers
    if (uacpi_unlikely(extract_handlers(device_node) == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out;
    }

    ret = reg_or_unreg_all_opregions(device_node, space, ACPI_REG_CONNECT);

out:
    uacpi_namespace_write_unlock();
    uacpi_recursive_lock_release(&g_opregion_lock);
    return ret;
}
/*
 * Install an address-space handler on a device node: allocate the
 * handler record, link it, attach it to every matching region in the
 * subtree (skipping subtrees with their own handler), and, once the
 * namespace is loaded, run _REG(connect) where required.
 *
 * Returns ALREADY_EXISTS if a handler for this space is present.
 */
uacpi_status uacpi_install_address_space_handler_with_flags(
    uacpi_namespace_node *device_node, enum uacpi_address_space space,
    uacpi_region_handler handler, uacpi_handle handler_context,
    uacpi_u16 flags
)
{
    uacpi_status ret;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *this_handler, *new_handler;
    struct opregion_iter_ctx iter_ctx;

    // Lock order: opregion lock first, then namespace write lock
    ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_namespace_write_lock();
    if (uacpi_unlikely_error(ret)) {
        uacpi_recursive_lock_release(&g_opregion_lock);
        return ret;
    }

    handlers = extract_handlers(device_node);
    if (uacpi_unlikely(handlers == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out;
    }

    this_handler = find_handler(handlers, space);
    if (this_handler != UACPI_NULL) {
        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    new_handler = uacpi_kernel_alloc(sizeof(*new_handler));
    if (new_handler == UACPI_NULL) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }
    uacpi_shareable_init(new_handler);

    new_handler->next = handlers->head;
    new_handler->space = space;
    new_handler->user_context = handler_context;
    new_handler->callback = handler;
    new_handler->regions = UACPI_NULL;
    new_handler->flags = flags;
    handlers->head = new_handler;

    // Attach the new handler to every matching region in the subtree
    iter_ctx.handler = new_handler;
    iter_ctx.action = OPREGION_ITER_ACTION_INSTALL;

    uacpi_namespace_do_for_each_child(
        device_node, do_install_or_uninstall_handler, UACPI_NULL,
        UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO,
        UACPI_PERMANENT_ONLY_YES, &iter_ctx
    );

    if (!space_needs_reg(space))
        goto out;

    /*
     * Installing an early address space handler, obviously not possible to
     * execute any _REG methods here. Just return and hope that it is either
     * a global address space handler, or a handler installed by a user who
     * will run uacpi_reg_all_opregions manually after loading/initializing
     * the namespace.
     */
    if (g_uacpi_rt_ctx.init_level < UACPI_INIT_LEVEL_NAMESPACE_LOADED)
        goto out;

    // Init level is NAMESPACE_INITIALIZED, so we can safely run _REG now
    ret = reg_or_unreg_all_opregions(
        device_node, space, ACPI_REG_CONNECT
    );

out:
    uacpi_namespace_write_unlock();
    uacpi_recursive_lock_release(&g_opregion_lock);
    return ret;
}
/*
 * Convenience wrapper around
 * uacpi_install_address_space_handler_with_flags() with flags = 0.
 */
uacpi_status uacpi_install_address_space_handler(
    uacpi_namespace_node *device_node, enum uacpi_address_space space,
    uacpi_region_handler handler, uacpi_handle handler_context
)
{
    return uacpi_install_address_space_handler_with_flags(
        device_node, space, handler, handler_context, 0
    );
}
/*
 * Remove a previously installed address-space handler from a device
 * node: detach it from every region in the subtree, unlink it from the
 * handler list, run _REG(disconnect) where applicable, and drop the
 * handler reference.
 *
 * Returns NO_HANDLER if nothing is installed for 'space'.
 */
uacpi_status uacpi_uninstall_address_space_handler(
    uacpi_namespace_node *device_node,
    enum uacpi_address_space space
)
{
    uacpi_status ret;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *handler = UACPI_NULL, *prev_handler;
    struct opregion_iter_ctx iter_ctx;

    // Lock order: opregion lock first, then namespace write lock
    ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_namespace_write_lock();
    if (uacpi_unlikely_error(ret)) {
        uacpi_recursive_lock_release(&g_opregion_lock);
        return ret;
    }

    handlers = extract_handlers(device_node);
    if (uacpi_unlikely(handlers == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out;
    }

    handler = find_handler(handlers, space);
    if (uacpi_unlikely(handler == UACPI_NULL)) {
        ret = UACPI_STATUS_NO_HANDLER;
        goto out;
    }

    // Detach the handler from every region in the subtree first
    iter_ctx.handler = handler;
    iter_ctx.action = OPREGION_ITER_ACTION_UNINSTALL;

    uacpi_namespace_do_for_each_child(
        device_node, do_install_or_uninstall_handler, UACPI_NULL,
        UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO,
        UACPI_PERMANENT_ONLY_NO, &iter_ctx
    );

    prev_handler = handlers->head;

    // Are we the last linked handler?
    if (prev_handler == handler) {
        handlers->head = handler->next;
        goto out_unreg;
    }

    // Nope, we're somewhere in the middle. Do a search.
    while (prev_handler) {
        if (prev_handler->next == handler) {
            prev_handler->next = handler->next;
            /*
             * BUGFIX: this used to 'goto out', skipping the
             * _REG(disconnect) pass that the head-of-list branch above
             * performs. Disconnect must run whenever the handler was
             * unlinked, regardless of its position in the list.
             */
            goto out_unreg;
        }

        prev_handler = prev_handler->next;
    }

out_unreg:
    if (space_needs_reg(space))
        reg_or_unreg_all_opregions(device_node, space, ACPI_REG_DISCONNECT);

out:
    if (handler != UACPI_NULL)
        uacpi_address_space_handler_unref(handler);

    uacpi_namespace_write_unlock();
    uacpi_recursive_lock_release(&g_opregion_lock);
    return ret;
}
/*
 * Bind a freshly created operation region node to an address space handler.
 *
 * Walks up the namespace from the region's parent looking for the nearest
 * ancestor that has a handler installed for the region's address space.
 * If one is found, it is installed on the region, and _REG(space, 1) is
 * run when the space requires it and the namespace is loaded far enough
 * for method execution.
 *
 * Returns UACPI_STATUS_NOT_FOUND if no ancestor provides a handler.
 */
uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node)
{
    uacpi_status ret;
    uacpi_namespace_node *parent = node->parent;
    uacpi_operation_region *region;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *handler;

    ret = upgrade_to_opregion_lock();
    if (uacpi_unlikely_error(ret))
        return ret;

    region = uacpi_namespace_node_get_object(node)->op_region;

    ret = UACPI_STATUS_NOT_FOUND;

    // Find the innermost ancestor with a handler for this address space
    while (parent) {
        handlers = uacpi_node_get_address_space_handlers(parent);
        if (handlers != UACPI_NULL) {
            handler = find_handler(handlers, region->space);
            if (handler != UACPI_NULL) {
                region_install_handler(node, handler);
                ret = UACPI_STATUS_OK;
                break;
            }
        }
        parent = parent->parent;
    }

    if (ret != UACPI_STATUS_OK)
        goto out;
    if (!space_needs_reg(region->space))
        goto out;

    // Too early to execute AML; _REG will be run later during init
    if (uacpi_get_current_init_level() < UACPI_INIT_LEVEL_NAMESPACE_LOADED)
        goto out;

    // NOT_FOUND means there was no _REG method, so nothing was executed
    if (region_run_reg(node, ACPI_REG_CONNECT) != UACPI_STATUS_NOT_FOUND)
        region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED;

out:
    uacpi_recursive_lock_release(&g_opregion_lock);
    return ret;
}
/*
 * Perform a read or write of 'byte_width' bytes at 'offset' within an
 * operation region, dispatching to the installed address space handler.
 *
 * - Attaches the region (invokes the handler's attach callback) if needed.
 * - Validates that [offset, offset + byte_width) lies inside the region
 *   and that adding the region base does not overflow.
 * - Drops the namespace write lock around the user callback (the object is
 *   kept alive with an extra reference), then re-acquires it.
 *
 * For UACPI_REGION_OP_WRITE, *in_out supplies the value; for
 * UACPI_REGION_OP_READ, *in_out receives it.
 */
uacpi_status uacpi_dispatch_opregion_io(
    uacpi_namespace_node *region_node, uacpi_u32 offset, uacpi_u8 byte_width,
    uacpi_region_op op, uacpi_u64 *in_out
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_operation_region *region;
    uacpi_address_space_handler *handler;
    uacpi_address_space space;
    uacpi_u64 offset_end;

    uacpi_region_rw_data data = {
        .byte_width = byte_width,
        .offset = offset,
    };

    ret = upgrade_to_opregion_lock();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_opregion_attach(region_node);
    if (uacpi_unlikely_error(ret)) {
        uacpi_trace_region_error(
            region_node, "unable to attach", ret
        );
        goto out;
    }

    obj = uacpi_namespace_node_get_object_typed(
        region_node, UACPI_OBJECT_OPERATION_REGION_BIT
    );
    if (uacpi_unlikely(obj == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out;
    }

    region = obj->op_region;
    space = region->space;
    handler = region->handler;

    offset_end = offset;
    offset_end += byte_width;
    // Translate the region-relative offset to an absolute one
    data.offset += region->offset;

    // Reject out-of-bounds accesses and offset overflow
    if (uacpi_unlikely(region->length < offset_end ||
                       data.offset < offset)) {
        const uacpi_char *path;

        path = uacpi_namespace_node_generate_absolute_path(region_node);
        uacpi_error(
            "out-of-bounds access to opregion %s[0x%"UACPI_PRIX64"->"
            "0x%"UACPI_PRIX64"] at 0x%"UACPI_PRIX64" (idx=%u, width=%d)\n",
            path, UACPI_FMT64(region->offset),
            UACPI_FMT64(region->offset + region->length),
            UACPI_FMT64(data.offset), offset, byte_width
        );
        uacpi_free_dynamic_string(path);
        ret = UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
        goto out;
    }

    data.handler_context = handler->user_context;
    data.region_context = region->user_context;

    if (op == UACPI_REGION_OP_WRITE) {
        data.value = *in_out;
        uacpi_trace_region_io(
            region_node, space, op, data.offset,
            byte_width, data.value
        );
    }

    /*
     * Keep the region object alive and release the namespace lock while
     * the user callback runs; it may take arbitrarily long or re-enter.
     */
    uacpi_object_ref(obj);
    uacpi_namespace_write_unlock();

    ret = handler->callback(op, &data);

    uacpi_namespace_write_lock();
    uacpi_object_unref(obj);

    if (uacpi_unlikely_error(ret)) {
        uacpi_trace_region_error(region_node, "unable to perform IO", ret);
        goto out;
    }

    if (op == UACPI_REGION_OP_READ) {
        *in_out = data.value;
        uacpi_trace_region_io(
            region_node, space, op, data.offset,
            byte_width, data.value
        );
    }

out:
    uacpi_recursive_lock_release(&g_opregion_lock);
    return ret;
}

384
src/uacpi/osi.c Normal file
View file

@ -0,0 +1,384 @@
#include <uacpi/platform/atomic.h>
#include <uacpi/internal/osi.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/kernel_api.h>
/*
 * One _OSI-queryable interface string, either from the static predefined
 * table below or dynamically installed via uacpi_install_interface().
 * Entries form a singly-linked list headed by 'registered_interfaces'.
 */
struct registered_interface {
    const uacpi_char *name;
    // Vendor "newness" ranking used for uacpi_latest_queried_vendor_interface()
    uacpi_u8 weight;
    uacpi_u8 kind;

    // Only applicable for predefined host interfaces
    uacpi_u8 host_type;

    // Only applicable for predefined interfaces
    uacpi_u8 disabled : 1;
    // Set for heap-allocated entries that must be freed on uninstall/deinit
    uacpi_u8 dynamic : 1;

    struct registered_interface *next;
};
// Protects the interface list, handler pointer, and entry flags below
static uacpi_handle interface_mutex;
// Head of the linked list of all registered interfaces
static struct registered_interface *registered_interfaces;
// Optional user callback consulted on every _OSI query
static uacpi_interface_handler interface_handler;
// Highest vendor-interface weight AML has queried so far (atomic access)
static uacpi_u32 latest_queried_interface;
// Initializer for a predefined "Windows <year>" vendor interface entry
#define WINDOWS(string, interface)                                \
    {                                                             \
        .name = "Windows "string,                                 \
        .weight = UACPI_VENDOR_INTERFACE_WINDOWS_##interface,     \
        .kind = UACPI_INTERFACE_KIND_VENDOR,                      \
        .host_type = 0,                                           \
        .disabled = 0,                                            \
        .dynamic = 0,                                             \
        .next = UACPI_NULL                                        \
    }

// Initializer for a host feature interface entry (disabled by default)
#define HOST_FEATURE(string, type)                    \
    {                                                 \
        .name = string,                               \
        .weight = 0,                                  \
        .kind = UACPI_INTERFACE_KIND_FEATURE,         \
        .host_type = UACPI_HOST_INTERFACE_##type,     \
        .disabled = 1,                                \
        .dynamic = 0,                                 \
        .next = UACPI_NULL,                           \
    }
/*
 * Static table of interfaces supported out of the box. The entries are
 * chained into a linked list by uacpi_initialize_interfaces().
 */
static struct registered_interface predefined_interfaces[] = {
    // Vendor strings
    WINDOWS("2000", 2000),
    WINDOWS("2001", XP),
    WINDOWS("2001 SP1", XP_SP1),
    WINDOWS("2001.1", SERVER_2003),
    WINDOWS("2001 SP2", XP_SP2),
    WINDOWS("2001.1 SP1", SERVER_2003_SP1),
    WINDOWS("2006", VISTA),
    WINDOWS("2006.1", SERVER_2008),
    WINDOWS("2006 SP1", VISTA_SP1),
    WINDOWS("2006 SP2", VISTA_SP2),
    WINDOWS("2009", 7),
    WINDOWS("2012", 8),
    WINDOWS("2013", 8_1),
    WINDOWS("2015", 10),
    WINDOWS("2016", 10_RS1),
    WINDOWS("2017", 10_RS2),
    WINDOWS("2017.2", 10_RS3),
    WINDOWS("2018", 10_RS4),
    WINDOWS("2018.2", 10_RS5),
    WINDOWS("2019", 10_19H1),
    WINDOWS("2020", 10_20H1),
    WINDOWS("2021", 11),
    WINDOWS("2022", 11_22H2),

    // Feature strings
    HOST_FEATURE("Module Device", MODULE_DEVICE),
    HOST_FEATURE("Processor Device", PROCESSOR_DEVICE),
    HOST_FEATURE("3.0 Thermal Model", 3_0_THERMAL_MODEL),
    HOST_FEATURE("3.0 _SCP Extensions", 3_0_SCP_EXTENSIONS),
    HOST_FEATURE("Processor Aggregator Device", PROCESSOR_AGGREGATOR_DEVICE),

    // Interpreter features
    { .name = "Extended Address Space Descriptor" },
};
/*
 * Set up the _OSI machinery: point the registered-interface list at the
 * predefined table, create the protecting mutex, and chain the predefined
 * entries into a singly-linked list.
 */
uacpi_status uacpi_initialize_interfaces(void)
{
    uacpi_size idx;
    const uacpi_size count = UACPI_ARRAY_SIZE(predefined_interfaces);

    registered_interfaces = &predefined_interfaces[0];

    interface_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(interface_mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    // Link every predefined entry to its successor; the last keeps NULL
    for (idx = 1; idx < count; ++idx)
        predefined_interfaces[idx - 1].next = &predefined_interfaces[idx];

    return UACPI_STATUS_OK;
}
/*
 * Tear down all interface state: free dynamically installed entries,
 * restore predefined ones to their default enabled/disabled setting,
 * then drop the mutex, the query handler and the list head.
 */
void uacpi_deinitialize_interfaces(void)
{
    struct registered_interface *cur = registered_interfaces;

    while (cur != UACPI_NULL) {
        struct registered_interface *victim = cur;

        cur = victim->next;
        victim->next = UACPI_NULL;

        if (victim->dynamic) {
            uacpi_free_dynamic_string(victim->name);
            uacpi_free(victim, sizeof(*victim));
        } else if (victim->kind == UACPI_INTERFACE_KIND_FEATURE) {
            // Only features are disabled by default
            victim->disabled = UACPI_TRUE;
        } else {
            victim->disabled = UACPI_FALSE;
        }
    }

    if (interface_mutex)
        uacpi_kernel_free_mutex(interface_mutex);

    interface_mutex = UACPI_NULL;
    interface_handler = UACPI_NULL;
    latest_queried_interface = 0;
    registered_interfaces = UACPI_NULL;
}
/*
 * Return the newest (highest-weight) vendor interface that AML has asked
 * about via _OSI so far, or 0 if none were queried.
 */
uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void)
{
    return uacpi_atomic_load32(&latest_queried_interface);
}
/*
 * Linear-search the registered interface list for an exact name match.
 * Caller must hold interface_mutex. Returns UACPI_NULL if not found.
 */
static struct registered_interface *find_interface_unlocked(
    const uacpi_char *name
)
{
    struct registered_interface *cur;

    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (uacpi_strcmp(cur->name, name) == 0)
            return cur;
    }

    return UACPI_NULL;
}
/*
 * Find the predefined host-feature interface entry with the given type.
 * Caller must hold interface_mutex. Returns UACPI_NULL if not found.
 */
static struct registered_interface *find_host_interface_unlocked(
    uacpi_host_interface type
)
{
    struct registered_interface *cur;

    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (cur->host_type == type)
            return cur;
    }

    return UACPI_NULL;
}
/*
 * Register a new interface string to be reported as supported by _OSI.
 *
 * If the name is already registered, it is re-enabled (if it had been
 * disabled) and UACPI_STATUS_ALREADY_EXISTS is returned. Otherwise a new
 * dynamic entry owning a copy of the name is pushed at the list head.
 */
uacpi_status uacpi_install_interface(
    const uacpi_char *name, uacpi_interface_kind kind
)
{
    struct registered_interface *interface;
    uacpi_status ret;
    uacpi_char *name_copy;
    uacpi_size name_size;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    interface = find_interface_unlocked(name);
    if (interface != UACPI_NULL) {
        // Re-installing a disabled interface re-enables it
        if (interface->disabled)
            interface->disabled = UACPI_FALSE;
        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    interface = uacpi_kernel_alloc(sizeof(*interface));
    if (uacpi_unlikely(interface == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    // Own a private copy of the name (including the NUL terminator)
    name_size = uacpi_strlen(name) + 1;
    name_copy = uacpi_kernel_alloc(name_size);
    if (uacpi_unlikely(name_copy == UACPI_NULL)) {
        uacpi_free(interface, sizeof(*interface));
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    uacpi_memcpy(name_copy, name, name_size);
    interface->name = name_copy;
    interface->weight = 0;
    interface->kind = kind;
    interface->host_type = 0;
    interface->disabled = 0;
    interface->dynamic = 1;

    // Push at the head of the registered list
    interface->next = registered_interfaces;
    registered_interfaces = interface;

out:
    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Remove an interface string from the _OSI-supported set.
 *
 * Dynamic entries are unlinked and freed. Predefined entries cannot be
 * removed from the list, so they are only marked disabled; uninstalling
 * an already-disabled predefined entry reports UACPI_STATUS_NOT_FOUND.
 */
uacpi_status uacpi_uninstall_interface(const uacpi_char *name)
{
    struct registered_interface *cur, *prev;
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    cur = registered_interfaces;
    prev = cur;

    ret = UACPI_STATUS_NOT_FOUND;
    while (cur) {
        if (uacpi_strcmp(cur->name, name) != 0) {
            prev = cur;
            cur = cur->next;
            continue;
        }

        if (cur->dynamic) {
            // Unlink (head or middle), then free outside the lock
            if (prev == cur) {
                registered_interfaces = cur->next;
            } else {
                prev->next = cur->next;
            }

            uacpi_release_native_mutex(interface_mutex);
            uacpi_free_dynamic_string(cur->name);
            uacpi_free(cur, sizeof(*cur));
            return UACPI_STATUS_OK;
        }

        /*
         * If this interface was already disabled, pretend we didn't actually
         * find it and keep ret as UACPI_STATUS_NOT_FOUND. The fact that it's
         * still in the registered list is an implementation detail of
         * predefined interfaces.
         */
        if (!cur->disabled) {
            cur->disabled = UACPI_TRUE;
            ret = UACPI_STATUS_OK;
        }

        break;
    }

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Enable or disable one of the predefined host feature interfaces
 * ("Module Device" etc.). Returns UACPI_STATUS_NOT_FOUND if no entry
 * of the given type exists.
 */
static uacpi_status configure_host_interface(
    uacpi_host_interface type, uacpi_bool enabled
)
{
    uacpi_status ret;
    struct registered_interface *iface;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    iface = find_host_interface_unlocked(type);
    if (iface != UACPI_NULL)
        iface->disabled = !enabled;
    else
        ret = UACPI_STATUS_NOT_FOUND;

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
// Mark a predefined host feature interface as supported in _OSI replies
uacpi_status uacpi_enable_host_interface(uacpi_host_interface type)
{
    return configure_host_interface(type, UACPI_TRUE);
}
// Mark a predefined host feature interface as unsupported in _OSI replies
uacpi_status uacpi_disable_host_interface(uacpi_host_interface type)
{
    return configure_host_interface(type, UACPI_FALSE);
}
/*
 * Install or clear the user _OSI query callback. Installing over an
 * existing handler fails with UACPI_STATUS_ALREADY_EXISTS; passing
 * UACPI_NULL uninstalls the current one.
 */
uacpi_status uacpi_set_interface_query_handler(
    uacpi_interface_handler handler
)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Accept the new handler only when none is set, or when clearing
    if (interface_handler == UACPI_NULL || handler == UACPI_NULL)
        interface_handler = handler;
    else
        ret = UACPI_STATUS_ALREADY_EXISTS;

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Enable or disable every registered interface whose kind matches the
 * 'kind' bitmask in one pass.
 */
uacpi_status uacpi_bulk_configure_interfaces(
    uacpi_interface_action action, uacpi_interface_kind kind
)
{
    uacpi_status ret;
    struct registered_interface *iface;
    uacpi_bool disable = (action == UACPI_INTERFACE_ACTION_DISABLE);

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    for (iface = registered_interfaces; iface; iface = iface->next) {
        if ((kind & iface->kind) == 0)
            continue;

        iface->disabled = disable;
    }

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Answer an AML _OSI("string") query. *out_value receives UACPI_TRUE when
 * the interface is registered and enabled (possibly overridden by the user
 * query handler), UACPI_FALSE otherwise. Only mutex failure is an error.
 */
uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value)
{
    uacpi_status ret;
    struct registered_interface *iface;
    uacpi_bool supported = UACPI_FALSE;

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    iface = find_interface_unlocked(string);
    if (iface != UACPI_NULL) {
        // Track the newest vendor interface firmware has asked about
        if (iface->weight > latest_queried_interface)
            uacpi_atomic_store32(&latest_queried_interface, iface->weight);

        supported = !iface->disabled;
        if (interface_handler)
            supported = interface_handler(string, supported);
    }

    uacpi_release_native_mutex(interface_mutex);
    *out_value = supported;
    return UACPI_STATUS_OK;
}

439
src/uacpi/registers.c Normal file
View file

@ -0,0 +1,439 @@
#include <uacpi/internal/registers.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/io.h>
#include <uacpi/acpi.h>
// How a register's location is described in the spec table below
enum register_kind {
    REGISTER_KIND_GAS, // accessor(s) point to a struct acpi_gas
    REGISTER_KIND_IO,  // accessor0 points to a uacpi_u32 I/O port address
};
// How writes to the register must be performed
enum register_access_kind {
    // Read-modify-write: preserve_mask bits are carried over on write
    REGISTER_ACCESS_KIND_PRESERVE,
    // Status-style register: writing a 1 bit clears it
    REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
    // Plain write, no special handling
    REGISTER_ACCESS_KIND_NORMAL,
};
/*
 * Description of one abstract ACPI fixed-hardware register, possibly
 * split across an A and B hardware block (e.g. PM1a/PM1b).
 */
struct register_spec {
    uacpi_u8 kind;        // enum register_kind
    uacpi_u8 access_kind; // enum register_access_kind
    uacpi_u8 access_width; // only REGISTER_KIND_IO
    // Primary and optional secondary hardware locations
    void *accessor0, *accessor1;
    // Bits that read back undefined and must be masked off after a read
    uacpi_u64 write_only_mask;
    // Bits the hardware requires to be preserved across writes
    uacpi_u64 preserve_mask;
};
/*
 * Spec table for every abstract register, indexed by enum uacpi_register.
 * Accessors point into the runtime context (FADT fields / mapped blocks).
 */
static const struct register_spec registers[UACPI_REGISTER_MAX + 1] = {
    [UACPI_REGISTER_PM1_STS] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
        .accessor0 = &g_uacpi_rt_ctx.pm1a_status_blk,
        .accessor1 = &g_uacpi_rt_ctx.pm1b_status_blk,
        .preserve_mask = ACPI_PM1_STS_IGN0_MASK,
    },
    [UACPI_REGISTER_PM1_EN] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessor0 = &g_uacpi_rt_ctx.pm1a_enable_blk,
        .accessor1 = &g_uacpi_rt_ctx.pm1b_enable_blk,
    },
    [UACPI_REGISTER_PM1_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessor0 = &g_uacpi_rt_ctx.fadt.x_pm1a_cnt_blk,
        .accessor1 = &g_uacpi_rt_ctx.fadt.x_pm1b_cnt_blk,
        .write_only_mask = ACPI_PM1_CNT_SLP_EN_MASK |
                           ACPI_PM1_CNT_GBL_RLS_MASK,
        .preserve_mask = ACPI_PM1_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_PM_TMR] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessor0 = &g_uacpi_rt_ctx.fadt.x_pm_tmr_blk,
    },
    [UACPI_REGISTER_PM2_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessor0 = &g_uacpi_rt_ctx.fadt.x_pm2_cnt_blk,
        .preserve_mask = ACPI_PM2_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_SLP_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessor0 = &g_uacpi_rt_ctx.fadt.sleep_control_reg,
        .write_only_mask = ACPI_SLP_CNT_SLP_EN_MASK,
        .preserve_mask = ACPI_SLP_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_SLP_STS] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
        .accessor0 = &g_uacpi_rt_ctx.fadt.sleep_status_reg,
        .preserve_mask = ACPI_SLP_STS_PRESERVE_MASK,
    },
    [UACPI_REGISTER_RESET] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_NORMAL,
        .accessor0 = &g_uacpi_rt_ctx.fadt.reset_reg,
    },
    [UACPI_REGISTER_SMI_CMD] = {
        .kind = REGISTER_KIND_IO,
        .access_kind = REGISTER_ACCESS_KIND_NORMAL,
        .access_width = 1,
        .accessor0 = &g_uacpi_rt_ctx.fadt.smi_cmd,
    },
};
/*
 * Translate a register index into its spec table entry, or UACPI_NULL
 * when the index is out of range.
 */
static const struct register_spec *get_reg(uacpi_u8 idx)
{
    return idx <= UACPI_REGISTER_MAX ? &registers[idx] : UACPI_NULL;
}
/*
 * Read a single hardware location described by 'reg'.
 *
 * NOTE: for a GAS with a zero address this returns UACPI_STATUS_OK
 * without touching *out_value — callers must pre-initialize it.
 */
static uacpi_status read_one(
    enum register_kind kind, void *reg, uacpi_u8 byte_width,
    uacpi_u64 *out_value
)
{
    if (kind == REGISTER_KIND_GAS) {
        struct acpi_gas *gas = reg;

        // Absent register block: treat as a silent no-op
        if (!gas->address)
            return UACPI_STATUS_OK;

        return uacpi_gas_read(reg, out_value);
    }

    // REGISTER_KIND_IO: 'reg' points at a 32-bit port address
    return uacpi_system_io_read(*(uacpi_u32*)reg, byte_width, out_value);
}
/*
 * Write a single hardware location described by 'reg'. A GAS with a zero
 * address is silently skipped (returns UACPI_STATUS_OK).
 */
static uacpi_status write_one(
    enum register_kind kind, void *reg, uacpi_u8 byte_width,
    uacpi_u64 in_value
)
{
    if (kind == REGISTER_KIND_GAS) {
        struct acpi_gas *gas = reg;

        // Absent register block: treat as a silent no-op
        if (!gas->address)
            return UACPI_STATUS_OK;

        return uacpi_gas_write(reg, in_value);
    }

    // REGISTER_KIND_IO: 'reg' points at a 32-bit port address
    return uacpi_system_io_write(*(uacpi_u32*)reg, byte_width, in_value);
}
/*
 * Read an abstract register: read accessor0 and, if present, accessor1,
 * OR the two values together and strip write-only bits.
 *
 * Both accumulators are zero-initialized because read_one() leaves its
 * output untouched for a GAS with a zero address; the original left
 * value0 uninitialized, which could return garbage in that case.
 */
static uacpi_status do_read_register(
    const struct register_spec *reg, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_u64 value0 = 0, value1 = 0;

    ret = read_one(reg->kind, reg->accessor0, reg->access_width, &value0);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (reg->accessor1) {
        ret = read_one(reg->kind, reg->accessor1, reg->access_width, &value1);
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    *out_value = value0 | value1;

    // Write-only bits read back undefined; never expose them
    if (reg->write_only_mask)
        *out_value &= ~reg->write_only_mask;

    return UACPI_STATUS_OK;
}
/*
 * Public read of an abstract register. Returns INVALID_ARGUMENT for an
 * out-of-range register enum.
 */
uacpi_status uacpi_read_register(
    enum uacpi_register reg_enum, uacpi_u64 *out_value
)
{
    const struct register_spec *spec = get_reg(reg_enum);

    if (uacpi_unlikely(spec == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    return do_read_register(spec, out_value);
}
/*
 * Write an abstract register, honoring its preserve semantics.
 *
 * For registers with a preserve_mask and PRESERVE access kind, the current
 * hardware value is read first and the preserved bits are merged into the
 * outgoing value. The same value is written to both accessors when a
 * secondary block exists.
 */
static uacpi_status do_write_register(
    const struct register_spec *reg, uacpi_u64 in_value
)
{
    uacpi_status ret;

    if (reg->preserve_mask) {
        // Caller-provided bits in the preserved range are discarded
        in_value &= ~reg->preserve_mask;

        if (reg->access_kind == REGISTER_ACCESS_KIND_PRESERVE) {
            uacpi_u64 data;

            ret = do_read_register(reg, &data);
            if (uacpi_unlikely_error(ret))
                return ret;

            in_value |= data & reg->preserve_mask;
        }
    }

    ret = write_one(reg->kind, reg->accessor0, reg->access_width, in_value);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (reg->accessor1)
        ret = write_one(reg->kind, reg->accessor1, reg->access_width, in_value);

    return ret;
}
/*
 * Public write of an abstract register (same value to both A/B blocks).
 * Returns INVALID_ARGUMENT for an out-of-range register enum.
 */
uacpi_status uacpi_write_register(
    enum uacpi_register reg_enum, uacpi_u64 in_value
)
{
    const struct register_spec *spec = get_reg(reg_enum);

    if (uacpi_unlikely(spec == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    return do_write_register(spec, in_value);
}
/*
 * Write distinct values to the A and B halves of a split register
 * (in_value0 -> accessor0, in_value1 -> accessor1). No preserve handling.
 */
uacpi_status uacpi_write_registers(
    enum uacpi_register reg_enum, uacpi_u64 in_value0, uacpi_u64 in_value1
)
{
    uacpi_status ret;
    const struct register_spec *spec = get_reg(reg_enum);

    if (uacpi_unlikely(spec == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = write_one(spec->kind, spec->accessor0, spec->access_width, in_value0);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (spec->accessor1 != UACPI_NULL)
        ret = write_one(
            spec->kind, spec->accessor1, spec->access_width, in_value1
        );

    return ret;
}
// A named bit field within one of the abstract registers above
struct register_field {
    uacpi_u8 reg;     // enum uacpi_register the field lives in
    uacpi_u8 offset;  // bit offset of the field within the register
    uacpi_u16 mask;   // mask of the field bits (already shifted)
};
/*
 * Bit-field table indexed by enum uacpi_register_field; offsets and masks
 * come from the fixed-hardware register layouts in acpi.h.
 */
static const struct register_field fields[UACPI_REGISTER_FIELD_MAX + 1] = {
    [UACPI_REGISTER_FIELD_TMR_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_TMR_STS_IDX,
        .mask = ACPI_PM1_STS_TMR_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_BM_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_BM_STS_IDX,
        .mask = ACPI_PM1_STS_BM_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_GBL_STS_IDX,
        .mask = ACPI_PM1_STS_GBL_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_PWRBTN_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_PWRBTN_STS_IDX,
        .mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_SLPBTN_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_SLPBTN_STS_IDX,
        .mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_RTC_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_RTC_STS_IDX,
        .mask = ACPI_PM1_STS_RTC_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_WAK_STS] = {
        .reg = UACPI_REGISTER_SLP_STS,
        .offset = ACPI_SLP_STS_WAK_STS_IDX,
        .mask = ACPI_SLP_STS_WAK_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_WAK_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_WAKE_STS_IDX,
        .mask = ACPI_PM1_STS_WAKE_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_PCIEX_WAKE_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX,
        .mask = ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_TMR_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_TMR_EN_IDX,
        .mask = ACPI_PM1_EN_TMR_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_GBL_EN_IDX,
        .mask = ACPI_PM1_EN_GBL_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_PWRBTN_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_PWRBTN_EN_IDX,
        .mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_SLPBTN_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_SLPBTN_EN_IDX,
        .mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_RTC_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_RTC_EN_IDX,
        .mask = ACPI_PM1_EN_RTC_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX,
        .mask = ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK,
    },
    [UACPI_REGISTER_FIELD_SCI_EN] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SCI_EN_IDX,
        .mask = ACPI_PM1_CNT_SCI_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_BM_RLD] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_BM_RLD_IDX,
        .mask = ACPI_PM1_CNT_BM_RLD_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_RLS] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_GBL_RLS_IDX,
        .mask = ACPI_PM1_CNT_GBL_RLS_MASK,
    },
    [UACPI_REGISTER_FIELD_SLP_TYP] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SLP_TYP_IDX,
        .mask = ACPI_PM1_CNT_SLP_TYP_MASK,
    },
    [UACPI_REGISTER_FIELD_SLP_EN] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SLP_EN_IDX,
        .mask = ACPI_PM1_CNT_SLP_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_SLP_TYP] = {
        .reg = UACPI_REGISTER_SLP_CNT,
        .offset = ACPI_SLP_CNT_SLP_TYP_IDX,
        .mask = ACPI_SLP_CNT_SLP_TYP_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_SLP_EN] = {
        .reg = UACPI_REGISTER_SLP_CNT,
        .offset = ACPI_SLP_CNT_SLP_EN_IDX,
        .mask = ACPI_SLP_CNT_SLP_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_ARB_DIS] = {
        .reg = UACPI_REGISTER_PM2_CNT,
        .offset = ACPI_PM2_CNT_ARB_DIS_IDX,
        .mask = ACPI_PM2_CNT_ARB_DIS_MASK,
    },
};
// Spinlock serializing read-modify-write sequences in uacpi_write_register_field()
static uacpi_handle g_reg_lock;
/*
 * Allocate the spinlock used to serialize register field writes.
 * (The function name is kept as-is for API compatibility.)
 */
uacpi_status uacpi_ininitialize_registers(void)
{
    g_reg_lock = uacpi_kernel_create_spinlock();

    if (g_reg_lock == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    return UACPI_STATUS_OK;
}
void uacpi_deininitialize_registers(void)
{
if (g_reg_lock != UACPI_NULL) {
uacpi_kernel_free_spinlock(g_reg_lock);
g_reg_lock = UACPI_NULL;
}
}
/*
 * Read a named bit field: fetch the containing register, mask out the
 * field bits and shift them down to bit 0.
 */
uacpi_status uacpi_read_register_field(
    enum uacpi_register_field field_enum, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_u8 idx = field_enum;
    const struct register_field *fld;
    uacpi_u64 raw;

    if (uacpi_unlikely(idx > UACPI_REGISTER_FIELD_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    fld = &fields[idx];

    ret = do_read_register(&registers[fld->reg], &raw);
    if (uacpi_unlikely_error(ret))
        return ret;

    *out_value = (raw & fld->mask) >> fld->offset;
    return UACPI_STATUS_OK;
}
/*
 * Write 'in_value' into a named bit field of an abstract register, under
 * the global register spinlock.
 *
 * Write-to-clear registers (e.g. PM1_STS): a zero field value is a no-op,
 * otherwise the (shifted, masked) bits are written directly. All other
 * registers get a locked read-modify-write of just the field bits.
 */
uacpi_status uacpi_write_register_field(
    enum uacpi_register_field field_enum, uacpi_u64 in_value
)
{
    uacpi_status ret;
    uacpi_u8 field_idx = field_enum;
    const struct register_field *field;
    const struct register_spec *reg;
    uacpi_u64 data;
    uacpi_cpu_flags flags;

    if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    field = &fields[field_idx];
    reg = &registers[field->reg];

    // Move the value into field position and clamp to the field bits
    in_value = (in_value << field->offset) & field->mask;

    flags = uacpi_kernel_lock_spinlock(g_reg_lock);

    /*
     * BUGFIX: compare the access kind, not the register kind. The original
     * tested reg->kind (a register_kind: GAS/IO) against an access-kind
     * constant, which made every IO-kind register take this path.
     */
    if (reg->access_kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) {
        if (in_value == 0) {
            ret = UACPI_STATUS_OK;
            goto out;
        }

        ret = do_write_register(reg, in_value);
        goto out;
    }

    ret = do_read_register(reg, &data);
    if (uacpi_unlikely_error(ret))
        goto out;

    data &= ~field->mask;
    data |= in_value;

    ret = do_write_register(reg, data);

out:
    uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
    return ret;
}

2500
src/uacpi/resources.c Normal file

File diff suppressed because it is too large Load diff

66
src/uacpi/shareable.c Normal file
View file

@ -0,0 +1,66 @@
#include <uacpi/internal/shareable.h>
#include <uacpi/platform/atomic.h>
// Sentinel refcount marking an object whose counter over/underflowed
#define BUGGED_REFCOUNT 0xFFFFFFFF
// Initialize a shareable header with a single owning reference
void uacpi_shareable_init(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    shareable->reference_count = 1;
}
/*
 * Check whether this object's refcount has been poisoned. A count of zero
 * (an underflow, since live objects start at 1) promotes the object to
 * the bugged state so it is never freed or re-counted.
 */
uacpi_bool uacpi_bugged_shareable(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;

    if (uacpi_unlikely(shareable->reference_count == 0))
        uacpi_make_shareable_bugged(shareable);

    return uacpi_atomic_load32(&shareable->reference_count) == BUGGED_REFCOUNT;
}
// Poison the refcount so the object is leaked rather than double-freed
void uacpi_make_shareable_bugged(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    uacpi_atomic_store32(&shareable->reference_count, BUGGED_REFCOUNT);
}
/*
 * Atomically take a reference. Bugged objects are left untouched.
 * The '- 1' suggests uacpi_atomic_inc32() returns the post-increment
 * value, making this return the count prior to the increment —
 * NOTE(review): confirm against the atomic helpers.
 */
uacpi_u32 uacpi_shareable_ref(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;

    if (uacpi_unlikely(uacpi_bugged_shareable(shareable)))
        return BUGGED_REFCOUNT;

    return uacpi_atomic_inc32(&shareable->reference_count) - 1;
}
/*
 * Atomically drop a reference. Bugged objects are left untouched.
 * The '+ 1' mirrors uacpi_shareable_ref(): presumably returns the count
 * prior to the decrement (1 means "this was the last reference").
 */
uacpi_u32 uacpi_shareable_unref(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;

    if (uacpi_unlikely(uacpi_bugged_shareable(shareable)))
        return BUGGED_REFCOUNT;

    return uacpi_atomic_dec32(&shareable->reference_count) + 1;
}
/*
 * Drop a reference and invoke 'do_free' if this was the last one.
 * NULL handles and bugged (poisoned-refcount) objects are ignored.
 */
void uacpi_shareable_unref_and_delete_if_last(
    uacpi_handle handle, void (*do_free)(uacpi_handle)
)
{
    if (handle == UACPI_NULL || uacpi_bugged_shareable(handle))
        return;

    // unref returns the pre-decrement count; 1 means we held the last ref
    if (uacpi_shareable_unref(handle) == 1)
        do_free(handle);
}
// Atomically read the current reference count (diagnostic helper)
uacpi_u32 uacpi_shareable_refcount(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    return uacpi_atomic_load32(&shareable->reference_count);
}

606
src/uacpi/sleep.c Normal file
View file

@ -0,0 +1,606 @@
#include <uacpi/sleep.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/io.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/platform/arch_helpers.h>
#ifndef UACPI_REDUCED_HARDWARE
/*
 * Dispatch to the hw-full or hw-reduced variant of a sleep helper based
 * on the runtime hardware-reduced flag.
 */
#define CALL_SLEEP_FN(name, state) \
    (uacpi_is_hardware_reduced() ? \
        name##_hw_reduced(state) : name##_hw_full(state))
#else
/*
 * BUGFIX: no trailing semicolon — the macro must expand to an expression
 * so 'ret = CALL_SLEEP_FN(...);' behaves identically in both builds.
 */
#define CALL_SLEEP_FN(name, state) name##_hw_reduced(state)
#endif
static uacpi_status eval_wak(uacpi_u8 state);
static uacpi_status eval_sst(uacpi_u8 value);
#ifndef UACPI_REDUCED_HARDWARE
/*
 * Program the firmware waking vector(s) in the FACS.
 *
 * Always writes the 32-bit vector; writes the 64-bit vector only when the
 * FACS is large enough to contain it (length >= 32) and its version
 * supports it, otherwise the 64-bit field is zeroed so firmware uses the
 * 32-bit one. A missing FACS is treated as success (nothing to do).
 */
uacpi_status uacpi_set_waking_vector(
    uacpi_phys_addr addr32, uacpi_phys_addr addr64
)
{
    struct acpi_facs *facs = g_uacpi_rt_ctx.facs;

    if (facs == UACPI_NULL)
        return UACPI_STATUS_OK;

    facs->firmware_waking_vector = addr32;

    // The 64-bit wake vector doesn't exist, we're done
    if (facs->length < 32)
        return UACPI_STATUS_OK;

    // Only allow 64-bit wake vector on 1.0 and above FACS
    if (facs->version >= 1)
        facs->x_firmware_waking_vector = addr64;
    else
        facs->x_firmware_waking_vector = 0;

    return UACPI_STATUS_OK;
}
/*
 * Put fixed-hardware (non-reduced) systems into the requested sleep state.
 *
 * Sequence: clear WAK_STS, disable runtime GPEs, clear pending events,
 * arm wake GPEs, then write SLP_TYPa/b followed by SLP_EN into PM1_CNT.
 * For S1-S3 this returns after WAK_STS reads back 1 (i.e. after resume);
 * for S4/S5 reaching the code past the write is itself a failure.
 */
static uacpi_status enter_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u64 wake_status, pm1a, pm1b;

    ret = uacpi_write_register_field(
        UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_disable_all_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_clear_all_events();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_enable_all_wake_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Build per-block control values with the _Sx-provided sleep types
    pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
    pm1b = pm1a;
    pm1a |= g_uacpi_rt_ctx.last_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
    pm1b |= g_uacpi_rt_ctx.last_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;

    /*
     * Just like ACPICA, split writing SLP_TYP and SLP_EN to work around
     * buggy firmware that can't handle both written at the same time.
     */
    ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
    if (uacpi_unlikely_error(ret))
        return ret;

    pm1a |= ACPI_PM1_CNT_SLP_EN_MASK;
    pm1b |= ACPI_PM1_CNT_SLP_EN_MASK;

    // For light sleep states memory stays powered; flush caches first
    if (state < UACPI_SLEEP_STATE_S4)
        UACPI_ARCH_FLUSH_CPU_CACHE();

    ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (state > UACPI_SLEEP_STATE_S3) {
        /*
         * We're still here, this is a bug or very slow firmware.
         * Just try spinning for a bit.
         */
        uacpi_u64 stalled_time = 0;

        // 10 seconds max
        while (stalled_time < (10 * 1000 * 1000)) {
            uacpi_kernel_stall(100);
            stalled_time += 100;
        }

        // Try one more time
        ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
        if (uacpi_unlikely_error(ret))
            return ret;

        // Nothing we can do here, give up
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    // S1-S3: spin until hardware reports the wake event
    do {
        ret = uacpi_read_register_field(
            UACPI_REGISTER_FIELD_WAK_STS, &wake_status
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    } while (wake_status != 1);

    return UACPI_STATUS_OK;
}
/*
 * Early wake fixup for fixed-hardware systems: rewrite the S0 sleep type
 * values into PM1_CNT, which some hardware expects after resume.
 * All failures are deliberately ignored — always returns OK.
 */
static uacpi_status prepare_for_wake_from_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u64 pm1a, pm1b;
    UACPI_UNUSED(state);

    /*
     * Some hardware apparently relies on S0 values being written to the PM1
     * control register on wake, so do this here.
     */
    if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID)
        goto out;

    ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
    if (uacpi_unlikely_error(ret))
        goto out;

    pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
    pm1b = pm1a;
    pm1a |= g_uacpi_rt_ctx.s0_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
    pm1b |= g_uacpi_rt_ctx.s0_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;

    uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);

out:
    // Errors ignored intentionally, we don't want to abort because of this
    return UACPI_STATUS_OK;
}
/*
 * Complete the wake sequence on fixed-hardware systems: invalidate the
 * cached sleep types, update the _SST indicator, switch GPEs from wake
 * to runtime mode, run _WAK, and clear WAK_STS.
 */
static uacpi_status wake_from_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;

    // Set the status to 2 (waking) while we execute the wake method.
    eval_sst(2);

    ret = uacpi_disable_all_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_enable_all_runtime_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    eval_wak(state);

    // Apparently some BIOSes expect us to clear this, so do it
    uacpi_write_register_field(
        UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
    );

    // Now that we're awake set the status to 1 (running)
    eval_sst(1);

    return UACPI_STATUS_OK;
}
#endif
/*
 * Evaluate the \_Sx package for 'state' and extract the SLP_TYPa/b values.
 *
 * A one-element package provides both values packed in one integer
 * (low byte = a, next byte = b); two or more elements provide them
 * separately. On any failure *a and *b are set to UACPI_SLEEP_TYP_INVALID.
 */
static uacpi_status get_slp_type_for_state(
    uacpi_u8 state, uacpi_u8 *a, uacpi_u8 *b
)
{
    uacpi_char path[] = "_S0";
    uacpi_status ret;
    uacpi_object *obj0, *obj1, *ret_obj = UACPI_NULL;

    // Build "_S0".."_S5" in place
    path[2] += state;

    ret = uacpi_eval_typed(
        uacpi_namespace_root(), path, UACPI_NULL,
        UACPI_OBJECT_PACKAGE_BIT, &ret_obj
    );
    if (ret != UACPI_STATUS_OK) {
        if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) {
            uacpi_warn("error while evaluating %s: %s\n", path,
                       uacpi_status_to_string(ret));
        } else {
            uacpi_trace("sleep state %d is not supported as %s was not found\n",
                        state, path);
        }
        goto out;
    }

    switch (ret_obj->package->count) {
    case 0:
        uacpi_error("empty package while evaluating %s!\n", path);
        ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
        goto out;

    case 1:
        obj0 = ret_obj->package->objects[0];
        if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER)) {
            uacpi_error(
                "invalid object type at pkg[0] => %s when evaluating %s\n",
                uacpi_object_type_to_string(obj0->type), path
            );
            /*
             * BUGFIX: previously ret was left as OK here, so the caller
             * saw success with *a/*b never written.
             */
            ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
            goto out;
        }

        // Both types packed into one integer
        *a = obj0->integer;
        *b = obj0->integer >> 8;
        break;

    default:
        obj0 = ret_obj->package->objects[0];
        obj1 = ret_obj->package->objects[1];

        if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER ||
                           obj1->type != UACPI_OBJECT_INTEGER)) {
            uacpi_error(
                "invalid object type when evaluating %s: "
                "pkg[0] => %s, pkg[1] => %s\n", path,
                uacpi_object_type_to_string(obj0->type),
                uacpi_object_type_to_string(obj1->type)
            );
            ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
            goto out;
        }

        *a = obj0->integer;
        *b = obj1->integer;
        break;
    }

out:
    if (ret != UACPI_STATUS_OK) {
        *a = UACPI_SLEEP_TYP_INVALID;
        *b = UACPI_SLEEP_TYP_INVALID;
    }

    uacpi_object_unref(ret_obj);
    return ret;
}
/*
 * Evaluate an optional sleep-related method (e.g. _PTS/_WAK/_SST) with a
 * single integer argument. A missing method is not an error; any other
 * evaluation failure is logged and propagated.
 */
static uacpi_status eval_sleep_helper(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u8 value
)
{
    uacpi_status ret;
    uacpi_object *arg_obj;
    uacpi_object_array arg_list;

    arg_obj = uacpi_create_object(UACPI_OBJECT_INTEGER);
    if (uacpi_unlikely(arg_obj == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    arg_obj->integer = value;
    arg_list.objects = &arg_obj;
    arg_list.count = 1;

    ret = uacpi_eval(parent, path, &arg_list, UACPI_NULL);

    if (ret == UACPI_STATUS_NOT_FOUND) {
        // The method is optional; absence is success
        ret = UACPI_STATUS_OK;
    } else if (ret != UACPI_STATUS_OK) {
        uacpi_error("error while evaluating %s: %s\n",
                    path, uacpi_status_to_string(ret));
    }

    uacpi_object_unref(arg_obj);
    return ret;
}
// Evaluate \_PTS (Prepare To Sleep) with the target sleep state number.
static uacpi_status eval_pts(uacpi_u8 state)
{
    return eval_sleep_helper(uacpi_namespace_root(), "_PTS", state);
}
// Evaluate \_WAK (Wake) with the sleep state we are waking from.
static uacpi_status eval_wak(uacpi_u8 state)
{
    return eval_sleep_helper(uacpi_namespace_root(), "_WAK", state);
}
// Evaluate \_SI._SST (System Status indicator) with the given indicator value.
static uacpi_status eval_sst(uacpi_u8 value)
{
    return eval_sleep_helper(
        uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SI),
        "_SST", value
    );
}
/*
 * Map a sleep state to its _SST system status indicator value and evaluate
 * _SST with it.
 *
 * Per the spec, the single integer argument means:
 *   0 - no system state indication (indicator off)
 *   1 - working
 *   2 - waking
 *   3 - sleeping (S1, S2 or S3)
 *   4 - sleeping with context saved to non-volatile storage
 */
static uacpi_status eval_sst_for_state(enum uacpi_sleep_state state)
{
    uacpi_u8 indicator;

    if (state == UACPI_SLEEP_STATE_S0) {
        indicator = 1;
    } else if (state == UACPI_SLEEP_STATE_S1 ||
               state == UACPI_SLEEP_STATE_S2 ||
               state == UACPI_SLEEP_STATE_S3) {
        indicator = 3;
    } else if (state == UACPI_SLEEP_STATE_S4) {
        indicator = 4;
    } else if (state == UACPI_SLEEP_STATE_S5) {
        indicator = 0;
    } else {
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return eval_sst(indicator);
}
/*
 * Prepare the system for entering the given sleep state: cache the SLP_TYP
 * values for the target state (and for S0, needed later for wake-up), run
 * _PTS, and update the system status indicator via _SST.
 */
uacpi_status uacpi_prepare_for_sleep_state(enum uacpi_sleep_state state_enum)
{
    uacpi_u8 state = state_enum;
    uacpi_status ret;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_S5))
        return UACPI_STATUS_INVALID_ARGUMENT;
    ret = get_slp_type_for_state(
        state,
        &g_uacpi_rt_ctx.last_sleep_typ_a,
        &g_uacpi_rt_ctx.last_sleep_typ_b
    );
    if (ret != UACPI_STATUS_OK)
        return ret;
    // S0 values are optional; on failure they are left at
    // UACPI_SLEEP_TYP_INVALID and the error is deliberately ignored
    ret = get_slp_type_for_state(
        0,
        &g_uacpi_rt_ctx.s0_sleep_typ_a,
        &g_uacpi_rt_ctx.s0_sleep_typ_b
    );
    ret = eval_pts(state);
    if (uacpi_unlikely_error(ret))
        return ret;
    // _SST is optional, its result is intentionally not checked
    eval_sst_for_state(state);
    return UACPI_STATUS_OK;
}
/*
 * Build the value for the HW-reduced sleep control register: the sleep type
 * placed in its field (masked to size) with the SLP_EN bit set.
 */
static uacpi_u8 make_hw_reduced_sleep_control(uacpi_u8 slp_typ)
{
    uacpi_u8 control;

    control = (uacpi_u8)(slp_typ << ACPI_SLP_CNT_SLP_TYP_IDX)
              & ACPI_SLP_CNT_SLP_TYP_MASK;
    return control | ACPI_SLP_CNT_SLP_EN_MASK;
}
/*
 * Put a hardware-reduced platform to sleep: clear WAK_STS, write the cached
 * SLP_TYPa + SLP_EN to the sleep control register, then poll the sleep status
 * register until the system reports it is back in the working state.
 */
static uacpi_status enter_sleep_state_hw_reduced(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u8 sleep_control;
    uacpi_u64 wake_status;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    // Both registers are required for the HW-reduced sleep protocol
    if (!fadt->sleep_control_reg.address || !fadt->sleep_status_reg.address)
        return UACPI_STATUS_NOT_FOUND;
    // Clear any stale wake status before initiating the transition
    ret = uacpi_write_register_field(
        UACPI_REGISTER_FIELD_HWR_WAK_STS,
        ACPI_SLP_STS_CLEAR
    );
    if (uacpi_unlikely_error(ret))
        return ret;
    sleep_control = make_hw_reduced_sleep_control(
        g_uacpi_rt_ctx.last_sleep_typ_a
    );
    // For S1-S3 caches may stay powered in an undefined state, flush first
    if (state < UACPI_SLEEP_STATE_S4)
        UACPI_ARCH_FLUSH_CPU_CACHE();
    /*
     * To put the system into a sleep state, software will write the HW-reduced
     * Sleep Type value (obtained from the \_Sx object in the DSDT) and the
     * SLP_EN bit to the sleep control register.
     */
    ret = uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control);
    if (uacpi_unlikely_error(ret))
        return ret;
    /*
     * The OSPM then polls the WAK_STS bit of the SLEEP_STATUS_REG waiting for
     * it to be one (1), indicating that the system has been transitioned
     * back to the Working state.
     */
    do {
        ret = uacpi_read_register_field(
            UACPI_REGISTER_FIELD_HWR_WAK_STS, &wake_status
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    } while (wake_status != 1);
    return UACPI_STATUS_OK;
}
/*
 * Re-arm the HW-reduced sleep control register for S0 so the system ends up
 * back in the working state. A no-op when the S0 SLP_TYP values were not
 * available at prepare time.
 */
static uacpi_status prepare_for_wake_from_sleep_state_hw_reduced(uacpi_u8 state)
{
    UACPI_UNUSED(state);

    if (g_uacpi_rt_ctx.s0_sleep_typ_a != UACPI_SLEEP_TYP_INVALID) {
        uacpi_u8 control;

        control = make_hw_reduced_sleep_control(
            g_uacpi_rt_ctx.s0_sleep_typ_a
        );
        // Best-effort write, errors are intentionally ignored
        uacpi_write_register(UACPI_REGISTER_SLP_CNT, control);
    }

    return UACPI_STATUS_OK;
}
/*
 * Finish waking a hardware-reduced platform: invalidate the cached SLP_TYP
 * values, run _SST/_WAK, and clear the wake status bit. All steps are
 * best-effort, their return values are intentionally ignored.
 */
static uacpi_status wake_from_sleep_state_hw_reduced(uacpi_u8 state)
{
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    // Set the status to 2 (waking) while we execute the wake method.
    eval_sst(2);
    eval_wak(state);
    // Apparently some BIOSes expect us to clear this, so do it
    uacpi_write_register_field(
        UACPI_REGISTER_FIELD_HWR_WAK_STS, ACPI_SLP_STS_CLEAR
    );
    // Now that we're awake set the status to 1 (running)
    eval_sst(1);
    return UACPI_STATUS_OK;
}
/*
 * Enter the given sleep state. Requires a prior successful call to
 * uacpi_prepare_for_sleep_state(), which caches the SLP_TYP values
 * validated here. Dispatches via CALL_SLEEP_FN to the platform-specific
 * implementation.
 */
uacpi_status uacpi_enter_sleep_state(enum uacpi_sleep_state state_enum)
{
    uacpi_u8 state = state_enum;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    // Values above ACPI_SLP_TYP_MAX include UACPI_SLEEP_TYP_INVALID,
    // i.e. prepare was never called or _Sx evaluation failed
    if (uacpi_unlikely(g_uacpi_rt_ctx.last_sleep_typ_a > ACPI_SLP_TYP_MAX ||
                       g_uacpi_rt_ctx.last_sleep_typ_b > ACPI_SLP_TYP_MAX)) {
        uacpi_error("invalid SLP_TYP values: 0x%02X:0x%02X\n",
                    g_uacpi_rt_ctx.last_sleep_typ_a,
                    g_uacpi_rt_ctx.last_sleep_typ_b);
        return UACPI_STATUS_AML_BAD_ENCODING;
    }
    return CALL_SLEEP_FN(enter_sleep_state, state);
}
/*
 * First stage of wake-up: re-arm the hardware so the system returns to S0.
 * Dispatches via CALL_SLEEP_FN to the platform-specific implementation.
 */
uacpi_status uacpi_prepare_for_wake_from_sleep_state(
    uacpi_sleep_state state_enum
)
{
    uacpi_u8 state = state_enum;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    return CALL_SLEEP_FN(prepare_for_wake_from_sleep_state, state);
}
/*
 * Second stage of wake-up, performed once the system is running again.
 * Dispatches via CALL_SLEEP_FN to the platform-specific implementation.
 */
uacpi_status uacpi_wake_from_sleep_state(
    uacpi_sleep_state state_enum
)
{
    uacpi_u8 state = state_enum;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    return CALL_SLEEP_FN(wake_from_sleep_state, state);
}
/*
 * Reset the system via the FADT reset register. Supports SystemIO,
 * SystemMemory and PCI config address spaces. On success this call does not
 * return; if the write appears to succeed but the machine keeps running,
 * spin for ~1 second and report a hardware timeout.
 */
uacpi_status uacpi_reboot(void)
{
    uacpi_status ret;
    uacpi_handle pci_dev = UACPI_NULL;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    struct acpi_gas *reset_reg = &fadt->reset_reg;
    /*
     * Allow restarting earlier than namespace load so that the kernel can
     * use this in case of some initialization error.
     */
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (!(fadt->flags & ACPI_RESET_REG_SUP) || !reset_reg->address)
        return UACPI_STATUS_NOT_FOUND;
    switch (reset_reg->address_space_id) {
    case UACPI_ADDRESS_SPACE_SYSTEM_IO:
        /*
         * For SystemIO we don't do any checking, and we ignore bit width
         * because that's what NT does.
         */
        ret = uacpi_system_io_write(reset_reg->address, 1, fadt->reset_value);
        break;
    case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY:
        ret = uacpi_write_register(UACPI_REGISTER_RESET, fadt->reset_value);
        break;
    case UACPI_ADDRESS_SPACE_PCI_CONFIG: {
        // Bus is assumed to be 0 here
        // Device/function/offset are encoded in the GAS address field
        uacpi_pci_address address = {
            .segment = 0,
            .bus = 0,
            .device = (reset_reg->address >> 32) & 0xFF,
            .function = (reset_reg->address >> 16) & 0xFF,
        };
        ret = uacpi_kernel_pci_device_open(address, &pci_dev);
        if (uacpi_unlikely_error(ret))
            break;
        ret = uacpi_kernel_pci_write(
            pci_dev, reset_reg->address & 0xFFFF, 1, fadt->reset_value
        );
        break;
    }
    default:
        uacpi_warn(
            "unable to perform a reset: unsupported address space '%s' (%d)\n",
            uacpi_address_space_to_string(reset_reg->address_space_id),
            reset_reg->address_space_id
        );
        ret = UACPI_STATUS_UNIMPLEMENTED;
    }
    if (ret == UACPI_STATUS_OK) {
        /*
         * This should've worked but we're still here.
         * Spin for a bit then give up.
         */
        uacpi_u64 stalled_time = 0;
        while (stalled_time < (1000 * 1000)) {
            uacpi_kernel_stall(100);
            stalled_time += 100;
        }
        uacpi_error("reset timeout\n");
        ret = UACPI_STATUS_HARDWARE_TIMEOUT;
    }
    if (pci_dev != UACPI_NULL)
        uacpi_kernel_pci_device_close(pci_dev);
    return ret;
}

711
src/uacpi/stdlib.c Normal file
View file

@ -0,0 +1,711 @@
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/platform/config.h>
#ifndef uacpi_memcpy
void *uacpi_memcpy(void *dest, const void *src, size_t count)
{
uacpi_char *cd = dest;
const uacpi_char *cs = src;
while (count--)
*cd++ = *cs++;
return dest;
}
#endif
#ifndef uacpi_memmove
/*
 * Freestanding memmove fallback: supports overlapping regions by picking
 * the copy direction based on the relative placement of src and dest.
 */
void *uacpi_memmove(void *dest, const void *src, uacpi_size count)
{
    uacpi_char *out = dest;
    const uacpi_char *in = src;

    if (src < dest) {
        // Copy backwards so an overlapping tail isn't clobbered first
        while (count--)
            out[count] = in[count];
    } else {
        uacpi_size i;

        for (i = 0; i < count; i++)
            out[i] = in[i];
    }

    return dest;
}
#endif
#ifndef uacpi_memset
// Freestanding memset fallback: fills 'count' bytes of dest with (uacpi_u8)ch.
void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count)
{
    uacpi_u8 *out = dest;
    uacpi_u8 byte = (uacpi_u8)ch;
    uacpi_size i;

    for (i = 0; i < count; i++)
        out[i] = byte;

    return dest;
}
#endif
#ifndef uacpi_memcmp
/*
 * Freestanding memcmp fallback: byte-wise comparison, returning the
 * difference of the first mismatching byte pair, or 0 if all equal.
 */
uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count)
{
    const uacpi_u8 *l = lhs;
    const uacpi_u8 *r = rhs;

    while (count--) {
        if (*l != *r)
            return *l - *r;
        l++;
        r++;
    }

    return 0;
}
#endif
#ifndef uacpi_strlen
// Freestanding strlen fallback: bytes before the terminating NUL.
uacpi_size uacpi_strlen(const uacpi_char *str)
{
    uacpi_size len = 0;

    while (str[len])
        len++;

    return len;
}
#endif
#ifndef uacpi_strnlen
// Freestanding strnlen fallback: like strlen, but never reads past max bytes.
uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max)
{
    uacpi_size len = 0;

    while (len < max && str[len])
        len++;

    return len;
}
#endif
#ifndef uacpi_strcmp
/*
 * Freestanding strcmp fallback. The comparison is done on unsigned byte
 * values, as required of the standard strcmp.
 */
uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs)
{
    const uacpi_u8 *l = (const uacpi_u8*)lhs;
    const uacpi_u8 *r = (const uacpi_u8*)rhs;

    while (*l && *r) {
        if (*l != *r)
            break;
        l++;
        r++;
    }

    return *l - *r;
}
#endif
#ifndef uacpi_vsnprintf
// Output state shared by the formatted-print helpers below.
struct fmt_buf_state {
    uacpi_char *buffer;       // destination buffer, may be smaller than output
    uacpi_size capacity;      // total size of 'buffer' in bytes
    uacpi_size bytes_written; // bytes produced so far; may exceed capacity
};
// Parsed state of a single '%' conversion specification.
struct fmt_spec {
    uacpi_u8 is_signed : 1;
    uacpi_u8 prepend : 1;        // emit prepend_char before positive numbers
    uacpi_u8 uppercase : 1;      // uppercase hex digits ('X')
    uacpi_u8 left_justify : 1;   // '-' flag
    uacpi_u8 alternate_form : 1; // '#' flag: 0x/0 prefix for hex/octal
    uacpi_u8 has_precision : 1;
    uacpi_char pad_char;         // ' ' by default, '0' with the '0' flag
    uacpi_char prepend_char;     // '+' or ' '
    uacpi_u64 min_width;
    uacpi_u64 precision;
    uacpi_u32 base;              // 8, 10 or 16
};
/*
 * Append a single character to the output. Characters beyond the buffer
 * capacity are dropped, but bytes_written still advances so the caller can
 * report the would-be length (snprintf semantics).
 */
static void write_one(struct fmt_buf_state *fb_state, uacpi_char c)
{
    if (fb_state->bytes_written < fb_state->capacity)
        fb_state->buffer[fb_state->bytes_written] = c;
    fb_state->bytes_written++;
}
/*
 * Append up to 'count' characters, truncating at the buffer capacity while
 * still accounting for the full count in bytes_written.
 */
static void write_many(
    struct fmt_buf_state *fb_state, const uacpi_char *string, uacpi_size count
)
{
    if (fb_state->bytes_written < fb_state->capacity) {
        uacpi_size count_to_write;
        count_to_write = UACPI_MIN(
            count, fb_state->capacity - fb_state->bytes_written
        );
        uacpi_memcpy(
            &fb_state->buffer[fb_state->bytes_written], string, count_to_write
        );
    }
    fb_state->bytes_written += count;
}
// Convert a value in [0, 15] to its hexadecimal digit, upper or lower case.
static uacpi_char hex_char(uacpi_bool upper, uacpi_u64 value)
{
    static const uacpi_char lower_digits[] = "0123456789abcdef";
    static const uacpi_char upper_digits[] = "0123456789ABCDEF";

    if (upper)
        return upper_digits[value];
    return lower_digits[value];
}
/*
 * Pad a representation of 'repr_size' characters up to the requested minimum
 * field width. Left-justified fields are always padded with spaces (the pad
 * goes after the value); otherwise the configured pad_char is used.
 */
static void write_padding(
    struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_size repr_size
)
{
    uacpi_u64 mw = fm->min_width;
    if (mw <= repr_size)
        return;
    mw -= repr_size;
    while (mw--)
        write_one(fb_state, fm->left_justify ? ' ' : fm->pad_char);
}
// Large enough for a 64-bit value in any supported base plus a 0x/0 prefix
#define REPR_BUFFER_SIZE 32
/*
 * Render an integer per the parsed conversion spec: handles sign, base
 * (8/10/16), alternate-form prefixes and field width/justification.
 * Digits are produced least-significant-first into the tail of a local
 * buffer, then emitted in order.
 *
 * NOTE(review): the sign/prepend character is written before the padding,
 * so right-justified space-padded negatives render as "-   123" rather than
 * the standard printf "   -123" — preserved as-is.
 */
static void write_integer(
    struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_u64 value
)
{
    uacpi_char repr_buffer[REPR_BUFFER_SIZE];
    uacpi_size index = REPR_BUFFER_SIZE;
    uacpi_u64 remainder;
    uacpi_char repr;
    uacpi_bool negative = UACPI_FALSE;
    uacpi_size repr_size;
    if (fm->is_signed) {
        uacpi_i64 as_ll = value;
        if (as_ll < 0) {
            value = -as_ll;
            negative = UACPI_TRUE;
        }
    }
    if (fm->prepend || negative)
        write_one(fb_state, negative ? '-' : fm->prepend_char);
    while (value) {
        remainder = value % fm->base;
        value /= fm->base;
        if (fm->base == 16) {
            repr = hex_char(fm->uppercase, remainder);
        } else if (fm->base == 8 || fm->base == 10) {
            repr = remainder + '0';
        } else {
            repr = '?';
        }
        repr_buffer[--index] = repr;
    }
    repr_size = REPR_BUFFER_SIZE - index;
    // A zero value produces no digits above, emit a single '0'
    if (repr_size == 0) {
        repr_buffer[--index] = '0';
        repr_size = 1;
    }
    if (fm->alternate_form) {
        if (fm->base == 16) {
            repr_buffer[--index] = fm->uppercase ? 'X' : 'x';
            repr_buffer[--index] = '0';
            repr_size += 2;
        } else if (fm->base == 8) {
            repr_buffer[--index] = '0';
            repr_size += 1;
        }
    }
    if (fm->left_justify) {
        write_many(fb_state, &repr_buffer[index], repr_size);
        write_padding(fb_state, fm, repr_size);
    } else {
        write_padding(fb_state, fm, repr_size);
        write_many(fb_state, &repr_buffer[index], repr_size);
    }
}
// Check (without reading past the first 'characters' bytes) whether the
// NUL-terminated string contains at least that many characters.
static uacpi_bool string_has_at_least(
    const uacpi_char *string, uacpi_size characters
)
{
    const uacpi_char *p;

    for (p = string; *p != '\0'; p++) {
        if (--characters == 0)
            return UACPI_TRUE;
    }

    return UACPI_FALSE;
}
/*
 * Advance *string past a run of decimal digits, reporting the run length in
 * *out_size. Returns UACPI_FALSE (consuming nothing) when no digits are
 * present.
 */
static uacpi_bool consume_digits(
    const uacpi_char **string, uacpi_size *out_size
)
{
    const uacpi_char *cursor = *string;

    while (*cursor >= '0' && *cursor <= '9')
        cursor++;

    if (cursor == *string)
        return UACPI_FALSE;

    *out_size = cursor - *string;
    *string = cursor;
    return UACPI_TRUE;
}
enum parse_number_mode {
    PARSE_NUMBER_MODE_MAYBE,
    PARSE_NUMBER_MODE_MUST,
};
/*
 * Parse an unsigned decimal number at *fmt, advancing it past the digits.
 * In MAYBE mode the absence of digits is not an error (*out_value is left
 * untouched); in MUST mode it is.
 */
static uacpi_bool parse_number(
    const uacpi_char **fmt, enum parse_number_mode mode, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_size num_digits;
    const uacpi_char *digits = *fmt;
    if (!consume_digits(fmt, &num_digits))
        return mode != PARSE_NUMBER_MODE_MUST;
    ret = uacpi_string_to_integer(digits, num_digits, UACPI_BASE_DEC, out_value);
    return ret == UACPI_STATUS_OK;
}
// Advance *string past 'token' if it is an exact prefix; report success.
static uacpi_bool consume(const uacpi_char **string, const uacpi_char *token)
{
    uacpi_size token_size;
    token_size = uacpi_strlen(token);
    // Bounded length check first so memcmp never reads past the string
    if (!string_has_at_least(*string, token_size))
        return UACPI_FALSE;
    if (!uacpi_memcmp(*string, token, token_size)) {
        *string += token_size;
        return UACPI_TRUE;
    }
    return UACPI_FALSE;
}
// Check whether character 'c' appears in the NUL-terminated 'list'.
static uacpi_bool is_one_of(uacpi_char c, const uacpi_char *list)
{
    for (; *list; list++) {
        if (c == *list)
            return UACPI_TRUE;
    }
    return UACPI_FALSE;
}
// Consume one character if it is in 'list', storing it in *consumed_char.
static uacpi_bool consume_one_of(
    const uacpi_char **string, const uacpi_char *list, uacpi_char *consumed_char
)
{
    uacpi_char c = **string;
    if (!c)
        return UACPI_FALSE;
    if (is_one_of(c, list)) {
        *consumed_char = c;
        *string += 1;
        return UACPI_TRUE;
    }
    return UACPI_FALSE;
}
// Map a conversion specifier to its numeric base (10 unless x/X/o).
static uacpi_u32 base_from_specifier(uacpi_char specifier)
{
    if (specifier == 'x' || specifier == 'X')
        return 16;
    if (specifier == 'o')
        return 8;
    return 10;
}
// Only 'X' requests uppercase hexadecimal digits.
static uacpi_bool is_uppercase_specifier(uacpi_char specifier)
{
    return specifier == 'X';
}
/*
 * Locate the next '%' in fmt; *offset receives the number of plain
 * characters preceding it. Returns UACPI_NULL when none remain.
 */
static const uacpi_char *find_next_conversion(
    const uacpi_char *fmt, uacpi_size *offset
)
{
    const uacpi_char *p = fmt;

    while (*p != '\0' && *p != '%')
        p++;

    *offset = p - fmt;
    return *p ? p : UACPI_NULL;
}
/*
 * Minimal vsnprintf implementation used when the host doesn't provide one.
 * Supports %c %s %p %d %i %u %o %x %X with hh/h/l/ll/z length modifiers,
 * the flags "+ - space 0 #", and width/precision (including '*').
 * Returns the number of characters that would have been written, and always
 * NUL-terminates the output when capacity permits (snprintf semantics).
 */
uacpi_i32 uacpi_vsnprintf(
    uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt,
    uacpi_va_list vlist
)
{
    struct fmt_buf_state fb_state = {
        .buffer = buffer,
        .capacity = capacity,
        .bytes_written = 0
    };
    uacpi_u64 value;
    const uacpi_char *next_conversion;
    uacpi_size next_offset;
    uacpi_char flag;
    while (*fmt) {
        struct fmt_spec fm = {
            .pad_char = ' ',
            .base = 10,
        };
        // Copy everything up to the next '%' verbatim
        next_conversion = find_next_conversion(fmt, &next_offset);
        if (next_offset)
            write_many(&fb_state, fmt, next_offset);
        if (!next_conversion)
            break;
        fmt = next_conversion;
        if (consume(&fmt, "%%")) {
            write_one(&fb_state, '%');
            continue;
        }
        // consume %
        fmt++;
        // Flags may appear in any order and repeat
        while (consume_one_of(&fmt, "+- 0#", &flag)) {
            switch (flag) {
            case '+':
            case ' ':
                fm.prepend = UACPI_TRUE;
                fm.prepend_char = flag;
                continue;
            case '-':
                fm.left_justify = UACPI_TRUE;
                continue;
            case '0':
                fm.pad_char = '0';
                continue;
            case '#':
                fm.alternate_form = UACPI_TRUE;
                continue;
            default:
                return -1;
            }
        }
        // Field width: '*' pulls it from the argument list
        if (consume(&fmt, "*")) {
            fm.min_width = uacpi_va_arg(vlist, int);
        } else if (!parse_number(&fmt, PARSE_NUMBER_MODE_MAYBE, &fm.min_width)) {
            return -1;
        }
        if (consume(&fmt, ".")) {
            fm.has_precision = UACPI_TRUE;
            if (consume(&fmt, "*")) {
                fm.precision = uacpi_va_arg(vlist, int);
            } else {
                if (!parse_number(&fmt, PARSE_NUMBER_MODE_MUST, &fm.precision))
                    return -1;
            }
        }
        flag = 0;
        if (consume(&fmt, "c")) {
            uacpi_char c = uacpi_va_arg(vlist, int);
            write_one(&fb_state, c);
            continue;
        }
        if (consume(&fmt, "s")) {
            const uacpi_char *string = uacpi_va_arg(vlist, uacpi_char*);
            uacpi_size i;
            // Precision bounds both the characters read and the field fill
            for (i = 0; (!fm.has_precision || i < fm.precision) && string[i]; ++i)
                write_one(&fb_state, string[i]);
            while (fm.has_precision && (i++ < fm.precision))
                write_one(&fb_state, fm.pad_char);
            continue;
        }
        if (consume(&fmt, "p")) {
            // Pointers are rendered as fixed-width zero-padded hex
            value = (uacpi_uintptr)uacpi_va_arg(vlist, void*);
            fm.base = 16;
            fm.min_width = UACPI_POINTER_SIZE * 2;
            fm.pad_char = '0';
            goto write_int;
        }
        // Length modifiers; 'z' maps to whichever of long/long long
        // matches uacpi_size on this target
        if (consume(&fmt, "hh")) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = (signed char)uacpi_va_arg(vlist, int);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = (unsigned char)uacpi_va_arg(vlist, int);
            } else {
                return -1;
            }
            goto write_int;
        }
        if (consume(&fmt, "h")) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = (signed short)uacpi_va_arg(vlist, int);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = (unsigned short)uacpi_va_arg(vlist, int);
            } else {
                return -1;
            }
            goto write_int;
        }
        if (consume(&fmt, "ll") ||
            (sizeof(uacpi_size) == sizeof(long long) && consume(&fmt, "z"))) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = uacpi_va_arg(vlist, long long);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = uacpi_va_arg(vlist, unsigned long long);
            } else {
                return -1;
            }
            goto write_int;
        }
        if (consume(&fmt, "l") ||
            (sizeof(uacpi_size) == sizeof(long) && consume(&fmt, "z"))) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = uacpi_va_arg(vlist, long);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = uacpi_va_arg(vlist, unsigned long);
            } else {
                return -1;
            }
            goto write_int;
        }
        // No length modifier: plain 32-bit conversions
        if (consume(&fmt, "d") || consume(&fmt, "i")) {
            value = uacpi_va_arg(vlist, uacpi_i32);
            fm.is_signed = UACPI_TRUE;
        } else if (consume_one_of(&fmt, "oxXu", &flag)) {
            value = uacpi_va_arg(vlist, uacpi_u32);
        } else {
            return -1;
        }
    write_int:
        if (flag != 0) {
            fm.base = base_from_specifier(flag);
            fm.uppercase = is_uppercase_specifier(flag);
        }
        write_integer(&fb_state, &fm, value);
    }
    // Always NUL-terminate if there's any room at all
    if (fb_state.capacity) {
        uacpi_size last_char;
        last_char = UACPI_MIN(fb_state.bytes_written, fb_state.capacity - 1);
        fb_state.buffer[last_char] = '\0';
    }
    return fb_state.bytes_written;
}
#endif
#ifndef uacpi_snprintf
// Convenience variadic wrapper around uacpi_vsnprintf.
uacpi_i32 uacpi_snprintf(
    uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ...
)
{
    uacpi_va_list vlist;
    uacpi_i32 ret;
    uacpi_va_start(vlist, fmt);
    ret = uacpi_vsnprintf(buffer, capacity, fmt, vlist);
    uacpi_va_end(vlist);
    return ret;
}
#endif
/*
 * Copy min(src_size, dst_size) bytes from src into dst, then zero-fill
 * whatever tail of dst the copy did not cover.
 */
void uacpi_memcpy_zerout(void *dst, const void *src,
                         uacpi_size dst_size, uacpi_size src_size)
{
    uacpi_size copied = src_size < dst_size ? src_size : dst_size;

    if (copied != 0)
        uacpi_memcpy(dst, src, copied);
    if (copied < dst_size)
        uacpi_memzero((uacpi_u8*)dst + copied, dst_size - copied);
}
/*
 * Return the 1-based index of the least significant set bit of 'value',
 * or 0 if no bits are set (ffsll semantics).
 */
uacpi_u8 uacpi_bit_scan_forward(uacpi_u64 value)
{
#ifdef _MSC_VER
    unsigned char ret;
    unsigned long index;
#ifdef _WIN64
    ret = _BitScanForward64(&index, value);
    if (ret == 0)
        return 0;
    return (uacpi_u8)index + 1;
#else
    // 32-bit MSVC has no 64-bit scan: try the low half, then the high half
    ret = _BitScanForward(&index, value);
    if (ret == 0) {
        ret = _BitScanForward(&index, value >> 32);
        if (ret == 0)
            return 0;
        return (uacpi_u8)index + 33;
    }
    return (uacpi_u8)index + 1;
#endif
#else
    return __builtin_ffsll(value);
#endif
}
/*
 * Return the 1-based index of the most significant set bit of 'value',
 * or 0 if no bits are set.
 */
uacpi_u8 uacpi_bit_scan_backward(uacpi_u64 value)
{
#ifdef _MSC_VER
    unsigned char ret;
    unsigned long index;
#ifdef _WIN64
    ret = _BitScanReverse64(&index, value);
    if (ret == 0)
        return 0;
    return (uacpi_u8)index + 1;
#else
    // 32-bit MSVC has no 64-bit scan: try the high half, then the low half
    ret = _BitScanReverse(&index, value >> 32);
    if (ret == 0) {
        ret = _BitScanReverse(&index, value);
        if (ret == 0)
            return 0;
        return (uacpi_u8)index + 1;
    }
    return (uacpi_u8)index + 33;
#endif
#else
    if (value == 0)
        return 0;
    return 64 - __builtin_clzll(value);
#endif
}
// Return the number of set bits in 'value'.
uacpi_u8 uacpi_popcount(uacpi_u64 value)
{
#ifdef _MSC_VER
#ifdef _WIN64
    return __popcnt64(value);
#else
    // 32-bit MSVC: sum the popcounts of both halves
    return __popcnt(value) + __popcnt(value >> 32);
#endif
#else
    return __builtin_popcountll(value);
#endif
}
#ifndef UACPI_FORMATTED_LOGGING
/*
 * printf-style logging entry point used when the host only provides a
 * plain (pre-formatted) uacpi_kernel_log. Formats the message into a
 * fixed-size stack buffer and forwards the result.
 *
 * Fix: uacpi_va_end() is now invoked immediately after the vlist is
 * consumed; the original returned early on a vsnprintf error without
 * calling it, which is undefined behavior per the C standard.
 */
void uacpi_log(uacpi_log_level lvl, const uacpi_char *str, ...)
{
    uacpi_char buf[UACPI_PLAIN_LOG_BUFFER_SIZE];
    int ret;
    uacpi_va_list vlist;
    uacpi_va_start(vlist, str);
    ret = uacpi_vsnprintf(buf, sizeof(buf), str, vlist);
    uacpi_va_end(vlist);
    if (uacpi_unlikely(ret < 0))
        return;
    /*
     * If this log message is too large for the configured buffer size, cut off
     * the end and transform into "...\n" to indicate that it didn't fit and
     * prevent the newline from being truncated.
     */
    if (uacpi_unlikely(ret >= UACPI_PLAIN_LOG_BUFFER_SIZE)) {
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 5] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 4] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 3] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 2] = '\n';
    }
    uacpi_kernel_log(lvl, buf);
}
#endif
#ifndef UACPI_NATIVE_ALLOC_ZEROED
/*
 * Fallback zeroed allocator: a plain uacpi_kernel_alloc followed by an
 * explicit memzero. Returns UACPI_NULL on allocation failure.
 */
void *uacpi_builtin_alloc_zeroed(uacpi_size size)
{
    void *mem = uacpi_kernel_alloc(size);

    if (uacpi_unlikely(mem == UACPI_NULL))
        return UACPI_NULL;

    uacpi_memzero(mem, size);
    return mem;
}
#endif

1350
src/uacpi/tables.c Normal file

File diff suppressed because it is too large Load diff

1460
src/uacpi/types.c Normal file

File diff suppressed because it is too large Load diff

944
src/uacpi/uacpi.c Normal file
View file

@ -0,0 +1,944 @@
#include <uacpi/uacpi.h>
#include <uacpi/acpi.h>
#include <uacpi/platform/config.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/tables.h>
#include <uacpi/internal/interpreter.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/internal/notify.h>
#include <uacpi/internal/osi.h>
#include <uacpi/internal/registers.h>
struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 };
// Set the active log level; 0 selects the compile-time default.
void uacpi_context_set_log_level(uacpi_log_level lvl)
{
    if (lvl == 0)
        lvl = UACPI_DEFAULT_LOG_LEVEL;
    g_uacpi_rt_ctx.log_level = lvl;
}
// Apply the default log level unless one was already configured.
void uacpi_logger_initialize(void)
{
    if (g_uacpi_rt_ctx.log_level != 0)
        return;
    uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL);
}
// Set the AML While-loop timeout; 0 selects the compile-time default.
void uacpi_context_set_loop_timeout(uacpi_u32 seconds)
{
    if (seconds == 0)
        seconds = UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS;
    g_uacpi_rt_ctx.loop_timeout_seconds = seconds;
}
// Set the maximum AML call stack depth; 0 selects the compile-time default.
void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth)
{
    if (depth == 0)
        depth = UACPI_DEFAULT_MAX_CALL_STACK_DEPTH;
    g_uacpi_rt_ctx.max_call_stack_depth = depth;
}
// Report the currently configured AML While-loop timeout in seconds.
uacpi_u32 uacpi_context_get_loop_timeout(void)
{
    return g_uacpi_rt_ctx.loop_timeout_seconds;
}
// Enable/disable table checksum validation via the runtime flag.
void uacpi_context_set_proactive_table_checksum(uacpi_bool setting)
{
    if (setting)
        g_uacpi_rt_ctx.flags |= UACPI_FLAG_PROACTIVE_TBL_CSUM;
    else
        g_uacpi_rt_ctx.flags &= ~UACPI_FLAG_PROACTIVE_TBL_CSUM;
}
// Map a uacpi_status code to a human-readable description for logging.
const uacpi_char *uacpi_status_to_string(uacpi_status st)
{
    switch (st) {
    case UACPI_STATUS_OK:
        return "no error";
    case UACPI_STATUS_MAPPING_FAILED:
        return "failed to map memory";
    case UACPI_STATUS_OUT_OF_MEMORY:
        return "out of memory";
    case UACPI_STATUS_BAD_CHECKSUM:
        return "bad table checksum";
    case UACPI_STATUS_INVALID_SIGNATURE:
        return "invalid table signature";
    case UACPI_STATUS_INVALID_TABLE_LENGTH:
        return "invalid table length";
    case UACPI_STATUS_NOT_FOUND:
        return "not found";
    case UACPI_STATUS_INVALID_ARGUMENT:
        return "invalid argument";
    case UACPI_STATUS_UNIMPLEMENTED:
        return "unimplemented";
    case UACPI_STATUS_ALREADY_EXISTS:
        return "already exists";
    case UACPI_STATUS_INTERNAL_ERROR:
        return "internal error";
    case UACPI_STATUS_TYPE_MISMATCH:
        return "object type mismatch";
    case UACPI_STATUS_INIT_LEVEL_MISMATCH:
        return "init level too low/high for this action";
    case UACPI_STATUS_NAMESPACE_NODE_DANGLING:
        return "attempting to use a dangling namespace node";
    case UACPI_STATUS_NO_HANDLER:
        return "no handler found";
    case UACPI_STATUS_NO_RESOURCE_END_TAG:
        return "resource template without an end tag";
    case UACPI_STATUS_COMPILED_OUT:
        return "this functionality has been compiled out of this build";
    case UACPI_STATUS_HARDWARE_TIMEOUT:
        return "timed out waiting for hardware response";
    case UACPI_STATUS_TIMEOUT:
        return "wait timed out";
    case UACPI_STATUS_OVERRIDDEN:
        return "the requested action has been overridden";
    case UACPI_STATUS_DENIED:
        return "the requested action has been denied";
    case UACPI_STATUS_AML_UNDEFINED_REFERENCE:
        return "AML referenced an undefined object";
    case UACPI_STATUS_AML_INVALID_NAMESTRING:
        return "invalid AML name string";
    case UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS:
        return "object already exists";
    case UACPI_STATUS_AML_INVALID_OPCODE:
        return "invalid AML opcode";
    case UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE:
        return "incompatible AML object type";
    case UACPI_STATUS_AML_BAD_ENCODING:
        return "bad AML instruction encoding";
    case UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX:
        return "out of bounds AML index";
    case UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH:
        return "AML attempted to acquire a mutex with a lower sync level";
    case UACPI_STATUS_AML_INVALID_RESOURCE:
        return "invalid resource template encoding or type";
    case UACPI_STATUS_AML_LOOP_TIMEOUT:
        return "hanging AML while loop";
    case UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT:
        return "reached maximum AML call stack depth";
    default:
        return "<invalid status>";
    }
}
#ifndef UACPI_REDUCED_HARDWARE
enum hw_mode {
    HW_MODE_ACPI = 0,
    HW_MODE_LEGACY = 1,
};
/*
 * Determine whether the platform is currently in ACPI or legacy (SMM) mode
 * by sampling the SCI_EN bit. Platforms without an SMI command register are
 * considered to always be in ACPI mode.
 */
static enum hw_mode read_mode(void)
{
    uacpi_status ret;
    uacpi_u64 raw_value;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    if (!fadt->smi_cmd)
        return HW_MODE_ACPI;
    ret = uacpi_read_register_field(UACPI_REGISTER_FIELD_SCI_EN, &raw_value);
    if (uacpi_unlikely_error(ret))
        return HW_MODE_LEGACY;
    return raw_value ? HW_MODE_ACPI : HW_MODE_LEGACY;
}
/*
 * Request a transition to the given mode by writing the appropriate enable/
 * disable command to the SMI command register, then poll (for up to 5
 * seconds) until the hardware reports the new mode.
 */
static uacpi_status set_mode(enum hw_mode mode)
{
    uacpi_status ret;
    uacpi_u64 raw_value, stalled_time = 0;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    if (uacpi_unlikely(!fadt->smi_cmd)) {
        uacpi_error("SMI_CMD is not implemented by the firmware\n");
        return UACPI_STATUS_NOT_FOUND;
    }
    if (uacpi_unlikely(!fadt->acpi_enable && !fadt->acpi_disable)) {
        uacpi_error("mode transition is not implemented by the hardware\n");
        return UACPI_STATUS_NOT_FOUND;
    }
    switch (mode) {
    case HW_MODE_ACPI:
        raw_value = fadt->acpi_enable;
        break;
    case HW_MODE_LEGACY:
        raw_value = fadt->acpi_disable;
        break;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
    ret = uacpi_write_register(UACPI_REGISTER_SMI_CMD, raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    // Allow up to 5 seconds for the hardware to enter the desired mode
    while (stalled_time < (5 * 1000 * 1000)) {
        if (read_mode() == mode)
            return UACPI_STATUS_OK;
        uacpi_kernel_stall(100);
        stalled_time += 100;
    }
    uacpi_error("hardware time out while changing modes\n");
    return UACPI_STATUS_HARDWARE_TIMEOUT;
}
/*
 * Transition to the requested mode unless already there. *did_change (if
 * non-NULL) is only set to UACPI_TRUE when an actual transition succeeded.
 * A no-op on hardware-reduced platforms.
 */
static uacpi_status enter_mode(enum hw_mode mode, uacpi_bool *did_change)
{
    uacpi_status ret;
    const uacpi_char *mode_str;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;
    mode_str = mode == HW_MODE_LEGACY ? "legacy" : "acpi";
    if (read_mode() == mode) {
        uacpi_trace("%s mode already enabled\n", mode_str);
        return UACPI_STATUS_OK;
    }
    ret = set_mode(mode);
    if (uacpi_unlikely_error(ret)) {
        uacpi_warn(
            "unable to enter %s mode: %s\n",
            mode_str, uacpi_status_to_string(ret)
        );
        return ret;
    }
    uacpi_trace("entered %s mode\n", mode_str);
    if (did_change != UACPI_NULL)
        *did_change = UACPI_TRUE;
    return ret;
}
// Transition the platform into ACPI (SCI) mode.
uacpi_status uacpi_enter_acpi_mode(void)
{
    return enter_mode(HW_MODE_ACPI, UACPI_NULL);
}
// Hand control back to legacy (SMM) mode.
uacpi_status uacpi_leave_acpi_mode(void)
{
    return enter_mode(HW_MODE_LEGACY, UACPI_NULL);
}
/*
 * Initial mode switch during uacpi_initialize(); records whether we had to
 * leave legacy mode so uacpi_state_reset() can undo the transition later.
 */
static void enter_acpi_mode_initial(void)
{
    enter_mode(HW_MODE_ACPI, &g_uacpi_rt_ctx.was_in_legacy_mode);
}
#else
// Hardware-reduced platforms are always in ACPI mode, nothing to do
static void enter_acpi_mode_initial(void) { }
#endif
// Report how far uACPI initialization has progressed.
uacpi_init_level uacpi_get_current_init_level(void)
{
    return g_uacpi_rt_ctx.init_level;
}
/*
 * Tear down every subsystem and return the library to its pre-init state.
 * Deinitialization runs roughly in reverse initialization order, then the
 * entire runtime context is wiped.
 */
void uacpi_state_reset(void)
{
    uacpi_deinitialize_namespace();
    uacpi_deinitialize_interfaces();
    uacpi_deinitialize_events();
    uacpi_deinitialize_notify();
    uacpi_deinitialize_opregion();
    uacpi_deininitialize_registers();
    uacpi_deinitialize_tables();
#ifndef UACPI_REDUCED_HARDWARE
    // Undo the initial legacy->ACPI transition if we performed one
    if (g_uacpi_rt_ctx.was_in_legacy_mode)
        uacpi_leave_acpi_mode();
    if (g_uacpi_rt_ctx.global_lock_event)
        uacpi_kernel_free_event(g_uacpi_rt_ctx.global_lock_event);
    if (g_uacpi_rt_ctx.global_lock_spinlock)
        uacpi_kernel_free_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
#endif
    uacpi_memzero(&g_uacpi_rt_ctx, sizeof(g_uacpi_rt_ctx));
#ifdef UACPI_KERNEL_INITIALIZATION
    uacpi_kernel_deinitialize();
#endif
}
/*
 * Bring the library up to UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED: set up
 * runtime defaults, then initialize tables, registers, early events,
 * operation regions, interfaces, the namespace and notify machinery, and
 * finally (unless UACPI_FLAG_NO_ACPI_MODE is set) perform the initial
 * switch into ACPI mode. Any failure fully unwinds via uacpi_state_reset().
 */
uacpi_status uacpi_initialize(uacpi_u64 flags)
{
    uacpi_status ret;
    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY);
#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_EARLY);
    if (uacpi_unlikely_error(ret))
        return ret;
#endif
    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED;
    // SLP_TYP values stay invalid until uacpi_prepare_for_sleep_state()
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.flags = flags;
    uacpi_logger_initialize();
    if (g_uacpi_rt_ctx.loop_timeout_seconds == 0)
        uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS);
    if (g_uacpi_rt_ctx.max_call_stack_depth == 0)
        uacpi_context_set_max_call_stack_depth(UACPI_DEFAULT_MAX_CALL_STACK_DEPTH);
    ret = uacpi_initialize_tables();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    /*
     * BUGFIX: this step used to 'return ret' directly on failure, skipping
     * uacpi_state_reset() and leaking the table state initialized above.
     * Route it through the common fatal-error path like every other step.
     */
    ret = uacpi_ininitialize_registers();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    ret = uacpi_initialize_events_early();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    ret = uacpi_initialize_opregion();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    ret = uacpi_initialize_interfaces();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    ret = uacpi_initialize_namespace();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    ret = uacpi_initialize_notify();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
    // Return value intentionally ignored, failure here is not fatal
    uacpi_install_default_address_space_handlers();
    if (!uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE))
        enter_acpi_mode_initial();
    return UACPI_STATUS_OK;
out_fatal_error:
    uacpi_state_reset();
    return ret;
}
// Counters for table loading performed by uacpi_namespace_load().
struct table_load_stats {
    uacpi_u32 load_counter;
    uacpi_u32 failure_counter;
};
// Log a table load failure at the given severity.
static void trace_table_load_failure(
    struct acpi_sdt_hdr *tbl, uacpi_log_level lvl, uacpi_status ret
)
{
    uacpi_log_lvl(
        lvl,
        "failed to load "UACPI_PRI_TBL_HDR": %s\n",
        UACPI_FMT_TBL_HDR(tbl), uacpi_status_to_string(ret)
    );
}
// Table-iteration predicate: SSDT/PSDT tables that haven't been loaded yet.
static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl)
{
    if (tbl->flags & UACPI_TABLE_LOADED)
        return UACPI_FALSE;
    return uacpi_signatures_match(tbl->hdr.signature, ACPI_SSDT_SIGNATURE) ||
           uacpi_signatures_match(tbl->hdr.signature, ACPI_PSDT_SIGNATURE);
}
// Convert a nanosecond timestamp delta to whole milliseconds.
static uacpi_u64 elapsed_ms(uacpi_u64 begin_ns, uacpi_u64 end_ns)
{
    return (end_ns - begin_ns) / (1000ull * 1000ull);
}
/*
 * Load the DSDT followed by every not-yet-loaded SSDT/PSDT, then bring up
 * event handling. Individual table load failures are counted and logged but
 * do not abort namespace construction; only infrastructure errors (missing
 * DSDT, iteration failure, event init failure) are fatal and trigger a full
 * state reset.
 */
uacpi_status uacpi_namespace_load(void)
{
    struct uacpi_table tbl;
    uacpi_status ret;
    uacpi_u64 begin_ts, end_ts;
    struct table_load_stats st = { 0 };
    uacpi_size cur_index;
    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
#endif
    begin_ts = uacpi_kernel_get_nanoseconds_since_boot();
    ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret));
        goto out_fatal_error;
    }
    ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
    if (uacpi_unlikely_error(ret)) {
        trace_table_load_failure(tbl.hdr, UACPI_LOG_ERROR, ret);
        st.failure_counter++;
    }
    st.load_counter++;
    uacpi_table_unref(&tbl);
    // Load every pending SSDT/PSDT, resuming after the last match each time
    for (cur_index = 0;; cur_index = tbl.index + 1) {
        ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl);
        if (ret != UACPI_STATUS_OK) {
            if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND))
                goto out_fatal_error;
            break;
        }
        ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
        if (uacpi_unlikely_error(ret)) {
            trace_table_load_failure(tbl.hdr, UACPI_LOG_WARN, ret);
            st.failure_counter++;
        }
        st.load_counter++;
        uacpi_table_unref(&tbl);
    }
    end_ts = uacpi_kernel_get_nanoseconds_since_boot();
    if (uacpi_unlikely(st.failure_counter != 0)) {
        uacpi_info(
            "loaded %u AML blob%s in %"UACPI_PRIu64"ms (%u error%s)\n",
            st.load_counter, st.load_counter > 1 ? "s" : "",
            UACPI_FMT64(elapsed_ms(begin_ts, end_ts)), st.failure_counter,
            st.failure_counter > 1 ? "s" : ""
        );
    } else {
        uacpi_u64 ops = g_uacpi_rt_ctx.opcodes_executed;
        uacpi_u64 ops_per_sec = ops * UACPI_NANOSECONDS_PER_SEC;
        if (uacpi_likely(end_ts > begin_ts))
            ops_per_sec /= end_ts - begin_ts;
        uacpi_info(
            "successfully loaded %u AML blob%s, %"UACPI_PRIu64" ops in "
            "%"UACPI_PRIu64"ms (avg %"UACPI_PRIu64"/s)\n",
            st.load_counter, st.load_counter > 1 ? "s" : "",
            UACPI_FMT64(ops), UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
            UACPI_FMT64(ops_per_sec)
        );
    }
    ret = uacpi_initialize_events();
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("event initialization failed: %s\n",
                    uacpi_status_to_string(ret));
        goto out_fatal_error;
    }
    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_LOADED;
    return UACPI_STATUS_OK;
out_fatal_error:
    uacpi_state_reset();
    return ret;
}
/*
 * Statistics accumulated during the namespace-wide _STA/_INI initialization
 * pass performed by uacpi_namespace_initialize(); used only for the summary
 * log messages emitted at the end of that pass.
 */
struct ns_init_context {
    uacpi_size ini_executed;  // number of _INI methods actually invoked
    uacpi_size ini_errors;  // how many of those _INI invocations failed
    uacpi_size sta_executed;  // number of _STA methods actually invoked
    uacpi_size sta_errors;  // how many of those _STA invocations failed
    uacpi_size devices;  // Device/Processor objects seen during the walk
    uacpi_size thermal_zones;  // ThermalZone objects seen during the walk
};
/*
 * Evaluate the _INI method directly under 'node', if one exists, and fold
 * the outcome into the pass statistics. A missing _INI is entirely normal
 * and is not counted as an execution or an error.
 */
static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node)
{
    uacpi_status st = uacpi_eval(node, "_INI", UACPI_NULL, UACPI_NULL);

    if (st != UACPI_STATUS_NOT_FOUND) {
        ctx->ini_executed++;

        if (uacpi_unlikely_error(st))
            ctx->ini_errors++;
    }
}
/*
 * Evaluate _STA for 'node' via uacpi_eval_sta(), recording execution and
 * error counts in the pass statistics. The resulting status bits are stored
 * in *value either way.
 */
static uacpi_status sta_eval(
    struct ns_init_context *ctx, uacpi_namespace_node *node,
    uacpi_u32 *value
)
{
    uacpi_status st = uacpi_eval_sta(node, value);

    /*
     * All-ones appears to be the value uacpi_eval_sta() reports when no
     * _STA method exists at all (TODO confirm against uacpi_eval_sta), in
     * which case nothing was executed and the counters are left alone.
     */
    if (*value != 0xFFFFFFFF) {
        ctx->sta_executed++;

        if (uacpi_unlikely_error(st))
            ctx->sta_errors++;
    }

    return st;
}
/*
 * Per-node callback for the namespace-wide _STA/_INI pass (step 4 of
 * uacpi_namespace_initialize). Counts devices/thermal zones, evaluates
 * _STA to decide whether the node is present, and runs _INI for present
 * nodes. The returned iteration decision controls whether the walk
 * descends into this node's children.
 */
static uacpi_iteration_decision do_sta_ini(
    void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    struct ns_init_context *ctx = opaque;
    uacpi_status ret;
    uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
    uacpi_u32 sta_ret;
    UACPI_UNUSED(depth);
    // We don't care about aliases
    if (uacpi_namespace_node_is_alias(node))
        return UACPI_ITERATION_DECISION_NEXT_PEER;
    ret = uacpi_namespace_node_type(node, &type);
    // NOTE(review): 'ret' is not checked here; on failure 'type' keeps its
    // UACPI_OBJECT_UNINITIALIZED value and is routed to the default case
    // below, which skips the node (unless it is the predefined \_TZ scope).
    switch (type) {
    case UACPI_OBJECT_DEVICE:
    case UACPI_OBJECT_PROCESSOR:
        ctx->devices++;
        break;
    case UACPI_OBJECT_THERMAL_ZONE:
        ctx->thermal_zones++;
        break;
    default:
        // Non-device objects are skipped, with the sole exception of the
        // predefined \_TZ scope, which still gets _STA/_INI treatment
        // (presumably for legacy thermal zone layouts — TODO confirm)
        if (node != uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_TZ))
            return UACPI_ITERATION_DECISION_CONTINUE;
    }
    ret = sta_eval(ctx, node, &sta_ret);
    if (uacpi_unlikely_error(ret))
        return UACPI_ITERATION_DECISION_CONTINUE;
    if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) {
        // Not present and not functioning: prune this entire subtree
        if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING))
            return UACPI_ITERATION_DECISION_NEXT_PEER;
        /*
         * ACPI 6.5 specification:
         * _STA may return bit 0 clear (not present) with bit [3] set (device
         * is functional). This case is used to indicate a valid device for
         * which no device driver should be loaded (for example, a bridge
         * device.) Children of this device may be present and valid. OSPM
         * should continue enumeration below a device whose _STA returns this
         * bit combination.
         */
        return UACPI_ITERATION_DECISION_CONTINUE;
    }
    // Device is present: run its _INI before descending into children
    ini_eval(ctx, node);
    return UACPI_ITERATION_DECISION_CONTINUE;
}
/*
 * Final namespace initialization pass. Executes \_INI and \_SB._INI, runs
 * _REG for all globally installed default address space handlers, then
 * performs a full-tree _STA/_INI walk via do_sta_ini(). On success the
 * init level advances to UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED; requires
 * the namespace to already be loaded.
 */
uacpi_status uacpi_namespace_initialize(void)
{
    struct ns_init_context ctx = { 0 };
    uacpi_namespace_node *root;
    uacpi_u64 begin_ts, end_ts;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *handler;
    uacpi_status ret = UACPI_STATUS_OK;
    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
#ifdef UACPI_KERNEL_INITIALIZATION
    // Give the kernel a chance to do per-level setup before we proceed
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
    if (uacpi_unlikely_error(ret))
        goto out;
#endif
    /*
     * Initialization order here is identical to ACPICA because ACPI
     * specification doesn't really have any detailed steps that explain
     * how to do it.
     */
    root = uacpi_namespace_root();
    begin_ts = uacpi_kernel_get_nanoseconds_since_boot();
    // Step 1 - Execute \_INI
    ini_eval(&ctx, root);
    // Step 2 - Execute \_SB._INI
    ini_eval(
        &ctx, uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SB)
    );
    /*
     * Step 3 - Run _REG methods for all globally installed
     * address space handlers.
     */
    handlers = uacpi_node_get_address_space_handlers(root);
    if (handlers) {
        handler = handlers->head;
        while (handler) {
            if (uacpi_address_space_handler_is_default(handler))
                uacpi_reg_all_opregions(root, handler->space);
            handler = handler->next;
        }
    }
    // Step 4 - Run all other _STA and _INI methods
    uacpi_namespace_for_each_child(
        root, do_sta_ini, UACPI_NULL,
        UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, &ctx
    );
    end_ts = uacpi_kernel_get_nanoseconds_since_boot();
    uacpi_info(
        "namespace initialization done in %"UACPI_PRIu64"ms: "
        "%zu devices, %zu thermal zones\n",
        UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
        ctx.devices, ctx.thermal_zones
    );
    uacpi_trace(
        "_STA calls: %zu (%zu errors), _INI calls: %zu (%zu errors)\n",
        ctx.sta_executed, ctx.sta_errors, ctx.ini_executed,
        ctx.ini_errors
    );
    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED;
#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    // NOTE: the 'out' label (and the state reset on error) only exists in
    // UACPI_KERNEL_INITIALIZATION builds; without it this function has no
    // failure paths past the init-level check.
out:
    if (uacpi_unlikely_error(ret))
        uacpi_state_reset();
#endif
    return ret;
}
/*
 * Evaluate the object at 'path' (resolved relative to 'parent'; either may
 * be NULL, but not both). If the target is a control method it is executed
 * with 'args', and its return value (if any) is stored in *out_obj. If the
 * target is any other object, a deep copy of its current value is returned
 * instead — the caller owns that copy and must release it with
 * uacpi_object_unref(). 'out_obj' may be NULL if the caller does not want
 * a result.
 */
uacpi_status uacpi_eval(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **out_obj
)
{
    struct uacpi_namespace_node *node;
    uacpi_control_method *method;
    uacpi_object *obj;
    uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_unlikely(parent == UACPI_NULL && path == UACPI_NULL))
        return ret;
    // Resolution and object inspection only need the read lock
    ret = uacpi_namespace_read_lock();
    if (uacpi_unlikely_error(ret))
        return ret;
    if (path != UACPI_NULL) {
        ret = uacpi_namespace_node_resolve(
            parent, path, UACPI_SHOULD_LOCK_NO,
            UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_YES,
            &node
        );
        if (uacpi_unlikely_error(ret))
            goto out_read_unlock;
    } else {
        node = parent;
    }
    obj = uacpi_namespace_node_get_object(node);
    if (uacpi_unlikely(obj == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out_read_unlock;
    }
    // Non-method objects: snapshot the value via a deep copy and return it
    // without ever taking the write lock
    if (obj->type != UACPI_OBJECT_METHOD) {
        uacpi_object *new_obj;
        // ret is UACPI_STATUS_OK here, so "no out pointer" is a silent no-op
        if (uacpi_unlikely(out_obj == UACPI_NULL))
            goto out_read_unlock;
        new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
        if (uacpi_unlikely(new_obj == UACPI_NULL)) {
            ret = UACPI_STATUS_OUT_OF_MEMORY;
            goto out_read_unlock;
        }
        ret = uacpi_object_assign(
            new_obj, obj, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_object_unref(new_obj);
            goto out_read_unlock;
        }
        *out_obj = new_obj;
    // NOTE: this label lives inside the non-method branch but is also the
    // unlock-and-return target for the resolution/lookup failures above;
    // the code past this block only ever runs for method objects.
    out_read_unlock:
        uacpi_namespace_read_unlock();
        return ret;
    }
    // Keep the method alive across the lock switch below
    method = obj->method;
    uacpi_shareable_ref(method);
    uacpi_namespace_read_unlock();
    // Upgrade to a write-lock since we're about to run a method
    ret = uacpi_namespace_write_lock();
    if (uacpi_unlikely_error(ret))
        goto out_no_write_lock;
    ret = uacpi_execute_control_method(node, method, args, out_obj);
    uacpi_namespace_write_unlock();
out_no_write_lock:
    uacpi_method_unref(method);
    return ret;
}
/*
 * Convenience wrapper around uacpi_eval() for methods that take no
 * arguments (or plain objects).
 */
uacpi_status uacpi_eval_simple(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    const uacpi_object_array *no_args = UACPI_NULL;

    return uacpi_eval(parent, path, no_args, ret);
}
/*
 * Convenience wrapper around uacpi_eval() for callers that want to run a
 * method for its side effects only, discarding any return value.
 */
uacpi_status uacpi_execute(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args
)
{
    uacpi_object **discard_ret = UACPI_NULL;

    return uacpi_eval(parent, path, args, discard_ret);
}
/*
 * Run an argument-less method for its side effects only: combines
 * uacpi_execute() and uacpi_eval_simple() semantics.
 */
uacpi_status uacpi_execute_simple(
    uacpi_namespace_node *parent, const uacpi_char *path
)
{
    return uacpi_execute(parent, path, UACPI_NULL);
}
/*
 * Warn about a method that returned an object of an unexpected type.
 * 'path_fmt' is a printf-style fragment for the method path, filled in by
 * the variadic arguments, which must end with the expected type mask.
 */
#define TRACE_BAD_RET(path_fmt, type, ...) \
    uacpi_warn( \
        "unexpected '%s' object returned by method "path_fmt \
        ", expected type mask: %08X\n", uacpi_object_type_to_string(type), \
        __VA_ARGS__ \
    )
/*
 * Warn about a method that was expected to return a value but returned
 * nothing. Same argument convention as TRACE_BAD_RET, minus the type.
 */
#define TRACE_NO_RET(path_fmt, ...) \
    uacpi_warn( \
        "no value returned from method "path_fmt", expected type mask: " \
        "%08X\n", __VA_ARGS__ \
    )
/*
 * Log a warning for uacpi_eval_typed() when a method returned nothing
 * (actual_type == UACPI_OBJECT_UNINITIALIZED) or an object outside of
 * 'expected_mask'. Reconstructs a printable path from 'parent'/'path' the
 * same way uacpi_eval() interprets them.
 */
static void trace_invalid_return_type(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_object_type_bits expected_mask, uacpi_object_type actual_type
)
{
    const uacpi_char *abs_path;
    // Tracks whether abs_path was heap-generated and must be freed below
    uacpi_bool dynamic_abs_path = UACPI_FALSE;
    if (parent == UACPI_NULL || (path != UACPI_NULL && path[0] == '\\')) {
        // 'path' is already absolute (or is all we have) — print it as-is
        abs_path = path;
    } else {
        abs_path = uacpi_namespace_node_generate_absolute_path(parent);
        dynamic_abs_path = UACPI_TRUE;
    }
    if (dynamic_abs_path && path != UACPI_NULL) {
        // Relative lookup: print "<parent-abs-path>.<relative-path>"
        if (actual_type == UACPI_OBJECT_UNINITIALIZED)
            TRACE_NO_RET("%s.%s", abs_path, path, expected_mask);
        else
            TRACE_BAD_RET("%s.%s", actual_type, abs_path, path, expected_mask);
    } else {
        if (actual_type == UACPI_OBJECT_UNINITIALIZED) {
            TRACE_NO_RET("%s", abs_path, expected_mask);
        } else {
            TRACE_BAD_RET("%s", actual_type, abs_path, expected_mask);
        }
    }
    if (dynamic_abs_path)
        uacpi_free_dynamic_string(abs_path);
}
/*
 * Evaluate 'path' relative to 'parent' like uacpi_eval(), additionally
 * verifying that the returned object's type is within 'ret_mask' (a zero
 * mask disables the check). On a mismatch the object is released, a
 * warning is logged, and UACPI_STATUS_TYPE_MISMATCH is returned; otherwise
 * ownership of the result is handed to the caller via *out_obj.
 */
uacpi_status uacpi_eval_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object_type_bits ret_mask,
    uacpi_object **out_obj
)
{
    uacpi_object *ret_obj;
    uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
    uacpi_status st;

    // Unlike plain uacpi_eval(), a destination pointer is mandatory here
    if (uacpi_unlikely(out_obj == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    st = uacpi_eval(parent, path, args, &ret_obj);
    if (uacpi_unlikely_error(st))
        return st;

    // A NULL object means the method returned nothing; keep the
    // UNINITIALIZED placeholder type for the mask check below
    if (ret_obj != UACPI_NULL)
        type = ret_obj->type;

    if (ret_mask != 0 && !(ret_mask & (1 << type))) {
        trace_invalid_return_type(parent, path, ret_mask, type);
        uacpi_object_unref(ret_obj);
        return UACPI_STATUS_TYPE_MISMATCH;
    }

    *out_obj = ret_obj;
    return UACPI_STATUS_OK;
}
/*
 * Argument-less variant of uacpi_eval_typed().
 */
uacpi_status uacpi_eval_simple_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_object_type_bits ret_mask, uacpi_object **ret
)
{
    const uacpi_object_array *no_args = UACPI_NULL;

    return uacpi_eval_typed(parent, path, no_args, ret_mask, ret);
}
/*
 * Evaluate 'path' expecting an Integer result and store its value in
 * *out_value. The intermediate object is released before returning, so the
 * caller only ever sees the plain integer.
 */
uacpi_status uacpi_eval_integer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_u64 *out_value
)
{
    uacpi_object *obj;
    uacpi_status st = uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_INTEGER_BIT, &obj
    );

    if (uacpi_unlikely_error(st))
        return st;

    *out_value = obj->integer;
    uacpi_object_unref(obj);

    return UACPI_STATUS_OK;
}
/*
 * Argument-less variant of uacpi_eval_integer().
 */
uacpi_status uacpi_eval_simple_integer(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value
)
{
    return uacpi_eval_integer(parent, path, UACPI_NULL, out_value);
}
/*
 * Evaluate 'path' requiring the result to be either a Buffer or a String.
 */
uacpi_status uacpi_eval_buffer_or_string(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    uacpi_object_type_bits mask =
        UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Argument-less variant of uacpi_eval_buffer_or_string().
 */
uacpi_status uacpi_eval_simple_buffer_or_string(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_buffer_or_string(parent, path, UACPI_NULL, ret);
}
/*
 * Evaluate 'path' requiring the result to be a String.
 */
uacpi_status uacpi_eval_string(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    uacpi_object_type_bits mask = UACPI_OBJECT_STRING_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Argument-less variant of uacpi_eval_string().
 */
uacpi_status uacpi_eval_simple_string(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_string(parent, path, UACPI_NULL, ret);
}
/*
 * Evaluate 'path' requiring the result to be a Buffer.
 */
uacpi_status uacpi_eval_buffer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    uacpi_object_type_bits mask = UACPI_OBJECT_BUFFER_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Argument-less variant of uacpi_eval_buffer().
 */
uacpi_status uacpi_eval_simple_buffer(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_buffer(parent, path, UACPI_NULL, ret);
}
/*
 * Evaluate 'path' requiring the result to be a Package.
 */
uacpi_status uacpi_eval_package(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    uacpi_object_type_bits mask = UACPI_OBJECT_PACKAGE_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Argument-less variant of uacpi_eval_package().
 */
uacpi_status uacpi_eval_simple_package(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_package(parent, path, UACPI_NULL, ret);
}
/*
 * Report the integer width of the loaded AML: 32-bit for revision-1 tables,
 * 64-bit otherwise. Requires the subsystem to be initialized.
 */
uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness)
{
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (g_uacpi_rt_ctx.is_rev1)
        *out_bitness = 32;
    else
        *out_bitness = 64;

    return UACPI_STATUS_OK;
}

1139
src/uacpi/utilities.c Normal file

File diff suppressed because it is too large Load diff