Integrate uACPI

This commit is contained in:
2025-08-17 18:37:57 +02:00
parent 069870cd0d
commit 92ccd189e7
166 changed files with 42104 additions and 33 deletions

View File

@ -0,0 +1,336 @@
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/io.h>
#include <uacpi/kernel_api.h>
#include <uacpi/uacpi.h>
#ifndef UACPI_BAREBONES_MODE
#define PCI_ROOT_PNP_ID "PNP0A03"
#define PCI_EXPRESS_ROOT_PNP_ID "PNP0A08"
/*
 * Walk up from a PciConfig region node looking for the PCI(e) host bridge
 * that controls it. Falls back to returning the region node itself (with a
 * trace) if no root is found below the namespace root.
 */
static uacpi_namespace_node *find_pci_root(uacpi_namespace_node *node)
{
    static const uacpi_char *pci_root_ids[] = {
        PCI_ROOT_PNP_ID,
        PCI_EXPRESS_ROOT_PNP_ID,
        UACPI_NULL
    };
    uacpi_namespace_node *it;

    for (it = node->parent; it != uacpi_namespace_root(); it = it->parent) {
        if (!uacpi_device_matches_pnp_id(it, pci_root_ids))
            continue;

        uacpi_trace(
            "found a PCI root node %.4s controlling region %.4s\n",
            it->name.text, node->name.text
        );
        return it;
    }

    uacpi_trace_region_error(
        node, "unable to find PCI root controlling",
        UACPI_STATUS_NOT_FOUND
    );
    return node;
}
/*
 * ATTACH handler for PciConfig operation regions.
 *
 * Resolves the PCI address (segment/bus/device/function) of the device
 * owning the region by evaluating _ADR on the owning device and _SEG/_BBN
 * on the controlling PCI root, then opens a kernel handle to the device
 * and stores it in data->out_region_context.
 */
static uacpi_status pci_region_attach(uacpi_region_attach_data *data)
{
    uacpi_namespace_node *node, *pci_root, *device;
    uacpi_pci_address address = { 0 };
    uacpi_u64 value;
    uacpi_status ret;

    node = data->region_node;
    pci_root = find_pci_root(node);

    /*
     * Find the actual device object that is supposed to be controlling
     * this operation region.
     */
    device = node;
    while (device) {
        uacpi_object_type type;

        ret = uacpi_namespace_node_type(device, &type);
        if (uacpi_unlikely_error(ret))
            return ret;

        if (type == UACPI_OBJECT_DEVICE)
            break;

        device = device->parent;
    }

    if (uacpi_unlikely(device == UACPI_NULL)) {
        ret = UACPI_STATUS_NOT_FOUND;
        uacpi_trace_region_error(
            node, "unable to find device responsible for", ret
        );
        return ret;
    }

    // _ADR packs the device number in the high word, function in the low
    ret = uacpi_eval_simple_integer(device, "_ADR", &value);
    if (ret == UACPI_STATUS_OK) {
        address.function = (value >> 0) & 0xFF;
        address.device = (value >> 16) & 0xFF;
    }

    // _SEG/_BBN are optional; on failure segment/bus stay at 0
    ret = uacpi_eval_simple_integer(pci_root, "_SEG", &value);
    if (ret == UACPI_STATUS_OK)
        address.segment = value;

    ret = uacpi_eval_simple_integer(pci_root, "_BBN", &value);
    if (ret == UACPI_STATUS_OK)
        address.bus = value;

    uacpi_trace(
        "detected PCI device %.4s@%04X:%02X:%02X:%01X\n",
        device->name.text, address.segment, address.bus,
        address.device, address.function
    );

    return uacpi_kernel_pci_device_open(address, &data->out_region_context);
}
// DETACH handler: close the kernel PCI handle opened at attach time
static uacpi_status pci_region_detach(uacpi_region_detach_data *data)
{
    uacpi_handle dev = data->region_context;

    uacpi_kernel_pci_device_close(dev);
    return UACPI_STATUS_OK;
}
// Dispatch a read or write of a PciConfig region to the kernel PCI helpers
static uacpi_status pci_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    uacpi_handle dev = data->region_context;

    if (op == UACPI_REGION_OP_READ) {
        return uacpi_pci_read(
            dev, data->offset, data->byte_width, &data->value
        );
    }

    return uacpi_pci_write(dev, data->offset, data->byte_width, data->value);
}
// Top-level PciConfig address space handler installed on the root
static uacpi_status handle_pci_region(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_READ:
    case UACPI_REGION_OP_WRITE:
        return pci_region_do_rw(op, op_data);
    case UACPI_REGION_OP_ATTACH:
        return pci_region_attach(op_data);
    case UACPI_REGION_OP_DETACH:
        return pci_region_detach(op_data);
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}
// Per-region state for SystemMemory operation regions
struct memory_region_ctx {
    uacpi_phys_addr phys;  // physical base address of the region
    uacpi_u8 *virt;        // virtual mapping covering the whole region
    uacpi_size size;       // region length in bytes
};
/*
 * ATTACH handler for SystemMemory regions: allocate a context and map the
 * entire region into virtual memory up front.
 */
static uacpi_status memory_region_attach(uacpi_region_attach_data *data)
{
    struct memory_region_ctx *ctx;

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (ctx == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    ctx->size = data->generic_info.length;
    ctx->phys = data->generic_info.base;

    // FIXME: this really shouldn't try to map everything at once
    ctx->virt = uacpi_kernel_map(ctx->phys, ctx->size);
    if (uacpi_unlikely(ctx->virt == UACPI_NULL)) {
        uacpi_trace_region_error(
            data->region_node, "unable to map", UACPI_STATUS_MAPPING_FAILED
        );
        uacpi_free(ctx, sizeof(*ctx));
        return UACPI_STATUS_MAPPING_FAILED;
    }

    data->out_region_context = ctx;
    return UACPI_STATUS_OK;
}
// DETACH handler: undo the mapping and free the context from attach
static uacpi_status memory_region_detach(uacpi_region_detach_data *data)
{
    struct memory_region_ctx *ctx = data->region_context;

    uacpi_kernel_unmap(ctx->virt, ctx->size);
    uacpi_free(ctx, sizeof(*ctx));

    return UACPI_STATUS_OK;
}
// Per-region state for SystemIO operation regions
struct io_region_ctx {
    uacpi_io_addr base;   // port base, subtracted from absolute rw offsets
    uacpi_handle handle;  // kernel handle from uacpi_kernel_io_map()
};
/*
 * ATTACH handler for SystemIO regions: map the port range via the kernel
 * and remember the base so rw offsets can be made handle-relative.
 */
static uacpi_status io_region_attach(uacpi_region_attach_data *data)
{
    uacpi_generic_region_info *info = &data->generic_info;
    struct io_region_ctx *ctx;
    uacpi_status ret;

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (ctx == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    ctx->base = info->base;

    ret = uacpi_kernel_io_map(ctx->base, info->length, &ctx->handle);
    if (uacpi_unlikely_error(ret)) {
        uacpi_trace_region_error(
            data->region_node, "unable to map an IO", ret
        );
        uacpi_free(ctx, sizeof(*ctx));
        return ret;
    }

    data->out_region_context = ctx;
    return ret;
}
// DETACH handler: release the IO mapping and free the context
static uacpi_status io_region_detach(uacpi_region_detach_data *data)
{
    struct io_region_ctx *ctx = data->region_context;

    uacpi_kernel_io_unmap(ctx->handle);
    uacpi_free(ctx, sizeof(*ctx));

    return UACPI_STATUS_OK;
}
// Read/write a SystemMemory region through its attach-time virtual mapping
static uacpi_status memory_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    struct memory_region_ctx *ctx = data->region_context;
    uacpi_size off = data->address - ctx->phys;

    if (op == UACPI_REGION_OP_READ) {
        return uacpi_system_memory_read(
            ctx->virt, off, data->byte_width, &data->value
        );
    }

    return uacpi_system_memory_write(
        ctx->virt, off, data->byte_width, data->value
    );
}
// Top-level SystemMemory address space handler installed on the root
static uacpi_status handle_memory_region(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_READ:
    case UACPI_REGION_OP_WRITE:
        return memory_region_do_rw(op, op_data);
    case UACPI_REGION_OP_ATTACH:
        return memory_region_attach(op_data);
    case UACPI_REGION_OP_DETACH:
        return memory_region_detach(op_data);
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}
// For table-data regions the rw "offset" already is a full virtual address
static uacpi_status table_data_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    void *addr = UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)data->offset);

    if (op == UACPI_REGION_OP_READ)
        return uacpi_system_memory_read(addr, 0, data->byte_width, &data->value);

    return uacpi_system_memory_write(addr, 0, data->byte_width, data->value);
}
// Top-level TableData address space handler installed on the root
static uacpi_status handle_table_data_region(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_READ:
    case UACPI_REGION_OP_WRITE:
        return table_data_region_do_rw(op, op_data);
    // No per-region state to set up or tear down for table data
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
        return UACPI_STATUS_OK;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}
// Read/write a SystemIO region relative to its mapped port base
static uacpi_status io_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    struct io_region_ctx *ctx = data->region_context;
    uacpi_size off = data->offset - ctx->base;

    if (op == UACPI_REGION_OP_READ) {
        return uacpi_system_io_read(
            ctx->handle, off, data->byte_width, &data->value
        );
    }

    return uacpi_system_io_write(
        ctx->handle, off, data->byte_width, data->value
    );
}
// Top-level SystemIO address space handler installed on the root
static uacpi_status handle_io_region(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_READ:
    case UACPI_REGION_OP_WRITE:
        return io_region_do_rw(op, op_data);
    case UACPI_REGION_OP_ATTACH:
        return io_region_attach(op_data);
    case UACPI_REGION_OP_DETACH:
        return io_region_detach(op_data);
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}
/*
 * Install the default handlers for the SystemMemory, SystemIO, PciConfig
 * and TableData address spaces on the namespace root.
 *
 * Return values of the individual installs are ignored here; a failed
 * install leaves that address space without a default handler.
 */
void uacpi_install_default_address_space_handlers(void)
{
    uacpi_namespace_node *root;

    root = uacpi_namespace_root();

    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_SYSTEM_MEMORY,
        handle_memory_region, UACPI_NULL,
        UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );

    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_SYSTEM_IO,
        handle_io_region, UACPI_NULL,
        UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );

    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_PCI_CONFIG,
        handle_pci_region, UACPI_NULL,
        UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );

    uacpi_install_address_space_handler_with_flags(
        root, UACPI_ADDRESS_SPACE_TABLE_DATA,
        handle_table_data_region, UACPI_NULL,
        UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,21 @@
# Core uACPI translation units registered with the consuming project's
# build via the uacpi_add_sources helper.
uacpi_add_sources(
    tables.c
    types.c
    uacpi.c
    utilities.c
    interpreter.c
    opcodes.c
    namespace.c
    stdlib.c
    shareable.c
    opregion.c
    default_handlers.c
    io.c
    notify.c
    sleep.c
    registers.c
    resources.c
    event.c
    mutex.c
    osi.c
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,396 @@
#include <uacpi/platform/atomic.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/context.h>
#include <uacpi/kernel_api.h>
#include <uacpi/internal/namespace.h>
#ifndef UACPI_BAREBONES_MODE
#ifndef UACPI_REDUCED_HARDWARE
#define GLOBAL_LOCK_PENDING (1 << 0)
#define GLOBAL_LOCK_OWNED_BIT 1
#define GLOBAL_LOCK_OWNED (1 << GLOBAL_LOCK_OWNED_BIT)
#define GLOBAL_LOCK_MASK 3u
/*
 * One attempt at taking the ACPI global lock out of the FACS-resident lock
 * dword, following the spec-defined protocol: atomically set the owned
 * bit, and if the lock was already owned, set the pending bit so the
 * current owner signals us on release.
 *
 * Returns UACPI_TRUE if the lock was acquired, UACPI_FALSE if it was
 * owned (in which case the pending bit is now set).
 */
static uacpi_bool try_acquire_global_lock_from_firmware(uacpi_u32 *lock)
{
    uacpi_u32 value, new_value;
    uacpi_bool was_owned;

    value = *(volatile uacpi_u32*)lock;
    do {
        was_owned = (value & GLOBAL_LOCK_OWNED) >> GLOBAL_LOCK_OWNED_BIT;

        // Clear both owned & pending bits.
        new_value = value & ~GLOBAL_LOCK_MASK;

        // Set owned unconditionally
        new_value |= GLOBAL_LOCK_OWNED;

        // Set pending iff the lock was owned at the time of reading
        if (was_owned)
            new_value |= GLOBAL_LOCK_PENDING;
    } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));

    return !was_owned;
}
/*
 * Release the FACS global lock by atomically clearing both the owned and
 * pending bits. Returns whether the pending bit was set at release time,
 * i.e. whether firmware must be told (via GBL_RLS) that the lock is free.
 */
static uacpi_bool do_release_global_lock_to_firmware(uacpi_u32 *lock)
{
    uacpi_u32 value, new_value;

    value = *(volatile uacpi_u32*)lock;
    do {
        new_value = value & ~GLOBAL_LOCK_MASK;
    } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));

    return value & GLOBAL_LOCK_PENDING;
}
/*
 * Acquire the firmware (FACS) half of the global lock.
 *
 * Loops attempting the lock under the global lock spinlock; when the lock
 * is owned by firmware, the pending flag is recorded, the spinlock is
 * dropped and we block on global_lock_event (with a timeout) until a
 * release notification arrives, then retry. Gives up with
 * HARDWARE_TIMEOUT after 0xFFFF attempts.
 *
 * Returns OK immediately on systems without a FACS global lock.
 */
static uacpi_status uacpi_acquire_global_lock_from_firmware(void)
{
    uacpi_cpu_flags flags;
    uacpi_u16 spins = 0;
    uacpi_bool success;

    if (!g_uacpi_rt_ctx.has_global_lock)
        return UACPI_STATUS_OK;

    flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
    for (;;) {
        spins++;
        uacpi_trace(
            "trying to acquire the global lock from firmware... (attempt %u)\n",
            spins
        );

        success = try_acquire_global_lock_from_firmware(
            &g_uacpi_rt_ctx.facs->global_lock
        );
        if (success)
            break;

        // Give up once the 16-bit attempt counter is about to wrap
        if (uacpi_unlikely(spins == 0xFFFF))
            break;

        g_uacpi_rt_ctx.global_lock_pending = UACPI_TRUE;
        uacpi_trace(
            "global lock is owned by firmware, waiting for a release "
            "notification...\n"
        );

        // Drop the spinlock while blocked so the release path can run
        uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
        uacpi_kernel_wait_for_event(g_uacpi_rt_ctx.global_lock_event, 0xFFFF);
        flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
    }

    g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
    uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);

    if (uacpi_unlikely(!success)) {
        uacpi_error("unable to acquire global lock after %u attempts\n", spins);
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    uacpi_trace("global lock successfully acquired after %u attempt%s\n",
                spins, spins > 1 ? "s" : "");
    return UACPI_STATUS_OK;
}
static void uacpi_release_global_lock_to_firmware(void)
{
if (!g_uacpi_rt_ctx.has_global_lock)
return;
uacpi_trace("releasing the global lock to firmware...\n");
if (do_release_global_lock_to_firmware(&g_uacpi_rt_ctx.facs->global_lock)) {
uacpi_trace("notifying firmware of the global lock release since the "
"pending bit was set\n");
uacpi_write_register_field(UACPI_REGISTER_FIELD_GBL_RLS, 1);
}
}
#endif
UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_acquire_global_lock_from_firmware(void)
)
UACPI_STUB_IF_REDUCED_HARDWARE(
void uacpi_release_global_lock_to_firmware(void)
)
/*
 * Acquire a kernel-provided mutex with a timeout, logging any failure
 * status that isn't an expected timeout of a finite wait.
 *
 * A timeout value of 0xFFFF means "wait forever"; in that case even a
 * TIMEOUT status is considered unexpected and is logged.
 */
uacpi_status uacpi_acquire_native_mutex_with_timeout(
    uacpi_handle mtx, uacpi_u16 timeout
)
{
    uacpi_status ret;

    if (uacpi_unlikely(mtx == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_kernel_acquire_mutex(mtx, timeout);
    if (uacpi_likely_success(ret))
        return ret;

    if (uacpi_unlikely(ret != UACPI_STATUS_TIMEOUT || timeout == 0xFFFF)) {
        uacpi_error(
            "unexpected status %08X (%s) while acquiring %p (timeout=%04X)\n",
            ret, uacpi_status_to_string(ret), mtx, timeout
        );
    }

    return ret;
}
/*
 * Public API: acquire the ACPI global lock.
 *
 * Serializes OS-side holders via the global lock mutex, then takes the
 * firmware half from the FACS. On success *out_seq receives a sequence
 * number identifying this acquisition, to be passed back to
 * uacpi_release_global_lock().
 */
uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(out_seq == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_acquire_native_mutex_with_timeout(
        g_uacpi_rt_ctx.global_lock_mutex->handle, timeout
    );
    if (ret != UACPI_STATUS_OK)
        return ret;

    // Undo the OS-side lock if firmware acquisition fails
    ret = uacpi_acquire_global_lock_from_firmware();
    if (uacpi_unlikely_error(ret)) {
        uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);
        return ret;
    }

    // Never hand out the reserved 0xFFFFFFFF value; wrap back to zero
    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_seq_num == 0xFFFFFFFF))
        g_uacpi_rt_ctx.global_lock_seq_num = 0;

    // Hands out the pre-increment value, then advances the counter
    *out_seq = g_uacpi_rt_ctx.global_lock_seq_num++;
    g_uacpi_rt_ctx.global_lock_acquired = UACPI_TRUE;
    return UACPI_STATUS_OK;
}
/*
 * Public API: release the ACPI global lock acquired with
 * uacpi_acquire_global_lock().
 *
 * seq must be the sequence number that acquire handed out; anything else
 * (or releasing while not held) yields UACPI_STATUS_INVALID_ARGUMENT.
 *
 * BUGFIX: the acquire path hands out the *pre*-increment value of
 * global_lock_seq_num, so the current holder's number is always one less
 * than the stored counter. The previous check compared seq against the
 * post-increment counter directly, which could never match a value
 * returned by uacpi_acquire_global_lock(), making every legitimate
 * release fail.
 */
uacpi_status uacpi_release_global_lock(uacpi_u32 seq)
{
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(!g_uacpi_rt_ctx.global_lock_acquired ||
                       (seq + 1) != g_uacpi_rt_ctx.global_lock_seq_num))
        return UACPI_STATUS_INVALID_ARGUMENT;

    g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE;
    uacpi_release_global_lock_to_firmware();
    uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);

    return UACPI_STATUS_OK;
}
// Whether the calling thread is the current owner of an AML-level mutex
uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex)
{
    return UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) ==
           uacpi_kernel_get_thread_id();
}
/*
 * Acquire an AML-level (Mutex object) mutex with recursion support.
 *
 * If the calling thread already owns the mutex only the recursion depth is
 * bumped (capped at 0xFFFF -> DENIED). Otherwise the namespace write lock
 * is dropped for the duration of the potentially-blocking native acquire
 * and re-taken before returning. Acquiring the global lock mutex also
 * acquires the firmware global lock.
 */
uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout)
{
    uacpi_thread_id this_id;
    uacpi_status ret = UACPI_STATUS_OK;

    this_id = uacpi_kernel_get_thread_id();

    // Recursive acquire by the current owner: no blocking needed
    if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) {
        if (uacpi_unlikely(mutex->depth == 0xFFFF)) {
            uacpi_warn(
                "failing an attempt to acquire mutex @%p, too many recursive "
                "acquires\n", mutex
            );
            return UACPI_STATUS_DENIED;
        }

        mutex->depth++;
        return ret;
    }

    // Let other threads use the namespace while we may block below
    uacpi_namespace_write_unlock();

    ret = uacpi_acquire_native_mutex_with_timeout(mutex->handle, timeout);
    if (ret != UACPI_STATUS_OK)
        goto out;

    // The global lock mutex also requires the firmware-side lock
    if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) {
        ret = uacpi_acquire_global_lock_from_firmware();
        if (uacpi_unlikely_error(ret)) {
            uacpi_release_native_mutex(mutex->handle);
            goto out;
        }
    }

    UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id);
    mutex->depth = 1;

out:
    uacpi_namespace_write_lock();
    return ret;
}
/*
 * Release one level of an AML-level mutex. The native mutex (and, for the
 * global lock mutex, the firmware global lock) is only released once the
 * recursion depth drops to zero.
 *
 * NOTE(review): ownership is not re-validated here; callers appear to be
 * expected to check uacpi_this_thread_owns_aml_mutex() first — confirm.
 */
uacpi_status uacpi_release_aml_mutex(uacpi_mutex *mutex)
{
    if (mutex->depth-- > 1)
        return UACPI_STATUS_OK;

    if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle)
        uacpi_release_global_lock_to_firmware();

    UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE);
    uacpi_release_native_mutex(mutex->handle);

    return UACPI_STATUS_OK;
}
/*
 * Initialize a recursive lock: allocates the backing kernel mutex and
 * resets owner/depth. Returns OUT_OF_MEMORY if mutex creation fails (the
 * lock's mutex field is left NULL in that case).
 */
uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock)
{
    lock->mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    lock->owner = UACPI_THREAD_ID_NONE;
    lock->depth = 0;
    return UACPI_STATUS_OK;
}
/*
 * Destroy a recursive lock. Warns and force-resets the depth if the lock
 * is still held. Safe to call on a partially-initialized lock (a NULL
 * mutex is skipped).
 */
uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock)
{
    if (uacpi_unlikely(lock->depth)) {
        uacpi_warn(
            "de-initializing active recursive lock %p with depth=%zu\n",
            lock, lock->depth
        );
        lock->depth = 0;
    }

    lock->owner = UACPI_THREAD_ID_NONE;

    if (lock->mutex != UACPI_NULL) {
        uacpi_kernel_free_mutex(lock->mutex);
        lock->mutex = UACPI_NULL;
    }

    return UACPI_STATUS_OK;
}
/*
 * Acquire a recursive lock. Re-entry by the owning thread just bumps the
 * depth; first-time acquisition blocks on the backing native mutex.
 */
uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock)
{
    uacpi_thread_id self = uacpi_kernel_get_thread_id();
    uacpi_status ret;

    // Fast path: we already hold the lock, just track the extra level
    if (UACPI_ATOMIC_LOAD_THREAD_ID(&lock->owner) == self) {
        lock->depth++;
        return UACPI_STATUS_OK;
    }

    ret = uacpi_acquire_native_mutex(lock->mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, self);
    lock->depth = 1;
    return ret;
}
/*
 * Release one level of a recursive lock; the native mutex is only released
 * once the outermost acquire is undone (depth reaches zero).
 */
uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock)
{
    if (lock->depth-- > 1)
        return UACPI_STATUS_OK;

    UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, UACPI_THREAD_ID_NONE);
    return uacpi_release_native_mutex(lock->mutex);
}
/*
 * Initialize a two-mutex readers/writer lock: read_mutex guards the reader
 * count, write_mutex provides writer exclusion. Rolls back the first
 * allocation if the second fails.
 */
uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock)
{
    lock->read_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->read_mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    lock->write_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->write_mutex == UACPI_NULL)) {
        uacpi_kernel_free_mutex(lock->read_mutex);
        lock->read_mutex = UACPI_NULL;
        return UACPI_STATUS_OUT_OF_MEMORY;
    }

    lock->num_readers = 0;
    return UACPI_STATUS_OK;
}
/*
 * Destroy a readers/writer lock. Warns and resets the reader count if
 * readers are still active. Safe on a partially-initialized lock (NULL
 * mutexes are skipped).
 */
uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock)
{
    if (uacpi_unlikely(lock->num_readers)) {
        uacpi_warn("de-initializing rw_lock %p with %zu active readers\n",
                   lock, lock->num_readers);
        lock->num_readers = 0;
    }

    if (lock->read_mutex != UACPI_NULL) {
        uacpi_kernel_free_mutex(lock->read_mutex);
        lock->read_mutex = UACPI_NULL;
    }

    if (lock->write_mutex != UACPI_NULL) {
        uacpi_kernel_free_mutex(lock->write_mutex);
        lock->write_mutex = UACPI_NULL;
    }

    return UACPI_STATUS_OK;
}
/*
 * Acquire the lock for reading.
 *
 * Classic two-mutex scheme: read_mutex protects the reader count, and the
 * first reader in takes write_mutex to exclude writers. If taking
 * write_mutex fails, the reader count is rolled back to zero and the
 * error is propagated.
 */
uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock)
{
    uacpi_status ret;

    ret = uacpi_acquire_native_mutex(lock->read_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (lock->num_readers++ == 0) {
        ret = uacpi_acquire_native_mutex(lock->write_mutex);
        if (uacpi_unlikely_error(ret))
            lock->num_readers = 0;
    }

    uacpi_kernel_release_mutex(lock->read_mutex);
    return ret;
}
/*
 * Drop a read-side hold. The last reader out releases write_mutex, letting
 * writers in again.
 */
uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock)
{
    uacpi_status ret;

    ret = uacpi_acquire_native_mutex(lock->read_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (lock->num_readers-- == 1)
        uacpi_release_native_mutex(lock->write_mutex);

    uacpi_kernel_release_mutex(lock->read_mutex);
    return ret;
}
/*
 * Writer side: taking write_mutex directly excludes both other writers and
 * readers (whose first reader holds write_mutex while any are active).
 */
uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock)
{
    return uacpi_acquire_native_mutex(lock->write_mutex);
}
// Drop writer exclusivity taken via uacpi_rw_lock_write()
uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock)
{
    return uacpi_release_native_mutex(lock->write_mutex);
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,255 @@
#include <uacpi/internal/notify.h>
#include <uacpi/internal/shareable.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/kernel_api.h>
#ifndef UACPI_BAREBONES_MODE
static uacpi_handle notify_mutex;
// Create the mutex serializing all notify install/uninstall/dispatch paths
uacpi_status uacpi_initialize_notify(void)
{
    notify_mutex = uacpi_kernel_create_mutex();

    return uacpi_unlikely(notify_mutex == UACPI_NULL) ?
        UACPI_STATUS_OUT_OF_MEMORY : UACPI_STATUS_OK;
}
// Tear down the notify mutex; safe to call even if init never ran
void uacpi_deinitialize_notify(void)
{
    if (notify_mutex != UACPI_NULL)
        uacpi_kernel_free_mutex(notify_mutex);

    notify_mutex = UACPI_NULL;
}
// Deferred-delivery payload handed to the work queue by uacpi_notify_all()
struct notification_ctx {
    uacpi_namespace_node *node;  // target node (holds an extra ref)
    uacpi_u64 value;             // notification value from AML Notify()
    uacpi_object *node_object;   // node's backing object (holds an extra ref)
};
// Drop the references taken in uacpi_notify_all() and free the context
static void free_notification_ctx(struct notification_ctx *ctx)
{
    uacpi_namespace_node_release_object(ctx->node_object);
    uacpi_namespace_node_unref(ctx->node);
    uacpi_free(ctx, sizeof(*ctx));
}
/*
 * Work-queue callback: delivers one notification to every handler on the
 * target node's list, then to every handler on the root object's list
 * (root handlers receive notifications for all nodes). Frees the context
 * when both lists are exhausted.
 */
static void do_notify(uacpi_handle opaque)
{
    struct notification_ctx *ctx = opaque;
    uacpi_device_notify_handler *handler;
    uacpi_bool did_notify_root = UACPI_FALSE;

    handler = ctx->node_object->handlers->notify_head;
    for (;;) {
        if (handler == UACPI_NULL) {
            // Node list done; switch to the root list exactly once
            if (did_notify_root) {
                free_notification_ctx(ctx);
                return;
            }

            handler = g_uacpi_rt_ctx.root_object->handlers->notify_head;
            did_notify_root = UACPI_TRUE;
            continue;
        }

        handler->callback(handler->user_context, ctx->node, ctx->value);
        handler = handler->next;
    }
}
/*
 * Queue delivery of Notify(node, value) to all matching handlers.
 *
 * Only device/thermal-zone/processor nodes are valid targets. Returns
 * NO_HANDLER if neither the node nor the root has notify handlers.
 * Delivery happens asynchronously via a UACPI_WORK_NOTIFICATION work item;
 * references to the node and its object are held until that work runs.
 */
uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value)
{
    uacpi_status ret;
    struct notification_ctx *ctx;
    uacpi_object *node_object;

    node_object = uacpi_namespace_node_get_object_typed(
        node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
        UACPI_OBJECT_PROCESSOR_BIT
    );
    if (uacpi_unlikely(node_object == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Nothing to deliver to: neither this node nor the root has handlers
    if (node_object->handlers->notify_head == UACPI_NULL &&
        g_uacpi_rt_ctx.root_object->handlers->notify_head == UACPI_NULL) {
        ret = UACPI_STATUS_NO_HANDLER;
        goto out;
    }

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (uacpi_unlikely(ctx == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    ctx->node = node;
    // In case this node goes out of scope
    uacpi_shareable_ref(node);

    ctx->value = value;
    ctx->node_object = uacpi_namespace_node_get_object(node);
    uacpi_object_ref(ctx->node_object);

    ret = uacpi_kernel_schedule_work(UACPI_WORK_NOTIFICATION, do_notify, ctx);
    if (uacpi_unlikely_error(ret)) {
        uacpi_warn("unable to schedule notification work: %s\n",
                   uacpi_status_to_string(ret));
        // Scheduling failed, undo the refs taken above
        free_notification_ctx(ctx);
    }

out:
    uacpi_release_native_mutex(notify_mutex);
    return ret;
}
// Find the list node wrapping target_handler, or UACPI_NULL if absent
static uacpi_device_notify_handler *handler_container(
    uacpi_handlers *handlers, uacpi_notify_handler target_handler
)
{
    uacpi_device_notify_handler *cur;

    for (cur = handlers->notify_head; cur != UACPI_NULL; cur = cur->next) {
        if (cur->callback == target_handler)
            return cur;
    }

    return UACPI_NULL;
}
/*
 * Install a notify handler on a device/thermal-zone/processor node (or on
 * the namespace root, where it receives notifications for every node).
 *
 * Returns ALREADY_EXISTS if the same callback is already installed on the
 * node, OUT_OF_MEMORY if allocation fails.
 *
 * BUGFIX: the allocation-failure path used to `return` directly, leaking
 * notify_mutex (never released) and the object reference acquired above
 * for non-root nodes. It now routes through the shared cleanup labels.
 */
uacpi_status uacpi_install_notify_handler(
    uacpi_namespace_node *node, uacpi_notify_handler handler,
    uacpi_handle handler_context
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_handlers *handlers;
    uacpi_device_notify_handler *new_handler;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (node == uacpi_namespace_root()) {
        obj = g_uacpi_rt_ctx.root_object;
    } else {
        ret = uacpi_namespace_node_acquire_object_typed(
            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
            UACPI_OBJECT_PROCESSOR_BIT, &obj
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        goto out_no_mutex;

    // Make sure no in-flight notification work races with the list update
    uacpi_kernel_wait_for_work_completion();

    handlers = obj->handlers;

    if (handler_container(handlers, handler) != UACPI_NULL) {
        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    new_handler = uacpi_kernel_alloc_zeroed(sizeof(*new_handler));
    if (uacpi_unlikely(new_handler == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    new_handler->callback = handler;
    new_handler->user_context = handler_context;
    new_handler->next = handlers->notify_head;

    handlers->notify_head = new_handler;

out:
    uacpi_release_native_mutex(notify_mutex);
out_no_mutex:
    if (node != uacpi_namespace_root())
        uacpi_object_unref(obj);

    return ret;
}
/*
 * Remove a previously installed notify handler from a node (or the root).
 *
 * Returns NOT_FOUND if the callback isn't installed on this node. On
 * success the unlinked handler entry is freed after the mutex is dropped.
 */
uacpi_status uacpi_uninstall_notify_handler(
    uacpi_namespace_node *node, uacpi_notify_handler handler
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_handlers *handlers;
    uacpi_device_notify_handler *prev_handler, *containing = UACPI_NULL;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (node == uacpi_namespace_root()) {
        obj = g_uacpi_rt_ctx.root_object;
    } else {
        ret = uacpi_namespace_node_acquire_object_typed(
            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
            UACPI_OBJECT_PROCESSOR_BIT, &obj
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        goto out_no_mutex;

    // Drain in-flight notification work before touching the list
    uacpi_kernel_wait_for_work_completion();

    handlers = obj->handlers;

    containing = handler_container(handlers, handler);
    if (containing == UACPI_NULL) {
        ret = UACPI_STATUS_NOT_FOUND;
        goto out;
    }

    prev_handler = handlers->notify_head;

    // Are we the last linked handler?
    if (prev_handler == containing) {
        handlers->notify_head = containing->next;
        goto out;
    }

    // Nope, we're somewhere in the middle. Do a search.
    while (prev_handler) {
        if (prev_handler->next == containing) {
            prev_handler->next = containing->next;
            goto out;
        }

        prev_handler = prev_handler->next;
    }

out:
    uacpi_release_native_mutex(notify_mutex);
out_no_mutex:
    if (node != uacpi_namespace_root())
        uacpi_object_unref(obj);

    // Only freed on success, i.e. when it was actually unlinked above
    if (uacpi_likely_success(ret))
        uacpi_free(containing, sizeof(*containing));

    return ret;
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,265 @@
#include <uacpi/internal/opcodes.h>
#ifndef UACPI_BAREBONES_MODE
/*
 * Expansion helpers for the opcode enumeration X-macros: each entry
 * carries the stringified opcode name, its decode byte-code, property
 * flags and the raw opcode value.
 */
#define UACPI_OP(opname, opcode, props, ...) \
{ #opname, { .decode_ops = __VA_ARGS__ }, .properties = props, .code = opcode },

// Same, but the decode byte-code lives in a separate out-of-line array
#define UACPI_OUT_OF_LINE_OP(opname, opcode, out_of_line_buf, props) \
{ \
    .name = #opname, \
    { .indirect_decode_ops = out_of_line_buf }, \
    .properties = props, \
    .code = opcode, \
},

// Specs for all single-byte AML opcodes, indexed directly by opcode
static const struct uacpi_op_spec opcode_table[0x100] = {
    UACPI_ENUMERATE_OPCODES
};

// Specs for extended (two-byte) opcodes, indexed via ext_op_to_idx below
static const struct uacpi_op_spec ext_opcode_table[] = {
    UACPI_ENUMERATE_EXT_OPCODES
};
// Strip the extension prefix, leaving only the second opcode byte
#define _(op) (op & 0x00FF)

/*
 * Maps the low byte of an extended opcode to its index in
 * ext_opcode_table. Unlisted opcodes map to entry 0 — presumably a
 * reserved/invalid spec produced by UACPI_ENUMERATE_EXT_OPCODES; confirm.
 */
static const uacpi_u8 ext_op_to_idx[0x100] = {
    [_(UACPI_AML_OP_MutexOp)] = 1, [_(UACPI_AML_OP_EventOp)] = 2,
    [_(UACPI_AML_OP_CondRefOfOp)] = 3, [_(UACPI_AML_OP_CreateFieldOp)] = 4,
    [_(UACPI_AML_OP_LoadTableOp)] = 5, [_(UACPI_AML_OP_LoadOp)] = 6,
    [_(UACPI_AML_OP_StallOp)] = 7, [_(UACPI_AML_OP_SleepOp)] = 8,
    [_(UACPI_AML_OP_AcquireOp)] = 9, [_(UACPI_AML_OP_SignalOp)] = 10,
    [_(UACPI_AML_OP_WaitOp)] = 11, [_(UACPI_AML_OP_ResetOp)] = 12,
    [_(UACPI_AML_OP_ReleaseOp)] = 13, [_(UACPI_AML_OP_FromBCDOp)] = 14,
    [_(UACPI_AML_OP_ToBCDOp)] = 15, [_(UACPI_AML_OP_UnloadOp)] = 16,
    [_(UACPI_AML_OP_RevisionOp)] = 17, [_(UACPI_AML_OP_DebugOp)] = 18,
    [_(UACPI_AML_OP_FatalOp)] = 19, [_(UACPI_AML_OP_TimerOp)] = 20,
    [_(UACPI_AML_OP_OpRegionOp)] = 21, [_(UACPI_AML_OP_FieldOp)] = 22,
    [_(UACPI_AML_OP_DeviceOp)] = 23, [_(UACPI_AML_OP_ProcessorOp)] = 24,
    [_(UACPI_AML_OP_PowerResOp)] = 25, [_(UACPI_AML_OP_ThermalZoneOp)] = 26,
    [_(UACPI_AML_OP_IndexFieldOp)] = 27, [_(UACPI_AML_OP_BankFieldOp)] = 28,
    [_(UACPI_AML_OP_DataRegionOp)] = 29,
};
// Look up the spec for an AML opcode, single-byte or extended
const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op op)
{
    if (op <= 0xFF)
        return &opcode_table[op];

    return &ext_opcode_table[ext_op_to_idx[_(op)]];
}
/*
 * Shared decode byte-code for the FieldList loop of FieldOp, BankFieldOp
 * and IndexFieldOp. parse_loop_pc is the byte-code pc to jump back to
 * after each field element; the 44 after IF_HAS_DATA is the relative skip
 * to the trailing INVOKE_HANDLER/END once the field body is exhausted.
 */
#define PARSE_FIELD_ELEMENTS(parse_loop_pc) \
    /* Parse every field element found inside */ \
    UACPI_PARSE_OP_IF_HAS_DATA, 44, \
        /* Look at the first byte */ \
        UACPI_PARSE_OP_LOAD_IMM, 1, \
 \
        /* ReservedField := 0x00 PkgLength */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x00, 3, \
            UACPI_PARSE_OP_PKGLEN, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* AccessField := 0x01 AccessType AccessAttrib */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x01, 6, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* ConnectField := <0x02 NameString> | <0x02 BufferData> */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x02, 5, \
            UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \
            UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib \
         * AccessLength */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x03, 8, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* NamedField := NameSeg PkgLength */ \
 \
        /* \
         * Discard the immediate, as it's the first byte of the \
         * nameseg. We don't need it. \
         */ \
        UACPI_PARSE_OP_ITEM_POP, \
        UACPI_PARSE_OP_AML_PC_DECREMENT, \
        UACPI_PARSE_OP_CREATE_NAMESTRING, \
        UACPI_PARSE_OP_PKGLEN, \
        UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_FIELD_UNIT, \
        UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
    UACPI_PARSE_OP_INVOKE_HANDLER, \
    UACPI_PARSE_OP_END
/*
 * FieldOp decode: PkgLength, region name, one immediate byte (the field
 * flags byte per the AML grammar), then the shared field-element loop
 * (loop target = pc 4).
 */
uacpi_u8 uacpi_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(4),
};

/*
 * BankFieldOp decode: like FieldOp but with a second name (the bank
 * register) and an operand (bank value) before the flags byte
 * (loop target = pc 6).
 */
uacpi_u8 uacpi_bank_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_OPERAND,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(6),
};

/*
 * IndexFieldOp decode: index and data field names, then the flags byte
 * (loop target = pc 5).
 */
uacpi_u8 uacpi_index_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(5),
};
/*
 * Decode byte-code for the Load operator. The handler is invoked twice:
 * first to load the table (leaving item 0 NULL on failure, which makes
 * Load evaluate to False), then again after DISPATCH_TABLE_LOAD for
 * post-load initialization.
 */
uacpi_u8 uacpi_load_op_decode_ops[] = {
    // Storage for the scope pointer, this is left as 0 in case of errors
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
    // Index of the table we are going to be loading to unref it later
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
    UACPI_PARSE_OP_TARGET,

    /*
     * Invoke the handler here to initialize the table. If this fails, it's
     * expected to keep the item 0 as NULL, which is checked below to return
     * false to the caller of Load.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_IF_NULL, 0, 3,
        UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
        UACPI_PARSE_OP_JMP, 16,

    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,

    /*
     * Invoke the handler a second time to initialize any AML GPE handlers that
     * might've been loaded from this table.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_STORE_TO_TARGET, 4,
    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
    UACPI_PARSE_OP_END,
};
/*
 * Decode byte-code for the LoadTable operator. Consumes five string
 * arguments (counted down via the inline immediate at item 1) plus an
 * optional final term-arg, then dispatches the table load like Load does.
 */
uacpi_u8 uacpi_load_table_op_decode_ops[] = {
    // Storage for the scope pointer, this is left as 0 in case of errors
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
    // Index of the table we are going to be loading to unref it later
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    // Storage for the target pointer, this is left as 0 if none was requested
    UACPI_PARSE_OP_LOAD_ZERO_IMM,

    UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, 5,
    UACPI_PARSE_OP_IF_NOT_NULL, 4, 5,
        UACPI_PARSE_OP_STRING,
        UACPI_PARSE_OP_IMM_DECREMENT, 4,
        UACPI_PARSE_OP_JMP, 8,
    UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,

    /*
     * Invoke the handler a second time to block the store to target in case
     * the load above failed, as well as do any AML GPE handler initialization.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,

    // If we were given a target to store to, do the store
    UACPI_PARSE_OP_IF_NOT_NULL, 3, 3,
        UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 3, 10,

    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
    UACPI_PARSE_OP_END,
};
#define POP(x) UACPI_PARSE_OP_##x

/*
 * Human-readable names for parse ops, used by uacpi_parse_op_to_string().
 *
 * BUGFIX: IF_LAST_NOT_NULL was mislabeled as "IF_NOT_NULL" (a copy-paste
 * of the entry above), making traces of the two ops indistinguishable.
 */
static
const uacpi_char *const pop_names[UACPI_PARSE_OP_MAX + 1] = {
    [POP(END)] = "<END-OF-OP>",
    [POP(SKIP_WITH_WARN_IF_NULL)] = "SKIP_WITH_WARN_IF_NULL",
    [POP(EMIT_SKIP_WARN)] = "EMIT_SKIP_WARN",
    [POP(SIMPLE_NAME)] = "SIMPLE_NAME",
    [POP(SUPERNAME)] = "SUPERNAME",
    [POP(SUPERNAME_OR_UNRESOLVED)] = "SUPERNAME_OR_UNRESOLVED",
    [POP(TERM_ARG)] = "TERM_ARG",
    [POP(TERM_ARG_UNWRAP_INTERNAL)] = "TERM_ARG_UNWRAP_INTERNAL",
    [POP(TERM_ARG_OR_NAMED_OBJECT)] = "TERM_ARG_OR_NAMED_OBJECT",
    [POP(TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED)] = "TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED",
    [POP(OPERAND)] = "OPERAND",
    [POP(STRING)] = "STRING",
    [POP(COMPUTATIONAL_DATA)] = "COMPUTATIONAL_DATA",
    [POP(TARGET)] = "TARGET",
    [POP(PKGLEN)] = "PKGLEN",
    [POP(TRACKED_PKGLEN)] = "TRACKED_PKGLEN",
    [POP(CREATE_NAMESTRING)] = "CREATE_NAMESTRING",
    [POP(CREATE_NAMESTRING_OR_NULL_IF_LOAD)] = "CREATE_NAMESTRING_OR_NULL_IF_LOAD",
    [POP(EXISTING_NAMESTRING)] = "EXISTING_NAMESTRING",
    [POP(EXISTING_NAMESTRING_OR_NULL)] = "EXISTING_NAMESTRING_OR_NULL",
    [POP(EXISTING_NAMESTRING_OR_NULL_IF_LOAD)] = "EXISTING_NAMESTRING_OR_NULL_IF_LOAD",
    [POP(INVOKE_HANDLER)] = "INVOKE_HANDLER",
    [POP(OBJECT_ALLOC)] = "OBJECT_ALLOC",
    [POP(EMPTY_OBJECT_ALLOC)] = "EMPTY_OBJECT_ALLOC",
    [POP(OBJECT_CONVERT_TO_SHALLOW_COPY)] = "OBJECT_CONVERT_TO_SHALLOW_COPY",
    [POP(OBJECT_CONVERT_TO_DEEP_COPY)] = "OBJECT_CONVERT_TO_DEEP_COPY",
    [POP(OBJECT_ALLOC_TYPED)] = "OBJECT_ALLOC_TYPED",
    [POP(RECORD_AML_PC)] = "RECORD_AML_PC",
    [POP(LOAD_INLINE_IMM_AS_OBJECT)] = "LOAD_INLINE_IMM_AS_OBJECT",
    [POP(LOAD_INLINE_IMM)] = "LOAD_INLINE_IMM",
    [POP(LOAD_ZERO_IMM)] = "LOAD_ZERO_IMM",
    [POP(LOAD_IMM)] = "LOAD_IMM",
    [POP(LOAD_IMM_AS_OBJECT)] = "LOAD_IMM_AS_OBJECT",
    [POP(LOAD_FALSE_OBJECT)] = "LOAD_FALSE_OBJECT",
    [POP(LOAD_TRUE_OBJECT)] = "LOAD_TRUE_OBJECT",
    [POP(TRUNCATE_NUMBER)] = "TRUNCATE_NUMBER",
    [POP(TYPECHECK)] = "TYPECHECK",
    [POP(INSTALL_NAMESPACE_NODE)] = "INSTALL_NAMESPACE_NODE",
    [POP(OBJECT_TRANSFER_TO_PREV)] = "OBJECT_TRANSFER_TO_PREV",
    [POP(OBJECT_COPY_TO_PREV)] = "OBJECT_COPY_TO_PREV",
    [POP(STORE_TO_TARGET)] = "STORE_TO_TARGET",
    [POP(STORE_TO_TARGET_INDIRECT)] = "STORE_TO_TARGET_INDIRECT",
    [POP(UNREACHABLE)] = "UNREACHABLE",
    [POP(BAD_OPCODE)] = "BAD_OPCODE",
    [POP(AML_PC_DECREMENT)] = "AML_PC_DECREMENT",
    [POP(IMM_DECREMENT)] = "IMM_DECREMENT",
    [POP(ITEM_POP)] = "ITEM_POP",
    [POP(DISPATCH_METHOD_CALL)] = "DISPATCH_METHOD_CALL",
    [POP(DISPATCH_TABLE_LOAD)] = "DISPATCH_TABLE_LOAD",
    [POP(CONVERT_NAMESTRING)] = "CONVERT_NAMESTRING",
    [POP(IF_HAS_DATA)] = "IF_HAS_DATA",
    [POP(IF_NULL)] = "IF_NULL",
    [POP(IF_LAST_NULL)] = "IF_LAST_NULL",
    [POP(IF_NOT_NULL)] = "IF_NOT_NULL",
    [POP(IF_LAST_NOT_NULL)] = "IF_LAST_NOT_NULL",
    [POP(IF_LAST_EQUALS)] = "IF_LAST_EQUALS",
    [POP(IF_LAST_FALSE)] = "IF_LAST_FALSE",
    [POP(IF_LAST_TRUE)] = "IF_LAST_TRUE",
    [POP(SWITCH_TO_NEXT_IF_EQUALS)] = "SWITCH_TO_NEXT_IF_EQUALS",
    [POP(IF_SWITCHED_FROM)] = "IF_SWITCHED_FROM",
    [POP(JMP)] = "JMP",
};
/*
 * Translate a parser opcode into its human-readable name for tracing.
 * Out-of-range values yield a fixed placeholder string.
 */
const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op)
{
    if (!uacpi_unlikely(op > UACPI_PARSE_OP_MAX))
        return pop_names[op];

    return "<INVALID-OP>";
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,388 @@
#include <uacpi/platform/atomic.h>
#include <uacpi/internal/osi.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/kernel_api.h>
#ifndef UACPI_BAREBONES_MODE
/*
 * One _OSI-queryable interface string. Entries are either members of the
 * static predefined table below or heap-allocated at runtime (dynamic = 1).
 */
struct registered_interface {
    const uacpi_char *name;
    // Ordering value recorded on query (see uacpi_handle_osi)
    uacpi_u8 weight;
    // uacpi_interface_kind value (vendor / feature / dynamic)
    uacpi_u8 kind;
    // Only applicable for predefined host interfaces
    uacpi_u8 host_type;
    // Only applicable for predefined interfaces
    uacpi_u8 disabled : 1;
    // Set for runtime-installed entries; name and entry are heap-owned
    uacpi_u8 dynamic : 1;
    struct registered_interface *next;
};
// Serializes all access to the interface list and the query handler
static uacpi_handle interface_mutex;
// Singly-linked list; runtime installs are prepended ahead of the
// predefined table
static struct registered_interface *registered_interfaces;
// Optional user callback consulted on every _OSI query
static uacpi_interface_handler interface_handler;
// Weight of the highest-weight interface queried so far via _OSI
static uacpi_u32 latest_queried_interface;
// Builds a predefined "Windows <year>" vendor interface entry; the weight
// encodes the Windows release ordering
#define WINDOWS(string, interface) \
    { \
        .name = "Windows "string, \
        .weight = UACPI_VENDOR_INTERFACE_WINDOWS_##interface, \
        .kind = UACPI_INTERFACE_KIND_VENDOR, \
        .host_type = 0, \
        .disabled = 0, \
        .dynamic = 0, \
        .next = UACPI_NULL \
    }
// Builds a predefined host-feature entry; features start disabled and must
// be explicitly enabled by the host
#define HOST_FEATURE(string, type) \
    { \
        .name = string, \
        .weight = 0, \
        .kind = UACPI_INTERFACE_KIND_FEATURE, \
        .host_type = UACPI_HOST_INTERFACE_##type, \
        .disabled = 1, \
        .dynamic = 0, \
        .next = UACPI_NULL, \
    }
/*
 * Statically-allocated predefined interfaces; linked into a list at init
 * time by uacpi_initialize_interfaces(). The strings mirror what Windows
 * reports via _OSI.
 */
static struct registered_interface predefined_interfaces[] = {
    // Vendor strings
    WINDOWS("2000", 2000),
    WINDOWS("2001", XP),
    WINDOWS("2001 SP1", XP_SP1),
    WINDOWS("2001.1", SERVER_2003),
    WINDOWS("2001 SP2", XP_SP2),
    WINDOWS("2001.1 SP1", SERVER_2003_SP1),
    WINDOWS("2006", VISTA),
    WINDOWS("2006.1", SERVER_2008),
    WINDOWS("2006 SP1", VISTA_SP1),
    WINDOWS("2006 SP2", VISTA_SP2),
    WINDOWS("2009", 7),
    WINDOWS("2012", 8),
    WINDOWS("2013", 8_1),
    WINDOWS("2015", 10),
    WINDOWS("2016", 10_RS1),
    WINDOWS("2017", 10_RS2),
    WINDOWS("2017.2", 10_RS3),
    WINDOWS("2018", 10_RS4),
    WINDOWS("2018.2", 10_RS5),
    WINDOWS("2019", 10_19H1),
    WINDOWS("2020", 10_20H1),
    WINDOWS("2021", 11),
    WINDOWS("2022", 11_22H2),
    // Feature strings
    HOST_FEATURE("Module Device", MODULE_DEVICE),
    HOST_FEATURE("Processor Device", PROCESSOR_DEVICE),
    HOST_FEATURE("3.0 Thermal Model", 3_0_THERMAL_MODEL),
    HOST_FEATURE("3.0 _SCP Extensions", 3_0_SCP_EXTENSIONS),
    HOST_FEATURE("Processor Aggregator Device", PROCESSOR_AGGREGATOR_DEVICE),
    // Interpreter features
    { .name = "Extended Address Space Descriptor" },
};
/*
 * Set up the _OSI machinery: create the guarding mutex and chain the
 * predefined interface table into a linked list.
 */
uacpi_status uacpi_initialize_interfaces(void)
{
    uacpi_size idx;
    const uacpi_size last = UACPI_ARRAY_SIZE(predefined_interfaces) - 1;

    registered_interfaces = predefined_interfaces;

    interface_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(interface_mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    // Link each table entry to its successor; the final one keeps NULL
    for (idx = 0; idx < last; ++idx)
        predefined_interfaces[idx].next = &predefined_interfaces[idx + 1];

    return UACPI_STATUS_OK;
}
/*
 * Tear down the _OSI state: free dynamic entries, reset predefined ones to
 * their initial enabled/disabled state, and release the mutex.
 */
void uacpi_deinitialize_interfaces(void)
{
    struct registered_interface *cur = registered_interfaces;

    while (cur != UACPI_NULL) {
        struct registered_interface *victim = cur;

        cur = victim->next;
        victim->next = UACPI_NULL;

        if (victim->dynamic) {
            uacpi_free_dynamic_string(victim->name);
            uacpi_free(victim, sizeof(*victim));
            continue;
        }

        // Restore the default state: only features start out disabled
        victim->disabled = (victim->kind == UACPI_INTERFACE_KIND_FEATURE) ?
            UACPI_TRUE : UACPI_FALSE;
    }

    if (interface_mutex)
        uacpi_kernel_free_mutex(interface_mutex);

    interface_mutex = UACPI_NULL;
    interface_handler = UACPI_NULL;
    latest_queried_interface = 0;
    registered_interfaces = UACPI_NULL;
}
uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void)
{
return uacpi_atomic_load32(&latest_queried_interface);
}
// Linear lookup by exact name; caller must hold interface_mutex
static struct registered_interface *find_interface_unlocked(
    const uacpi_char *name
)
{
    struct registered_interface *cur;

    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (uacpi_strcmp(cur->name, name) == 0)
            break;
    }

    return cur;
}
// Linear lookup by predefined host feature type; caller must hold
// interface_mutex
static struct registered_interface *find_host_interface_unlocked(
    uacpi_host_interface type
)
{
    struct registered_interface *cur;

    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (cur->host_type == type)
            break;
    }

    return cur;
}
/*
 * Register a new interface string for _OSI. Re-installing an existing
 * interface re-enables it (if it was disabled) and reports
 * UACPI_STATUS_ALREADY_EXISTS. The name is copied; the copy and the list
 * entry are owned by this module until uninstall/deinit.
 */
uacpi_status uacpi_install_interface(
    const uacpi_char *name, uacpi_interface_kind kind
)
{
    uacpi_status ret;
    struct registered_interface *iface;
    uacpi_char *owned_name;
    uacpi_size bytes;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    iface = find_interface_unlocked(name);
    if (iface != UACPI_NULL) {
        if (iface->disabled)
            iface->disabled = UACPI_FALSE;
        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    iface = uacpi_kernel_alloc(sizeof(*iface));
    if (uacpi_unlikely(iface == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    // Duplicate the name including the terminating NUL
    bytes = uacpi_strlen(name) + 1;
    owned_name = uacpi_kernel_alloc(bytes);
    if (uacpi_unlikely(owned_name == UACPI_NULL)) {
        uacpi_free(iface, sizeof(*iface));
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }
    uacpi_memcpy(owned_name, name, bytes);

    // Prepend the fully-initialized entry to the list
    *iface = (struct registered_interface) {
        .name = owned_name,
        .kind = kind,
        .dynamic = 1,
        .next = registered_interfaces,
    };
    registered_interfaces = iface;

out:
    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Remove an interface string from _OSI visibility. Dynamic entries are
 * unlinked and freed; predefined entries are only marked disabled (they
 * stay in the list so they can be re-enabled later).
 */
uacpi_status uacpi_uninstall_interface(const uacpi_char *name)
{
    struct registered_interface *cur, *prev;
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    cur = registered_interfaces;
    prev = cur;

    ret = UACPI_STATUS_NOT_FOUND;
    while (cur) {
        if (uacpi_strcmp(cur->name, name) != 0) {
            prev = cur;
            cur = cur->next;
            continue;
        }

        if (cur->dynamic) {
            // prev == cur only when the match is the list head
            if (prev == cur) {
                registered_interfaces = cur->next;
            } else {
                prev->next = cur->next;
            }

            // The entry is unlinked, so the list lock can be dropped
            // before freeing
            uacpi_release_native_mutex(interface_mutex);
            uacpi_free_dynamic_string(cur->name);
            uacpi_free(cur, sizeof(*cur));
            return UACPI_STATUS_OK;
        }

        /*
         * If this interface was already disabled, pretend we didn't actually
         * find it and keep ret as UACPI_STATUS_NOT_FOUND. The fact that it's
         * still in the registered list is an implementation detail of
         * predefined interfaces.
         */
        if (!cur->disabled) {
            cur->disabled = UACPI_TRUE;
            ret = UACPI_STATUS_OK;
        }

        break;
    }

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
// Toggle a predefined host feature interface on or off under the lock
static uacpi_status configure_host_interface(
    uacpi_host_interface type, uacpi_bool enabled
)
{
    uacpi_status ret;
    struct registered_interface *iface;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    iface = find_host_interface_unlocked(type);
    if (iface != UACPI_NULL)
        iface->disabled = !enabled;
    else
        ret = UACPI_STATUS_NOT_FOUND;

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
// Make a predefined host feature interface visible to _OSI queries
uacpi_status uacpi_enable_host_interface(uacpi_host_interface type)
{
    const uacpi_bool enable = UACPI_TRUE;
    return configure_host_interface(type, enable);
}
// Hide a predefined host feature interface from _OSI queries
uacpi_status uacpi_disable_host_interface(uacpi_host_interface type)
{
    const uacpi_bool enable = UACPI_FALSE;
    return configure_host_interface(type, enable);
}
/*
 * Install (or clear, with UACPI_NULL) the user callback consulted on every
 * _OSI query. Overwriting an existing handler with another one is rejected
 * with UACPI_STATUS_ALREADY_EXISTS.
 */
uacpi_status uacpi_set_interface_query_handler(
    uacpi_interface_handler handler
)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Clearing is always allowed; installing requires no current handler
    if (handler == UACPI_NULL || interface_handler == UACPI_NULL)
        interface_handler = handler;
    else
        ret = UACPI_STATUS_ALREADY_EXISTS;

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Enable or disable every registered interface whose kind matches the
 * provided kind mask.
 */
uacpi_status uacpi_bulk_configure_interfaces(
    uacpi_interface_action action, uacpi_interface_kind kind
)
{
    uacpi_status ret;
    struct registered_interface *cur;
    uacpi_bool disable = (action == UACPI_INTERFACE_ACTION_DISABLE);

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    for (cur = registered_interfaces; cur; cur = cur->next) {
        if (cur->kind & kind)
            cur->disabled = disable;
    }

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Answer a firmware _OSI("string") query. *out_value is set to whether the
 * interface is supported (registered and enabled), optionally overridden by
 * the user query handler. Always returns UACPI_STATUS_OK on query itself
 * (an unknown string simply means "unsupported"), except on mutex failure.
 */
uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value)
{
    uacpi_status ret;
    struct registered_interface *interface;
    uacpi_bool is_supported = UACPI_FALSE;

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    interface = find_interface_unlocked(string);
    if (interface == UACPI_NULL)
        goto out;

    // Track the newest (highest-weight) vendor interface the firmware has
    // asked about. NOTE(review): the comparison reads the variable
    // non-atomically before the atomic store — presumably safe because all
    // writers hold interface_mutex here; confirm against other readers.
    if (interface->weight > latest_queried_interface)
        uacpi_atomic_store32(&latest_queried_interface, interface->weight);

    is_supported = !interface->disabled;
    // The user handler gets the final say over the reported answer
    if (interface_handler)
        is_supported = interface_handler(string, is_supported);
out:
    uacpi_release_native_mutex(interface_mutex);
    *out_value = is_supported;
    return UACPI_STATUS_OK;
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,572 @@
#include <uacpi/internal/registers.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/io.h>
#include <uacpi/internal/log.h>
#include <uacpi/platform/atomic.h>
#include <uacpi/acpi.h>
#ifndef UACPI_BAREBONES_MODE
// Guards lazy register mapping and read-modify-write sequences
static uacpi_handle g_reg_lock;

// How a fixed register's location is described
enum register_kind {
    REGISTER_KIND_GAS,  // via an ACPI Generic Address Structure
    REGISTER_KIND_IO,   // via a raw FADT-provided system I/O address
};

// Write semantics of a fixed register
enum register_access_kind {
    REGISTER_ACCESS_KIND_PRESERVE,        // read-modify-write around masks
    REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,  // writing 1 clears a bit
    REGISTER_ACCESS_KIND_NORMAL,          // plain write
};

// Static description of one fixed ACPI register (possibly split A/B)
struct register_spec {
    uacpi_u8 kind;
    uacpi_u8 access_kind;
    uacpi_u8 access_width; // only REGISTER_KIND_IO
    // GAS pointers (or one raw address pointer for REGISTER_KIND_IO);
    // index 1 is the optional "B" half (e.g. PM1b)
    void *accessors[2];
    // Bits that read back undefined and must be masked out of reads
    uacpi_u64 write_only_mask;
    // Bits the caller must not modify; kept intact on writes
    uacpi_u64 preserve_mask;
};
/*
 * Table of all fixed ACPI registers, indexed by enum uacpi_register.
 * Accessor pointers reference blocks in the global runtime context, which
 * are populated from the FADT.
 */
static const struct register_spec g_registers[UACPI_REGISTER_MAX + 1] = {
    [UACPI_REGISTER_PM1_STS] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
        .accessors = {
            &g_uacpi_rt_ctx.pm1a_status_blk,
            &g_uacpi_rt_ctx.pm1b_status_blk,
        },
        .preserve_mask = ACPI_PM1_STS_IGN0_MASK,
    },
    [UACPI_REGISTER_PM1_EN] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = {
            &g_uacpi_rt_ctx.pm1a_enable_blk,
            &g_uacpi_rt_ctx.pm1b_enable_blk,
        },
    },
    [UACPI_REGISTER_PM1_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = {
            &g_uacpi_rt_ctx.fadt.x_pm1a_cnt_blk,
            &g_uacpi_rt_ctx.fadt.x_pm1b_cnt_blk,
        },
        // SLP_EN/GBL_RLS read back as 0, so they're masked out of reads
        .write_only_mask = ACPI_PM1_CNT_SLP_EN_MASK |
                           ACPI_PM1_CNT_GBL_RLS_MASK,
        .preserve_mask = ACPI_PM1_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_PM_TMR] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = { &g_uacpi_rt_ctx.fadt.x_pm_tmr_blk, },
    },
    [UACPI_REGISTER_PM2_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = { &g_uacpi_rt_ctx.fadt.x_pm2_cnt_blk, },
        .preserve_mask = ACPI_PM2_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_SLP_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = { &g_uacpi_rt_ctx.fadt.sleep_control_reg, },
        .write_only_mask = ACPI_SLP_CNT_SLP_EN_MASK,
        .preserve_mask = ACPI_SLP_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_SLP_STS] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
        .accessors = { &g_uacpi_rt_ctx.fadt.sleep_status_reg, },
        .preserve_mask = ACPI_SLP_STS_PRESERVE_MASK,
    },
    [UACPI_REGISTER_RESET] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_NORMAL,
        .accessors = { &g_uacpi_rt_ctx.fadt.reset_reg, },
    },
    [UACPI_REGISTER_SMI_CMD] = {
        // SMI_CMD is a raw I/O port in the FADT, not a GAS
        .kind = REGISTER_KIND_IO,
        .access_kind = REGISTER_ACCESS_KIND_NORMAL,
        .access_width = 1,
        .accessors = { &g_uacpi_rt_ctx.fadt.smi_cmd, },
    },
};
// Per-half lazy mapping state for a fixed register
enum register_mapping_state {
    REGISTER_MAPPING_STATE_NONE = 0,        // not attempted yet
    REGISTER_MAPPING_STATE_NOT_NEEDED,      // no address for this half
    REGISTER_MAPPING_STATE_MAPPED,          // mapping is live and usable
};

// Runtime mapping state paired with each g_registers entry
struct register_mapping {
    uacpi_mapped_gas mappings[2];
    uacpi_u8 states[2];
};
static struct register_mapping g_register_mappings[UACPI_REGISTER_MAX + 1];
/*
 * Map one half (idx 0 = A, 1 = B) of a register if it hasn't been attempted
 * yet. Halves with no address are marked NOT_NEEDED and silently skipped.
 * Caller must hold g_reg_lock.
 */
static uacpi_status map_one(
    const struct register_spec *spec, struct register_mapping *mapping,
    uacpi_u8 idx
)
{
    uacpi_status ret = UACPI_STATUS_OK;

    // Already mapped or known not to exist — nothing to do
    if (mapping->states[idx] != REGISTER_MAPPING_STATE_NONE)
        return ret;

    if (spec->kind == REGISTER_KIND_GAS) {
        struct acpi_gas *gas = spec->accessors[idx];

        if (gas == UACPI_NULL || gas->address == 0) {
            mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED;
            return ret;
        }

        ret = uacpi_map_gas_noalloc(gas, &mapping->mappings[idx]);
    } else {
        // REGISTER_KIND_IO: synthesize a temporary system-I/O GAS from the
        // raw 32-bit port address; IO registers only have an "A" half
        struct acpi_gas temp_gas = { 0 };

        if (idx != 0) {
            mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED;
            return ret;
        }

        temp_gas.address_space_id = UACPI_ADDRESS_SPACE_SYSTEM_IO;
        temp_gas.address = *(uacpi_u32*)spec->accessors[0];
        temp_gas.register_bit_width = spec->access_width * 8;

        ret = uacpi_map_gas_noalloc(&temp_gas, &mapping->mappings[idx]);
    }

    if (uacpi_likely_success(ret))
        mapping->states[idx] = REGISTER_MAPPING_STATE_MAPPED;

    return ret;
}
/*
 * Lazily map both halves of a register on first use. The lock-free atomic
 * pre-check avoids taking the spinlock on the hot path once both halves
 * have been resolved; the actual mapping happens under g_reg_lock.
 */
static uacpi_status ensure_register_mapped(
    const struct register_spec *spec, struct register_mapping *mapping
)
{
    uacpi_status ret;
    uacpi_bool needs_mapping = UACPI_FALSE;
    uacpi_u8 state;
    uacpi_cpu_flags flags;

    state = uacpi_atomic_load8(&mapping->states[0]);
    needs_mapping |= state == REGISTER_MAPPING_STATE_NONE;

    state = uacpi_atomic_load8(&mapping->states[1]);
    needs_mapping |= state == REGISTER_MAPPING_STATE_NONE;

    if (!needs_mapping)
        return UACPI_STATUS_OK;

    flags = uacpi_kernel_lock_spinlock(g_reg_lock);

    ret = map_one(spec, mapping, 0);
    if (uacpi_unlikely_error(ret))
        goto out;

    ret = map_one(spec, mapping, 1);
out:
    uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
    return ret;
}
// Resolve a register index into its spec/mapping pair, validating range
static uacpi_status get_reg(
    uacpi_u8 idx, const struct register_spec **out_spec,
    struct register_mapping **out_mapping
)
{
    if (uacpi_unlikely(idx > UACPI_REGISTER_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    *out_spec = &g_registers[idx];
    *out_mapping = &g_register_mappings[idx];
    return UACPI_STATUS_OK;
}
// Read one mapped half; halves that were never mapped contribute nothing
static uacpi_status do_read_one(
    struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 *out_value
)
{
    uacpi_status ret = UACPI_STATUS_OK;

    if (mapping->states[idx] == REGISTER_MAPPING_STATE_MAPPED)
        ret = uacpi_gas_read_mapped(&mapping->mappings[idx], out_value);

    return ret;
}
/*
 * Read a (possibly split A/B) register: the halves are OR-ed together and
 * write-only bits are masked out of the result.
 */
static uacpi_status do_read_register(
    const struct register_spec *reg, struct register_mapping *mapping,
    uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_u64 halves[2] = { 0, 0 };
    uacpi_u8 i;

    for (i = 0; i < 2; ++i) {
        ret = do_read_one(mapping, i, &halves[i]);
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    *out_value = halves[0] | halves[1];

    if (reg->write_only_mask)
        *out_value &= ~reg->write_only_mask;

    return UACPI_STATUS_OK;
}
/*
 * Public read of a fixed ACPI register, mapping it on first use.
 * Split A/B registers are combined into one value.
 */
uacpi_status uacpi_read_register(
    enum uacpi_register reg_enum, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    const struct register_spec *spec;
    struct register_mapping *mapping;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = get_reg(reg_enum, &spec, &mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    return do_read_register(spec, mapping, out_value);
}
// Write one mapped half; halves that were never mapped are skipped
static uacpi_status do_write_one(
    struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 in_value
)
{
    uacpi_status ret = UACPI_STATUS_OK;

    if (mapping->states[idx] == REGISTER_MAPPING_STATE_MAPPED)
        ret = uacpi_gas_write_mapped(&mapping->mappings[idx], in_value);

    return ret;
}
/*
 * Write a (possibly split A/B) register. Bits in preserve_mask are never
 * taken from the caller's value; for PRESERVE-kind registers their current
 * hardware state is read back and kept intact.
 */
static uacpi_status do_write_register(
    const struct register_spec *reg, struct register_mapping *mapping,
    uacpi_u64 in_value
)
{
    uacpi_status ret;

    if (reg->preserve_mask) {
        in_value &= ~reg->preserve_mask;

        if (reg->access_kind == REGISTER_ACCESS_KIND_PRESERVE) {
            uacpi_u64 current;

            ret = do_read_register(reg, mapping, &current);
            if (uacpi_unlikely_error(ret))
                return ret;

            in_value |= current & reg->preserve_mask;
        }
    }

    ret = do_write_one(mapping, 0, in_value);
    if (uacpi_unlikely_error(ret))
        return ret;

    return do_write_one(mapping, 1, in_value);
}
/*
 * Public write of a fixed ACPI register, mapping it on first use.
 * The same value is written to both halves of split registers.
 */
uacpi_status uacpi_write_register(
    enum uacpi_register reg_enum, uacpi_u64 in_value
)
{
    uacpi_status ret;
    const struct register_spec *spec;
    struct register_mapping *mapping;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = get_reg(reg_enum, &spec, &mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    return do_write_register(spec, mapping, in_value);
}
/*
 * Write distinct values to the A and B halves of a split register
 * (e.g. different SLP_TYP values for PM1a/PM1b). Note this bypasses the
 * preserve/write-only mask handling of do_write_register.
 */
uacpi_status uacpi_write_registers(
    enum uacpi_register reg_enum, uacpi_u64 in_value0, uacpi_u64 in_value1
)
{
    uacpi_status ret;
    const struct register_spec *spec;
    struct register_mapping *mapping;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = get_reg(reg_enum, &spec, &mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = do_write_one(mapping, 0, in_value0);
    if (uacpi_unlikely_error(ret))
        return ret;

    return do_write_one(mapping, 1, in_value1);
}
// One named bitfield inside a fixed register: parent register index, bit
// offset of the field, and its pre-shifted mask
struct register_field {
    uacpi_u8 reg;
    uacpi_u8 offset;
    uacpi_u16 mask;
};
/*
 * Table of all fixed-register bitfields, indexed by
 * enum uacpi_register_field. Offsets/masks come from the ACPI-defined
 * fixed hardware register layouts.
 */
static const struct register_field g_fields[UACPI_REGISTER_FIELD_MAX + 1] = {
    // PM1 status bits (write-to-clear)
    [UACPI_REGISTER_FIELD_TMR_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_TMR_STS_IDX,
        .mask = ACPI_PM1_STS_TMR_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_BM_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_BM_STS_IDX,
        .mask = ACPI_PM1_STS_BM_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_GBL_STS_IDX,
        .mask = ACPI_PM1_STS_GBL_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_PWRBTN_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_PWRBTN_STS_IDX,
        .mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_SLPBTN_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_SLPBTN_STS_IDX,
        .mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_RTC_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_RTC_STS_IDX,
        .mask = ACPI_PM1_STS_RTC_STS_MASK,
    },
    // Hardware-reduced wake status lives in the sleep status register
    [UACPI_REGISTER_FIELD_HWR_WAK_STS] = {
        .reg = UACPI_REGISTER_SLP_STS,
        .offset = ACPI_SLP_STS_WAK_STS_IDX,
        .mask = ACPI_SLP_STS_WAK_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_WAK_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_WAKE_STS_IDX,
        .mask = ACPI_PM1_STS_WAKE_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_PCIEX_WAKE_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX,
        .mask = ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK,
    },
    // PM1 enable bits
    [UACPI_REGISTER_FIELD_TMR_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_TMR_EN_IDX,
        .mask = ACPI_PM1_EN_TMR_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_GBL_EN_IDX,
        .mask = ACPI_PM1_EN_GBL_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_PWRBTN_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_PWRBTN_EN_IDX,
        .mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_SLPBTN_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_SLPBTN_EN_IDX,
        .mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_RTC_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_RTC_EN_IDX,
        .mask = ACPI_PM1_EN_RTC_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX,
        .mask = ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK,
    },
    // PM1 control bits
    [UACPI_REGISTER_FIELD_SCI_EN] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SCI_EN_IDX,
        .mask = ACPI_PM1_CNT_SCI_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_BM_RLD] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_BM_RLD_IDX,
        .mask = ACPI_PM1_CNT_BM_RLD_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_RLS] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_GBL_RLS_IDX,
        .mask = ACPI_PM1_CNT_GBL_RLS_MASK,
    },
    [UACPI_REGISTER_FIELD_SLP_TYP] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SLP_TYP_IDX,
        .mask = ACPI_PM1_CNT_SLP_TYP_MASK,
    },
    [UACPI_REGISTER_FIELD_SLP_EN] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SLP_EN_IDX,
        .mask = ACPI_PM1_CNT_SLP_EN_MASK,
    },
    // Hardware-reduced sleep control bits
    [UACPI_REGISTER_FIELD_HWR_SLP_TYP] = {
        .reg = UACPI_REGISTER_SLP_CNT,
        .offset = ACPI_SLP_CNT_SLP_TYP_IDX,
        .mask = ACPI_SLP_CNT_SLP_TYP_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_SLP_EN] = {
        .reg = UACPI_REGISTER_SLP_CNT,
        .offset = ACPI_SLP_CNT_SLP_EN_IDX,
        .mask = ACPI_SLP_CNT_SLP_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_ARB_DIS] = {
        .reg = UACPI_REGISTER_PM2_CNT,
        .offset = ACPI_PM2_CNT_ARB_DIS_IDX,
        .mask = ACPI_PM2_CNT_ARB_DIS_MASK,
    },
};
// Allocate the spinlock guarding register mapping and RMW sequences
uacpi_status uacpi_initialize_registers(void)
{
    g_reg_lock = uacpi_kernel_create_spinlock();

    return g_reg_lock == UACPI_NULL ?
        UACPI_STATUS_OUT_OF_MEMORY : UACPI_STATUS_OK;
}
// Release the register lock and unmap every half that was mapped
void uacpi_deinitialize_registers(void)
{
    uacpi_u8 reg, half;

    if (g_reg_lock != UACPI_NULL) {
        uacpi_kernel_free_spinlock(g_reg_lock);
        g_reg_lock = UACPI_NULL;
    }

    for (reg = 0; reg <= UACPI_REGISTER_MAX; ++reg) {
        struct register_mapping *mapping = &g_register_mappings[reg];

        for (half = 0; half < 2; ++half) {
            if (mapping->states[half] == REGISTER_MAPPING_STATE_MAPPED)
                uacpi_unmap_gas_nofree(&mapping->mappings[half]);
        }
    }

    uacpi_memzero(&g_register_mappings, sizeof(g_register_mappings));
}
/*
 * Read one named bitfield of a fixed register; the result is masked and
 * shifted down so the field value starts at bit 0.
 */
uacpi_status uacpi_read_register_field(
    enum uacpi_register_field field_enum, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_u8 field_idx = field_enum;
    const struct register_field *field;
    const struct register_spec *spec;
    struct register_mapping *mapping;
    uacpi_u64 raw;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    field = &g_fields[field_idx];
    spec = &g_registers[field->reg];
    mapping = &g_register_mappings[field->reg];

    ret = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = do_read_register(spec, mapping, &raw);
    if (uacpi_unlikely_error(ret))
        return ret;

    *out_value = (raw & field->mask) >> field->offset;
    return UACPI_STATUS_OK;
}
/*
 * Write one named bitfield of a fixed register under g_reg_lock.
 *
 * Write-to-clear registers (PM1_STS, SLP_STS) take the value directly:
 * writing the field's set bits clears them, and a zero value is a no-op
 * (writing 0s would have no effect anyway). All other registers are
 * read-modify-written so the remaining bits are preserved.
 */
uacpi_status uacpi_write_register_field(
    enum uacpi_register_field field_enum, uacpi_u64 in_value
)
{
    uacpi_status ret;
    uacpi_u8 field_idx = field_enum;
    const struct register_field *field;
    const struct register_spec *reg;
    struct register_mapping *mapping;
    uacpi_u64 data;
    uacpi_cpu_flags flags;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    field = &g_fields[field_idx];
    reg = &g_registers[field->reg];
    mapping = &g_register_mappings[field->reg];

    ret = ensure_register_mapped(reg, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Position the caller's value inside the field and clamp it to the mask
    in_value = (in_value << field->offset) & field->mask;

    flags = uacpi_kernel_lock_spinlock(g_reg_lock);

    /*
     * BUGFIX: compare access_kind, not kind. The original compared
     * reg->kind (a REGISTER_KIND_* value) against
     * REGISTER_ACCESS_KIND_WRITE_TO_CLEAR, so this branch never fired for
     * the GAS-described W2C registers (PM1_STS/SLP_STS). Those then fell
     * into the read-modify-write path below, which writes back every
     * currently-set status bit and thereby clears unrelated pending events.
     */
    if (reg->access_kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) {
        if (in_value == 0) {
            ret = UACPI_STATUS_OK;
            goto out;
        }

        ret = do_write_register(reg, mapping, in_value);
        goto out;
    }

    ret = do_read_register(reg, mapping, &data);
    if (uacpi_unlikely_error(ret))
        goto out;

    data &= ~field->mask;
    data |= in_value;

    ret = do_write_register(reg, mapping, data);
out:
    uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
    return ret;
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,71 @@
#include <uacpi/internal/shareable.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/platform/atomic.h>
#ifndef UACPI_BAREBONES_MODE
#define BUGGED_REFCOUNT 0xFFFFFFFF
// Initialize a refcounted object; a freshly created object has one owner
void uacpi_shareable_init(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    obj->reference_count = 1;
}
/*
 * Check whether an object has been poisoned. A refcount of zero on a live
 * object indicates an imbalance bug, so it is poisoned on sight.
 */
uacpi_bool uacpi_bugged_shareable(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;
    uacpi_u32 refcount;

    if (uacpi_unlikely(obj->reference_count == 0))
        uacpi_make_shareable_bugged(obj);

    refcount = uacpi_atomic_load32(&obj->reference_count);
    return refcount == BUGGED_REFCOUNT;
}
// Poison an object's refcount so all further ref/unref become no-ops
void uacpi_make_shareable_bugged(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    uacpi_atomic_store32(&obj->reference_count, BUGGED_REFCOUNT);
}
/*
 * Take a reference. Returns the refcount value before the increment, or
 * BUGGED_REFCOUNT for a poisoned object (which is left untouched).
 */
uacpi_u32 uacpi_shareable_ref(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    if (uacpi_unlikely(uacpi_bugged_shareable(obj)))
        return BUGGED_REFCOUNT;

    return uacpi_atomic_inc32(&obj->reference_count) - 1;
}
/*
 * Drop a reference. Returns the refcount value before the decrement, or
 * BUGGED_REFCOUNT for a poisoned object (which is left untouched).
 */
uacpi_u32 uacpi_shareable_unref(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    if (uacpi_unlikely(uacpi_bugged_shareable(obj)))
        return BUGGED_REFCOUNT;

    return uacpi_atomic_dec32(&obj->reference_count) + 1;
}
/*
 * Drop a reference and invoke do_free when it was the last one. NULL and
 * poisoned handles are ignored.
 */
void uacpi_shareable_unref_and_delete_if_last(
    uacpi_handle handle, void (*do_free)(uacpi_handle)
)
{
    if (handle == UACPI_NULL || uacpi_unlikely(uacpi_bugged_shareable(handle)))
        return;

    // unref returns the pre-decrement value: 1 means we held the last ref
    if (uacpi_shareable_unref(handle) == 1)
        do_free(handle);
}
// Snapshot the current reference count of an object
uacpi_u32 uacpi_shareable_refcount(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    return uacpi_atomic_load32(&obj->reference_count);
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,616 @@
#include <uacpi/sleep.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/io.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/platform/arch_helpers.h>
#ifndef UACPI_BAREBONES_MODE
#ifndef UACPI_REDUCED_HARDWARE
/*
 * Dispatch a sleep helper to the hardware-full or hardware-reduced
 * implementation depending on the FADT's HW_REDUCED_ACPI flag (or always
 * reduced when compiled with UACPI_REDUCED_HARDWARE).
 */
#ifndef UACPI_REDUCED_HARDWARE
#define CALL_SLEEP_FN(name, state) \
    (uacpi_is_hardware_reduced() ? \
        name##_hw_reduced(state) : name##_hw_full(state))
#else
/*
 * BUGFIX: no trailing semicolon — the caller terminates the statement.
 * The original expansion ended in ';', which injects a stray empty
 * statement (breaking brace-less if/else and expression contexts) in
 * UACPI_REDUCED_HARDWARE builds, unlike the non-reduced variant above.
 */
#define CALL_SLEEP_FN(name, state) name##_hw_reduced(state)
#endif
static uacpi_status eval_wak(uacpi_u8 state);
static uacpi_status eval_sst(uacpi_u8 value);
#ifndef UACPI_REDUCED_HARDWARE
/*
 * Program the FACS firmware waking vector(s) that the firmware jumps to on
 * resume. Silently succeeds when no FACS is present (nothing to program).
 */
uacpi_status uacpi_set_waking_vector(
    uacpi_phys_addr addr32, uacpi_phys_addr addr64
)
{
    struct acpi_facs *facs = g_uacpi_rt_ctx.facs;

    if (facs == UACPI_NULL)
        return UACPI_STATUS_OK;

    facs->firmware_waking_vector = addr32;

    // The 64-bit wake vector doesn't exist, we're done
    if (facs->length < 32)
        return UACPI_STATUS_OK;

    // Only allow 64-bit wake vector on 1.0 and above FACS
    if (facs->version >= 1)
        facs->x_firmware_waking_vector = addr64;
    else
        facs->x_firmware_waking_vector = 0;

    return UACPI_STATUS_OK;
}
/*
 * Put fixed (non-hardware-reduced) hardware into the requested sleep state
 * by programming SLP_TYP/SLP_EN in PM1a/PM1b control. For S1-S3 this
 * returns after WAK_STS indicates the wake transition; for S4/S5 control
 * normally never returns here.
 */
static uacpi_status enter_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u64 wake_status, pm1a, pm1b;

    // Clear a stale wake status so the post-sleep poll below is meaningful
    ret = uacpi_write_register_field(
        UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    // Quiesce events: no GPEs, no pending events, then wake-capable GPEs only
    ret = uacpi_disable_all_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_clear_all_events();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_enable_all_wake_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Build per-block values: same base, but a/b may need different SLP_TYP
    pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
    pm1b = pm1a;

    pm1a |= g_uacpi_rt_ctx.last_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
    pm1b |= g_uacpi_rt_ctx.last_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;

    /*
     * Just like ACPICA, split writing SLP_TYP and SLP_EN to work around
     * buggy firmware that can't handle both written at the same time.
     */
    ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
    if (uacpi_unlikely_error(ret))
        return ret;

    pm1a |= ACPI_PM1_CNT_SLP_EN_MASK;
    pm1b |= ACPI_PM1_CNT_SLP_EN_MASK;

    // For states that keep RAM powered, make sure dirty lines reach memory
    if (state < UACPI_SLEEP_STATE_S4)
        UACPI_ARCH_FLUSH_CPU_CACHE();

    // This write is the actual entry into the sleep state
    ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (state > UACPI_SLEEP_STATE_S3) {
        /*
         * We're still here, this is a bug or very slow firmware.
         * Just try spinning for a bit.
         */
        uacpi_u64 stalled_time = 0;

        // 10 seconds max
        while (stalled_time < (10 * 1000 * 1000)) {
            uacpi_kernel_stall(100);
            stalled_time += 100;
        }

        // Try one more time
        ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
        if (uacpi_unlikely_error(ret))
            return ret;

        // Nothing we can do here, give up
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    // S1-S3: busy-poll WAK_STS until the hardware reports the wake event
    do {
        ret = uacpi_read_register_field(
            UACPI_REGISTER_FIELD_WAK_STS, &wake_status
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    } while (wake_status != 1);

    return UACPI_STATUS_OK;
}
/*
 * Pre-wake fixup for fixed hardware: rewrite the S0 SLP_TYP values into
 * PM1 control. Best-effort by design — every failure path still reports
 * success so the wake sequence continues.
 */
static uacpi_status prepare_for_wake_from_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u64 pm1a, pm1b;
    UACPI_UNUSED(state);

    /*
     * Some hardware apparently relies on S0 values being written to the PM1
     * control register on wake, so do this here.
     */
    if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID)
        goto out;

    ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
    if (uacpi_unlikely_error(ret))
        goto out;

    pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
    pm1b = pm1a;

    pm1a |= g_uacpi_rt_ctx.s0_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
    pm1b |= g_uacpi_rt_ctx.s0_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;

    uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);

out:
    // Errors ignored intentionally, we don't want to abort because of this
    return UACPI_STATUS_OK;
}
/*
 * Finish waking on fixed hardware: restore runtime GPEs, run _WAK, clear
 * the wake status bit, and update the _SST system status indicator.
 */
static uacpi_status wake_from_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;

    // Invalidate the cached SLP_TYP values; a new sleep requires a fresh
    // uacpi_prepare_for_sleep_state call
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;

    // Set the status to 2 (waking) while we execute the wake method.
    eval_sst(2);

    // Swap wake GPEs back for the runtime set
    ret = uacpi_disable_all_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_enable_all_runtime_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    // _WAK failure is non-fatal; result intentionally ignored
    eval_wak(state);

    // Apparently some BIOSes expect us to clear this, so do it
    uacpi_write_register_field(
        UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
    );

    // Now that we're awake set the status to 1 (running)
    eval_sst(1);

    return UACPI_STATUS_OK;
}
#endif
/*
 * Evaluate the \_Sx package for a sleep state and extract the SLP_TYPa and
 * SLP_TYPb values from it. On any failure *a and *b are set to
 * UACPI_SLEEP_TYP_INVALID and a non-OK status is returned.
 */
static uacpi_status get_slp_type_for_state(
    uacpi_u8 state, uacpi_u8 *a, uacpi_u8 *b
)
{
    uacpi_char path[] = "_S0";
    uacpi_status ret;
    uacpi_object *obj0, *obj1, *ret_obj = UACPI_NULL;

    // Patch the digit in place: "_S0" .. "_S5"
    path[2] += state;

    ret = uacpi_eval_typed(
        uacpi_namespace_root(), path, UACPI_NULL,
        UACPI_OBJECT_PACKAGE_BIT, &ret_obj
    );
    if (ret != UACPI_STATUS_OK) {
        if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) {
            uacpi_warn("error while evaluating %s: %s\n", path,
                       uacpi_status_to_string(ret));
        } else {
            uacpi_trace("sleep state %d is not supported as %s was not found\n",
                        state, path);
        }
        goto out;
    }

    switch (ret_obj->package->count) {
    case 0:
        uacpi_error("empty package while evaluating %s!\n", path);
        ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
        goto out;

    case 1:
        obj0 = ret_obj->package->objects[0];
        if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER)) {
            uacpi_error(
                "invalid object type at pkg[0] => %s when evaluating %s\n",
                uacpi_object_type_to_string(obj0->type), path
            );
            /*
             * BUGFIX: the original jumped to 'out' here with ret still
             * UACPI_STATUS_OK, so the function reported success while
             * leaving *a/*b unwritten. Flag the malformed package, matching
             * the default branch below.
             */
            ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
            goto out;
        }

        // Single integer form: SLP_TYPa in the low byte, SLP_TYPb in the next
        *a = obj0->integer;
        *b = obj0->integer >> 8;
        break;

    default:
        // Two (or more) element form: separate integers for a and b
        obj0 = ret_obj->package->objects[0];
        obj1 = ret_obj->package->objects[1];

        if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER ||
                           obj1->type != UACPI_OBJECT_INTEGER)) {
            uacpi_error(
                "invalid object type when evaluating %s: "
                "pkg[0] => %s, pkg[1] => %s\n", path,
                uacpi_object_type_to_string(obj0->type),
                uacpi_object_type_to_string(obj1->type)
            );
            ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
            goto out;
        }

        *a = obj0->integer;
        *b = obj1->integer;
        break;
    }

out:
    if (ret != UACPI_STATUS_OK) {
        *a = UACPI_SLEEP_TYP_INVALID;
        *b = UACPI_SLEEP_TYP_INVALID;
    }

    uacpi_object_unref(ret_obj);
    return ret;
}
/*
 * Evaluate an optional sleep-related method with a single integer argument.
 * A missing method is treated as success; real evaluation errors are logged
 * and propagated.
 */
static uacpi_status eval_sleep_helper(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u8 value
)
{
    uacpi_status ret;
    uacpi_object *arg;
    uacpi_object_array args;

    arg = uacpi_create_object(UACPI_OBJECT_INTEGER);
    if (uacpi_unlikely(arg == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    arg->integer = value;
    args.objects = &arg;
    args.count = 1;

    ret = uacpi_eval(parent, path, &args, UACPI_NULL);

    if (ret == UACPI_STATUS_NOT_FOUND) {
        // The method is optional — absence is not an error
        ret = UACPI_STATUS_OK;
    } else if (ret != UACPI_STATUS_OK) {
        uacpi_error("error while evaluating %s: %s\n",
                    path, uacpi_status_to_string(ret));
    }

    uacpi_object_unref(arg);
    return ret;
}
// Run \_PTS (prepare-to-sleep) with the target sleep state
static uacpi_status eval_pts(uacpi_u8 state)
{
    uacpi_namespace_node *root = uacpi_namespace_root();
    return eval_sleep_helper(root, "_PTS", state);
}
static uacpi_status eval_wak(uacpi_u8 state)
{
return eval_sleep_helper(uacpi_namespace_root(), "_WAK", state);
}
static uacpi_status eval_sst(uacpi_u8 value)
{
return eval_sleep_helper(
uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SI),
"_SST", value
);
}
/*
 * Maps a sleep state to the matching _SST indicator value and evaluates
 * _SST with it.
 *
 * _SST is an optional control method invoked to set the system status
 * indicator. Its single integer argument means:
 *   0 - no system state indication (indicator off)
 *   1 - working
 *   2 - waking
 *   3 - sleeping (S1, S2 or S3)
 *   4 - sleeping with context saved to non-volatile storage
 */
static uacpi_status eval_sst_for_state(enum uacpi_sleep_state state)
{
    uacpi_u8 indicator;

    switch (state) {
    case UACPI_SLEEP_STATE_S0:
        indicator = 1;
        break;
    case UACPI_SLEEP_STATE_S1:
    case UACPI_SLEEP_STATE_S2:
    case UACPI_SLEEP_STATE_S3:
        indicator = 3;
        break;
    case UACPI_SLEEP_STATE_S4:
        indicator = 4;
        break;
    case UACPI_SLEEP_STATE_S5:
        indicator = 0;
        break;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return eval_sst(indicator);
}
/*
 * Prepares the system for entering the given sleep state: caches the
 * SLP_TYP values for the target state (and S0, for wake-up), runs _PTS and
 * updates the _SST indicator.
 */
uacpi_status uacpi_prepare_for_sleep_state(enum uacpi_sleep_state state_enum)
{
    uacpi_u8 state = state_enum;
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_S5))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = get_slp_type_for_state(
        state,
        &g_uacpi_rt_ctx.last_sleep_typ_a,
        &g_uacpi_rt_ctx.last_sleep_typ_b
    );
    if (ret != UACPI_STATUS_OK)
        return ret;

    /*
     * Best-effort: on failure the S0 values stay UACPI_SLEEP_TYP_INVALID,
     * which the wake path checks for. The result is deliberately discarded
     * here — the previous code assigned it to 'ret' only to immediately
     * overwrite it below, which read like a forgotten error check.
     */
    (void)get_slp_type_for_state(
        0,
        &g_uacpi_rt_ctx.s0_sleep_typ_a,
        &g_uacpi_rt_ctx.s0_sleep_typ_b
    );

    ret = eval_pts(state);
    if (uacpi_unlikely_error(ret))
        return ret;

    // _SST is optional, ignore any error
    eval_sst_for_state(state);
    return UACPI_STATUS_OK;
}
/*
 * Builds the value to write into the HW-reduced sleep control register:
 * SLP_TYP shifted into its field (masked to the field width) with SLP_EN set.
 */
static uacpi_u8 make_hw_reduced_sleep_control(uacpi_u8 slp_typ)
{
    uacpi_u8 typ_field;

    typ_field = (uacpi_u8)(slp_typ << ACPI_SLP_CNT_SLP_TYP_IDX);
    typ_field &= ACPI_SLP_CNT_SLP_TYP_MASK;

    return typ_field | ACPI_SLP_CNT_SLP_EN_MASK;
}
/*
 * Puts a hardware-reduced platform to sleep via the FADT sleep control
 * register, then polls the WAK_STS bit of the sleep status register until
 * the system reports it is back in the working state.
 */
static uacpi_status enter_sleep_state_hw_reduced(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u8 sleep_control;
    uacpi_u64 wake_status;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    // Both registers are mandatory for this path
    if (!fadt->sleep_control_reg.address || !fadt->sleep_status_reg.address)
        return UACPI_STATUS_NOT_FOUND;

    // Clear any stale wake status before sleeping
    ret = uacpi_write_register_field(
        UACPI_REGISTER_FIELD_HWR_WAK_STS,
        ACPI_SLP_STS_CLEAR
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    sleep_control = make_hw_reduced_sleep_control(
        g_uacpi_rt_ctx.last_sleep_typ_a
    );

    // NOTE(review): cache flush is skipped for S4+ — presumably because
    // memory context is lost there anyway; confirm against the ACPI spec
    if (state < UACPI_SLEEP_STATE_S4)
        UACPI_ARCH_FLUSH_CPU_CACHE();
    /*
     * To put the system into a sleep state, software will write the HW-reduced
     * Sleep Type value (obtained from the \_Sx object in the DSDT) and the
     * SLP_EN bit to the sleep control register.
     */
    ret = uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control);
    if (uacpi_unlikely_error(ret))
        return ret;
    /*
     * The OSPM then polls the WAK_STS bit of the SLEEP_STATUS_REG waiting for
     * it to be one (1), indicating that the system has been transitioned
     * back to the Working state.
     */
    do {
        ret = uacpi_read_register_field(
            UACPI_REGISTER_FIELD_HWR_WAK_STS, &wake_status
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    } while (wake_status != 1);

    return UACPI_STATUS_OK;
}
/*
 * HW-reduced wake preparation: best-effort write of the cached S0 sleep
 * type back into the sleep control register. Always returns OK.
 */
static uacpi_status prepare_for_wake_from_sleep_state_hw_reduced(uacpi_u8 state)
{
    UACPI_UNUSED(state);

    // Nothing to program if we never managed to cache an S0 SLP_TYP
    if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID)
        return UACPI_STATUS_OK;

    uacpi_write_register(
        UACPI_REGISTER_SLP_CNT,
        make_hw_reduced_sleep_control(g_uacpi_rt_ctx.s0_sleep_typ_a)
    );

    return UACPI_STATUS_OK;
}
/*
 * HW-reduced wake finalization: invalidates the cached SLP_TYP values,
 * runs _WAK, clears WAK_STS and updates the _SST indicator. All steps are
 * best-effort; always returns OK.
 */
static uacpi_status wake_from_sleep_state_hw_reduced(uacpi_u8 state)
{
    // Cached values are single-use; force a fresh _Sx read before next sleep
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;

    // Set the status to 2 (waking) while we execute the wake method.
    eval_sst(2);

    eval_wak(state);

    // Apparently some BIOSes expect us to clear this, so do it
    uacpi_write_register_field(
        UACPI_REGISTER_FIELD_HWR_WAK_STS, ACPI_SLP_STS_CLEAR
    );

    // Now that we're awake set the status to 1 (running)
    eval_sst(1);

    return UACPI_STATUS_OK;
}
/*
 * Enters the given sleep state. The SLP_TYP values must have been cached
 * by a prior uacpi_prepare_for_sleep_state() call, otherwise they are still
 * UACPI_SLEEP_TYP_INVALID and rejected below.
 */
uacpi_status uacpi_enter_sleep_state(enum uacpi_sleep_state state_enum)
{
    uacpi_u8 sleep_state = state_enum;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

    if (uacpi_unlikely(sleep_state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    if (uacpi_unlikely(g_uacpi_rt_ctx.last_sleep_typ_a > ACPI_SLP_TYP_MAX ||
                       g_uacpi_rt_ctx.last_sleep_typ_b > ACPI_SLP_TYP_MAX)) {
        uacpi_error("invalid SLP_TYP values: 0x%02X:0x%02X\n",
                    g_uacpi_rt_ctx.last_sleep_typ_a,
                    g_uacpi_rt_ctx.last_sleep_typ_b);
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    return CALL_SLEEP_FN(enter_sleep_state, sleep_state);
}
/*
 * Prepares the system for waking from the given sleep state; forwarded via
 * CALL_SLEEP_FN to the HW-reduced or legacy implementation.
 */
uacpi_status uacpi_prepare_for_wake_from_sleep_state(
    uacpi_sleep_state state_enum
)
{
    uacpi_u8 state = state_enum;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    return CALL_SLEEP_FN(prepare_for_wake_from_sleep_state, state);
}

/*
 * Finalizes a wake-up from the given sleep state; forwarded via
 * CALL_SLEEP_FN to the HW-reduced or legacy implementation.
 */
uacpi_status uacpi_wake_from_sleep_state(
    uacpi_sleep_state state_enum
)
{
    uacpi_u8 state = state_enum;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    return CALL_SLEEP_FN(wake_from_sleep_state, state);
}
/*
 * Performs a hardware reset through the FADT RESET_REG, supporting the
 * SystemIO, SystemMemory and PCI config address spaces. Returns NOT_FOUND
 * if the platform advertises no reset register, HARDWARE_TIMEOUT if the
 * write succeeded but the system kept running.
 */
uacpi_status uacpi_reboot(void)
{
    uacpi_status ret;
    uacpi_handle pci_dev = UACPI_NULL, io_handle = UACPI_NULL;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    struct acpi_gas *reset_reg = &fadt->reset_reg;

    /*
     * Allow restarting earlier than namespace load so that the kernel can
     * use this in case of some initialization error.
     */
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    // Reset support is optional: need both the FADT flag and an address
    if (!(fadt->flags & ACPI_RESET_REG_SUP) || !reset_reg->address)
        return UACPI_STATUS_NOT_FOUND;

    switch (reset_reg->address_space_id) {
    case UACPI_ADDRESS_SPACE_SYSTEM_IO:
        /*
         * For SystemIO we don't do any checking, and we ignore bit width
         * because that's what NT does.
         */
        ret = uacpi_kernel_io_map(reset_reg->address, 1, &io_handle);
        if (uacpi_unlikely_error(ret))
            return ret;

        ret = uacpi_kernel_io_write8(io_handle, 0, fadt->reset_value);
        break;
    case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY:
        ret = uacpi_write_register(UACPI_REGISTER_RESET, fadt->reset_value);
        break;
    case UACPI_ADDRESS_SPACE_PCI_CONFIG: {
        uacpi_pci_address address = { 0 };

        // Bus is assumed to be 0 here
        address.segment = 0;
        address.bus = 0;
        // GAS PCI encoding: device in bits [39:32], function in [23:16],
        // register offset in the low 16 bits
        address.device = (reset_reg->address >> 32) & 0xFF;
        address.function = (reset_reg->address >> 16) & 0xFF;

        ret = uacpi_kernel_pci_device_open(address, &pci_dev);
        if (uacpi_unlikely_error(ret))
            break;

        ret = uacpi_kernel_pci_write8(
            pci_dev, reset_reg->address & 0xFFFF, fadt->reset_value
        );
        break;
    }
    default:
        uacpi_warn(
            "unable to perform a reset: unsupported address space '%s' (%d)\n",
            uacpi_address_space_to_string(reset_reg->address_space_id),
            reset_reg->address_space_id
        );
        ret = UACPI_STATUS_UNIMPLEMENTED;
    }

    if (ret == UACPI_STATUS_OK) {
        /*
         * This should've worked but we're still here.
         * Spin for a bit then give up.
         */
        uacpi_u64 stalled_time = 0;

        while (stalled_time < (1000 * 1000)) {
            uacpi_kernel_stall(100);
            stalled_time += 100;
        }

        uacpi_error("reset timeout\n");
        ret = UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    // Release whichever handle the taken branch acquired
    if (pci_dev != UACPI_NULL)
        uacpi_kernel_pci_device_close(pci_dev);
    if (io_handle != UACPI_NULL)
        uacpi_kernel_io_unmap(io_handle);

    return ret;
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,728 @@
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/utilities.h>
#ifdef UACPI_USE_BUILTIN_STRING
#ifndef uacpi_memcpy
// Byte-wise forward copy; regions must not overlap (use uacpi_memmove there)
void *uacpi_memcpy(void *dest, const void *src, uacpi_size count)
{
    uacpi_char *out = dest;
    const uacpi_char *in = src;
    uacpi_size i;

    for (i = 0; i < count; ++i)
        out[i] = in[i];

    return dest;
}
#endif
#ifndef uacpi_memmove
// Overlap-safe byte copy: copies backwards when dest overlaps src's tail
void *uacpi_memmove(void *dest, const void *src, uacpi_size count)
{
    uacpi_char *out = dest;
    const uacpi_char *in = src;

    if (src < dest) {
        // Destination overlaps the end of the source: go right-to-left
        while (count--)
            out[count] = in[count];
    } else {
        uacpi_size i;

        for (i = 0; i < count; ++i)
            out[i] = in[i];
    }

    return dest;
}
#endif
#ifndef uacpi_memset
// Fills 'count' bytes of dest with the low byte of 'ch'
void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count)
{
    uacpi_u8 *out = dest;
    uacpi_u8 byte = ch;

    for (; count; --count)
        *out++ = byte;

    return dest;
}
#endif
#ifndef uacpi_memcmp
// Byte-wise compare; returns the difference of the first unequal bytes
uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count)
{
    const uacpi_u8 *l = lhs;
    const uacpi_u8 *r = rhs;

    for (; count; ++l, ++r, --count) {
        if (*l != *r)
            return *l - *r;
    }

    return 0;
}
#endif
#endif // UACPI_USE_BUILTIN_STRING
#ifndef uacpi_strlen
// Length of a null-terminated string, excluding the terminator
uacpi_size uacpi_strlen(const uacpi_char *str)
{
    uacpi_size length = 0;

    while (str[length] != '\0')
        length++;

    return length;
}
#endif
#ifndef UACPI_BAREBONES_MODE
#ifndef uacpi_strnlen
// Like uacpi_strlen, but never reads past 'max' characters
uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max)
{
    uacpi_size length = 0;

    while (length < max && str[length] != '\0')
        length++;

    return length;
}
#endif
#ifndef uacpi_strcmp
// Lexicographic compare of two null-terminated strings (unsigned bytes)
uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs)
{
    const uacpi_u8 *l = (const uacpi_u8*)lhs;
    const uacpi_u8 *r = (const uacpi_u8*)rhs;

    // Skip the common prefix; stop at the first difference or terminator
    while (*l && *l == *r) {
        l++;
        r++;
    }

    return *l - *r;
}
#endif
/*
 * Copies min(src_size, dst_size) bytes from src to dst and zero-fills any
 * remaining tail of dst.
 */
void uacpi_memcpy_zerout(void *dst, const void *src,
                         uacpi_size dst_size, uacpi_size src_size)
{
    uacpi_size copied = UACPI_MIN(src_size, dst_size);

    if (copied)
        uacpi_memcpy(dst, src, copied);

    if (copied < dst_size)
        uacpi_memzero((uacpi_u8 *)dst + copied, dst_size - copied);
}
/*
 * Returns the 1-based index of the least significant set bit of 'value',
 * or 0 if no bits are set (ffsll convention).
 */
uacpi_u8 uacpi_bit_scan_forward(uacpi_u64 value)
{
#if defined(_MSC_VER) && !defined(__clang__)
    unsigned char ret;
    unsigned long index;

#ifdef _WIN64
    ret = _BitScanForward64(&index, value);
    if (ret == 0)
        return 0;

    return (uacpi_u8)index + 1;
#else
    // 32-bit MSVC has no 64-bit scan: try the low half, then the high half
    ret = _BitScanForward(&index, value);
    if (ret == 0) {
        ret = _BitScanForward(&index, value >> 32);
        if (ret == 0)
            return 0;

        // +33 = +1 for the 1-based convention, +32 for the high half
        return (uacpi_u8)index + 33;
    }

    return (uacpi_u8)index + 1;
#endif
#elif defined(__WATCOMC__)
    // TODO: Use compiler intrinsics or inline ASM here
    uacpi_u8 index;
    uacpi_u64 mask = 1;

    for (index = 1; index <= 64; index++, mask <<= 1) {
        if (value & mask) {
            return index;
        }
    }

    return 0;
#else
    return __builtin_ffsll(value);
#endif
}
/*
 * Returns the 1-based index of the most significant set bit of 'value',
 * or 0 if no bits are set.
 */
uacpi_u8 uacpi_bit_scan_backward(uacpi_u64 value)
{
#if defined(_MSC_VER) && !defined(__clang__)
    unsigned char ret;
    unsigned long index;

#ifdef _WIN64
    ret = _BitScanReverse64(&index, value);
    if (ret == 0)
        return 0;

    return (uacpi_u8)index + 1;
#else
    // 32-bit MSVC: try the high half first, fall back to the low half
    ret = _BitScanReverse(&index, value >> 32);
    if (ret == 0) {
        ret = _BitScanReverse(&index, value);
        if (ret == 0)
            return 0;

        return (uacpi_u8)index + 1;
    }

    // +33 = +1 for the 1-based convention, +32 for the high half
    return (uacpi_u8)index + 33;
#endif
#elif defined(__WATCOMC__)
    // TODO: Use compiler intrinsics or inline ASM here
    uacpi_u8 index;
    uacpi_u64 mask = (1ull << 63);

    for (index = 64; index > 0; index--, mask >>= 1) {
        if (value & mask) {
            return index;
        }
    }

    return 0;
#else
    if (value == 0)
        return 0;

    return 64 - __builtin_clzll(value);
#endif
}
#ifndef UACPI_NATIVE_ALLOC_ZEROED
/*
 * Fallback zeroed allocation built on top of uacpi_kernel_alloc().
 * Returns UACPI_NULL on allocation failure.
 */
void *uacpi_builtin_alloc_zeroed(uacpi_size size)
{
    void *ptr = uacpi_kernel_alloc(size);

    if (uacpi_unlikely(ptr == UACPI_NULL))
        return UACPI_NULL;

    uacpi_memzero(ptr, size);
    return ptr;
}
#endif
#endif // !UACPI_BAREBONES_MODE
#ifndef uacpi_vsnprintf
// Output state shared by the uacpi_vsnprintf helpers
struct fmt_buf_state {
    uacpi_char *buffer;       // destination buffer (may be smaller than output)
    uacpi_size capacity;      // bytes available in 'buffer'
    uacpi_size bytes_written; // bytes the complete output requires
};

// A parsed printf-style conversion specification
struct fmt_spec {
    uacpi_u8 is_signed : 1;
    uacpi_u8 prepend : 1;        // '+' or ' ' flag seen
    uacpi_u8 uppercase : 1;      // 'X' conversion
    uacpi_u8 left_justify : 1;   // '-' flag
    uacpi_u8 alternate_form : 1; // '#' flag
    uacpi_u8 has_precision : 1;
    uacpi_char pad_char;         // ' ' by default, '0' with the zero flag
    uacpi_char prepend_char;     // which of '+'/' ' to prepend
    uacpi_u64 min_width;
    uacpi_u64 precision;
    uacpi_u32 base;              // 8, 10 or 16
};
// Emits one character; characters past capacity are counted but not stored
static void write_one(struct fmt_buf_state *fb_state, uacpi_char c)
{
    uacpi_size pos = fb_state->bytes_written++;

    if (pos < fb_state->capacity)
        fb_state->buffer[pos] = c;
}
// Emits 'count' characters; only what fits is stored, all are counted
static void write_many(
    struct fmt_buf_state *fb_state, const uacpi_char *string, uacpi_size count
)
{
    uacpi_size pos = fb_state->bytes_written;

    if (pos < fb_state->capacity) {
        uacpi_size room = fb_state->capacity - pos;

        uacpi_memcpy(&fb_state->buffer[pos], string, UACPI_MIN(count, room));
    }

    fb_state->bytes_written = pos + count;
}
// Maps a value in [0, 15] to its hex digit in the requested case
static uacpi_char hex_char(uacpi_bool upper, uacpi_u64 value)
{
    static const uacpi_char lower_hex[] = "0123456789abcdef";
    static const uacpi_char upper_hex[] = "0123456789ABCDEF";

    if (upper)
        return upper_hex[value];

    return lower_hex[value];
}
/*
 * Pads the field out to min_width given a representation of repr_size
 * characters. Left-justified fields are always padded with spaces.
 */
static void write_padding(
    struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_size repr_size
)
{
    uacpi_u64 remaining = fm->min_width;

    if (remaining <= repr_size)
        return;

    for (remaining -= repr_size; remaining; remaining--)
        write_one(fb_state, fm->left_justify ? ' ' : fm->pad_char);
}
#define REPR_BUFFER_SIZE 32

/*
 * Renders 'value' into a temporary buffer (digits generated right-to-left)
 * and emits it honoring sign, '#' prefix, minimum width and justification.
 * Note: precision is not applied to integer conversions here.
 */
static void write_integer(
    struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_u64 value
)
{
    uacpi_char repr_buffer[REPR_BUFFER_SIZE];
    uacpi_size index = REPR_BUFFER_SIZE;
    uacpi_u64 remainder;
    uacpi_char repr;
    uacpi_bool negative = UACPI_FALSE;
    uacpi_size repr_size;

    // For signed conversions, work with the magnitude and remember the sign
    if (fm->is_signed) {
        uacpi_i64 as_ll = value;

        if (as_ll < 0) {
            value = -as_ll;
            negative = UACPI_TRUE;
        }
    }

    // Sign/prepend character goes out before any padding or digits
    if (fm->prepend || negative)
        write_one(fb_state, negative ? '-' : fm->prepend_char);

    // Fill the tail of repr_buffer with digits, least significant first
    while (value) {
        remainder = value % fm->base;
        value /= fm->base;

        if (fm->base == 16) {
            repr = hex_char(fm->uppercase, remainder);
        } else if (fm->base == 8 || fm->base == 10) {
            repr = remainder + '0';
        } else {
            repr = '?';
        }

        repr_buffer[--index] = repr;
    }
    repr_size = REPR_BUFFER_SIZE - index;

    // A zero value produced no digits above
    if (repr_size == 0) {
        repr_buffer[--index] = '0';
        repr_size = 1;
    }

    // '#' flag: "0x"/"0X" for hex, leading '0' for octal
    if (fm->alternate_form) {
        if (fm->base == 16) {
            repr_buffer[--index] = fm->uppercase ? 'X' : 'x';
            repr_buffer[--index] = '0';
            repr_size += 2;
        } else if (fm->base == 8) {
            repr_buffer[--index] = '0';
            repr_size += 1;
        }
    }

    if (fm->left_justify) {
        write_many(fb_state, &repr_buffer[index], repr_size);
        write_padding(fb_state, fm, repr_size);
    } else {
        write_padding(fb_state, fm, repr_size);
        write_many(fb_state, &repr_buffer[index], repr_size);
    }
}
/*
 * Returns UACPI_TRUE if 'string' contains at least 'characters' characters
 * (terminator excluded).
 */
static uacpi_bool string_has_at_least(
    const uacpi_char *string, uacpi_size characters
)
{
    /*
     * Guard the decrement below: with characters == 0 the unguarded
     * '--characters == 0' test would underflow and walk the entire string,
     * incorrectly answering "no" to "at least zero characters".
     */
    if (characters == 0)
        return UACPI_TRUE;

    while (*string) {
        if (--characters == 0)
            return UACPI_TRUE;

        string++;
    }

    return UACPI_FALSE;
}
/*
 * Advances *string past a run of decimal digits, storing the run length in
 * *out_size. Returns UACPI_FALSE (outputs untouched) if no digit was found.
 */
static uacpi_bool consume_digits(
    const uacpi_char **string, uacpi_size *out_size
)
{
    uacpi_size size = 0;

    for (;;) {
        // uacpi_char (not bare char) for consistency with the rest of the file
        uacpi_char c = **string;

        if (c < '0' || c > '9')
            break;

        size++;
        *string += 1;
    }

    if (size == 0)
        return UACPI_FALSE;

    *out_size = size;
    return UACPI_TRUE;
}
// Whether parse_number() treats "no digits present" as success or failure
enum parse_number_mode {
    PARSE_NUMBER_MODE_MAYBE, // digits are optional
    PARSE_NUMBER_MODE_MUST,  // digits are required
};
/*
 * Parses an optional/mandatory decimal number at *fmt into *out_value,
 * advancing *fmt past the digits. See parse_number_mode for the
 * no-digits behavior.
 */
static uacpi_bool parse_number(
    const uacpi_char **fmt, enum parse_number_mode mode, uacpi_u64 *out_value
)
{
    uacpi_size num_digits;
    const uacpi_char *digit_start = *fmt;

    if (!consume_digits(fmt, &num_digits))
        return mode != PARSE_NUMBER_MODE_MUST;

    return uacpi_string_to_integer(
        digit_start, num_digits, UACPI_BASE_DEC, out_value
    ) == UACPI_STATUS_OK;
}
// Consumes 'token' from *string if it is a prefix; advances past it on match
static uacpi_bool consume(const uacpi_char **string, const uacpi_char *token)
{
    uacpi_size token_size = uacpi_strlen(token);

    if (!string_has_at_least(*string, token_size) ||
        uacpi_memcmp(*string, token, token_size) != 0)
        return UACPI_FALSE;

    *string += token_size;
    return UACPI_TRUE;
}
// True if 'c' appears in the null-terminated character list
static uacpi_bool is_one_of(uacpi_char c, const uacpi_char *list)
{
    while (*list) {
        if (*list == c)
            return UACPI_TRUE;

        list++;
    }

    return UACPI_FALSE;
}
/*
 * If the next character of *string is one of 'list', stores it in
 * *consumed_char and advances past it.
 */
static uacpi_bool consume_one_of(
    const uacpi_char **string, const uacpi_char *list, uacpi_char *consumed_char
)
{
    uacpi_char head = **string;

    if (head == '\0' || !is_one_of(head, list))
        return UACPI_FALSE;

    *consumed_char = head;
    *string += 1;
    return UACPI_TRUE;
}
// Numeric base implied by a conversion specifier (x/X, o, everything else)
static uacpi_u32 base_from_specifier(uacpi_char specifier)
{
    if (specifier == 'x' || specifier == 'X')
        return 16;

    if (specifier == 'o')
        return 8;

    return 10;
}
// Only 'X' requests uppercase hex digits
static uacpi_bool is_uppercase_specifier(uacpi_char specifier)
{
    return specifier == 'X' ? UACPI_TRUE : UACPI_FALSE;
}
/*
 * Finds the next '%' in fmt. Returns a pointer to it (or UACPI_NULL if
 * none) and stores the number of preceding literal characters in *offset.
 */
static const uacpi_char *find_next_conversion(
    const uacpi_char *fmt, uacpi_size *offset
)
{
    const uacpi_char *cursor = fmt;

    while (*cursor != '\0' && *cursor != '%')
        cursor++;

    *offset = cursor - fmt;
    return *cursor ? cursor : UACPI_NULL;
}
/*
 * Minimal printf-style formatter. Supports flags (+ - space 0 #), width
 * and precision ('*' or inline decimal), length modifiers hh/h/l/ll/z and
 * conversions %c %s %p %d %i %o %u %x %X plus the literal %%.
 *
 * Returns the number of characters the complete output requires (excluding
 * the terminator), or -1 on a malformed format string. When capacity > 0
 * the buffer is always null-terminated, truncating if necessary.
 */
uacpi_i32 uacpi_vsnprintf(
    uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt,
    uacpi_va_list vlist
)
{
    struct fmt_buf_state fb_state = { 0 };
    uacpi_u64 value;
    const uacpi_char *next_conversion;
    uacpi_size next_offset;
    uacpi_char flag;

    fb_state.buffer = buffer;
    fb_state.capacity = capacity;
    fb_state.bytes_written = 0;

    while (*fmt) {
        struct fmt_spec fm = {
            .pad_char = ' ',
            .base = 10,
        };

        // Copy the literal text up to the next '%' verbatim
        next_conversion = find_next_conversion(fmt, &next_offset);
        if (next_offset)
            write_many(&fb_state, fmt, next_offset);

        if (!next_conversion)
            break;
        fmt = next_conversion;

        if (consume(&fmt, "%%")) {
            write_one(&fb_state, '%');
            continue;
        }

        // consume %
        fmt++;

        // Flags may appear in any order and repeat
        while (consume_one_of(&fmt, "+- 0#", &flag)) {
            switch (flag) {
            case '+':
            case ' ':
                fm.prepend = UACPI_TRUE;
                fm.prepend_char = flag;
                continue;
            case '-':
                fm.left_justify = UACPI_TRUE;
                continue;
            case '0':
                fm.pad_char = '0';
                continue;
            case '#':
                fm.alternate_form = UACPI_TRUE;
                continue;
            default:
                return -1;
            }
        }

        // Minimum field width: '*' (taken from args) or an inline number
        if (consume(&fmt, "*")) {
            fm.min_width = uacpi_va_arg(vlist, int);
        } else if (!parse_number(&fmt, PARSE_NUMBER_MODE_MAYBE, &fm.min_width)) {
            return -1;
        }

        // Optional precision, same '*'/inline encoding as width
        if (consume(&fmt, ".")) {
            fm.has_precision = UACPI_TRUE;

            if (consume(&fmt, "*")) {
                fm.precision = uacpi_va_arg(vlist, int);
            } else {
                if (!parse_number(&fmt, PARSE_NUMBER_MODE_MUST, &fm.precision))
                    return -1;
            }
        }

        flag = 0;

        if (consume(&fmt, "c")) {
            uacpi_char c = uacpi_va_arg(vlist, int);

            write_one(&fb_state, c);
            continue;
        }

        if (consume(&fmt, "s")) {
            const uacpi_char *string = uacpi_va_arg(vlist, uacpi_char*);
            uacpi_size i;

            if (uacpi_unlikely(string == UACPI_NULL))
                string = "<null>";

            // Precision caps the length; min_width space-pads on the right
            for (i = 0; (!fm.has_precision || i < fm.precision) && string[i]; ++i)
                write_one(&fb_state, string[i]);
            while (i++ < fm.min_width)
                write_one(&fb_state, ' ');
            continue;
        }

        // %p: zero-padded hex at full pointer width
        if (consume(&fmt, "p")) {
            value = (uacpi_uintptr)uacpi_va_arg(vlist, void*);
            fm.base = 16;
            fm.min_width = UACPI_POINTER_SIZE * 2;
            fm.pad_char = '0';
            goto write_int;
        }

        // Length modifiers; 'z' aliases whichever of long/long long
        // matches uacpi_size on this target
        if (consume(&fmt, "hh")) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = (signed char)uacpi_va_arg(vlist, int);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = (unsigned char)uacpi_va_arg(vlist, int);
            } else {
                return -1;
            }
            goto write_int;
        }

        if (consume(&fmt, "h")) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = (signed short)uacpi_va_arg(vlist, int);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = (unsigned short)uacpi_va_arg(vlist, int);
            } else {
                return -1;
            }
            goto write_int;
        }

        if (consume(&fmt, "ll") ||
            (sizeof(uacpi_size) == sizeof(long long) && consume(&fmt, "z"))) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = uacpi_va_arg(vlist, long long);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = uacpi_va_arg(vlist, unsigned long long);
            } else {
                return -1;
            }
            goto write_int;
        }

        if (consume(&fmt, "l") ||
            (sizeof(uacpi_size) == sizeof(long) && consume(&fmt, "z"))) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = uacpi_va_arg(vlist, long);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = uacpi_va_arg(vlist, unsigned long);
            } else {
                return -1;
            }
            goto write_int;
        }

        // No length modifier: plain 32-bit int/unsigned
        if (consume(&fmt, "d") || consume(&fmt, "i")) {
            value = uacpi_va_arg(vlist, uacpi_i32);
            fm.is_signed = UACPI_TRUE;
        } else if (consume_one_of(&fmt, "oxXu", &flag)) {
            value = uacpi_va_arg(vlist, uacpi_u32);
        } else {
            return -1;
        }

    write_int:
        // 'flag' is 0 only for %p, which pre-configured the spec above
        if (flag != 0) {
            fm.base = base_from_specifier(flag);
            fm.uppercase = is_uppercase_specifier(flag);
        }

        write_integer(&fb_state, &fm, value);
    }

    if (fb_state.capacity) {
        uacpi_size last_char;

        // Terminate at the end of the output or at the truncation point
        last_char = UACPI_MIN(fb_state.bytes_written, fb_state.capacity - 1);
        fb_state.buffer[last_char] = '\0';
    }

    return fb_state.bytes_written;
}
#endif
#ifndef uacpi_snprintf
/*
 * printf-style formatting into 'buffer' (at most 'capacity' bytes including
 * the terminator). Thin varargs wrapper around uacpi_vsnprintf(); returns
 * its result unchanged.
 */
uacpi_i32 uacpi_snprintf(
    uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ...
)
{
    uacpi_va_list vlist;
    uacpi_i32 ret;

    uacpi_va_start(vlist, fmt);
    ret = uacpi_vsnprintf(buffer, capacity, fmt, vlist);
    uacpi_va_end(vlist);

    return ret;
}
#endif
#ifndef UACPI_FORMATTED_LOGGING
/*
 * Plain-logging fallback (no UACPI_FORMATTED_LOGGING): formats into a
 * fixed-size stack buffer and hands the finished string to
 * uacpi_kernel_log().
 */
void uacpi_log(uacpi_log_level lvl, const uacpi_char *str, ...)
{
    uacpi_char buf[UACPI_PLAIN_LOG_BUFFER_SIZE];
    int ret;

    uacpi_va_list vlist;
    uacpi_va_start(vlist, str);

    ret = uacpi_vsnprintf(buf, sizeof(buf), str, vlist);
    if (uacpi_unlikely(ret < 0)) {
        // Fix: va_end must run on every path, including this early return
        uacpi_va_end(vlist);
        return;
    }

    /*
     * If this log message is too large for the configured buffer size, cut off
     * the end and transform into "...\n" to indicate that it didn't fit and
     * prevent the newline from being truncated.
     */
    if (uacpi_unlikely(ret >= UACPI_PLAIN_LOG_BUFFER_SIZE)) {
        // buf[SIZE - 1] is already '\0' courtesy of uacpi_vsnprintf
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 5] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 4] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 3] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 2] = '\n';
    }

    uacpi_kernel_log(lvl, buf);
    uacpi_va_end(vlist);
}
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,998 @@
#include <uacpi/uacpi.h>
#include <uacpi/acpi.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/tables.h>
#include <uacpi/internal/interpreter.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/internal/notify.h>
#include <uacpi/internal/osi.h>
#include <uacpi/internal/registers.h>
struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 };
// Sets the runtime log level; 0 selects the built-in default
void uacpi_context_set_log_level(uacpi_log_level lvl)
{
    g_uacpi_rt_ctx.log_level = lvl ? lvl : UACPI_DEFAULT_LOG_LEVEL;
}
// Ensures a log level is configured and prints the library version once
void uacpi_logger_initialize(void)
{
    static uacpi_bool version_printed = UACPI_FALSE;

    if (g_uacpi_rt_ctx.log_level == 0)
        uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL);

    if (version_printed)
        return;

    version_printed = UACPI_TRUE;
    uacpi_info(
        "starting uACPI, version %d.%d.%d\n",
        UACPI_MAJOR, UACPI_MINOR, UACPI_PATCH
    );
}
// Enables/disables proactive table checksum verification via the flag bit
void uacpi_context_set_proactive_table_checksum(uacpi_bool setting)
{
    uacpi_u64 flags = g_uacpi_rt_ctx.flags & ~UACPI_FLAG_PROACTIVE_TBL_CSUM;

    if (setting)
        flags |= UACPI_FLAG_PROACTIVE_TBL_CSUM;

    g_uacpi_rt_ctx.flags = flags;
}
/*
 * Maps a uacpi_status to a static, human-readable description suitable for
 * log messages. Unknown values yield "<invalid status>".
 */
const uacpi_char *uacpi_status_to_string(uacpi_status st)
{
    switch (st) {
    case UACPI_STATUS_OK:
        return "no error";
    case UACPI_STATUS_MAPPING_FAILED:
        return "failed to map memory";
    case UACPI_STATUS_OUT_OF_MEMORY:
        return "out of memory";
    case UACPI_STATUS_BAD_CHECKSUM:
        return "bad table checksum";
    case UACPI_STATUS_INVALID_SIGNATURE:
        return "invalid table signature";
    case UACPI_STATUS_INVALID_TABLE_LENGTH:
        return "invalid table length";
    case UACPI_STATUS_NOT_FOUND:
        return "not found";
    case UACPI_STATUS_INVALID_ARGUMENT:
        return "invalid argument";
    case UACPI_STATUS_UNIMPLEMENTED:
        return "unimplemented";
    case UACPI_STATUS_ALREADY_EXISTS:
        return "already exists";
    case UACPI_STATUS_INTERNAL_ERROR:
        return "internal error";
    case UACPI_STATUS_TYPE_MISMATCH:
        return "object type mismatch";
    case UACPI_STATUS_INIT_LEVEL_MISMATCH:
        return "init level too low/high for this action";
    case UACPI_STATUS_NAMESPACE_NODE_DANGLING:
        return "attempting to use a dangling namespace node";
    case UACPI_STATUS_NO_HANDLER:
        return "no handler found";
    case UACPI_STATUS_NO_RESOURCE_END_TAG:
        return "resource template without an end tag";
    case UACPI_STATUS_COMPILED_OUT:
        return "this functionality has been compiled out of this build";
    case UACPI_STATUS_HARDWARE_TIMEOUT:
        return "timed out waiting for hardware response";
    case UACPI_STATUS_TIMEOUT:
        return "wait timed out";
    case UACPI_STATUS_OVERRIDDEN:
        return "the requested action has been overridden";
    case UACPI_STATUS_DENIED:
        return "the requested action has been denied";
    // AML interpreter errors below
    case UACPI_STATUS_AML_UNDEFINED_REFERENCE:
        return "AML referenced an undefined object";
    case UACPI_STATUS_AML_INVALID_NAMESTRING:
        return "invalid AML name string";
    case UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS:
        return "object already exists";
    case UACPI_STATUS_AML_INVALID_OPCODE:
        return "invalid AML opcode";
    case UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE:
        return "incompatible AML object type";
    case UACPI_STATUS_AML_BAD_ENCODING:
        return "bad AML instruction encoding";
    case UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX:
        return "out of bounds AML index";
    case UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH:
        return "AML attempted to acquire a mutex with a lower sync level";
    case UACPI_STATUS_AML_INVALID_RESOURCE:
        return "invalid resource template encoding or type";
    case UACPI_STATUS_AML_LOOP_TIMEOUT:
        return "hanging AML while loop";
    case UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT:
        return "reached maximum AML call stack depth";
    default:
        return "<invalid status>";
    }
}
/*
 * Tears down all subsystem state and zeroes the runtime context, roughly
 * in reverse initialization order. Used both for explicit shutdown and for
 * rolling back a failed uacpi_initialize()/uacpi_namespace_load().
 */
void uacpi_state_reset(void)
{
#ifndef UACPI_BAREBONES_MODE
    uacpi_deinitialize_namespace();
    uacpi_deinitialize_interfaces();
    uacpi_deinitialize_events();
    uacpi_deinitialize_notify();
    uacpi_deinitialize_opregion();
#endif

    uacpi_deinitialize_tables();

#ifndef UACPI_BAREBONES_MODE
#ifndef UACPI_REDUCED_HARDWARE
    // Restore legacy (SMI) mode if the initial ACPI mode entry switched it
    if (g_uacpi_rt_ctx.was_in_legacy_mode)
        uacpi_leave_acpi_mode();
#endif

    uacpi_deinitialize_registers();

#ifndef UACPI_REDUCED_HARDWARE
    if (g_uacpi_rt_ctx.global_lock_event)
        uacpi_kernel_free_event(g_uacpi_rt_ctx.global_lock_event);
    if (g_uacpi_rt_ctx.global_lock_spinlock)
        uacpi_kernel_free_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
#endif
#endif // !UACPI_BAREBONES_MODE

    // Also resets init_level back to UACPI_INIT_LEVEL_EARLY (0)
    uacpi_memzero(&g_uacpi_rt_ctx, sizeof(g_uacpi_rt_ctx));

#if defined(UACPI_KERNEL_INITIALIZATION) && !defined(UACPI_BAREBONES_MODE)
    uacpi_kernel_deinitialize();
#endif
}
#ifndef UACPI_BAREBONES_MODE
// Sets the AML while-loop timeout; 0 selects the built-in default
void uacpi_context_set_loop_timeout(uacpi_u32 seconds)
{
    g_uacpi_rt_ctx.loop_timeout_seconds =
        seconds ? seconds : UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS;
}
// Sets the maximum AML call stack depth; 0 selects the built-in default
void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth)
{
    g_uacpi_rt_ctx.max_call_stack_depth =
        depth ? depth : UACPI_DEFAULT_MAX_CALL_STACK_DEPTH;
}
// Returns the currently configured AML while-loop timeout in seconds
uacpi_u32 uacpi_context_get_loop_timeout(void)
{
    return g_uacpi_rt_ctx.loop_timeout_seconds;
}
#ifndef UACPI_REDUCED_HARDWARE
// Hardware power-management mode, as reported by the SCI_EN register field
enum hw_mode {
    HW_MODE_ACPI = 0,
    HW_MODE_LEGACY = 1,
};
/*
 * Reads the current hardware mode from SCI_EN. Platforms without an SMI
 * command port are always in ACPI mode; read failures report legacy mode.
 */
static enum hw_mode read_mode(void)
{
    uacpi_status ret;
    uacpi_u64 sci_en;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    if (!fadt->smi_cmd)
        return HW_MODE_ACPI;

    ret = uacpi_read_register_field(UACPI_REGISTER_FIELD_SCI_EN, &sci_en);
    if (uacpi_unlikely_error(ret))
        return HW_MODE_LEGACY;

    return sci_en ? HW_MODE_ACPI : HW_MODE_LEGACY;
}
/*
 * Requests a hardware mode transition by writing the FADT-provided command
 * to the SMI command port, then waits for the hardware to acknowledge.
 */
static uacpi_status set_mode(enum hw_mode mode)
{
    uacpi_status ret;
    uacpi_u64 command, stalled_time = 0;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    if (uacpi_unlikely(!fadt->smi_cmd)) {
        uacpi_error("SMI_CMD is not implemented by the firmware\n");
        return UACPI_STATUS_NOT_FOUND;
    }

    if (uacpi_unlikely(!fadt->acpi_enable && !fadt->acpi_disable)) {
        uacpi_error("mode transition is not implemented by the hardware\n");
        return UACPI_STATUS_NOT_FOUND;
    }

    if (mode == HW_MODE_ACPI)
        command = fadt->acpi_enable;
    else if (mode == HW_MODE_LEGACY)
        command = fadt->acpi_disable;
    else
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_write_register(UACPI_REGISTER_SMI_CMD, command);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Allow up to 5 seconds for the hardware to enter the desired mode
    while (stalled_time < (5 * 1000 * 1000)) {
        if (read_mode() == mode)
            return UACPI_STATUS_OK;

        uacpi_kernel_stall(100);
        stalled_time += 100;
    }

    uacpi_error("hardware time out while changing modes\n");
    return UACPI_STATUS_HARDWARE_TIMEOUT;
}
/*
 * Transitions the hardware into 'mode' unless it is already there. When
 * did_change is non-NULL it is set to UACPI_TRUE only if a transition
 * actually happened.
 */
static uacpi_status enter_mode(enum hw_mode mode, uacpi_bool *did_change)
{
    uacpi_status status;
    const uacpi_char *mode_str;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    // Hardware-reduced platforms have no legacy mode to transition from
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    mode_str = (mode == HW_MODE_LEGACY) ? "legacy" : "acpi";

    if (read_mode() == mode) {
        uacpi_trace("%s mode already enabled\n", mode_str);
        return UACPI_STATUS_OK;
    }

    status = set_mode(mode);
    if (uacpi_unlikely_error(status)) {
        uacpi_warn(
            "unable to enter %s mode: %s\n",
            mode_str, uacpi_status_to_string(status)
        );
        return status;
    }

    uacpi_trace("entered %s mode\n", mode_str);
    if (did_change != UACPI_NULL)
        *did_change = UACPI_TRUE;

    return UACPI_STATUS_OK;
}
// Switches the hardware into ACPI mode
uacpi_status uacpi_enter_acpi_mode(void)
{
    return enter_mode(HW_MODE_ACPI, UACPI_NULL);
}

// Switches the hardware back into legacy (SMI) mode
uacpi_status uacpi_leave_acpi_mode(void)
{
    return enter_mode(HW_MODE_LEGACY, UACPI_NULL);
}

// Initial ACPI mode entry; records whether a transition actually happened
// so uacpi_state_reset() knows to restore legacy mode later
static void enter_acpi_mode_initial(void)
{
    enter_mode(HW_MODE_ACPI, &g_uacpi_rt_ctx.was_in_legacy_mode);
}
#else
// Hardware-reduced build: there is no legacy mode, nothing to do
static void enter_acpi_mode_initial(void) { }
#endif
// Returns the library's current initialization level
uacpi_init_level uacpi_get_current_init_level(void)
{
    return g_uacpi_rt_ctx.init_level;
}
/*
 * Brings the library from UACPI_INIT_LEVEL_EARLY to
 * UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED: sets runtime context defaults,
 * then initializes tables, registers, early events, opregions, interfaces,
 * namespace and notifications, optionally entering ACPI mode at the end.
 * Any subsystem failure rolls everything back via uacpi_state_reset().
 */
uacpi_status uacpi_initialize(uacpi_u64 flags)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY);

#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_EARLY);
    if (uacpi_unlikely_error(ret))
        return ret;
#endif

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED;
    // SLP_TYP values are unknown until uacpi_prepare_for_sleep_state()
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.flags = flags;

    uacpi_logger_initialize();

    if (g_uacpi_rt_ctx.loop_timeout_seconds == 0)
        uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS);
    if (g_uacpi_rt_ctx.max_call_stack_depth == 0)
        uacpi_context_set_max_call_stack_depth(UACPI_DEFAULT_MAX_CALL_STACK_DEPTH);

    ret = uacpi_initialize_tables();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_registers();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_events_early();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_opregion();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_interfaces();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_namespace();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_notify();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    // Result deliberately not checked here
    uacpi_install_default_address_space_handlers();

    if (!uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE))
        enter_acpi_mode_initial();

    return UACPI_STATUS_OK;

out_fatal_error:
    uacpi_state_reset();
    return ret;
}
// Counters for AML blob loading during uacpi_namespace_load()
struct table_load_stats {
    uacpi_u32 load_counter;    // tables we attempted to load
    uacpi_u32 failure_counter; // attempts that failed
};
// Logs a table load failure at the requested severity
static void trace_table_load_failure(
    struct acpi_sdt_hdr *tbl, uacpi_log_level lvl, uacpi_status ret
)
{
    uacpi_log_lvl(
        lvl,
        "failed to load "UACPI_PRI_TBL_HDR": %s\n",
        UACPI_FMT_TBL_HDR(tbl), uacpi_status_to_string(ret)
    );
}
static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl)
{
if (tbl->flags & UACPI_TABLE_LOADED)
return UACPI_FALSE;
return uacpi_signatures_match(tbl->hdr.signature, ACPI_SSDT_SIGNATURE) ||
uacpi_signatures_match(tbl->hdr.signature, ACPI_PSDT_SIGNATURE);
}
// Converts a nanosecond interval into whole milliseconds
static uacpi_u64 elapsed_ms(uacpi_u64 begin_ns, uacpi_u64 end_ns)
{
    uacpi_u64 delta_ns = end_ns - begin_ns;

    return delta_ns / (1000ull * 1000ull);
}
/*
 * Sanity-checks two timestamps taken around namespace load. Warns and
 * returns UACPI_TRUE if the kernel time source looks stubbed, too coarse,
 * or drifting backwards.
 */
static uacpi_bool warn_on_bad_timesource(uacpi_u64 begin_ts, uacpi_u64 end_ts)
{
    const uacpi_char *reason = UACPI_NULL;

    if (uacpi_unlikely(begin_ts == 0 && end_ts == 0))
        reason = "uacpi_kernel_get_nanoseconds_since_boot() appears to be a stub";
    else if (uacpi_unlikely(begin_ts == end_ts))
        reason = "poor time source precision detected";
    else if (uacpi_unlikely(end_ts < begin_ts))
        reason = "time source backwards drift detected";

    if (reason == UACPI_NULL)
        return UACPI_FALSE;

    uacpi_warn("%s, this may cause problems\n", reason);
    return UACPI_TRUE;
}
/*
 * Load the ACPI namespace: execute the DSDT first, then every SSDT/PSDT
 * that has not been loaded yet, timing the whole pass. Individual table
 * load failures are logged and counted but are not fatal; infrastructure
 * errors (missing DSDT, table iteration failure, event init failure)
 * abort and fully reset the library state.
 */
uacpi_status uacpi_namespace_load(void)
{
    struct uacpi_table tbl;
    uacpi_status ret;
    uacpi_u64 begin_ts, end_ts;
    struct table_load_stats st = { 0 };
    uacpi_size cur_index;
    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
#endif
    begin_ts = uacpi_kernel_get_nanoseconds_since_boot();
    // The DSDT is mandatory; failing to find it is a hard error
    ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret));
        goto out_fatal_error;
    }
    /*
     * A DSDT that fails to load is only counted as an error; execution
     * continues so as much of the namespace as possible gets populated.
     */
    ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
    if (uacpi_unlikely_error(ret)) {
        trace_table_load_failure(tbl.hdr, UACPI_LOG_ERROR, ret);
        st.failure_counter++;
    }
    st.load_counter++;
    uacpi_table_unref(&tbl);
    // Load every remaining not-yet-loaded SSDT/PSDT in table-index order
    for (cur_index = 0;; cur_index = tbl.index + 1) {
        ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl);
        if (ret != UACPI_STATUS_OK) {
            if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND))
                goto out_fatal_error;
            break;
        }
        ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
        if (uacpi_unlikely_error(ret)) {
            trace_table_load_failure(tbl.hdr, UACPI_LOG_WARN, ret);
            st.failure_counter++;
        }
        st.load_counter++;
        uacpi_table_unref(&tbl);
    }
    end_ts = uacpi_kernel_get_nanoseconds_since_boot();
    g_uacpi_rt_ctx.bad_timesource = warn_on_bad_timesource(begin_ts, end_ts);
    /*
     * Only print ops/sec statistics when the time source looks sane and
     * every table loaded successfully; otherwise keep the summary terse.
     */
    if (uacpi_unlikely(st.failure_counter != 0 || g_uacpi_rt_ctx.bad_timesource)) {
        uacpi_info(
            "loaded %u AML blob%s (%u error%s)\n",
            st.load_counter, st.load_counter > 1 ? "s" : "", st.failure_counter,
            st.failure_counter == 1 ? "" : "s"
        );
    } else {
        uacpi_u64 ops = g_uacpi_rt_ctx.opcodes_executed;
        uacpi_u64 ops_per_sec = ops * UACPI_NANOSECONDS_PER_SEC;
        // Safe: a sane time source implies end_ts > begin_ts (checked above)
        ops_per_sec /= end_ts - begin_ts;
        uacpi_info(
            "successfully loaded %u AML blob%s, %"UACPI_PRIu64" ops in "
            "%"UACPI_PRIu64"ms (avg %"UACPI_PRIu64"/s)\n",
            st.load_counter, st.load_counter > 1 ? "s" : "",
            UACPI_FMT64(ops), UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
            UACPI_FMT64(ops_per_sec)
        );
    }
    ret = uacpi_initialize_events();
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("event initialization failed: %s\n",
                    uacpi_status_to_string(ret));
        goto out_fatal_error;
    }
    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_LOADED;
    return UACPI_STATUS_OK;
out_fatal_error:
    uacpi_state_reset();
    return ret;
}
// Statistics accumulated while walking the namespace during initialization
struct ns_init_context {
    uacpi_size ini_executed;   // _INI methods actually run
    uacpi_size ini_errors;     // _INI runs that returned an error
    uacpi_size sta_executed;   // _STA methods actually run
    uacpi_size sta_errors;     // _STA runs that returned an error
    uacpi_size devices;        // Device/Processor nodes encountered
    uacpi_size thermal_zones;  // ThermalZone nodes encountered
};
/*
 * Execute the _INI method of 'node' if present, updating the execution
 * and error counters. A missing _INI is not counted at all.
 */
static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node)
{
    uacpi_status ret = uacpi_eval(node, "_INI", UACPI_NULL, UACPI_NULL);

    if (ret == UACPI_STATUS_NOT_FOUND)
        return;

    ctx->ini_executed++;

    if (uacpi_unlikely_error(ret))
        ctx->ini_errors++;
}
/*
 * Evaluate _STA for 'node' into '*value', updating the execution and
 * error counters.
 */
static uacpi_status sta_eval(
    struct ns_init_context *ctx, uacpi_namespace_node *node,
    uacpi_u32 *value
)
{
    uacpi_status ret = uacpi_eval_sta(node, value);

    /*
     * NOTE(review): 0xFFFFFFFF is treated as "no _STA was actually run"
     * and skips the counters — presumably the all-bits-set default
     * uacpi_eval_sta produces when the method is absent; confirm against
     * its implementation.
     */
    if (*value == 0xFFFFFFFF)
        return ret;

    ctx->sta_executed++;

    if (uacpi_unlikely_error(ret))
        ctx->sta_errors++;

    return ret;
}
/*
 * Namespace iteration callback implementing step 4 of initialization:
 * run _STA and, for present devices, _INI on every relevant node.
 * 'opaque' is the struct ns_init_context accumulating statistics.
 */
static uacpi_iteration_decision do_sta_ini(
    void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    struct ns_init_context *ctx = opaque;
    uacpi_status ret;
    uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
    uacpi_u32 sta_ret;
    UACPI_UNUSED(depth);
    // We don't care about aliases
    if (uacpi_namespace_node_is_alias(node))
        return UACPI_ITERATION_DECISION_NEXT_PEER;
    // On error 'type' stays UNINITIALIZED, routing us to the default branch
    ret = uacpi_namespace_node_type(node, &type);
    switch (type) {
    case UACPI_OBJECT_DEVICE:
    case UACPI_OBJECT_PROCESSOR:
        ctx->devices++;
        break;
    case UACPI_OBJECT_THERMAL_ZONE:
        ctx->thermal_zones++;
        break;
    default:
        /*
         * Non-device nodes are skipped, except the predefined \_TZ scope,
         * which still falls through to the _STA/_INI evaluation below.
         */
        if (node != uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_TZ))
            return UACPI_ITERATION_DECISION_CONTINUE;
    }
    ret = sta_eval(ctx, node, &sta_ret);
    if (uacpi_unlikely_error(ret))
        return UACPI_ITERATION_DECISION_CONTINUE;
    if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) {
        // Neither present nor functioning: skip this node and its subtree
        if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING))
            return UACPI_ITERATION_DECISION_NEXT_PEER;
        /*
         * ACPI 6.5 specification:
         * _STA may return bit 0 clear (not present) with bit [3] set (device
         * is functional). This case is used to indicate a valid device for
         * which no device driver should be loaded (for example, a bridge
         * device.) Children of this device may be present and valid. OSPM
         * should continue enumeration below a device whose _STA returns this
         * bit combination.
         */
        return UACPI_ITERATION_DECISION_CONTINUE;
    }
    ini_eval(ctx, node);
    return UACPI_ITERATION_DECISION_CONTINUE;
}
/*
 * Initialize the loaded namespace: execute the global and \_SB _INI
 * methods, run _REG for default address space handlers, then walk the
 * whole tree running _STA/_INI per node. On success the init level
 * advances to NAMESPACE_INITIALIZED.
 */
uacpi_status uacpi_namespace_initialize(void)
{
    struct ns_init_context ctx = { 0 };
    uacpi_namespace_node *root;
    uacpi_u64 begin_ts, end_ts;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *handler;
    uacpi_status ret = UACPI_STATUS_OK;
    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
    if (uacpi_unlikely_error(ret))
        goto out;
#endif
    /*
     * Initialization order here is identical to ACPICA because ACPI
     * specification doesn't really have any detailed steps that explain
     * how to do it.
     */
    root = uacpi_namespace_root();
    begin_ts = uacpi_kernel_get_nanoseconds_since_boot();
    // Step 1 - Execute \_INI
    ini_eval(&ctx, root);
    // Step 2 - Execute \_SB._INI
    ini_eval(
        &ctx, uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SB)
    );
    /*
     * Step 3 - Run _REG methods for all globally installed
     * address space handlers.
     */
    handlers = uacpi_node_get_address_space_handlers(root);
    if (handlers) {
        handler = handlers->head;
        while (handler) {
            if (uacpi_address_space_handler_is_default(handler))
                uacpi_reg_all_opregions(root, handler->space);
            handler = handler->next;
        }
    }
    // Step 4 - Run all other _STA and _INI methods
    uacpi_namespace_for_each_child(
        root, do_sta_ini, UACPI_NULL,
        UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, &ctx
    );
    end_ts = uacpi_kernel_get_nanoseconds_since_boot();
    // Omit the elapsed-time figure if the time source was flagged as unusable
    if (uacpi_likely(!g_uacpi_rt_ctx.bad_timesource)) {
        uacpi_info(
            "namespace initialization done in %"UACPI_PRIu64"ms: "
            "%zu devices, %zu thermal zones\n",
            UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
            ctx.devices, ctx.thermal_zones
        );
    } else {
        uacpi_info(
            "namespace initialization done: %zu devices, %zu thermal zones\n",
            ctx.devices, ctx.thermal_zones
        );
    }
    uacpi_trace(
        "_STA calls: %zu (%zu errors), _INI calls: %zu (%zu errors)\n",
        ctx.sta_executed, ctx.sta_errors, ctx.ini_executed,
        ctx.ini_errors
    );
    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED;
#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
out:
    // Any kernel-init failure is fatal and resets the entire library state
    if (uacpi_unlikely_error(ret))
        uacpi_state_reset();
#endif
    return ret;
}
/*
 * Evaluate an object at 'path' relative to 'parent' (either may be NULL,
 * but not both). Methods are executed with 'args' under the namespace
 * write lock; any other object type yields a deep copy of its current
 * value instead. 'out_obj' may be NULL if the caller doesn't need the
 * result.
 */
uacpi_status uacpi_eval(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **out_obj
)
{
    struct uacpi_namespace_node *node;
    uacpi_control_method *method;
    uacpi_object *obj;
    uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_unlikely(parent == UACPI_NULL && path == UACPI_NULL))
        return ret;
    // Resolution and object inspection only need the read lock
    ret = uacpi_namespace_read_lock();
    if (uacpi_unlikely_error(ret))
        return ret;
    if (path != UACPI_NULL) {
        ret = uacpi_namespace_node_resolve(
            parent, path, UACPI_SHOULD_LOCK_NO,
            UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_YES,
            &node
        );
        if (uacpi_unlikely_error(ret))
            goto out_read_unlock;
    } else {
        node = parent;
    }
    obj = uacpi_namespace_node_get_object(node);
    if (uacpi_unlikely(obj == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out_read_unlock;
    }
    if (obj->type != UACPI_OBJECT_METHOD) {
        /*
         * Not a method: hand back a deep copy of the object's value rather
         * than a reference into the live namespace.
         */
        uacpi_object *new_obj;
        if (uacpi_unlikely(out_obj == UACPI_NULL))
            goto out_read_unlock;
        new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
        if (uacpi_unlikely(new_obj == UACPI_NULL)) {
            ret = UACPI_STATUS_OUT_OF_MEMORY;
            goto out_read_unlock;
        }
        ret = uacpi_object_assign(
            new_obj, obj, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_object_unref(new_obj);
            goto out_read_unlock;
        }
        // Success path falls through the label below to unlock and return
        *out_obj = new_obj;
    out_read_unlock:
        uacpi_namespace_read_unlock();
        return ret;
    }
    /*
     * Take our own reference on the method before dropping the read lock,
     * keeping it alive across the read -> write lock switch.
     */
    method = obj->method;
    uacpi_shareable_ref(method);
    uacpi_namespace_read_unlock();
    // Upgrade to a write-lock since we're about to run a method
    ret = uacpi_namespace_write_lock();
    if (uacpi_unlikely_error(ret))
        goto out_no_write_lock;
    ret = uacpi_execute_control_method(node, method, args, out_obj);
    uacpi_namespace_write_unlock();
out_no_write_lock:
    uacpi_method_unref(method);
    return ret;
}
/*
 * Convenience wrapper around uacpi_eval(): evaluate 'path' relative to
 * 'parent' without any arguments.
 */
uacpi_status uacpi_eval_simple(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    const uacpi_object_array *no_args = UACPI_NULL;

    return uacpi_eval(parent, path, no_args, ret);
}
/*
 * Convenience wrapper around uacpi_eval(): run a method for its side
 * effects only, discarding any returned object.
 */
uacpi_status uacpi_execute(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args
)
{
    uacpi_object **no_ret = UACPI_NULL;

    return uacpi_eval(parent, path, args, no_ret);
}
/*
 * Convenience wrapper: run a method with no arguments, discarding any
 * returned object.
 */
uacpi_status uacpi_execute_simple(
    uacpi_namespace_node *parent, const uacpi_char *path
)
{
    return uacpi_execute(parent, path, UACPI_NULL);
}
/*
 * Helper macros for trace_invalid_return_type() below. Both take a printf
 * path format string followed by its arguments and the expected type mask.
 */
#define TRACE_BAD_RET(path_fmt, type, ...) \
    uacpi_warn( \
        "unexpected '%s' object returned by method "path_fmt \
        ", expected type mask: %08X\n", uacpi_object_type_to_string(type), \
        __VA_ARGS__ \
    )
#define TRACE_NO_RET(path_fmt, ...) \
    uacpi_warn( \
        "no value returned from method "path_fmt", expected type mask: " \
        "%08X\n", __VA_ARGS__ \
    )
/*
 * Log a warning about a method whose return value's type is not within
 * 'expected_mask'. An 'actual_type' of UACPI_OBJECT_UNINITIALIZED means
 * the method returned nothing at all. The most precise path available is
 * printed: an absolute 'path' is used verbatim, otherwise the parent's
 * absolute path is generated (and freed afterwards) with 'path' appended.
 */
static void trace_invalid_return_type(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_object_type_bits expected_mask, uacpi_object_type actual_type
)
{
    const uacpi_char *abs_path;
    uacpi_bool dynamic_abs_path = UACPI_FALSE;
    if (parent == UACPI_NULL || (path != UACPI_NULL && path[0] == '\\')) {
        abs_path = path;
    } else {
        abs_path = uacpi_namespace_node_generate_absolute_path(parent);
        dynamic_abs_path = UACPI_TRUE;
    }
    if (dynamic_abs_path && path != UACPI_NULL) {
        // Relative path: print as "<parent abs path>.<relative path>"
        if (actual_type == UACPI_OBJECT_UNINITIALIZED)
            TRACE_NO_RET("%s.%s", abs_path, path, expected_mask);
        else
            TRACE_BAD_RET("%s.%s", actual_type, abs_path, path, expected_mask);
    } else {
        if (actual_type == UACPI_OBJECT_UNINITIALIZED) {
            TRACE_NO_RET("%s", abs_path, expected_mask);
        } else {
            TRACE_BAD_RET("%s", actual_type, abs_path, expected_mask);
        }
    }
    if (dynamic_abs_path)
        uacpi_free_dynamic_string(abs_path);
}
/*
 * Evaluate an object and verify that the resulting value's type is within
 * 'ret_mask' (a mask of 0 disables the check). On a type mismatch the
 * object is released and UACPI_STATUS_TYPE_MISMATCH is returned; 'out_obj'
 * must not be NULL.
 */
uacpi_status uacpi_eval_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object_type_bits ret_mask,
    uacpi_object **out_obj
)
{
    uacpi_object *obj;
    uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
    uacpi_status ret;

    if (uacpi_unlikely(out_obj == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_eval(parent, path, args, &obj);
    if (uacpi_unlikely_error(ret))
        return ret;

    // A NULL object means nothing was returned; type stays UNINITIALIZED
    if (obj != UACPI_NULL)
        type = obj->type;

    if (ret_mask != 0 && !(ret_mask & (1 << type))) {
        trace_invalid_return_type(parent, path, ret_mask, type);
        uacpi_object_unref(obj);
        return UACPI_STATUS_TYPE_MISMATCH;
    }

    *out_obj = obj;
    return UACPI_STATUS_OK;
}
/*
 * Convenience wrapper: type-checked evaluation without any arguments.
 */
uacpi_status uacpi_eval_simple_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_object_type_bits ret_mask, uacpi_object **ret
)
{
    const uacpi_object_array *no_args = UACPI_NULL;

    return uacpi_eval_typed(parent, path, no_args, ret_mask, ret);
}
/*
 * Evaluate a method expected to return an Integer and extract the raw
 * 64-bit value into 'out_value'. The intermediate object is released
 * before returning.
 */
uacpi_status uacpi_eval_integer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_object *obj;

    ret = uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_INTEGER_BIT, &obj
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    *out_value = obj->integer;
    uacpi_object_unref(obj);

    return UACPI_STATUS_OK;
}
/*
 * Convenience wrapper: evaluate an Integer-returning method that takes no
 * arguments.
 */
uacpi_status uacpi_eval_simple_integer(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value
)
{
    const uacpi_object_array *no_args = UACPI_NULL;

    return uacpi_eval_integer(parent, path, no_args, out_value);
}
/*
 * Evaluate a method whose return value must be either a Buffer or a
 * String object.
 */
uacpi_status uacpi_eval_buffer_or_string(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    const uacpi_object_type_bits mask =
        UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Convenience wrapper: Buffer-or-String evaluation without arguments.
 */
uacpi_status uacpi_eval_simple_buffer_or_string(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_buffer_or_string(parent, path, UACPI_NULL, ret);
}
/*
 * Evaluate a method whose return value must be a String object.
 */
uacpi_status uacpi_eval_string(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    const uacpi_object_type_bits mask = UACPI_OBJECT_STRING_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Convenience wrapper: String evaluation without arguments.
 */
uacpi_status uacpi_eval_simple_string(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_string(parent, path, UACPI_NULL, ret);
}
/*
 * Evaluate a method whose return value must be a Buffer object.
 */
uacpi_status uacpi_eval_buffer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    const uacpi_object_type_bits mask = UACPI_OBJECT_BUFFER_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Convenience wrapper: Buffer evaluation without arguments.
 */
uacpi_status uacpi_eval_simple_buffer(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_buffer(parent, path, UACPI_NULL, ret);
}
/*
 * Evaluate a method whose return value must be a Package object.
 */
uacpi_status uacpi_eval_package(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    const uacpi_object_type_bits mask = UACPI_OBJECT_PACKAGE_BIT;

    return uacpi_eval_typed(parent, path, args, mask, ret);
}
/*
 * Convenience wrapper: Package evaluation without arguments.
 */
uacpi_status uacpi_eval_simple_package(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_package(parent, path, UACPI_NULL, ret);
}
/*
 * Report the integer bitness of the loaded AML: 32 for revision 1
 * tables, 64 otherwise. Requires the subsystem to be initialized.
 */
uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness)
{
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (g_uacpi_rt_ctx.is_rev1)
        *out_bitness = 32;
    else
        *out_bitness = 64;

    return UACPI_STATUS_OK;
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff