Integrate uACPI

This commit is contained in:
2025-08-17 18:37:57 +02:00
parent 069870cd0d
commit 92ccd189e7
166 changed files with 42104 additions and 33 deletions

View File

@ -0,0 +1,38 @@
#pragma once
/*
 * Architecture-specific helpers for uACPI. A host can replace this entire
 * header by defining UACPI_OVERRIDE_ARCH_HELPERS and providing its own
 * "uacpi_arch_helpers.h".
 */
#ifdef UACPI_OVERRIDE_ARCH_HELPERS
#include "uacpi_arch_helpers.h"
#else
#include <uacpi/platform/atomic.h>
/*
 * Default is a no-op; override for architectures that require an explicit
 * cache flush here.
 */
#ifndef UACPI_ARCH_FLUSH_CPU_CACHE
#define UACPI_ARCH_FLUSH_CPU_CACHE() do {} while (0)
#endif
/*
 * NOTE(review): presumably carries saved interrupt/flag state returned by
 * the host's spinlock-style primitives — confirm against the kernel API.
 */
typedef unsigned long uacpi_cpu_flags;
typedef void *uacpi_thread_id;
/*
 * Replace as needed depending on your platform's way to represent thread ids.
 * uACPI offers a few more helpers like uacpi_atomic_{load,store}{8,16,32,64,ptr}
 * (or you could provide your own helpers)
 */
#ifndef UACPI_ATOMIC_LOAD_THREAD_ID
#define UACPI_ATOMIC_LOAD_THREAD_ID(ptr) ((uacpi_thread_id)uacpi_atomic_load_ptr(ptr))
#endif
#ifndef UACPI_ATOMIC_STORE_THREAD_ID
#define UACPI_ATOMIC_STORE_THREAD_ID(ptr, value) uacpi_atomic_store_ptr(ptr, value)
#endif
/*
 * A sentinel value that the kernel promises to NEVER return from
 * uacpi_kernel_get_current_thread_id or this will break
 */
#ifndef UACPI_THREAD_ID_NONE
#define UACPI_THREAD_ID_NONE ((uacpi_thread_id)-1)
#endif
#endif

View File

@ -0,0 +1,347 @@
#pragma once
/*
 * Most of this header is a giant workaround for MSVC to make atomics into a
 * somewhat unified interface with how GCC and Clang handle them.
 *
 * We don't use the absolutely disgusting C11 stdatomic.h header because it is
 * unable to operate on non _Atomic types, which enforce implicit sequential
 * consistency and alter the behavior of the standard C binary/unary operators.
 *
 * The strictness of the atomic helpers defined here is assumed to be at least
 * acquire for loads and release for stores. Cmpxchg uses the standard acq/rel
 * for success, acq for failure, and is assumed to be strong.
 */
/* Hosts may substitute this entire header by defining UACPI_OVERRIDE_ATOMIC. */
#ifdef UACPI_OVERRIDE_ATOMIC
#include "uacpi_atomic.h"
#else
#include <uacpi/platform/compiler.h>
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>

/*
 * Mimics GCC's __atomic_compare_exchange_n (strong) on top of the MSVC
 * _InterlockedCompareExchange family. Returns 1 on success, 0 on failure;
 * on failure *expected is updated with the value observed in *ptr.
 *
 * The intrinsic's argument order is (Destination, Exchange, Comparand):
 * the NEW value comes before the EXPECTED value, the reverse of the GCC
 * builtin's parameter order.
 */
#define UACPI_MAKE_MSVC_CMPXCHG(width, type, suffix) \
static inline int uacpi_do_atomic_cmpxchg##width( \
    type volatile *ptr, type volatile *expected, type desired \
) \
{ \
    type current; \
\
    current = _InterlockedCompareExchange##suffix(ptr, desired, *expected); \
    if (current != *expected) { \
        *expected = current; \
        return 0; \
    } \
    return 1; \
}

#define UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, width, type) \
    uacpi_do_atomic_cmpxchg##width( \
        (type volatile*)ptr, (type volatile*)expected, desired \
    )

/* Interlocked intrinsics are full barriers, satisfying the release store
 * requirement stated at the top of this header. */
#define UACPI_MSVC_ATOMIC_STORE(ptr, value, type, width) \
    _InterlockedExchange##width((type volatile*)(ptr), (type)(value))

/* OR with 0 is a value-preserving read-modify-write, yielding an atomic
 * (at least acquire) load. */
#define UACPI_MSVC_ATOMIC_LOAD(ptr, type, width) \
    _InterlockedOr##width((type volatile*)(ptr), 0)

#define UACPI_MSVC_ATOMIC_INC(ptr, type, width) \
    _InterlockedIncrement##width((type volatile*)(ptr))
#define UACPI_MSVC_ATOMIC_DEC(ptr, type, width) \
    _InterlockedDecrement##width((type volatile*)(ptr))

/* The 32-bit variant uses the suffix-less intrinsic operating on long. */
UACPI_MAKE_MSVC_CMPXCHG(64, __int64, 64)
UACPI_MAKE_MSVC_CMPXCHG(32, long,)
UACPI_MAKE_MSVC_CMPXCHG(16, short, 16)

#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
    UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 16, short)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
    UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 32, long)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
    UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 64, __int64)

#define uacpi_atomic_load8(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, char, 8)
#define uacpi_atomic_load16(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, short, 16)
#define uacpi_atomic_load32(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, long,)
#define uacpi_atomic_load64(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, __int64, 64)

#define uacpi_atomic_store8(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, char, 8)
#define uacpi_atomic_store16(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, short, 16)
#define uacpi_atomic_store32(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, long,)
#define uacpi_atomic_store64(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, __int64, 64)

/* Increment/decrement return the post-operation value, matching the
 * __atomic_{add,sub}_fetch helpers in the GCC branch below. */
#define uacpi_atomic_inc16(ptr) UACPI_MSVC_ATOMIC_INC(ptr, short, 16)
#define uacpi_atomic_inc32(ptr) UACPI_MSVC_ATOMIC_INC(ptr, long,)
#define uacpi_atomic_inc64(ptr) UACPI_MSVC_ATOMIC_INC(ptr, __int64, 64)
#define uacpi_atomic_dec16(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, short, 16)
#define uacpi_atomic_dec32(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, long,)
#define uacpi_atomic_dec64(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, __int64, 64)
#elif defined(__WATCOMC__)
#include <stdint.h>
/*
 * Open Watcom targets i386 and has no atomic builtins, so everything in
 * this branch is implemented as inline assembly via #pragma aux.
 *
 * Strong 16-bit compare-exchange using LOCK CMPXCHG. Returns 1 on success,
 * 0 on failure; on failure *expected is rewritten with the value observed
 * in *ptr, mirroring __atomic_compare_exchange_n.
 */
static int uacpi_do_atomic_cmpxchg16(volatile uint16_t *ptr, volatile uint16_t *expected, uint16_t desired);
#pragma aux uacpi_do_atomic_cmpxchg16 = \
".486" \
"mov ax, [esi]" \
"lock cmpxchg [edi], bx" \
"mov [esi], ax" \
"setz al" \
"movzx eax, al" \
parm [ edi ] [ esi ] [ ebx ] \
value [ eax ]
/* Same as above, for 32-bit operands. */
static int uacpi_do_atomic_cmpxchg32(volatile uint32_t *ptr, volatile uint32_t *expected, uint32_t desired);
#pragma aux uacpi_do_atomic_cmpxchg32 = \
".486" \
"mov eax, [esi]" \
"lock cmpxchg [edi], ebx" \
"mov [esi], eax" \
"setz al" \
"movzx eax, al" \
parm [ edi ] [ esi ] [ ebx ] \
value [ eax ]
/*
 * 64-bit compare-exchange via LOCK CMPXCHG8B. The desired value arrives
 * pre-split into two 32-bit halves (low in ebx, high in ecx) because
 * CMPXCHG8B implicitly takes its operands in ECX:EBX and EDX:EAX.
 */
static int uacpi_do_atomic_cmpxchg64_asm(volatile uint64_t *ptr, volatile uint64_t *expected, uint32_t low, uint32_t high);
#pragma aux uacpi_do_atomic_cmpxchg64_asm = \
".586" \
"mov eax, [esi]" \
"mov edx, [esi + 4]" \
"lock cmpxchg8b [edi]" \
"mov [esi], eax" \
"mov [esi + 4], edx" \
"setz al" \
"movzx eax, al" \
modify [ edx ] \
parm [ edi ] [ esi ] [ ebx ] [ ecx ] \
value [ eax ]
/* Splits 'desired' into the two halves the asm helper expects. */
static inline int uacpi_do_atomic_cmpxchg64(volatile uint64_t *ptr, volatile uint64_t *expected, uint64_t desired) {
return uacpi_do_atomic_cmpxchg64_asm(ptr, expected, desired, desired >> 32);
}
#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
uacpi_do_atomic_cmpxchg16((volatile uint16_t*)ptr, (volatile uint16_t*)expected, (uint16_t)desired)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
uacpi_do_atomic_cmpxchg32((volatile uint32_t*)ptr, (volatile uint32_t*)expected, (uint32_t)desired)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
uacpi_do_atomic_cmpxchg64((volatile uint64_t*)ptr, (volatile uint64_t*)expected, (uint64_t)desired)
/*
 * 8/16/32-bit loads: a plain aligned MOV is atomic on x86.
 * NOTE(review): no explicit compiler barrier is emitted here; this assumes
 * Watcom does not reorder memory accesses around #pragma aux calls —
 * confirm against the Open Watcom documentation.
 */
static uint8_t uacpi_do_atomic_load8(volatile uint8_t *ptr);
#pragma aux uacpi_do_atomic_load8 = \
"mov al, [esi]" \
parm [ esi ] \
value [ al ]
static uint16_t uacpi_do_atomic_load16(volatile uint16_t *ptr);
#pragma aux uacpi_do_atomic_load16 = \
"mov ax, [esi]" \
parm [ esi ] \
value [ ax ]
static uint32_t uacpi_do_atomic_load32(volatile uint32_t *ptr);
#pragma aux uacpi_do_atomic_load32 = \
"mov eax, [esi]" \
parm [ esi ] \
value [ eax ]
/*
 * 64-bit load: CMPXCHG8B with expected == desired == 0 either swaps 0 for 0
 * (leaving *ptr untouched) or deposits the current value into EDX:EAX; in
 * both cases EDX:EAX ends up holding the value of *ptr, which is then
 * written out through 'out'.
 */
static void uacpi_do_atomic_load64_asm(volatile uint64_t *ptr, uint64_t *out);
#pragma aux uacpi_do_atomic_load64_asm = \
".586" \
"xor eax, eax" \
"xor ebx, ebx" \
"xor ecx, ecx" \
"xor edx, edx" \
"lock cmpxchg8b [esi]" \
"mov [edi], eax" \
"mov [edi + 4], edx" \
modify [ eax ebx ecx edx ] \
parm [ esi ] [ edi ]
static inline uint64_t uacpi_do_atomic_load64(volatile uint64_t *ptr) {
uint64_t value;
uacpi_do_atomic_load64_asm(ptr, &value);
return value;
}
#define uacpi_atomic_load8(ptr) uacpi_do_atomic_load8((volatile uint8_t*)ptr)
#define uacpi_atomic_load16(ptr) uacpi_do_atomic_load16((volatile uint16_t*)ptr)
#define uacpi_atomic_load32(ptr) uacpi_do_atomic_load32((volatile uint32_t*)ptr)
#define uacpi_atomic_load64(ptr) uacpi_do_atomic_load64((volatile uint64_t*)ptr)
/* 8/16/32-bit stores: a plain aligned MOV is atomic on x86 (see the load
 * note above regarding ordering). */
static void uacpi_do_atomic_store8(volatile uint8_t *ptr, uint8_t value);
#pragma aux uacpi_do_atomic_store8 = \
"mov [edi], al" \
parm [ edi ] [ eax ]
static void uacpi_do_atomic_store16(volatile uint16_t *ptr, uint16_t value);
#pragma aux uacpi_do_atomic_store16 = \
"mov [edi], ax" \
parm [ edi ] [ eax ]
static void uacpi_do_atomic_store32(volatile uint32_t *ptr, uint32_t value);
#pragma aux uacpi_do_atomic_store32 = \
"mov [edi], eax" \
parm [ edi ] [ eax ]
/*
 * 64-bit store: CMPXCHG8B loop. EDX:EAX holds the last observed value
 * (seeded with 0) and the CAS with ECX:EBX = value is retried until it
 * succeeds; JNZ tests ZF straight from CMPXCHG8B.
 */
static void uacpi_do_atomic_store64_asm(volatile uint64_t *ptr, uint32_t low, uint32_t high);
#pragma aux uacpi_do_atomic_store64_asm = \
".586" \
"xor eax, eax" \
"xor edx, edx" \
"retry: lock cmpxchg8b [edi]" \
"jnz retry" \
modify [ eax edx ] \
parm [ edi ] [ ebx ] [ ecx ]
/* Splits 'value' into the two halves the asm helper expects. */
static inline void uacpi_do_atomic_store64(volatile uint64_t *ptr, uint64_t value) {
uacpi_do_atomic_store64_asm(ptr, value, value >> 32);
}
#define uacpi_atomic_store8(ptr, value) uacpi_do_atomic_store8((volatile uint8_t*)ptr, (uint8_t)value)
#define uacpi_atomic_store16(ptr, value) uacpi_do_atomic_store16((volatile uint16_t*)ptr, (uint16_t)value)
#define uacpi_atomic_store32(ptr, value) uacpi_do_atomic_store32((volatile uint32_t*)ptr, (uint32_t)value)
#define uacpi_atomic_store64(ptr, value) uacpi_do_atomic_store64((volatile uint64_t*)ptr, (uint64_t)value)
/*
 * 16/32-bit atomic increment via LOCK XADD: the register receives the OLD
 * value, so it is incremented once more to return the post-increment value
 * (matching __atomic_add_fetch semantics).
 */
static uint16_t uacpi_do_atomic_inc16(volatile uint16_t *ptr);
#pragma aux uacpi_do_atomic_inc16 = \
".486" \
"mov ax, 1" \
"lock xadd [edi], ax" \
"add ax, 1" \
parm [ edi ] \
value [ ax ]
static uint32_t uacpi_do_atomic_inc32(volatile uint32_t *ptr);
#pragma aux uacpi_do_atomic_inc32 = \
".486" \
"mov eax, 1" \
"lock xadd [edi], eax" \
"add eax, 1" \
parm [ edi ] \
value [ eax ]
/*
 * 64-bit atomic increment via a LOCK CMPXCHG8B loop. EDX:EAX holds the
 * last observed value (seeded with 0); each iteration recomputes the
 * desired value ECX:EBX = EDX:EAX + 1 BEFORE the CAS, so that JNZ tests
 * ZF straight from CMPXCHG8B. On failure CMPXCHG8B reloads EDX:EAX with
 * the current value and the loop retries. The post-increment value is
 * written to *out.
 */
static void uacpi_do_atomic_inc64_asm(volatile uint64_t *ptr, uint64_t *out);
#pragma aux uacpi_do_atomic_inc64_asm = \
".586" \
"xor eax, eax" \
"xor edx, edx" \
"retry: mov ebx, eax" \
"mov ecx, edx" \
"add ebx, 1" \
"adc ecx, 0" \
"lock cmpxchg8b [esi]" \
"jnz retry" \
"mov [edi], ebx" \
"mov [edi + 4], ecx" \
modify [ eax ebx ecx edx ] \
parm [ esi ] [ edi ]
static inline uint64_t uacpi_do_atomic_inc64(volatile uint64_t *ptr) {
uint64_t value;
uacpi_do_atomic_inc64_asm(ptr, &value);
return value;
}
#define uacpi_atomic_inc16(ptr) uacpi_do_atomic_inc16((volatile uint16_t*)ptr)
#define uacpi_atomic_inc32(ptr) uacpi_do_atomic_inc32((volatile uint32_t*)ptr)
#define uacpi_atomic_inc64(ptr) uacpi_do_atomic_inc64((volatile uint64_t*)ptr)
/*
 * 16/32-bit atomic decrement via LOCK XADD with -1: the register receives
 * the OLD value, so -1 is added once more to return the post-decrement
 * value (matching __atomic_sub_fetch semantics).
 */
static uint16_t uacpi_do_atomic_dec16(volatile uint16_t *ptr);
#pragma aux uacpi_do_atomic_dec16 = \
".486" \
"mov ax, -1" \
"lock xadd [edi], ax" \
"add ax, -1" \
parm [ edi ] \
value [ ax ]
static uint32_t uacpi_do_atomic_dec32(volatile uint32_t *ptr);
#pragma aux uacpi_do_atomic_dec32 = \
".486" \
"mov eax, -1" \
"lock xadd [edi], eax" \
"add eax, -1" \
parm [ edi ] \
value [ eax ]
/*
 * 64-bit atomic decrement via a LOCK CMPXCHG8B loop. Mirrors the inc64
 * helper: EDX:EAX holds the last observed value (seeded with 0), each
 * iteration recomputes ECX:EBX = EDX:EAX - 1 BEFORE the CAS, and JNZ
 * tests ZF straight from CMPXCHG8B. The post-decrement value is written
 * to *out.
 */
static void uacpi_do_atomic_dec64_asm(volatile uint64_t *ptr, uint64_t *out);
#pragma aux uacpi_do_atomic_dec64_asm = \
".586" \
"xor eax, eax" \
"xor edx, edx" \
"retry: mov ebx, eax" \
"mov ecx, edx" \
"sub ebx, 1" \
"sbb ecx, 0" \
"lock cmpxchg8b [esi]" \
"jnz retry" \
"mov [edi], ebx" \
"mov [edi + 4], ecx" \
modify [ eax ebx ecx edx ] \
parm [ esi ] [ edi ]
static inline uint64_t uacpi_do_atomic_dec64(volatile uint64_t *ptr) {
uint64_t value;
uacpi_do_atomic_dec64_asm(ptr, &value);
return value;
}
#define uacpi_atomic_dec16(ptr) uacpi_do_atomic_dec16((volatile uint16_t*)ptr)
#define uacpi_atomic_dec32(ptr) uacpi_do_atomic_dec32((volatile uint32_t*)ptr)
#define uacpi_atomic_dec64(ptr) uacpi_do_atomic_dec64((volatile uint64_t*)ptr)
#else
/*
 * GCC/Clang: map directly onto the __atomic builtins, which provide the
 * acquire/release/acq_rel semantics promised at the top of this header.
 * The literal 0 selects a STRONG compare-exchange.
 */
#define UACPI_DO_CMPXCHG(ptr, expected, desired) \
__atomic_compare_exchange_n(ptr, expected, desired, 0, \
__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
UACPI_DO_CMPXCHG(ptr, expected, desired)
/* Acquire loads */
#define uacpi_atomic_load8(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load16(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load32(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load64(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
/* Release stores */
#define uacpi_atomic_store8(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store16(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store32(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store64(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
/* Increment/decrement return the post-operation value ({add,sub}_fetch). */
#define uacpi_atomic_inc16(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_inc32(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_inc64(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec16(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec32(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec64(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#endif
/*
 * Pointer-sized load/store wrappers, dispatched on UACPI_POINTER_SIZE
 * (defined in <uacpi/platform/compiler.h>).
 */
#if UACPI_POINTER_SIZE == 4
#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load32(ptr_to_ptr)
#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store32(ptr_to_ptr, value)
#else
#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load64(ptr_to_ptr)
#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store64(ptr_to_ptr, value)
#endif
#endif /* !UACPI_OVERRIDE_ATOMIC */

View File

@ -0,0 +1,125 @@
#pragma once
/*
 * Compiler-specific attributes/macros go here. This is the default placeholder
 * that should work for MSVC/GCC/clang.
 */
#ifdef UACPI_OVERRIDE_COMPILER
#include "uacpi_compiler.h"
#else
/*
 * Alignment attribute. __declspec(align) is MSVC-style syntax that GCC and
 * Clang do not accept, so they get __attribute__((aligned)) instead; every
 * other compiler keeps the original __declspec spelling.
 */
#if defined(__GNUC__) || defined(__clang__)
#define UACPI_ALIGN(x) __attribute__((aligned(x)))
#else
#define UACPI_ALIGN(x) __declspec(align(x))
#endif
/* Compile-time assertion; stubbed out for OpenWatcom, which predates C11. */
#if defined(__WATCOMC__)
#define UACPI_STATIC_ASSERT(expr, msg)
#elif defined(__cplusplus)
#define UACPI_STATIC_ASSERT static_assert
#else
#define UACPI_STATIC_ASSERT _Static_assert
#endif
/* Force-inline and packed-struct helpers, per compiler. */
#ifdef _MSC_VER
#include <intrin.h>
#define UACPI_ALWAYS_INLINE __forceinline
#define UACPI_PACKED(decl) \
__pragma(pack(push, 1)) \
decl; \
__pragma(pack(pop))
#elif defined(__WATCOMC__)
#define UACPI_ALWAYS_INLINE inline
#define UACPI_PACKED(decl) _Packed decl;
#else
#define UACPI_ALWAYS_INLINE inline __attribute__((always_inline))
#define UACPI_PACKED(decl) decl __attribute__((packed));
#endif
#if defined(__GNUC__) || defined(__clang__)
/* Branch-prediction hints */
#define uacpi_unlikely(expr) __builtin_expect(!!(expr), 0)
#define uacpi_likely(expr) __builtin_expect(!!(expr), 1)
#ifdef __has_attribute
#if __has_attribute(__fallthrough__)
#define UACPI_FALLTHROUGH __attribute__((__fallthrough__))
#endif
#endif
#define UACPI_MAYBE_UNUSED __attribute__ ((unused))
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wunused-parameter\"")
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END \
_Pragma("GCC diagnostic pop")
/* clang does not support the gnu_printf archetype; plain printf is used. */
#ifdef __clang__
#define UACPI_PRINTF_DECL(fmt_idx, args_idx) \
__attribute__((format(printf, fmt_idx, args_idx)))
#else
#define UACPI_PRINTF_DECL(fmt_idx, args_idx) \
__attribute__((format(gnu_printf, fmt_idx, args_idx)))
#endif
#define UACPI_COMPILER_HAS_BUILTIN_MEMCPY
#define UACPI_COMPILER_HAS_BUILTIN_MEMMOVE
#define UACPI_COMPILER_HAS_BUILTIN_MEMSET
#define UACPI_COMPILER_HAS_BUILTIN_MEMCMP
#elif defined(__WATCOMC__)
#define uacpi_unlikely(expr) expr
#define uacpi_likely(expr) expr
/*
 * The OpenWatcom documentation suggests this should be done using
 * _Pragma("off (unreferenced)") and _Pragma("pop (unreferenced)"),
 * but these pragmas appear to be no-ops. Use inline as the next best thing.
 * Note that OpenWatcom accepts redundant modifiers without a warning,
 * so UACPI_MAYBE_UNUSED inline still works.
 */
#define UACPI_MAYBE_UNUSED inline
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END
#define UACPI_PRINTF_DECL(fmt_idx, args_idx)
#else
/* Unknown compiler: all hints degrade to no-ops. */
#define uacpi_unlikely(expr) expr
#define uacpi_likely(expr) expr
#define UACPI_MAYBE_UNUSED
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END
#define UACPI_PRINTF_DECL(fmt_idx, args_idx)
#endif
/* Fallback when the compiler offers no fallthrough attribute. */
#ifndef UACPI_FALLTHROUGH
#define UACPI_FALLTHROUGH do {} while (0)
#endif
/* Pointer width in bytes; consumed by <uacpi/platform/atomic.h>. */
#ifndef UACPI_POINTER_SIZE
#ifdef _WIN32
#ifdef _WIN64
#define UACPI_POINTER_SIZE 8
#else
#define UACPI_POINTER_SIZE 4
#endif
#elif defined(__GNUC__)
#define UACPI_POINTER_SIZE __SIZEOF_POINTER__
#elif defined(__WATCOMC__)
#ifdef __386__
#define UACPI_POINTER_SIZE 4
#elif defined(__I86__)
#error uACPI does not support 16-bit mode compilation
#else
#error Unknown target architecture
#endif
#else
#error Failed to detect pointer size
#endif
#endif
#endif

View File

@ -0,0 +1,162 @@
#pragma once
#ifdef UACPI_OVERRIDE_CONFIG
#include "uacpi_config.h"
#else
#include <uacpi/helpers.h>
#include <uacpi/log.h>
/*
 * =======================
 * Context-related options
 * =======================
 */
#ifndef UACPI_DEFAULT_LOG_LEVEL
#define UACPI_DEFAULT_LOG_LEVEL UACPI_LOG_INFO
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_DEFAULT_LOG_LEVEL < UACPI_LOG_ERROR ||
UACPI_DEFAULT_LOG_LEVEL > UACPI_LOG_DEBUG,
"configured default log level is invalid"
);
#ifndef UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS
#define UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS 30
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS < 1,
"configured default loop timeout is invalid (expecting at least 1 second)"
);
#ifndef UACPI_DEFAULT_MAX_CALL_STACK_DEPTH
#define UACPI_DEFAULT_MAX_CALL_STACK_DEPTH 256
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_DEFAULT_MAX_CALL_STACK_DEPTH < 4,
"configured default max call stack depth is invalid "
"(expecting at least 4 frames)"
);
/*
 * ===================
 * Kernel-api options
 * ===================
 */
/*
 * The commented-out defines below are opt-in switches: uncomment (or define
 * them via the build system) to enable the corresponding behavior.
 *
 * Convenience initialization/deinitialization hooks that will be called by
 * uACPI automatically when appropriate if compiled-in.
 */
// #define UACPI_KERNEL_INITIALIZATION
/*
 * Makes kernel api logging callbacks work with unformatted printf-style
 * strings and va_args instead of a pre-formatted string. Can be useful if
 * your native logging is implemented in terms of this format as well.
 */
// #define UACPI_FORMATTED_LOGGING
/*
 * Makes uacpi_kernel_free take in an additional 'size_hint' parameter, which
 * contains the size of the original allocation. Note that this comes with a
 * performance penalty in some cases.
 */
// #define UACPI_SIZED_FREES
/*
 * Makes uacpi_kernel_alloc_zeroed mandatory to implement by the host, uACPI
 * will not provide a default implementation if this is enabled.
 */
// #define UACPI_NATIVE_ALLOC_ZEROED
/*
 * =========================
 * Platform-specific options
 * =========================
 */
/*
 * Makes uACPI use the internal versions of mem{cpy,move,set,cmp} instead of
 * relying on the host to provide them. Note that compilers like clang and GCC
 * rely on these being available by default, even in freestanding mode, so
 * compiling uACPI may theoretically generate implicit dependencies on them
 * even if this option is defined.
 */
// #define UACPI_USE_BUILTIN_STRING
/*
 * Turns uacpi_phys_addr and uacpi_io_addr into a 32-bit type, and adds extra
 * code for address truncation. Needed for e.g. i686 platforms without PAE
 * support.
 */
// #define UACPI_PHYS_ADDR_IS_32BITS
/*
 * Switches uACPI into reduced-hardware-only mode. Strips all full-hardware
 * ACPI support code at compile-time, including the event subsystem, the global
 * lock, and other full-hardware features.
 */
// #define UACPI_REDUCED_HARDWARE
/*
 * Switches uACPI into tables-subsystem-only mode and strips all other code.
 * This means only the table API will be usable, no other subsystems are
 * compiled in. In this mode, uACPI only depends on the following kernel APIs:
 * - uacpi_kernel_get_rsdp
 * - uacpi_kernel_{map,unmap}
 * - uacpi_kernel_log
 *
 * Use uacpi_setup_early_table_access to initialize, uacpi_state_reset to
 * deinitialize.
 *
 * This mode is primarily designed for these three use-cases:
 * - Bootloader/pre-kernel environments that need to parse ACPI tables, but
 *   don't actually need a fully-featured AML interpreter, and everything else
 *   that a full ACPI implementation entails.
 * - A micro-kernel that has the full AML interpreter running in userspace, but
 *   still needs to parse ACPI tables to bootstrap allocators, timers, SMP etc.
 * - A WIP kernel that needs to parse ACPI tables for bootstrapping SMP/timers,
 *   ECAM, etc., but doesn't yet have enough subsystems implemented in order
 *   to run a fully-featured AML interpreter.
 */
// #define UACPI_BAREBONES_MODE
/*
 * =============
 * Misc. options
 * =============
 */
/*
 * If UACPI_FORMATTED_LOGGING is not enabled, this is the maximum length of the
 * pre-formatted message that is passed to the logging callback.
 */
#ifndef UACPI_PLAIN_LOG_BUFFER_SIZE
#define UACPI_PLAIN_LOG_BUFFER_SIZE 128
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_PLAIN_LOG_BUFFER_SIZE < 16,
"configured log buffer size is too small (expecting at least 16 bytes)"
);
/*
 * The size of the table descriptor inline storage. All table descriptors past
 * this length will be stored in a dynamically allocated heap array. The size
 * of one table descriptor is approximately 56 bytes.
 */
#ifndef UACPI_STATIC_TABLE_ARRAY_LEN
#define UACPI_STATIC_TABLE_ARRAY_LEN 16
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_STATIC_TABLE_ARRAY_LEN < 1,
"configured static table array length is too small (expecting at least 1)"
);
#endif

View File

@ -0,0 +1,28 @@
#pragma once
/*
 * Hosts can map uACPI's libc usage onto their own (optimized) routines by
 * defining UACPI_OVERRIDE_LIBC and providing "uacpi_libc.h".
 */
#ifdef UACPI_OVERRIDE_LIBC
#include "uacpi_libc.h"
#else
/*
 * The following libc functions are used internally by uACPI and have a default
 * (sub-optimal) implementation:
 * - strcmp
 * - strnlen
 * - strlen
 * - snprintf
 * - vsnprintf
 *
 * The following use a builtin implementation only if UACPI_USE_BUILTIN_STRING
 * is defined (more information can be found in the config.h header):
 * - memcpy
 * - memmove
 * - memset
 * - memcmp
 *
 * In case your platform happens to implement optimized versions of the helpers
 * above, you are able to make uACPI use those instead by overriding them like so:
 *
 * #define uacpi_memcpy my_fast_memcpy
 * #define uacpi_snprintf my_fast_snprintf
 */
#endif

View File

@ -0,0 +1,64 @@
#pragma once
/*
 * Platform-specific types go here. This is the default placeholder using
 * types from the standard headers.
 */
#ifdef UACPI_OVERRIDE_TYPES
#include "uacpi_types.h"
#else
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <uacpi/helpers.h>
/* Fixed-width integer aliases. */
typedef uint8_t uacpi_u8;
typedef uint16_t uacpi_u16;
typedef uint32_t uacpi_u32;
typedef uint64_t uacpi_u64;
typedef int8_t uacpi_i8;
typedef int16_t uacpi_i16;
typedef int32_t uacpi_i32;
typedef int64_t uacpi_i64;
#define UACPI_TRUE true
#define UACPI_FALSE false
typedef bool uacpi_bool;
#define UACPI_NULL NULL
/* Pointer-sized integer and virtual address representation. */
typedef uintptr_t uacpi_uintptr;
typedef uacpi_uintptr uacpi_virt_addr;
typedef size_t uacpi_size;
/* Variadic-argument passthroughs. */
typedef va_list uacpi_va_list;
#define uacpi_va_start va_start
#define uacpi_va_end va_end
#define uacpi_va_arg va_arg
typedef char uacpi_char;
#define uacpi_offsetof offsetof
/*
 * We use unsigned long long for 64-bit number formatting because 64-bit types
 * don't have a standard way to format them. The inttypes.h header is not
 * freestanding therefore it's not practical to force the user to define the
 * corresponding PRI macros. Moreover, unsigned long long is required to be
 * at least 64-bits as per C99.
 */
UACPI_BUILD_BUG_ON_WITH_MSG(
sizeof(unsigned long long) < 8,
"unsigned long long must be at least 64 bits large as per C99"
);
#define UACPI_PRIu64 "llu"
#define UACPI_PRIx64 "llx"
#define UACPI_PRIX64 "llX"
/* Promote any 64-bit value to unsigned long long for use with the above. */
#define UACPI_FMT64(val) ((unsigned long long)(val))
#endif