Integrate uACPI

This commit is contained in:
2025-08-17 18:37:57 +02:00
parent 069870cd0d
commit 92ccd189e7
166 changed files with 42104 additions and 33 deletions

View File

@ -13,7 +13,12 @@ CFLAGS += -I. \
-DLFS_NO_ASSERT \
-DLFS_NO_DEBUG \
-DLFS_NO_WARN \
-DLFS_NO_ERROR
-DLFS_NO_ERROR \
-DUACPI_BAREBONES_MODE
ifeq ($(ARCH),x86_64)
CFLAGS += -I./hal/x86_64/uACPI/include
endif
ifeq ($(PUTCHAR_),fb)
CFLAGS += -DPUTCHAR_=PUTCHAR_FB
@ -51,6 +56,12 @@ SRCFILES := $(wildcard *.c) \
$(wildcard std/*.c) \
$(wildcard flanterm/src/*.c) \
$(wildcard flanterm/src/flanterm_backends/*.c)
ifeq ($(ARCH),x86_64)
SRCFILES += $(wildcard hal/x86_64/uACPI/source/*.c)
SRCFILES += $(wildcard hal/x86_64/port-uACPI/*.c)
endif
CFILES := $(filter %.c,$(SRCFILES))
ASFILES := $(filter %.S,$(SRCFILES))
OBJ := $(patsubst %.c,%.o,$(CFILES)) $(patsubst %.S,%.o,$(ASFILES))

View File

@ -12,7 +12,7 @@ CFLAGS += -m64 \
-mno-red-zone \
-fno-stack-protector \
-fno-stack-check \
-fno-lto
-Os \
LDFLAGS += -m elf_x86_64 \
-pie \

View File

@ -0,0 +1,6 @@
#ifndef COMPILER_BUILTINS_H_
#define COMPILER_BUILTINS_H_
/*
 * Thin wrappers around compiler builtins shared across the kernel.
 *
 * unreachable(): asserts that a code path can never execute. Reaching it
 * at runtime is undefined behavior — the optimizer is free to assume it
 * never happens (GCC/Clang __builtin_unreachable()).
 */
#define unreachable() __builtin_unreachable()
#endif // COMPILER_BUILTINS_H_

View File

@ -18,6 +18,7 @@ size_t hal_strcspn(const char *s, const char *reject);
size_t hal_strspn(const char *s, const char *accept);
char *hal_strcpy(char *dest, const char *src);
char *hal_strchr(const char *s, int c);
void hal_init_withmalloc(void);
#if defined(__x86_64__)
# define HAL_PAGE_SIZE 0x1000

32
kernel/hal/x86_64/acpi.c Normal file
View File

@ -0,0 +1,32 @@
#include <stddef.h>
#include <stdint.h>
#include "uacpi/uacpi.h"
#include "uacpi/utilities.h"
#include "hal/hal.h"
#include "kprintf.h"
#include "dlmalloc/malloc.h"
#define PREINIT_BUFFER_SIZE 0x1000
/*
 * Early ACPI bring-up: give uACPI a scratch buffer for early table access.
 *
 * Must run after the kernel heap (dlmalloc) is usable; it is invoked from
 * hal_init_withmalloc(). On any failure the machine is halted, since later
 * initialization depends on the ACPI tables.
 *
 * NOTE: the buffer handed to uacpi_setup_early_table_access() must remain
 * valid while uACPI uses early table access, so it is intentionally never
 * freed here.
 *
 * Fixes vs. original: checks the dlmalloc() result before use (passing a
 * NULL buffer to uACPI on OOM was previously possible) and drops three
 * commented-out duplicates of the error-handling block.
 */
void acpi_init(void) {
    uacpi_status ret;

    void *preinit_buffer = dlmalloc(PREINIT_BUFFER_SIZE);
    if (preinit_buffer == NULL) {
        ERR("acpi", "failed to allocate early table access buffer\n");
        hal_hang();
    }

    ret = uacpi_setup_early_table_access(preinit_buffer, PREINIT_BUFFER_SIZE);
    if (uacpi_unlikely_error(ret)) {
        ERR("acpi", "init err %s\n", uacpi_status_to_string(ret));
        hal_hang();
    }

    LOG("hal", "acpi init\n");
}

6
kernel/hal/x86_64/acpi.h Normal file
View File

@ -0,0 +1,6 @@
#ifndef HAL_ACPI_H_
#define HAL_ACPI_H_
/*
 * Early ACPI initialization via uACPI's early table access. Requires a
 * working kernel heap; halts the machine on failure (see acpi.c).
 */
void acpi_init(void);
#endif // HAL_ACPI_H_

View File

@ -5,6 +5,7 @@
#include "serial.h"
#include "gdt.h"
#include "idt.h"
#include "acpi.h"
void hal_init(void) {
if (!serial_init()) {
@ -12,7 +13,6 @@ void hal_init(void) {
}
LOG("hal", "serial init\n");
gdt_init();
idt_init();
}
__attribute__((noreturn)) void hal_hang(void) {
@ -21,3 +21,7 @@ __attribute__((noreturn)) void hal_hang(void) {
}
}
/*
 * Second-stage HAL initialization for pieces that need the kernel heap.
 * The name suggests it is called once the allocator is online, after
 * hal_init() — TODO confirm against the caller. Currently this only
 * brings up ACPI (uACPI early table access).
 */
void hal_init_withmalloc(void) {
    acpi_init();
}

View File

@ -1,13 +0,0 @@
# Legacy x86-64 port I/O helpers (AT&T syntax, System V AMD64 ABI:
# first integer argument arrives in rdi, second in rsi).
# NOTE(review): this file is removed by the commit in favor of the C
# inline-asm implementations in io.c (io_in8/io_out8).

# void io_outb(uint16_t port, uint8_t value)
.global io_outb
io_outb:
mov %di, %dx        # port -> dx (OUT takes the port number in dx)
mov %sil, %al       # value -> al
out %al, %dx        # write one byte to the port
ret

# uint8_t io_inb(uint16_t port)
.global io_inb
io_inb:
mov %di, %dx        # port -> dx
in %dx, %al         # read one byte from the port into al
movzx %al, %rax     # zero-extend to the full return register
ret

28
kernel/hal/x86_64/io.c Normal file
View File

@ -0,0 +1,28 @@
#include <stdint.h>
/* Read one byte from the given legacy I/O port. */
uint8_t io_in8(uint16_t port) {
    uint8_t value;
    asm volatile("inb %1, %0" : "=a"(value) : "dN"(port));
    return value;
}
/* Write one byte to the given legacy I/O port. "dN" lets the compiler
 * encode constant ports below 256 as an immediate operand. */
void io_out8(uint16_t port, uint8_t value) {
    asm volatile("outb %1, %0" :: "dN"(port), "a"(value));
}
/*
 * Read a 16-bit word from the given legacy I/O port.
 * Rewritten with an operand template and the "dN" constraint to match
 * io_in8/io_out8 (also allows an immediate encoding for constant ports
 * below 256, which the hard-coded "%%dx" template forbade).
 */
uint16_t io_in16(uint16_t port) {
    uint16_t r;
    asm volatile("inw %1, %0" : "=a"(r) : "dN"(port));
    return r;
}
/*
 * Write a 16-bit word to the given legacy I/O port.
 * Uses the same operand-template + "dN" style as io_out8 for consistency
 * (and immediate encoding of constant ports below 256).
 */
void io_out16(uint16_t port, uint16_t value) {
    asm volatile("outw %1, %0" :: "dN"(port), "a"(value));
}
/*
 * Read a 32-bit doubleword from the given legacy I/O port.
 * Same operand-template + "dN" style as io_in8 for consistency.
 */
uint32_t io_in32(uint16_t port) {
    uint32_t r;
    asm volatile("inl %1, %0" : "=a"(r) : "dN"(port));
    return r;
}
/*
 * Write a 32-bit doubleword to the given legacy I/O port.
 * Same operand-template + "dN" style as io_out8; constraint order is
 * normalized to match io_out8/io_out16 (the original listed "d" before
 * "a" here but "a" before "d" in io_out16).
 */
void io_out32(uint16_t port, uint32_t value) {
    asm volatile("outl %1, %0" :: "dN"(port), "a"(value));
}

View File

@ -1,7 +1,15 @@
#ifndef HAL_IO_H_
#define HAL_IO_H_
extern uint8_t io_inb(uint16_t port);
extern void io_outb(uint16_t port, uint8_t value);
#include <stdint.h>
uint8_t io_in8(uint16_t port);
void io_out8(uint16_t port, uint8_t value);
uint16_t io_in16(uint16_t port);
void io_out16(uint16_t port, uint16_t value);
uint32_t io_in32(uint16_t port);
void io_out32(uint16_t port, uint32_t value);
#endif // HAL_IO_H_

View File

@ -0,0 +1,45 @@
#include "uacpi/uacpi.h"
#include "dlmalloc/malloc.h"
#include "bootinfo/bootinfo.h"
#include "hal/x86_64/io.h"
#include "kprintf.h"
#include "compiler/builtins.h"
/* uACPI host allocation hook: forward to the kernel's dlmalloc heap. */
void *uacpi_kernel_alloc(uacpi_size size) {
    return dlmalloc(size);
}
/*
 * uACPI host free hook: release memory obtained via uacpi_kernel_alloc().
 * Fixed: the original wrote `return dlfree(ptr);`, which returns a void
 * expression from a void function — an ISO C constraint violation
 * (C11 6.8.6.4) accepted only as a GNU extension.
 */
void uacpi_kernel_free(void *ptr) {
    dlfree(ptr);
}
/*
 * uACPI host logging hook: translate the uACPI log level into a short
 * tag and forward the message to the kernel log.
 *
 * Fixed: the tag pointer is now const-qualified — it only ever points at
 * string literals, which must not be addressed through a plain char *.
 */
void uacpi_kernel_log(uacpi_log_level lvl, const uacpi_char *s) {
    const char *tag;
    switch (lvl) {
    case UACPI_LOG_DEBUG: tag = "Debug"; break;
    case UACPI_LOG_TRACE: tag = "Trace"; break;
    case UACPI_LOG_INFO:  tag = "Info";  break;
    case UACPI_LOG_WARN:  tag = "Warn";  break;
    case UACPI_LOG_ERROR: tag = "Error"; break;
    default:
        /* uACPI only emits the levels above; any other value is a bug. */
        unreachable();
        break;
    }
    LOG("uACPI", "[%s] %s", tag, s);
}
/*
 * uACPI host mapping hook: translate a physical address to a virtual
 * pointer by adding the bootloader's higher-half direct map offset.
 * `len` is ignored — this assumes the HHDM already covers the entire
 * requested physical range. NOTE(review): confirm the bootloader maps
 * all ACPI memory into the HHDM; no per-call mapping is created here.
 */
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len) {
    (void)len;
    return (void *)(BOOT_INFO.hhdm_off + addr);
}
/*
 * uACPI host unmapping hook: intentionally a no-op, because
 * uacpi_kernel_map() hands out pointers into the permanent direct map
 * rather than creating per-call mappings.
 */
void uacpi_kernel_unmap(void *addr, uacpi_size len) {
    (void)addr;
    (void)len;
}
/*
 * uACPI host hook: report the physical address of the ACPI RSDP as
 * discovered by the bootloader.
 *
 * Robustness fix: if the bootloader did not provide an RSDP (address 0),
 * report UACPI_STATUS_NOT_FOUND instead of handing uACPI a null physical
 * address as a success.
 */
uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out) {
    if (BOOT_INFO.rsdp == 0)
        return UACPI_STATUS_NOT_FOUND;
    *out = BOOT_INFO.rsdp;
    return UACPI_STATUS_OK;
}

View File

@ -6,40 +6,40 @@
#define SERIAL_PORT 0x3f8
static int serial_received(void) {
return io_inb(SERIAL_PORT + 5) & 1;
return io_in8(SERIAL_PORT + 5) & 1;
}
static uint8_t serial_read(void) {
while (serial_received() == 0);
return io_inb(SERIAL_PORT);
return io_in8(SERIAL_PORT);
}
static int serial_trans_empty(void) {
return io_inb(SERIAL_PORT + 5) & 0x20;
return io_in8(SERIAL_PORT + 5) & 0x20;
}
static void serial_write(uint8_t value) {
while (!serial_trans_empty());
io_outb(SERIAL_PORT, value);
io_out8(SERIAL_PORT, value);
}
// REFERENCE: https://wiki.osdev.org/Serial_Ports
bool serial_init(void) {
io_outb(SERIAL_PORT + 1, 0x00);
io_outb(SERIAL_PORT + 3, 0x80);
io_outb(SERIAL_PORT + 0, 0x03);
io_outb(SERIAL_PORT + 1, 0x00);
io_outb(SERIAL_PORT + 3, 0x03);
io_outb(SERIAL_PORT + 2, 0xc7);
io_outb(SERIAL_PORT + 4, 0x0b);
io_outb(SERIAL_PORT + 4, 0x1e);
io_outb(SERIAL_PORT + 0, 0xae);
io_out8(SERIAL_PORT + 1, 0x00);
io_out8(SERIAL_PORT + 3, 0x80);
io_out8(SERIAL_PORT + 0, 0x03);
io_out8(SERIAL_PORT + 1, 0x00);
io_out8(SERIAL_PORT + 3, 0x03);
io_out8(SERIAL_PORT + 2, 0xc7);
io_out8(SERIAL_PORT + 4, 0x0b);
io_out8(SERIAL_PORT + 4, 0x1e);
io_out8(SERIAL_PORT + 0, 0xae);
if (io_inb(SERIAL_PORT + 0) != 0xae) {
if (io_in8(SERIAL_PORT + 0) != 0xae) {
return false;
}
io_outb(SERIAL_PORT + 4, 0x0f);
io_out8(SERIAL_PORT + 4, 0x0f);
return true;
}

View File

@ -0,0 +1,81 @@
name: CI
on: [push, pull_request]
jobs:
lint-python-scripts:
runs-on: ubuntu-latest
strategy:
fail-fast: true
steps:
- uses: actions/checkout@v3
- name: Install flake8 & mypy
run: |
sudo apt update
sudo apt install python3 python3-pip
export PIP_BREAK_SYSTEM_PACKAGES=1
pip install flake8 mypy
- name: Run flake8 on the project
run: flake8 --ignore=E743 tests/*.py tests/utilities/*.py tests/generated_test_cases/*.py
- name: Run mypy on the project
run: mypy --disallow-incomplete-defs --no-implicit-optional tests/*.py tests/utilities/*.py tests/generated_test_cases/*.py
build-and-run-tests:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: true
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v3
with:
submodules: true
- if: ${{ matrix.os != 'macos-latest' }}
name: Set up OpenWatcom
uses: open-watcom/setup-watcom@v0
with:
version: '2.0'
- if: ${{ matrix.os == 'ubuntu-latest' }}
name: Install tools & libraries (Ubuntu)
run: |
sudo apt update
sudo apt install python3 python3-pytest acpica-tools cmake gcc-multilib g++-multilib
# https://github.com/actions/runner-images/issues/9491#issuecomment-1989718917
sudo sysctl vm.mmap_rnd_bits=28
- if: ${{ matrix.os == 'macos-latest' }}
name: Install tools & libraries (MacOS)
run: |
export PIP_BREAK_SYSTEM_PACKAGES=1
brew install python3 acpica cmake
python3 -m pip install pytest
- if: ${{ matrix.os == 'windows-latest' }}
name: Install tools & libraries (Windows)
run: |
choco install python3 iasl cmake llvm
python3 -m pip install pytest
- name: Ensure reduced-hardware/unsized-frees/fmt-logging/no-kernel-init/builtin-string build compiles
run: |
cd ${{ github.workspace}}/tests/runner
mkdir reduced-hw-build && cd reduced-hw-build
cmake .. -DREDUCED_HARDWARE_BUILD=1 -DSIZED_FREES_BUILD=0 -DFORMATTED_LOGGING_BUILD=1 -DNATIVE_ALLOC_ZEROED=1 -DKERNEL_INITIALIZATION=0 -DBUILTIN_STRING=1
cmake --build .
- name: Run tests (64-bit)
run: python3 ${{ github.workspace }}/tests/run_tests.py --bitness=64 --large --barebones
# MacOS doesn't want to compile i386 (at least easily) so just ignore it:
# ld: warning: The i386 architecture is deprecated for macOS
# ld: dynamic executables or dylibs must link with libSystem.dylib for architecture i386
# clang: error: linker command failed with exit code 1 (use -v to see invocation)
- if: ${{ matrix.os != 'macos-latest' }}
name: Run tests (32-bit)
run: python3 ${{ github.workspace }}/tests/run_tests.py --bitness=32 --large --barebones
- if: ${{ matrix.os != 'macos-latest' }}
name: Run tests (OpenWatcom)
run: python3 ${{ github.workspace }}/tests/run_tests.py --large --barebones --watcom

9
kernel/hal/x86_64/uACPI/.gitignore vendored Normal file
View File

@ -0,0 +1,9 @@
.DS_Store
*.aml
*.dsl
.idea/
cmake-build-*/
build-*/
tests/bin/
tests/acpi-dumps/
__pycache__

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022-2025 Daniil Tatianin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,399 @@
# uACPI
A portable and easy-to-integrate implementation of the Advanced Configuration and Power Interface (ACPI).
[![CI](https://github.com/UltraOS/uACPI/actions/workflows/main.yml/badge.svg)](https://github.com/UltraOS/uACPI/actions/workflows/main.yml)
## Features
- A fast and well-tested AML interpreter optimized to use very little stack space
- NT-compatible on a fundamental level (see [examples](#more-detailed-overview))
- Very easy to integrate (ships with own overridable standard library implementation)
- Highly flexible and configurable (optional sized frees, reduced-hw-only mode, etc.)
- A fairly advanced event subsystem (GPE/fixed, wake, implicit notify, AML handlers)
- Table management API (search, dynamic installation/loading, overrides, etc.)
- Operation region subsystem (user handlers, support for BufferAcc opregions, builtins for common types)
- Sleep state management (transition to any S state, wake vector programming)
- PCI routing table retrieval & interrupt model API
- Device search API
- Resource subsystem supporting every resource defined by ACPI 6.6
- Interface & feature management exposed via _OSI
- Client-defined Notify() handlers
- Firmware global lock management (_GL, locked fields, public API)
- GAS read/write API
- Fully thread safe
- Supports both 32-bit and 64-bit platforms
- A special barebones mode with only table API (see [config.h](include/uacpi/platform/config.h#L127))
## Why would I use this over ACPICA?
### 1. Performance
uACPI shows a consistent speedup of about **3.5x** over ACPICA in synthetic AML tests.
<details><summary>More details</summary>
Code that was tested:
```asl
Method (TOMS, 2, NotSerialized) {
Return ((Arg1 - Arg0) / 10000)
}
Method (ADDM, 1, NotSerialized) {
Local0 = 0
While (Arg0) {
Local0 += Arg0 + Arg0
Arg0--
}
Return (Local0)
}
// Record start time
Local0 = Timer
// Run 10 million additions
Local1 = ADDM(10000000)
// Make sure the answer matches expected
If (Local1 != 0x5AF31112D680) {
Printf("Bad test result %o", Local1)
Return (1)
}
// Record end time
Local2 = Timer
Printf("10,000,000 additions took %o ms",
ToDecimalString(TOMS(Local0, Local2)))
```
Compile options (acpiexec and uACPI's test-runner): `-O3 -flto -march=znver4 -mtune=znver4`
CPU: AMD Ryzen 9 9950X3D
Raw test scores (~average over 10 runs):
- ACPICA: 16661 ms
- uACPI: 4753 ms
**Raw difference: 3.5053x**
</details>
Real hardware tests of the same operating system using uACPI vs ACPICA show
at least a **1.75-2x speedup** while measuring the time it takes to load the initial
AML namespace.
<details><summary>More details</summary>
OS: [proxima](https://github.com/proxima-os)
Compile options: `-O3 -flto`
### Test Subject 1
Specs: Gigabyte B550M S2H, AMD Ryzen 5800X, 64GB RAM
Firmware: F19d (10097 AML opcodes)
Results:
- ACPICA: 3,936,953 ns
- uACPI: 1,902,077 ns
**Raw difference: 2.0698x**
### Test Subject 2
Specs: Toshiba Portege R30-A, Intel Core i5-4200M, 4GB RAM
Firmware: 4.40 (4962 AML opcodes)
Results:
- ACPICA: 10,899,233 ns
- uACPI: 6,227,036 ns
**Raw difference: 1.7503x**
</details>
### 2. NT-compatible from the ground up
Over the decades of development, ACPICA has accumulated a lot of workarounds for
AML expecting NT-specific behaviors, and is still missing compatibility in a lot
of critical aspects.
uACPI, on the other hand, is built to be natively NT-compatible without extra
workarounds.
Some specific highlights include:
- Reference objects, especially multi-level reference chains
- Implicit cast semantics
- Object mutability
- Named object resolution, especially for named objects inside packages
### 3. Fundamental safety
uACPI is built to always assume the worst about the AML byte code it's executing,
and as such, has a more sophisticated object lifetime tracking system, as well
as carefully designed handling for various edge-cases, including race conditions.
Some of the standard uACPI test cases crash both ACPICA, and the NT AML
interpreters.
While a permanent fuzzing solution for uACPI is currently WIP, it has already
been fuzzed quite extensively and all known issues have been fixed.
### 4. No recursion
Running at kernel level has a lot of very strict limitations, one of which is a
tiny stack size, which can sometimes be only a few pages in length.
Of course, both ACPICA and uACPI have non-recursive AML interpreters, but there
are still edge cases that cause potentially unbounded recursion.
One such example are the dynamic table load operators from AML
(`Load`/`LoadTable`): these cause a linear growth in stack usage per call in
ACPICA, whereas in uACPI these are treated as special method calls,
and as such, don't increase stack usage whatsoever.
### More detailed overview
Expressions within package:
```asl
Method (TEST) {
Local0 = 10
Local1 = Package { Local0 * 5 }
Return (DerefOf(Local1[0]))
}
// ACPICA: AE_SUPPORT, Expressions within package elements are not supported
// Windows, uACPI: Local0 = 50
Local0 = TEST()
```
Packages outside of a control method:
```asl
// ACPICA: internal error
// Windows, uACPI: ok
Local0 = Package { 1 }
```
Reference rebind semantics:
```asl
Local0 = 123
Local1 = RefOf(Local0)
// ACPICA: Local1 = 321, Local0 = 123
// Windows, uACPI: Local1 = reference->Local0, Local0 = 321
Local1 = 321
```
Increment/Decrement:
```asl
Local0 = 123
Local1 = RefOf(Local0)
// ACPICA: error
// Windows, uACPI: Local0 = 124
Local1++
```
Multilevel references:
```asl
Local0 = 123
Local1 = RefOf(Local0)
Local2 = RefOf(Local1)
// ACPICA: Local3 = reference->Local0
// Windows, uACPI: Local3 = 123
Local3 = DerefOf(Local2)
```
Implicit-cast semantics:
```asl
Name (TEST, "BAR")
// ACPICA: TEST = "00000000004F4F46"
// Windows, uACPI: TEST = "FOO"
TEST = 0x4F4F46
```
Buffer size mutability:
```asl
Name (TEST, "XXXX")
Name (VAL, "")
// ACPICA: TEST = "LONGSTRING"
// Windows, uACPI: TEST = "LONG"
TEST = "LONGSTRING"
// ACPICA: VAL = "FOO"
// Windows, uACPI: VAL = ""
VAL = "FOO"
```
Returning a reference to a local object:
```asl
Method (TEST) {
Local0 = 123
// Use-after-free in ACPICA, perfectly fine in uACPI
Return (RefOf(Local0))
}
Method (FOO) {
Name (TEST, 123)
// Use-after-free in ACPICA, object lifetime prolonged in uACPI (node is still removed from the namespace)
Return (RefOf(TEST))
}
```
CopyObject into self:
```asl
Method (TEST) {
CopyObject(123, TEST)
Return (1)
}
// Segfault in ACPICA, prints 1 in uACPI
Debug = TEST()
// Unreachable in ACPICA, prints 123 in uACPI
Debug = TEST
```
There's even more examples, but this should be enough to demonstrate the fundamental differences in designs.
## Integrating into a kernel
### 1. Add uACPI sources & include directories into your project
#### If you're using CMake
Simply add the following lines to your cmake:
```cmake
include(uacpi/uacpi.cmake)
target_sources(
my-kernel
PRIVATE
${UACPI_SOURCES}
)
target_include_directories(
my-kernel
PRIVATE
${UACPI_INCLUDES}
)
```
#### If you're using Meson
Add the following lines to your meson.build:
```meson
uacpi = subproject('uacpi')
uacpi_sources = uacpi.get_variable('sources')
my_kernel_sources += uacpi_sources
uacpi_includes = uacpi.get_variable('includes')
my_kernel_includes += uacpi_includes
```
#### Any other build system
- Add all .c files from [source](source) into your target sources
- Add [include](include) into your target include directories
### 2. Implement/override platform-specific headers
uACPI defines all platform/architecture-specific functionality in a few headers inside [include/uacpi/platform](include/uacpi/platform)
All of the headers can be "implemented" by your project in a few ways:
- Implement the expected helpers exposed by the headers
- Replace the expected helpers by your own and override uACPI to use them by defining the respective `UACPI_OVERRIDE_X` variable.
In this case, the header becomes a proxy that includes a corresponding `uacpi_x.h` header exported by your project.
Currently used platform-specific headers are:
- [arch_helpers.h](include/uacpi/platform/arch_helpers.h) - defines architecture/cpu-specific helpers & thread-id-related interfaces
- [compiler.h](include/uacpi/platform/compiler.h) - defines compiler-specific helpers like attributes and intrinsics.
This already works for MSVC, clang & GCC so you most likely won't have to override it.
- [atomic.h](include/uacpi/platform/atomic.h) - defines compiler-specific helpers for dealing with atomic operations.
Same as the header above, this should work out of the box for MSVC, clang & GCC.
- [libc.h](include/uacpi/platform/libc.h) - an empty header by default, but may be overriden by your project
if it implements any of the libc functions used by uACPI (by default uACPI uses its
own implementations to be platform-independent and to make porting easier). The
internal implementation is just the bare minimum and not optimized in any way.
- [types.h](include/uacpi/platform/types.h) - typedefs a bunch of uacpi-specific types using the `stdint.h` header. You don't have to override this
unless you don't provide `stdint.h`.
- [config.h](include/uacpi/platform/config.h) - various compile-time options and settings, preconfigured to reasonable defaults.
### 3. Implement kernel API
uACPI relies on kernel-specific API to do things like mapping/unmapping memory, writing/reading to/from IO, PCI config space, and many more things.
This API is declared in [kernel_api.h](include/uacpi/kernel_api.h) and is implemented by your kernel.
### 4. Initialize uACPI
That's it, uACPI is now integrated into your project.
You should proceed to initialization.
Refer to the [uACPI page](https://wiki.osdev.org/uACPI) on osdev wiki to see a
snippet for basic initialization, as well as some code examples of how you may
want to use certain APIs.
All of the headers and APIs defined in [uacpi](include/uacpi/) are public and may be utilized by your project.
Anything inside [uacpi/internal](include/uacpi/internal) is considered private/undocumented and unstable API.
## Developing and contributing
Most development work is fully doable in userland using the test runner.
### Setting up an IDE:
Simply open [tests/runner/CMakeLists.txt](tests/runner/CMakeLists.txt) in your favorite IDE.
For Visual Studio:
```
cd tests\runner && mkdir build && cd build && cmake ..
```
Then just simply open the .sln file generated by cmake.
### Running the test suite:
```
./tests/run_tests.py
```
If you want to contribute:
- Commits are expected to be atomic (changing one specific thing, or introducing one feature) with detailed description (if one is warranted for), an S-o-b line is welcome
- Code style is 4-space tabs, 80 cols, the rest can be seen by just looking at the current code
**All contributions are very welcome!**
## Notable projects using uACPI & performance leaderboards
| Project | Description | (qemu w/ Q35 + KVM) ops/s | CPU |
|--- |--- |--- |--- |
| [proxima](https://github.com/proxima-os/) | Unix-like microkernel-based operating system with uACPI running in userspace | 10,454,158 | AMD Ryzen 9 9950X3D |
| [ilobilix](https://github.com/ilobilo/ilobilix) | Yet another monolithic Linux clone wannabe. Currently under a rewrite | 8,703,286 | AMD Ryzen 9 9950X3D |
| [Crescent2](https://github.com/Qwinci/crescent2) | An NT driver compatible kernel and userspace | 6,818,418 | Intel Core i5-13600K |
| [davix](https://github.com/dbstream/davix) | Yet another unix-like by some bored nerd | 6,364,623 | Intel Core i7-13700K |
| [Managarm](https://github.com/managarm/managarm) | Pragmatic microkernel-based OS with fully asynchronous I/O | 5,618,646 | Intel Core i7-14700K |
| [ChronOS](https://github.com/BUGO07/chronos) | Another basic hobby os held together by duct tape, made in rust | 5,416,703 | Intel Core Ultra 7 265KF |
| [pmOS](https://gitlab.com/mishakov/pmos) | Microkernel-based operating system written from scratch with uACPI running in userspace | 5,354,445 | AMD Ryzen 9 5900X |
| [menix](https://github.com/menix-os/menix) | A minimal and expandable Unix-like operating system | 5,239,043 | Intel Core Ultra 7 265KF |
| [Ironclad](https://ironclad.nongnu.org) | Formally verified, hard real-time capable kernel written in SPARK and Ada | 4,802,816 | Intel Core i9-13900KS |
| [Astral](https://github.com/mathewnd/astral) | Operating system written in C which aims to be POSIX-compliant | 4,189,189 | Intel Core i5-13600K |
| [Keyronex](https://github.com/Keyronex/Keyronex) | Layered kernel with fundamentally asynchronous I/O and working set model-based memory management | 4,013,691 | AMD Ryzen 5800X |
| [Orange](https://github.com/cppLover0/Orange) | x86_64 Unix-like OS | 2,377,330 | AMD Ryzen 5 3600 |
| [OBOS](https://github.com/OBOS-dev/obos) | Hybrid Kernel with advanced driver loading | 2,141,179 | Intel Core i5-13600K |
| [NyauxKC](https://github.com/rayanmargham/NyauxKC) | Monolithic UNIX-like multi-architecture kernel | 1,966,580 | Intel Core i7-13700K |
| [ElysiumOS](https://github.com/imwux/elysium-os) | Hybrid Unix-like kernel | 1,737,654 | AMD Ryzen 7 5800X3D |
| [imaginarium](https://github.com/Khitiara/imaginarium) | Ziggy osdev experiments inspired by the NT kernel (using the zig general purpose allocator) | 1,504,436 | AMD Ryzen 7 3700X |
| [BadgerOS](https://github.com/badgeteam/BadgerOS) | A monolithic lightweight UNIX clone | 1,018,518 | AMD Ryzen 5 3600 |
| [Hyra](https://github.com/sigsegv7/Hyra) | Monolithic UNIX-like OS by [OSMORA.ORG](https://osmora.org) | 199,873 | Intel Core i3-3220 |
## License
<a href="https://opensource.org/licenses/MIT">
<img align="right" height="96" alt="MIT License" src="https://branding.cute.engineering/licenses/mit.svg" />
</a>
uACPI is licensed under the **MIT License**.
The full license text is provided in the [LICENSE](LICENSE) file inside the root directory.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,53 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/log.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Set the minimum log level to be accepted by the logging facilities. Any logs
* below this level are discarded and not passed to uacpi_kernel_log, etc.
*
* 0 is treated as a special value that resets the setting to the default value.
*
* E.g. for a log level of UACPI_LOG_INFO:
* UACPI_LOG_DEBUG -> discarded
* UACPI_LOG_TRACE -> discarded
* UACPI_LOG_INFO -> allowed
* UACPI_LOG_WARN -> allowed
* UACPI_LOG_ERROR -> allowed
*/
void uacpi_context_set_log_level(uacpi_log_level);
/*
* Enables table checksum validation at installation time instead of first use.
* Note that this makes uACPI map the entire table at once, which not all
* hosts are able to handle at early init.
*/
void uacpi_context_set_proactive_table_checksum(uacpi_bool);
#ifndef UACPI_BAREBONES_MODE
/*
* Set the maximum number of seconds a While loop is allowed to run for before
* getting timed out.
*
 * 0 is treated as a special value that resets the setting to the default value.
*/
void uacpi_context_set_loop_timeout(uacpi_u32 seconds);
/*
* Set the maximum call stack depth AML can reach before getting aborted.
*
* 0 is treated as a special value that resets the setting to the default value.
*/
void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth);
uacpi_u32 uacpi_context_get_loop_timeout(void);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,286 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/uacpi.h>
#include <uacpi/acpi.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
typedef enum uacpi_fixed_event {
UACPI_FIXED_EVENT_TIMER_STATUS = 1,
UACPI_FIXED_EVENT_POWER_BUTTON,
UACPI_FIXED_EVENT_SLEEP_BUTTON,
UACPI_FIXED_EVENT_RTC,
UACPI_FIXED_EVENT_MAX = UACPI_FIXED_EVENT_RTC,
} uacpi_fixed_event;
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_install_fixed_event_handler(
uacpi_fixed_event event, uacpi_interrupt_handler handler, uacpi_handle user
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_uninstall_fixed_event_handler(
uacpi_fixed_event event
))
/*
* Enable/disable a fixed event. Note that the event is automatically enabled
* upon installing a handler to it.
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event)
)
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event)
)
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event)
)
typedef enum uacpi_event_info {
// Event is enabled in software
UACPI_EVENT_INFO_ENABLED = (1 << 0),
// Event is enabled in software (only for wake)
UACPI_EVENT_INFO_ENABLED_FOR_WAKE = (1 << 1),
// Event is masked
UACPI_EVENT_INFO_MASKED = (1 << 2),
// Event has a handler attached
UACPI_EVENT_INFO_HAS_HANDLER = (1 << 3),
// Hardware enable bit is set
UACPI_EVENT_INFO_HW_ENABLED = (1 << 4),
// Hardware status bit is set
UACPI_EVENT_INFO_HW_STATUS = (1 << 5),
} uacpi_event_info;
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_fixed_event_info(
uacpi_fixed_event event, uacpi_event_info *out_info
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_gpe_info(
uacpi_namespace_node *gpe_device, uacpi_u16 idx,
uacpi_event_info *out_info
))
// Set if the handler wishes to reenable the GPE it just handled
#define UACPI_GPE_REENABLE (1 << 7)
typedef uacpi_interrupt_ret (*uacpi_gpe_handler)(
uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx
);
typedef enum uacpi_gpe_triggering {
UACPI_GPE_TRIGGERING_LEVEL = 0,
UACPI_GPE_TRIGGERING_EDGE = 1,
UACPI_GPE_TRIGGERING_MAX = UACPI_GPE_TRIGGERING_EDGE,
} uacpi_gpe_triggering;
const uacpi_char *uacpi_gpe_triggering_to_string(
uacpi_gpe_triggering triggering
);
/*
* Installs a handler to the provided GPE at 'idx' controlled by device
* 'gpe_device'. The GPE is automatically disabled & cleared according to the
* configured triggering upon invoking the handler. The event is optionally
* re-enabled (by returning UACPI_GPE_REENABLE from the handler)
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_install_gpe_handler(
uacpi_namespace_node *gpe_device, uacpi_u16 idx,
uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx
))
/*
* Installs a raw handler to the provided GPE at 'idx' controlled by device
* 'gpe_device'. The handler is dispatched immediately after the event is
* received, status & enable bits are untouched.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_install_gpe_handler_raw(
uacpi_namespace_node *gpe_device, uacpi_u16 idx,
uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_uninstall_gpe_handler(
uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_handler handler
))
/*
* Marks the GPE 'idx' managed by 'gpe_device' as wake-capable. 'wake_device' is
* optional and configures the GPE to generate an implicit notification whenever
* an event occurs.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_setup_gpe_for_wake(
uacpi_namespace_node *gpe_device, uacpi_u16 idx,
uacpi_namespace_node *wake_device
))
/*
* Mark a GPE managed by 'gpe_device' as enabled/disabled for wake. The GPE must
* have previously been marked by calling uacpi_gpe_setup_for_wake. This
* function only affects the GPE enable register state following the call to
* uacpi_gpe_enable_all_for_wake.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_enable_gpe_for_wake(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_disable_gpe_for_wake(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
/*
* Finalize GPE initialization by enabling all GPEs not configured for wake and
* having a matching AML handler detected.
*
 * This should be called after the kernel power management subsystem has
* enumerated all of the devices, executing their _PRW methods etc., and
* marking those it wishes to use for wake by calling uacpi_setup_gpe_for_wake
* or uacpi_mark_gpe_for_wake.
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_finalize_gpe_initialization(void)
)
/*
* Enable/disable a general purpose event managed by 'gpe_device'. Internally
* this uses reference counting to make sure a GPE is not disabled until all
* possible users of it do so. GPEs not marked for wake are enabled
* automatically so this API is only needed for wake events or those that don't
* have a corresponding AML handler.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_enable_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_disable_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
/*
* Clear the status bit of the event 'idx' managed by 'gpe_device'.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_clear_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
/*
* Suspend/resume a general purpose event managed by 'gpe_device'. This bypasses
* the reference counting mechanism and unconditionally clears/sets the
* corresponding bit in the enable registers. This is used for switching the GPE
* to poll mode.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_suspend_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_resume_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
/*
* Finish handling the GPE managed by 'gpe_device' at 'idx'. This clears the
* status registers if it hasn't been cleared yet and re-enables the event if
* it was enabled before.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_finish_handling_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
/*
 * Hard mask/unmask a general purpose event at 'idx' managed by 'gpe_device'.
* This is used to permanently silence an event so that further calls to
* enable/disable as well as suspend/resume get ignored. This might be necessary
* for GPEs that cause an event storm due to the kernel's inability to properly
* handle them. The only way to enable a masked event is by a call to unmask.
*
* NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_mask_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_unmask_gpe(
uacpi_namespace_node *gpe_device, uacpi_u16 idx
))
/*
* Disable all GPEs currently set up on the system.
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_disable_all_gpes(void)
)
/*
* Enable all GPEs not marked as wake. This is only needed after the system
* wakes from a shallow sleep state and is called automatically by wake code.
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_enable_all_runtime_gpes(void)
)
/*
* Enable all GPEs marked as wake. This is only needed before the system goes
 * to sleep and is called automatically by sleep code.
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_enable_all_wake_gpes(void)
)
/*
* Install/uninstall a new GPE block, usually defined by a device in the
* namespace with a _HID of ACPI0006.
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_install_gpe_block(
uacpi_namespace_node *gpe_device, uacpi_u64 address,
uacpi_address_space address_space, uacpi_u16 num_registers,
uacpi_u32 irq
))
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_uninstall_gpe_block(
uacpi_namespace_node *gpe_device
))
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,12 @@
#pragma once
#include <uacpi/platform/compiler.h>
#define UACPI_BUILD_BUG_ON_WITH_MSG(expr, msg) UACPI_STATIC_ASSERT(!(expr), msg)
#define UACPI_BUILD_BUG_ON(expr) \
UACPI_BUILD_BUG_ON_WITH_MSG(expr, "BUILD BUG: " #expr " evaluated to true")
#define UACPI_EXPECT_SIZEOF(type, size) \
UACPI_BUILD_BUG_ON_WITH_MSG(sizeof(type) != size, \
"BUILD BUG: invalid type size")

View File

@ -0,0 +1,3 @@
#pragma once
#include <uacpi/platform/compiler.h>

View File

@ -0,0 +1,155 @@
#pragma once
#include <uacpi/acpi.h>
#include <uacpi/types.h>
#include <uacpi/uacpi.h>
#include <uacpi/internal/dynamic_array.h>
#include <uacpi/internal/shareable.h>
#include <uacpi/context.h>
/*
 * Library-wide runtime state. The single global instance is g_uacpi_rt_ctx,
 * declared right below this struct.
 */
struct uacpi_runtime_context {
    /*
     * A local copy of FADT that has been verified & converted to most optimal
     * format for faster access to the registers.
     */
    struct acpi_fadt fadt;

    // Library-wide flag bits; tested via uacpi_check_flag() below
    uacpi_u64 flags;

#ifndef UACPI_BAREBONES_MODE
    /*
     * A cached pointer to FACS so that we don't have to look it up in interrupt
     * contexts as we can't take mutexes.
     */
    struct acpi_facs *facs;

    /*
     * pm1{a,b}_evt_blk split into two registers for convenience
     */
    struct acpi_gas pm1a_status_blk;
    struct acpi_gas pm1b_status_blk;
    struct acpi_gas pm1a_enable_blk;
    struct acpi_gas pm1b_enable_blk;

#define UACPI_SLEEP_TYP_INVALID 0xFF
    // Cached sleep type values; UACPI_SLEEP_TYP_INVALID when not yet known.
    // NOTE(review): presumably filled from \_Sx evaluation — confirm in sleep code.
    uacpi_u8 last_sleep_typ_a;
    uacpi_u8 last_sleep_typ_b;

    uacpi_u8 s0_sleep_typ_a;
    uacpi_u8 s0_sleep_typ_b;

    uacpi_bool global_lock_acquired;

#ifndef UACPI_REDUCED_HARDWARE
    uacpi_bool was_in_legacy_mode;
    uacpi_bool has_global_lock;
    // sci_handle is only meaningful while sci_handle_valid is true
    uacpi_bool sci_handle_valid;
    uacpi_handle sci_handle;
#endif

    uacpi_u64 opcodes_executed;

    // Interpreter limits (runaway-loop / runaway-recursion protection)
    uacpi_u32 loop_timeout_seconds;
    uacpi_u32 max_call_stack_depth;

    uacpi_u32 global_lock_seq_num;

    /*
     * These are stored here to protect against stuff like:
     * - CopyObject(JUNK, \)
     * - CopyObject(JUNK, \_GL)
     */
    uacpi_mutex *global_lock_mutex;
    uacpi_object *root_object;

#ifndef UACPI_REDUCED_HARDWARE
    uacpi_handle *global_lock_event;
    uacpi_handle *global_lock_spinlock;
    uacpi_bool global_lock_pending;
#endif

    uacpi_bool bad_timesource;
    // One of the UACPI_INIT_LEVEL_* constants, see uacpi_init_level_to_string()
    uacpi_u8 init_level;
#endif // !UACPI_BAREBONES_MODE

#ifndef UACPI_REDUCED_HARDWARE
    uacpi_bool is_hardware_reduced;
#endif

    /*
     * This is a per-table value but we mimic the NT implementation:
     * treat all other definition blocks as if they were the same revision
     * as DSDT.
     */
    uacpi_bool is_rev1;

    uacpi_u8 log_level;
};
extern struct uacpi_runtime_context g_uacpi_rt_ctx;
/*
 * Check whether every bit of 'flag' is set in the global runtime flag mask.
 * Only an exact, full match of the requested bits counts as true.
 */
static inline uacpi_bool uacpi_check_flag(uacpi_u64 flag)
{
    uacpi_u64 set_bits = g_uacpi_rt_ctx.flags & flag;

    return set_bits == flag;
}
/*
 * Decide whether a message of level 'lvl' passes the currently configured
 * log level filter.
 */
static inline uacpi_bool uacpi_should_log(enum uacpi_log_level lvl)
{
    return g_uacpi_rt_ctx.log_level >= lvl;
}
/*
 * Whether we are running on a hardware-reduced ACPI platform. With
 * UACPI_REDUCED_HARDWARE builds this is a compile-time constant, otherwise
 * it reflects the detected platform state.
 */
static inline uacpi_bool uacpi_is_hardware_reduced(void)
{
#ifdef UACPI_REDUCED_HARDWARE
    return UACPI_TRUE;
#else
    return g_uacpi_rt_ctx.is_hardware_reduced;
#endif
}
#ifndef UACPI_BAREBONES_MODE
/*
 * Translate an UACPI_INIT_LEVEL_* value into a human-readable name for
 * diagnostics. Unknown values map to "<invalid>".
 */
static inline const uacpi_char *uacpi_init_level_to_string(uacpi_u8 lvl)
{
    if (lvl == UACPI_INIT_LEVEL_EARLY)
        return "early";
    if (lvl == UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED)
        return "subsystem initialized";
    if (lvl == UACPI_INIT_LEVEL_NAMESPACE_LOADED)
        return "namespace loaded";
    if (lvl == UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED)
        return "namespace initialized";

    return "<invalid>";
}
/*
 * Fail the current function with UACPI_STATUS_INIT_LEVEL_MISMATCH (after
 * logging an error) unless the library has reached at least init level
 * 'lvl'. Expands to a 'return', so it is only usable inside functions
 * returning uacpi_status.
 */
#define UACPI_ENSURE_INIT_LEVEL_AT_LEAST(lvl)                               \
    do {                                                                    \
        if (uacpi_unlikely(g_uacpi_rt_ctx.init_level < lvl)) {              \
            uacpi_error(                                                    \
                "while evaluating %s: init level %d (%s) is too low, "      \
                "expected at least %d (%s)\n", __FUNCTION__,                \
                g_uacpi_rt_ctx.init_level,                                  \
                uacpi_init_level_to_string(g_uacpi_rt_ctx.init_level), lvl, \
                uacpi_init_level_to_string(lvl)                             \
            );                                                              \
            return UACPI_STATUS_INIT_LEVEL_MISMATCH;                        \
        }                                                                   \
    } while (0)

/*
 * Same as UACPI_ENSURE_INIT_LEVEL_AT_LEAST, but requires the current init
 * level to match 'lvl' exactly.
 */
#define UACPI_ENSURE_INIT_LEVEL_IS(lvl)                                     \
    do {                                                                    \
        if (uacpi_unlikely(g_uacpi_rt_ctx.init_level != lvl)) {             \
            uacpi_error(                                                    \
                "while evaluating %s: invalid init level %d (%s), "         \
                "expected %d (%s)\n", __FUNCTION__,                         \
                g_uacpi_rt_ctx.init_level,                                  \
                uacpi_init_level_to_string(g_uacpi_rt_ctx.init_level), lvl, \
                uacpi_init_level_to_string(lvl)                             \
            );                                                              \
            return UACPI_STATUS_INIT_LEVEL_MISMATCH;                        \
        }                                                                   \
    } while (0)
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,185 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/kernel_api.h>
/*
 * Declares a dynamic array type 'name' holding 'type' elements, with
 * 'inline_capacity' elements stored inline before any heap storage is used.
 *
 * Fixes vs the previous revision:
 *  - dropped the stray trailing '\' after the closing '};', which could
 *    splice the following preprocessor line into this macro's body
 *  - the declaration of name##_last() in the EXPORTS macro was missing its
 *    terminating ';', producing invalid C on expansion
 */
#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE(name, type, inline_capacity) \
    struct name {                                                      \
        type inline_storage[inline_capacity];                          \
        type *dynamic_storage;                                         \
        uacpi_size dynamic_capacity;                                   \
        uacpi_size size_including_inline;                              \
    };

// Current number of elements (inline + dynamic) stored in the array
#define DYNAMIC_ARRAY_SIZE(arr) ((arr)->size_including_inline)

// Forward-declares the accessor set generated by the _IMPL macro below
#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE_EXPORTS(name, type, prefix) \
    prefix uacpi_size name##_inline_capacity(struct name *arr);       \
    prefix type *name##_at(struct name *arr, uacpi_size idx);         \
    prefix type *name##_alloc(struct name *arr);                      \
    prefix type *name##_calloc(struct name *arr);                     \
    prefix void name##_pop(struct name *arr);                         \
    prefix uacpi_size name##_size(struct name *arr);                  \
    prefix type *name##_last(struct name *arr);                       \
    prefix void name##_clear(struct name *arr);
#ifndef UACPI_BAREBONES_MODE
/*
 * Generates name##_alloc(): reserves space for one more element and returns
 * a pointer to it (UACPI_NULL on allocation failure). Elements fill the
 * inline storage first; once full, they go to a heap-allocated buffer that
 * grows by roughly 1.5x per reallocation (first allocation mirrors the
 * inline capacity). Note uacpi_free() on a NULL buffer is a no-op, so the
 * very first growth is safe.
 */
#define DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix)                          \
    UACPI_MAYBE_UNUSED                                                      \
    prefix type *name##_alloc(struct name *arr)                             \
    {                                                                       \
        uacpi_size inline_cap;                                              \
        type *out_ptr;                                                      \
                                                                            \
        inline_cap = name##_inline_capacity(arr);                           \
                                                                            \
        if (arr->size_including_inline >= inline_cap) {                     \
            uacpi_size dynamic_size;                                        \
                                                                            \
            dynamic_size = arr->size_including_inline - inline_cap;         \
            if (dynamic_size == arr->dynamic_capacity) {                    \
                uacpi_size bytes, type_size;                                \
                void *new_buf;                                              \
                                                                            \
                type_size = sizeof(*arr->dynamic_storage);                  \
                                                                            \
                if (arr->dynamic_capacity == 0) {                           \
                    bytes = type_size * inline_cap;                         \
                } else {                                                    \
                    bytes = (arr->dynamic_capacity / 2) * type_size;        \
                    if (bytes == 0)                                         \
                        bytes += type_size;                                 \
                                                                            \
                    bytes += arr->dynamic_capacity * type_size;             \
                }                                                           \
                                                                            \
                new_buf = uacpi_kernel_alloc(bytes);                        \
                if (uacpi_unlikely(new_buf == UACPI_NULL))                  \
                    return UACPI_NULL;                                      \
                                                                            \
                arr->dynamic_capacity = bytes / type_size;                  \
                                                                            \
                if (arr->dynamic_storage) {                                 \
                    uacpi_memcpy(new_buf, arr->dynamic_storage,             \
                                 dynamic_size * type_size);                 \
                }                                                           \
                uacpi_free(arr->dynamic_storage, dynamic_size * type_size); \
                arr->dynamic_storage = new_buf;                             \
            }                                                               \
                                                                            \
            out_ptr = &arr->dynamic_storage[dynamic_size];                  \
            goto ret;                                                       \
        }                                                                   \
        out_ptr = &arr->inline_storage[arr->size_including_inline];         \
    ret:                                                                    \
        arr->size_including_inline++;                                       \
        return out_ptr;                                                     \
    }
/*
 * Generates name##_clear(): releases the heap buffer (if any) and resets
 * the array to empty. Inline storage contents are left untouched.
 */
#define DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix)                    \
    prefix void name##_clear(struct name *arr)                        \
    {                                                                 \
        uacpi_free(                                                   \
            arr->dynamic_storage,                                     \
            arr->dynamic_capacity * sizeof(*arr->dynamic_storage)     \
        );                                                            \
        arr->size_including_inline = 0;                               \
        arr->dynamic_capacity = 0;                                    \
        arr->dynamic_storage = UACPI_NULL;                            \
    }
#else
/*
 * Barebones-mode name##_alloc(): same contract as the full version, but
 * never allocates memory — once the inline capacity (plus any preset
 * dynamic capacity) is exhausted it simply fails with UACPI_NULL.
 */
#define DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix)                     \
    UACPI_MAYBE_UNUSED                                                 \
    prefix type *name##_alloc(struct name *arr)                        \
    {                                                                  \
        uacpi_size inline_cap;                                         \
        type *out_ptr;                                                 \
                                                                       \
        inline_cap = name##_inline_capacity(arr);                      \
                                                                       \
        if (arr->size_including_inline >= inline_cap) {                \
            uacpi_size dynamic_size;                                   \
                                                                       \
            dynamic_size = arr->size_including_inline - inline_cap;    \
            if (uacpi_unlikely(dynamic_size == arr->dynamic_capacity)) \
                return UACPI_NULL;                                     \
                                                                       \
            out_ptr = &arr->dynamic_storage[dynamic_size];             \
            goto ret;                                                  \
        }                                                              \
        out_ptr = &arr->inline_storage[arr->size_including_inline];    \
    ret:                                                               \
        arr->size_including_inline++;                                  \
        return out_ptr;                                                \
    }
/*
 * Barebones-mode name##_clear(): there is nothing to free, just reset all
 * bookkeeping fields.
 */
#define DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) \
    prefix void name##_clear(struct name *arr)     \
    {                                              \
        arr->size_including_inline = 0;            \
        arr->dynamic_capacity = 0;                 \
        arr->dynamic_storage = UACPI_NULL;         \
    }
#endif
/*
 * Generates the accessor set for an array type declared with
 * DYNAMIC_ARRAY_WITH_INLINE_STORAGE: capacity queries, bounds-checked
 * element lookup (returns UACPI_NULL out of range), allocation and
 * zero-initializing allocation, pop, size, last-element access, and clear.
 * Note: name##_last() on an empty array relies on the size underflowing to
 * a huge index, which name##_at() then rejects with UACPI_NULL.
 */
#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(name, type, prefix)           \
    UACPI_MAYBE_UNUSED                                                       \
    prefix uacpi_size name##_inline_capacity(struct name *arr)               \
    {                                                                        \
        return sizeof(arr->inline_storage) / sizeof(arr->inline_storage[0]); \
    }                                                                        \
                                                                             \
    UACPI_MAYBE_UNUSED                                                       \
    prefix uacpi_size name##_capacity(struct name *arr)                      \
    {                                                                        \
        return name##_inline_capacity(arr) + arr->dynamic_capacity;          \
    }                                                                        \
                                                                             \
    prefix type *name##_at(struct name *arr, uacpi_size idx)                 \
    {                                                                        \
        if (idx >= arr->size_including_inline)                               \
            return UACPI_NULL;                                               \
                                                                             \
        if (idx < name##_inline_capacity(arr))                               \
            return &arr->inline_storage[idx];                                \
                                                                             \
        return &arr->dynamic_storage[idx - name##_inline_capacity(arr)];     \
    }                                                                        \
                                                                             \
    DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix)                               \
                                                                             \
    UACPI_MAYBE_UNUSED                                                       \
    prefix type *name##_calloc(struct name *arr)                             \
    {                                                                        \
        type *ret;                                                           \
                                                                             \
        ret = name##_alloc(arr);                                             \
        if (ret)                                                             \
            uacpi_memzero(ret, sizeof(*ret));                                \
                                                                             \
        return ret;                                                          \
    }                                                                        \
                                                                             \
    UACPI_MAYBE_UNUSED                                                       \
    prefix void name##_pop(struct name *arr)                                 \
    {                                                                        \
        if (arr->size_including_inline == 0)                                 \
            return;                                                          \
                                                                             \
        arr->size_including_inline--;                                        \
    }                                                                        \
                                                                             \
    UACPI_MAYBE_UNUSED                                                       \
    prefix uacpi_size name##_size(struct name *arr)                          \
    {                                                                        \
        return arr->size_including_inline;                                   \
    }                                                                        \
                                                                             \
    UACPI_MAYBE_UNUSED                                                       \
    prefix type *name##_last(struct name *arr)                               \
    {                                                                        \
        return name##_at(arr, arr->size_including_inline - 1);               \
    }                                                                        \
                                                                             \
    DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix)

View File

@ -0,0 +1,25 @@
#pragma once
#include <uacpi/event.h>
// This fixed event is internal-only, and we don't expose it in the enum
#define UACPI_FIXED_EVENT_GLOBAL_LOCK 0
UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_initialize_events_early(void)
)
UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_initialize_events(void)
)
UACPI_STUB_IF_REDUCED_HARDWARE(
void uacpi_deinitialize_events(void)
)
UACPI_STUB_IF_REDUCED_HARDWARE(
void uacpi_events_match_post_dynamic_table_load(void)
)
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_clear_all_events(void)
)

View File

@ -0,0 +1,7 @@
#pragma once
#include <uacpi/helpers.h>
// Number of elements in a statically-sized array (do not use on pointers)
#define UACPI_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
// Suppress unused-parameter/variable warnings
#define UACPI_UNUSED(x) (void)(x)

View File

@ -0,0 +1,24 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#include <uacpi/internal/namespace.h>
#ifndef UACPI_BAREBONES_MODE
// The reason a definition block is being loaded, passed to uacpi_execute_table()
enum uacpi_table_load_cause {
    // AML Load operator
    UACPI_TABLE_LOAD_CAUSE_LOAD_OP,
    // AML LoadTable operator
    UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP,
    // initial table load during library initialization
    UACPI_TABLE_LOAD_CAUSE_INIT,
    // explicit request by the host
    UACPI_TABLE_LOAD_CAUSE_HOST,
};
uacpi_status uacpi_execute_table(void*, enum uacpi_table_load_cause cause);
uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval);
uacpi_status uacpi_execute_control_method(
uacpi_namespace_node *scope, uacpi_control_method *method,
const uacpi_object_array *args, uacpi_object **ret
);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,77 @@
#pragma once
#include <uacpi/internal/types.h>
#include <uacpi/acpi.h>
#include <uacpi/io.h>
#ifndef UACPI_BAREBONES_MODE
/*
 * An ACPI Generic Address Structure resolved into a directly usable form:
 * an opaque mapping handle plus read/write/unmap callbacks appropriate for
 * the target address space (set up by uacpi_map_gas_noalloc below).
 */
typedef struct uacpi_mapped_gas {
    // opaque cookie passed back to the read/write/unmap callbacks
    uacpi_handle mapping;
    uacpi_u8 access_bit_width;
    uacpi_u8 total_bit_width;
    uacpi_u8 bit_offset;

    // NOTE(review): 'width' presumably in bytes, mirroring
    // uacpi_system_memory_read/write below — confirm at the call sites
    uacpi_status (*read)(
        uacpi_handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
    );
    uacpi_status (*write)(
        uacpi_handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
    );
    void (*unmap)(uacpi_handle, uacpi_size);
} uacpi_mapped_gas;
uacpi_status uacpi_map_gas_noalloc(
const struct acpi_gas *gas, uacpi_mapped_gas *out_mapped
);
void uacpi_unmap_gas_nofree(uacpi_mapped_gas *gas);
uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length);
void uacpi_read_buffer_field(
const uacpi_buffer_field *field, void *dst
);
void uacpi_write_buffer_field(
uacpi_buffer_field *field, const void *src, uacpi_size size
);
uacpi_status uacpi_field_unit_get_read_type(
struct uacpi_field_unit *field, uacpi_object_type *out_type
);
uacpi_status uacpi_field_unit_get_bit_length(
struct uacpi_field_unit *field, uacpi_size *out_length
);
uacpi_status uacpi_read_field_unit(
uacpi_field_unit *field, void *dst, uacpi_size size,
uacpi_data_view *wtr_response
);
uacpi_status uacpi_write_field_unit(
uacpi_field_unit *field, const void *src, uacpi_size size,
uacpi_data_view *wtr_response
);
uacpi_status uacpi_system_memory_read(
void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
);
uacpi_status uacpi_system_memory_write(
void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
);
uacpi_status uacpi_system_io_read(
uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
);
uacpi_status uacpi_system_io_write(
uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
);
uacpi_status uacpi_pci_read(
uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
);
uacpi_status uacpi_pci_write(
uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,23 @@
#pragma once
#include <uacpi/kernel_api.h>
#include <uacpi/internal/context.h>
#include <uacpi/log.h>
#ifdef UACPI_FORMATTED_LOGGING
#define uacpi_log uacpi_kernel_log
#else
UACPI_PRINTF_DECL(2, 3)
void uacpi_log(uacpi_log_level, const uacpi_char*, ...);
#endif
// Emit a log message only if 'lvl' passes the configured filter
// (see uacpi_should_log)
#define uacpi_log_lvl(lvl, ...) \
    do { if (uacpi_should_log(lvl)) uacpi_log(lvl, __VA_ARGS__); } while (0)

// Per-level convenience wrappers around uacpi_log_lvl
#define uacpi_debug(...) uacpi_log_lvl(UACPI_LOG_DEBUG, __VA_ARGS__)
#define uacpi_trace(...) uacpi_log_lvl(UACPI_LOG_TRACE, __VA_ARGS__)
#define uacpi_info(...) uacpi_log_lvl(UACPI_LOG_INFO, __VA_ARGS__)
#define uacpi_warn(...) uacpi_log_lvl(UACPI_LOG_WARN, __VA_ARGS__)
#define uacpi_error(...) uacpi_log_lvl(UACPI_LOG_ERROR, __VA_ARGS__)
void uacpi_logger_initialize(void);

View File

@ -0,0 +1,82 @@
#pragma once
#include <uacpi/internal/types.h>
#include <uacpi/kernel_api.h>
#ifndef UACPI_BAREBONES_MODE
uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex*);
uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex*, uacpi_u16 timeout);
uacpi_status uacpi_release_aml_mutex(uacpi_mutex*);
/*
 * Take a host mutex, blocking indefinitely (0xFFFF timeout). A null handle
 * yields UACPI_STATUS_INVALID_ARGUMENT instead of being dereferenced.
 */
static inline uacpi_status uacpi_acquire_native_mutex(uacpi_handle mtx)
{
    if (mtx != UACPI_NULL)
        return uacpi_kernel_acquire_mutex(mtx, 0xFFFF);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
uacpi_status uacpi_acquire_native_mutex_with_timeout(
uacpi_handle mtx, uacpi_u16 timeout
);
/*
 * Release a host mutex previously taken via uacpi_acquire_native_mutex.
 * A null handle is rejected with UACPI_STATUS_INVALID_ARGUMENT.
 */
static inline uacpi_status uacpi_release_native_mutex(uacpi_handle mtx)
{
    uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;

    if (mtx != UACPI_NULL) {
        uacpi_kernel_release_mutex(mtx);
        ret = UACPI_STATUS_OK;
    }

    return ret;
}
/*
 * Same as uacpi_acquire_native_mutex, except a null handle is treated as
 * "nothing to lock" and reported as success.
 */
static inline uacpi_status uacpi_acquire_native_mutex_may_be_null(
    uacpi_handle mtx
)
{
    return mtx == UACPI_NULL ?
        UACPI_STATUS_OK : uacpi_kernel_acquire_mutex(mtx, 0xFFFF);
}
/*
 * Same as uacpi_release_native_mutex, except a null handle is silently
 * ignored and reported as success.
 */
static inline uacpi_status uacpi_release_native_mutex_may_be_null(
    uacpi_handle mtx
)
{
    if (mtx != UACPI_NULL)
        uacpi_kernel_release_mutex(mtx);

    return UACPI_STATUS_OK;
}
// A lock that the owning thread may re-acquire; managed exclusively through
// the uacpi_recursive_lock_* functions below
struct uacpi_recursive_lock {
    uacpi_handle mutex;
    // nesting depth of acquisitions by 'owner'
    uacpi_size depth;
    uacpi_thread_id owner;
};
uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock);
uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock);
uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock);
uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock);
// Readers-writer lock built from two host mutexes; managed through the
// uacpi_rw_lock_* / uacpi_rw_unlock_* functions below
struct uacpi_rw_lock {
    uacpi_handle read_mutex;
    uacpi_handle write_mutex;
    // count of currently active readers
    uacpi_size num_readers;
};
uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock);
uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock);
uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock);
uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock);
uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock);
uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,123 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/internal/shareable.h>
#include <uacpi/status.h>
#include <uacpi/namespace.h>
#ifndef UACPI_BAREBONES_MODE
#define UACPI_NAMESPACE_NODE_FLAG_ALIAS (1 << 0)
/*
* This node has been uninstalled and has no object associated with it.
*
* This is used to handle edge cases where an object needs to reference
* a namespace node, where the node might end up going out of scope before
* the object lifetime ends.
*/
#define UACPI_NAMESPACE_NODE_FLAG_DANGLING (1u << 1)
/*
* This node is method-local and must not be exposed via public API as its
* lifetime is limited.
*/
#define UACPI_NAMESPACE_NODE_FLAG_TEMPORARY (1u << 2)
#define UACPI_NAMESPACE_NODE_PREDEFINED (1u << 31)
/*
 * A node of the ACPI namespace tree. Reference-counted via 'shareable';
 * children hang off 'child' and are chained through 'next'.
 */
typedef struct uacpi_namespace_node {
    struct uacpi_shareable shareable;
    // the ACPI name segment of this node
    uacpi_object_name name;
    // combination of UACPI_NAMESPACE_NODE_FLAG_* / _PREDEFINED bits above
    uacpi_u32 flags;
    // attached object; absent once the node is dangling (see flags above)
    uacpi_object *object;
    struct uacpi_namespace_node *parent;
    struct uacpi_namespace_node *child;
    struct uacpi_namespace_node *next;
} uacpi_namespace_node;
uacpi_status uacpi_initialize_namespace(void);
void uacpi_deinitialize_namespace(void);
uacpi_namespace_node *uacpi_namespace_node_alloc(uacpi_object_name name);
void uacpi_namespace_node_unref(uacpi_namespace_node *node);
uacpi_status uacpi_namespace_node_type_unlocked(
const uacpi_namespace_node *node, uacpi_object_type *out_type
);
uacpi_status uacpi_namespace_node_is_one_of_unlocked(
const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
uacpi_bool *out
);
uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node);
uacpi_object *uacpi_namespace_node_get_object_typed(
const uacpi_namespace_node *node, uacpi_object_type_bits type_mask
);
uacpi_status uacpi_namespace_node_acquire_object(
const uacpi_namespace_node *node, uacpi_object **out_obj
);
uacpi_status uacpi_namespace_node_acquire_object_typed(
const uacpi_namespace_node *node, uacpi_object_type_bits,
uacpi_object **out_obj
);
uacpi_status uacpi_namespace_node_reacquire_object(
uacpi_object *obj
);
uacpi_status uacpi_namespace_node_release_object(
uacpi_object *obj
);
uacpi_status uacpi_namespace_node_install(
uacpi_namespace_node *parent, uacpi_namespace_node *node
);
uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node);
uacpi_namespace_node *uacpi_namespace_node_find_sub_node(
uacpi_namespace_node *parent,
uacpi_object_name name
);
// Whether a lookup may continue searching in ancestor scopes when the name
// is not found directly under the starting scope
enum uacpi_may_search_above_parent {
    UACPI_MAY_SEARCH_ABOVE_PARENT_NO,
    UACPI_MAY_SEARCH_ABOVE_PARENT_YES,
};

// Whether temporary (method-local) nodes should be excluded — see
// UACPI_NAMESPACE_NODE_FLAG_TEMPORARY above
enum uacpi_permanent_only {
    UACPI_PERMANENT_ONLY_NO,
    UACPI_PERMANENT_ONLY_YES,
};

// Whether the callee should take the namespace lock itself, or the caller
// already holds it
enum uacpi_should_lock {
    UACPI_SHOULD_LOCK_NO,
    UACPI_SHOULD_LOCK_YES,
};
uacpi_status uacpi_namespace_node_resolve(
uacpi_namespace_node *scope, const uacpi_char *path, enum uacpi_should_lock,
enum uacpi_may_search_above_parent, enum uacpi_permanent_only,
uacpi_namespace_node **out_node
);
uacpi_status uacpi_namespace_do_for_each_child(
uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback,
uacpi_iteration_callback ascending_callback,
uacpi_object_type_bits, uacpi_u32 max_depth, enum uacpi_should_lock,
enum uacpi_permanent_only, void *user
);
uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node);
uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node);
uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node);
uacpi_status uacpi_namespace_read_lock(void);
uacpi_status uacpi_namespace_read_unlock(void);
uacpi_status uacpi_namespace_write_lock(void);
uacpi_status uacpi_namespace_write_unlock(void);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,13 @@
#pragma once
#include <uacpi/internal/types.h>
#include <uacpi/notify.h>
#ifndef UACPI_BAREBONES_MODE
uacpi_status uacpi_initialize_notify(void);
void uacpi_deinitialize_notify(void);
uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value);
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,49 @@
#pragma once
#include <uacpi/internal/types.h>
#include <uacpi/opregion.h>
#ifndef UACPI_BAREBONES_MODE
uacpi_status uacpi_initialize_opregion(void);
void uacpi_deinitialize_opregion(void);
void uacpi_trace_region_error(
uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret
);
uacpi_status uacpi_install_address_space_handler_with_flags(
uacpi_namespace_node *device_node, enum uacpi_address_space space,
uacpi_region_handler handler, uacpi_handle handler_context,
uacpi_u16 flags
);
void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node);
uacpi_bool uacpi_address_space_handler_is_default(
uacpi_address_space_handler *handler
);
uacpi_address_space_handlers *uacpi_node_get_address_space_handlers(
uacpi_namespace_node *node
);
uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node);
uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node);
void uacpi_install_default_address_space_handlers(void);
uacpi_bool uacpi_is_buffer_access_address_space(uacpi_address_space space);
// Payload of an operation region access: either a 64-bit integer or a raw
// byte buffer, depending on the address space
// (see uacpi_is_buffer_access_address_space)
union uacpi_opregion_io_data {
    uacpi_u64 *integer;
    uacpi_data_view buffer;
};
uacpi_status uacpi_dispatch_opregion_io(
uacpi_field_unit *field, uacpi_u32 offset,
uacpi_region_op op, union uacpi_opregion_io_data data
);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,8 @@
#pragma once
#include <uacpi/osi.h>
uacpi_status uacpi_initialize_interfaces(void);
void uacpi_deinitialize_interfaces(void);
uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value);

View File

@ -0,0 +1,7 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/registers.h>
uacpi_status uacpi_initialize_registers(void);
void uacpi_deinitialize_registers(void);

View File

@ -0,0 +1,327 @@
#pragma once
#include <uacpi/internal/types.h>
#include <uacpi/resources.h>
#ifndef UACPI_BAREBONES_MODE
enum uacpi_aml_resource {
UACPI_AML_RESOURCE_TYPE_INVALID = 0,
// Small resources
UACPI_AML_RESOURCE_IRQ,
UACPI_AML_RESOURCE_DMA,
UACPI_AML_RESOURCE_START_DEPENDENT,
UACPI_AML_RESOURCE_END_DEPENDENT,
UACPI_AML_RESOURCE_IO,
UACPI_AML_RESOURCE_FIXED_IO,
UACPI_AML_RESOURCE_FIXED_DMA,
UACPI_AML_RESOURCE_VENDOR_TYPE0,
UACPI_AML_RESOURCE_END_TAG,
// Large resources
UACPI_AML_RESOURCE_MEMORY24,
UACPI_AML_RESOURCE_GENERIC_REGISTER,
UACPI_AML_RESOURCE_VENDOR_TYPE1,
UACPI_AML_RESOURCE_MEMORY32,
UACPI_AML_RESOURCE_FIXED_MEMORY32,
UACPI_AML_RESOURCE_ADDRESS32,
UACPI_AML_RESOURCE_ADDRESS16,
UACPI_AML_RESOURCE_EXTENDED_IRQ,
UACPI_AML_RESOURCE_ADDRESS64,
UACPI_AML_RESOURCE_ADDRESS64_EXTENDED,
UACPI_AML_RESOURCE_GPIO_CONNECTION,
UACPI_AML_RESOURCE_PIN_FUNCTION,
UACPI_AML_RESOURCE_SERIAL_CONNECTION,
UACPI_AML_RESOURCE_PIN_CONFIGURATION,
UACPI_AML_RESOURCE_PIN_GROUP,
UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION,
UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION,
UACPI_AML_RESOURCE_CLOCK_INPUT,
UACPI_AML_RESOURCE_MAX = UACPI_AML_RESOURCE_CLOCK_INPUT,
};
enum uacpi_aml_resource_size_kind {
UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS,
UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
};
enum uacpi_aml_resource_kind {
UACPI_AML_RESOURCE_KIND_SMALL = 0,
UACPI_AML_RESOURCE_KIND_LARGE,
};
enum uacpi_resource_convert_opcode {
UACPI_RESOURCE_CONVERT_OPCODE_END = 0,
/*
* AML -> native:
* Take the mask at 'aml_offset' and convert to an array of uacpi_u8
* at 'native_offset' with the value corresponding to the bit index.
* The array size is written to the byte at offset 'arg2'.
*
* native -> AML:
* Walk each element of the array at 'native_offset' and set the
* corresponding bit in the mask at 'aml_offset' to 1. The array size is
* read from the byte at offset 'arg2'.
*/
UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8,
UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16,
/*
* AML -> native:
* Grab the bits at the byte at 'aml_offset' + 'bit_index', and copy its
* value into the byte at 'native_offset'.
*
* native -> AML:
* Grab first N bits at 'native_offset' and copy to 'aml_offset' starting
* at the 'bit_index'.
*
* NOTE:
* These must be contiguous in this order.
*/
UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1,
UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2,
UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3,
UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6 =
UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3 + 3,
/*
* AML -> native:
* Copy N bytes at 'aml_offset' to 'native_offset'.
*
* native -> AML:
* Copy N bytes at 'native_offset' to 'aml_offset'.
*
* 'imm' is added to the accumulator.
*
* NOTE: These are affected by the current value in the accumulator. If it's
* set to 0 at the time of evaluation, this is executed once, N times
* otherwise. 0xFF is considered a special value, which resets the
* accumulator to 0 unconditionally.
*/
UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8,
UACPI_RESOURCE_CONVERT_OPCODE_FIELD_16,
UACPI_RESOURCE_CONVERT_OPCODE_FIELD_32,
UACPI_RESOURCE_CONVERT_OPCODE_FIELD_64,
/*
* If the length of the current resource is less than 'arg0', then skip
* 'imm' instructions.
*/
UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_AML_SIZE_LESS_THAN,
/*
* Skip 'imm' instructions if 'arg0' is not equal to the value in the
* accumulator.
*/
UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_NOT_EQUALS,
/*
* AML -> native:
* Set the byte at 'native_offset' to 'imm'.
*
* native -> AML:
* Set the byte at 'aml_offset' to 'imm'.
*/
UACPI_RESOURCE_CONVERT_OPCODE_SET_TO_IMM,
/*
* AML -> native:
* Load the AML resource length into the accumulator as well as the field at
* 'native_offset' of width N.
*
* native -> AML:
* Load the resource length into the accumulator.
*/
UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32,
/*
* AML -> native:
* Load the 8 bit field at 'aml_offset' into the accumulator and store at
* 'native_offset'.
*
* native -> AML:
* Load the 8 bit field at 'native_offset' into the accumulator and store
* at 'aml_offset'.
*
* The accumulator is multiplied by 'imm' unless it's set to zero.
*/
UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_STORE,
/*
* Load the N bit field at 'native_offset' into the accumulator
*/
UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE,
UACPI_RESOURCE_CONVERT_OPCODE_LOAD_16_NATIVE,
/*
* Load 'imm' into the accumulator.
*/
UACPI_RESOURCE_CONVERT_OPCODE_LOAD_IMM,
/*
* AML -> native:
* Load the resource source at offset = aml size + accumulator into the
* uacpi_resource_source struct at 'native_offset'. The string bytes are
* written to the offset at resource size + accumulator. The presence is
* detected by comparing the length of the resource to the offset,
* 'arg2' optionally specifies the offset to the upper bound of the string.
*
* native -> AML:
* Load the resource source from the uacpi_resource_source struct at
* 'native_offset' to aml_size + accumulator. aml_size + accumulator is
* optionally written to 'aml_offset' if it's specified.
*/
UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE,
UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX,
UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL,
/*
* AML -> native:
* Load the pin table with upper bound specified at 'aml_offset'.
* The table length is calculated by subtracting the upper bound from
* aml_size and is written into the accumulator.
*
* native -> AML:
* Load the pin table length from 'native_offset' and multiply by 2, store
* the result in the accumulator.
*/
UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH,
/*
* AML -> native:
* Store the accumulator divided by 2 at 'native_offset'.
* The table is copied to the offset at resource size from offset at
* aml_size with the pointer written to the offset at 'arg2'.
*
* native -> AML:
* Read the pin table from resource size offset, write aml_size to
* 'aml_offset'. Copy accumulator bytes to the offset at aml_size.
*/
UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE,
/*
* AML -> native:
* Load vendor data with offset stored at 'aml_offset'. The length is
* calculated as aml_size - aml_offset and is written to 'native_offset'.
* The data is written to offset - aml_size with the pointer written back
* to the offset at 'arg2'.
*
* native -> AML:
* Read vendor data from the pointer at offset 'arg2' and size at
* 'native_offset', the offset to write to is calculated as the difference
* between the data pointer and the native resource end pointer.
* offset + aml_size is written to 'aml_offset' and the data is copied
* there as well.
*/
UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA,
/*
* AML -> native:
* Read the serial type from the byte at 'aml_offset' and write it to the
* type field of the uacpi_resource_serial_bus_common structure. Convert
* the serial type to native and set the resource type to it. Copy the
* vendor data to the offset at native size, the length is calculated
* as type_data_length - extra-type-specific-size, and is written to
* vendor_data_length, as well as the accumulator. The data pointer is
* written to vendor_data.
*
* native -> AML:
* Set the serial type at 'aml_offset' to the value stored at
* 'native_offset'. Load the vendor data to the offset at aml_size,
* the length is read from 'vendor_data_length', and the data is copied from
* 'vendor_data'.
*/
UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC,
/*
* Produces an error if encountered in the instruction stream.
* Used to trap invalid/unexpected code flow.
*/
UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE,
};
/*
 * A single instruction of the resource conversion "bytecode" documented by
 * enum uacpi_resource_convert_opcode above. Each operand field is a union
 * whose active member depends on 'code'.
 */
struct uacpi_resource_convert_instruction {
    // an enum uacpi_resource_convert_opcode value
    uacpi_u8 code;

    union {
        uacpi_u8 aml_offset;
        uacpi_u8 arg0;
    } f1;
    union {
        uacpi_u8 native_offset;
        uacpi_u8 arg1;
    } f2;
    union {
        uacpi_u8 imm;
        uacpi_u8 bit_index;
        uacpi_u8 arg2;
    } f3;
};
// Describes one AML resource descriptor type and its native counterpart,
// including the conversion programs between the two representations
struct uacpi_resource_spec {
    // enum uacpi_aml_resource tag of the AML-side descriptor
    uacpi_u8 type : 5;
    // the native uacpi resource type this converts to/from
    uacpi_u8 native_type : 5;
    // enum uacpi_aml_resource_kind: small or large descriptor
    uacpi_u8 resource_kind : 1;
    // enum uacpi_aml_resource_size_kind
    uacpi_u8 size_kind : 2;

    /*
     * Size of the resource as appears in the AML byte stream, for variable
     * length resources this is the minimum.
     */
    uacpi_u16 aml_size;

    /*
     * Size of the native human-readable uacpi resource, for variable length
     * resources this is the minimum. The final length is this field plus the
     * result of extra_size_for_native().
     */
    uacpi_u16 native_size;

    /*
     * Calculate the amount of extra bytes that must be allocated for a specific
     * native resource given the AML counterpart. This being NULL means no extra
     * bytes are needed, aka native resources is always the same size.
     */
    uacpi_size (*extra_size_for_native)(
        const struct uacpi_resource_spec*, void*, uacpi_size
    );

    /*
     * Calculate the number of bytes needed to represent a native resource as
     * AML. The 'aml_size' field is used if this is NULL.
     */
    uacpi_size (*size_for_aml)(
        const struct uacpi_resource_spec*, uacpi_resource*
    );

    // conversion instruction streams (presumably terminated by
    // UACPI_RESOURCE_CONVERT_OPCODE_END — confirm in the interpreter)
    const struct uacpi_resource_convert_instruction *to_native;
    const struct uacpi_resource_convert_instruction *to_aml;
};
typedef uacpi_iteration_decision (*uacpi_aml_resource_iteration_callback)(
void*, uacpi_u8 *data, uacpi_u16 resource_size,
const struct uacpi_resource_spec*
);
uacpi_status uacpi_for_each_aml_resource(
uacpi_data_view, uacpi_aml_resource_iteration_callback cb, void *user
);
uacpi_status uacpi_find_aml_resource_end_tag(
uacpi_data_view, uacpi_size *out_offset
);
uacpi_status uacpi_native_resources_from_aml(
uacpi_data_view, uacpi_resources **out_resources
);
uacpi_status uacpi_native_resources_to_aml(
uacpi_resources *resources, uacpi_object **out_template
);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,21 @@
#pragma once
#include <uacpi/types.h>
// Intrusive reference count manipulated through the uacpi_shareable_*
// functions below. They take a handle to the owning object directly, which
// presumably requires this struct to be its first member — confirm at use
// sites.
struct uacpi_shareable {
    uacpi_u32 reference_count;
};
void uacpi_shareable_init(uacpi_handle);
uacpi_bool uacpi_bugged_shareable(uacpi_handle);
void uacpi_make_shareable_bugged(uacpi_handle);
uacpi_u32 uacpi_shareable_ref(uacpi_handle);
uacpi_u32 uacpi_shareable_unref(uacpi_handle);
void uacpi_shareable_unref_and_delete_if_last(
uacpi_handle, void (*do_free)(uacpi_handle)
);
uacpi_u32 uacpi_shareable_refcount(uacpi_handle);

View File

@ -0,0 +1,128 @@
#pragma once
#include <uacpi/internal/types.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/platform/libc.h>
#include <uacpi/platform/config.h>
#include <uacpi/kernel_api.h>
#ifdef UACPI_USE_BUILTIN_STRING
#ifndef uacpi_memcpy
void *uacpi_memcpy(void *dest, const void *src, uacpi_size count);
#endif
#ifndef uacpi_memmove
void *uacpi_memmove(void *dest, const void *src, uacpi_size count);
#endif
#ifndef uacpi_memset
void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count);
#endif
#ifndef uacpi_memcmp
uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count);
#endif
#else
#ifndef uacpi_memcpy
#ifdef UACPI_COMPILER_HAS_BUILTIN_MEMCPY
#define uacpi_memcpy __builtin_memcpy
#else
extern void *memcpy(void *dest, const void *src, uacpi_size count);
#define uacpi_memcpy memcpy
#endif
#endif
#ifndef uacpi_memmove
#ifdef UACPI_COMPILER_HAS_BUILTIN_MEMMOVE
#define uacpi_memmove __builtin_memmove
#else
extern void *memmove(void *dest, const void *src, uacpi_size count);
#define uacpi_memmove memmove
#endif
#endif
#ifndef uacpi_memset
#ifdef UACPI_COMPILER_HAS_BUILTIN_MEMSET
#define uacpi_memset __builtin_memset
#else
extern void *memset(void *dest, int ch, uacpi_size count);
#define uacpi_memset memset
#endif
#endif
#ifndef uacpi_memcmp
#ifdef UACPI_COMPILER_HAS_BUILTIN_MEMCMP
#define uacpi_memcmp __builtin_memcmp
#else
extern int memcmp(const void *lhs, const void *rhs, uacpi_size count);
#define uacpi_memcmp memcmp
#endif
#endif
#endif
#ifndef uacpi_strlen
uacpi_size uacpi_strlen(const uacpi_char *str);
#endif
#ifndef uacpi_strnlen
uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max);
#endif
#ifndef uacpi_strcmp
uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs);
#endif
#ifndef uacpi_snprintf
UACPI_PRINTF_DECL(3, 4)
uacpi_i32 uacpi_snprintf(
uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ...
);
#endif
#ifndef uacpi_vsnprintf
uacpi_i32 uacpi_vsnprintf(
uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt,
uacpi_va_list vlist
);
#endif
#ifdef UACPI_SIZED_FREES
#define uacpi_free(mem, size) uacpi_kernel_free(mem, size)
#else
#define uacpi_free(mem, _) uacpi_kernel_free(mem)
#endif
#define uacpi_memzero(ptr, size) uacpi_memset(ptr, 0, size)
#define UACPI_COMPARE(x, y, op) ((x) op (y) ? (x) : (y))
#define UACPI_MIN(x, y) UACPI_COMPARE(x, y, <)
#define UACPI_MAX(x, y) UACPI_COMPARE(x, y, >)
#define UACPI_ALIGN_UP_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define UACPI_ALIGN_UP(x, val, type) UACPI_ALIGN_UP_MASK(x, (type)(val) - 1)
#define UACPI_ALIGN_DOWN_MASK(x, mask) ((x) & ~(mask))
#define UACPI_ALIGN_DOWN(x, val, type) UACPI_ALIGN_DOWN_MASK(x, (type)(val) - 1)
#define UACPI_IS_ALIGNED_MASK(x, mask) (((x) & (mask)) == 0)
#define UACPI_IS_ALIGNED(x, val, type) UACPI_IS_ALIGNED_MASK(x, (type)(val) - 1)
#define UACPI_IS_POWER_OF_TWO(x, type) UACPI_IS_ALIGNED(x, x, type)
void uacpi_memcpy_zerout(void *dst, const void *src,
uacpi_size dst_size, uacpi_size src_size);
// Returns the one-based bit location of LSb or 0
uacpi_u8 uacpi_bit_scan_forward(uacpi_u64);
// Returns the one-based bit location of MSb or 0
uacpi_u8 uacpi_bit_scan_backward(uacpi_u64);
#ifndef UACPI_NATIVE_ALLOC_ZEROED
void *uacpi_builtin_alloc_zeroed(uacpi_size size);
#define uacpi_kernel_alloc_zeroed uacpi_builtin_alloc_zeroed
#endif

View File

@ -0,0 +1,70 @@
#pragma once
#include <uacpi/internal/context.h>
#include <uacpi/internal/interpreter.h>
#include <uacpi/types.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
/*
 * Records who provided an installed table and in which address form
 * (physical vs. virtual), so the table subsystem knows how to treat
 * the backing memory.
 */
enum uacpi_table_origin {
#ifndef UACPI_BAREBONES_MODE
// Compiled out under UACPI_BAREBONES_MODE
UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL = 0,
#endif
UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL = 1,
UACPI_TABLE_ORIGIN_HOST_VIRTUAL,
UACPI_TABLE_ORIGIN_HOST_PHYSICAL,
};
/*
 * Bookkeeping record for one table known to the table subsystem
 * (see uacpi_table_install_*_with_origin / uacpi_for_each_table below).
 */
struct uacpi_installed_table {
// Physical address the table was installed from (meaning depends on origin)
uacpi_phys_addr phys_addr;
// Embedded copy of the table's SDT header
struct acpi_sdt_hdr hdr;
// Pointer to the table contents — NOTE(review): mapping lifetime appears
// tied to reference_count; confirm against the table-subsystem sources
void *ptr;
uacpi_u16 reference_count;
// Bit values for 'flags' below
#define UACPI_TABLE_LOADED (1 << 0)
#define UACPI_TABLE_CSUM_VERIFIED (1 << 1)
#define UACPI_TABLE_INVALID (1 << 2)
uacpi_u8 flags;
// One of enum uacpi_table_origin
uacpi_u8 origin;
};
uacpi_status uacpi_initialize_tables(void);
void uacpi_deinitialize_tables(void);
uacpi_bool uacpi_signatures_match(const void *const lhs, const void *const rhs);
uacpi_status uacpi_check_table_signature(void *table, const uacpi_char *expect);
uacpi_status uacpi_verify_table_checksum(void *table, uacpi_size size);
uacpi_status uacpi_table_install_physical_with_origin(
uacpi_phys_addr phys, enum uacpi_table_origin origin, uacpi_table *out_table
);
uacpi_status uacpi_table_install_with_origin(
void *virt, enum uacpi_table_origin origin, uacpi_table *out_table
);
#ifndef UACPI_BAREBONES_MODE
void uacpi_table_mark_as_loaded(uacpi_size idx);
uacpi_status uacpi_table_load_with_cause(
uacpi_size idx, enum uacpi_table_load_cause cause
);
#endif // !UACPI_BAREBONES_MODE
typedef uacpi_iteration_decision (*uacpi_table_iteration_callback)
(void *user, struct uacpi_installed_table *tbl, uacpi_size idx);
uacpi_status uacpi_for_each_table(
uacpi_size base_idx, uacpi_table_iteration_callback, void *user
);
typedef uacpi_bool (*uacpi_table_match_callback)
(struct uacpi_installed_table *tbl);
uacpi_status uacpi_table_match(
uacpi_size base_idx, uacpi_table_match_callback, uacpi_table *out_table
);
#define UACPI_PRI_TBL_HDR "'%.4s' (OEM ID '%.6s' OEM Table ID '%.8s')"
#define UACPI_FMT_TBL_HDR(hdr) (hdr)->signature, (hdr)->oemid, (hdr)->oem_table_id

View File

@ -0,0 +1,310 @@
#pragma once
#include <uacpi/status.h>
#include <uacpi/types.h>
#include <uacpi/internal/shareable.h>
#ifndef UACPI_BAREBONES_MODE
// object->flags field if object->type == UACPI_OBJECT_REFERENCE
enum uacpi_reference_kind {
UACPI_REFERENCE_KIND_REFOF = 0,
UACPI_REFERENCE_KIND_LOCAL = 1,
UACPI_REFERENCE_KIND_ARG = 2,
UACPI_REFERENCE_KIND_NAMED = 3,
UACPI_REFERENCE_KIND_PKG_INDEX = 4,
};
// object->flags field if object->type == UACPI_OBJECT_STRING
enum uacpi_string_kind {
UACPI_STRING_KIND_NORMAL = 0,
UACPI_STRING_KIND_PATH,
};
typedef struct uacpi_buffer {
struct uacpi_shareable shareable;
union {
void *data;
uacpi_u8 *byte_data;
uacpi_char *text;
};
uacpi_size size;
} uacpi_buffer;
typedef struct uacpi_package {
struct uacpi_shareable shareable;
uacpi_object **objects;
uacpi_size count;
} uacpi_package;
typedef struct uacpi_buffer_field {
uacpi_buffer *backing;
uacpi_size bit_index;
uacpi_u32 bit_length;
uacpi_bool force_buffer;
} uacpi_buffer_field;
typedef struct uacpi_buffer_index {
uacpi_size idx;
uacpi_buffer *buffer;
} uacpi_buffer_index;
typedef struct uacpi_mutex {
struct uacpi_shareable shareable;
uacpi_handle handle;
uacpi_thread_id owner;
uacpi_u16 depth;
uacpi_u8 sync_level;
} uacpi_mutex;
typedef struct uacpi_event {
struct uacpi_shareable shareable;
uacpi_handle handle;
} uacpi_event;
typedef struct uacpi_address_space_handler {
struct uacpi_shareable shareable;
uacpi_region_handler callback;
uacpi_handle user_context;
struct uacpi_address_space_handler *next;
struct uacpi_operation_region *regions;
uacpi_u16 space;
#define UACPI_ADDRESS_SPACE_HANDLER_DEFAULT (1 << 0)
uacpi_u16 flags;
} uacpi_address_space_handler;
/*
* NOTE: These are common object headers.
* Any changes to these structs must be propagated to all objects.
* ==============================================================
* Common for the following objects:
* - UACPI_OBJECT_OPERATION_REGION
* - UACPI_OBJECT_PROCESSOR
* - UACPI_OBJECT_DEVICE
* - UACPI_OBJECT_THERMAL_ZONE
*/
typedef struct uacpi_address_space_handlers {
struct uacpi_shareable shareable;
uacpi_address_space_handler *head;
} uacpi_address_space_handlers;
typedef struct uacpi_device_notify_handler {
uacpi_notify_handler callback;
uacpi_handle user_context;
struct uacpi_device_notify_handler *next;
} uacpi_device_notify_handler;
/*
* Common for the following objects:
* - UACPI_OBJECT_PROCESSOR
* - UACPI_OBJECT_DEVICE
* - UACPI_OBJECT_THERMAL_ZONE
*/
typedef struct uacpi_handlers {
struct uacpi_shareable shareable;
uacpi_address_space_handler *address_space_head;
uacpi_device_notify_handler *notify_head;
} uacpi_handlers;
// This region has a corresponding _REG method that was successfully executed
#define UACPI_OP_REGION_STATE_REG_EXECUTED (1 << 0)
// This region was successfully attached to a handler
#define UACPI_OP_REGION_STATE_ATTACHED (1 << 1)
typedef struct uacpi_operation_region {
struct uacpi_shareable shareable;
uacpi_address_space_handler *handler;
uacpi_handle user_context;
uacpi_u16 space;
uacpi_u8 state_flags;
uacpi_u64 offset;
uacpi_u64 length;
union {
// If space == TABLE_DATA
uacpi_u64 table_idx;
// If space == PCC
uacpi_u8 *internal_buffer;
};
// Used to link regions sharing the same handler
struct uacpi_operation_region *next;
} uacpi_operation_region;
typedef struct uacpi_device {
struct uacpi_shareable shareable;
uacpi_address_space_handler *address_space_handlers;
uacpi_device_notify_handler *notify_handlers;
} uacpi_device;
typedef struct uacpi_processor {
struct uacpi_shareable shareable;
uacpi_address_space_handler *address_space_handlers;
uacpi_device_notify_handler *notify_handlers;
uacpi_u8 id;
uacpi_u32 block_address;
uacpi_u8 block_length;
} uacpi_processor;
typedef struct uacpi_thermal_zone {
struct uacpi_shareable shareable;
uacpi_address_space_handler *address_space_handlers;
uacpi_device_notify_handler *notify_handlers;
} uacpi_thermal_zone;
typedef struct uacpi_power_resource {
uacpi_u8 system_level;
uacpi_u16 resource_order;
} uacpi_power_resource;
typedef uacpi_status (*uacpi_native_call_handler)(
uacpi_handle ctx, uacpi_object *retval
);
typedef struct uacpi_control_method {
struct uacpi_shareable shareable;
union {
uacpi_u8 *code;
uacpi_native_call_handler handler;
};
uacpi_mutex *mutex;
uacpi_u32 size;
uacpi_u8 sync_level : 4;
uacpi_u8 args : 3;
uacpi_u8 is_serialized : 1;
uacpi_u8 named_objects_persist: 1;
uacpi_u8 native_call : 1;
uacpi_u8 owns_code : 1;
} uacpi_control_method;
typedef enum uacpi_access_type {
UACPI_ACCESS_TYPE_ANY = 0,
UACPI_ACCESS_TYPE_BYTE = 1,
UACPI_ACCESS_TYPE_WORD = 2,
UACPI_ACCESS_TYPE_DWORD = 3,
UACPI_ACCESS_TYPE_QWORD = 4,
UACPI_ACCESS_TYPE_BUFFER = 5,
} uacpi_access_type;
typedef enum uacpi_lock_rule {
UACPI_LOCK_RULE_NO_LOCK = 0,
UACPI_LOCK_RULE_LOCK = 1,
} uacpi_lock_rule;
typedef enum uacpi_update_rule {
UACPI_UPDATE_RULE_PRESERVE = 0,
UACPI_UPDATE_RULE_WRITE_AS_ONES = 1,
UACPI_UPDATE_RULE_WRITE_AS_ZEROES = 2,
} uacpi_update_rule;
typedef enum uacpi_field_unit_kind {
UACPI_FIELD_UNIT_KIND_NORMAL = 0,
UACPI_FIELD_UNIT_KIND_INDEX = 1,
UACPI_FIELD_UNIT_KIND_BANK = 2,
} uacpi_field_unit_kind;
typedef struct uacpi_field_unit {
struct uacpi_shareable shareable;
union {
// UACPI_FIELD_UNIT_KIND_NORMAL
struct {
uacpi_namespace_node *region;
};
// UACPI_FIELD_UNIT_KIND_INDEX
struct {
struct uacpi_field_unit *index;
struct uacpi_field_unit *data;
};
// UACPI_FIELD_UNIT_KIND_BANK
struct {
uacpi_namespace_node *bank_region;
struct uacpi_field_unit *bank_selection;
uacpi_u64 bank_value;
};
};
uacpi_object *connection;
uacpi_u32 byte_offset;
uacpi_u32 bit_length;
uacpi_u32 pin_offset;
uacpi_u8 bit_offset_within_first_byte;
uacpi_u8 access_width_bytes;
uacpi_u8 access_length;
uacpi_u8 attributes : 4;
uacpi_u8 update_rule : 2;
uacpi_u8 kind : 2;
uacpi_u8 lock_rule : 1;
} uacpi_field_unit;
/*
 * The core polymorphic AML object. 'type' (a uacpi_object_type value)
 * selects which union member is active; 'flags' is type-dependent —
 * see enum uacpi_reference_kind (type == UACPI_OBJECT_REFERENCE) and
 * enum uacpi_string_kind (type == UACPI_OBJECT_STRING) above.
 * Reference-counted via the leading uacpi_shareable header.
 */
typedef struct uacpi_object {
struct uacpi_shareable shareable;
// Discriminator for the union below (uacpi_object_type)
uacpi_u8 type;
// Type-specific flags, see the enums at the top of this header
uacpi_u8 flags;
union {
uacpi_u64 integer;
uacpi_package *package;
uacpi_buffer_field buffer_field;
// Child of an internal reference object (see uacpi_object_attach_child)
uacpi_object *inner_object;
uacpi_control_method *method;
uacpi_buffer *buffer;
uacpi_mutex *mutex;
uacpi_event *event;
uacpi_buffer_index buffer_index;
uacpi_operation_region *op_region;
uacpi_device *device;
uacpi_processor *processor;
uacpi_thermal_zone *thermal_zone;
uacpi_address_space_handlers *address_space_handlers;
uacpi_handlers *handlers;
// Stored by value, unlike the other composite members
uacpi_power_resource power_resource;
uacpi_field_unit *field_unit;
};
} uacpi_object;
uacpi_object *uacpi_create_object(uacpi_object_type type);
enum uacpi_assign_behavior {
UACPI_ASSIGN_BEHAVIOR_DEEP_COPY,
UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY,
};
uacpi_status uacpi_object_assign(uacpi_object *dst, uacpi_object *src,
enum uacpi_assign_behavior);
void uacpi_object_attach_child(uacpi_object *parent, uacpi_object *child);
void uacpi_object_detach_child(uacpi_object *parent);
struct uacpi_object *uacpi_create_internal_reference(
enum uacpi_reference_kind kind, uacpi_object *child
);
uacpi_object *uacpi_unwrap_internal_reference(uacpi_object *object);
enum uacpi_prealloc_objects {
UACPI_PREALLOC_OBJECTS_NO,
UACPI_PREALLOC_OBJECTS_YES,
};
uacpi_bool uacpi_package_fill(
uacpi_package *pkg, uacpi_size num_elements,
enum uacpi_prealloc_objects prealloc_objects
);
uacpi_mutex *uacpi_create_mutex(void);
void uacpi_mutex_unref(uacpi_mutex*);
void uacpi_method_unref(uacpi_control_method*);
void uacpi_address_space_handler_unref(uacpi_address_space_handler *handler);
void uacpi_buffer_to_view(uacpi_buffer*, uacpi_data_view*);
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,45 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/utilities.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/stdlib.h>
/*
 * Narrow a 64-bit value to uacpi_phys_addr for this build.
 *
 * On configurations where uacpi_phys_addr is narrower than 64 bits, a value
 * above 0xFFFFFFFF cannot be represented, so a warning is logged first.
 * The cast itself is performed unconditionally; callers always get the
 * (possibly truncated) low bits back.
 */
static inline uacpi_phys_addr uacpi_truncate_phys_addr_with_warn(uacpi_u64 large_addr)
{
// sizeof check is a compile-time constant, so this branch folds away
// entirely on 64-bit uacpi_phys_addr configurations
if (sizeof(uacpi_phys_addr) < 8 && large_addr > 0xFFFFFFFF) {
uacpi_warn(
"truncating a physical address 0x%"UACPI_PRIX64
" outside of address space\n", UACPI_FMT64(large_addr)
);
}
return (uacpi_phys_addr)large_addr;
}
#define UACPI_PTR_TO_VIRT_ADDR(ptr) ((uacpi_virt_addr)(ptr))
#define UACPI_VIRT_ADDR_TO_PTR(vaddr) ((void*)(vaddr))
#define UACPI_PTR_ADD(ptr, value) ((void*)(((uacpi_u8*)(ptr)) + value))
/*
* Target buffer must have a length of at least 8 bytes.
*/
void uacpi_eisa_id_to_string(uacpi_u32, uacpi_char *out_string);
enum uacpi_base {
UACPI_BASE_AUTO,
UACPI_BASE_OCT = 8,
UACPI_BASE_DEC = 10,
UACPI_BASE_HEX = 16,
};
uacpi_status uacpi_string_to_integer(
const uacpi_char *str, uacpi_size max_chars, enum uacpi_base base,
uacpi_u64 *out_value
);
uacpi_bool uacpi_is_valid_nameseg(uacpi_u8 *nameseg);
void uacpi_free_dynamic_string(const uacpi_char *str);
#define UACPI_NANOSECONDS_PER_SEC (1000ull * 1000ull * 1000ull)

View File

@ -0,0 +1,36 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/acpi.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *value);
uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 value);
typedef struct uacpi_mapped_gas uacpi_mapped_gas;
/*
* Map a GAS for faster access in the future. The handle returned via
* 'out_mapped' must be freed & unmapped using uacpi_unmap_gas() when
* no longer needed.
*/
uacpi_status uacpi_map_gas(const struct acpi_gas *gas, uacpi_mapped_gas **out_mapped);
void uacpi_unmap_gas(uacpi_mapped_gas*);
/*
* Same as uacpi_gas_{read,write} but operates on a pre-mapped handle for faster
* access and/or ability to use in critical sections/irq contexts.
*/
uacpi_status uacpi_gas_read_mapped(const uacpi_mapped_gas *gas, uacpi_u64 *value);
uacpi_status uacpi_gas_write_mapped(const uacpi_mapped_gas *gas, uacpi_u64 value);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,375 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/platform/arch_helpers.h>
#ifdef __cplusplus
extern "C" {
#endif
// Returns the PHYSICAL address of the RSDP structure via *out_rsdp_address.
uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address);
/*
* Map a physical memory range starting at 'addr' with length 'len', and return
* a virtual address that can be used to access it.
*
* NOTE: 'addr' may be misaligned, in this case the host is expected to round it
* down to the nearest page-aligned boundary and map that, while making
* sure that at least 'len' bytes are still mapped starting at 'addr'. The
* return value preserves the misaligned offset.
*
* Example for uacpi_kernel_map(0x1ABC, 0xF00):
* 1. Round down the 'addr' we got to the nearest page boundary.
* Considering a PAGE_SIZE of 4096 (or 0x1000), 0x1ABC rounded down
* is 0x1000, offset within the page is 0x1ABC - 0x1000 => 0xABC
* 2. Requested 'len' is 0xF00 bytes, but we just rounded the address
* down by 0xABC bytes, so add those on top. 0xF00 + 0xABC => 0x19BC
* 3. Round up the final 'len' to the nearest PAGE_SIZE boundary, in
* this case 0x19BC is 0x2000 bytes (2 pages if PAGE_SIZE is 4096)
* 4. Call the VMM to map the aligned address 0x1000 (from step 1)
* with length 0x2000 (from step 3). Let's assume the returned
* virtual address for the mapping is 0xF000.
* 5. Add the original offset within page 0xABC (from step 1) to the
* resulting virtual address 0xF000 + 0xABC => 0xFABC. Return it
* to uACPI.
*/
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len);
/*
* Unmap a virtual memory range at 'addr' with a length of 'len' bytes.
*
* NOTE: 'addr' may be misaligned, see the comment above 'uacpi_kernel_map'.
* Similar steps to uacpi_kernel_map can be taken to retrieve the
* virtual address originally returned by the VMM for this mapping
* as well as its true length.
*/
void uacpi_kernel_unmap(void *addr, uacpi_size len);
#ifndef UACPI_FORMATTED_LOGGING
void uacpi_kernel_log(uacpi_log_level, const uacpi_char*);
#else
UACPI_PRINTF_DECL(2, 3)
void uacpi_kernel_log(uacpi_log_level, const uacpi_char*, ...);
void uacpi_kernel_vlog(uacpi_log_level, const uacpi_char*, uacpi_va_list);
#endif
/*
* Only the above ^^^ API may be used by early table access and
* UACPI_BAREBONES_MODE.
*/
#ifndef UACPI_BAREBONES_MODE
/*
* Convenience initialization/deinitialization hooks that will be called by
* uACPI automatically when appropriate if compiled-in.
*/
#ifdef UACPI_KERNEL_INITIALIZATION
/*
* This API is invoked for each initialization level so that appropriate parts
* of the host kernel and/or glue code can be initialized at different stages.
*
* uACPI API that triggers calls to uacpi_kernel_initialize and the respective
* 'current_init_lvl' passed to the hook at that stage:
* 1. uacpi_initialize() -> UACPI_INIT_LEVEL_EARLY
* 2. uacpi_namespace_load() -> UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED
* 3. (start of) uacpi_namespace_initialize() -> UACPI_INIT_LEVEL_NAMESPACE_LOADED
* 4. (end of) uacpi_namespace_initialize() -> UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED
*/
uacpi_status uacpi_kernel_initialize(uacpi_init_level current_init_lvl);
void uacpi_kernel_deinitialize(void);
#endif
/*
* Open a PCI device at 'address' for reading & writing.
*
* Note that this must be able to open any arbitrary PCI device, not just those
* detected during kernel PCI enumeration, since the following pattern is
* relatively common in AML firmware:
* Device (THC0)
* {
* // Device at 00:10.06
* Name (_ADR, 0x00100006) // _ADR: Address
*
* OperationRegion (THCR, PCI_Config, Zero, 0x0100)
* Field (THCR, ByteAcc, NoLock, Preserve)
* {
* // Vendor ID field in the PCI configuration space
* VDID, 32
* }
*
* // Check if the device at 00:10.06 actually exists, that is reading
* // from its configuration space returns something other than 0xFFs.
* If ((VDID != 0xFFFFFFFF))
* {
* // Actually create the rest of the device's body if it's present
* // in the system, otherwise skip it.
* }
* }
*
* The handle returned via 'out_handle' is used to perform IO on the
* configuration space of the device.
*/
uacpi_status uacpi_kernel_pci_device_open(
uacpi_pci_address address, uacpi_handle *out_handle
);
void uacpi_kernel_pci_device_close(uacpi_handle);
/*
* Read & write the configuration space of a previously open PCI device.
*/
uacpi_status uacpi_kernel_pci_read8(
uacpi_handle device, uacpi_size offset, uacpi_u8 *value
);
uacpi_status uacpi_kernel_pci_read16(
uacpi_handle device, uacpi_size offset, uacpi_u16 *value
);
uacpi_status uacpi_kernel_pci_read32(
uacpi_handle device, uacpi_size offset, uacpi_u32 *value
);
uacpi_status uacpi_kernel_pci_write8(
uacpi_handle device, uacpi_size offset, uacpi_u8 value
);
uacpi_status uacpi_kernel_pci_write16(
uacpi_handle device, uacpi_size offset, uacpi_u16 value
);
uacpi_status uacpi_kernel_pci_write32(
uacpi_handle device, uacpi_size offset, uacpi_u32 value
);
/*
* Map a SystemIO address at [base, base + len) and return a kernel-implemented
* handle that can be used for reading and writing the IO range.
*
* NOTE: The x86 architecture uses the in/out family of instructions
* to access the SystemIO address space.
*/
uacpi_status uacpi_kernel_io_map(
uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle
);
void uacpi_kernel_io_unmap(uacpi_handle handle);
/*
* Read/Write the IO range mapped via uacpi_kernel_io_map
* at a 0-based 'offset' within the range.
*
* NOTE:
* The x86 architecture uses the in/out family of instructions
* to access the SystemIO address space.
*
* You are NOT allowed to break e.g. a 4-byte access into four 1-byte accesses.
* Hardware ALWAYS expects accesses to be of the exact width.
*/
uacpi_status uacpi_kernel_io_read8(
uacpi_handle, uacpi_size offset, uacpi_u8 *out_value
);
uacpi_status uacpi_kernel_io_read16(
uacpi_handle, uacpi_size offset, uacpi_u16 *out_value
);
uacpi_status uacpi_kernel_io_read32(
uacpi_handle, uacpi_size offset, uacpi_u32 *out_value
);
uacpi_status uacpi_kernel_io_write8(
uacpi_handle, uacpi_size offset, uacpi_u8 in_value
);
uacpi_status uacpi_kernel_io_write16(
uacpi_handle, uacpi_size offset, uacpi_u16 in_value
);
uacpi_status uacpi_kernel_io_write32(
uacpi_handle, uacpi_size offset, uacpi_u32 in_value
);
/*
* Allocate a block of memory of 'size' bytes.
* The contents of the allocated memory are unspecified.
*/
void *uacpi_kernel_alloc(uacpi_size size);
#ifdef UACPI_NATIVE_ALLOC_ZEROED
/*
* Allocate a block of memory of 'size' bytes.
* The returned memory block is expected to be zero-filled.
*/
void *uacpi_kernel_alloc_zeroed(uacpi_size size);
#endif
/*
* Free a previously allocated memory block.
*
* 'mem' might be a NULL pointer. In this case, the call is assumed to be a
* no-op.
*
* An optionally enabled 'size_hint' parameter contains the size of the original
* allocation. Note that in some scenarios this incurs additional cost to
* calculate the object size.
*/
#ifndef UACPI_SIZED_FREES
void uacpi_kernel_free(void *mem);
#else
void uacpi_kernel_free(void *mem, uacpi_size size_hint);
#endif
/*
* Returns the number of nanosecond ticks elapsed since boot,
* strictly monotonic.
*/
uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void);
/*
* Spin for N microseconds.
*/
void uacpi_kernel_stall(uacpi_u8 usec);
/*
* Sleep for N milliseconds.
*/
void uacpi_kernel_sleep(uacpi_u64 msec);
/*
* Create/free an opaque non-recursive kernel mutex object.
*/
uacpi_handle uacpi_kernel_create_mutex(void);
void uacpi_kernel_free_mutex(uacpi_handle);
/*
* Create/free an opaque kernel (semaphore-like) event object.
*/
uacpi_handle uacpi_kernel_create_event(void);
void uacpi_kernel_free_event(uacpi_handle);
/*
* Returns a unique identifier of the currently executing thread.
*
* The returned thread id cannot be UACPI_THREAD_ID_NONE.
*/
uacpi_thread_id uacpi_kernel_get_thread_id(void);
/*
* Try to acquire the mutex with a millisecond timeout.
*
* The timeout value has the following meanings:
* 0x0000 - Attempt to acquire the mutex once, in a non-blocking manner
* 0x0001...0xFFFE - Attempt to acquire the mutex for at least 'timeout'
* milliseconds
* 0xFFFF - Infinite wait, block until the mutex is acquired
*
* The following are possible return values:
* 1. UACPI_STATUS_OK - successful acquire operation
* 2. UACPI_STATUS_TIMEOUT - timeout reached while attempting to acquire (or the
* single attempt to acquire was not successful for
* calls with timeout=0)
* 3. Any other value - signifies a host internal error and is treated as such
*/
uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle, uacpi_u16);
void uacpi_kernel_release_mutex(uacpi_handle);
/*
* Try to wait for an event (counter > 0) with a millisecond timeout.
* A timeout value of 0xFFFF implies infinite wait.
*
* The internal counter is decremented by 1 if wait was successful.
*
* A successful wait is indicated by returning UACPI_TRUE.
*/
uacpi_bool uacpi_kernel_wait_for_event(uacpi_handle, uacpi_u16);
/*
* Signal the event object by incrementing its internal counter by 1.
*
* This function may be used in interrupt contexts.
*/
void uacpi_kernel_signal_event(uacpi_handle);
/*
* Reset the event counter to 0.
*/
void uacpi_kernel_reset_event(uacpi_handle);
/*
* Handle a firmware request.
*
* Currently either a Breakpoint or Fatal operators.
*/
uacpi_status uacpi_kernel_handle_firmware_request(uacpi_firmware_request*);
/*
* Install an interrupt handler at 'irq', 'ctx' is passed to the provided
* handler for every invocation.
*
* 'out_irq_handle' is set to a kernel-implemented value that can be used to
* refer to this handler from other API.
*/
uacpi_status uacpi_kernel_install_interrupt_handler(
uacpi_u32 irq, uacpi_interrupt_handler, uacpi_handle ctx,
uacpi_handle *out_irq_handle
);
/*
* Uninstall an interrupt handler. 'irq_handle' is the value returned via
* 'out_irq_handle' during installation.
*/
uacpi_status uacpi_kernel_uninstall_interrupt_handler(
uacpi_interrupt_handler, uacpi_handle irq_handle
);
/*
* Create/free a kernel spinlock object.
*
* Unlike other types of locks, spinlocks may be used in interrupt contexts.
*/
uacpi_handle uacpi_kernel_create_spinlock(void);
void uacpi_kernel_free_spinlock(uacpi_handle);
/*
* Lock/unlock helpers for spinlocks.
*
* These are expected to disable interrupts, returning the previous state of cpu
* flags, that can be used to possibly re-enable interrupts if they were enabled
* before.
*
 * Note that lock is infallible.
*/
uacpi_cpu_flags uacpi_kernel_lock_spinlock(uacpi_handle);
void uacpi_kernel_unlock_spinlock(uacpi_handle, uacpi_cpu_flags);
typedef enum uacpi_work_type {
/*
* Schedule a GPE handler method for execution.
* This should be scheduled to run on CPU0 to avoid potential SMI-related
* firmware bugs.
*/
UACPI_WORK_GPE_EXECUTION,
/*
* Schedule a Notify(device) firmware request for execution.
* This can run on any CPU.
*/
UACPI_WORK_NOTIFICATION,
} uacpi_work_type;
typedef void (*uacpi_work_handler)(uacpi_handle);
/*
* Schedules deferred work for execution.
* Might be invoked from an interrupt context.
*/
uacpi_status uacpi_kernel_schedule_work(
uacpi_work_type, uacpi_work_handler, uacpi_handle ctx
);
/*
* Waits for two types of work to finish:
* 1. All in-flight interrupts installed via uacpi_kernel_install_interrupt_handler
* 2. All work scheduled via uacpi_kernel_schedule_work
*
* Note that the waits must be done in this order specifically.
*/
uacpi_status uacpi_kernel_wait_for_work_completion(void);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,40 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Verbosity levels accepted by uacpi_kernel_log(); a higher numeric
 * value means a more verbose (less severe) message.
 */
typedef enum uacpi_log_level {
/*
 * Super verbose logging, every op & uop being processed is logged.
 * Mostly useful for tracking down hangs/lockups.
 */
UACPI_LOG_DEBUG = 5,
/*
 * A little verbose, every operation region access is traced with a bit of
 * extra information on top.
 */
UACPI_LOG_TRACE = 4,
/*
 * Only logs the bare minimum information about state changes and/or
 * initialization progress.
 */
UACPI_LOG_INFO = 3,
/*
 * Logs recoverable errors and/or non-important aborts.
 */
UACPI_LOG_WARN = 2,
/*
 * Logs only critical errors that might affect the ability to initialize or
 * prevent stable runtime.
 */
UACPI_LOG_ERROR = 1,
} uacpi_log_level;
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,186 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
typedef struct uacpi_namespace_node uacpi_namespace_node;
uacpi_namespace_node *uacpi_namespace_root(void);
typedef enum uacpi_predefined_namespace {
UACPI_PREDEFINED_NAMESPACE_ROOT = 0,
UACPI_PREDEFINED_NAMESPACE_GPE,
UACPI_PREDEFINED_NAMESPACE_PR,
UACPI_PREDEFINED_NAMESPACE_SB,
UACPI_PREDEFINED_NAMESPACE_SI,
UACPI_PREDEFINED_NAMESPACE_TZ,
UACPI_PREDEFINED_NAMESPACE_GL,
UACPI_PREDEFINED_NAMESPACE_OS,
UACPI_PREDEFINED_NAMESPACE_OSI,
UACPI_PREDEFINED_NAMESPACE_REV,
UACPI_PREDEFINED_NAMESPACE_MAX = UACPI_PREDEFINED_NAMESPACE_REV,
} uacpi_predefined_namespace;
uacpi_namespace_node *uacpi_namespace_get_predefined(
uacpi_predefined_namespace
);
/*
* Returns UACPI_TRUE if the provided 'node' is an alias.
*/
uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node);
uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node);
/*
* Returns the type of object stored at the namespace node.
*
 * NOTE: due to the existence of the CopyObject operator in AML, the
* return value of this function is subject to TOCTOU bugs.
*/
uacpi_status uacpi_namespace_node_type(
const uacpi_namespace_node *node, uacpi_object_type *out_type
);
/*
* Returns UACPI_TRUE via 'out' if the type of the object stored at the
* namespace node matches the provided value, UACPI_FALSE otherwise.
*
 * NOTE: due to the existence of the CopyObject operator in AML, the
* return value of this function is subject to TOCTOU bugs.
*/
uacpi_status uacpi_namespace_node_is(
const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out
);
/*
* Returns UACPI_TRUE via 'out' if the type of the object stored at the
* namespace node matches any of the type bits in the provided value,
* UACPI_FALSE otherwise.
*
 * NOTE: due to the existence of the CopyObject operator in AML, the
* return value of this function is subject to TOCTOU bugs.
*/
uacpi_status uacpi_namespace_node_is_one_of(
const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
uacpi_bool *out
);
uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node);
uacpi_namespace_node *uacpi_namespace_node_parent(
uacpi_namespace_node *node
);
uacpi_status uacpi_namespace_node_find(
uacpi_namespace_node *parent,
const uacpi_char *path,
uacpi_namespace_node **out_node
);
/*
* Same as uacpi_namespace_node_find, except the search recurses upwards when
* the namepath consists of only a single nameseg. Usually, this behavior is
* only desired if resolving a namepath specified in an aml-provided object,
* such as a package element.
*/
uacpi_status uacpi_namespace_node_resolve_from_aml_namepath(
uacpi_namespace_node *scope,
const uacpi_char *path,
uacpi_namespace_node **out_node
);
typedef uacpi_iteration_decision (*uacpi_iteration_callback) (
void *user, uacpi_namespace_node *node, uacpi_u32 node_depth
);
#define UACPI_MAX_DEPTH_ANY 0xFFFFFFFF
/*
* Depth-first iterate the namespace starting at the first child of 'parent'.
*/
uacpi_status uacpi_namespace_for_each_child_simple(
uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user
);
/*
* Depth-first iterate the namespace starting at the first child of 'parent'.
*
* 'descending_callback' is invoked the first time a node is visited when
* walking down. 'ascending_callback' is invoked the second time a node is
* visited after we reach the leaf node without children and start walking up.
* Either of the callbacks may be NULL, but not both at the same time.
*
* Only nodes matching 'type_mask' are passed to the callbacks.
*
* 'max_depth' is used to limit the maximum reachable depth from 'parent',
* where 1 is only direct children of 'parent', 2 is children of first-level
* children etc. Use UACPI_MAX_DEPTH_ANY or -1 to specify infinite depth.
*/
uacpi_status uacpi_namespace_for_each_child(
uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback,
uacpi_iteration_callback ascending_callback,
uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user
);
/*
* Retrieve the next peer namespace node of '*iter', or, if '*iter' is
* UACPI_NULL, retrieve the first child of 'parent' instead. The resulting
* namespace node is stored at '*iter'.
*
* This API can be used to implement an "iterator" version of the
* for_each_child helpers.
*
* Example usage:
* void recurse(uacpi_namespace_node *parent) {
* uacpi_namespace_node *iter = UACPI_NULL;
*
* while (uacpi_namespace_node_next(parent, &iter) == UACPI_STATUS_OK) {
* // Do something with iter...
* descending_callback(iter);
*
* // Recurse down to walk over the children of iter
* recurse(iter);
* }
* }
*
* Prefer the for_each_child family of helpers if possible instead of this API
* as they avoid recursion and/or the need to use dynamic data structures
* entirely.
*/
uacpi_status uacpi_namespace_node_next(
uacpi_namespace_node *parent, uacpi_namespace_node **iter
);
/*
* Retrieve the next peer namespace node of '*iter', or, if '*iter' is
* UACPI_NULL, retrieve the first child of 'parent' instead. The resulting
 * namespace node is stored at '*iter'. Only nodes whose type matches one
* of the types set in 'type_mask' are returned.
*
* See comment above 'uacpi_namespace_node_next' for usage examples.
*
* Prefer the for_each_child family of helpers if possible instead of this API
* as they avoid recursion and/or the need to use dynamic data structures
* entirely.
*/
uacpi_status uacpi_namespace_node_next_typed(
uacpi_namespace_node *parent, uacpi_namespace_node **iter,
uacpi_object_type_bits type_mask
);
const uacpi_char *uacpi_namespace_node_generate_absolute_path(
const uacpi_namespace_node *node
);
void uacpi_free_absolute_path(const uacpi_char *path);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,30 @@
#pragma once
#include <uacpi/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
* Install a Notify() handler to a device node.
* A handler installed to the root node will receive all notifications, even if
* a device already has a dedicated Notify handler.
* 'handler_context' is passed to the handler on every invocation.
*/
uacpi_status uacpi_install_notify_handler(
uacpi_namespace_node *node, uacpi_notify_handler handler,
uacpi_handle handler_context
);
uacpi_status uacpi_uninstall_notify_handler(
uacpi_namespace_node *node, uacpi_notify_handler handler
);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,47 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
* Install an address space handler to a device node.
* The handler is recursively connected to all of the operation regions of
* type 'space' underneath 'device_node'. Note that this recursion stops as
* soon as another device node that already has an address space handler of
* this type installed is encountered.
*/
uacpi_status uacpi_install_address_space_handler(
uacpi_namespace_node *device_node, enum uacpi_address_space space,
uacpi_region_handler handler, uacpi_handle handler_context
);
/*
* Uninstall the handler of type 'space' from a given device node.
*/
uacpi_status uacpi_uninstall_address_space_handler(
uacpi_namespace_node *device_node,
enum uacpi_address_space space
);
/*
* Execute _REG(space, ACPI_REG_CONNECT) for all of the opregions with this
* address space underneath this device. This should only be called manually
* if you want to register an early handler that must be available before the
* call to uacpi_namespace_initialize().
*/
uacpi_status uacpi_reg_all_opregions(
uacpi_namespace_node *device_node,
enum uacpi_address_space space
);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,125 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
 * Known Windows _OSI vendor interface strings, ordered from oldest to newest
 * release so that a numerically larger enumerator means a more recent
 * interface (this ordering is what uacpi_latest_queried_vendor_interface
 * relies on).
 */
typedef enum uacpi_vendor_interface {
UACPI_VENDOR_INTERFACE_NONE = 0,
UACPI_VENDOR_INTERFACE_WINDOWS_2000,
UACPI_VENDOR_INTERFACE_WINDOWS_XP,
UACPI_VENDOR_INTERFACE_WINDOWS_XP_SP1,
UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2003,
UACPI_VENDOR_INTERFACE_WINDOWS_XP_SP2,
UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2003_SP1,
UACPI_VENDOR_INTERFACE_WINDOWS_VISTA,
UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2008,
UACPI_VENDOR_INTERFACE_WINDOWS_VISTA_SP1,
UACPI_VENDOR_INTERFACE_WINDOWS_VISTA_SP2,
UACPI_VENDOR_INTERFACE_WINDOWS_7,
UACPI_VENDOR_INTERFACE_WINDOWS_8,
UACPI_VENDOR_INTERFACE_WINDOWS_8_1,
UACPI_VENDOR_INTERFACE_WINDOWS_10,
UACPI_VENDOR_INTERFACE_WINDOWS_10_RS1,
UACPI_VENDOR_INTERFACE_WINDOWS_10_RS2,
UACPI_VENDOR_INTERFACE_WINDOWS_10_RS3,
UACPI_VENDOR_INTERFACE_WINDOWS_10_RS4,
UACPI_VENDOR_INTERFACE_WINDOWS_10_RS5,
UACPI_VENDOR_INTERFACE_WINDOWS_10_19H1,
UACPI_VENDOR_INTERFACE_WINDOWS_10_20H1,
UACPI_VENDOR_INTERFACE_WINDOWS_11,
UACPI_VENDOR_INTERFACE_WINDOWS_11_22H2,
} uacpi_vendor_interface;
/*
* Returns the "latest" AML-queried _OSI vendor interface.
*
* E.g. for the following AML code:
* _OSI("Windows 2021")
* _OSI("Windows 2000")
*
* This function will return UACPI_VENDOR_INTERFACE_WINDOWS_11, since this is
* the latest version of the interface the code queried, even though the
* "Windows 2000" query came after "Windows 2021".
*/
uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void);
typedef enum uacpi_interface_kind {
UACPI_INTERFACE_KIND_VENDOR = (1 << 0),
UACPI_INTERFACE_KIND_FEATURE = (1 << 1),
UACPI_INTERFACE_KIND_ALL = UACPI_INTERFACE_KIND_VENDOR |
UACPI_INTERFACE_KIND_FEATURE,
} uacpi_interface_kind;
/*
* Install or uninstall an interface.
*
* The interface kind is used for matching during interface enumeration in
* uacpi_bulk_configure_interfaces().
*
* After installing an interface, all _OSI queries report it as supported.
*/
uacpi_status uacpi_install_interface(
const uacpi_char *name, uacpi_interface_kind
);
uacpi_status uacpi_uninstall_interface(const uacpi_char *name);
typedef enum uacpi_host_interface {
UACPI_HOST_INTERFACE_MODULE_DEVICE = 1,
UACPI_HOST_INTERFACE_PROCESSOR_DEVICE,
UACPI_HOST_INTERFACE_3_0_THERMAL_MODEL,
UACPI_HOST_INTERFACE_3_0_SCP_EXTENSIONS,
UACPI_HOST_INTERFACE_PROCESSOR_AGGREGATOR_DEVICE,
} uacpi_host_interface;
/*
* Same as install/uninstall interface, but comes with an enum of known
* interfaces defined by the ACPI specification. These are disabled by default
* as they depend on the host kernel support.
*/
uacpi_status uacpi_enable_host_interface(uacpi_host_interface);
uacpi_status uacpi_disable_host_interface(uacpi_host_interface);
typedef uacpi_bool (*uacpi_interface_handler)
(const uacpi_char *name, uacpi_bool supported);
/*
* Set a custom interface query (_OSI) handler.
*
* This callback will be invoked for each _OSI query with the value
* passed in the _OSI, as well as whether the interface was detected as
* supported. The callback is able to override the return value dynamically
* or leave it untouched if desired (e.g. if it simply wants to log something or
* do internal bookkeeping of some kind).
*/
uacpi_status uacpi_set_interface_query_handler(uacpi_interface_handler);
typedef enum uacpi_interface_action {
UACPI_INTERFACE_ACTION_DISABLE = 0,
UACPI_INTERFACE_ACTION_ENABLE,
} uacpi_interface_action;
/*
* Bulk interface configuration, used to disable or enable all interfaces that
* match 'kind'.
*
* This is generally only needed to work around buggy hardware, for example if
* requested from the kernel command line.
*
* By default, all vendor strings (like "Windows 2000") are enabled, and all
* host features (like "3.0 Thermal Model") are disabled.
*/
uacpi_status uacpi_bulk_configure_interfaces(
uacpi_interface_action action, uacpi_interface_kind kind
);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,38 @@
#pragma once
#ifdef UACPI_OVERRIDE_ARCH_HELPERS
#include "uacpi_arch_helpers.h"
#else
#include <uacpi/platform/atomic.h>
#ifndef UACPI_ARCH_FLUSH_CPU_CACHE
#define UACPI_ARCH_FLUSH_CPU_CACHE() do {} while (0)
#endif
typedef unsigned long uacpi_cpu_flags;
typedef void *uacpi_thread_id;
/*
* Replace as needed depending on your platform's way to represent thread ids.
* uACPI offers a few more helpers like uacpi_atomic_{load,store}{8,16,32,64,ptr}
* (or you could provide your own helpers)
*/
#ifndef UACPI_ATOMIC_LOAD_THREAD_ID
#define UACPI_ATOMIC_LOAD_THREAD_ID(ptr) ((uacpi_thread_id)uacpi_atomic_load_ptr(ptr))
#endif
#ifndef UACPI_ATOMIC_STORE_THREAD_ID
#define UACPI_ATOMIC_STORE_THREAD_ID(ptr, value) uacpi_atomic_store_ptr(ptr, value)
#endif
/*
* A sentinel value that the kernel promises to NEVER return from
* uacpi_kernel_get_current_thread_id or this will break
*/
#ifndef UACPI_THREAD_ID_NONE
#define UACPI_THREAD_ID_NONE ((uacpi_thread_id)-1)
#endif
#endif

View File

@ -0,0 +1,347 @@
#pragma once
/*
* Most of this header is a giant workaround for MSVC to make atomics into a
* somewhat unified interface with how GCC and Clang handle them.
*
* We don't use the absolutely disgusting C11 stdatomic.h header because it is
* unable to operate on non _Atomic types, which enforce implicit sequential
* consistency and alter the behavior of the standard C binary/unary operators.
*
* The strictness of the atomic helpers defined here is assumed to be at least
* acquire for loads and release for stores. Cmpxchg uses the standard acq/rel
* for success, acq for failure, and is assumed to be strong.
*/
#ifdef UACPI_OVERRIDE_ATOMIC
#include "uacpi_atomic.h"
#else
#include <uacpi/platform/compiler.h>
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
// mimic __atomic_compare_exchange_n that doesn't exist on MSVC
//
// FIX: MSVC's _InterlockedCompareExchange family takes its arguments as
// (Destination, Exchange, Comparand) — the value to STORE comes before the
// value we expect to FIND. The previous code passed (*expected, desired),
// which compared against 'desired' and wrote back the old expected value,
// inverting the cmpxchg semantics.
#define UACPI_MAKE_MSVC_CMPXCHG(width, type, suffix) \
static inline int uacpi_do_atomic_cmpxchg##width( \
type volatile *ptr, type volatile *expected, type desired \
) \
{ \
type current; \
\
/* Exchange ('desired') second, Comparand ('*expected') third */ \
current = _InterlockedCompareExchange##suffix(ptr, desired, *expected); \
if (current != *expected) { \
/* failure: report the observed value back, like the GCC builtin */ \
*expected = current; \
return 0; \
} \
return 1; \
}
/*
 * Invoke the generated cmpxchg helper with both pointers normalized to the
 * volatile-qualified type the MSVC intrinsics expect.
 */
#define UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, width, type) \
uacpi_do_atomic_cmpxchg##width( \
(type volatile*)ptr, (type volatile*)expected, desired \
)
// Store via _InterlockedExchange, a full read-modify-write (old value dropped)
#define UACPI_MSVC_ATOMIC_STORE(ptr, value, type, width) \
_InterlockedExchange##width((type volatile*)(ptr), (type)(value))
// Load via atomic OR with 0: a read-don't-modify-write that returns the value
#define UACPI_MSVC_ATOMIC_LOAD(ptr, type, width) \
_InterlockedOr##width((type volatile*)(ptr), 0)
#define UACPI_MSVC_ATOMIC_INC(ptr, type, width) \
_InterlockedIncrement##width((type volatile*)(ptr))
#define UACPI_MSVC_ATOMIC_DEC(ptr, type, width) \
_InterlockedDecrement##width((type volatile*)(ptr))
// The 32-bit suffix is empty: the unsuffixed intrinsics operate on 'long'
UACPI_MAKE_MSVC_CMPXCHG(64, __int64, 64)
UACPI_MAKE_MSVC_CMPXCHG(32, long,)
UACPI_MAKE_MSVC_CMPXCHG(16, short, 16)
#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 16, short)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 32, long)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 64, __int64)
#define uacpi_atomic_load8(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, char, 8)
#define uacpi_atomic_load16(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, short, 16)
#define uacpi_atomic_load32(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, long,)
#define uacpi_atomic_load64(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, __int64, 64)
#define uacpi_atomic_store8(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, char, 8)
#define uacpi_atomic_store16(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, short, 16)
#define uacpi_atomic_store32(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, long,)
#define uacpi_atomic_store64(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, __int64, 64)
#define uacpi_atomic_inc16(ptr) UACPI_MSVC_ATOMIC_INC(ptr, short, 16)
#define uacpi_atomic_inc32(ptr) UACPI_MSVC_ATOMIC_INC(ptr, long,)
#define uacpi_atomic_inc64(ptr) UACPI_MSVC_ATOMIC_INC(ptr, __int64, 64)
#define uacpi_atomic_dec16(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, short, 16)
#define uacpi_atomic_dec32(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, long,)
#define uacpi_atomic_dec64(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, __int64, 64)
#elif defined(__WATCOMC__)
#include <stdint.h>
/*
 * OpenWatcom (32-bit x86) path: each helper is declared as a static function
 * whose body is supplied as inline assembly through '#pragma aux'. In these
 * pragmas, 'parm' lists the registers the arguments arrive in (in order),
 * 'value' names the register holding the return value, and 'modify' lists
 * any additionally clobbered registers.
 */
static int uacpi_do_atomic_cmpxchg16(volatile uint16_t *ptr, volatile uint16_t *expected, uint16_t desired);
#pragma aux uacpi_do_atomic_cmpxchg16 = \
".486" \
"mov ax, [esi]" \
"lock cmpxchg [edi], bx" \
"mov [esi], ax" \
"setz al" \
"movzx eax, al" \
parm [ edi ] [ esi ] [ ebx ] \
value [ eax ]
static int uacpi_do_atomic_cmpxchg32(volatile uint32_t *ptr, volatile uint32_t *expected, uint32_t desired);
#pragma aux uacpi_do_atomic_cmpxchg32 = \
".486" \
"mov eax, [esi]" \
"lock cmpxchg [edi], ebx" \
"mov [esi], eax" \
"setz al" \
"movzx eax, al" \
parm [ edi ] [ esi ] [ ebx ] \
value [ eax ]
/*
 * 64-bit compare-exchange needs CMPXCHG8B, which takes its operands split
 * across edx:eax (expected) and ecx:ebx (desired) — hence the separate
 * low/high dword parameters on the asm helper.
 */
static int uacpi_do_atomic_cmpxchg64_asm(volatile uint64_t *ptr, volatile uint64_t *expected, uint32_t low, uint32_t high);
#pragma aux uacpi_do_atomic_cmpxchg64_asm = \
".586" \
"mov eax, [esi]" \
"mov edx, [esi + 4]" \
"lock cmpxchg8b [edi]" \
"mov [esi], eax" \
"mov [esi + 4], edx" \
"setz al" \
"movzx eax, al" \
modify [ edx ] \
parm [ edi ] [ esi ] [ ebx ] [ ecx ] \
value [ eax ]
// Passing 'desired' as the 32-bit 'low' parm implicitly truncates to the low dword
static inline int uacpi_do_atomic_cmpxchg64(volatile uint64_t *ptr, volatile uint64_t *expected, uint64_t desired) {
return uacpi_do_atomic_cmpxchg64_asm(ptr, expected, desired, desired >> 32);
}
#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
uacpi_do_atomic_cmpxchg16((volatile uint16_t*)ptr, (volatile uint16_t*)expected, (uint16_t)desired)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
uacpi_do_atomic_cmpxchg32((volatile uint32_t*)ptr, (volatile uint32_t*)expected, (uint32_t)desired)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
uacpi_do_atomic_cmpxchg64((volatile uint64_t*)ptr, (volatile uint64_t*)expected, (uint64_t)desired)
/*
 * 8/16/32-bit loads and stores are plain MOVs; this relies on naturally
 * aligned x86 loads/stores up to 32 bits being atomic (NOTE(review): there is
 * no alignment check here — callers are assumed to pass aligned pointers).
 */
static uint8_t uacpi_do_atomic_load8(volatile uint8_t *ptr);
#pragma aux uacpi_do_atomic_load8 = \
"mov al, [esi]" \
parm [ esi ] \
value [ al ]
static uint16_t uacpi_do_atomic_load16(volatile uint16_t *ptr);
#pragma aux uacpi_do_atomic_load16 = \
"mov ax, [esi]" \
parm [ esi ] \
value [ ax ]
static uint32_t uacpi_do_atomic_load32(volatile uint32_t *ptr);
#pragma aux uacpi_do_atomic_load32 = \
"mov eax, [esi]" \
parm [ esi ] \
value [ eax ]
/*
 * There is no plain atomic 64-bit load in 32-bit mode; CMPXCHG8B with a
 * zeroed expected/desired pair either stores 0 over an existing 0 (a no-op)
 * or fails and deposits the current value into edx:eax, which is then
 * written out through 'out'.
 */
static void uacpi_do_atomic_load64_asm(volatile uint64_t *ptr, uint64_t *out);
#pragma aux uacpi_do_atomic_load64_asm = \
".586" \
"xor eax, eax" \
"xor ebx, ebx" \
"xor ecx, ecx" \
"xor edx, edx" \
"lock cmpxchg8b [esi]" \
"mov [edi], eax" \
"mov [edi + 4], edx" \
modify [ eax ebx ecx edx ] \
parm [ esi ] [ edi ]
static inline uint64_t uacpi_do_atomic_load64(volatile uint64_t *ptr) {
uint64_t value;
uacpi_do_atomic_load64_asm(ptr, &value);
return value;
}
#define uacpi_atomic_load8(ptr) uacpi_do_atomic_load8((volatile uint8_t*)ptr)
#define uacpi_atomic_load16(ptr) uacpi_do_atomic_load16((volatile uint16_t*)ptr)
#define uacpi_atomic_load32(ptr) uacpi_do_atomic_load32((volatile uint32_t*)ptr)
#define uacpi_atomic_load64(ptr) uacpi_do_atomic_load64((volatile uint64_t*)ptr)
static void uacpi_do_atomic_store8(volatile uint8_t *ptr, uint8_t value);
#pragma aux uacpi_do_atomic_store8 = \
"mov [edi], al" \
parm [ edi ] [ eax ]
static void uacpi_do_atomic_store16(volatile uint16_t *ptr, uint16_t value);
#pragma aux uacpi_do_atomic_store16 = \
"mov [edi], ax" \
parm [ edi ] [ eax ]
static void uacpi_do_atomic_store32(volatile uint32_t *ptr, uint32_t value);
#pragma aux uacpi_do_atomic_store32 = \
"mov [edi], eax" \
parm [ edi ] [ eax ]
/*
 * 64-bit store: keep attempting CMPXCHG8B (desired value in ecx:ebx) until
 * the exchange succeeds; on failure the current memory value is reloaded
 * into edx:eax automatically, so 'jnz retry' directly tests the exchange
 * outcome.
 */
static void uacpi_do_atomic_store64_asm(volatile uint64_t *ptr, uint32_t low, uint32_t high);
#pragma aux uacpi_do_atomic_store64_asm = \
".586" \
"xor eax, eax" \
"xor edx, edx" \
"retry: lock cmpxchg8b [edi]" \
"jnz retry" \
modify [ eax edx ] \
parm [ edi ] [ ebx ] [ ecx ]
static inline void uacpi_do_atomic_store64(volatile uint64_t *ptr, uint64_t value) {
uacpi_do_atomic_store64_asm(ptr, value, value >> 32);
}
#define uacpi_atomic_store8(ptr, value) uacpi_do_atomic_store8((volatile uint8_t*)ptr, (uint8_t)value)
#define uacpi_atomic_store16(ptr, value) uacpi_do_atomic_store16((volatile uint16_t*)ptr, (uint16_t)value)
#define uacpi_atomic_store32(ptr, value) uacpi_do_atomic_store32((volatile uint32_t*)ptr, (uint32_t)value)
#define uacpi_atomic_store64(ptr, value) uacpi_do_atomic_store64((volatile uint64_t*)ptr, (uint64_t)value)
/*
 * 16/32-bit increment: LOCK XADD leaves the previous value in the register
 * operand, so adding 1 afterwards yields the new value (add_fetch semantics,
 * matching the __atomic_add_fetch-based generic path below).
 */
static uint16_t uacpi_do_atomic_inc16(volatile uint16_t *ptr);
#pragma aux uacpi_do_atomic_inc16 = \
    ".486" \
    "mov ax, 1" \
    "lock xadd [edi], ax" \
    "add ax, 1" \
    parm [ edi ] \
    value [ ax ]
static uint32_t uacpi_do_atomic_inc32(volatile uint32_t *ptr);
#pragma aux uacpi_do_atomic_inc32 = \
    ".486" \
    "mov eax, 1" \
    "lock xadd [edi], eax" \
    "add eax, 1" \
    parm [ edi ] \
    value [ eax ]
/*
 * 64-bit increment via a CMPXCHG8B CAS loop.
 *
 * edx:eax holds the expected value (initially a guess of 0), ecx:ebx the
 * desired value, recomputed as expected+1 on every iteration. CMPXCHG8B sets
 * ZF on success and reloads edx:eax with the current memory value on failure,
 * so 'jnz retry' directly tests the outcome of the exchange.
 *
 * FIX: the previous code computed the desired value *after* the CMPXCHG8B and
 * looped on the flags of the ADC rather than the exchange result, so in the
 * common case the incremented value was never stored back (and a counter of 0
 * had 0x1_00000001 stored into it due to the ecx:ebx = 1:1 seed).
 */
static void uacpi_do_atomic_inc64_asm(volatile uint64_t *ptr, uint64_t *out);
#pragma aux uacpi_do_atomic_inc64_asm = \
    ".586" \
    "xor eax, eax" \
    "xor edx, edx" \
    "retry: mov ebx, eax" \
    "mov ecx, edx" \
    "add ebx, 1" \
    "adc ecx, 0" \
    "lock cmpxchg8b [esi]" \
    "jnz retry" \
    "mov [edi], ebx" \
    "mov [edi + 4], ecx" \
    modify [ eax ebx ecx edx ] \
    parm [ esi ] [ edi ]
// Returns the post-increment value, like __atomic_add_fetch
static inline uint64_t uacpi_do_atomic_inc64(volatile uint64_t *ptr) {
    uint64_t value;
    uacpi_do_atomic_inc64_asm(ptr, &value);
    return value;
}
#define uacpi_atomic_inc16(ptr) uacpi_do_atomic_inc16((volatile uint16_t*)ptr)
#define uacpi_atomic_inc32(ptr) uacpi_do_atomic_inc32((volatile uint32_t*)ptr)
#define uacpi_atomic_inc64(ptr) uacpi_do_atomic_inc64((volatile uint64_t*)ptr)
/*
 * 16/32-bit decrement: LOCK XADD with -1, then -1 again to turn the returned
 * old value into the new one (sub_fetch semantics).
 */
static uint16_t uacpi_do_atomic_dec16(volatile uint16_t *ptr);
#pragma aux uacpi_do_atomic_dec16 = \
    ".486" \
    "mov ax, -1" \
    "lock xadd [edi], ax" \
    "add ax, -1" \
    parm [ edi ] \
    value [ ax ]
static uint32_t uacpi_do_atomic_dec32(volatile uint32_t *ptr);
#pragma aux uacpi_do_atomic_dec32 = \
    ".486" \
    "mov eax, -1" \
    "lock xadd [edi], eax" \
    "add eax, -1" \
    parm [ edi ] \
    value [ eax ]
/*
 * 64-bit decrement: same CAS-loop structure as the increment above, with
 * SUB/SBB computing expected-1 as the desired value before each exchange
 * attempt (FIX: previously the computation followed the CMPXCHG8B and the
 * loop tested the SBB flags, mirroring the increment bug).
 */
static void uacpi_do_atomic_dec64_asm(volatile uint64_t *ptr, uint64_t *out);
#pragma aux uacpi_do_atomic_dec64_asm = \
    ".586" \
    "xor eax, eax" \
    "xor edx, edx" \
    "retry: mov ebx, eax" \
    "mov ecx, edx" \
    "sub ebx, 1" \
    "sbb ecx, 0" \
    "lock cmpxchg8b [esi]" \
    "jnz retry" \
    "mov [edi], ebx" \
    "mov [edi + 4], ecx" \
    modify [ eax ebx ecx edx ] \
    parm [ esi ] [ edi ]
// Returns the post-decrement value, like __atomic_sub_fetch
static inline uint64_t uacpi_do_atomic_dec64(volatile uint64_t *ptr) {
    uint64_t value;
    uacpi_do_atomic_dec64_asm(ptr, &value);
    return value;
}
#define uacpi_atomic_dec16(ptr) uacpi_do_atomic_dec16((volatile uint16_t*)ptr)
#define uacpi_atomic_dec32(ptr) uacpi_do_atomic_dec32((volatile uint32_t*)ptr)
#define uacpi_atomic_dec64(ptr) uacpi_do_atomic_dec64((volatile uint64_t*)ptr)
#else
/*
 * Generic GCC/Clang path: map directly onto the __atomic builtins with the
 * memory orders promised at the top of this header — acquire loads, release
 * stores, acq_rel strong cmpxchg (the literal 0 selects the strong variant)
 * with acquire ordering on failure.
 */
#define UACPI_DO_CMPXCHG(ptr, expected, desired) \
__atomic_compare_exchange_n(ptr, expected, desired, 0, \
__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)
#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \
UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \
UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \
UACPI_DO_CMPXCHG(ptr, expected, desired)
#define uacpi_atomic_load8(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load16(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load32(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_load64(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
#define uacpi_atomic_store8(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store16(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store32(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
#define uacpi_atomic_store64(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE)
// inc/dec return the NEW value (add_fetch/sub_fetch, not fetch_add/fetch_sub)
#define uacpi_atomic_inc16(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_inc32(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_inc64(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec16(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec32(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#define uacpi_atomic_dec64(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL)
#endif
/*
 * Pointer-width dispatch for the *_ptr helpers, selected via
 * UACPI_POINTER_SIZE (defined in platform/compiler.h).
 */
#if UACPI_POINTER_SIZE == 4
#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load32(ptr_to_ptr)
#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store32(ptr_to_ptr, value)
#else
#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load64(ptr_to_ptr)
#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store64(ptr_to_ptr, value)
#endif
#endif

View File

@ -0,0 +1,125 @@
#pragma once
/*
* Compiler-specific attributes/macros go here. This is the default placeholder
* that should work for MSVC/GCC/clang.
*/
#ifdef UACPI_OVERRIDE_COMPILER
#include "uacpi_compiler.h"
#else
#define UACPI_ALIGN(x) __declspec(align(x))
#if defined(__WATCOMC__)
#define UACPI_STATIC_ASSERT(expr, msg)
#elif defined(__cplusplus)
#define UACPI_STATIC_ASSERT static_assert
#else
#define UACPI_STATIC_ASSERT _Static_assert
#endif
#ifdef _MSC_VER
#include <intrin.h>
#define UACPI_ALWAYS_INLINE __forceinline
#define UACPI_PACKED(decl) \
__pragma(pack(push, 1)) \
decl; \
__pragma(pack(pop))
#elif defined(__WATCOMC__)
#define UACPI_ALWAYS_INLINE inline
#define UACPI_PACKED(decl) _Packed decl;
#else
#define UACPI_ALWAYS_INLINE inline __attribute__((always_inline))
#define UACPI_PACKED(decl) decl __attribute__((packed));
#endif
#if defined(__GNUC__) || defined(__clang__)
#define uacpi_unlikely(expr) __builtin_expect(!!(expr), 0)
#define uacpi_likely(expr) __builtin_expect(!!(expr), 1)
#ifdef __has_attribute
#if __has_attribute(__fallthrough__)
#define UACPI_FALLTHROUGH __attribute__((__fallthrough__))
#endif
#endif
#define UACPI_MAYBE_UNUSED __attribute__ ((unused))
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wunused-parameter\"")
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END \
_Pragma("GCC diagnostic pop")
#ifdef __clang__
#define UACPI_PRINTF_DECL(fmt_idx, args_idx) \
__attribute__((format(printf, fmt_idx, args_idx)))
#else
#define UACPI_PRINTF_DECL(fmt_idx, args_idx) \
__attribute__((format(gnu_printf, fmt_idx, args_idx)))
#endif
#define UACPI_COMPILER_HAS_BUILTIN_MEMCPY
#define UACPI_COMPILER_HAS_BUILTIN_MEMMOVE
#define UACPI_COMPILER_HAS_BUILTIN_MEMSET
#define UACPI_COMPILER_HAS_BUILTIN_MEMCMP
#elif defined(__WATCOMC__)
#define uacpi_unlikely(expr) expr
#define uacpi_likely(expr) expr
/*
* The OpenWatcom documentation suggests this should be done using
* _Pragma("off (unreferenced)") and _Pragma("pop (unreferenced)"),
* but these pragmas appear to be no-ops. Use inline as the next best thing.
* Note that OpenWatcom accepts redundant modifiers without a warning,
* so UACPI_MAYBE_UNUSED inline still works.
*/
#define UACPI_MAYBE_UNUSED inline
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END
#define UACPI_PRINTF_DECL(fmt_idx, args_idx)
#else
#define uacpi_unlikely(expr) expr
#define uacpi_likely(expr) expr
#define UACPI_MAYBE_UNUSED
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN
#define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END
#define UACPI_PRINTF_DECL(fmt_idx, args_idx)
#endif
#ifndef UACPI_FALLTHROUGH
#define UACPI_FALLTHROUGH do {} while (0)
#endif
#ifndef UACPI_POINTER_SIZE
#ifdef _WIN32
#ifdef _WIN64
#define UACPI_POINTER_SIZE 8
#else
#define UACPI_POINTER_SIZE 4
#endif
#elif defined(__GNUC__)
#define UACPI_POINTER_SIZE __SIZEOF_POINTER__
#elif defined(__WATCOMC__)
#ifdef __386__
#define UACPI_POINTER_SIZE 4
#elif defined(__I86__)
#error uACPI does not support 16-bit mode compilation
#else
#error Unknown target architecture
#endif
#else
#error Failed to detect pointer size
#endif
#endif
#endif

View File

@ -0,0 +1,162 @@
#pragma once
#ifdef UACPI_OVERRIDE_CONFIG
#include "uacpi_config.h"
#else
#include <uacpi/helpers.h>
#include <uacpi/log.h>
/*
* =======================
* Context-related options
* =======================
*/
#ifndef UACPI_DEFAULT_LOG_LEVEL
#define UACPI_DEFAULT_LOG_LEVEL UACPI_LOG_INFO
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_DEFAULT_LOG_LEVEL < UACPI_LOG_ERROR ||
UACPI_DEFAULT_LOG_LEVEL > UACPI_LOG_DEBUG,
"configured default log level is invalid"
);
#ifndef UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS
#define UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS 30
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS < 1,
"configured default loop timeout is invalid (expecting at least 1 second)"
);
#ifndef UACPI_DEFAULT_MAX_CALL_STACK_DEPTH
#define UACPI_DEFAULT_MAX_CALL_STACK_DEPTH 256
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_DEFAULT_MAX_CALL_STACK_DEPTH < 4,
"configured default max call stack depth is invalid "
"(expecting at least 4 frames)"
);
/*
* ===================
* Kernel-api options
* ===================
*/
/*
* Convenience initialization/deinitialization hooks that will be called by
* uACPI automatically when appropriate if compiled-in.
*/
// #define UACPI_KERNEL_INITIALIZATION
/*
* Makes kernel api logging callbacks work with unformatted printf-style
* strings and va_args instead of a pre-formatted string. Can be useful if
* your native logging is implemented in terms of this format as well.
*/
// #define UACPI_FORMATTED_LOGGING
/*
* Makes uacpi_kernel_free take in an additional 'size_hint' parameter, which
* contains the size of the original allocation. Note that this comes with a
* performance penalty in some cases.
*/
// #define UACPI_SIZED_FREES
/*
* Makes uacpi_kernel_alloc_zeroed mandatory to implement by the host, uACPI
* will not provide a default implementation if this is enabled.
*/
// #define UACPI_NATIVE_ALLOC_ZEROED
/*
* =========================
* Platform-specific options
* =========================
*/
/*
* Makes uACPI use the internal versions of mem{cpy,move,set,cmp} instead of
* relying on the host to provide them. Note that compilers like clang and GCC
* rely on these being available by default, even in freestanding mode, so
* compiling uACPI may theoretically generate implicit dependencies on them
* even if this option is defined.
*/
// #define UACPI_USE_BUILTIN_STRING
/*
* Turns uacpi_phys_addr and uacpi_io_addr into a 32-bit type, and adds extra
* code for address truncation. Needed for e.g. i686 platforms without PAE
* support.
*/
// #define UACPI_PHYS_ADDR_IS_32BITS
/*
* Switches uACPI into reduced-hardware-only mode. Strips all full-hardware
* ACPI support code at compile-time, including the event subsystem, the global
* lock, and other full-hardware features.
*/
// #define UACPI_REDUCED_HARDWARE
/*
* Switches uACPI into tables-subsystem-only mode and strips all other code.
* This means only the table API will be usable, no other subsystems are
* compiled in. In this mode, uACPI only depends on the following kernel APIs:
* - uacpi_kernel_get_rsdp
* - uacpi_kernel_{map,unmap}
* - uacpi_kernel_log
*
* Use uacpi_setup_early_table_access to initialize, uacpi_state_reset to
* deinitialize.
*
* This mode is primarily designed for these three use-cases:
* - Bootloader/pre-kernel environments that need to parse ACPI tables, but
* don't actually need a fully-featured AML interpreter, and everything else
* that a full ACPI implementation entails.
* - A micro-kernel that has the full AML interpreter running in userspace, but
* still needs to parse ACPI tables to bootstrap allocators, timers, SMP etc.
* - A WIP kernel that needs to parse ACPI tables for bootstrapping SMP/timers,
* ECAM, etc., but doesn't yet have enough subsystems implemented in order
* to run a fully-featured AML interpreter.
*/
// #define UACPI_BAREBONES_MODE
/*
* =============
* Misc. options
* =============
*/
/*
* If UACPI_FORMATTED_LOGGING is not enabled, this is the maximum length of the
* pre-formatted message that is passed to the logging callback.
*/
#ifndef UACPI_PLAIN_LOG_BUFFER_SIZE
#define UACPI_PLAIN_LOG_BUFFER_SIZE 128
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_PLAIN_LOG_BUFFER_SIZE < 16,
"configured log buffer size is too small (expecting at least 16 bytes)"
);
/*
* The size of the table descriptor inline storage. All table descriptors past
* this length will be stored in a dynamically allocated heap array. The size
* of one table descriptor is approximately 56 bytes.
*/
#ifndef UACPI_STATIC_TABLE_ARRAY_LEN
#define UACPI_STATIC_TABLE_ARRAY_LEN 16
#endif
UACPI_BUILD_BUG_ON_WITH_MSG(
UACPI_STATIC_TABLE_ARRAY_LEN < 1,
"configured static table array length is too small (expecting at least 1)"
);
#endif

View File

@ -0,0 +1,28 @@
#pragma once
#ifdef UACPI_OVERRIDE_LIBC
#include "uacpi_libc.h"
#else
/*
* The following libc functions are used internally by uACPI and have a default
* (sub-optimal) implementation:
* - strcmp
* - strnlen
* - strlen
* - snprintf
* - vsnprintf
*
* The following use a builtin implementation only if UACPI_USE_BUILTIN_STRING
* is defined (more information can be found in the config.h header):
* - memcpy
* - memmove
* - memset
* - memcmp
*
* In case your platform happens to implement optimized versions of the helpers
* above, you are able to make uACPI use those instead by overriding them like so:
*
* #define uacpi_memcpy my_fast_memcpy
* #define uacpi_snprintf my_fast_snprintf
*/
#endif

View File

@ -0,0 +1,64 @@
#pragma once
/*
* Platform-specific types go here. This is the default placeholder using
* types from the standard headers.
*/
#ifdef UACPI_OVERRIDE_TYPES
#include "uacpi_types.h"
#else
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <uacpi/helpers.h>
/* Fixed-width integer aliases mapped onto the <stdint.h> types */
typedef uint8_t uacpi_u8;
typedef uint16_t uacpi_u16;
typedef uint32_t uacpi_u32;
typedef uint64_t uacpi_u64;
typedef int8_t uacpi_i8;
typedef int16_t uacpi_i16;
typedef int32_t uacpi_i32;
typedef int64_t uacpi_i64;
#define UACPI_TRUE true
#define UACPI_FALSE false
typedef bool uacpi_bool;
#define UACPI_NULL NULL
// Integer type wide enough to round-trip a pointer; also used for virtual addresses
typedef uintptr_t uacpi_uintptr;
typedef uacpi_uintptr uacpi_virt_addr;
typedef size_t uacpi_size;
typedef va_list uacpi_va_list;
#define uacpi_va_start va_start
#define uacpi_va_end va_end
#define uacpi_va_arg va_arg
typedef char uacpi_char;
#define uacpi_offsetof offsetof
/*
 * We use unsigned long long for 64-bit number formatting because 64-bit types
 * don't have a standard way to format them. The inttypes.h header is not
 * freestanding therefore it's not practical to force the user to define the
 * corresponding PRI macros. Moreover, unsigned long long is required to be
 * at least 64-bits as per C99.
 */
UACPI_BUILD_BUG_ON_WITH_MSG(
sizeof(unsigned long long) < 8,
"unsigned long long must be at least 64 bits large as per C99"
);
#define UACPI_PRIu64 "llu"
#define UACPI_PRIx64 "llx"
#define UACPI_PRIX64 "llX"
#define UACPI_FMT64(val) ((unsigned long long)(val))
#endif

View File

@ -0,0 +1,105 @@
#include <uacpi/types.h>
/*
* BEFORE YOU USE THIS API:
* uACPI manages FADT registers on its own entirely, you should only use this
* API directly if there's absolutely no other way for your use case, e.g.
* implementing a CPU idle state driver that does C state switching or similar.
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
 * Fixed-feature ACPI registers described by the FADT, addressable through
 * uacpi_read_register/uacpi_write_register(s) below.
 */
typedef enum uacpi_register {
UACPI_REGISTER_PM1_STS = 0,
UACPI_REGISTER_PM1_EN,
UACPI_REGISTER_PM1_CNT,
UACPI_REGISTER_PM_TMR,
UACPI_REGISTER_PM2_CNT,
UACPI_REGISTER_SLP_CNT,
UACPI_REGISTER_SLP_STS,
UACPI_REGISTER_RESET,
UACPI_REGISTER_SMI_CMD,
UACPI_REGISTER_MAX = UACPI_REGISTER_SMI_CMD,
} uacpi_register;
/*
* Read a register from FADT
*
* NOTE: write-only bits (if any) are cleared automatically
*/
uacpi_status uacpi_read_register(uacpi_register, uacpi_u64*);
/*
* Write a register from FADT
*
* NOTE:
* - Preserved bits (if any) are preserved automatically
* - If a register is made up of two (e.g. PM1a and PM1b) parts, the input
* is written to both at the same time
*/
uacpi_status uacpi_write_register(uacpi_register, uacpi_u64);
/*
* Write a register from FADT
*
* NOTE:
* - Preserved bits (if any) are preserved automatically
* - For registers that are made up of two (e.g. PM1a and PM1b) parts, the
* provided values are written to their respective physical register
*/
uacpi_status uacpi_write_registers(uacpi_register, uacpi_u64, uacpi_u64);
/*
 * Individual bit-fields within the FADT registers above, usable with
 * uacpi_read_register_field/uacpi_write_register_field; the names mirror the
 * corresponding field names in the ACPI specification (status/enable/control
 * bits of PM1 and friends).
 */
typedef enum uacpi_register_field {
UACPI_REGISTER_FIELD_TMR_STS = 0,
UACPI_REGISTER_FIELD_BM_STS,
UACPI_REGISTER_FIELD_GBL_STS,
UACPI_REGISTER_FIELD_PWRBTN_STS,
UACPI_REGISTER_FIELD_SLPBTN_STS,
UACPI_REGISTER_FIELD_RTC_STS,
UACPI_REGISTER_FIELD_PCIEX_WAKE_STS,
UACPI_REGISTER_FIELD_HWR_WAK_STS,
UACPI_REGISTER_FIELD_WAK_STS,
UACPI_REGISTER_FIELD_TMR_EN,
UACPI_REGISTER_FIELD_GBL_EN,
UACPI_REGISTER_FIELD_PWRBTN_EN,
UACPI_REGISTER_FIELD_SLPBTN_EN,
UACPI_REGISTER_FIELD_RTC_EN,
UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS,
UACPI_REGISTER_FIELD_SCI_EN,
UACPI_REGISTER_FIELD_BM_RLD,
UACPI_REGISTER_FIELD_GBL_RLS,
UACPI_REGISTER_FIELD_SLP_TYP,
UACPI_REGISTER_FIELD_HWR_SLP_TYP,
UACPI_REGISTER_FIELD_SLP_EN,
UACPI_REGISTER_FIELD_HWR_SLP_EN,
UACPI_REGISTER_FIELD_ARB_DIS,
UACPI_REGISTER_FIELD_MAX = UACPI_REGISTER_FIELD_ARB_DIS,
} uacpi_register_field;
/*
* Read a field from a FADT register
*
* NOTE: The value is automatically masked and shifted down as appropriate,
* the client code doesn't have to do any bit manipulation. E.g. for
* a field at 0b???XX??? the returned value will contain just the 0bXX
*/
uacpi_status uacpi_read_register_field(uacpi_register_field, uacpi_u64*);
/*
* Write to a field of a FADT register
*
* NOTE: The value is automatically masked and shifted up as appropriate,
* the client code doesn't have to do any bit manipulation. E.g. for
* a field at 0b???XX??? the passed value should be just 0bXX
*/
uacpi_status uacpi_write_register_field(uacpi_register_field, uacpi_u64);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,759 @@
#pragma once
#include <uacpi/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
 * Native resource types produced when decoding AML resource descriptor
 * buffers (e.g. the result of _CRS/_PRS). Each value identifies which member
 * of the uacpi_resource union below is active.
 */
typedef enum uacpi_resource_type {
    UACPI_RESOURCE_TYPE_IRQ,
    UACPI_RESOURCE_TYPE_EXTENDED_IRQ,
    UACPI_RESOURCE_TYPE_DMA,
    UACPI_RESOURCE_TYPE_FIXED_DMA,
    UACPI_RESOURCE_TYPE_IO,
    UACPI_RESOURCE_TYPE_FIXED_IO,
    UACPI_RESOURCE_TYPE_ADDRESS16,
    UACPI_RESOURCE_TYPE_ADDRESS32,
    UACPI_RESOURCE_TYPE_ADDRESS64,
    UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED,
    UACPI_RESOURCE_TYPE_MEMORY24,
    UACPI_RESOURCE_TYPE_MEMORY32,
    UACPI_RESOURCE_TYPE_FIXED_MEMORY32,
    UACPI_RESOURCE_TYPE_START_DEPENDENT,
    UACPI_RESOURCE_TYPE_END_DEPENDENT,
    // Up to 7 bytes
    UACPI_RESOURCE_TYPE_VENDOR_SMALL,
    // Up to 2^16 - 1 bytes
    UACPI_RESOURCE_TYPE_VENDOR_LARGE,
    UACPI_RESOURCE_TYPE_GENERIC_REGISTER,
    UACPI_RESOURCE_TYPE_GPIO_CONNECTION,
    // These must always be contiguous in this order
    UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION,
    UACPI_RESOURCE_TYPE_SERIAL_SPI_CONNECTION,
    UACPI_RESOURCE_TYPE_SERIAL_UART_CONNECTION,
    UACPI_RESOURCE_TYPE_SERIAL_CSI2_CONNECTION,
    UACPI_RESOURCE_TYPE_PIN_FUNCTION,
    UACPI_RESOURCE_TYPE_PIN_CONFIGURATION,
    UACPI_RESOURCE_TYPE_PIN_GROUP,
    UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION,
    UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION,
    UACPI_RESOURCE_TYPE_CLOCK_INPUT,
    UACPI_RESOURCE_TYPE_END_TAG,
    UACPI_RESOURCE_TYPE_MAX = UACPI_RESOURCE_TYPE_END_TAG,
} uacpi_resource_type;
/*
 * Optional "ResourceSource" reference carried by some descriptors: the name
 * of the device object producing the resource, plus an optional index.
 * 'string' is only meaningful when 'length' is non-zero; 'index' is only
 * meaningful when 'index_present' is true.
 */
typedef struct uacpi_resource_source {
    uacpi_u8 index;
    uacpi_bool index_present;
    uacpi_u16 length;
    uacpi_char *string;
} uacpi_resource_source;
/*
* This applies to IRQ & StartDependent resources only. The DONT_CARE value is
* used for deserialization into the AML format to signify that the serializer
* is allowed to optimize the length down if possible. Note that this is
* generally not allowed unless the resource is generated by the caller:
*
* -- ACPI 6.5 ------------------------------------------------------------
* The resource descriptors in the byte stream argument must be specified
* exactly as listed in the _CRS byte stream - meaning that the identical
* resource descriptors must appear in the identical order, resulting in a
* buffer of exactly the same length. Optimizations such as changing an
* IRQ descriptor to an IRQNoFlags descriptor (or vice-versa) must not be
* performed. Similarly, changing StartDependentFn to StartDependentFnNoPri
* is not allowed.
* ------------------------------------------------------------------------
*/
enum uacpi_resource_length_kind {
    UACPI_RESOURCE_LENGTH_KIND_DONT_CARE = 0,
    UACPI_RESOURCE_LENGTH_KIND_ONE_LESS,
    UACPI_RESOURCE_LENGTH_KIND_FULL,
};
// triggering fields
#define UACPI_TRIGGERING_EDGE 1
#define UACPI_TRIGGERING_LEVEL 0
// polarity
#define UACPI_POLARITY_ACTIVE_HIGH 0
#define UACPI_POLARITY_ACTIVE_LOW 1
#define UACPI_POLARITY_ACTIVE_BOTH 2
// sharing
#define UACPI_EXCLUSIVE 0
#define UACPI_SHARED 1
// wake_capability
#define UACPI_WAKE_CAPABLE 1
#define UACPI_NOT_WAKE_CAPABLE 0
/*
 * Decoded legacy IRQ() descriptor. 'irqs' is a flexible array holding
 * 'num_irqs' IRQ numbers; flag fields use the UACPI_TRIGGERING_*/
 /* UACPI_POLARITY_*/ /* UACPI_EXCLUSIVE/SHARED / UACPI_*_WAKE_CAPABLE values above.
 */
typedef struct uacpi_resource_irq {
    uacpi_u8 length_kind;
    uacpi_u8 triggering;
    uacpi_u8 polarity;
    uacpi_u8 sharing;
    uacpi_u8 wake_capability;
    uacpi_u8 num_irqs;
    uacpi_u8 irqs[];
} uacpi_resource_irq;
/*
 * Decoded extended interrupt descriptor; like uacpi_resource_irq but with
 * 32-bit interrupt numbers and an optional ResourceSource reference.
 */
typedef struct uacpi_resource_extended_irq {
    uacpi_u8 direction;
    uacpi_u8 triggering;
    uacpi_u8 polarity;
    uacpi_u8 sharing;
    uacpi_u8 wake_capability;
    uacpi_u8 num_irqs;
    uacpi_resource_source source;
    uacpi_u32 irqs[];
} uacpi_resource_extended_irq;
// transfer_type
#define UACPI_TRANSFER_TYPE_8_BIT 0b00
#define UACPI_TRANSFER_TYPE_8_AND_16_BIT 0b01
#define UACPI_TRANSFER_TYPE_16_BIT 0b10
// bus_master_status
#define UACPI_BUS_MASTER 0b1
// channel_speed
#define UACPI_DMA_COMPATIBILITY 0b00
#define UACPI_DMA_TYPE_A 0b01
#define UACPI_DMA_TYPE_B 0b10
#define UACPI_DMA_TYPE_F 0b11
// transfer_width
#define UACPI_TRANSFER_WIDTH_8 0x00
#define UACPI_TRANSFER_WIDTH_16 0x01
#define UACPI_TRANSFER_WIDTH_32 0x02
#define UACPI_TRANSFER_WIDTH_64 0x03
#define UACPI_TRANSFER_WIDTH_128 0x04
#define UACPI_TRANSFER_WIDTH_256 0x05
// Decoded DMA() descriptor; 'channels' holds 'num_channels' channel numbers.
typedef struct uacpi_resource_dma {
    uacpi_u8 transfer_type;
    uacpi_u8 bus_master_status;
    uacpi_u8 channel_speed;
    uacpi_u8 num_channels;
    uacpi_u8 channels[];
} uacpi_resource_dma;
// Decoded FixedDMA() descriptor (single request line/channel pair).
typedef struct uacpi_resource_fixed_dma {
    uacpi_u16 request_line;
    uacpi_u16 channel;
    uacpi_u8 transfer_width;
} uacpi_resource_fixed_dma;
// decode_type
#define UACPI_DECODE_16 0b1
#define UACPI_DECODE_10 0b0
// Decoded IO() descriptor: a min/max port range with alignment and length.
typedef struct uacpi_resource_io {
    uacpi_u8 decode_type;
    uacpi_u16 minimum;
    uacpi_u16 maximum;
    uacpi_u8 alignment;
    uacpi_u8 length;
} uacpi_resource_io;
// Decoded FixedIO() descriptor: a fixed port base and length.
typedef struct uacpi_resource_fixed_io {
    uacpi_u16 address;
    uacpi_u8 length;
} uacpi_resource_fixed_io;
// write_status
#define UACPI_NON_WRITABLE 0
#define UACPI_WRITABLE 1
// caching
#define UACPI_NON_CACHEABLE 0
#define UACPI_CACHEABLE 1
#define UACPI_CACHEABLE_WRITE_COMBINING 2
#define UACPI_PREFETCHABLE 3
// range_type
#define UACPI_RANGE_TYPE_MEMORY 0
#define UACPI_RANGE_TYPE_RESERVED 1
#define UACPI_RANGE_TYPE_ACPI 2
#define UACPI_RANGE_TYPE_NVS 3
// address_common->type
#define UACPI_RANGE_MEMORY 0
#define UACPI_RANGE_IO 1
#define UACPI_RANGE_BUS 2
// translation
#define UACPI_IO_MEM_TRANSLATION 1
#define UACPI_IO_MEM_STATIC 0
// translation_type
#define UACPI_TRANSLATION_DENSE 0
#define UACPI_TRANSLATION_SPARSE 1
// direction
#define UACPI_PRODUCER 0
#define UACPI_CONSUMER 1
// decode_type
#define UACPI_POSITIVE_DECODE 0
#define UACPI_SUBTRACTIVE_DECODE 1
// fixed_min_address & fixed_max_address
#define UACPI_ADDRESS_NOT_FIXED 0
#define UACPI_ADDRESS_FIXED 1
// Type-specific flags of an address descriptor targeting memory ranges.
typedef struct uacpi_memory_attribute {
    uacpi_u8 write_status;
    uacpi_u8 caching;
    uacpi_u8 range_type;
    uacpi_u8 translation;
} uacpi_memory_attribute;
// Type-specific flags of an address descriptor targeting IO ranges.
typedef struct uacpi_io_attribute {
    uacpi_u8 range_type;
    uacpi_u8 translation;
    uacpi_u8 translation_type;
} uacpi_io_attribute;
/*
 * Type-specific flags of an address descriptor; which member is active is
 * determined by uacpi_resource_address_common::type (UACPI_RANGE_*).
 */
typedef union uacpi_address_attribute {
    uacpi_memory_attribute memory;
    uacpi_io_attribute io;
    uacpi_u8 type_specific;
} uacpi_address_attribute;
// Fields shared by all WordSpace/DWordSpace/QWordSpace address descriptors.
typedef struct uacpi_resource_address_common {
    uacpi_address_attribute attribute;
    uacpi_u8 type;
    uacpi_u8 direction;
    uacpi_u8 decode_type;
    uacpi_u8 fixed_min_address;
    uacpi_u8 fixed_max_address;
} uacpi_resource_address_common;
// Decoded 16-bit (Word) address space descriptor.
typedef struct uacpi_resource_address16 {
    uacpi_resource_address_common common;
    uacpi_u16 granularity;
    uacpi_u16 minimum;
    uacpi_u16 maximum;
    uacpi_u16 translation_offset;
    uacpi_u16 address_length;
    uacpi_resource_source source;
} uacpi_resource_address16;
// Decoded 32-bit (DWord) address space descriptor.
typedef struct uacpi_resource_address32 {
    uacpi_resource_address_common common;
    uacpi_u32 granularity;
    uacpi_u32 minimum;
    uacpi_u32 maximum;
    uacpi_u32 translation_offset;
    uacpi_u32 address_length;
    uacpi_resource_source source;
} uacpi_resource_address32;
// Decoded 64-bit (QWord) address space descriptor.
typedef struct uacpi_resource_address64 {
    uacpi_resource_address_common common;
    uacpi_u64 granularity;
    uacpi_u64 minimum;
    uacpi_u64 maximum;
    uacpi_u64 translation_offset;
    uacpi_u64 address_length;
    uacpi_resource_source source;
} uacpi_resource_address64;
// Decoded extended 64-bit address space descriptor (adds 'attributes').
typedef struct uacpi_resource_address64_extended {
    uacpi_resource_address_common common;
    uacpi_u8 revision_id;
    uacpi_u64 granularity;
    uacpi_u64 minimum;
    uacpi_u64 maximum;
    uacpi_u64 translation_offset;
    uacpi_u64 address_length;
    uacpi_u64 attributes;
} uacpi_resource_address64_extended;
// Decoded Memory24() descriptor.
typedef struct uacpi_resource_memory24 {
    uacpi_u8 write_status;
    uacpi_u16 minimum;
    uacpi_u16 maximum;
    uacpi_u16 alignment;
    uacpi_u16 length;
} uacpi_resource_memory24;
// Decoded Memory32() descriptor.
typedef struct uacpi_resource_memory32 {
    uacpi_u8 write_status;
    uacpi_u32 minimum;
    uacpi_u32 maximum;
    uacpi_u32 alignment;
    uacpi_u32 length;
} uacpi_resource_memory32;
// Decoded Memory32Fixed() descriptor: fixed base address and length.
typedef struct uacpi_resource_fixed_memory32 {
    uacpi_u8 write_status;
    uacpi_u32 address;
    uacpi_u32 length;
} uacpi_resource_fixed_memory32;
// compatibility & performance
#define UACPI_GOOD 0
#define UACPI_ACCEPTABLE 1
#define UACPI_SUB_OPTIMAL 2
// Decoded StartDependentFn() descriptor (UACPI_GOOD/ACCEPTABLE/SUB_OPTIMAL).
typedef struct uacpi_resource_start_dependent {
    uacpi_u8 length_kind;
    uacpi_u8 compatibility;
    uacpi_u8 performance;
} uacpi_resource_start_dependent;
// Small vendor-defined descriptor; 'data' holds 'length' bytes (up to 7).
typedef struct uacpi_resource_vendor_defined {
    uacpi_u8 length;
    uacpi_u8 data[];
} uacpi_resource_vendor;
// Large vendor-defined descriptor with a sub-type and UUID prefix.
typedef struct uacpi_resource_vendor_typed {
    uacpi_u16 length;
    uacpi_u8 sub_type;
    uacpi_u8 uuid[16];
    uacpi_u8 data[];
} uacpi_resource_vendor_typed;
// Decoded Register() descriptor in generic address (GAS-like) form.
typedef struct uacpi_resource_generic_register {
    uacpi_u8 address_space_id;
    uacpi_u8 bit_width;
    uacpi_u8 bit_offset;
    uacpi_u8 access_size;
    uacpi_u64 address;
} uacpi_resource_generic_register;
// type
#define UACPI_GPIO_CONNECTION_INTERRUPT 0x00
#define UACPI_GPIO_CONNECTION_IO 0x01
// Flags used when a GPIO connection is of the INTERRUPT type.
typedef struct uacpi_interrupt_connection_flags {
    uacpi_u8 triggering;
    uacpi_u8 polarity;
    uacpi_u8 sharing;
    uacpi_u8 wake_capability;
} uacpi_interrupt_connection_flags;
// restriction
#define UACPI_IO_RESTRICTION_NONE 0x0
#define UACPI_IO_RESTRICTION_INPUT 0x1
#define UACPI_IO_RESTRICTION_OUTPUT 0x2
#define UACPI_IO_RESTRICTION_NONE_PRESERVE 0x3
// Flags used when a GPIO connection is of the IO type.
typedef struct uacpi_io_connection_flags {
    uacpi_u8 restriction;
    uacpi_u8 sharing;
} uacpi_io_connection_flags;
// pull_configuration
#define UACPI_PIN_CONFIG_DEFAULT 0x00
#define UACPI_PIN_CONFIG_PULL_UP 0x01
#define UACPI_PIN_CONFIG_PULL_DOWN 0x02
#define UACPI_PIN_CONFIG_NO_PULL 0x03
/*
 * Decoded GpioInt()/GpioIo() connection descriptor. The anonymous union is
 * selected by 'type' (UACPI_GPIO_CONNECTION_*). 'pin_table' points at
 * pin_table_length 16-bit pin numbers; 'vendor_data' at vendor_data_length
 * opaque bytes.
 */
typedef struct uacpi_resource_gpio_connection {
    uacpi_u8 revision_id;
    uacpi_u8 type;
    uacpi_u8 direction;
    union {
        uacpi_interrupt_connection_flags intr;
        uacpi_io_connection_flags io;
        uacpi_u16 type_specific;
    };
    uacpi_u8 pull_configuration;
    uacpi_u16 drive_strength;
    uacpi_u16 debounce_timeout;
    uacpi_u16 vendor_data_length;
    uacpi_u16 pin_table_length;
    uacpi_resource_source source;
    uacpi_u16 *pin_table;
    uacpi_u8 *vendor_data;
} uacpi_resource_gpio_connection;
// mode
#define UACPI_MODE_CONTROLLER_INITIATED 0x0
#define UACPI_MODE_DEVICE_INITIATED 0x1
// Fields shared by all GenericSerialBus (I2C/SPI/UART/CSI2) descriptors.
typedef struct uacpi_resource_serial_bus_common {
    uacpi_u8 revision_id;
    uacpi_u8 type;
    uacpi_u8 mode;
    uacpi_u8 direction;
    uacpi_u8 sharing;
    uacpi_u8 type_revision_id;
    uacpi_u16 type_data_length;
    uacpi_u16 vendor_data_length;
    uacpi_resource_source source;
    uacpi_u8 *vendor_data;
} uacpi_resource_serial_bus_common;
// addressing_mode
#define UACPI_I2C_7BIT 0x0
#define UACPI_I2C_10BIT 0x1
// Decoded I2cSerialBusV2() connection descriptor.
typedef struct uacpi_resource_i2c_connection {
    uacpi_resource_serial_bus_common common;
    uacpi_u8 addressing_mode;
    uacpi_u16 slave_address;
    uacpi_u32 connection_speed;
} uacpi_resource_i2c_connection;
// wire_mode
#define UACPI_SPI_4_WIRES 0
#define UACPI_SPI_3_WIRES 1
// device_polarity
#define UACPI_SPI_ACTIVE_LOW 0
#define UACPI_SPI_ACTIVE_HIGH 1
// phase
#define UACPI_SPI_PHASE_FIRST 0
#define UACPI_SPI_PHASE_SECOND 1
// polarity
#define UACPI_SPI_START_LOW 0
#define UACPI_SPI_START_HIGH 1
// Decoded SpiSerialBusV2() connection descriptor.
typedef struct uacpi_resource_spi_connection {
    uacpi_resource_serial_bus_common common;
    uacpi_u8 wire_mode;
    uacpi_u8 device_polarity;
    uacpi_u8 data_bit_length;
    uacpi_u8 phase;
    uacpi_u8 polarity;
    uacpi_u16 device_selection;
    uacpi_u32 connection_speed;
} uacpi_resource_spi_connection;
// stop_bits
#define UACPI_UART_STOP_BITS_NONE 0b00
#define UACPI_UART_STOP_BITS_1 0b01
#define UACPI_UART_STOP_BITS_1_5 0b10
#define UACPI_UART_STOP_BITS_2 0b11
// data_bits
#define UACPI_UART_DATA_5BITS 0b000
#define UACPI_UART_DATA_6BITS 0b001
#define UACPI_UART_DATA_7BITS 0b010
#define UACPI_UART_DATA_8BITS 0b011
#define UACPI_UART_DATA_9BITS 0b100
// endianness
#define UACPI_UART_LITTLE_ENDIAN 0
#define UACPI_UART_BIG_ENDIAN 1
// parity
#define UACPI_UART_PARITY_NONE 0x00
#define UACPI_UART_PARITY_EVEN 0x01
#define UACPI_UART_PARITY_ODD 0x02
#define UACPI_UART_PARITY_MARK 0x03
#define UACPI_UART_PARITY_SPACE 0x04
// lines_enabled
#define UACPI_UART_DATA_CARRIER_DETECT (1 << 2)
#define UACPI_UART_RING_INDICATOR (1 << 3)
#define UACPI_UART_DATA_SET_READY (1 << 4)
#define UACPI_UART_DATA_TERMINAL_READY (1 << 5)
#define UACPI_UART_CLEAR_TO_SEND (1 << 6)
#define UACPI_UART_REQUEST_TO_SEND (1 << 7)
// flow_control
#define UACPI_UART_FLOW_CONTROL_NONE 0b00
#define UACPI_UART_FLOW_CONTROL_HW 0b01
#define UACPI_UART_FLOW_CONTROL_XON_XOFF 0b10
/*
 * Decoded UartSerialBusV2() connection descriptor. 'lines_enabled' is a
 * bit mask of the UACPI_UART_* line defines above.
 */
typedef struct uacpi_resource_uart_connection {
    uacpi_resource_serial_bus_common common;
    uacpi_u8 stop_bits;
    uacpi_u8 data_bits;
    uacpi_u8 endianness;
    uacpi_u8 parity;
    uacpi_u8 lines_enabled;
    uacpi_u8 flow_control;
    uacpi_u32 baud_rate;
    uacpi_u16 rx_fifo;
    uacpi_u16 tx_fifo;
} uacpi_resource_uart_connection;
// phy_type
#define UACPI_CSI2_PHY_C 0b00
#define UACPI_CSI2_PHY_D 0b01
// Decoded Csi2Bus() connection descriptor.
typedef struct uacpi_resource_csi2_connection {
    uacpi_resource_serial_bus_common common;
    uacpi_u8 phy_type;
    uacpi_u8 local_port;
} uacpi_resource_csi2_connection;
/*
 * Decoded PinFunction() descriptor. 'pin_table' points at pin_table_length
 * 16-bit pin numbers; 'vendor_data' at vendor_data_length opaque bytes.
 */
typedef struct uacpi_resource_pin_function {
    uacpi_u8 revision_id;
    uacpi_u8 sharing;
    uacpi_u8 pull_configuration;
    uacpi_u16 function_number;
    uacpi_u16 pin_table_length;
    uacpi_u16 vendor_data_length;
    uacpi_resource_source source;
    uacpi_u16 *pin_table;
    uacpi_u8 *vendor_data;
} uacpi_resource_pin_function;
// type
#define UACPI_PIN_CONFIG_DEFAULT 0x00
#define UACPI_PIN_CONFIG_BIAS_PULL_UP 0x01
#define UACPI_PIN_CONFIG_BIAS_PULL_DOWN 0x02
#define UACPI_PIN_CONFIG_BIAS_DEFAULT 0x03
#define UACPI_PIN_CONFIG_BIAS_DISABLE 0x04
#define UACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE 0x05
#define UACPI_PIN_CONFIG_BIAS_BUS_HOLD 0x06
#define UACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN 0x07
#define UACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE 0x08
#define UACPI_PIN_CONFIG_DRIVE_PUSH_PULL 0x09
#define UACPI_PIN_CONFIG_DRIVE_STRENGTH 0x0A
#define UACPI_PIN_CONFIG_SLEW_RATE 0x0B
#define UACPI_PIN_CONFIG_INPUT_DEBOUNCE 0x0C
#define UACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER 0x0D
// Decoded PinConfig() descriptor ('type' is one of UACPI_PIN_CONFIG_*).
typedef struct uacpi_resource_pin_configuration {
    uacpi_u8 revision_id;
    uacpi_u8 sharing;
    uacpi_u8 direction;
    uacpi_u8 type;
    uacpi_u32 value;
    uacpi_u16 pin_table_length;
    uacpi_u16 vendor_data_length;
    uacpi_resource_source source;
    uacpi_u16 *pin_table;
    uacpi_u8 *vendor_data;
} uacpi_resource_pin_configuration;
// A length-prefixed label string used by pin group descriptors.
typedef struct uacpi_resource_label {
    uacpi_u16 length;
    const uacpi_char *string;
} uacpi_resource_label;
// Decoded PinGroup() descriptor: a named group of pins.
typedef struct uacpi_resource_pin_group {
    uacpi_u8 revision_id;
    uacpi_u8 direction;
    uacpi_u16 pin_table_length;
    uacpi_u16 vendor_data_length;
    uacpi_resource_label label;
    uacpi_u16 *pin_table;
    uacpi_u8 *vendor_data;
} uacpi_resource_pin_group;
// Decoded PinGroupFunction() descriptor referencing a pin group by label.
typedef struct uacpi_resource_pin_group_function {
    uacpi_u8 revision_id;
    uacpi_u8 sharing;
    uacpi_u8 direction;
    uacpi_u16 function;
    uacpi_u16 vendor_data_length;
    uacpi_resource_source source;
    uacpi_resource_label label;
    uacpi_u8 *vendor_data;
} uacpi_resource_pin_group_function;
// Decoded PinGroupConfig() descriptor referencing a pin group by label.
typedef struct uacpi_resource_pin_group_configuration {
    uacpi_u8 revision_id;
    uacpi_u8 sharing;
    uacpi_u8 direction;
    uacpi_u8 type;
    uacpi_u32 value;
    uacpi_u16 vendor_data_length;
    uacpi_resource_source source;
    uacpi_resource_label label;
    uacpi_u8 *vendor_data;
} uacpi_resource_pin_group_configuration;
// scale
#define UACPI_SCALE_HZ 0b00
#define UACPI_SCALE_KHZ 0b01
#define UACPI_SCALE_MHZ 0b10
// frequency
#define UACPI_FREQUENCY_FIXED 0x0
#define UACPI_FREQUENCY_VARIABLE 0x1
// Decoded ClockInput() descriptor (frequency = numerator/divisor in 'scale' units).
typedef struct uacpi_resource_clock_input {
    uacpi_u8 revision_id;
    uacpi_u8 frequency;
    uacpi_u8 scale;
    uacpi_u16 divisor;
    uacpi_u32 numerator;
    uacpi_resource_source source;
} uacpi_resource_clock_input;
/*
 * One decoded resource. 'type' (uacpi_resource_type) selects the active
 * union member; 'length' is the size of this entire entry in bytes and is
 * what UACPI_NEXT_RESOURCE below uses to advance to the next entry.
 */
typedef struct uacpi_resource {
    uacpi_u32 type;
    uacpi_u32 length;
    union {
        uacpi_resource_irq irq;
        uacpi_resource_extended_irq extended_irq;
        uacpi_resource_dma dma;
        uacpi_resource_fixed_dma fixed_dma;
        uacpi_resource_io io;
        uacpi_resource_fixed_io fixed_io;
        uacpi_resource_address16 address16;
        uacpi_resource_address32 address32;
        uacpi_resource_address64 address64;
        uacpi_resource_address64_extended address64_extended;
        uacpi_resource_memory24 memory24;
        uacpi_resource_memory32 memory32;
        uacpi_resource_fixed_memory32 fixed_memory32;
        uacpi_resource_start_dependent start_dependent;
        uacpi_resource_vendor vendor;
        uacpi_resource_vendor_typed vendor_typed;
        uacpi_resource_generic_register generic_register;
        uacpi_resource_gpio_connection gpio_connection;
        uacpi_resource_serial_bus_common serial_bus_common;
        uacpi_resource_i2c_connection i2c_connection;
        uacpi_resource_spi_connection spi_connection;
        uacpi_resource_uart_connection uart_connection;
        uacpi_resource_csi2_connection csi2_connection;
        uacpi_resource_pin_function pin_function;
        uacpi_resource_pin_configuration pin_configuration;
        uacpi_resource_pin_group pin_group;
        uacpi_resource_pin_group_function pin_group_function;
        uacpi_resource_pin_group_configuration pin_group_configuration;
        uacpi_resource_clock_input clock_input;
    };
} uacpi_resource;
// Advance to the next variable-length entry using the current entry's size.
#define UACPI_NEXT_RESOURCE(cur) \
    ((uacpi_resource*)((uacpi_u8*)(cur) + (cur)->length))
typedef struct uacpi_resources {
    /*
     * Length of the 'entries' array in BYTES (NOT the count of resources),
     * see comment above 'entries' for more information.
     */
    uacpi_size length;
    /*
     * Resources are variable length! See UACPI_NEXT_RESOURCE to see how to
     * retrieve the next resource. You can alternatively use
     * uacpi_for_each_resource instead of iterating manually.
     *
     * Resources are guaranteed to be naturally aligned and are always
     * terminated by a resource of type UACPI_RESOURCE_TYPE_END_TAG.
     */
    uacpi_resource *entries;
} uacpi_resources;
void uacpi_free_resources(uacpi_resources*);
typedef uacpi_iteration_decision (*uacpi_resource_iteration_callback)
(void *user, uacpi_resource *resource);
/*
* Evaluate the _CRS method for a 'device' and get the returned resource list
* via 'out_resources'.
*
* NOTE: the returned buffer must be released via uacpi_free_resources()
*
* If you don't need to keep the resource array for later use you can
* uacpi_for_each_device_resource(device, "_CRS", ...) instead, which takes
* care of iteration & memory management on its own.
*/
uacpi_status uacpi_get_current_resources(
uacpi_namespace_node *device, uacpi_resources **out_resources
);
/*
* Evaluate the _PRS method for a 'device' and get the returned resource list
* via 'out_resources'.
*
* NOTE: the returned buffer must be released via uacpi_free_resources()
*
* If you don't need to keep the resource array for later use you can
* uacpi_for_each_device_resource(device, "_PRS", ...) instead, which takes
* care of iteration & memory management on its own.
*/
uacpi_status uacpi_get_possible_resources(
uacpi_namespace_node *device, uacpi_resources **out_resources
);
/*
* Evaluate an arbitrary method that is expected to return an AML resource
* buffer for a 'device' and get the returned resource list via 'out_resources'.
*
* NOTE: the returned buffer must be released via uacpi_free_resources()
*
* If you don't need to keep the resource array for later use you can
* uacpi_for_each_device_resource(device, method, ...) instead, which takes
* care of iteration & memory management on its own.
*/
uacpi_status uacpi_get_device_resources(
uacpi_namespace_node *device, const uacpi_char *method,
uacpi_resources **out_resources
);
/*
* Set the configuration to be used by the 'device' by calling its _SRS method.
*
* Note that this expects 'resources' in the normal 'uacpi_resources' format,
* and not the raw AML resources bytestream, the conversion to the latter is
* done automatically by this API. If you want to _SRS a raw AML resources
* bytestream, use 'uacpi_execute' or similar API directly.
*/
uacpi_status uacpi_set_resources(
uacpi_namespace_node *device, uacpi_resources *resources
);
/*
* A convenience helper for iterating over the resource list returned by any
* of the uacpi_get_*_resources functions.
*/
uacpi_status uacpi_for_each_resource(
uacpi_resources *resources, uacpi_resource_iteration_callback cb, void *user
);
/*
* A shorthand for uacpi_get_device_resources() + uacpi_for_each_resource().
*
* Use if you don't actually want to save the 'resources' list, but simply want
* to iterate it once to extract the resources you care about and then free it
* right away.
*/
uacpi_status uacpi_for_each_device_resource(
uacpi_namespace_node *device, const uacpi_char *method,
uacpi_resource_iteration_callback cb, void *user
);
/*
* Convert a single AML-encoded resource to native format.
*
* This should be used for converting Connection() fields (passed during IO on
* GeneralPurposeIO or GenericSerialBus operation regions) or other similar
* buffers with only one resource to native format.
*
* NOTE: the returned buffer must be released via uacpi_free_resource()
*/
uacpi_status uacpi_get_resource_from_buffer(
uacpi_data_view aml_buffer, uacpi_resource **out_resource
);
void uacpi_free_resource(uacpi_resource*);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,67 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#include <uacpi/uacpi.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
* Set the firmware waking vector in FACS.
*
* 'addr32' is the real mode entry-point address
* 'addr64' is the protected mode entry-point address
*/
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_set_waking_vector(
uacpi_phys_addr addr32, uacpi_phys_addr addr64
))
// ACPI system sleep states S0 (working) through S5 (soft off).
typedef enum uacpi_sleep_state {
    UACPI_SLEEP_STATE_S0 = 0,
    UACPI_SLEEP_STATE_S1,
    UACPI_SLEEP_STATE_S2,
    UACPI_SLEEP_STATE_S3,
    UACPI_SLEEP_STATE_S4,
    UACPI_SLEEP_STATE_S5,
    UACPI_SLEEP_STATE_MAX = UACPI_SLEEP_STATE_S5,
} uacpi_sleep_state;
/*
* Prepare for a given sleep state.
 * Must be called with interrupts ENABLED.
*/
uacpi_status uacpi_prepare_for_sleep_state(uacpi_sleep_state);
/*
* Enter the given sleep state after preparation.
* Must be called with interrupts DISABLED.
*/
uacpi_status uacpi_enter_sleep_state(uacpi_sleep_state);
/*
* Prepare to leave the given sleep state.
* Must be called with interrupts DISABLED.
*/
uacpi_status uacpi_prepare_for_wake_from_sleep_state(uacpi_sleep_state);
/*
* Wake from the given sleep state.
* Must be called with interrupts ENABLED.
*/
uacpi_status uacpi_wake_from_sleep_state(uacpi_sleep_state);
/*
* Attempt reset via the FADT reset register.
*/
uacpi_status uacpi_reboot(void);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,57 @@
#pragma once
#include <uacpi/internal/compiler.h>
#include <uacpi/platform/types.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Status codes returned by all fallible uACPI API. UACPI_STATUS_OK is 0, so
 * the uacpi_*likely_error helpers below can compare against it directly.
 * Values at or above 0x0EFF0000 originate from AML bytecode execution.
 */
typedef enum uacpi_status {
    UACPI_STATUS_OK = 0,
    UACPI_STATUS_MAPPING_FAILED = 1,
    UACPI_STATUS_OUT_OF_MEMORY = 2,
    UACPI_STATUS_BAD_CHECKSUM = 3,
    UACPI_STATUS_INVALID_SIGNATURE = 4,
    UACPI_STATUS_INVALID_TABLE_LENGTH = 5,
    UACPI_STATUS_NOT_FOUND = 6,
    UACPI_STATUS_INVALID_ARGUMENT = 7,
    UACPI_STATUS_UNIMPLEMENTED = 8,
    UACPI_STATUS_ALREADY_EXISTS = 9,
    UACPI_STATUS_INTERNAL_ERROR = 10,
    UACPI_STATUS_TYPE_MISMATCH = 11,
    UACPI_STATUS_INIT_LEVEL_MISMATCH = 12,
    UACPI_STATUS_NAMESPACE_NODE_DANGLING = 13,
    UACPI_STATUS_NO_HANDLER = 14,
    UACPI_STATUS_NO_RESOURCE_END_TAG = 15,
    UACPI_STATUS_COMPILED_OUT = 16,
    UACPI_STATUS_HARDWARE_TIMEOUT = 17,
    UACPI_STATUS_TIMEOUT = 18,
    UACPI_STATUS_OVERRIDDEN = 19,
    UACPI_STATUS_DENIED = 20,
    // All errors that have bytecode-related origin should go here
    UACPI_STATUS_AML_UNDEFINED_REFERENCE = 0x0EFF0000,
    UACPI_STATUS_AML_INVALID_NAMESTRING = 0x0EFF0001,
    UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS = 0x0EFF0002,
    UACPI_STATUS_AML_INVALID_OPCODE = 0x0EFF0003,
    UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE = 0x0EFF0004,
    UACPI_STATUS_AML_BAD_ENCODING = 0x0EFF0005,
    UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX = 0x0EFF0006,
    UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH = 0x0EFF0007,
    UACPI_STATUS_AML_INVALID_RESOURCE = 0x0EFF0008,
    UACPI_STATUS_AML_LOOP_TIMEOUT = 0x0EFF0009,
    UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT = 0x0EFF000A,
} uacpi_status;
const uacpi_char *uacpi_status_to_string(uacpi_status);
#define uacpi_unlikely_error(expr) uacpi_unlikely((expr) != UACPI_STATUS_OK)
#define uacpi_likely_error(expr) uacpi_likely((expr) != UACPI_STATUS_OK)
#define uacpi_unlikely_success(expr) uacpi_unlikely((expr) == UACPI_STATUS_OK)
#define uacpi_likely_success(expr) uacpi_likely((expr) == UACPI_STATUS_OK)
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,141 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#ifdef __cplusplus
extern "C" {
#endif
// Forward-declared to avoid including the entire acpi.h here
struct acpi_fadt;
/*
 * Match criteria for table lookups (see uacpi_table_find): a mandatory
 * 4-character signature plus optional OEM id / OEM table id filters.
 */
typedef struct uacpi_table_identifiers {
    uacpi_object_name signature;
    // if oemid[0] == 0 this field is ignored
    char oemid[6];
    // if oem_table_id[0] == 0 this field is ignored
    char oem_table_id[8];
} uacpi_table_identifiers;
/*
 * Handle to an installed ACPI table: a pointer to its mapped header (the
 * union members are alternate views of the same address) plus the table's
 * internal index.
 */
typedef struct uacpi_table {
    union {
        uacpi_virt_addr virt_addr;
        void *ptr;
        struct acpi_sdt_hdr *hdr;
    };
    // Index number used to identify this table internally
    uacpi_size index;
} uacpi_table;
/*
* Install a table from either a virtual or a physical address.
* The table is simply stored in the internal table array, and not loaded by
* the interpreter (see uacpi_table_load).
*
* The table is optionally returned via 'out_table'.
*
* Manual calls to uacpi_table_install are not subject to filtering via the
* table installation callback (if any).
*/
uacpi_status uacpi_table_install(
void*, uacpi_table *out_table
);
uacpi_status uacpi_table_install_physical(
uacpi_phys_addr, uacpi_table *out_table
);
#ifndef UACPI_BAREBONES_MODE
/*
* Load a previously installed table by feeding it to the interpreter.
*/
uacpi_status uacpi_table_load(uacpi_size index);
#endif // !UACPI_BAREBONES_MODE
/*
* Helpers for finding tables.
*
* for find_by_signature:
* 'signature' is an array of 4 characters, a null terminator is not
* necessary and can be omitted (especially useful for non-C language
* bindings)
*
* 'out_table' is a pointer to a caller allocated uacpi_table structure that
* receives the table pointer & its internal index in case the call was
* successful.
*
* NOTE:
* The returned table's reference count is incremented by 1, which keeps its
* mapping alive forever unless uacpi_table_unref() is called for this table
* later on. Calling uacpi_table_find_next_with_same_signature() on a table also
* drops its reference count by 1, so if you want to keep it mapped you must
* manually call uacpi_table_ref() beforehand.
*/
uacpi_status uacpi_table_find_by_signature(
const uacpi_char *signature, uacpi_table *out_table
);
uacpi_status uacpi_table_find_next_with_same_signature(
uacpi_table *in_out_table
);
uacpi_status uacpi_table_find(
const uacpi_table_identifiers *id, uacpi_table *out_table
);
/*
* Increment/decrement a table's reference count.
* The table is unmapped when the reference count drops to 0.
*/
uacpi_status uacpi_table_ref(uacpi_table*);
uacpi_status uacpi_table_unref(uacpi_table*);
/*
* Returns the pointer to a sanitized internal version of FADT.
*
* The revision is guaranteed to be correct. All of the registers are converted
* to GAS format. Fields that might contain garbage are cleared.
*/
uacpi_status uacpi_table_fadt(struct acpi_fadt**);
/*
 * Verdict returned by a table installation handler.
 * NOTE(review): the enumerator spelling "DISPOSITON" (sic) matches upstream
 * uACPI; renaming it here would break source compatibility with upstream.
 */
typedef enum uacpi_table_installation_disposition {
    // Allow the table to be installed as-is
    UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW = 0,
    /*
     * Deny the table from being installed completely. This is useful for
     * debugging various problems, e.g. AML loading bad SSDTs that cause the
     * system to hang or enter an undesired state.
     */
    UACPI_TABLE_INSTALLATION_DISPOSITON_DENY,
    /*
     * Override the table being installed with the table at the virtual address
     * returned in 'out_override_address'.
     */
    UACPI_TABLE_INSTALLATION_DISPOSITON_VIRTUAL_OVERRIDE,
    /*
     * Override the table being installed with the table at the physical address
     * returned in 'out_override_address'.
     */
    UACPI_TABLE_INSTALLATION_DISPOSITON_PHYSICAL_OVERRIDE,
} uacpi_table_installation_disposition;
typedef uacpi_table_installation_disposition (*uacpi_table_installation_handler)
(struct acpi_sdt_hdr *hdr, uacpi_u64 *out_override_address);
/*
* Set a handler that is invoked for each table before it gets installed.
*
* Depending on the return value, the table is either allowed to be installed
 * as-is, denied, or overridden with a new one.
*/
uacpi_status uacpi_set_table_installation_handler(
uacpi_table_installation_handler handler
);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,544 @@
#pragma once
#include <uacpi/status.h>
#include <uacpi/platform/types.h>
#include <uacpi/platform/compiler.h>
#include <uacpi/platform/arch_helpers.h>
#include <uacpi/platform/config.h>
#ifdef __cplusplus
extern "C" {
#endif
#if UACPI_POINTER_SIZE == 4 && defined(UACPI_PHYS_ADDR_IS_32BITS)
typedef uacpi_u32 uacpi_phys_addr;
typedef uacpi_u32 uacpi_io_addr;
#else
typedef uacpi_u64 uacpi_phys_addr;
typedef uacpi_u64 uacpi_io_addr;
#endif
typedef void *uacpi_handle;
// A 4-character ACPI name, viewable as text or as a single 32-bit id.
typedef union uacpi_object_name {
    uacpi_char text[4];
    uacpi_u32 id;
} uacpi_object_name;
// Return value of iteration callbacks: continue, stop, or skip a subtree.
typedef enum uacpi_iteration_decision {
    UACPI_ITERATION_DECISION_CONTINUE = 0,
    UACPI_ITERATION_DECISION_BREAK,
    // Only applicable for uacpi_namespace_for_each_child
    UACPI_ITERATION_DECISION_NEXT_PEER,
} uacpi_iteration_decision;
/*
 * Operation region address space types. The numeric values of the
 * non-internal entries follow the ACPI-defined address space IDs.
 */
typedef enum uacpi_address_space {
    UACPI_ADDRESS_SPACE_SYSTEM_MEMORY = 0,
    UACPI_ADDRESS_SPACE_SYSTEM_IO = 1,
    UACPI_ADDRESS_SPACE_PCI_CONFIG = 2,
    UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER = 3,
    UACPI_ADDRESS_SPACE_SMBUS = 4,
    UACPI_ADDRESS_SPACE_SYSTEM_CMOS = 5,
    UACPI_ADDRESS_SPACE_PCI_BAR_TARGET = 6,
    UACPI_ADDRESS_SPACE_IPMI = 7,
    UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO = 8,
    UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS = 9,
    UACPI_ADDRESS_SPACE_PCC = 0x0A,
    UACPI_ADDRESS_SPACE_PRM = 0x0B,
    UACPI_ADDRESS_SPACE_FFIXEDHW = 0x7F,
    // Internal type
    UACPI_ADDRESS_SPACE_TABLE_DATA = 0xDA1A,
} uacpi_address_space;
const uacpi_char *uacpi_address_space_to_string(uacpi_address_space space);
#ifndef UACPI_BAREBONES_MODE
// Initialization stages of the uACPI subsystem, in strictly increasing order.
typedef enum uacpi_init_level {
    // Reboot state, nothing is available
    UACPI_INIT_LEVEL_EARLY = 0,
    /*
     * State after a successful call to uacpi_initialize. Table API and
     * other helpers that don't depend on the ACPI namespace may be used.
     */
    UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED = 1,
    /*
     * State after a successful call to uacpi_namespace_load. Most API may be
     * used, namespace can be iterated, etc.
     */
    UACPI_INIT_LEVEL_NAMESPACE_LOADED = 2,
    /*
     * The final initialization stage, this is entered after the call to
     * uacpi_namespace_initialize. All API is available to use.
     */
    UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED = 3,
} uacpi_init_level;
// A PCI function address: segment/bus/device/function.
typedef struct uacpi_pci_address {
    uacpi_u16 segment;
    uacpi_u8 bus;
    uacpi_u8 device;
    uacpi_u8 function;
} uacpi_pci_address;
/*
 * A (pointer, length) view of a buffer. The union members are alternate
 * typed views of the same pointer; 'length' is in bytes.
 */
typedef struct uacpi_data_view {
    union {
        uacpi_u8 *bytes;
        const uacpi_u8 *const_bytes;
        uacpi_char *text;
        const uacpi_char *const_text;
        void *data;
        const void *const_data;
    };
    uacpi_size length;
} uacpi_data_view;
typedef struct uacpi_namespace_node uacpi_namespace_node;
/*
 * Runtime types of AML objects. Values 15 and 17-19 are intentionally
 * unassigned here; UACPI_OBJECT_MAX_TYPE_VALUE tracks the highest value.
 */
typedef enum uacpi_object_type {
    UACPI_OBJECT_UNINITIALIZED = 0,
    UACPI_OBJECT_INTEGER = 1,
    UACPI_OBJECT_STRING = 2,
    UACPI_OBJECT_BUFFER = 3,
    UACPI_OBJECT_PACKAGE = 4,
    UACPI_OBJECT_FIELD_UNIT = 5,
    UACPI_OBJECT_DEVICE = 6,
    UACPI_OBJECT_EVENT = 7,
    UACPI_OBJECT_METHOD = 8,
    UACPI_OBJECT_MUTEX = 9,
    UACPI_OBJECT_OPERATION_REGION = 10,
    UACPI_OBJECT_POWER_RESOURCE = 11,
    UACPI_OBJECT_PROCESSOR = 12,
    UACPI_OBJECT_THERMAL_ZONE = 13,
    UACPI_OBJECT_BUFFER_FIELD = 14,
    UACPI_OBJECT_DEBUG = 16,
    UACPI_OBJECT_REFERENCE = 20,
    UACPI_OBJECT_BUFFER_INDEX = 21,
    UACPI_OBJECT_MAX_TYPE_VALUE = UACPI_OBJECT_BUFFER_INDEX
} uacpi_object_type;
// Type bits for API requiring a bit mask, e.g. uacpi_eval_typed
typedef enum uacpi_object_type_bits {
    UACPI_OBJECT_INTEGER_BIT = (1 << UACPI_OBJECT_INTEGER),
    UACPI_OBJECT_STRING_BIT = (1 << UACPI_OBJECT_STRING),
    UACPI_OBJECT_BUFFER_BIT = (1 << UACPI_OBJECT_BUFFER),
    UACPI_OBJECT_PACKAGE_BIT = (1 << UACPI_OBJECT_PACKAGE),
    UACPI_OBJECT_FIELD_UNIT_BIT = (1 << UACPI_OBJECT_FIELD_UNIT),
    UACPI_OBJECT_DEVICE_BIT = (1 << UACPI_OBJECT_DEVICE),
    UACPI_OBJECT_EVENT_BIT = (1 << UACPI_OBJECT_EVENT),
    UACPI_OBJECT_METHOD_BIT = (1 << UACPI_OBJECT_METHOD),
    UACPI_OBJECT_MUTEX_BIT = (1 << UACPI_OBJECT_MUTEX),
    UACPI_OBJECT_OPERATION_REGION_BIT = (1 << UACPI_OBJECT_OPERATION_REGION),
    UACPI_OBJECT_POWER_RESOURCE_BIT = (1 << UACPI_OBJECT_POWER_RESOURCE),
    UACPI_OBJECT_PROCESSOR_BIT = (1 << UACPI_OBJECT_PROCESSOR),
    UACPI_OBJECT_THERMAL_ZONE_BIT = (1 << UACPI_OBJECT_THERMAL_ZONE),
    UACPI_OBJECT_BUFFER_FIELD_BIT = (1 << UACPI_OBJECT_BUFFER_FIELD),
    UACPI_OBJECT_DEBUG_BIT = (1 << UACPI_OBJECT_DEBUG),
    UACPI_OBJECT_REFERENCE_BIT = (1 << UACPI_OBJECT_REFERENCE),
    UACPI_OBJECT_BUFFER_INDEX_BIT = (1 << UACPI_OBJECT_BUFFER_INDEX),
    UACPI_OBJECT_ANY_BIT = 0xFFFFFFFF,
} uacpi_object_type_bits;
typedef struct uacpi_object uacpi_object;
void uacpi_object_ref(uacpi_object *obj);
void uacpi_object_unref(uacpi_object *obj);
uacpi_object_type uacpi_object_get_type(uacpi_object*);
uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object*);
/*
* Returns UACPI_TRUE if the provided object's type matches this type.
*/
uacpi_bool uacpi_object_is(uacpi_object*, uacpi_object_type);
/*
* Returns UACPI_TRUE if the provided object's type is one of the values
* specified in the 'type_mask' of UACPI_OBJECT_*_BIT.
*/
uacpi_bool uacpi_object_is_one_of(
uacpi_object*, uacpi_object_type_bits type_mask
);
const uacpi_char *uacpi_object_type_to_string(uacpi_object_type);
/*
* Create an uninitialized object. The object can be further overwritten via
* uacpi_object_assign_* to anything.
*/
uacpi_object *uacpi_object_create_uninitialized(void);
/*
* Create an integer object with the value provided.
*/
uacpi_object *uacpi_object_create_integer(uacpi_u64);
typedef enum uacpi_overflow_behavior {
UACPI_OVERFLOW_ALLOW = 0,
UACPI_OVERFLOW_TRUNCATE,
UACPI_OVERFLOW_DISALLOW,
} uacpi_overflow_behavior;
/*
* Same as uacpi_object_create_integer, but introduces additional ways to
* control what happens if the provided integer is larger than 32-bits, and the
* AML code expects 32-bit integers.
*
* - UACPI_OVERFLOW_ALLOW -> do nothing, same as the vanilla helper
* - UACPI_OVERFLOW_TRUNCATE -> truncate the integer to 32-bits if it happens to
* be larger than allowed by the DSDT
* - UACPI_OVERFLOW_DISALLOW -> fail object creation with
* UACPI_STATUS_INVALID_ARGUMENT if the provided
* value happens to be too large
*/
uacpi_status uacpi_object_create_integer_safe(
uacpi_u64, uacpi_overflow_behavior, uacpi_object **out_obj
);
uacpi_status uacpi_object_assign_integer(uacpi_object*, uacpi_u64 value);
uacpi_status uacpi_object_get_integer(uacpi_object*, uacpi_u64 *out);
/*
* Create a string/buffer object. Takes in a constant view of the data.
*
* NOTE: The data is copied to a separately allocated buffer and is not taken
* ownership of.
*/
uacpi_object *uacpi_object_create_string(uacpi_data_view);
uacpi_object *uacpi_object_create_cstring(const uacpi_char*);
uacpi_object *uacpi_object_create_buffer(uacpi_data_view);
/*
* Returns a writable view of the data stored in the string or buffer type
* object.
*/
uacpi_status uacpi_object_get_string_or_buffer(
uacpi_object*, uacpi_data_view *out
);
uacpi_status uacpi_object_get_string(uacpi_object*, uacpi_data_view *out);
uacpi_status uacpi_object_get_buffer(uacpi_object*, uacpi_data_view *out);
/*
* Returns UACPI_TRUE if the provided string object is actually an AML namepath.
*
* This can only be the case for package elements. If a package element is
* specified as a path to an object in AML, it's not resolved by the interpreter
* right away as it might not have been defined at that point yet, and is
* instead stored as a special string object to be resolved by client code
* when needed.
*
* Example usage:
* uacpi_namespace_node *target_node = UACPI_NULL;
*
* uacpi_object *obj = UACPI_NULL;
* uacpi_eval(scope, path, UACPI_NULL, &obj);
*
* uacpi_object_array arr;
* uacpi_object_get_package(obj, &arr);
*
* if (uacpi_object_is_aml_namepath(arr.objects[0])) {
* uacpi_object_resolve_as_aml_namepath(
* arr.objects[0], scope, &target_node
* );
* }
*/
uacpi_bool uacpi_object_is_aml_namepath(uacpi_object*);
/*
* Resolve an AML namepath contained in a string object.
*
* This is only applicable to objects that are package elements. See an
* explanation of how this works in the comment above the declaration of
* uacpi_object_is_aml_namepath.
*
* This is a shorthand for:
* uacpi_data_view view;
* uacpi_object_get_string(object, &view);
*
* target_node = uacpi_namespace_node_resolve_from_aml_namepath(
* scope, view.text
* );
*/
uacpi_status uacpi_object_resolve_as_aml_namepath(
uacpi_object*, uacpi_namespace_node *scope, uacpi_namespace_node **out_node
);
/*
* Make the provided object a string/buffer.
* Takes in a constant view of the data to be stored in the object.
*
* NOTE: The data is copied to a separately allocated buffer and is not taken
* ownership of.
*/
uacpi_status uacpi_object_assign_string(uacpi_object*, uacpi_data_view in);
uacpi_status uacpi_object_assign_buffer(uacpi_object*, uacpi_data_view in);
typedef struct uacpi_object_array {
uacpi_object **objects;
uacpi_size count;
} uacpi_object_array;
/*
* Create a package object and store all of the objects in the array inside.
* The array is allowed to be empty.
*
* NOTE: the reference count of each object is incremented before being stored
* in the object. Client code must remove all of the locally created
* references at its own discretion.
*/
uacpi_object *uacpi_object_create_package(uacpi_object_array in);
/*
* Returns the list of objects stored in a package object.
*
* NOTE: the reference count of the objects stored inside is not incremented,
* which means destorying/overwriting the object also potentially destroys
* all of the objects stored inside unless the reference count is
* incremented by the client via uacpi_object_ref.
*/
uacpi_status uacpi_object_get_package(uacpi_object*, uacpi_object_array *out);
/*
* Make the provided object a package and store all of the objects in the array
* inside. The array is allowed to be empty.
*
* NOTE: the reference count of each object is incremented before being stored
* in the object. Client code must remove all of the locally created
* references at its own discretion.
*/
uacpi_status uacpi_object_assign_package(uacpi_object*, uacpi_object_array in);
/*
* Create a reference object and make it point to 'child'.
*
* NOTE: child's reference count is incremented by one. Client code must remove
* all of the locally created references at its own discretion.
*/
uacpi_object *uacpi_object_create_reference(uacpi_object *child);
/*
* Make the provided object a reference and make it point to 'child'.
*
* NOTE: child's reference count is incremented by one. Client code must remove
* all of the locally created references at its own discretion.
*/
uacpi_status uacpi_object_assign_reference(uacpi_object*, uacpi_object *child);
/*
* Retrieve the object pointed to by a reference object.
*
* NOTE: the reference count of the returned object is incremented by one and
* must be uacpi_object_unref'ed by the client when no longer needed.
*/
uacpi_status uacpi_object_get_dereferenced(uacpi_object*, uacpi_object **out);
typedef struct uacpi_processor_info {
uacpi_u8 id;
uacpi_u32 block_address;
uacpi_u8 block_length;
} uacpi_processor_info;
/*
* Returns the information about the provided processor object.
*/
uacpi_status uacpi_object_get_processor_info(
uacpi_object*, uacpi_processor_info *out
);
typedef struct uacpi_power_resource_info {
uacpi_u8 system_level;
uacpi_u16 resource_order;
} uacpi_power_resource_info;
/*
* Returns the information about the provided power resource object.
*/
uacpi_status uacpi_object_get_power_resource_info(
uacpi_object*, uacpi_power_resource_info *out
);
typedef enum uacpi_region_op {
// data => uacpi_region_attach_data
UACPI_REGION_OP_ATTACH = 0,
// data => uacpi_region_detach_data
UACPI_REGION_OP_DETACH,
// data => uacpi_region_rw_data
UACPI_REGION_OP_READ,
UACPI_REGION_OP_WRITE,
// data => uacpi_region_pcc_send_data
UACPI_REGION_OP_PCC_SEND,
// data => uacpi_region_gpio_rw_data
UACPI_REGION_OP_GPIO_READ,
UACPI_REGION_OP_GPIO_WRITE,
// data => uacpi_region_ipmi_rw_data
UACPI_REGION_OP_IPMI_COMMAND,
// data => uacpi_region_ffixedhw_rw_data
UACPI_REGION_OP_FFIXEDHW_COMMAND,
// data => uacpi_region_prm_rw_data
UACPI_REGION_OP_PRM_COMMAND,
// data => uacpi_region_serial_rw_data
UACPI_REGION_OP_SERIAL_READ,
UACPI_REGION_OP_SERIAL_WRITE,
} uacpi_region_op;
typedef struct uacpi_generic_region_info {
uacpi_u64 base;
uacpi_u64 length;
} uacpi_generic_region_info;
typedef struct uacpi_pcc_region_info {
uacpi_data_view buffer;
uacpi_u8 subspace_id;
} uacpi_pcc_region_info;
typedef struct uacpi_gpio_region_info
{
uacpi_u64 num_pins;
} uacpi_gpio_region_info;
typedef struct uacpi_region_attach_data {
void *handler_context;
uacpi_namespace_node *region_node;
union {
uacpi_generic_region_info generic_info;
uacpi_pcc_region_info pcc_info;
uacpi_gpio_region_info gpio_info;
};
void *out_region_context;
} uacpi_region_attach_data;
typedef struct uacpi_region_rw_data {
void *handler_context;
void *region_context;
union {
uacpi_phys_addr address;
uacpi_u64 offset;
};
uacpi_u64 value;
uacpi_u8 byte_width;
} uacpi_region_rw_data;
typedef struct uacpi_region_pcc_send_data {
void *handler_context;
void *region_context;
uacpi_data_view buffer;
} uacpi_region_pcc_send_data;
typedef struct uacpi_region_gpio_rw_data
{
void *handler_context;
void *region_context;
uacpi_data_view connection;
uacpi_u32 pin_offset;
uacpi_u32 num_pins;
uacpi_u64 value;
} uacpi_region_gpio_rw_data;
typedef struct uacpi_region_ipmi_rw_data
{
void *handler_context;
void *region_context;
uacpi_data_view in_out_message;
uacpi_u64 command;
} uacpi_region_ipmi_rw_data;
typedef uacpi_region_ipmi_rw_data uacpi_region_ffixedhw_rw_data;
typedef struct uacpi_region_prm_rw_data
{
void *handler_context;
void *region_context;
uacpi_data_view in_out_message;
} uacpi_region_prm_rw_data;
typedef enum uacpi_access_attribute {
UACPI_ACCESS_ATTRIBUTE_QUICK = 0x02,
UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE = 0x04,
UACPI_ACCESS_ATTRIBUTE_BYTE = 0x06,
UACPI_ACCESS_ATTRIBUTE_WORD = 0x08,
UACPI_ACCESS_ATTRIBUTE_BLOCK = 0x0A,
UACPI_ACCESS_ATTRIBUTE_BYTES = 0x0B,
UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL = 0x0C,
UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL = 0x0D,
UACPI_ACCESS_ATTRIBUTE_RAW_BYTES = 0x0E,
UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES = 0x0F,
} uacpi_access_attribute;
typedef struct uacpi_region_serial_rw_data {
void *handler_context;
void *region_context;
uacpi_u64 command;
uacpi_data_view connection;
uacpi_data_view in_out_buffer;
uacpi_access_attribute access_attribute;
/*
* Applicable if access_attribute is one of:
* - UACPI_ACCESS_ATTRIBUTE_BYTES
* - UACPI_ACCESS_ATTRIBUTE_RAW_BYTES
* - UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES
*/
uacpi_u8 access_length;
} uacpi_region_serial_rw_data;
typedef struct uacpi_region_detach_data {
void *handler_context;
void *region_context;
uacpi_namespace_node *region_node;
} uacpi_region_detach_data;
typedef uacpi_status (*uacpi_region_handler)
(uacpi_region_op op, uacpi_handle op_data);
typedef uacpi_status (*uacpi_notify_handler)
(uacpi_handle context, uacpi_namespace_node *node, uacpi_u64 value);
typedef enum uacpi_firmware_request_type {
UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT,
UACPI_FIRMWARE_REQUEST_TYPE_FATAL,
} uacpi_firmware_request_type;
typedef struct uacpi_firmware_request {
uacpi_u8 type;
union {
// UACPI_FIRMWARE_REQUEST_BREAKPOINT
struct {
// The context of the method currently being executed
uacpi_handle ctx;
} breakpoint;
// UACPI_FIRMWARE_REQUEST_FATAL
struct {
uacpi_u8 type;
uacpi_u32 code;
uacpi_u64 arg;
} fatal;
};
} uacpi_firmware_request;
#define UACPI_INTERRUPT_NOT_HANDLED 0
#define UACPI_INTERRUPT_HANDLED 1
typedef uacpi_u32 uacpi_interrupt_ret;
typedef uacpi_interrupt_ret (*uacpi_interrupt_handler)(uacpi_handle);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,269 @@
#pragma once
#include <uacpi/types.h>
#include <uacpi/status.h>
#include <uacpi/kernel_api.h>
#include <uacpi/namespace.h>
#define UACPI_MAJOR 3
#define UACPI_MINOR 0
#define UACPI_PATCH 0
#ifdef UACPI_REDUCED_HARDWARE
#define UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, ret) \
UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \
static inline fn { return ret; } \
UACPI_NO_UNUSED_PARAMETER_WARNINGS_END
#define UACPI_STUB_IF_REDUCED_HARDWARE(fn) \
UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn,)
#define UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(fn) \
UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, UACPI_STATUS_COMPILED_OUT)
#define UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(fn) \
UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, UACPI_STATUS_OK)
#else
#define UACPI_STUB_IF_REDUCED_HARDWARE(fn) fn;
#define UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(fn) fn;
#define UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(fn) fn;
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
* Set up early access to the table subsystem. What this means is:
* - uacpi_table_find() and similar API becomes usable before the call to
* uacpi_initialize().
* - No kernel API besides logging and map/unmap will be invoked at this stage,
* allowing for heap and scheduling to still be fully offline.
* - The provided 'temporary_buffer' will be used as a temporary storage for the
* internal metadata about the tables (list, reference count, addresses,
* sizes, etc).
* - The 'temporary_buffer' is replaced with a normal heap buffer allocated via
* uacpi_kernel_alloc() after the call to uacpi_initialize() and can therefore
* be reclaimed by the kernel.
*
* The approximate overhead per table is 56 bytes, so a buffer of 4096 bytes
* yields about 73 tables in terms of capacity. uACPI also has an internal
* static buffer for tables, "UACPI_STATIC_TABLE_ARRAY_LEN", which is configured
* as 16 descriptors in length by default.
*
* This function is used to initialize the barebones mode, see
* UACPI_BAREBONES_MODE in config.h for more information.
*/
uacpi_status uacpi_setup_early_table_access(
void *temporary_buffer, uacpi_size buffer_size
);
/*
* Bad table checksum should be considered a fatal error
* (table load is fully aborted in this case)
*/
#define UACPI_FLAG_BAD_CSUM_FATAL (1ull << 0)
/*
* Unexpected table signature should be considered a fatal error
* (table load is fully aborted in this case)
*/
#define UACPI_FLAG_BAD_TBL_SIGNATURE_FATAL (1ull << 1)
/*
* Force uACPI to use RSDT even for later revisions
*/
#define UACPI_FLAG_BAD_XSDT (1ull << 2)
/*
* If this is set, ACPI mode is not entered during the call to
* uacpi_initialize. The caller is expected to enter it later at their own
* discretion by using uacpi_enter_acpi_mode().
*/
#define UACPI_FLAG_NO_ACPI_MODE (1ull << 3)
/*
* Don't create the \_OSI method when building the namespace.
* Only enable this if you're certain that having this method breaks your AML
* blob, a more atomic/granular interface management is available via osi.h
*/
#define UACPI_FLAG_NO_OSI (1ull << 4)
/*
* Validate table checksums at installation time instead of first use.
* Note that this makes uACPI map the entire table at once, which not all
* hosts are able to handle at early init.
*/
#define UACPI_FLAG_PROACTIVE_TBL_CSUM (1ull << 5)
#ifndef UACPI_BAREBONES_MODE
/*
* Initializes the uACPI subsystem, iterates & records all relevant RSDT/XSDT
* tables. Enters ACPI mode.
*
* 'flags' is any combination of UACPI_FLAG_* above
*/
uacpi_status uacpi_initialize(uacpi_u64 flags);
/*
* Parses & executes all of the DSDT/SSDT tables.
* Initializes the event subsystem.
*/
uacpi_status uacpi_namespace_load(void);
/*
* Initializes all the necessary objects in the namespaces by calling
* _STA/_INI etc.
*/
uacpi_status uacpi_namespace_initialize(void);
// Returns the current subsystem initialization level
uacpi_init_level uacpi_get_current_init_level(void);
/*
* Evaluate an object within the namespace and get back its value.
* Either root or path must be valid.
* A value of NULL for 'parent' implies uacpi_namespace_root() relative
* lookups, unless 'path' is already absolute.
*/
uacpi_status uacpi_eval(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_object **ret
);
uacpi_status uacpi_eval_simple(
uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
);
/*
* Same as uacpi_eval() but without a return value.
*/
uacpi_status uacpi_execute(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args
);
uacpi_status uacpi_execute_simple(
uacpi_namespace_node *parent, const uacpi_char *path
);
/*
* Same as uacpi_eval, but the return value type is validated against
* the 'ret_mask'. UACPI_STATUS_TYPE_MISMATCH is returned on error.
*/
uacpi_status uacpi_eval_typed(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_object_type_bits ret_mask,
uacpi_object **ret
);
uacpi_status uacpi_eval_simple_typed(
uacpi_namespace_node *parent, const uacpi_char *path,
uacpi_object_type_bits ret_mask, uacpi_object **ret
);
/*
* A shorthand for uacpi_eval_typed with UACPI_OBJECT_INTEGER_BIT.
*/
uacpi_status uacpi_eval_integer(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_u64 *out_value
);
uacpi_status uacpi_eval_simple_integer(
uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value
);
/*
* A shorthand for uacpi_eval_typed with
* UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT
*
* Use uacpi_object_get_string_or_buffer to retrieve the resulting buffer data.
*/
uacpi_status uacpi_eval_buffer_or_string(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_object **ret
);
uacpi_status uacpi_eval_simple_buffer_or_string(
uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
);
/*
* A shorthand for uacpi_eval_typed with UACPI_OBJECT_STRING_BIT.
*
* Use uacpi_object_get_string to retrieve the resulting buffer data.
*/
uacpi_status uacpi_eval_string(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_object **ret
);
uacpi_status uacpi_eval_simple_string(
uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
);
/*
* A shorthand for uacpi_eval_typed with UACPI_OBJECT_BUFFER_BIT.
*
* Use uacpi_object_get_buffer to retrieve the resulting buffer data.
*/
uacpi_status uacpi_eval_buffer(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_object **ret
);
uacpi_status uacpi_eval_simple_buffer(
uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
);
/*
* A shorthand for uacpi_eval_typed with UACPI_OBJECT_PACKAGE_BIT.
*
* Use uacpi_object_get_package to retrieve the resulting object array.
*/
uacpi_status uacpi_eval_package(
uacpi_namespace_node *parent, const uacpi_char *path,
const uacpi_object_array *args, uacpi_object **ret
);
uacpi_status uacpi_eval_simple_package(
uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
);
/*
* Get the bitness of the currently loaded AML code according to the DSDT.
*
* Returns either 32 or 64.
*/
uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness);
/*
* Helpers for entering & leaving ACPI mode. Note that ACPI mode is entered
* automatically during the call to uacpi_initialize().
*/
UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_enter_acpi_mode(void)
)
UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
uacpi_status uacpi_leave_acpi_mode(void)
)
/*
* Attempt to acquire the global lock for 'timeout' milliseconds.
* 0xFFFF implies infinite wait.
*
* On success, 'out_seq' is set to a unique sequence number for the current
* acquire transaction. This number is used for validation during release.
*/
uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq);
uacpi_status uacpi_release_global_lock(uacpi_u32 seq);
#endif // !UACPI_BAREBONES_MODE
/*
* Reset the global uACPI state by freeing all internally allocated data
* structures & resetting any global variables. After this call, uACPI must be
* re-initialized from scratch to be used again.
*
* This is called by uACPI automatically if a fatal error occurs during a call
* to uacpi_initialize/uacpi_namespace_load etc. in order to prevent accidental
* use of partially uninitialized subsystems.
*/
void uacpi_state_reset(void);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,192 @@
#pragma once
#include <uacpi/status.h>
#include <uacpi/types.h>
#include <uacpi/namespace.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef UACPI_BAREBONES_MODE
/*
* Checks whether the device at 'node' matches any of the PNP ids provided in
* 'list' (terminated by a UACPI_NULL). This is done by first attempting to
* match the value returned from _HID and then the value(s) from _CID.
*
* Note that the presence of the device (_STA) is not verified here.
*/
uacpi_bool uacpi_device_matches_pnp_id(
uacpi_namespace_node *node,
const uacpi_char *const *list
);
/*
* Find all the devices in the namespace starting at 'parent' matching the
* specified 'hids' (terminated by a UACPI_NULL) against any value from _HID or
* _CID. Only devices reported as present via _STA are checked. Any matching
* devices are then passed to the 'cb'.
*/
uacpi_status uacpi_find_devices_at(
uacpi_namespace_node *parent,
const uacpi_char *const *hids,
uacpi_iteration_callback cb,
void *user
);
/*
* Same as uacpi_find_devices_at, except this starts at the root and only
* matches one hid.
*/
uacpi_status uacpi_find_devices(
const uacpi_char *hid,
uacpi_iteration_callback cb,
void *user
);
typedef enum uacpi_interrupt_model {
UACPI_INTERRUPT_MODEL_PIC = 0,
UACPI_INTERRUPT_MODEL_IOAPIC,
UACPI_INTERRUPT_MODEL_IOSAPIC,
UACPI_INTERRUPT_MODEL_PLATFORM_SPECIFIC,
UACPI_INTERRUPT_MODEL_GIC,
UACPI_INTERRUPT_MODEL_LPIC,
UACPI_INTERRUPT_MODEL_RINTC,
} uacpi_interrupt_model;
uacpi_status uacpi_set_interrupt_model(uacpi_interrupt_model);
typedef struct uacpi_pci_routing_table_entry {
uacpi_u32 address;
uacpi_u32 index;
uacpi_namespace_node *source;
uacpi_u8 pin;
} uacpi_pci_routing_table_entry;
typedef struct uacpi_pci_routing_table {
uacpi_size num_entries;
uacpi_pci_routing_table_entry entries[];
} uacpi_pci_routing_table;
void uacpi_free_pci_routing_table(uacpi_pci_routing_table*);
uacpi_status uacpi_get_pci_routing_table(
uacpi_namespace_node *parent, uacpi_pci_routing_table **out_table
);
typedef struct uacpi_id_string {
// size of the string including the null byte
uacpi_u32 size;
uacpi_char *value;
} uacpi_id_string;
void uacpi_free_id_string(uacpi_id_string *id);
/*
* Evaluate a device's _HID method and get its value.
* The returned struture must be freed using uacpi_free_id_string.
*/
uacpi_status uacpi_eval_hid(uacpi_namespace_node*, uacpi_id_string **out_id);
typedef struct uacpi_pnp_id_list {
// number of 'ids' in the list
uacpi_u32 num_ids;
// size of the 'ids' list including the string lengths
uacpi_u32 size;
// list of PNP ids
uacpi_id_string ids[];
} uacpi_pnp_id_list;
void uacpi_free_pnp_id_list(uacpi_pnp_id_list *list);
/*
* Evaluate a device's _CID method and get its value.
* The returned structure must be freed using uacpi_free_pnp_id_list.
*/
uacpi_status uacpi_eval_cid(uacpi_namespace_node*, uacpi_pnp_id_list **out_list);
/*
* Evaluate a device's _STA method and get its value.
* If this method is not found, the value of 'flags' is set to all ones.
*/
uacpi_status uacpi_eval_sta(uacpi_namespace_node*, uacpi_u32 *flags);
/*
* Evaluate a device's _ADR method and get its value.
*/
uacpi_status uacpi_eval_adr(uacpi_namespace_node*, uacpi_u64 *out);
/*
* Evaluate a device's _CLS method and get its value.
* The format of returned string is BBSSPP where:
* BB => Base Class (e.g. 01 => Mass Storage)
* SS => Sub-Class (e.g. 06 => SATA)
* PP => Programming Interface (e.g. 01 => AHCI)
* The returned struture must be freed using uacpi_free_id_string.
*/
uacpi_status uacpi_eval_cls(uacpi_namespace_node*, uacpi_id_string **out_id);
/*
* Evaluate a device's _UID method and get its value.
* The returned struture must be freed using uacpi_free_id_string.
*/
uacpi_status uacpi_eval_uid(uacpi_namespace_node*, uacpi_id_string **out_uid);
// uacpi_namespace_node_info->flags
#define UACPI_NS_NODE_INFO_HAS_ADR (1 << 0)
#define UACPI_NS_NODE_INFO_HAS_HID (1 << 1)
#define UACPI_NS_NODE_INFO_HAS_UID (1 << 2)
#define UACPI_NS_NODE_INFO_HAS_CID (1 << 3)
#define UACPI_NS_NODE_INFO_HAS_CLS (1 << 4)
#define UACPI_NS_NODE_INFO_HAS_SXD (1 << 5)
#define UACPI_NS_NODE_INFO_HAS_SXW (1 << 6)
typedef struct uacpi_namespace_node_info {
// Size of the entire structure
uacpi_u32 size;
// Object information
uacpi_object_name name;
uacpi_object_type type;
uacpi_u8 num_params;
// UACPI_NS_NODE_INFO_HAS_*
uacpi_u8 flags;
/*
* A mapping of [S1..S4] to the shallowest D state supported by the device
* in that S state.
*/
uacpi_u8 sxd[4];
/*
* A mapping of [S0..S4] to the deepest D state supported by the device
* in that S state to be able to wake itself.
*/
uacpi_u8 sxw[5];
uacpi_u64 adr;
uacpi_id_string hid;
uacpi_id_string uid;
uacpi_id_string cls;
uacpi_pnp_id_list cid;
} uacpi_namespace_node_info;
void uacpi_free_namespace_node_info(uacpi_namespace_node_info*);
/*
* Retrieve information about a namespace node. This includes the attached
* object's type, name, number of parameters (if it's a method), the result of
* evaluating _ADR, _UID, _CLS, _HID, _CID, as well as _SxD and _SxW.
*
* The returned structure must be freed with uacpi_free_namespace_node_info.
*/
uacpi_status uacpi_get_namespace_node_info(
uacpi_namespace_node *node, uacpi_namespace_node_info **out_info
);
#endif // !UACPI_BAREBONES_MODE
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,25 @@
# Standalone Meson build definition for the uACPI library.
project('uacpi', 'c')

# Full list of uACPI translation units (interpreter, namespace,
# event/mutex handling, and the default opregion handlers).
sources = files(
    'source/tables.c',
    'source/types.c',
    'source/uacpi.c',
    'source/utilities.c',
    'source/interpreter.c',
    'source/opcodes.c',
    'source/namespace.c',
    'source/stdlib.c',
    'source/shareable.c',
    'source/opregion.c',
    'source/default_handlers.c',
    'source/io.c',
    'source/notify.c',
    'source/sleep.c',
    'source/registers.c',
    'source/resources.c',
    'source/event.c',
    'source/mutex.c',
)

# Public headers live under include/
includes = include_directories('include')

View File

@ -0,0 +1,336 @@
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/io.h>
#include <uacpi/kernel_api.h>
#include <uacpi/uacpi.h>
#ifndef UACPI_BAREBONES_MODE
#define PCI_ROOT_PNP_ID "PNP0A03"
#define PCI_EXPRESS_ROOT_PNP_ID "PNP0A08"
/*
 * Walk up the namespace from 'node' until an ancestor matching a PCI or
 * PCI Express root bridge PNP id is found. If no such ancestor exists
 * before reaching the namespace root, an error is traced and 'node'
 * itself is returned as a fallback.
 */
static uacpi_namespace_node *find_pci_root(uacpi_namespace_node *node)
{
    static const uacpi_char *pci_root_ids[] = {
        PCI_ROOT_PNP_ID,
        PCI_EXPRESS_ROOT_PNP_ID,
        UACPI_NULL
    };
    uacpi_namespace_node *ancestor;

    for (ancestor = node->parent; ancestor != uacpi_namespace_root();
         ancestor = ancestor->parent) {
        if (!uacpi_device_matches_pnp_id(ancestor, pci_root_ids))
            continue;

        uacpi_trace(
            "found a PCI root node %.4s controlling region %.4s\n",
            ancestor->name.text, node->name.text
        );
        return ancestor;
    }

    uacpi_trace_region_error(
        node, "unable to find PCI root controlling",
        UACPI_STATUS_NOT_FOUND
    );
    return node;
}
/*
 * Attach handler for PCI_Config operation regions: resolve the full PCI
 * address (segment/bus/device/function) of the device owning the region,
 * then ask the host kernel to open a handle to that device.
 *
 * _ADR/_SEG/_BBN evaluation failures are deliberately non-fatal — the
 * corresponding address fields simply keep their zero defaults.
 */
static uacpi_status pci_region_attach(uacpi_region_attach_data *data)
{
    uacpi_namespace_node *node, *pci_root, *device;
    uacpi_pci_address address = { 0 };
    uacpi_u64 value;
    uacpi_status ret;
    node = data->region_node;
    pci_root = find_pci_root(node);
    /*
     * Find the actual device object that is supposed to be controlling
     * this operation region.
     */
    device = node;
    while (device) {
        uacpi_object_type type;
        ret = uacpi_namespace_node_type(device, &type);
        if (uacpi_unlikely_error(ret))
            return ret;
        if (type == UACPI_OBJECT_DEVICE)
            break;
        device = device->parent;
    }
    if (uacpi_unlikely(device == UACPI_NULL)) {
        ret = UACPI_STATUS_NOT_FOUND;
        uacpi_trace_region_error(
            node, "unable to find device responsible for", ret
        );
        return ret;
    }
    // _ADR encodes the address as (device << 16) | function
    ret = uacpi_eval_simple_integer(device, "_ADR", &value);
    if (ret == UACPI_STATUS_OK) {
        address.function = (value >> 0) & 0xFF;
        address.device = (value >> 16) & 0xFF;
    }
    // PCI segment group of the root bridge (defaults to 0 if absent)
    ret = uacpi_eval_simple_integer(pci_root, "_SEG", &value);
    if (ret == UACPI_STATUS_OK)
        address.segment = value;
    // Base bus number of the root bridge (defaults to 0 if absent)
    ret = uacpi_eval_simple_integer(pci_root, "_BBN", &value);
    if (ret == UACPI_STATUS_OK)
        address.bus = value;
    uacpi_trace(
        "detected PCI device %.4s@%04X:%02X:%02X:%01X\n",
        device->name.text, address.segment, address.bus,
        address.device, address.function
    );
    // The opened device handle becomes this region's context
    return uacpi_kernel_pci_device_open(address, &data->out_region_context);
}
// Detach handler for PCI_Config regions: release the kernel device handle.
static uacpi_status pci_region_detach(uacpi_region_detach_data *data)
{
    uacpi_handle dev = data->region_context;

    uacpi_kernel_pci_device_close(dev);
    return UACPI_STATUS_OK;
}
// Forward a read or write on a PCI_Config region to the kernel PCI API.
static uacpi_status pci_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    uacpi_handle dev = data->region_context;
    uacpi_size off = data->offset;
    uacpi_u8 width = data->byte_width;

    if (op == UACPI_REGION_OP_READ)
        return uacpi_pci_read(dev, off, width, &data->value);

    return uacpi_pci_write(dev, off, width, data->value);
}
// Dispatch entry point for PCI_Config operation region requests.
static uacpi_status handle_pci_region(uacpi_region_op op, uacpi_handle op_data)
{
    if (op == UACPI_REGION_OP_ATTACH)
        return pci_region_attach(op_data);

    if (op == UACPI_REGION_OP_DETACH)
        return pci_region_detach(op_data);

    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return pci_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
// Per-region state for SystemMemory operation regions
struct memory_region_ctx {
    uacpi_phys_addr phys;
    uacpi_u8 *virt;
    uacpi_size size;
};
/*
 * Attach handler for SystemMemory regions: allocate a context and map the
 * entire physical range into virtual memory.
 */
static uacpi_status memory_region_attach(uacpi_region_attach_data *data)
{
    struct memory_region_ctx *ctx;

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (ctx == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    ctx->phys = data->generic_info.base;
    ctx->size = data->generic_info.length;

    // FIXME: this really shouldn't try to map everything at once
    ctx->virt = uacpi_kernel_map(ctx->phys, ctx->size);
    if (uacpi_unlikely(ctx->virt == UACPI_NULL)) {
        uacpi_trace_region_error(
            data->region_node, "unable to map", UACPI_STATUS_MAPPING_FAILED
        );
        uacpi_free(ctx, sizeof(*ctx));
        return UACPI_STATUS_MAPPING_FAILED;
    }

    data->out_region_context = ctx;
    return UACPI_STATUS_OK;
}
// Detach handler for SystemMemory regions: unmap and free the context.
static uacpi_status memory_region_detach(uacpi_region_detach_data *data)
{
    struct memory_region_ctx *region = data->region_context;

    uacpi_kernel_unmap(region->virt, region->size);
    uacpi_free(region, sizeof(*region));
    return UACPI_STATUS_OK;
}
// Per-region state for SystemIO operation regions
struct io_region_ctx {
    uacpi_io_addr base;
    uacpi_handle handle;
};
/*
 * Attach handler for SystemIO regions: allocate a context and ask the host
 * kernel for an IO mapping of the region's port range.
 */
static uacpi_status io_region_attach(uacpi_region_attach_data *data)
{
    uacpi_generic_region_info *info = &data->generic_info;
    struct io_region_ctx *ctx;
    uacpi_status st;

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (ctx == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    ctx->base = info->base;

    st = uacpi_kernel_io_map(ctx->base, info->length, &ctx->handle);
    if (uacpi_unlikely_error(st)) {
        uacpi_trace_region_error(
            data->region_node, "unable to map an IO", st
        );
        uacpi_free(ctx, sizeof(*ctx));
        return st;
    }

    data->out_region_context = ctx;
    return st;
}
// Detach handler for SystemIO regions: release the IO mapping and context.
static uacpi_status io_region_detach(uacpi_region_detach_data *data)
{
    struct io_region_ctx *region = data->region_context;

    uacpi_kernel_io_unmap(region->handle);
    uacpi_free(region, sizeof(*region));
    return UACPI_STATUS_OK;
}
/*
 * Perform a read/write on a SystemMemory region. The AML-provided absolute
 * physical address is converted into an offset within the mapping that was
 * established at attach time.
 */
static uacpi_status memory_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    struct memory_region_ctx *ctx = data->region_context;
    uacpi_size off = data->address - ctx->phys;

    if (op == UACPI_REGION_OP_READ)
        return uacpi_system_memory_read(
            ctx->virt, off, data->byte_width, &data->value
        );

    return uacpi_system_memory_write(
        ctx->virt, off, data->byte_width, data->value
    );
}
// Dispatch entry point for SystemMemory operation region requests.
static uacpi_status handle_memory_region(uacpi_region_op op, uacpi_handle op_data)
{
    if (op == UACPI_REGION_OP_ATTACH)
        return memory_region_attach(op_data);

    if (op == UACPI_REGION_OP_DETACH)
        return memory_region_detach(op_data);

    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return memory_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
/*
 * Perform a read/write on a table data region. Here 'offset' is already a
 * virtual address of the table byte in question, so it is converted to a
 * pointer and accessed at offset 0.
 */
static uacpi_status table_data_region_do_rw(
    uacpi_region_op op, uacpi_region_rw_data *data
)
{
    void *addr = UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)data->offset);

    if (op == UACPI_REGION_OP_READ)
        return uacpi_system_memory_read(addr, 0, data->byte_width, &data->value);

    return uacpi_system_memory_write(addr, 0, data->byte_width, data->value);
}
// Dispatch entry point for table data region requests.
static uacpi_status handle_table_data_region(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_READ:
    case UACPI_REGION_OP_WRITE:
        return table_data_region_do_rw(op, op_data);
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
        // No per-region state is needed for table data
        return UACPI_STATUS_OK;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}
static uacpi_status io_region_do_rw(
uacpi_region_op op, uacpi_region_rw_data *data
)
{
struct io_region_ctx *ctx = data->region_context;
uacpi_u8 width;
uacpi_size offset;
offset = data->offset - ctx->base;
width = data->byte_width;
return op == UACPI_REGION_OP_READ ?
uacpi_system_io_read(ctx->handle, offset, width, &data->value) :
uacpi_system_io_write(ctx->handle, offset, width, data->value);
}
/*
 * Dispatch callback for the SystemIO address space: routes attach,
 * detach and read/write operations to the dedicated helpers.
 */
static uacpi_status handle_io_region(uacpi_region_op op, uacpi_handle op_data)
{
    if (op == UACPI_REGION_OP_ATTACH)
        return io_region_attach(op_data);
    if (op == UACPI_REGION_OP_DETACH)
        return io_region_detach(op_data);
    if (op == UACPI_REGION_OP_READ || op == UACPI_REGION_OP_WRITE)
        return io_region_do_rw(op, op_data);

    return UACPI_STATUS_INVALID_ARGUMENT;
}
void uacpi_install_default_address_space_handlers(void)
{
uacpi_namespace_node *root;
root = uacpi_namespace_root();
uacpi_install_address_space_handler_with_flags(
root, UACPI_ADDRESS_SPACE_SYSTEM_MEMORY,
handle_memory_region, UACPI_NULL,
UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
);
uacpi_install_address_space_handler_with_flags(
root, UACPI_ADDRESS_SPACE_SYSTEM_IO,
handle_io_region, UACPI_NULL,
UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
);
uacpi_install_address_space_handler_with_flags(
root, UACPI_ADDRESS_SPACE_PCI_CONFIG,
handle_pci_region, UACPI_NULL,
UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
);
uacpi_install_address_space_handler_with_flags(
root, UACPI_ADDRESS_SPACE_TABLE_DATA,
handle_table_data_region, UACPI_NULL,
UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
);
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,21 @@
# Core uACPI translation units, registered with the consuming build
# through the uacpi_add_sources() helper supplied by the including
# project.
uacpi_add_sources(
    tables.c
    types.c
    uacpi.c
    utilities.c
    interpreter.c
    opcodes.c
    namespace.c
    stdlib.c
    shareable.c
    opregion.c
    default_handlers.c
    io.c
    notify.c
    sleep.c
    registers.c
    resources.c
    event.c
    mutex.c
    osi.c
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,396 @@
#include <uacpi/platform/atomic.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/context.h>
#include <uacpi/kernel_api.h>
#include <uacpi/internal/namespace.h>
#ifndef UACPI_BAREBONES_MODE
#ifndef UACPI_REDUCED_HARDWARE
#define GLOBAL_LOCK_PENDING (1 << 0)
#define GLOBAL_LOCK_OWNED_BIT 1
#define GLOBAL_LOCK_OWNED (1 << GLOBAL_LOCK_OWNED_BIT)
#define GLOBAL_LOCK_MASK 3u
/*
 * Single attempt to take the ACPI global lock living in the FACS.
 * Lock-free update of the lock word: set the owned bit, and if the lock
 * was already owned, set the pending bit so the current owner notifies
 * us on release. Returns UACPI_TRUE if ownership was acquired outright.
 */
static uacpi_bool try_acquire_global_lock_from_firmware(uacpi_u32 *lock)
{
    uacpi_u32 value, new_value;
    uacpi_bool was_owned;

    value = *(volatile uacpi_u32*)lock;
    do {
        was_owned = (value & GLOBAL_LOCK_OWNED) >> GLOBAL_LOCK_OWNED_BIT;

        // Clear both owned & pending bits.
        new_value = value & ~GLOBAL_LOCK_MASK;

        // Set owned unconditionally
        new_value |= GLOBAL_LOCK_OWNED;

        // Set pending iff the lock was owned at the time of reading
        if (was_owned)
            new_value |= GLOBAL_LOCK_PENDING;
    } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));

    return !was_owned;
}
/*
 * Drop ownership of the firmware global lock by clearing both the owned
 * and pending bits. Returns whether the pending bit was set at release
 * time, i.e. whether firmware must be notified of the release.
 */
static uacpi_bool do_release_global_lock_to_firmware(uacpi_u32 *lock)
{
    uacpi_u32 value, new_value;

    value = *(volatile uacpi_u32*)lock;
    do {
        new_value = value & ~GLOBAL_LOCK_MASK;
    } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));

    return value & GLOBAL_LOCK_PENDING;
}
/*
 * Acquire the firmware global lock, retrying while firmware owns it.
 * Between attempts the internal spinlock is dropped and we block on
 * global_lock_event (presumably signaled by the release notification
 * handler — not in this file; verify). Gives up with HARDWARE_TIMEOUT
 * after 0xFFFF attempts.
 */
static uacpi_status uacpi_acquire_global_lock_from_firmware(void)
{
    uacpi_cpu_flags flags;
    uacpi_u16 spins = 0;
    uacpi_bool success;

    // Platform exposes no global lock -> nothing to do
    if (!g_uacpi_rt_ctx.has_global_lock)
        return UACPI_STATUS_OK;

    flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
    for (;;) {
        spins++;
        uacpi_trace(
            "trying to acquire the global lock from firmware... (attempt %u)\n",
            spins
        );

        success = try_acquire_global_lock_from_firmware(
            &g_uacpi_rt_ctx.facs->global_lock
        );
        if (success)
            break;

        if (uacpi_unlikely(spins == 0xFFFF))
            break;

        g_uacpi_rt_ctx.global_lock_pending = UACPI_TRUE;
        uacpi_trace(
            "global lock is owned by firmware, waiting for a release "
            "notification...\n"
        );

        // Must not hold the spinlock while sleeping on the event
        uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
        uacpi_kernel_wait_for_event(g_uacpi_rt_ctx.global_lock_event, 0xFFFF);
        flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
    }

    g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
    uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);

    if (uacpi_unlikely(!success)) {
        uacpi_error("unable to acquire global lock after %u attempts\n", spins);
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    uacpi_trace("global lock successfully acquired after %u attempt%s\n",
                spins, spins > 1 ? "s" : "");
    return UACPI_STATUS_OK;
}
/*
 * Hand the firmware global lock back; if another agent marked itself
 * pending while we held it, notify firmware via the GBL_RLS register
 * field so it can signal the waiter.
 */
static void uacpi_release_global_lock_to_firmware(void)
{
    if (!g_uacpi_rt_ctx.has_global_lock)
        return;

    uacpi_trace("releasing the global lock to firmware...\n");
    if (do_release_global_lock_to_firmware(&g_uacpi_rt_ctx.facs->global_lock)) {
        uacpi_trace("notifying firmware of the global lock release since the "
                    "pending bit was set\n");
        uacpi_write_register_field(UACPI_REGISTER_FIELD_GBL_RLS, 1);
    }
}
#endif
/*
 * When UACPI_REDUCED_HARDWARE is defined there is no FACS global lock;
 * these macros are expected to provide an always-OK / no-op stub for the
 * two firmware-lock routines above (real definitions are compiled out).
 */
UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
    uacpi_status uacpi_acquire_global_lock_from_firmware(void)
)
UACPI_STUB_IF_REDUCED_HARDWARE(
    void uacpi_release_global_lock_to_firmware(void)
)
/*
 * Acquire a kernel mutex handle, logging any unexpected failure.
 * A plain timeout is considered expected, unless the caller asked for an
 * infinite wait (0xFFFF) in which case it should never happen.
 */
uacpi_status uacpi_acquire_native_mutex_with_timeout(
    uacpi_handle mtx, uacpi_u16 timeout
)
{
    uacpi_status st;

    if (uacpi_unlikely(mtx == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    st = uacpi_kernel_acquire_mutex(mtx, timeout);
    if (uacpi_likely_success(st))
        return st;

    if (uacpi_unlikely(st != UACPI_STATUS_TIMEOUT || timeout == 0xFFFF)) {
        uacpi_error(
            "unexpected status %08X (%s) while acquiring %p (timeout=%04X)\n",
            st, uacpi_status_to_string(st), mtx, timeout
        );
    }

    return st;
}
/*
 * Public global-lock acquisition: takes the shared global-lock mutex,
 * then the firmware lock, and hands back a sequence number to be passed
 * to uacpi_release_global_lock().
 */
uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(out_seq == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_acquire_native_mutex_with_timeout(
        g_uacpi_rt_ctx.global_lock_mutex->handle, timeout
    );
    if (ret != UACPI_STATUS_OK)
        return ret;

    ret = uacpi_acquire_global_lock_from_firmware();
    if (uacpi_unlikely_error(ret)) {
        // Undo the mutex acquisition on firmware-lock failure
        uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);
        return ret;
    }

    // Wrap the counter back to 0 before it would hand out 0xFFFFFFFF
    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_seq_num == 0xFFFFFFFF))
        g_uacpi_rt_ctx.global_lock_seq_num = 0;

    /*
     * NOTE(review): post-increment means the stored counter ends up one
     * past *out_seq, while uacpi_release_global_lock() compares the
     * caller's seq against the stored counter — verify a matched
     * acquire/release pair actually passes that check.
     */
    *out_seq = g_uacpi_rt_ctx.global_lock_seq_num++;
    g_uacpi_rt_ctx.global_lock_acquired = UACPI_TRUE;
    return UACPI_STATUS_OK;
}
/*
 * Release the global lock previously taken with
 * uacpi_acquire_global_lock(). 'seq' must match the stored sequence
 * counter (see NOTE in the acquire path about the post-increment there).
 */
uacpi_status uacpi_release_global_lock(uacpi_u32 seq)
{
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(!g_uacpi_rt_ctx.global_lock_acquired ||
        seq != g_uacpi_rt_ctx.global_lock_seq_num))
        return UACPI_STATUS_INVALID_ARGUMENT;

    g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE;
    // Firmware side first, then the native mutex guarding it
    uacpi_release_global_lock_to_firmware();
    uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);

    return UACPI_STATUS_OK;
}
/*
 * Check whether the calling thread currently owns the given AML mutex.
 * The owner field is read atomically, so this is safe to call without
 * holding the mutex.
 */
uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex)
{
    return uacpi_kernel_get_thread_id() ==
           UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner);
}
/*
 * Acquire an AML-visible mutex, with recursion support for the owning
 * thread via a 16-bit depth counter. The namespace write lock is dropped
 * while potentially blocking and re-taken before returning, so other
 * threads can make progress in the meantime.
 */
uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout)
{
    uacpi_thread_id this_id;
    uacpi_status ret = UACPI_STATUS_OK;

    this_id = uacpi_kernel_get_thread_id();
    if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) {
        // Recursive acquire: only bump the depth, refusing past 0xFFFF
        if (uacpi_unlikely(mutex->depth == 0xFFFF)) {
            uacpi_warn(
                "failing an attempt to acquire mutex @%p, too many recursive "
                "acquires\n", mutex
            );
            return UACPI_STATUS_DENIED;
        }

        mutex->depth++;
        return ret;
    }

    uacpi_namespace_write_unlock();

    ret = uacpi_acquire_native_mutex_with_timeout(mutex->handle, timeout);
    if (ret != UACPI_STATUS_OK)
        goto out;

    // The AML global-lock mutex also requires taking the firmware lock
    if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) {
        ret = uacpi_acquire_global_lock_from_firmware();
        if (uacpi_unlikely_error(ret)) {
            uacpi_release_native_mutex(mutex->handle);
            goto out;
        }
    }

    // Publish ownership only after all locks are actually held
    UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id);
    mutex->depth = 1;

out:
    uacpi_namespace_write_lock();
    return ret;
}
/*
 * Release one level of an AML mutex; the underlying native mutex is only
 * dropped once the recursion depth reaches zero. The depth is
 * decremented unconditionally, so callers must own the mutex.
 */
uacpi_status uacpi_release_aml_mutex(uacpi_mutex *mutex)
{
    if (mutex->depth-- > 1)
        return UACPI_STATUS_OK;

    // Final release: return the firmware lock first if this is the
    // global-lock mutex
    if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle)
        uacpi_release_global_lock_to_firmware();

    // Clear ownership before unlocking so a new owner never sees our id
    UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE);
    uacpi_release_native_mutex(mutex->handle);

    return UACPI_STATUS_OK;
}
/*
 * Initialize a recursive lock: allocate the backing kernel mutex and
 * reset the owner/depth bookkeeping.
 */
uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock)
{
    lock->mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    lock->depth = 0;
    lock->owner = UACPI_THREAD_ID_NONE;
    return UACPI_STATUS_OK;
}
uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock)
{
if (uacpi_unlikely(lock->depth)) {
uacpi_warn(
"de-initializing active recursive lock %p with depth=%zu\n",
lock, lock->depth
);
lock->depth = 0;
}
lock->owner = UACPI_THREAD_ID_NONE;
if (lock->mutex != UACPI_NULL) {
uacpi_kernel_free_mutex(lock->mutex);
lock->mutex = UACPI_NULL;
}
return UACPI_STATUS_OK;
}
/*
 * Take a recursive lock: the owning thread only bumps the depth counter,
 * anyone else blocks on the underlying kernel mutex.
 */
uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock)
{
    uacpi_thread_id this_id;
    uacpi_status ret = UACPI_STATUS_OK;

    this_id = uacpi_kernel_get_thread_id();
    if (UACPI_ATOMIC_LOAD_THREAD_ID(&lock->owner) == this_id) {
        lock->depth++;
        return ret;
    }

    ret = uacpi_acquire_native_mutex(lock->mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Publish ownership only after the mutex is actually held
    UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, this_id);
    lock->depth = 1;
    return ret;
}
/*
 * Release one level of a recursive lock; the underlying mutex is dropped
 * only when the depth reaches zero. Ownership is cleared before the
 * unlock so a new owner never observes a stale thread id.
 */
uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock)
{
    if (lock->depth-- > 1)
        return UACPI_STATUS_OK;

    UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, UACPI_THREAD_ID_NONE);
    return uacpi_release_native_mutex(lock->mutex);
}
/*
 * Initialize a reader-writer lock built from two kernel mutexes:
 * read_mutex serializes the reader counter, write_mutex provides the
 * actual exclusion. On failure both handles end up UACPI_NULL.
 */
uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock)
{
    lock->read_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->read_mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    lock->write_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(lock->write_mutex == UACPI_NULL)) {
        // Roll back the first allocation so deinit stays safe
        uacpi_kernel_free_mutex(lock->read_mutex);
        lock->read_mutex = UACPI_NULL;
        return UACPI_STATUS_OUT_OF_MEMORY;
    }

    lock->num_readers = 0;
    return UACPI_STATUS_OK;
}
uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock)
{
if (uacpi_unlikely(lock->num_readers)) {
uacpi_warn("de-initializing rw_lock %p with %zu active readers\n",
lock, lock->num_readers);
lock->num_readers = 0;
}
if (lock->read_mutex != UACPI_NULL) {
uacpi_kernel_free_mutex(lock->read_mutex);
lock->read_mutex = UACPI_NULL;
}
if (lock->write_mutex != UACPI_NULL) {
uacpi_kernel_free_mutex(lock->write_mutex);
lock->write_mutex = UACPI_NULL;
}
return UACPI_STATUS_OK;
}
/*
 * Reader entry: the first reader in takes write_mutex on behalf of all
 * readers, later readers only bump the counter. read_mutex serializes
 * the counter update itself.
 */
uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock)
{
    uacpi_status ret;

    ret = uacpi_acquire_native_mutex(lock->read_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (lock->num_readers++ == 0) {
        ret = uacpi_acquire_native_mutex(lock->write_mutex);

        // Failed to take the writer side: roll the counter back
        if (uacpi_unlikely_error(ret))
            lock->num_readers = 0;
    }

    // NOTE(review): releases via uacpi_kernel_release_mutex directly,
    // unlike the uacpi_release_native_mutex wrapper used elsewhere
    uacpi_kernel_release_mutex(lock->read_mutex);
    return ret;
}
/*
 * Reader exit: the last reader out releases the write_mutex that the
 * first reader in acquired on behalf of all readers.
 */
uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock)
{
    uacpi_status ret;

    ret = uacpi_acquire_native_mutex(lock->read_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (lock->num_readers-- == 1)
        uacpi_release_native_mutex(lock->write_mutex);

    uacpi_kernel_release_mutex(lock->read_mutex);
    return ret;
}
// Writer side: exclusive with every reader, since the first reader in
// holds write_mutex collectively for all readers (see uacpi_rw_lock_read).
uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock)
{
    return uacpi_acquire_native_mutex(lock->write_mutex);
}

uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock)
{
    return uacpi_release_native_mutex(lock->write_mutex);
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,255 @@
#include <uacpi/internal/notify.h>
#include <uacpi/internal/shareable.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/kernel_api.h>
#ifndef UACPI_BAREBONES_MODE
// Serializes notify-handler list mutation and notification scheduling
static uacpi_handle notify_mutex;
uacpi_status uacpi_initialize_notify(void)
{
notify_mutex = uacpi_kernel_create_mutex();
if (uacpi_unlikely(notify_mutex == UACPI_NULL))
return UACPI_STATUS_OUT_OF_MEMORY;
return UACPI_STATUS_OK;
}
void uacpi_deinitialize_notify(void)
{
if (notify_mutex != UACPI_NULL)
uacpi_kernel_free_mutex(notify_mutex);
notify_mutex = UACPI_NULL;
}
// State captured for one deferred Notify() dispatch (see uacpi_notify_all)
struct notification_ctx {
    uacpi_namespace_node *node;  // notification target; reference held
    uacpi_u64 value;             // notification value to deliver
    uacpi_object *node_object;   // the node's object; reference held
};

// Drop the node/object references taken when the context was created,
// then free the context itself.
static void free_notification_ctx(struct notification_ctx *ctx)
{
    uacpi_namespace_node_release_object(ctx->node_object);
    uacpi_namespace_node_unref(ctx->node);
    uacpi_free(ctx, sizeof(*ctx));
}
/*
 * Deferred-work callback: invoke every notify handler registered on the
 * target node, then every handler registered on the namespace root (root
 * handlers receive notifications for any node — matching the check in
 * uacpi_notify_all), and finally free the context.
 */
static void do_notify(uacpi_handle opaque)
{
    struct notification_ctx *ctx = opaque;
    uacpi_device_notify_handler *handler;
    uacpi_bool did_notify_root = UACPI_FALSE;

    handler = ctx->node_object->handlers->notify_head;
    for (;;) {
        if (handler == UACPI_NULL) {
            if (did_notify_root) {
                free_notification_ctx(ctx);
                return;
            }

            // Node list exhausted: switch to the root's list exactly once
            handler = g_uacpi_rt_ctx.root_object->handlers->notify_head;
            did_notify_root = UACPI_TRUE;
            continue;
        }

        handler->callback(handler->user_context, ctx->node, ctx->value);
        handler = handler->next;
    }
}
/*
 * Queue a notification for 'node' to be delivered asynchronously on the
 * notification work queue. Returns NO_HANDLER if neither the node nor
 * the root has a notify handler installed, INVALID_ARGUMENT if the node
 * is not a device/thermal-zone/processor.
 */
uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value)
{
    uacpi_status ret;
    struct notification_ctx *ctx;
    uacpi_object *node_object;

    // Only these object types can receive notifications
    node_object = uacpi_namespace_node_get_object_typed(
        node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
        UACPI_OBJECT_PROCESSOR_BIT
    );
    if (uacpi_unlikely(node_object == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (node_object->handlers->notify_head == UACPI_NULL &&
        g_uacpi_rt_ctx.root_object->handlers->notify_head == UACPI_NULL) {
        ret = UACPI_STATUS_NO_HANDLER;
        goto out;
    }

    ctx = uacpi_kernel_alloc(sizeof(*ctx));
    if (uacpi_unlikely(ctx == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    ctx->node = node;
    // In case this node goes out of scope
    uacpi_shareable_ref(node);

    ctx->value = value;
    // Pin the object too; both references are dropped by
    // free_notification_ctx once delivery completes
    ctx->node_object = uacpi_namespace_node_get_object(node);
    uacpi_object_ref(ctx->node_object);

    ret = uacpi_kernel_schedule_work(UACPI_WORK_NOTIFICATION, do_notify, ctx);
    if (uacpi_unlikely_error(ret)) {
        uacpi_warn("unable to schedule notification work: %s\n",
                   uacpi_status_to_string(ret));
        free_notification_ctx(ctx);
    }

out:
    uacpi_release_native_mutex(notify_mutex);
    return ret;
}
/*
 * Find the list node wrapping 'target_handler' in a handlers list, or
 * UACPI_NULL if the callback is not registered.
 */
static uacpi_device_notify_handler *handler_container(
    uacpi_handlers *handlers, uacpi_notify_handler target_handler
)
{
    uacpi_device_notify_handler *cur;

    for (cur = handlers->notify_head; cur != UACPI_NULL; cur = cur->next) {
        if (cur->callback == target_handler)
            return cur;
    }

    return UACPI_NULL;
}
/*
 * Install a notification handler on 'node' (or on the namespace root to
 * receive notifications for every node). Returns ALREADY_EXISTS if the
 * same callback is already registered on this node.
 */
uacpi_status uacpi_install_notify_handler(
    uacpi_namespace_node *node, uacpi_notify_handler handler,
    uacpi_handle handler_context
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_handlers *handlers;
    uacpi_device_notify_handler *new_handler;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (node == uacpi_namespace_root()) {
        obj = g_uacpi_rt_ctx.root_object;
    } else {
        ret = uacpi_namespace_node_acquire_object_typed(
            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
            UACPI_OBJECT_PROCESSOR_BIT, &obj
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        goto out_no_mutex;

    // Let in-flight notification work drain before touching the list
    uacpi_kernel_wait_for_work_completion();

    handlers = obj->handlers;
    if (handler_container(handlers, handler) != UACPI_NULL) {
        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    new_handler = uacpi_kernel_alloc_zeroed(sizeof(*new_handler));
    if (uacpi_unlikely(new_handler == UACPI_NULL)) {
        /*
         * BUGFIX: this path previously returned directly, which left
         * notify_mutex held forever (deadlocking every later notify API
         * call) and leaked the object reference acquired above. Route
         * through the normal cleanup labels instead.
         */
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    new_handler->callback = handler;
    new_handler->user_context = handler_context;
    new_handler->next = handlers->notify_head;

    handlers->notify_head = new_handler;

out:
    uacpi_release_native_mutex(notify_mutex);
out_no_mutex:
    if (node != uacpi_namespace_root())
        uacpi_object_unref(obj);

    return ret;
}
/*
 * Remove a previously installed notify handler from 'node' (or the
 * root). The entry is unlinked under notify_mutex, but freed only after
 * the mutex is dropped; in-flight work is drained first, so nothing can
 * still reference it.
 */
uacpi_status uacpi_uninstall_notify_handler(
    uacpi_namespace_node *node, uacpi_notify_handler handler
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_handlers *handlers;
    uacpi_device_notify_handler *prev_handler, *containing = UACPI_NULL;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (node == uacpi_namespace_root()) {
        obj = g_uacpi_rt_ctx.root_object;
    } else {
        ret = uacpi_namespace_node_acquire_object_typed(
            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
            UACPI_OBJECT_PROCESSOR_BIT, &obj
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = uacpi_acquire_native_mutex(notify_mutex);
    if (uacpi_unlikely_error(ret))
        goto out_no_mutex;

    // Let in-flight notification work drain before unlinking
    uacpi_kernel_wait_for_work_completion();

    handlers = obj->handlers;

    containing = handler_container(handlers, handler);
    if (containing == UACPI_NULL) {
        ret = UACPI_STATUS_NOT_FOUND;
        goto out;
    }

    prev_handler = handlers->notify_head;

    // Are we the last linked handler?
    if (prev_handler == containing) {
        handlers->notify_head = containing->next;
        goto out;
    }

    // Nope, we're somewhere in the middle. Do a search.
    while (prev_handler) {
        if (prev_handler->next == containing) {
            prev_handler->next = containing->next;
            goto out;
        }

        prev_handler = prev_handler->next;
    }

out:
    uacpi_release_native_mutex(notify_mutex);
out_no_mutex:
    if (node != uacpi_namespace_root())
        uacpi_object_unref(obj);

    // ret is only a success here when 'containing' was found & unlinked
    if (uacpi_likely_success(ret))
        uacpi_free(containing, sizeof(*containing));

    return ret;
}

View File

@ -0,0 +1,265 @@
#include <uacpi/internal/opcodes.h>
#ifndef UACPI_BAREBONES_MODE
/*
 * AML opcode metadata tables. UACPI_OP / UACPI_OUT_OF_LINE_OP shape each
 * entry emitted by the UACPI_ENUMERATE_OPCODES X-macros into a
 * uacpi_op_spec initializer.
 */
#define UACPI_OP(opname, opcode, props, ...) \
    { #opname, { .decode_ops = __VA_ARGS__ }, .properties = props, .code = opcode },

#define UACPI_OUT_OF_LINE_OP(opname, opcode, out_of_line_buf, props) \
    { \
        .name = #opname, \
        { .indirect_decode_ops = out_of_line_buf }, \
        .properties = props, \
        .code = opcode, \
    },

// One spec per single-byte AML opcode, indexed directly by opcode value
static const struct uacpi_op_spec opcode_table[0x100] = {
    UACPI_ENUMERATE_OPCODES
};

// Specs for extended (two-byte) opcodes, reached via ext_op_to_idx below
static const struct uacpi_op_spec ext_opcode_table[] = {
    UACPI_ENUMERATE_EXT_OPCODES
};

// Low byte of an extended opcode
#define _(op) (op & 0x00FF)

/*
 * Maps the low byte of an extended opcode to its ext_opcode_table slot.
 * Bytes not listed default to slot 0 — presumably a catch-all/invalid
 * entry supplied by UACPI_ENUMERATE_EXT_OPCODES; verify there.
 */
static const uacpi_u8 ext_op_to_idx[0x100] = {
    [_(UACPI_AML_OP_MutexOp)] = 1, [_(UACPI_AML_OP_EventOp)] = 2,
    [_(UACPI_AML_OP_CondRefOfOp)] = 3, [_(UACPI_AML_OP_CreateFieldOp)] = 4,
    [_(UACPI_AML_OP_LoadTableOp)] = 5, [_(UACPI_AML_OP_LoadOp)] = 6,
    [_(UACPI_AML_OP_StallOp)] = 7, [_(UACPI_AML_OP_SleepOp)] = 8,
    [_(UACPI_AML_OP_AcquireOp)] = 9, [_(UACPI_AML_OP_SignalOp)] = 10,
    [_(UACPI_AML_OP_WaitOp)] = 11, [_(UACPI_AML_OP_ResetOp)] = 12,
    [_(UACPI_AML_OP_ReleaseOp)] = 13, [_(UACPI_AML_OP_FromBCDOp)] = 14,
    [_(UACPI_AML_OP_ToBCDOp)] = 15, [_(UACPI_AML_OP_UnloadOp)] = 16,
    [_(UACPI_AML_OP_RevisionOp)] = 17, [_(UACPI_AML_OP_DebugOp)] = 18,
    [_(UACPI_AML_OP_FatalOp)] = 19, [_(UACPI_AML_OP_TimerOp)] = 20,
    [_(UACPI_AML_OP_OpRegionOp)] = 21, [_(UACPI_AML_OP_FieldOp)] = 22,
    [_(UACPI_AML_OP_DeviceOp)] = 23, [_(UACPI_AML_OP_ProcessorOp)] = 24,
    [_(UACPI_AML_OP_PowerResOp)] = 25, [_(UACPI_AML_OP_ThermalZoneOp)] = 26,
    [_(UACPI_AML_OP_IndexFieldOp)] = 27, [_(UACPI_AML_OP_BankFieldOp)] = 28,
    [_(UACPI_AML_OP_DataRegionOp)] = 29,
};
/*
 * Look up the decode spec for an AML opcode, single-byte or extended.
 */
const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op op)
{
    if (op <= 0xFF)
        return &opcode_table[op];

    return &ext_opcode_table[ext_op_to_idx[_(op)]];
}
/*
 * Byte-coded parse programs interpreted by the AML parser. Each array is
 * a sequence of uacpi_parse_op bytes plus inline immediates; jump
 * targets are absolute indices into the containing program, so the
 * PARSE_FIELD_ELEMENTS users pass in the index of its first byte.
 */
#define PARSE_FIELD_ELEMENTS(parse_loop_pc) \
    /* Parse every field element found inside */ \
    UACPI_PARSE_OP_IF_HAS_DATA, 44, \
        /* Look at the first byte */ \
        UACPI_PARSE_OP_LOAD_IMM, 1, \
 \
        /* ReservedField := 0x00 PkgLength */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x00, 3, \
            UACPI_PARSE_OP_PKGLEN, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* AccessField := 0x01 AccessType AccessAttrib */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x01, 6, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* ConnectField := <0x02 NameString> | <0x02 BufferData> */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x02, 5, \
            UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \
            UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib \
         * AccessLength */ \
        UACPI_PARSE_OP_IF_LAST_EQUALS, 0x03, 8, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_LOAD_IMM, 1, \
            UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
        /* NamedField := NameSeg PkgLength */ \
 \
        /* \
         * Discard the immediate, as it's the first byte of the \
         * nameseg. We don't need it. \
         */ \
        UACPI_PARSE_OP_ITEM_POP, \
        UACPI_PARSE_OP_AML_PC_DECREMENT, \
        UACPI_PARSE_OP_CREATE_NAMESTRING, \
        UACPI_PARSE_OP_PKGLEN, \
        UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_FIELD_UNIT, \
        UACPI_PARSE_OP_JMP, parse_loop_pc, \
 \
    UACPI_PARSE_OP_INVOKE_HANDLER, \
    UACPI_PARSE_OP_END

// Field(RegionName, Flags) { FieldList }
uacpi_u8 uacpi_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(4),
};

// BankField(RegionName, BankName, BankValue, Flags) { FieldList }
uacpi_u8 uacpi_bank_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_OPERAND,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(6),
};

// IndexField(IndexName, DataName, Flags) { FieldList }
uacpi_u8 uacpi_index_field_op_decode_ops[] = {
    UACPI_PARSE_OP_TRACKED_PKGLEN,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_EXISTING_NAMESTRING,
    UACPI_PARSE_OP_LOAD_IMM, 1,
    PARSE_FIELD_ELEMENTS(5),
};

// Load(Object, Target) -> Boolean success
uacpi_u8 uacpi_load_op_decode_ops[] = {
    // Storage for the scope pointer, this is left as 0 in case of errors
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
    // Index of the table we are going to be loading to unref it later
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
    UACPI_PARSE_OP_TARGET,

    /*
     * Invoke the handler here to initialize the table. If this fails, it's
     * expected to keep the item 0 as NULL, which is checked below to return
     * false to the caller of Load.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_IF_NULL, 0, 3,
        UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
        UACPI_PARSE_OP_JMP, 16,

    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,

    /*
     * Invoke the handler a second time to initialize any AML GPE handlers that
     * might've been loaded from this table.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_STORE_TO_TARGET, 4,
    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
    UACPI_PARSE_OP_END,
};

// LoadTable(Signature, OemId, OemTableId, RootPath, ParameterPath, ParameterData)
uacpi_u8 uacpi_load_table_op_decode_ops[] = {
    // Storage for the scope pointer, this is left as 0 in case of errors
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
    // Index of the table we are going to be loading to unref it later
    UACPI_PARSE_OP_LOAD_ZERO_IMM,
    // Storage for the target pointer, this is left as 0 if none was requested
    UACPI_PARSE_OP_LOAD_ZERO_IMM,

    UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, 5,
    UACPI_PARSE_OP_IF_NOT_NULL, 4, 5,
        UACPI_PARSE_OP_STRING,
        UACPI_PARSE_OP_IMM_DECREMENT, 4,
        UACPI_PARSE_OP_JMP, 8,
    UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
    UACPI_PARSE_OP_INVOKE_HANDLER,
    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,

    /*
     * Invoke the handler a second time to block the store to target in case
     * the load above failed, as well as do any AML GPE handler initialization.
     */
    UACPI_PARSE_OP_INVOKE_HANDLER,

    // If we were given a target to store to, do the store
    UACPI_PARSE_OP_IF_NOT_NULL, 3, 3,
        UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 3, 10,

    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
    UACPI_PARSE_OP_END,
};
#define POP(x) UACPI_PARSE_OP_##x

/*
 * Human-readable names for parse ops, used by uacpi_parse_op_to_string
 * for trace/debug output.
 * FIX: the IF_LAST_NOT_NULL entry previously read "IF_NOT_NULL"
 * (copy-paste from the line above), making the two ops
 * indistinguishable in traces.
 */
static
const uacpi_char *const pop_names[UACPI_PARSE_OP_MAX + 1] = {
    [POP(END)] = "<END-OF-OP>",
    [POP(SKIP_WITH_WARN_IF_NULL)] = "SKIP_WITH_WARN_IF_NULL",
    [POP(EMIT_SKIP_WARN)] = "EMIT_SKIP_WARN",
    [POP(SIMPLE_NAME)] = "SIMPLE_NAME",
    [POP(SUPERNAME)] = "SUPERNAME",
    [POP(SUPERNAME_OR_UNRESOLVED)] = "SUPERNAME_OR_UNRESOLVED",
    [POP(TERM_ARG)] = "TERM_ARG",
    [POP(TERM_ARG_UNWRAP_INTERNAL)] = "TERM_ARG_UNWRAP_INTERNAL",
    [POP(TERM_ARG_OR_NAMED_OBJECT)] = "TERM_ARG_OR_NAMED_OBJECT",
    [POP(TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED)] = "TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED",
    [POP(OPERAND)] = "OPERAND",
    [POP(STRING)] = "STRING",
    [POP(COMPUTATIONAL_DATA)] = "COMPUTATIONAL_DATA",
    [POP(TARGET)] = "TARGET",
    [POP(PKGLEN)] = "PKGLEN",
    [POP(TRACKED_PKGLEN)] = "TRACKED_PKGLEN",
    [POP(CREATE_NAMESTRING)] = "CREATE_NAMESTRING",
    [POP(CREATE_NAMESTRING_OR_NULL_IF_LOAD)] = "CREATE_NAMESTRING_OR_NULL_IF_LOAD",
    [POP(EXISTING_NAMESTRING)] = "EXISTING_NAMESTRING",
    [POP(EXISTING_NAMESTRING_OR_NULL)] = "EXISTING_NAMESTRING_OR_NULL",
    [POP(EXISTING_NAMESTRING_OR_NULL_IF_LOAD)] = "EXISTING_NAMESTRING_OR_NULL_IF_LOAD",
    [POP(INVOKE_HANDLER)] = "INVOKE_HANDLER",
    [POP(OBJECT_ALLOC)] = "OBJECT_ALLOC",
    [POP(EMPTY_OBJECT_ALLOC)] = "EMPTY_OBJECT_ALLOC",
    [POP(OBJECT_CONVERT_TO_SHALLOW_COPY)] = "OBJECT_CONVERT_TO_SHALLOW_COPY",
    [POP(OBJECT_CONVERT_TO_DEEP_COPY)] = "OBJECT_CONVERT_TO_DEEP_COPY",
    [POP(OBJECT_ALLOC_TYPED)] = "OBJECT_ALLOC_TYPED",
    [POP(RECORD_AML_PC)] = "RECORD_AML_PC",
    [POP(LOAD_INLINE_IMM_AS_OBJECT)] = "LOAD_INLINE_IMM_AS_OBJECT",
    [POP(LOAD_INLINE_IMM)] = "LOAD_INLINE_IMM",
    [POP(LOAD_ZERO_IMM)] = "LOAD_ZERO_IMM",
    [POP(LOAD_IMM)] = "LOAD_IMM",
    [POP(LOAD_IMM_AS_OBJECT)] = "LOAD_IMM_AS_OBJECT",
    [POP(LOAD_FALSE_OBJECT)] = "LOAD_FALSE_OBJECT",
    [POP(LOAD_TRUE_OBJECT)] = "LOAD_TRUE_OBJECT",
    [POP(TRUNCATE_NUMBER)] = "TRUNCATE_NUMBER",
    [POP(TYPECHECK)] = "TYPECHECK",
    [POP(INSTALL_NAMESPACE_NODE)] = "INSTALL_NAMESPACE_NODE",
    [POP(OBJECT_TRANSFER_TO_PREV)] = "OBJECT_TRANSFER_TO_PREV",
    [POP(OBJECT_COPY_TO_PREV)] = "OBJECT_COPY_TO_PREV",
    [POP(STORE_TO_TARGET)] = "STORE_TO_TARGET",
    [POP(STORE_TO_TARGET_INDIRECT)] = "STORE_TO_TARGET_INDIRECT",
    [POP(UNREACHABLE)] = "UNREACHABLE",
    [POP(BAD_OPCODE)] = "BAD_OPCODE",
    [POP(AML_PC_DECREMENT)] = "AML_PC_DECREMENT",
    [POP(IMM_DECREMENT)] = "IMM_DECREMENT",
    [POP(ITEM_POP)] = "ITEM_POP",
    [POP(DISPATCH_METHOD_CALL)] = "DISPATCH_METHOD_CALL",
    [POP(DISPATCH_TABLE_LOAD)] = "DISPATCH_TABLE_LOAD",
    [POP(CONVERT_NAMESTRING)] = "CONVERT_NAMESTRING",
    [POP(IF_HAS_DATA)] = "IF_HAS_DATA",
    [POP(IF_NULL)] = "IF_NULL",
    [POP(IF_LAST_NULL)] = "IF_LAST_NULL",
    [POP(IF_NOT_NULL)] = "IF_NOT_NULL",
    [POP(IF_LAST_NOT_NULL)] = "IF_LAST_NOT_NULL",
    [POP(IF_LAST_EQUALS)] = "IF_LAST_EQUALS",
    [POP(IF_LAST_FALSE)] = "IF_LAST_FALSE",
    [POP(IF_LAST_TRUE)] = "IF_LAST_TRUE",
    [POP(SWITCH_TO_NEXT_IF_EQUALS)] = "SWITCH_TO_NEXT_IF_EQUALS",
    [POP(IF_SWITCHED_FROM)] = "IF_SWITCHED_FROM",
    [POP(JMP)] = "JMP",
};
/*
 * Translate a parse op into its debug name; out-of-range values yield a
 * sentinel string instead of indexing past the table.
 */
const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op)
{
    return uacpi_unlikely(op > UACPI_PARSE_OP_MAX) ?
        "<INVALID-OP>" : pop_names[op];
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,388 @@
#include <uacpi/platform/atomic.h>
#include <uacpi/internal/osi.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/kernel_api.h>
#ifndef UACPI_BAREBONES_MODE
// One _OSI interface string, either predefined (static storage) or
// installed at runtime (heap-allocated, dynamic == 1).
struct registered_interface {
    const uacpi_char *name;  // interface string matched against _OSI args
    uacpi_u8 weight;         // ordering weight for vendor (Windows) strings
    uacpi_u8 kind;           // UACPI_INTERFACE_KIND_*

    // Only applicable for predefined host interfaces
    uacpi_u8 host_type;

    // Only applicable for predefined interfaces
    uacpi_u8 disabled : 1;
    uacpi_u8 dynamic : 1;

    struct registered_interface *next;
};

// Guards every access to the interface list and handler below
static uacpi_handle interface_mutex;
static struct registered_interface *registered_interfaces;

// Host callback set via uacpi_set_interface_query_handler()
static uacpi_interface_handler interface_handler;

// Last vendor interface queried (updated outside this view); read
// atomically by uacpi_latest_queried_vendor_interface()
static uacpi_u32 latest_queried_interface;
// Builds a predefined "Windows <version>" vendor entry with its weight
#define WINDOWS(string, interface) \
    { \
        .name = "Windows "string, \
        .weight = UACPI_VENDOR_INTERFACE_WINDOWS_##interface, \
        .kind = UACPI_INTERFACE_KIND_VENDOR, \
        .host_type = 0, \
        .disabled = 0, \
        .dynamic = 0, \
        .next = UACPI_NULL \
    }

// Host feature strings start out disabled until the host opts in
// (see uacpi_enable_host_interface)
#define HOST_FEATURE(string, type) \
    { \
        .name = string, \
        .weight = 0, \
        .kind = UACPI_INTERFACE_KIND_FEATURE, \
        .host_type = UACPI_HOST_INTERFACE_##type, \
        .disabled = 1, \
        .dynamic = 0, \
        .next = UACPI_NULL, \
    }

// Static table linked into a list by uacpi_initialize_interfaces()
static struct registered_interface predefined_interfaces[] = {
    // Vendor strings
    WINDOWS("2000", 2000),
    WINDOWS("2001", XP),
    WINDOWS("2001 SP1", XP_SP1),
    WINDOWS("2001.1", SERVER_2003),
    WINDOWS("2001 SP2", XP_SP2),
    WINDOWS("2001.1 SP1", SERVER_2003_SP1),
    WINDOWS("2006", VISTA),
    WINDOWS("2006.1", SERVER_2008),
    WINDOWS("2006 SP1", VISTA_SP1),
    WINDOWS("2006 SP2", VISTA_SP2),
    WINDOWS("2009", 7),
    WINDOWS("2012", 8),
    WINDOWS("2013", 8_1),
    WINDOWS("2015", 10),
    WINDOWS("2016", 10_RS1),
    WINDOWS("2017", 10_RS2),
    WINDOWS("2017.2", 10_RS3),
    WINDOWS("2018", 10_RS4),
    WINDOWS("2018.2", 10_RS5),
    WINDOWS("2019", 10_19H1),
    WINDOWS("2020", 10_20H1),
    WINDOWS("2021", 11),
    WINDOWS("2022", 11_22H2),

    // Feature strings
    HOST_FEATURE("Module Device", MODULE_DEVICE),
    HOST_FEATURE("Processor Device", PROCESSOR_DEVICE),
    HOST_FEATURE("3.0 Thermal Model", 3_0_THERMAL_MODEL),
    HOST_FEATURE("3.0 _SCP Extensions", 3_0_SCP_EXTENSIONS),
    HOST_FEATURE("Processor Aggregator Device", PROCESSOR_AGGREGATOR_DEVICE),

    // Interpreter features
    { .name = "Extended Address Space Descriptor" },
};
/*
 * Set up _OSI support: create the guarding mutex and thread the static
 * predefined_interfaces array into a singly-linked list.
 */
uacpi_status uacpi_initialize_interfaces(void)
{
    uacpi_size idx;

    registered_interfaces = &predefined_interfaces[0];

    interface_mutex = uacpi_kernel_create_mutex();
    if (uacpi_unlikely(interface_mutex == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    // Point every array entry at its successor; the last keeps NULL
    for (idx = 1; idx < UACPI_ARRAY_SIZE(predefined_interfaces); ++idx)
        predefined_interfaces[idx - 1].next = &predefined_interfaces[idx];

    return UACPI_STATUS_OK;
}
/*
 * Undo uacpi_initialize_interfaces(): free dynamically installed
 * entries, restore predefined entries to their default enabled state,
 * and destroy the mutex so the module can be re-initialized.
 */
void uacpi_deinitialize_interfaces(void)
{
    struct registered_interface *iface, *next_iface = registered_interfaces;

    while (next_iface) {
        iface = next_iface;
        next_iface = iface->next;

        // Unlink so no stale chain survives re-initialization
        iface->next = UACPI_NULL;

        if (iface->dynamic) {
            uacpi_free_dynamic_string(iface->name);
            uacpi_free(iface, sizeof(*iface));
            continue;
        }

        // Only features are disabled by default
        iface->disabled = iface->kind == UACPI_INTERFACE_KIND_FEATURE ?
            UACPI_TRUE : UACPI_FALSE;
    }

    if (interface_mutex)
        uacpi_kernel_free_mutex(interface_mutex);

    interface_mutex = UACPI_NULL;
    interface_handler = UACPI_NULL;
    latest_queried_interface = 0;
    registered_interfaces = UACPI_NULL;
}
/*
 * Return the last vendor interface queried via _OSI (the counter is
 * updated outside this view). Read atomically since _OSI may be
 * evaluated concurrently.
 */
uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void)
{
    return uacpi_atomic_load32(&latest_queried_interface);
}
/*
 * Look an interface up by name. Caller must hold interface_mutex.
 */
static struct registered_interface *find_interface_unlocked(
    const uacpi_char *name
)
{
    struct registered_interface *cur;

    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (uacpi_strcmp(cur->name, name) == 0)
            return cur;
    }

    return UACPI_NULL;
}
/*
 * Look a predefined host-feature interface up by its host_type.
 * Caller must hold interface_mutex.
 */
static struct registered_interface *find_host_interface_unlocked(
    uacpi_host_interface type
)
{
    struct registered_interface *cur;

    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (cur->host_type == type)
            return cur;
    }

    return UACPI_NULL;
}
/*
 * Install (or re-enable) an _OSI interface string. Returns
 * ALREADY_EXISTS if the name is already registered — in that case a
 * disabled predefined entry is silently re-enabled.
 */
uacpi_status uacpi_install_interface(
    const uacpi_char *name, uacpi_interface_kind kind
)
{
    struct registered_interface *interface;
    uacpi_status ret;
    uacpi_char *name_copy;
    uacpi_size name_size;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    interface = find_interface_unlocked(name);
    if (interface != UACPI_NULL) {
        if (interface->disabled)
            interface->disabled = UACPI_FALSE;

        ret = UACPI_STATUS_ALREADY_EXISTS;
        goto out;
    }

    interface = uacpi_kernel_alloc(sizeof(*interface));
    if (uacpi_unlikely(interface == UACPI_NULL)) {
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    // Deep-copy the name: the caller's string need not outlive the call
    name_size = uacpi_strlen(name) + 1;
    name_copy = uacpi_kernel_alloc(name_size);
    if (uacpi_unlikely(name_copy == UACPI_NULL)) {
        uacpi_free(interface, sizeof(*interface));
        ret = UACPI_STATUS_OUT_OF_MEMORY;
        goto out;
    }

    uacpi_memcpy(name_copy, name, name_size);
    interface->name = name_copy;
    interface->weight = 0;
    interface->kind = kind;
    interface->host_type = 0;
    interface->disabled = 0;
    interface->dynamic = 1;

    // Push onto the head of the singly-linked list
    interface->next = registered_interfaces;
    registered_interfaces = interface;

out:
    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Remove an _OSI interface string. Dynamic entries are unlinked and
 * freed (after dropping the lock); predefined entries are only marked
 * disabled and report NOT_FOUND if they already were.
 */
uacpi_status uacpi_uninstall_interface(const uacpi_char *name)
{
    struct registered_interface *cur, *prev;
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    cur = registered_interfaces;
    prev = cur;

    ret = UACPI_STATUS_NOT_FOUND;
    while (cur) {
        if (uacpi_strcmp(cur->name, name) != 0) {
            prev = cur;
            cur = cur->next;
            continue;
        }

        if (cur->dynamic) {
            if (prev == cur) {
                // Removing the list head
                registered_interfaces = cur->next;
            } else {
                prev->next = cur->next;
            }

            // Free outside the lock; the entry is already unreachable
            uacpi_release_native_mutex(interface_mutex);
            uacpi_free_dynamic_string(cur->name);
            uacpi_free(cur, sizeof(*cur));
            return UACPI_STATUS_OK;
        }

        /*
         * If this interface was already disabled, pretend we didn't actually
         * find it and keep ret as UACPI_STATUS_NOT_FOUND. The fact that it's
         * still in the registered list is an implementation detail of
         * predefined interfaces.
         */
        if (!cur->disabled) {
            cur->disabled = UACPI_TRUE;
            ret = UACPI_STATUS_OK;
        }

        break;
    }

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Enable or disable a predefined host interface entry, identified by its
 * host_type. Returns UACPI_STATUS_NOT_FOUND if no such entry exists.
 */
static uacpi_status configure_host_interface(
    uacpi_host_interface type, uacpi_bool enabled
)
{
    uacpi_status ret;
    struct registered_interface *match;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    match = find_host_interface_unlocked(type);
    if (match != UACPI_NULL)
        match->disabled = !enabled;
    else
        ret = UACPI_STATUS_NOT_FOUND;

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
// Thin wrapper: mark the given predefined host interface as enabled.
uacpi_status uacpi_enable_host_interface(uacpi_host_interface type)
{
    return configure_host_interface(type, UACPI_TRUE);
}
// Thin wrapper: mark the given predefined host interface as disabled.
uacpi_status uacpi_disable_host_interface(uacpi_host_interface type)
{
    return configure_host_interface(type, UACPI_FALSE);
}
/*
 * Install (or clear, by passing NULL) the host callback that gets the
 * final say on every _OSI query. Only one handler may be active at a
 * time; overwriting an existing handler with another non-NULL one is
 * rejected with UACPI_STATUS_ALREADY_EXISTS.
 */
uacpi_status uacpi_set_interface_query_handler(
    uacpi_interface_handler handler
)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (interface_handler == UACPI_NULL || handler == UACPI_NULL)
        interface_handler = handler;
    else
        ret = UACPI_STATUS_ALREADY_EXISTS;

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Enable or disable every registered interface whose kind matches the
 * given kind bitmask in one pass over the list.
 */
uacpi_status uacpi_bulk_configure_interfaces(
    uacpi_interface_action action, uacpi_interface_kind kind
)
{
    uacpi_status ret;
    struct registered_interface *cur;
    uacpi_bool disable;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;

    disable = (action == UACPI_INTERFACE_ACTION_DISABLE);
    for (cur = registered_interfaces; cur != UACPI_NULL; cur = cur->next) {
        if (kind & cur->kind)
            cur->disabled = disable;
    }

    uacpi_release_native_mutex(interface_mutex);
    return ret;
}
/*
 * Back-end for the AML _OSI method: decide whether the queried interface
 * string is supported.
 *
 * Also records the highest-weight vendor interface ever queried (readable
 * via uacpi_latest_queried_vendor_interface) and lets the host's query
 * handler, if installed, override the answer. Returns an error only if
 * the interface mutex cannot be acquired.
 */
uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value)
{
    uacpi_status ret;
    struct registered_interface *interface;
    uacpi_bool is_supported = UACPI_FALSE;
    ret = uacpi_acquire_native_mutex(interface_mutex);
    if (uacpi_unlikely_error(ret))
        return ret;
    interface = find_interface_unlocked(string);
    if (interface == UACPI_NULL)
        goto out;
    // Remember the newest (highest-weight) interface the firmware asked for.
    if (interface->weight > latest_queried_interface)
        uacpi_atomic_store32(&latest_queried_interface, interface->weight);
    is_supported = !interface->disabled;
    if (interface_handler)
        is_supported = interface_handler(string, is_supported);
out:
    uacpi_release_native_mutex(interface_mutex);
    *out_value = is_supported;
    return UACPI_STATUS_OK;
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,572 @@
#include <uacpi/internal/registers.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/io.h>
#include <uacpi/internal/log.h>
#include <uacpi/platform/atomic.h>
#include <uacpi/acpi.h>
#ifndef UACPI_BAREBONES_MODE
// Spinlock protecting lazy GAS mapping and read-modify-write sequences.
static uacpi_handle g_reg_lock;
// How a register's location is described: a full ACPI GAS, or a raw
// 32-bit IO port address taken straight from the FADT.
enum register_kind {
    REGISTER_KIND_GAS,
    REGISTER_KIND_IO,
};
// How writes must treat bits that the caller is not explicitly setting.
enum register_access_kind {
    REGISTER_ACCESS_KIND_PRESERVE,
    REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
    REGISTER_ACCESS_KIND_NORMAL,
};
// Static description of one fixed-hardware register (see g_registers).
struct register_spec {
    uacpi_u8 kind;        // enum register_kind
    uacpi_u8 access_kind; // enum register_access_kind
    uacpi_u8 access_width; // only REGISTER_KIND_IO
    // A/B halves: pointers to GAS structs (or a raw IO address for
    // REGISTER_KIND_IO) inside g_uacpi_rt_ctx.
    void *accessors[2];
    uacpi_u64 write_only_mask; // bits that read back undefined
    uacpi_u64 preserve_mask;   // bits that must never be clobbered
};
/*
 * Table of every fixed-hardware register uACPI knows about, indexed by
 * enum uacpi_register. Accessors point into g_uacpi_rt_ctx (FADT copies),
 * so the table itself can be const; actual mapping happens lazily.
 */
static const struct register_spec g_registers[UACPI_REGISTER_MAX + 1] = {
    [UACPI_REGISTER_PM1_STS] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
        .accessors = {
            &g_uacpi_rt_ctx.pm1a_status_blk,
            &g_uacpi_rt_ctx.pm1b_status_blk,
        },
        .preserve_mask = ACPI_PM1_STS_IGN0_MASK,
    },
    [UACPI_REGISTER_PM1_EN] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = {
            &g_uacpi_rt_ctx.pm1a_enable_blk,
            &g_uacpi_rt_ctx.pm1b_enable_blk,
        },
    },
    [UACPI_REGISTER_PM1_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = {
            &g_uacpi_rt_ctx.fadt.x_pm1a_cnt_blk,
            &g_uacpi_rt_ctx.fadt.x_pm1b_cnt_blk,
        },
        // SLP_EN/GBL_RLS are write-only per spec and must read back as 0.
        .write_only_mask = ACPI_PM1_CNT_SLP_EN_MASK |
                           ACPI_PM1_CNT_GBL_RLS_MASK,
        .preserve_mask = ACPI_PM1_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_PM_TMR] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = { &g_uacpi_rt_ctx.fadt.x_pm_tmr_blk, },
    },
    [UACPI_REGISTER_PM2_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = { &g_uacpi_rt_ctx.fadt.x_pm2_cnt_blk, },
        .preserve_mask = ACPI_PM2_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_SLP_CNT] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
        .accessors = { &g_uacpi_rt_ctx.fadt.sleep_control_reg, },
        .write_only_mask = ACPI_SLP_CNT_SLP_EN_MASK,
        .preserve_mask = ACPI_SLP_CNT_PRESERVE_MASK,
    },
    [UACPI_REGISTER_SLP_STS] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
        .accessors = { &g_uacpi_rt_ctx.fadt.sleep_status_reg, },
        .preserve_mask = ACPI_SLP_STS_PRESERVE_MASK,
    },
    [UACPI_REGISTER_RESET] = {
        .kind = REGISTER_KIND_GAS,
        .access_kind = REGISTER_ACCESS_KIND_NORMAL,
        .accessors = { &g_uacpi_rt_ctx.fadt.reset_reg, },
    },
    [UACPI_REGISTER_SMI_CMD] = {
        // SMI_CMD is a raw IO port in the FADT, not a GAS.
        .kind = REGISTER_KIND_IO,
        .access_kind = REGISTER_ACCESS_KIND_NORMAL,
        .access_width = 1,
        .accessors = { &g_uacpi_rt_ctx.fadt.smi_cmd, },
    },
};
// Lifecycle of one mapped register half: unresolved, known-absent, or
// successfully mapped.
enum register_mapping_state {
    REGISTER_MAPPING_STATE_NONE = 0,
    REGISTER_MAPPING_STATE_NOT_NEEDED,
    REGISTER_MAPPING_STATE_MAPPED,
};
// Runtime (mutable) mapping state for the A/B halves of one register.
struct register_mapping {
    uacpi_mapped_gas mappings[2];
    uacpi_u8 states[2]; // enum register_mapping_state per half
};
// Parallel to g_registers; filled in lazily under g_reg_lock.
static struct register_mapping g_register_mappings[UACPI_REGISTER_MAX + 1];
/*
 * Map one half (idx 0 = A, idx 1 = B) of a register spec.
 * Caller must hold g_reg_lock. Transitions the half's state from NONE to
 * MAPPED, or to NOT_NEEDED when the half simply doesn't exist.
 */
static uacpi_status map_one(
    const struct register_spec *spec, struct register_mapping *mapping,
    uacpi_u8 idx
)
{
    uacpi_status ret = UACPI_STATUS_OK;
    // Already resolved (mapped or known-absent): nothing to do.
    if (mapping->states[idx] != REGISTER_MAPPING_STATE_NONE)
        return ret;
    if (spec->kind == REGISTER_KIND_GAS) {
        struct acpi_gas *gas = spec->accessors[idx];
        // A missing or zero-address GAS means this half is absent.
        if (gas == UACPI_NULL || gas->address == 0) {
            mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED;
            return ret;
        }
        ret = uacpi_map_gas_noalloc(gas, &mapping->mappings[idx]);
    } else {
        struct acpi_gas temp_gas = { 0 };
        // IO-kind registers only ever have an A half.
        if (idx != 0) {
            mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED;
            return ret;
        }
        // Synthesize a SystemIO GAS from the raw FADT port address.
        temp_gas.address_space_id = UACPI_ADDRESS_SPACE_SYSTEM_IO;
        temp_gas.address = *(uacpi_u32*)spec->accessors[0];
        temp_gas.register_bit_width = spec->access_width * 8;
        ret = uacpi_map_gas_noalloc(&temp_gas, &mapping->mappings[idx]);
    }
    if (uacpi_likely_success(ret))
        mapping->states[idx] = REGISTER_MAPPING_STATE_MAPPED;
    return ret;
}
/*
 * Lazily map both halves of a register.
 * Fast path: if both halves are already resolved (atomically observed as
 * != NONE) return without taking the lock; otherwise acquire g_reg_lock
 * and map whatever is still missing (map_one re-checks per half).
 */
static uacpi_status ensure_register_mapped(
    const struct register_spec *spec, struct register_mapping *mapping
)
{
    uacpi_status ret;
    uacpi_bool needs_mapping = UACPI_FALSE;
    uacpi_u8 state;
    uacpi_cpu_flags flags;
    state = uacpi_atomic_load8(&mapping->states[0]);
    needs_mapping |= state == REGISTER_MAPPING_STATE_NONE;
    state = uacpi_atomic_load8(&mapping->states[1]);
    needs_mapping |= state == REGISTER_MAPPING_STATE_NONE;
    if (!needs_mapping)
        return UACPI_STATUS_OK;
    flags = uacpi_kernel_lock_spinlock(g_reg_lock);
    ret = map_one(spec, mapping, 0);
    if (uacpi_unlikely_error(ret))
        goto out;
    ret = map_one(spec, mapping, 1);
out:
    uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
    return ret;
}
/*
 * Translate a register index into its spec/mapping pair, rejecting
 * out-of-range indices up front.
 */
static uacpi_status get_reg(
    uacpi_u8 idx, const struct register_spec **out_spec,
    struct register_mapping **out_mapping
)
{
    if (idx > UACPI_REGISTER_MAX)
        return UACPI_STATUS_INVALID_ARGUMENT;

    *out_mapping = &g_register_mappings[idx];
    *out_spec = &g_registers[idx];

    return UACPI_STATUS_OK;
}
// Read one register half; absent/unmapped halves are skipped silently
// (leaving *out_value untouched) and report success.
static uacpi_status do_read_one(
    struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 *out_value
)
{
    if (mapping->states[idx] == REGISTER_MAPPING_STATE_MAPPED)
        return uacpi_gas_read_mapped(&mapping->mappings[idx], out_value);

    return UACPI_STATUS_OK;
}
/*
 * Read a whole register: the A/B halves (e.g. PM1a/PM1b) are OR-ed
 * together per the ACPI model, and write-only bits are masked out of the
 * reported value.
 */
static uacpi_status do_read_register(
    const struct register_spec *reg, struct register_mapping *mapping,
    uacpi_u64 *out_value
)
{
    uacpi_u64 half_a = 0, half_b = 0;
    uacpi_status ret;

    ret = do_read_one(mapping, 0, &half_a);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = do_read_one(mapping, 1, &half_b);
    if (uacpi_unlikely_error(ret))
        return ret;

    *out_value = half_a | half_b;
    if (reg->write_only_mask)
        *out_value &= ~reg->write_only_mask;

    return UACPI_STATUS_OK;
}
/*
 * Public entry point: read a fixed-hardware register, mapping its GAS
 * descriptors on first use.
 */
uacpi_status uacpi_read_register(
    enum uacpi_register reg_enum, uacpi_u64 *out_value
)
{
    const struct register_spec *spec;
    struct register_mapping *mapping;
    uacpi_status status;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    status = get_reg(reg_enum, &spec, &mapping);
    if (uacpi_unlikely_error(status))
        return status;

    status = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(status))
        return status;

    return do_read_register(spec, mapping, out_value);
}
// Write one register half; writes to absent/unmapped halves are silently
// dropped and report success.
static uacpi_status do_write_one(
    struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 in_value
)
{
    if (mapping->states[idx] == REGISTER_MAPPING_STATE_MAPPED)
        return uacpi_gas_write_mapped(&mapping->mappings[idx], in_value);

    return UACPI_STATUS_OK;
}
/*
 * Write a value to both halves of a register, honoring its preserve
 * semantics: bits in preserve_mask are stripped from the caller's value,
 * and for PRESERVE-access registers their current hardware values are
 * read back and carried over into the final written value.
 */
static uacpi_status do_write_register(
    const struct register_spec *reg, struct register_mapping *mapping,
    uacpi_u64 in_value
)
{
    uacpi_status ret;
    if (reg->preserve_mask) {
        // Never let the caller touch preserved bits directly...
        in_value &= ~reg->preserve_mask;
        if (reg->access_kind == REGISTER_ACCESS_KIND_PRESERVE) {
            uacpi_u64 data;
            // ...instead carry over their current hardware state.
            ret = do_read_register(reg, mapping, &data);
            if (uacpi_unlikely_error(ret))
                return ret;
            in_value |= data & reg->preserve_mask;
        }
    }
    ret = do_write_one(mapping, 0, in_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    return do_write_one(mapping, 1, in_value);
}
/*
 * Public entry point: write the same value to a fixed-hardware register
 * (both halves), mapping its GAS descriptors on first use and honoring
 * the register's preserve semantics.
 */
uacpi_status uacpi_write_register(
    enum uacpi_register reg_enum, uacpi_u64 in_value
)
{
    const struct register_spec *spec;
    struct register_mapping *mapping;
    uacpi_status status;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    status = get_reg(reg_enum, &spec, &mapping);
    if (uacpi_unlikely_error(status))
        return status;

    status = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(status))
        return status;

    return do_write_register(spec, mapping, in_value);
}
/*
 * Write two independent raw values to the A and B halves of a register.
 * Unlike uacpi_write_register, no preserve handling is applied — the
 * values go straight to the mapped halves.
 */
uacpi_status uacpi_write_registers(
    enum uacpi_register reg_enum, uacpi_u64 in_value0, uacpi_u64 in_value1
)
{
    const struct register_spec *spec;
    struct register_mapping *mapping;
    uacpi_status status;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    status = get_reg(reg_enum, &spec, &mapping);
    if (uacpi_unlikely_error(status))
        return status;

    status = ensure_register_mapped(spec, mapping);
    if (uacpi_unlikely_error(status))
        return status;

    status = do_write_one(mapping, 0, in_value0);
    if (uacpi_unlikely_error(status))
        return status;

    return do_write_one(mapping, 1, in_value1);
}
// A named bitfield inside one of the fixed-hardware registers.
struct register_field {
    uacpi_u8 reg;     // enum uacpi_register this field lives in
    uacpi_u8 offset;  // bit index of the field's LSB
    uacpi_u16 mask;   // mask of the field within the register
};
/*
 * Table of every fixed-hardware register field uACPI exposes, indexed by
 * enum uacpi_register_field. Offsets/masks come from the ACPI-defined
 * PM1/SLP/PM2 register layouts.
 */
static const struct register_field g_fields[UACPI_REGISTER_FIELD_MAX + 1] = {
    [UACPI_REGISTER_FIELD_TMR_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_TMR_STS_IDX,
        .mask = ACPI_PM1_STS_TMR_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_BM_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_BM_STS_IDX,
        .mask = ACPI_PM1_STS_BM_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_GBL_STS_IDX,
        .mask = ACPI_PM1_STS_GBL_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_PWRBTN_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_PWRBTN_STS_IDX,
        .mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_SLPBTN_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_SLPBTN_STS_IDX,
        .mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_RTC_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_RTC_STS_IDX,
        .mask = ACPI_PM1_STS_RTC_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_WAK_STS] = {
        // Hardware-reduced platforms report wake via the sleep status reg.
        .reg = UACPI_REGISTER_SLP_STS,
        .offset = ACPI_SLP_STS_WAK_STS_IDX,
        .mask = ACPI_SLP_STS_WAK_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_WAK_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_WAKE_STS_IDX,
        .mask = ACPI_PM1_STS_WAKE_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_PCIEX_WAKE_STS] = {
        .reg = UACPI_REGISTER_PM1_STS,
        .offset = ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX,
        .mask = ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK,
    },
    [UACPI_REGISTER_FIELD_TMR_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_TMR_EN_IDX,
        .mask = ACPI_PM1_EN_TMR_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_GBL_EN_IDX,
        .mask = ACPI_PM1_EN_GBL_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_PWRBTN_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_PWRBTN_EN_IDX,
        .mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_SLPBTN_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_SLPBTN_EN_IDX,
        .mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_RTC_EN] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_RTC_EN_IDX,
        .mask = ACPI_PM1_EN_RTC_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS] = {
        .reg = UACPI_REGISTER_PM1_EN,
        .offset = ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX,
        .mask = ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK,
    },
    [UACPI_REGISTER_FIELD_SCI_EN] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SCI_EN_IDX,
        .mask = ACPI_PM1_CNT_SCI_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_BM_RLD] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_BM_RLD_IDX,
        .mask = ACPI_PM1_CNT_BM_RLD_MASK,
    },
    [UACPI_REGISTER_FIELD_GBL_RLS] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_GBL_RLS_IDX,
        .mask = ACPI_PM1_CNT_GBL_RLS_MASK,
    },
    [UACPI_REGISTER_FIELD_SLP_TYP] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SLP_TYP_IDX,
        .mask = ACPI_PM1_CNT_SLP_TYP_MASK,
    },
    [UACPI_REGISTER_FIELD_SLP_EN] = {
        .reg = UACPI_REGISTER_PM1_CNT,
        .offset = ACPI_PM1_CNT_SLP_EN_IDX,
        .mask = ACPI_PM1_CNT_SLP_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_SLP_TYP] = {
        .reg = UACPI_REGISTER_SLP_CNT,
        .offset = ACPI_SLP_CNT_SLP_TYP_IDX,
        .mask = ACPI_SLP_CNT_SLP_TYP_MASK,
    },
    [UACPI_REGISTER_FIELD_HWR_SLP_EN] = {
        .reg = UACPI_REGISTER_SLP_CNT,
        .offset = ACPI_SLP_CNT_SLP_EN_IDX,
        .mask = ACPI_SLP_CNT_SLP_EN_MASK,
    },
    [UACPI_REGISTER_FIELD_ARB_DIS] = {
        .reg = UACPI_REGISTER_PM2_CNT,
        .offset = ACPI_PM2_CNT_ARB_DIS_IDX,
        .mask = ACPI_PM2_CNT_ARB_DIS_MASK,
    },
};
// Allocate the spinlock that guards register mapping and RMW sequences.
uacpi_status uacpi_initialize_registers(void)
{
    g_reg_lock = uacpi_kernel_create_spinlock();
    return g_reg_lock == UACPI_NULL ?
        UACPI_STATUS_OUT_OF_MEMORY : UACPI_STATUS_OK;
}
/*
 * Tear down register state: free the lock, undo every live GAS mapping,
 * then reset all mapping state so a re-initialization starts clean.
 */
void uacpi_deinitialize_registers(void)
{
    uacpi_u8 i, half;

    if (g_reg_lock != UACPI_NULL) {
        uacpi_kernel_free_spinlock(g_reg_lock);
        g_reg_lock = UACPI_NULL;
    }

    for (i = 0; i <= UACPI_REGISTER_MAX; ++i) {
        struct register_mapping *mapping = &g_register_mappings[i];

        for (half = 0; half < 2; ++half) {
            if (mapping->states[half] == REGISTER_MAPPING_STATE_MAPPED)
                uacpi_unmap_gas_nofree(&mapping->mappings[half]);
        }
    }

    uacpi_memzero(&g_register_mappings, sizeof(g_register_mappings));
}
/*
 * Read a single field out of a fixed-hardware register: maps the parent
 * register on demand, reads it, then masks and shifts the field into the
 * low bits of *out_value.
 */
uacpi_status uacpi_read_register_field(
    enum uacpi_register_field field_enum, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_u8 field_idx = field_enum;
    const struct register_field *field;
    const struct register_spec *reg;
    struct register_mapping *mapping;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    field = &g_fields[field_idx];
    reg = &g_registers[field->reg];
    mapping = &g_register_mappings[field->reg];
    ret = ensure_register_mapped(reg, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;
    ret = do_read_register(reg, mapping, out_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    // Extract just this field, right-aligned.
    *out_value = (*out_value & field->mask) >> field->offset;
    return UACPI_STATUS_OK;
}
/*
 * Write a value into a single fixed-hardware register field, preserving
 * all other bits of the containing register.
 *
 * Write-to-clear (status) registers get no read-modify-write: writing 0
 * is a no-op, and a non-zero value writes only the field's bits so other
 * pending status bits are not accidentally acknowledged. All other
 * registers are read, have the field spliced in, and are written back,
 * with the whole sequence serialized by g_reg_lock.
 */
uacpi_status uacpi_write_register_field(
    enum uacpi_register_field field_enum, uacpi_u64 in_value
)
{
    uacpi_status ret;
    uacpi_u8 field_idx = field_enum;
    const struct register_field *field;
    const struct register_spec *reg;
    struct register_mapping *mapping;
    uacpi_u64 data;
    uacpi_cpu_flags flags;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    field = &g_fields[field_idx];
    reg = &g_registers[field->reg];
    mapping = &g_register_mappings[field->reg];

    ret = ensure_register_mapped(reg, mapping);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Position the caller's value inside the field and discard overflow.
    in_value = (in_value << field->offset) & field->mask;

    flags = uacpi_kernel_lock_spinlock(g_reg_lock);

    /*
     * BUGFIX: this previously tested `reg->kind` (enum register_kind)
     * against REGISTER_ACCESS_KIND_WRITE_TO_CLEAR (enum
     * register_access_kind), mixing two unrelated enums. That selected
     * the write-to-clear fast path for IO-kind registers and forced
     * write-to-clear registers like PM1_STS through the read-modify-write
     * path below. The access kind is what decides this.
     */
    if (reg->access_kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) {
        if (in_value == 0) {
            ret = UACPI_STATUS_OK;
            goto out;
        }

        ret = do_write_register(reg, mapping, in_value);
        goto out;
    }

    ret = do_read_register(reg, mapping, &data);
    if (uacpi_unlikely_error(ret))
        goto out;

    data &= ~field->mask;
    data |= in_value;

    ret = do_write_register(reg, mapping, data);
out:
    uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
    return ret;
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,71 @@
#include <uacpi/internal/shareable.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/platform/atomic.h>
#ifndef UACPI_BAREBONES_MODE
// Sentinel refcount marking an object whose counter underflowed; such
// objects are frozen at this value and never freed (leaked deliberately
// instead of risking use-after-free).
#define BUGGED_REFCOUNT 0xFFFFFFFF
// Initialize a shareable header with a single owning reference.
void uacpi_shareable_init(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    shareable->reference_count = 1;
}
/*
 * Check (and lazily mark) a shareable object as "bugged": a refcount of
 * zero on a still-reachable object indicates an underflow, so it is
 * pinned at BUGGED_REFCOUNT and thereafter reported as bugged.
 */
uacpi_bool uacpi_bugged_shareable(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    // NOTE(review): this first read is plain while the comparison below
    // uses an atomic load — presumably benign for the intended callers,
    // but worth confirming under concurrent access.
    if (uacpi_unlikely(shareable->reference_count == 0))
        uacpi_make_shareable_bugged(shareable);
    return uacpi_atomic_load32(&shareable->reference_count) == BUGGED_REFCOUNT;
}
// Freeze an object at the sentinel refcount; it will never be freed.
void uacpi_make_shareable_bugged(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    uacpi_atomic_store32(&obj->reference_count, BUGGED_REFCOUNT);
}
/*
 * Acquire an extra reference.
 * Returns the refcount *before* the increment (uacpi_atomic_inc32 yields
 * the post-increment value, hence the -1), or BUGGED_REFCOUNT for bugged
 * objects, which are left untouched.
 */
uacpi_u32 uacpi_shareable_ref(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    if (uacpi_unlikely(uacpi_bugged_shareable(shareable)))
        return BUGGED_REFCOUNT;
    return uacpi_atomic_inc32(&shareable->reference_count) - 1;
}
/*
 * Drop a reference.
 * Returns the refcount *before* the decrement (uacpi_atomic_dec32 yields
 * the post-decrement value, hence the +1) — so a return of 1 means the
 * last reference was just released. Bugged objects are left untouched.
 */
uacpi_u32 uacpi_shareable_unref(uacpi_handle handle)
{
    struct uacpi_shareable *shareable = handle;
    if (uacpi_unlikely(uacpi_bugged_shareable(shareable)))
        return BUGGED_REFCOUNT;
    return uacpi_atomic_dec32(&shareable->reference_count) + 1;
}
/*
 * Drop a reference and invoke do_free once the last one is gone.
 * NULL handles are ignored; bugged (underflowed) objects are deliberately
 * leaked rather than freed.
 */
void uacpi_shareable_unref_and_delete_if_last(
    uacpi_handle handle, void (*do_free)(uacpi_handle)
)
{
    if (handle == UACPI_NULL ||
        uacpi_unlikely(uacpi_bugged_shareable(handle)))
        return;

    // uacpi_shareable_unref returns the pre-decrement count, so 1 means
    // we just released the final reference.
    if (uacpi_shareable_unref(handle) == 1)
        do_free(handle);
}
// Atomic snapshot of the object's current reference count.
uacpi_u32 uacpi_shareable_refcount(uacpi_handle handle)
{
    struct uacpi_shareable *obj = handle;

    return uacpi_atomic_load32(&obj->reference_count);
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,616 @@
#include <uacpi/sleep.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/io.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/platform/arch_helpers.h>
#ifndef UACPI_BAREBONES_MODE
/*
 * Dispatch a sleep-related operation to either the hardware-reduced or
 * the full-hardware implementation, depending on the platform.
 */
#ifndef UACPI_REDUCED_HARDWARE
#define CALL_SLEEP_FN(name, state) \
    (uacpi_is_hardware_reduced() ? \
        name##_hw_reduced(state) : name##_hw_full(state))
#else
/*
 * BUGFIX: the expansion used to end with a trailing semicolon, which
 * would break any expression-context use of this macro (callers write
 * `return CALL_SLEEP_FN(...);` themselves).
 */
#define CALL_SLEEP_FN(name, state) name##_hw_reduced(state)
#endif
static uacpi_status eval_wak(uacpi_u8 state);
static uacpi_status eval_sst(uacpi_u8 value);
#ifndef UACPI_REDUCED_HARDWARE
/*
 * Program the FACS firmware waking vector(s) the firmware jumps to when
 * resuming from sleep. Succeeds as a no-op if there is no FACS.
 */
uacpi_status uacpi_set_waking_vector(
    uacpi_phys_addr addr32, uacpi_phys_addr addr64
)
{
    struct acpi_facs *facs = g_uacpi_rt_ctx.facs;
    if (facs == UACPI_NULL)
        return UACPI_STATUS_OK;
    facs->firmware_waking_vector = addr32;
    // The 64-bit wake vector doesn't exist, we're done
    if (facs->length < 32)
        return UACPI_STATUS_OK;
    // Only allow 64-bit wake vector on 1.0 and above FACS
    if (facs->version >= 1)
        facs->x_firmware_waking_vector = addr64;
    else
        facs->x_firmware_waking_vector = 0;
    return UACPI_STATUS_OK;
}
/*
 * Full-hardware (PM1-control based) sleep entry:
 *   1. clear WAK_STS, disable GPEs, clear events, arm wake GPEs,
 *   2. write SLP_TYPa/b into PM1a/PM1b CNT, then set SLP_EN,
 *   3. for S1-S3 poll WAK_STS until the machine resumes; for S4/S5 the
 *      write should never return, so spin for a while and report a
 *      hardware timeout if we're still running.
 * Uses the SLP_TYP values cached by uacpi_prepare_for_sleep_state.
 */
static uacpi_status enter_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u64 wake_status, pm1a, pm1b;
    ret = uacpi_write_register_field(
        UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
    );
    if (uacpi_unlikely_error(ret))
        return ret;
    ret = uacpi_disable_all_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;
    ret = uacpi_clear_all_events();
    if (uacpi_unlikely_error(ret))
        return ret;
    ret = uacpi_enable_all_wake_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;
    ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
    if (uacpi_unlikely_error(ret))
        return ret;
    // Build per-block control values with the cached SLP_TYPa/b inserted.
    pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
    pm1b = pm1a;
    pm1a |= g_uacpi_rt_ctx.last_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
    pm1b |= g_uacpi_rt_ctx.last_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;
    /*
     * Just like ACPICA, split writing SLP_TYP and SLP_EN to work around
     * buggy firmware that can't handle both written at the same time.
     */
    ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
    if (uacpi_unlikely_error(ret))
        return ret;
    pm1a |= ACPI_PM1_CNT_SLP_EN_MASK;
    pm1b |= ACPI_PM1_CNT_SLP_EN_MASK;
    // For states that preserve RAM contents, flush caches before sleeping.
    if (state < UACPI_SLEEP_STATE_S4)
        UACPI_ARCH_FLUSH_CPU_CACHE();
    // This write is what actually puts the machine to sleep.
    ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
    if (uacpi_unlikely_error(ret))
        return ret;
    if (state > UACPI_SLEEP_STATE_S3) {
        /*
         * We're still here, this is a bug or very slow firmware.
         * Just try spinning for a bit.
         */
        uacpi_u64 stalled_time = 0;
        // 10 seconds max
        while (stalled_time < (10 * 1000 * 1000)) {
            uacpi_kernel_stall(100);
            stalled_time += 100;
        }
        // Try one more time
        ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
        if (uacpi_unlikely_error(ret))
            return ret;
        // Nothing we can do here, give up
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }
    // S1-S3: busy-wait until the wake status bit says we're back.
    do {
        ret = uacpi_read_register_field(
            UACPI_REGISTER_FIELD_WAK_STS, &wake_status
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    } while (wake_status != 1);
    return UACPI_STATUS_OK;
}
/*
 * Full-hardware wake preparation: restore the S0 SLP_TYP values to the
 * PM1 control registers. Always returns success — all failures on this
 * path are ignored on purpose.
 */
static uacpi_status prepare_for_wake_from_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u64 pm1a, pm1b;
    UACPI_UNUSED(state);
    /*
     * Some hardware apparently relies on S0 values being written to the PM1
     * control register on wake, so do this here.
     */
    // S0 values unknown (the \_S0 lookup failed earlier): nothing to do.
    if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID)
        goto out;
    ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
    if (uacpi_unlikely_error(ret))
        goto out;
    pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
    pm1b = pm1a;
    pm1a |= g_uacpi_rt_ctx.s0_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
    pm1b |= g_uacpi_rt_ctx.s0_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;
    uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
out:
    // Errors ignored intentionally, we don't want to abort because of this
    return UACPI_STATUS_OK;
}
/*
 * Full-hardware wake sequence: invalidate the cached SLP_TYP values,
 * switch GPEs back from wake to runtime mode, run \_WAK, clear WAK_STS,
 * and update the system status indicator (_SST) around it.
 */
static uacpi_status wake_from_sleep_state_hw_full(uacpi_u8 state)
{
    uacpi_status ret;
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    // Set the status to 2 (waking) while we execute the wake method.
    eval_sst(2);
    ret = uacpi_disable_all_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;
    ret = uacpi_enable_all_runtime_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;
    eval_wak(state);
    // Apparently some BIOSes expect us to clear this, so do it
    uacpi_write_register_field(
        UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
    );
    // Now that we're awake set the status to 1 (running)
    eval_sst(1);
    return UACPI_STATUS_OK;
}
#endif
/*
 * Evaluate \_Sx for the given sleep state and extract SLP_TYPa/SLP_TYPb.
 *
 * The returned package may encode both values in a single integer
 * (SLP_TYPa in bits 0-7, SLP_TYPb in bits 8-15) or as two separate
 * integers. On any failure both outputs are set to
 * UACPI_SLEEP_TYP_INVALID.
 */
static uacpi_status get_slp_type_for_state(
    uacpi_u8 state, uacpi_u8 *a, uacpi_u8 *b
)
{
    uacpi_char path[] = "_S0";
    uacpi_status ret;
    uacpi_object *obj0, *obj1, *ret_obj = UACPI_NULL;

    // Build "_Sx" in place (state is 0-5).
    path[2] += state;

    ret = uacpi_eval_typed(
        uacpi_namespace_root(), path, UACPI_NULL,
        UACPI_OBJECT_PACKAGE_BIT, &ret_obj
    );
    if (ret != UACPI_STATUS_OK) {
        // A missing \_Sx just means the state is unsupported; anything
        // else is worth a warning.
        if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) {
            uacpi_warn("error while evaluating %s: %s\n", path,
                       uacpi_status_to_string(ret));
        } else {
            uacpi_trace("sleep state %d is not supported as %s was not found\n",
                        state, path);
        }
        goto out;
    }

    switch (ret_obj->package->count) {
    case 0:
        uacpi_error("empty package while evaluating %s!\n", path);
        ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
        goto out;
    case 1:
        obj0 = ret_obj->package->objects[0];
        if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER)) {
            uacpi_error(
                "invalid object type at pkg[0] => %s when evaluating %s\n",
                uacpi_object_type_to_string(obj0->type), path
            );
            /*
             * BUGFIX: ret was previously left as UACPI_STATUS_OK on this
             * path, so the function reported success while *a and *b were
             * never written. Fail the same way the multi-element case
             * below does.
             */
            ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
            goto out;
        }
        // Packed form: SLP_TYPa low byte, SLP_TYPb next byte.
        *a = obj0->integer;
        *b = obj0->integer >> 8;
        break;
    default:
        obj0 = ret_obj->package->objects[0];
        obj1 = ret_obj->package->objects[1];
        if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER ||
                           obj1->type != UACPI_OBJECT_INTEGER)) {
            uacpi_error(
                "invalid object type when evaluating %s: "
                "pkg[0] => %s, pkg[1] => %s\n", path,
                uacpi_object_type_to_string(obj0->type),
                uacpi_object_type_to_string(obj1->type)
            );
            ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
            goto out;
        }
        *a = obj0->integer;
        *b = obj1->integer;
        break;
    }

out:
    if (ret != UACPI_STATUS_OK) {
        *a = UACPI_SLEEP_TYP_INVALID;
        *b = UACPI_SLEEP_TYP_INVALID;
    }
    uacpi_object_unref(ret_obj);
    return ret;
}
/*
 * Evaluate an optional sleep-related method (e.g. _PTS/_WAK/_SST) with a
 * single integer argument. Absence of the method is not an error.
 */
static uacpi_status eval_sleep_helper(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u8 value
)
{
    uacpi_status ret;
    uacpi_object_array args;
    uacpi_object *arg = uacpi_create_object(UACPI_OBJECT_INTEGER);

    if (uacpi_unlikely(arg == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    arg->integer = value;
    args.objects = &arg;
    args.count = 1;

    ret = uacpi_eval(parent, path, &args, UACPI_NULL);
    if (ret == UACPI_STATUS_NOT_FOUND) {
        // Method is optional, treat its absence as success.
        ret = UACPI_STATUS_OK;
    } else if (ret != UACPI_STATUS_OK) {
        uacpi_error("error while evaluating %s: %s\n",
                    path, uacpi_status_to_string(ret));
    }

    uacpi_object_unref(arg);
    return ret;
}
// \_PTS — "prepare to sleep": tell firmware which state we're entering.
static uacpi_status eval_pts(uacpi_u8 state)
{
    return eval_sleep_helper(uacpi_namespace_root(), "_PTS", state);
}
// \_WAK — run the firmware's wake-up method for the state we left.
static uacpi_status eval_wak(uacpi_u8 state)
{
    return eval_sleep_helper(uacpi_namespace_root(), "_WAK", state);
}
// \_SI._SST — update the system status indicator.
static uacpi_status eval_sst(uacpi_u8 value)
{
    uacpi_namespace_node *si_node;

    si_node = uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SI);
    return eval_sleep_helper(si_node, "_SST", value);
}
/*
 * Map a sleep state to the matching _SST indicator value and evaluate it.
 *
 * This optional object is a control method that OSPM invokes to set the
 * system status indicator as desired.
 * Arguments:(1)
 * Arg0 - An Integer containing the system status indicator identifier:
 * 0 - No system state indication. Indicator off
 * 1 - Working
 * 2 - Waking
 * 3 - Sleeping. Used to indicate system state S1, S2, or S3
 * 4 - Sleeping with context saved to non-volatile storage
 */
static uacpi_status eval_sst_for_state(enum uacpi_sleep_state state)
{
    uacpi_u8 arg;

    if (state == UACPI_SLEEP_STATE_S0)
        arg = 1;
    else if (state == UACPI_SLEEP_STATE_S1 || state == UACPI_SLEEP_STATE_S2 ||
             state == UACPI_SLEEP_STATE_S3)
        arg = 3;
    else if (state == UACPI_SLEEP_STATE_S4)
        arg = 4;
    else if (state == UACPI_SLEEP_STATE_S5)
        arg = 0;
    else
        return UACPI_STATUS_INVALID_ARGUMENT;

    return eval_sst(arg);
}
/*
 * Prepare for a sleep transition: cache SLP_TYPa/b for the target state
 * and for S0 (used on wake), run \_PTS, and update the status indicator.
 * Must be followed by uacpi_enter_sleep_state.
 */
uacpi_status uacpi_prepare_for_sleep_state(enum uacpi_sleep_state state_enum)
{
    uacpi_u8 state = state_enum;
    uacpi_status ret;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_S5))
        return UACPI_STATUS_INVALID_ARGUMENT;
    ret = get_slp_type_for_state(
        state,
        &g_uacpi_rt_ctx.last_sleep_typ_a,
        &g_uacpi_rt_ctx.last_sleep_typ_b
    );
    if (ret != UACPI_STATUS_OK)
        return ret;
    // Result deliberately not checked: on failure the helper sets the S0
    // values to UACPI_SLEEP_TYP_INVALID, which the wake path treats as
    // "don't restore".
    ret = get_slp_type_for_state(
        0,
        &g_uacpi_rt_ctx.s0_sleep_typ_a,
        &g_uacpi_rt_ctx.s0_sleep_typ_b
    );
    ret = eval_pts(state);
    if (uacpi_unlikely_error(ret))
        return ret;
    // _SST failure is non-fatal; its status is intentionally ignored.
    eval_sst_for_state(state);
    return UACPI_STATUS_OK;
}
// Compose a hardware-reduced sleep-control value: SLP_TYP shifted into
// its field (truncated by the field mask) with SLP_EN set.
static uacpi_u8 make_hw_reduced_sleep_control(uacpi_u8 slp_typ)
{
    uacpi_u8 typ_bits;

    typ_bits = (slp_typ << ACPI_SLP_CNT_SLP_TYP_IDX) &
               ACPI_SLP_CNT_SLP_TYP_MASK;
    return typ_bits | ACPI_SLP_CNT_SLP_EN_MASK;
}
/*
 * Hardware-reduced sleep entry: write SLP_TYP|SLP_EN into the FADT sleep
 * control register, then poll WAK_STS in the sleep status register until
 * the system transitions back to working. Requires both registers to be
 * present in the FADT.
 */
static uacpi_status enter_sleep_state_hw_reduced(uacpi_u8 state)
{
    uacpi_status ret;
    uacpi_u8 sleep_control;
    uacpi_u64 wake_status;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    if (!fadt->sleep_control_reg.address || !fadt->sleep_status_reg.address)
        return UACPI_STATUS_NOT_FOUND;
    // Acknowledge any stale wake status before sleeping.
    ret = uacpi_write_register_field(
        UACPI_REGISTER_FIELD_HWR_WAK_STS,
        ACPI_SLP_STS_CLEAR
    );
    if (uacpi_unlikely_error(ret))
        return ret;
    sleep_control = make_hw_reduced_sleep_control(
        g_uacpi_rt_ctx.last_sleep_typ_a
    );
    // For states that preserve RAM contents, flush caches before sleeping.
    if (state < UACPI_SLEEP_STATE_S4)
        UACPI_ARCH_FLUSH_CPU_CACHE();
    /*
     * To put the system into a sleep state, software will write the HW-reduced
     * Sleep Type value (obtained from the \_Sx object in the DSDT) and the
     * SLP_EN bit to the sleep control register.
     */
    ret = uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control);
    if (uacpi_unlikely_error(ret))
        return ret;
    /*
     * The OSPM then polls the WAK_STS bit of the SLEEP_STATUS_REG waiting for
     * it to be one (1), indicating that the system has been transitioned
     * back to the Working state.
     */
    do {
        ret = uacpi_read_register_field(
            UACPI_REGISTER_FIELD_HWR_WAK_STS, &wake_status
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    } while (wake_status != 1);
    return UACPI_STATUS_OK;
}
/*
 * Hardware-reduced wake preparation: write the S0 sleep-type value back
 * to the sleep control register, if it is known. Errors are ignored on
 * purpose — this is best-effort and always reports success.
 */
static uacpi_status prepare_for_wake_from_sleep_state_hw_reduced(uacpi_u8 state)
{
    UACPI_UNUSED(state);

    if (g_uacpi_rt_ctx.s0_sleep_typ_a != UACPI_SLEEP_TYP_INVALID) {
        uacpi_u8 sleep_control = make_hw_reduced_sleep_control(
            g_uacpi_rt_ctx.s0_sleep_typ_a
        );

        uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control);
    }

    return UACPI_STATUS_OK;
}
/*
 * Hardware-reduced wake sequence: invalidate the cached SLP_TYP values,
 * run \_WAK, clear the HW-reduced wake status, and update the system
 * status indicator (_SST) around it.
 */
static uacpi_status wake_from_sleep_state_hw_reduced(uacpi_u8 state)
{
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    // Set the status to 2 (waking) while we execute the wake method.
    eval_sst(2);
    eval_wak(state);
    // Apparently some BIOSes expect us to clear this, so do it
    uacpi_write_register_field(
        UACPI_REGISTER_FIELD_HWR_WAK_STS, ACPI_SLP_STS_CLEAR
    );
    // Now that we're awake set the status to 1 (running)
    eval_sst(1);
    return UACPI_STATUS_OK;
}
/*
 * Enter the sleep state previously set up by uacpi_prepare_for_sleep_state
 * (which caches SLP_TYPa/b). Validates the state and the cached values,
 * then dispatches to the full-hardware or hardware-reduced implementation.
 */
uacpi_status uacpi_enter_sleep_state(enum uacpi_sleep_state state_enum)
{
    uacpi_u8 state = state_enum;
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
    if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    // Catches both missing preparation (values still INVALID) and bogus
    // \_Sx data.
    if (uacpi_unlikely(g_uacpi_rt_ctx.last_sleep_typ_a > ACPI_SLP_TYP_MAX ||
                       g_uacpi_rt_ctx.last_sleep_typ_b > ACPI_SLP_TYP_MAX)) {
        uacpi_error("invalid SLP_TYP values: 0x%02X:0x%02X\n",
                    g_uacpi_rt_ctx.last_sleep_typ_a,
                    g_uacpi_rt_ctx.last_sleep_typ_b);
        return UACPI_STATUS_AML_BAD_ENCODING;
    }
    return CALL_SLEEP_FN(enter_sleep_state, state);
}
/*
 * First stage of waking: restore hardware sleep-control state before any
 * AML runs. Dispatches to the full/reduced hardware implementation.
 */
uacpi_status uacpi_prepare_for_wake_from_sleep_state(
    uacpi_sleep_state state_enum
)
{
    uacpi_u8 target = state_enum;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

    if (uacpi_unlikely(target > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    return CALL_SLEEP_FN(prepare_for_wake_from_sleep_state, target);
}
/*
 * Second stage of waking: run the AML wake methods and restore event
 * state. Dispatches to the full/reduced hardware implementation.
 */
uacpi_status uacpi_wake_from_sleep_state(
    uacpi_sleep_state state_enum
)
{
    uacpi_u8 target = state_enum;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

    if (uacpi_unlikely(target > UACPI_SLEEP_STATE_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    return CALL_SLEEP_FN(wake_from_sleep_state, target);
}
/*
 * Reset the system via the FADT reset register, supporting SystemIO,
 * SystemMemory and PCI-config reset registers. If the write appears to
 * succeed but the machine keeps running, spin ~1s and report a hardware
 * timeout. Returns NOT_FOUND when the FADT advertises no reset register.
 */
uacpi_status uacpi_reboot(void)
{
    uacpi_status ret;
    uacpi_handle pci_dev = UACPI_NULL, io_handle = UACPI_NULL;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    struct acpi_gas *reset_reg = &fadt->reset_reg;
    /*
     * Allow restarting earlier than namespace load so that the kernel can
     * use this in case of some initialization error.
     */
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (!(fadt->flags & ACPI_RESET_REG_SUP) || !reset_reg->address)
        return UACPI_STATUS_NOT_FOUND;
    switch (reset_reg->address_space_id) {
    case UACPI_ADDRESS_SPACE_SYSTEM_IO:
        /*
         * For SystemIO we don't do any checking, and we ignore bit width
         * because that's what NT does.
         */
        ret = uacpi_kernel_io_map(reset_reg->address, 1, &io_handle);
        if (uacpi_unlikely_error(ret))
            return ret;
        ret = uacpi_kernel_io_write8(io_handle, 0, fadt->reset_value);
        break;
    case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY:
        ret = uacpi_write_register(UACPI_REGISTER_RESET, fadt->reset_value);
        break;
    case UACPI_ADDRESS_SPACE_PCI_CONFIG: {
        uacpi_pci_address address = { 0 };
        // Bus is assumed to be 0 here
        // GAS address encodes device in bits 32-47, function in 16-31,
        // register offset in 0-15.
        address.segment = 0;
        address.bus = 0;
        address.device = (reset_reg->address >> 32) & 0xFF;
        address.function = (reset_reg->address >> 16) & 0xFF;
        ret = uacpi_kernel_pci_device_open(address, &pci_dev);
        if (uacpi_unlikely_error(ret))
            break;
        ret = uacpi_kernel_pci_write8(
            pci_dev, reset_reg->address & 0xFFFF, fadt->reset_value
        );
        break;
    }
    default:
        uacpi_warn(
            "unable to perform a reset: unsupported address space '%s' (%d)\n",
            uacpi_address_space_to_string(reset_reg->address_space_id),
            reset_reg->address_space_id
        );
        ret = UACPI_STATUS_UNIMPLEMENTED;
    }
    if (ret == UACPI_STATUS_OK) {
        /*
         * This should've worked but we're still here.
         * Spin for a bit then give up.
         */
        uacpi_u64 stalled_time = 0;
        while (stalled_time < (1000 * 1000)) {
            uacpi_kernel_stall(100);
            stalled_time += 100;
        }
        uacpi_error("reset timeout\n");
        ret = UACPI_STATUS_HARDWARE_TIMEOUT;
    }
    // Release whatever handle the chosen path opened.
    if (pci_dev != UACPI_NULL)
        uacpi_kernel_pci_device_close(pci_dev);
    if (io_handle != UACPI_NULL)
        uacpi_kernel_io_unmap(io_handle);
    return ret;
}
#endif // !UACPI_BAREBONES_MODE

View File

@ -0,0 +1,728 @@
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/utilities.h>
#ifdef UACPI_USE_BUILTIN_STRING
#ifndef uacpi_memcpy
/*
 * Freestanding memcpy fallback: plain forward byte copy.
 * Regions must not overlap; returns dest.
 */
void *uacpi_memcpy(void *dest, const void *src, uacpi_size count)
{
    uacpi_char *out = dest;
    const uacpi_char *in = src;
    uacpi_size i;

    for (i = 0; i < count; ++i)
        out[i] = in[i];

    return dest;
}
#endif
#ifndef uacpi_memmove
/*
 * Freestanding memmove fallback: byte copy that tolerates overlapping
 * regions by picking the copy direction based on pointer order.
 */
void *uacpi_memmove(void *dest, const void *src, uacpi_size count)
{
    uacpi_char *cd = dest;
    const uacpi_char *cs = src;

    if (src < dest) {
        // dest overlaps the tail of src: copy backwards to avoid clobbering
        cs += count;
        cd += count;
        while (count--)
            *--cd = *--cs;
    } else {
        while (count--)
            *cd++ = *cs++;
    }
    return dest;
}
#endif
#ifndef uacpi_memset
/*
 * Freestanding memset fallback: fill count bytes of dest with the low
 * byte of ch; returns dest.
 */
void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count)
{
    uacpi_u8 *out = dest;
    uacpi_u8 byte = ch;

    for (; count != 0; --count)
        *out++ = byte;

    return dest;
}
#endif
#ifndef uacpi_memcmp
/*
 * Freestanding memcmp fallback: compare count bytes as unsigned values,
 * returning the difference of the first mismatching pair, else 0.
 */
uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count)
{
    const uacpi_u8 *l = lhs;
    const uacpi_u8 *r = rhs;

    while (count--) {
        if (*l != *r)
            return *l - *r;
        l++;
        r++;
    }
    return 0;
}
#endif
#endif // UACPI_USE_BUILTIN_STRING
#ifndef uacpi_strlen
// Freestanding strlen fallback: number of bytes before the terminating NUL.
uacpi_size uacpi_strlen(const uacpi_char *str)
{
    uacpi_size len = 0;

    while (str[len] != '\0')
        len++;

    return len;
}
#endif
#ifndef UACPI_BAREBONES_MODE
#ifndef uacpi_strnlen
// Freestanding strnlen fallback: like strlen but never looks past max bytes.
uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max)
{
    uacpi_size len = 0;

    while (len < max && str[len] != '\0')
        len++;

    return len;
}
#endif
#ifndef uacpi_strcmp
/*
 * Freestanding strcmp fallback. The cucp cast compares bytes as unsigned
 * values, which is what a conforming strcmp must do regardless of the
 * platform's char signedness.
 */
uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs)
{
    uacpi_size i = 0;
    typedef const uacpi_u8 *cucp;

    while (lhs[i] && rhs[i]) {
        if (lhs[i] != rhs[i])
            return *(cucp)&lhs[i] - *(cucp)&rhs[i];
        i++;
    }

    // One (or both) strings ended; the difference includes the NUL
    return *(cucp)&lhs[i] - *(cucp)&rhs[i];
}
#endif
/*
 * Copy min(src_size, dst_size) bytes from src into dst, then zero-fill
 * any remaining tail of dst.
 */
void uacpi_memcpy_zerout(void *dst, const void *src,
                         uacpi_size dst_size, uacpi_size src_size)
{
    uacpi_size bytes_to_copy = UACPI_MIN(src_size, dst_size);

    if (bytes_to_copy)
        uacpi_memcpy(dst, src, bytes_to_copy);

    if (dst_size > bytes_to_copy)
        uacpi_memzero((uacpi_u8 *)dst + bytes_to_copy, dst_size - bytes_to_copy);
}
/*
 * Return the 1-based index of the least significant set bit of value,
 * or 0 if value is 0 (same contract as ffsll). Three implementations:
 * MSVC intrinsics, a portable loop for Watcom, and GCC/Clang builtins.
 */
uacpi_u8 uacpi_bit_scan_forward(uacpi_u64 value)
{
#if defined(_MSC_VER) && !defined(__clang__)
    unsigned char ret;
    unsigned long index;

#ifdef _WIN64
    ret = _BitScanForward64(&index, value);
    if (ret == 0)
        return 0;

    return (uacpi_u8)index + 1;
#else
    // 32-bit MSVC: scan the low dword first, then the high dword
    ret = _BitScanForward(&index, value);
    if (ret == 0) {
        ret = _BitScanForward(&index, value >> 32);
        if (ret == 0)
            return 0;

        return (uacpi_u8)index + 33;
    }

    return (uacpi_u8)index + 1;
#endif
#elif defined(__WATCOMC__)
    // TODO: Use compiler intrinsics or inline ASM here
    uacpi_u8 index;
    uacpi_u64 mask = 1;

    for (index = 1; index <= 64; index++, mask <<= 1) {
        if (value & mask) {
            return index;
        }
    }

    return 0;
#else
    return __builtin_ffsll(value);
#endif
}
/*
 * Return the 1-based index of the most significant set bit of value,
 * or 0 if value is 0. Mirrors uacpi_bit_scan_forward's three backends.
 */
uacpi_u8 uacpi_bit_scan_backward(uacpi_u64 value)
{
#if defined(_MSC_VER) && !defined(__clang__)
    unsigned char ret;
    unsigned long index;

#ifdef _WIN64
    ret = _BitScanReverse64(&index, value);
    if (ret == 0)
        return 0;

    return (uacpi_u8)index + 1;
#else
    // 32-bit MSVC: scan the high dword first, then the low dword
    ret = _BitScanReverse(&index, value >> 32);
    if (ret == 0) {
        ret = _BitScanReverse(&index, value);
        if (ret == 0)
            return 0;

        return (uacpi_u8)index + 1;
    }

    return (uacpi_u8)index + 33;
#endif
#elif defined(__WATCOMC__)
    // TODO: Use compiler intrinsics or inline ASM here
    uacpi_u8 index;
    uacpi_u64 mask = (1ull << 63);

    for (index = 64; index > 0; index--, mask >>= 1) {
        if (value & mask) {
            return index;
        }
    }

    return 0;
#else
    // __builtin_clzll is undefined for 0, so guard explicitly
    if (value == 0)
        return 0;

    return 64 - __builtin_clzll(value);
#endif
}
#ifndef UACPI_NATIVE_ALLOC_ZEROED
/*
 * Zeroed-allocation fallback for hosts without a native calloc-like
 * primitive: allocate via the kernel hook, then clear the block.
 * Returns UACPI_NULL on allocation failure.
 */
void *uacpi_builtin_alloc_zeroed(uacpi_size size)
{
    void *ptr;

    ptr = uacpi_kernel_alloc(size);
    if (uacpi_unlikely(ptr == UACPI_NULL))
        return ptr;

    uacpi_memzero(ptr, size);
    return ptr;
}
#endif
#endif // !UACPI_BAREBONES_MODE
#ifndef uacpi_vsnprintf
/*
 * Output cursor for the formatter. bytes_written may exceed capacity:
 * out-of-bounds bytes are dropped but still counted, which yields the
 * standard snprintf "would-have-written" return value.
 */
struct fmt_buf_state {
    uacpi_char *buffer;
    uacpi_size capacity;
    uacpi_size bytes_written;
};

// Parsed state of one % conversion specification.
struct fmt_spec {
    uacpi_u8 is_signed : 1;
    uacpi_u8 prepend : 1;        // emit prepend_char ('+' or ' ') before the value
    uacpi_u8 uppercase : 1;      // uppercase hex digits / '0X' prefix
    uacpi_u8 left_justify : 1;   // '-' flag
    uacpi_u8 alternate_form : 1; // '#' flag
    uacpi_u8 has_precision : 1;
    uacpi_char pad_char;         // ' ' by default, '0' with the zero flag
    uacpi_char prepend_char;
    uacpi_u64 min_width;
    uacpi_u64 precision;
    uacpi_u32 base;              // 8, 10 or 16
};
/*
 * Append one character, dropping it if the buffer is already full while
 * still counting it toward bytes_written.
 */
static void write_one(struct fmt_buf_state *fb_state, uacpi_char c)
{
    uacpi_size pos = fb_state->bytes_written++;

    if (pos < fb_state->capacity)
        fb_state->buffer[pos] = c;
}
/*
 * Append count characters, truncating the copy to the space left in the
 * buffer but always advancing bytes_written by the full count.
 */
static void write_many(
    struct fmt_buf_state *fb_state, const uacpi_char *string, uacpi_size count
)
{
    if (fb_state->bytes_written < fb_state->capacity) {
        uacpi_size count_to_write;

        count_to_write = UACPI_MIN(
            count, fb_state->capacity - fb_state->bytes_written
        );
        uacpi_memcpy(
            &fb_state->buffer[fb_state->bytes_written], string, count_to_write
        );
    }

    fb_state->bytes_written += count;
}
// Map a value in [0, 15] to its hex digit, upper- or lowercase.
static uacpi_char hex_char(uacpi_bool upper, uacpi_u64 value)
{
    static const uacpi_char upper_hex[] = "0123456789ABCDEF";
    static const uacpi_char lower_hex[] = "0123456789abcdef";
    const uacpi_char *digits = upper ? upper_hex : lower_hex;

    return digits[value];
}
/*
 * Emit padding so that a repr_size-byte value reaches fm->min_width.
 * Left-justified output is always padded with spaces; otherwise pad_char
 * (space or '0') is used.
 */
static void write_padding(
    struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_size repr_size
)
{
    uacpi_u64 mw = fm->min_width;

    if (mw <= repr_size)
        return;

    mw -= repr_size;
    while (mw--)
        write_one(fb_state, fm->left_justify ? ' ' : fm->pad_char);
}
// Scratch space for one integer's textual representation (incl. prefix)
#define REPR_BUFFER_SIZE 32

/*
 * Render one integer per the parsed spec: optional sign/prepend char,
 * optional 0x/0 prefix, padding and justification. Digits are produced
 * least-significant first into the tail of a scratch buffer.
 */
static void write_integer(
    struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_u64 value
)
{
    uacpi_char repr_buffer[REPR_BUFFER_SIZE];
    uacpi_size index = REPR_BUFFER_SIZE;
    uacpi_u64 remainder;
    uacpi_char repr;
    uacpi_bool negative = UACPI_FALSE;
    uacpi_size repr_size;

    if (fm->is_signed) {
        uacpi_i64 as_ll = value;

        if (as_ll < 0) {
            value = -as_ll;
            negative = UACPI_TRUE;
        }
    }

    // Sign/prepend char goes out before any padding
    if (fm->prepend || negative)
        write_one(fb_state, negative ? '-' : fm->prepend_char);

    while (value) {
        remainder = value % fm->base;
        value /= fm->base;

        if (fm->base == 16) {
            repr = hex_char(fm->uppercase, remainder);
        } else if (fm->base == 8 || fm->base == 10) {
            repr = remainder + '0';
        } else {
            repr = '?';
        }

        repr_buffer[--index] = repr;
    }
    repr_size = REPR_BUFFER_SIZE - index;

    // A zero value produced no digits above; emit a single '0'
    if (repr_size == 0) {
        repr_buffer[--index] = '0';
        repr_size = 1;
    }

    if (fm->alternate_form) {
        if (fm->base == 16) {
            repr_buffer[--index] = fm->uppercase ? 'X' : 'x';
            repr_buffer[--index] = '0';
            repr_size += 2;
        } else if (fm->base == 8) {
            repr_buffer[--index] = '0';
            repr_size += 1;
        }
    }

    if (fm->left_justify) {
        write_many(fb_state, &repr_buffer[index], repr_size);
        write_padding(fb_state, fm, repr_size);
    } else {
        write_padding(fb_state, fm, repr_size);
        write_many(fb_state, &repr_buffer[index], repr_size);
    }
}
/*
 * True if string contains at least `characters` bytes before the NUL.
 * NOTE(review): callers appear to pass characters >= 1 (token lengths);
 * a value of 0 wraps the pre-decrement and degenerates to scanning the
 * whole string — confirm callers never pass 0.
 */
static uacpi_bool string_has_at_least(
    const uacpi_char *string, uacpi_size characters
)
{
    while (*string) {
        if (--characters == 0)
            return UACPI_TRUE;

        string++;
    }

    return UACPI_FALSE;
}
/*
 * Advance *string past a run of decimal digits, reporting its length in
 * *out_size. Returns UACPI_FALSE (with *out_size untouched) if the first
 * character is not a digit.
 */
static uacpi_bool consume_digits(
    const uacpi_char **string, uacpi_size *out_size
)
{
    uacpi_size size = 0;

    for (;;) {
        char c = **string;

        if (c < '0' || c > '9')
            break;

        size++;
        *string += 1;
    }

    if (size == 0)
        return UACPI_FALSE;

    *out_size = size;
    return UACPI_TRUE;
}
// Whether a missing number is acceptable (MAYBE) or a hard error (MUST).
enum parse_number_mode {
    PARSE_NUMBER_MODE_MAYBE,
    PARSE_NUMBER_MODE_MUST,
};

/*
 * Parse a decimal number at *fmt, advancing past its digits.
 * With no digits present, succeeds only in MAYBE mode (leaving *out_value
 * untouched). Otherwise *out_value receives the converted value.
 */
static uacpi_bool parse_number(
    const uacpi_char **fmt, enum parse_number_mode mode, uacpi_u64 *out_value
)
{
    uacpi_status ret;
    uacpi_size num_digits;
    const uacpi_char *digits = *fmt;

    if (!consume_digits(fmt, &num_digits))
        return mode != PARSE_NUMBER_MODE_MUST;

    ret = uacpi_string_to_integer(digits, num_digits, UACPI_BASE_DEC, out_value);
    return ret == UACPI_STATUS_OK;
}
/*
 * If *string begins with token, advance past it and return UACPI_TRUE;
 * otherwise leave the cursor untouched.
 */
static uacpi_bool consume(const uacpi_char **string, const uacpi_char *token)
{
    uacpi_size token_size;

    token_size = uacpi_strlen(token);

    // Guard against reading past the end of *string in the memcmp below
    if (!string_has_at_least(*string, token_size))
        return UACPI_FALSE;

    if (!uacpi_memcmp(*string, token, token_size)) {
        *string += token_size;
        return UACPI_TRUE;
    }

    return UACPI_FALSE;
}
// True if c appears in the NUL-terminated character list.
static uacpi_bool is_one_of(uacpi_char c, const uacpi_char *list)
{
    while (*list != '\0') {
        if (*list == c)
            return UACPI_TRUE;

        list++;
    }

    return UACPI_FALSE;
}
/*
 * If the next character of *string is one of `list`, consume it, store it
 * in *consumed_char and return UACPI_TRUE. At end-of-string or on a
 * non-matching character the cursor is left untouched.
 */
static uacpi_bool consume_one_of(
    const uacpi_char **string, const uacpi_char *list, uacpi_char *consumed_char
)
{
    uacpi_char c = **string;

    if (!c)
        return UACPI_FALSE;

    if (is_one_of(c, list)) {
        *consumed_char = c;
        *string += 1;
        return UACPI_TRUE;
    }

    return UACPI_FALSE;
}
// Numeric base implied by a conversion specifier (x/X -> 16, o -> 8, else 10).
static uacpi_u32 base_from_specifier(uacpi_char specifier)
{
    if (specifier == 'x' || specifier == 'X')
        return 16;

    if (specifier == 'o')
        return 8;

    return 10;
}
// Only 'X' requests uppercase output.
static uacpi_bool is_uppercase_specifier(uacpi_char specifier)
{
    return specifier == 'X' ? UACPI_TRUE : UACPI_FALSE;
}
/*
 * Find the next '%' in fmt. Returns a pointer to it (or UACPI_NULL if
 * none) and stores the number of literal characters preceding it in
 * *offset.
 */
static const uacpi_char *find_next_conversion(
    const uacpi_char *fmt, uacpi_size *offset
)
{
    *offset = 0;

    while (*fmt) {
        if (*fmt == '%')
            return fmt;

        fmt++;
        *offset += 1;
    }

    return UACPI_NULL;
}
/*
 * Minimal vsnprintf for freestanding builds. Supports the flags
 * '+- 0#', '*' or numeric width/precision, length modifiers hh/h/l/ll/z
 * and the conversions %c %s %p %d %i %o %x %X %u. Always NUL-terminates
 * when capacity > 0 and returns the number of characters that would have
 * been written (snprintf semantics).
 */
uacpi_i32 uacpi_vsnprintf(
    uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt,
    uacpi_va_list vlist
)
{
    struct fmt_buf_state fb_state = { 0 };
    uacpi_u64 value;
    const uacpi_char *next_conversion;
    uacpi_size next_offset;
    uacpi_char flag;

    fb_state.buffer = buffer;
    fb_state.capacity = capacity;
    fb_state.bytes_written = 0;

    while (*fmt) {
        struct fmt_spec fm = {
            .pad_char = ' ',
            .base = 10,
        };

        // Copy the literal run up to the next '%' verbatim
        next_conversion = find_next_conversion(fmt, &next_offset);

        if (next_offset)
            write_many(&fb_state, fmt, next_offset);

        if (!next_conversion)
            break;

        fmt = next_conversion;
        if (consume(&fmt, "%%")) {
            write_one(&fb_state, '%');
            continue;
        }

        // consume %
        fmt++;

        // Flags may appear in any order and repeat
        while (consume_one_of(&fmt, "+- 0#", &flag)) {
            switch (flag) {
            case '+':
            case ' ':
                fm.prepend = UACPI_TRUE;
                fm.prepend_char = flag;
                continue;
            case '-':
                fm.left_justify = UACPI_TRUE;
                continue;
            case '0':
                fm.pad_char = '0';
                continue;
            case '#':
                fm.alternate_form = UACPI_TRUE;
                continue;
            default:
                return -1;
            }
        }

        // Field width: '*' pulls it from the argument list
        if (consume(&fmt, "*")) {
            fm.min_width = uacpi_va_arg(vlist, int);
        } else if (!parse_number(&fmt, PARSE_NUMBER_MODE_MAYBE, &fm.min_width)) {
            return -1;
        }

        // Optional precision: '.' must be followed by '*' or digits
        if (consume(&fmt, ".")) {
            fm.has_precision = UACPI_TRUE;

            if (consume(&fmt, "*")) {
                fm.precision = uacpi_va_arg(vlist, int);
            } else {
                if (!parse_number(&fmt, PARSE_NUMBER_MODE_MUST, &fm.precision))
                    return -1;
            }
        }

        flag = 0;

        if (consume(&fmt, "c")) {
            uacpi_char c = uacpi_va_arg(vlist, int);
            write_one(&fb_state, c);
            continue;
        }

        if (consume(&fmt, "s")) {
            const uacpi_char *string = uacpi_va_arg(vlist, uacpi_char*);
            uacpi_size i;

            if (uacpi_unlikely(string == UACPI_NULL))
                string = "<null>";

            // Precision caps the number of characters emitted for %s
            for (i = 0; (!fm.has_precision || i < fm.precision) && string[i]; ++i)
                write_one(&fb_state, string[i]);
            while (i++ < fm.min_width)
                write_one(&fb_state, ' ');
            continue;
        }

        // %p: fixed-width zero-padded hex pointer
        if (consume(&fmt, "p")) {
            value = (uacpi_uintptr)uacpi_va_arg(vlist, void*);
            fm.base = 16;
            fm.min_width = UACPI_POINTER_SIZE * 2;
            fm.pad_char = '0';
            goto write_int;
        }

        // Length modifiers; 'z' maps to whichever of long/long long
        // matches uacpi_size on this target
        if (consume(&fmt, "hh")) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = (signed char)uacpi_va_arg(vlist, int);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = (unsigned char)uacpi_va_arg(vlist, int);
            } else {
                return -1;
            }
            goto write_int;
        }

        if (consume(&fmt, "h")) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = (signed short)uacpi_va_arg(vlist, int);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = (unsigned short)uacpi_va_arg(vlist, int);
            } else {
                return -1;
            }
            goto write_int;
        }

        if (consume(&fmt, "ll") ||
            (sizeof(uacpi_size) == sizeof(long long) && consume(&fmt, "z"))) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = uacpi_va_arg(vlist, long long);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = uacpi_va_arg(vlist, unsigned long long);
            } else {
                return -1;
            }
            goto write_int;
        }

        if (consume(&fmt, "l") ||
            (sizeof(uacpi_size) == sizeof(long) && consume(&fmt, "z"))) {
            if (consume(&fmt, "d") || consume(&fmt, "i")) {
                value = uacpi_va_arg(vlist, long);
                fm.is_signed = UACPI_TRUE;
            } else if (consume_one_of(&fmt, "oxXu", &flag)) {
                value = uacpi_va_arg(vlist, unsigned long);
            } else {
                return -1;
            }
            goto write_int;
        }

        // No length modifier: plain int-sized conversions
        if (consume(&fmt, "d") || consume(&fmt, "i")) {
            value = uacpi_va_arg(vlist, uacpi_i32);
            fm.is_signed = UACPI_TRUE;
        } else if (consume_one_of(&fmt, "oxXu", &flag)) {
            value = uacpi_va_arg(vlist, uacpi_u32);
        } else {
            return -1;
        }

    write_int:
        if (flag != 0) {
            fm.base = base_from_specifier(flag);
            fm.uppercase = is_uppercase_specifier(flag);
        }

        write_integer(&fb_state, &fm, value);
    }

    // NUL-terminate within the buffer even when output was truncated
    if (fb_state.capacity) {
        uacpi_size last_char;

        last_char = UACPI_MIN(fb_state.bytes_written, fb_state.capacity - 1);
        fb_state.buffer[last_char] = '\0';
    }

    return fb_state.bytes_written;
}
#endif
#ifndef uacpi_snprintf
// Variadic front-end for uacpi_vsnprintf; same return semantics.
uacpi_i32 uacpi_snprintf(
    uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ...
)
{
    uacpi_va_list vlist;
    uacpi_i32 ret;

    uacpi_va_start(vlist, fmt);
    ret = uacpi_vsnprintf(buffer, capacity, fmt, vlist);
    uacpi_va_end(vlist);

    return ret;
}
#endif
#ifndef UACPI_FORMATTED_LOGGING
/*
 * Plain (non-formatted-logging) log entry point: formats into a
 * fixed-size stack buffer and hands the finished string to the kernel.
 */
void uacpi_log(uacpi_log_level lvl, const uacpi_char *str, ...)
{
    uacpi_char buf[UACPI_PLAIN_LOG_BUFFER_SIZE];
    int ret;

    uacpi_va_list vlist;
    uacpi_va_start(vlist, str);

    ret = uacpi_vsnprintf(buf, sizeof(buf), str, vlist);
    if (uacpi_unlikely(ret < 0))
        return;

    /*
     * If this log message is too large for the configured buffer size, cut off
     * the end and transform into "...\n" to indicate that it didn't fit and
     * prevent the newline from being truncated.
     */
    if (uacpi_unlikely(ret >= UACPI_PLAIN_LOG_BUFFER_SIZE)) {
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 5] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 4] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 3] = '.';
        buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 2] = '\n';
    }

    uacpi_kernel_log(lvl, buf);

    uacpi_va_end(vlist);
}
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,998 @@
#include <uacpi/uacpi.h>
#include <uacpi/acpi.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/tables.h>
#include <uacpi/internal/interpreter.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/internal/notify.h>
#include <uacpi/internal/osi.h>
#include <uacpi/internal/registers.h>
// Global runtime state shared by the entire library.
struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 };

// Set the log verbosity; 0 selects the compile-time default level.
void uacpi_context_set_log_level(uacpi_log_level lvl)
{
    if (lvl == 0)
        lvl = UACPI_DEFAULT_LOG_LEVEL;

    g_uacpi_rt_ctx.log_level = lvl;
}
/*
 * Ensure a log level is configured and print the library version banner
 * exactly once per boot (idempotent across repeated calls).
 */
void uacpi_logger_initialize(void)
{
    static uacpi_bool version_printed = UACPI_FALSE;

    if (g_uacpi_rt_ctx.log_level == 0)
        uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL);

    if (!version_printed) {
        version_printed = UACPI_TRUE;
        uacpi_info(
            "starting uACPI, version %d.%d.%d\n",
            UACPI_MAJOR, UACPI_MINOR, UACPI_PATCH
        );
    }
}
// Toggle the flag that makes table checksums validated proactively.
void uacpi_context_set_proactive_table_checksum(uacpi_bool setting)
{
    if (setting)
        g_uacpi_rt_ctx.flags |= UACPI_FLAG_PROACTIVE_TBL_CSUM;
    else
        g_uacpi_rt_ctx.flags &= ~UACPI_FLAG_PROACTIVE_TBL_CSUM;
}
/*
 * Map a uacpi_status to a static human-readable description.
 * Unknown values yield "<invalid status>"; never returns NULL.
 */
const uacpi_char *uacpi_status_to_string(uacpi_status st)
{
    switch (st) {
    case UACPI_STATUS_OK:
        return "no error";
    case UACPI_STATUS_MAPPING_FAILED:
        return "failed to map memory";
    case UACPI_STATUS_OUT_OF_MEMORY:
        return "out of memory";
    case UACPI_STATUS_BAD_CHECKSUM:
        return "bad table checksum";
    case UACPI_STATUS_INVALID_SIGNATURE:
        return "invalid table signature";
    case UACPI_STATUS_INVALID_TABLE_LENGTH:
        return "invalid table length";
    case UACPI_STATUS_NOT_FOUND:
        return "not found";
    case UACPI_STATUS_INVALID_ARGUMENT:
        return "invalid argument";
    case UACPI_STATUS_UNIMPLEMENTED:
        return "unimplemented";
    case UACPI_STATUS_ALREADY_EXISTS:
        return "already exists";
    case UACPI_STATUS_INTERNAL_ERROR:
        return "internal error";
    case UACPI_STATUS_TYPE_MISMATCH:
        return "object type mismatch";
    case UACPI_STATUS_INIT_LEVEL_MISMATCH:
        return "init level too low/high for this action";
    case UACPI_STATUS_NAMESPACE_NODE_DANGLING:
        return "attempting to use a dangling namespace node";
    case UACPI_STATUS_NO_HANDLER:
        return "no handler found";
    case UACPI_STATUS_NO_RESOURCE_END_TAG:
        return "resource template without an end tag";
    case UACPI_STATUS_COMPILED_OUT:
        return "this functionality has been compiled out of this build";
    case UACPI_STATUS_HARDWARE_TIMEOUT:
        return "timed out waiting for hardware response";
    case UACPI_STATUS_TIMEOUT:
        return "wait timed out";
    case UACPI_STATUS_OVERRIDDEN:
        return "the requested action has been overridden";
    case UACPI_STATUS_DENIED:
        return "the requested action has been denied";
    case UACPI_STATUS_AML_UNDEFINED_REFERENCE:
        return "AML referenced an undefined object";
    case UACPI_STATUS_AML_INVALID_NAMESTRING:
        return "invalid AML name string";
    case UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS:
        return "object already exists";
    case UACPI_STATUS_AML_INVALID_OPCODE:
        return "invalid AML opcode";
    case UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE:
        return "incompatible AML object type";
    case UACPI_STATUS_AML_BAD_ENCODING:
        return "bad AML instruction encoding";
    case UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX:
        return "out of bounds AML index";
    case UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH:
        return "AML attempted to acquire a mutex with a lower sync level";
    case UACPI_STATUS_AML_INVALID_RESOURCE:
        return "invalid resource template encoding or type";
    case UACPI_STATUS_AML_LOOP_TIMEOUT:
        return "hanging AML while loop";
    case UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT:
        return "reached maximum AML call stack depth";
    default:
        return "<invalid status>";
    }
}
/*
 * Tear down every initialized subsystem (in reverse dependency order),
 * restore legacy mode if we switched out of it, zero the runtime context
 * and notify the host kernel. Safe to call at any init level; used both
 * for shutdown and to roll back a failed initialization.
 */
void uacpi_state_reset(void)
{
#ifndef UACPI_BAREBONES_MODE
    uacpi_deinitialize_namespace();
    uacpi_deinitialize_interfaces();
    uacpi_deinitialize_events();
    uacpi_deinitialize_notify();
    uacpi_deinitialize_opregion();
#endif

    uacpi_deinitialize_tables();

#ifndef UACPI_BAREBONES_MODE
#ifndef UACPI_REDUCED_HARDWARE
    // Hand the hardware back to legacy (SMM) mode if that's how we found it
    if (g_uacpi_rt_ctx.was_in_legacy_mode)
        uacpi_leave_acpi_mode();
#endif

    uacpi_deinitialize_registers();

#ifndef UACPI_REDUCED_HARDWARE
    if (g_uacpi_rt_ctx.global_lock_event)
        uacpi_kernel_free_event(g_uacpi_rt_ctx.global_lock_event);
    if (g_uacpi_rt_ctx.global_lock_spinlock)
        uacpi_kernel_free_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
#endif

#endif // !UACPI_BAREBONES_MODE

    uacpi_memzero(&g_uacpi_rt_ctx, sizeof(g_uacpi_rt_ctx));

#if defined(UACPI_KERNEL_INITIALIZATION) && !defined(UACPI_BAREBONES_MODE)
    uacpi_kernel_deinitialize();
#endif
}
#ifndef UACPI_BAREBONES_MODE
// Set the AML While-loop hang timeout; 0 restores the default.
void uacpi_context_set_loop_timeout(uacpi_u32 seconds)
{
    if (seconds == 0)
        seconds = UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS;

    g_uacpi_rt_ctx.loop_timeout_seconds = seconds;
}

// Set the maximum AML method call depth; 0 restores the default.
void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth)
{
    if (depth == 0)
        depth = UACPI_DEFAULT_MAX_CALL_STACK_DEPTH;

    g_uacpi_rt_ctx.max_call_stack_depth = depth;
}

// Current AML While-loop timeout in seconds.
uacpi_u32 uacpi_context_get_loop_timeout(void)
{
    return g_uacpi_rt_ctx.loop_timeout_seconds;
}
#ifndef UACPI_REDUCED_HARDWARE
// Whether the chipset is currently in ACPI mode or legacy (SMM) mode.
enum hw_mode {
    HW_MODE_ACPI = 0,
    HW_MODE_LEGACY = 1,
};

/*
 * Determine the current hardware mode from SCI_EN. With no SMI command
 * port the platform can only ever be in ACPI mode; a register read
 * failure is conservatively reported as legacy mode.
 */
static enum hw_mode read_mode(void)
{
    uacpi_status ret;
    uacpi_u64 raw_value;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    if (!fadt->smi_cmd)
        return HW_MODE_ACPI;

    ret = uacpi_read_register_field(UACPI_REGISTER_FIELD_SCI_EN, &raw_value);
    if (uacpi_unlikely_error(ret))
        return HW_MODE_LEGACY;

    return raw_value ? HW_MODE_ACPI : HW_MODE_LEGACY;
}
/*
 * Request a hardware mode switch by writing ACPI_ENABLE/ACPI_DISABLE to
 * the SMI command port, then poll (up to 5 seconds) until SCI_EN reflects
 * the requested mode.
 */
static uacpi_status set_mode(enum hw_mode mode)
{
    uacpi_status ret;
    uacpi_u64 raw_value, stalled_time = 0;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    if (uacpi_unlikely(!fadt->smi_cmd)) {
        uacpi_error("SMI_CMD is not implemented by the firmware\n");
        return UACPI_STATUS_NOT_FOUND;
    }

    if (uacpi_unlikely(!fadt->acpi_enable && !fadt->acpi_disable)) {
        uacpi_error("mode transition is not implemented by the hardware\n");
        return UACPI_STATUS_NOT_FOUND;
    }

    switch (mode) {
    case HW_MODE_ACPI:
        raw_value = fadt->acpi_enable;
        break;
    case HW_MODE_LEGACY:
        raw_value = fadt->acpi_disable;
        break;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    ret = uacpi_write_register(UACPI_REGISTER_SMI_CMD, raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Allow up to 5 seconds for the hardware to enter the desired mode
    while (stalled_time < (5 * 1000 * 1000)) {
        if (read_mode() == mode)
            return UACPI_STATUS_OK;

        uacpi_kernel_stall(100);
        stalled_time += 100;
    }

    uacpi_error("hardware time out while changing modes\n");
    return UACPI_STATUS_HARDWARE_TIMEOUT;
}
/*
 * Switch into the given mode if not already there. On hardware-reduced
 * platforms this is a no-op. When did_change is non-NULL it is set to
 * UACPI_TRUE only if an actual transition occurred.
 */
static uacpi_status enter_mode(enum hw_mode mode, uacpi_bool *did_change)
{
    uacpi_status ret;
    const uacpi_char *mode_str;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    mode_str = mode == HW_MODE_LEGACY ? "legacy" : "acpi";

    if (read_mode() == mode) {
        uacpi_trace("%s mode already enabled\n", mode_str);
        return UACPI_STATUS_OK;
    }

    ret = set_mode(mode);
    if (uacpi_unlikely_error(ret)) {
        uacpi_warn(
            "unable to enter %s mode: %s\n",
            mode_str, uacpi_status_to_string(ret)
        );
        return ret;
    }

    uacpi_trace("entered %s mode\n", mode_str);
    if (did_change != UACPI_NULL)
        *did_change = UACPI_TRUE;

    return ret;
}
// Public wrapper: switch the chipset into ACPI mode.
uacpi_status uacpi_enter_acpi_mode(void)
{
    return enter_mode(HW_MODE_ACPI, UACPI_NULL);
}

// Public wrapper: hand control back to legacy (SMM) mode.
uacpi_status uacpi_leave_acpi_mode(void)
{
    return enter_mode(HW_MODE_LEGACY, UACPI_NULL);
}

/*
 * Initial mode switch during uacpi_initialize(); records whether the
 * firmware had us in legacy mode so uacpi_state_reset() can restore it.
 */
static void enter_acpi_mode_initial(void)
{
    enter_mode(HW_MODE_ACPI, &g_uacpi_rt_ctx.was_in_legacy_mode);
}
#else
// Hardware-reduced builds have no legacy mode to leave
static void enter_acpi_mode_initial(void) { }
#endif
// Current library initialization stage (early/subsystem/loaded/initialized).
uacpi_init_level uacpi_get_current_init_level(void)
{
    return g_uacpi_rt_ctx.init_level;
}
/*
 * First stage of bring-up: initialize tables, registers, early events,
 * opregions, interfaces, namespace bookkeeping and notifications, then
 * switch the hardware into ACPI mode (unless UACPI_FLAG_NO_ACPI_MODE).
 * Any failure rolls everything back via uacpi_state_reset().
 */
uacpi_status uacpi_initialize(uacpi_u64 flags)
{
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY);

#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_EARLY);
    if (uacpi_unlikely_error(ret))
        return ret;
#endif

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED;
    // Sleep type caches start out invalid until S-state data is evaluated
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.flags = flags;

    uacpi_logger_initialize();

    if (g_uacpi_rt_ctx.loop_timeout_seconds == 0)
        uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS);
    if (g_uacpi_rt_ctx.max_call_stack_depth == 0)
        uacpi_context_set_max_call_stack_depth(UACPI_DEFAULT_MAX_CALL_STACK_DEPTH);

    ret = uacpi_initialize_tables();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_registers();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_events_early();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_opregion();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_interfaces();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_namespace();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    ret = uacpi_initialize_notify();
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;

    uacpi_install_default_address_space_handlers();

    if (!uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE))
        enter_acpi_mode_initial();

    return UACPI_STATUS_OK;

out_fatal_error:
    // Unwind everything initialized so far
    uacpi_state_reset();
    return ret;
}
// Counters for the AML-blob loading pass in uacpi_namespace_load().
struct table_load_stats {
    uacpi_u32 load_counter;
    uacpi_u32 failure_counter;
};

// Log one failed table load at the requested severity.
static void trace_table_load_failure(
    struct acpi_sdt_hdr *tbl, uacpi_log_level lvl, uacpi_status ret
)
{
    uacpi_log_lvl(
        lvl,
        "failed to load "UACPI_PRI_TBL_HDR": %s\n",
        UACPI_FMT_TBL_HDR(tbl), uacpi_status_to_string(ret)
    );
}
// Table-iteration predicate: any SSDT/PSDT that hasn't been loaded yet.
static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl)
{
    if (tbl->flags & UACPI_TABLE_LOADED)
        return UACPI_FALSE;

    return uacpi_signatures_match(tbl->hdr.signature, ACPI_SSDT_SIGNATURE) ||
           uacpi_signatures_match(tbl->hdr.signature, ACPI_PSDT_SIGNATURE);
}
// Milliseconds elapsed between two nanosecond timestamps.
static uacpi_u64 elapsed_ms(uacpi_u64 begin_ns, uacpi_u64 end_ns)
{
    const uacpi_u64 ns_per_ms = 1000ull * 1000ull;

    return (end_ns - begin_ns) / ns_per_ms;
}
/*
 * Sanity-check the host time source using two timestamps taken around a
 * non-trivial amount of work. Returns UACPI_TRUE (and warns) if it looks
 * stubbed out, too coarse, or non-monotonic.
 */
static uacpi_bool warn_on_bad_timesource(uacpi_u64 begin_ts, uacpi_u64 end_ts)
{
    const uacpi_char *reason;

    if (uacpi_unlikely(begin_ts == 0 && end_ts == 0)) {
        reason = "uacpi_kernel_get_nanoseconds_since_boot() appears to be a stub";
        goto out_bad_timesource;
    }

    if (uacpi_unlikely(begin_ts == end_ts)) {
        reason = "poor time source precision detected";
        goto out_bad_timesource;
    }

    if (uacpi_unlikely(end_ts < begin_ts)) {
        reason = "time source backwards drift detected";
        goto out_bad_timesource;
    }

    return UACPI_FALSE;

out_bad_timesource:
    uacpi_warn("%s, this may cause problems\n", reason);
    return UACPI_TRUE;
}
/*
 * Second stage of bring-up: execute the DSDT and every SSDT/PSDT,
 * populating the AML namespace, then initialize the event subsystem.
 * Individual table failures are counted but not fatal; a missing DSDT or
 * an internal iteration error is fatal and triggers a full state reset.
 */
uacpi_status uacpi_namespace_load(void)
{
    struct uacpi_table tbl;
    uacpi_status ret;
    uacpi_u64 begin_ts, end_ts;
    struct table_load_stats st = { 0 };
    uacpi_size cur_index;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);

#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    if (uacpi_unlikely_error(ret))
        goto out_fatal_error;
#endif

    begin_ts = uacpi_kernel_get_nanoseconds_since_boot();

    // The DSDT is mandatory and is always loaded first
    ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret));
        goto out_fatal_error;
    }

    ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
    if (uacpi_unlikely_error(ret)) {
        trace_table_load_failure(tbl.hdr, UACPI_LOG_ERROR, ret);
        st.failure_counter++;
    }
    st.load_counter++;
    uacpi_table_unref(&tbl);

    // Load every not-yet-loaded SSDT/PSDT in installation order
    for (cur_index = 0;; cur_index = tbl.index + 1) {
        ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl);
        if (ret != UACPI_STATUS_OK) {
            if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND))
                goto out_fatal_error;

            break;
        }

        ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
        if (uacpi_unlikely_error(ret)) {
            trace_table_load_failure(tbl.hdr, UACPI_LOG_WARN, ret);
            st.failure_counter++;
        }
        st.load_counter++;
        uacpi_table_unref(&tbl);
    }

    end_ts = uacpi_kernel_get_nanoseconds_since_boot();
    g_uacpi_rt_ctx.bad_timesource = warn_on_bad_timesource(begin_ts, end_ts);

    // Only print the ops/sec statistic when timing data is trustworthy
    if (uacpi_unlikely(st.failure_counter != 0 || g_uacpi_rt_ctx.bad_timesource)) {
        uacpi_info(
            "loaded %u AML blob%s (%u error%s)\n",
            st.load_counter, st.load_counter > 1 ? "s" : "", st.failure_counter,
            st.failure_counter == 1 ? "" : "s"
        );
    } else {
        uacpi_u64 ops = g_uacpi_rt_ctx.opcodes_executed;
        uacpi_u64 ops_per_sec = ops * UACPI_NANOSECONDS_PER_SEC;

        ops_per_sec /= end_ts - begin_ts;

        uacpi_info(
            "successfully loaded %u AML blob%s, %"UACPI_PRIu64" ops in "
            "%"UACPI_PRIu64"ms (avg %"UACPI_PRIu64"/s)\n",
            st.load_counter, st.load_counter > 1 ? "s" : "",
            UACPI_FMT64(ops), UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
            UACPI_FMT64(ops_per_sec)
        );
    }

    ret = uacpi_initialize_events();
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("event initialization failed: %s\n",
                    uacpi_status_to_string(ret));
        goto out_fatal_error;
    }

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_LOADED;
    return UACPI_STATUS_OK;

out_fatal_error:
    uacpi_state_reset();
    return ret;
}
// Statistics gathered while walking the namespace during initialization.
struct ns_init_context {
    uacpi_size ini_executed;
    uacpi_size ini_errors;
    uacpi_size sta_executed;
    uacpi_size sta_errors;
    uacpi_size devices;
    uacpi_size thermal_zones;
};

// Run _INI under node if present, tracking execution/error counts.
static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node)
{
    uacpi_status ret;

    ret = uacpi_eval(node, "_INI", UACPI_NULL, UACPI_NULL);
    // A missing _INI is the common case and is not an error
    if (ret == UACPI_STATUS_NOT_FOUND)
        return;

    ctx->ini_executed++;
    if (uacpi_unlikely_error(ret))
        ctx->ini_errors++;
}
/*
 * Evaluate _STA for node into *value, tracking statistics. A *value of
 * 0xFFFFFFFF indicates _STA was absent (all-bits-set default), in which
 * case no counters are touched.
 */
static uacpi_status sta_eval(
    struct ns_init_context *ctx, uacpi_namespace_node *node,
    uacpi_u32 *value
)
{
    uacpi_status ret;

    ret = uacpi_eval_sta(node, value);
    if (*value == 0xFFFFFFFF)
        return ret;

    ctx->sta_executed++;
    if (uacpi_unlikely_error(ret))
        ctx->sta_errors++;

    return ret;
}
/*
 * Per-node namespace-walk callback: evaluate _STA and, for present
 * devices, _INI. The returned iteration decision controls whether the
 * walk descends into the node's children.
 */
static uacpi_iteration_decision do_sta_ini(
    void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    struct ns_init_context *ctx = opaque;
    uacpi_status ret;
    uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
    uacpi_u32 sta_ret;

    UACPI_UNUSED(depth);

    // We don't care about aliases
    if (uacpi_namespace_node_is_alias(node))
        return UACPI_ITERATION_DECISION_NEXT_PEER;

    ret = uacpi_namespace_node_type(node, &type);

    switch (type) {
    case UACPI_OBJECT_DEVICE:
    case UACPI_OBJECT_PROCESSOR:
        ctx->devices++;
        break;
    case UACPI_OBJECT_THERMAL_ZONE:
        ctx->thermal_zones++;
        break;
    default:
        // Non-device nodes are skipped, except the predefined \_TZ scope
        if (node != uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_TZ))
            return UACPI_ITERATION_DECISION_CONTINUE;
    }

    ret = sta_eval(ctx, node, &sta_ret);
    if (uacpi_unlikely_error(ret))
        return UACPI_ITERATION_DECISION_CONTINUE;

    if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) {
        // Absent and non-functional: don't descend into this subtree
        if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING))
            return UACPI_ITERATION_DECISION_NEXT_PEER;

        /*
         * ACPI 6.5 specification:
         * _STA may return bit 0 clear (not present) with bit [3] set (device
         * is functional). This case is used to indicate a valid device for
         * which no device driver should be loaded (for example, a bridge
         * device.) Children of this device may be present and valid. OSPM
         * should continue enumeration below a device whose _STA returns this
         * bit combination.
         */
        return UACPI_ITERATION_DECISION_CONTINUE;
    }

    ini_eval(ctx, node);

    return UACPI_ITERATION_DECISION_CONTINUE;
}
/*
 * Final stage of bring-up: run \_INI and \_SB._INI, _REG for the default
 * global address space handlers, then _STA/_INI across the whole tree.
 * Advances the init level to NAMESPACE_INITIALIZED on success.
 */
uacpi_status uacpi_namespace_initialize(void)
{
    struct ns_init_context ctx = { 0 };
    uacpi_namespace_node *root;
    uacpi_u64 begin_ts, end_ts;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *handler;
    uacpi_status ret = UACPI_STATUS_OK;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_NAMESPACE_LOADED);

#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
    if (uacpi_unlikely_error(ret))
        goto out;
#endif

    /*
     * Initialization order here is identical to ACPICA because ACPI
     * specification doesn't really have any detailed steps that explain
     * how to do it.
     */
    root = uacpi_namespace_root();

    begin_ts = uacpi_kernel_get_nanoseconds_since_boot();

    // Step 1 - Execute \_INI
    ini_eval(&ctx, root);

    // Step 2 - Execute \_SB._INI
    ini_eval(
        &ctx, uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SB)
    );

    /*
     * Step 3 - Run _REG methods for all globally installed
     *          address space handlers.
     */
    handlers = uacpi_node_get_address_space_handlers(root);
    if (handlers) {
        handler = handlers->head;

        while (handler) {
            if (uacpi_address_space_handler_is_default(handler))
                uacpi_reg_all_opregions(root, handler->space);

            handler = handler->next;
        }
    }

    // Step 4 - Run all other _STA and _INI methods
    uacpi_namespace_for_each_child(
        root, do_sta_ini, UACPI_NULL,
        UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, &ctx
    );

    end_ts = uacpi_kernel_get_nanoseconds_since_boot();

    // Omit the elapsed-time figure if the time source is unreliable
    if (uacpi_likely(!g_uacpi_rt_ctx.bad_timesource)) {
        uacpi_info(
            "namespace initialization done in %"UACPI_PRIu64"ms: "
            "%zu devices, %zu thermal zones\n",
            UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
            ctx.devices, ctx.thermal_zones
        );
    } else {
        uacpi_info(
            "namespace initialization done: %zu devices, %zu thermal zones\n",
            ctx.devices, ctx.thermal_zones
        );
    }

    uacpi_trace(
        "_STA calls: %zu (%zu errors), _INI calls: %zu (%zu errors)\n",
        ctx.sta_executed, ctx.sta_errors, ctx.ini_executed,
        ctx.ini_errors
    );

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED;

#ifdef UACPI_KERNEL_INITIALIZATION
    ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);

out:
    if (uacpi_unlikely_error(ret))
        uacpi_state_reset();
#endif

    return ret;
}
/*
 * Evaluate the object at 'path' relative to 'parent' (either may be NULL,
 * but not both). Non-method objects are deep-copied into *out_obj under the
 * namespace read lock; methods are executed under the write lock, with
 * 'args' forwarded and the return value (if any) stored in *out_obj.
 */
uacpi_status uacpi_eval(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **out_obj
)
{
    struct uacpi_namespace_node *node;
    uacpi_control_method *method;
    uacpi_object *obj;
    uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_unlikely(parent == UACPI_NULL && path == UACPI_NULL))
        return ret;
    ret = uacpi_namespace_read_lock();
    if (uacpi_unlikely_error(ret))
        return ret;
    if (path != UACPI_NULL) {
        ret = uacpi_namespace_node_resolve(
            parent, path, UACPI_SHOULD_LOCK_NO,
            UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_YES,
            &node
        );
        if (uacpi_unlikely_error(ret))
            goto out_read_unlock;
    } else {
        node = parent;
    }
    obj = uacpi_namespace_node_get_object(node);
    if (uacpi_unlikely(obj == UACPI_NULL)) {
        ret = UACPI_STATUS_INVALID_ARGUMENT;
        goto out_read_unlock;
    }
    if (obj->type != UACPI_OBJECT_METHOD) {
        uacpi_object *new_obj;
        // Nothing to do for a non-method when the caller wants no value
        // (ret is OK at this point).
        if (uacpi_unlikely(out_obj == UACPI_NULL))
            goto out_read_unlock;
        new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
        if (uacpi_unlikely(new_obj == UACPI_NULL)) {
            ret = UACPI_STATUS_OUT_OF_MEMORY;
            goto out_read_unlock;
        }
        // Hand the caller an independent copy, not a live namespace object.
        ret = uacpi_object_assign(
            new_obj, obj, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_object_unref(new_obj);
            goto out_read_unlock;
        }
        *out_obj = new_obj;
    out_read_unlock:
        uacpi_namespace_read_unlock();
        return ret;
    }
    // Keep the method alive across the read->write lock transition.
    method = obj->method;
    uacpi_shareable_ref(method);
    uacpi_namespace_read_unlock();
    // Upgrade to a write-lock since we're about to run a method
    ret = uacpi_namespace_write_lock();
    if (uacpi_unlikely_error(ret))
        goto out_no_write_lock;
    ret = uacpi_execute_control_method(node, method, args, out_obj);
    uacpi_namespace_write_unlock();
out_no_write_lock:
    uacpi_method_unref(method);
    return ret;
}
// Evaluate an object without arguments; thin wrapper over uacpi_eval().
uacpi_status uacpi_eval_simple(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval(parent, path, UACPI_NULL, ret);
}
// Evaluate with arguments, discarding any returned value.
uacpi_status uacpi_execute(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args
)
{
    return uacpi_eval(parent, path, args, UACPI_NULL);
}
// Evaluate with no arguments and discard any returned value.
uacpi_status uacpi_execute_simple(
    uacpi_namespace_node *parent, const uacpi_char *path
)
{
    return uacpi_eval(parent, path, UACPI_NULL, UACPI_NULL);
}
/*
 * Warning helpers for typed evaluation mismatches. 'path_fmt' is a printf
 * format fragment for the method path, spliced into the message via string
 * literal concatenation.
 * TRACE_BAD_RET - a value of an unexpected type was returned.
 * TRACE_NO_RET  - no value was returned at all.
 */
#define TRACE_BAD_RET(path_fmt, type, ...) \
    uacpi_warn( \
        "unexpected '%s' object returned by method "path_fmt \
        ", expected type mask: %08X\n", uacpi_object_type_to_string(type), \
        __VA_ARGS__ \
    )
#define TRACE_NO_RET(path_fmt, ...) \
    uacpi_warn( \
        "no value returned from method "path_fmt", expected type mask: " \
        "%08X\n", __VA_ARGS__ \
    )
/*
 * Log a warning for a typed-eval mismatch, printing the most informative
 * path available: the caller-provided absolute path, or parent's generated
 * absolute path optionally joined with the relative 'path'.
 */
static void trace_invalid_return_type(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_object_type_bits expected_mask, uacpi_object_type actual_type
)
{
    const uacpi_char *abs_path;
    uacpi_bool dynamic_abs_path = UACPI_FALSE;
    if (parent == UACPI_NULL || (path != UACPI_NULL && path[0] == '\\')) {
        abs_path = path;
    } else {
        // Path is relative (or absent): derive the absolute prefix from
        // parent. This string is heap-allocated and freed below.
        abs_path = uacpi_namespace_node_generate_absolute_path(parent);
        dynamic_abs_path = UACPI_TRUE;
    }
    if (dynamic_abs_path && path != UACPI_NULL) {
        // UNINITIALIZED stands in for "method returned nothing".
        if (actual_type == UACPI_OBJECT_UNINITIALIZED)
            TRACE_NO_RET("%s.%s", abs_path, path, expected_mask);
        else
            TRACE_BAD_RET("%s.%s", actual_type, abs_path, path, expected_mask);
    } else {
        if (actual_type == UACPI_OBJECT_UNINITIALIZED) {
            TRACE_NO_RET("%s", abs_path, expected_mask);
        } else {
            TRACE_BAD_RET("%s", actual_type, abs_path, expected_mask);
        }
    }
    if (dynamic_abs_path)
        uacpi_free_dynamic_string(abs_path);
}
/*
 * Evaluate and verify the result type against 'ret_mask' (a bitmask of
 * UACPI_OBJECT_*_BIT values; 0 skips the check). Returns TYPE_MISMATCH and
 * logs a warning on failure; otherwise stores the object in *out_obj.
 */
uacpi_status uacpi_eval_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object_type_bits ret_mask,
    uacpi_object **out_obj
)
{
    uacpi_status ret;
    uacpi_object *obj;
    // Stays UNINITIALIZED when the method returned no value at all.
    uacpi_object_type returned_type = UACPI_OBJECT_UNINITIALIZED;
    if (uacpi_unlikely(out_obj == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;
    ret = uacpi_eval(parent, path, args, &obj);
    if (uacpi_unlikely_error(ret))
        return ret;
    if (obj != UACPI_NULL)
        returned_type = obj->type;
    if (ret_mask && (ret_mask & (1 << returned_type)) == 0) {
        trace_invalid_return_type(parent, path, ret_mask, returned_type);
        uacpi_object_unref(obj);
        return UACPI_STATUS_TYPE_MISMATCH;
    }
    *out_obj = obj;
    return UACPI_STATUS_OK;
}
// Typed evaluation without arguments.
uacpi_status uacpi_eval_simple_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_object_type_bits ret_mask, uacpi_object **ret
)
{
    return uacpi_eval_typed(parent, path, UACPI_NULL, ret_mask, ret);
}
/*
 * Evaluate a method expected to return an integer and extract the raw
 * value into *out_value; the temporary object is released before return.
 */
uacpi_status uacpi_eval_integer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_u64 *out_value
)
{
    uacpi_object *int_obj;
    uacpi_status ret;
    ret = uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_INTEGER_BIT, &int_obj
    );
    if (uacpi_unlikely_error(ret))
        return ret;
    *out_value = int_obj->integer;
    uacpi_object_unref(int_obj);
    return UACPI_STATUS_OK;
}
// Integer evaluation without arguments.
uacpi_status uacpi_eval_simple_integer(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value
)
{
    return uacpi_eval_integer(parent, path, UACPI_NULL, out_value);
}
// Evaluate expecting either a buffer or a string result.
uacpi_status uacpi_eval_buffer_or_string(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, args,
        UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT,
        ret
    );
}
// Buffer-or-string evaluation without arguments.
uacpi_status uacpi_eval_simple_buffer_or_string(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, UACPI_NULL,
        UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT,
        ret
    );
}
// Evaluate expecting a string result.
uacpi_status uacpi_eval_string(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_STRING_BIT, ret
    );
}
// String evaluation without arguments.
uacpi_status uacpi_eval_simple_string(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, UACPI_NULL, UACPI_OBJECT_STRING_BIT, ret
    );
}
// Evaluate expecting a buffer result.
uacpi_status uacpi_eval_buffer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_BUFFER_BIT, ret
    );
}
// Buffer evaluation without arguments.
uacpi_status uacpi_eval_simple_buffer(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, UACPI_NULL, UACPI_OBJECT_BUFFER_BIT, ret
    );
}
// Evaluate expecting a package result.
uacpi_status uacpi_eval_package(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_object_array *args, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_PACKAGE_BIT, ret
    );
}
// Package evaluation without arguments.
uacpi_status uacpi_eval_simple_package(
    uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
)
{
    return uacpi_eval_typed(
        parent, path, UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, ret
    );
}
// Report the AML integer bitness: 32 when the loaded tables are revision 1
// (is_rev1), 64 otherwise. Requires the subsystem to be initialized.
uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness)
{
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
    *out_bitness = g_uacpi_rt_ctx.is_rev1 ? 32 : 64;
    return UACPI_STATUS_OK;
}
#endif // !UACPI_BAREBONES_MODE

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,255 @@
import os
import copy
import subprocess
from typing import List, Optional, Callable
from utilities.asl import ASL, ASLSource
ACPICA_BUFFER_PRINT_PREFIX = " 0000: "
def _parse_acpiexec_buffers(raw_output: str) -> List[List[int]]:
lines = raw_output.split("\n")
answers = []
for i, line in enumerate(lines):
if "Evaluating" in line:
lines = lines[i + 1:]
break
for line in lines:
if not line.startswith(ACPICA_BUFFER_PRINT_PREFIX):
continue
line = line.removeprefix(ACPICA_BUFFER_PRINT_PREFIX)
buffer_bytes = []
for x in line.split(" "):
# Buffers are printed out with ascii disassembly at the end.
# Skip as soon as we encounter empty space.
if x == "":
break
buffer_bytes.append(int(x, base=16))
answers.append(buffer_bytes)
return answers
def _generate_for_each_bit_combination(
    src: ASLSource, per_combo_cb: Callable,
    final_cb: Optional[Callable] = None
) -> None:
    """Emit ASL methods FT0..FT63, one per start bit, invoking
    per_combo_cb(start, end, src) for every start < end <= 64, then a MAIN
    method that calls them all (and final_cb, if given) before finalizing.
    """
    helper_names = []
    for start in range(64):
        helper = f"FT{start}"
        helper_names.append(helper)
        src.l(ASL.method(helper))
        src.block_begin()
        # Every (start, end] bit window with end strictly above start.
        for end in range(start + 1, 65):
            per_combo_cb(start, end, src)
        src.block_end()
    src.l(ASL.method("MAIN"))
    src.block_begin()
    for helper in helper_names:
        src.l(ASL.invoke(helper))
    if final_cb is not None:
        final_cb(src)
    src.block_end()
    src.finalize()
# Base names for the compiled "reference answer" tables.
_READS_ANSWERS_NAME = "buffer-reads-answers"
_WRITES_ANSWERS_NAME = "buffer-writes-answers"
def _generate_buffer_reads_answers(
    compiler: str, bin_dir: str, src: ASLSource
) -> List[List[int]]:
    """Compile (if missing) and run the reads answer table under acpiexec,
    returning the reference buffers it prints."""
    output_path = os.path.join(bin_dir, _READS_ANSWERS_NAME + ".aml")
    if not os.path.exists(output_path):
        _do_generate_buffer_reads_answers(compiler, bin_dir, src)
    raw_answers = subprocess.check_output(
        ["acpiexec", "-b", "execute MAIN", output_path],
        universal_newlines=True
    )
    return _parse_acpiexec_buffers(raw_answers)
def _generate_buffer_writes_answers(
    compiler: str, bin_dir: str, src: ASLSource
) -> List[List[int]]:
    """Compile (if missing) and run the writes answer table under acpiexec,
    returning the reference buffers it prints."""
    output_path = os.path.join(bin_dir, _WRITES_ANSWERS_NAME + ".aml")
    if not os.path.exists(output_path):
        _do_generate_buffer_writes_answers(compiler, bin_dir, src)
    raw_answers = subprocess.check_output(
        ["acpiexec", "-b", "execute MAIN", output_path],
        universal_newlines=True
    )
    return _parse_acpiexec_buffers(raw_answers)
def _do_generate_buffer_reads_answers(
    compiler: str, bin_dir: str, src: ASLSource
) -> None:
    """Emit and compile the ASL that dumps every read field to Debug,
    producing the acpiexec reference output for the reads test."""
    def gen_buffer_dump(i, j, src):
        # One field per (i, j) bit window over the source buffer BUFF.
        field_size = j - i
        field_name = f"FI{field_size:02X}"
        src.l(ASL.create_field("BUFF", i, field_size, field_name))
        src.l(ASL.assign("Debug", field_name))
    _generate_for_each_bit_combination(src, gen_buffer_dump)
    answers_src_path = os.path.join(bin_dir, _READS_ANSWERS_NAME + ".asl")
    src.dump(answers_src_path)
    ASLSource.compile(answers_src_path, compiler, bin_dir)
def _do_generate_buffer_writes_answers(
    compiler: str, bin_dir: str, src: ASLSource
) -> None:
    """Emit and compile the ASL that writes BUFF into every field of BUFX
    and dumps the result, producing the writes reference output."""
    def gen_buffer_dump(i, j, src):
        field_size = j - i
        field_name = f"FI{field_size:02X}"
        src.l(ASL.create_field("BUFX", i, field_size, field_name))
        src.l(ASL.assign(field_name, "BUFF"))
        src.l(ASL.assign("Debug", field_name))
    _generate_for_each_bit_combination(src, gen_buffer_dump)
    writes_src_path = os.path.join(bin_dir, _WRITES_ANSWERS_NAME + ".asl")
    src.dump(writes_src_path)
    ASLSource.compile(writes_src_path, compiler, bin_dir)
_READS_TEST_NAME = "2080-buffer-reads"
_WRITES_TEST_NAME = "2080-buffer-writes"
def generate_buffer_reads_test(compiler: str, bin_dir: str) -> str:
    """Return the path of the buffer-reads test case source, generating it
    only on first use."""
    asl_path = os.path.join(bin_dir, _READS_TEST_NAME + ".asl")
    if not os.path.exists(asl_path):
        return _do_generate_buffer_reads_test(compiler, bin_dir)
    return asl_path
def generate_buffer_writes_test(compiler: str, bin_dir: str) -> str:
    """Return the path of the buffer-writes test case source, generating it
    only on first use."""
    asl_path = os.path.join(bin_dir, _WRITES_TEST_NAME + ".asl")
    if not os.path.exists(asl_path):
        return _do_generate_buffer_writes_test(compiler, bin_dir)
    return asl_path
def _generate_buffer_test_prologue() -> ASLSource:
    """Start an ASL source declaring the fixed source buffer BUFF and the
    zeroed 8-byte destination buffer BUFX used by both buffer tests."""
    src = ASLSource(2)
    src.l(ASL.name(
        "BUFF",
        ASL.buffer([0xAC, 0x12, 0x42, 0xCA, 0xDE, 0xFF, 0xCB, 0xDD])
    ))
    src.l(ASL.name(
        "BUFX",
        ASL.buffer(count=8)
    ))
    return src
def _generate_buffer_test_harness(src: ASLSource) -> None:
    """Emit the PASS/FAIL counters and the FDBG helper method, which dumps
    its three arguments to Debug and bumps the failure counter."""
    src.l(ASL.name("FAIL", 0))
    src.l(ASL.name("PASS", 0))
    src.l(ASL.method("FDBG", 3))
    src.block_begin()
    src.l(ASL.assign("Debug", "Arg0"))
    src.l(ASL.assign("Debug", "Arg1"))
    src.l(ASL.assign("Debug", "Arg2"))
    src.l(ASL.increment("FAIL"))
    src.block_end()
def _do_generate_buffer_reads_test(compiler: str, bin_dir: str) -> str:
    """Generate the self-checking buffer-reads test case.

    For every bit window, read the field from BUFF and compare against the
    acpiexec-produced reference; MAIN returns FAIL (expected 0).
    """
    src = _generate_buffer_test_prologue()
    # Deep-copy so answer generation doesn't pollute the test source.
    answers = _generate_buffer_reads_answers(compiler, bin_dir,
                                             copy.deepcopy(src))
    _generate_buffer_test_harness(src)
    answer_idx = 0
    def gen_buffer_check(i, j, src):
        # Consumes answers in the same (i, j) order they were generated in.
        nonlocal answer_idx
        field_size = j - i
        field_name = f"FI{field_size:02X}"
        src.l(ASL.create_field("BUFF", i, field_size, field_name))
        src.iff(ASL.equal(field_name, ASL.buffer(answers[answer_idx])))
        answer_idx += 1
        src.l(ASL.increment("PASS"))
        src.elsee()
        src.l(ASL.invoke("FDBG", [
            field_name, "__LINE__", f'"{field_name}"'
        ]))
        src.block_end()
    _generate_for_each_bit_combination(src, gen_buffer_check,
                                       lambda src: src.l(ASL.returnn("FAIL")))
    test_src_path = os.path.join(bin_dir, _READS_TEST_NAME + ".asl")
    src.dump_as_test_case(test_src_path, "Reads from buffer fields",
                          "int", "0")
    return test_src_path
def _do_generate_buffer_writes_test(compiler: str, bin_dir: str) -> str:
    """Generate the self-checking buffer-writes test case.

    For every bit window, write BUFF into a field over BUFX, compare the
    field readback against the reference, then zero BUFX for the next
    combination; MAIN returns FAIL (expected 0).
    """
    src = _generate_buffer_test_prologue()
    answers = _generate_buffer_writes_answers(compiler, bin_dir,
                                              copy.deepcopy(src))
    _generate_buffer_test_harness(src)
    answer_idx = 0
    def gen_buffer_check(i, j, src):
        # Consumes answers in the same (i, j) order they were generated in.
        nonlocal answer_idx
        field_size = j - i
        field_name = f"FI{field_size:02X}"
        src.l(ASL.create_field("BUFX", i, field_size, field_name))
        src.l(ASL.assign(field_name, "BUFF"))
        src.iff(ASL.equal(field_name, ASL.buffer(answers[answer_idx])))
        answer_idx += 1
        src.l(ASL.increment("PASS"))
        src.elsee()
        src.l(ASL.invoke("FDBG", [
            field_name, "__LINE__", f'"{field_name}"'
        ]))
        src.block_end()
        # Reset the destination buffer between combinations.
        src.l(ASL.assign("BUFX", 0))
    _generate_for_each_bit_combination(src, gen_buffer_check,
                                       lambda src: src.l(ASL.returnn("FAIL")))
    test_src_path = os.path.join(bin_dir, _WRITES_TEST_NAME + ".asl")
    src.dump_as_test_case(test_src_path, "Writes to buffer fields",
                          "int", "0")
    return test_src_path

View File

@ -0,0 +1,476 @@
#!/usr/bin/python3
import subprocess
import argparse
import os
import sys
import time
import platform
from multiprocessing import Manager, Pool, Queue
from typing import List, Tuple, Optional
from types import TracebackType
from abc import ABC, abstractmethod
from utilities.asl import ASLSource
import generated_test_cases.buffer_field as bf
def abs_path_to_current_dir() -> str:
    """Absolute path of the directory containing this script."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def generate_test_cases(compiler: str, bin_dir: str) -> List[str]:
    """Produce all generated (non-static) test case sources, returning
    their .asl paths."""
    return [
        bf.generate_buffer_reads_test(compiler, bin_dir),
        bf.generate_buffer_writes_test(compiler, bin_dir),
    ]
# Repository of real-world ACPI table dumps used for the large test suite.
ACPI_DUMPS_URL = "https://github.com/UltraOS/ACPIDumps.git"
class TestCase(ABC):
    """Base class for a runnable test case.

    path is the file handed to the runner binary; name is the label shown
    in progress output.
    """
    def __init__(self, path: str, name: str):
        self.path = path
        self.name = name
    @abstractmethod
    def extra_runner_args(self) -> List[str]:
        """Additional command-line arguments for the runner binary."""
        pass
class BarebonesTestCase(TestCase):
    """A barebones-runner case identified purely by name; the runner needs
    no input file, so the name doubles as the path."""
    def __init__(
        self, name: str
    ) -> None:
        super().__init__(name, name)
    def extra_runner_args(self) -> List[str]:
        # Barebones cases take no extra arguments.
        return []
class TestCaseWithMain(TestCase):
    """A compiled AML case whose MAIN return value is checked against an
    expected (type, value) pair via the runner's --expect flag."""
    def __init__(
        self, path: str, name: str, rtype: str, value: str
    ) -> None:
        super().__init__(path, f"{os.path.basename(path)}:{name}")
        self.rtype = rtype
        self.value = value
    def extra_runner_args(self) -> List[str]:
        return ["--expect", self.rtype, self.value]
class TestCaseHardwareBlob(TestCase):
    """A case built from an extracted hardware ACPI dump directory: the
    DSDT is the main table, any ssdt*.dat files are passed as extra tables
    in numeric order."""
    def __init__(self, path: str) -> None:
        dsdt_path = os.path.join(path, "dsdt.dat")
        super().__init__(dsdt_path, os.path.basename(path))
        # NOTE(review): the comprehension variable shadows the 'path'
        # parameter; os.listdir(path) is evaluated first, so this is
        # harmless, just confusing.
        self.ssdt_paths = [
            path for path in os.listdir(path)
            if path.startswith("ssdt") and path.endswith(".dat")
        ]
        def extract_ssdt_number(path: str) -> int:
            # Sort key: the numeric suffix of "ssdtX.dat".
            number = ""
            assert path.startswith("ssdt")
            for c in path[4:]:
                if not c.isdigit():
                    break
                number += c
            # some blobs apparently come with just "ssdt.dat" and not
            # "ssdtX.dat", take that into account here.
            return 0 if not number else int(number)
        if self.ssdt_paths:
            self.ssdt_paths.sort(key=extract_ssdt_number)
            self.ssdt_paths = [
                os.path.join(path, ssdt_path) for ssdt_path in self.ssdt_paths
            ]
    def extra_runner_args(self) -> List[str]:
        args = ["--enumerate-namespace"]
        if self.ssdt_paths:
            args.append("--extra-tables")
            args.extend(self.ssdt_paths)
        return args
def generate_large_test_cases(extractor: str, bin_dir: str) -> List[TestCase]:
    """Clone the ACPI dump repository (on first run), extract DSDT/SSDT
    tables from every .bin blob with acpixtract, and return one
    TestCaseHardwareBlob per blob."""
    acpi_dumps_dir = os.path.join(abs_path_to_current_dir(), "acpi-dumps")
    large_tests_dir = os.path.join(bin_dir, "large-tests")
    if not os.path.exists(acpi_dumps_dir):
        subprocess.check_call(["git", "clone", ACPI_DUMPS_URL, acpi_dumps_dir])
    os.makedirs(large_tests_dir, exist_ok=True)
    test_cases = []
    def recurse_one(path, depth=1):
        # Walk the dump tree; blobs at the repository root (depth 1) and
        # non-.bin files are skipped.
        for obj in os.listdir(path):
            if obj.startswith("."):
                continue
            obj_path = os.path.join(path, obj)
            if os.path.isdir(obj_path):
                recurse_one(obj_path, depth + 1)
                continue
            if depth == 1 or not obj.endswith(".bin"):
                continue
            print(f"Preparing HW blob {obj_path}...")
            # Derive a stable test name from the blob's relative location.
            split_path = obj_path.split(os.path.sep)[-depth:]
            fixed_up_path = [
                seg.replace(" ", "_").lower() for seg in split_path
            ]
            test_case_name = "_".join(fixed_up_path).replace(".bin", "")
            this_test_dir = os.path.join(large_tests_dir, test_case_name)
            if (not os.path.exists(this_test_dir) or not
                os.path.exists(os.path.join(this_test_dir, "dsdt.dat"))):
                os.makedirs(this_test_dir, exist_ok=True)
                # These are two separate invocations because of a bug in
                # acpixtract where it exits with -1 when there isn't an SSDT
                # inside a blob, even though it's specified as optional in
                # code. Merge once https://github.com/acpica/acpica/pull/959
                # is shipped everywhere.
                subprocess.check_call(
                    [extractor, "-sDSDT", obj_path], cwd=this_test_dir,
                    stdout=subprocess.DEVNULL
                )
                subprocess.run(
                    [extractor, "-sSSDT", obj_path], cwd=this_test_dir,
                    stdout=subprocess.DEVNULL
                )
            test_cases.append(TestCaseHardwareBlob(this_test_dir))
    recurse_one(acpi_dumps_dir)
    return test_cases
def get_case_name_and_expected_result(case: str) -> Tuple[str, str, str]:
    """Parse the two-line header of an ASL test case file.

    Line 1 carries "...: <name>", line 2 carries "...: <type> => <value>".
    Returns (name, expected_type, expected_value).
    """
    with open(case) as tc:
        first = tc.readline()
        second = tc.readline()
    name = first[first.find(":") + 1:].strip()
    expectation = second[second.find(":") + 1:].strip()
    parts = [piece.strip() for piece in expectation.split("=>")]
    return name, parts[0], parts[1]
class TestHeaderFooter:
    """Context manager printing an 80-column '=' banner around a section."""
    def __init__(self, text: str) -> None:
        # Center " text " within a line of '=' padding.
        self.hdr = "{:=^80}".format(" " + text + " ")
    def __enter__(self) -> None:
        print(self.hdr, flush=True)
    def __exit__(
        self, exc_type: Optional[type[BaseException]],
        ex: Optional[BaseException], traceback: Optional[TracebackType]
    ) -> Optional[bool]:
        # Close the banner; never suppress exceptions.
        print("=" * len(self.hdr), flush=True)
        return None
def run_resource_tests(runner: str) -> int:
    """Run the runner's built-in resource conversion tests; returns the
    runner's exit code (0 on success)."""
    with TestHeaderFooter("Resource Conversion Tests"):
        return subprocess.run([runner, "resource-tests"]).returncode
def compile_test_cases(
    test_cases: List[str], compiler: str, bin_dir: str
) -> List[TestCase]:
    """Compile every .asl case with iASL into bin_dir and wrap each in a
    TestCaseWithMain carrying its parsed name and expected result."""
    compiled_cases: List[TestCase] = []
    for case in test_cases:
        print(f"Compiling {case}...", end="")
        # Skip the table loading test for old iASL, it prints bogus error
        # messages and refuses to compile the test case no matter what I try:
        #
        #     If (!Load(TABL)) {
        # Error 6126 - syntax error ^
        #
        if os.path.basename(case) == "table-loading-0.asl":
            out = subprocess.check_output([compiler, "-v"],
                                          universal_newlines=True)
            # I don't know which versions it's broken for specifically, this
            # one comes with Ubuntu 22.04, so hardcode it.
            if "20200925" in out:
                print("SKIPPED (bugged iASL)", flush=True)
                continue
        compiled_cases.append(
            TestCaseWithMain(
                ASLSource.compile(case, compiler, bin_dir),
                *get_case_name_and_expected_result(case)
            )
        )
        # Terminate the "Compiling ..." progress line.
        print("")
    return compiled_cases
def run_single_test(case: TestCase, results: Queue, runner: str) -> bool:
    """Run one case under the runner binary with a 60s timeout, pushing the
    outcome onto 'results'.

    Success tuples are (True, case, elapsed); failures additionally carry
    stdout, stderr and a timed-out flag.
    """
    timeout = False
    start_time = time.time()
    proc = subprocess.Popen(
        [runner, case.path, *case.extra_runner_args()],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True
    )
    try:
        stdout, stderr = proc.communicate(timeout=60)
        elapsed_time = time.time() - start_time
    except subprocess.TimeoutExpired:
        # Kill the hung runner and collect whatever output it managed.
        proc.kill()
        stdout, stderr = proc.communicate()
        timeout = True
        elapsed_time = time.time() - start_time
    if proc.returncode == 0:
        results.put((True, case, elapsed_time))
        return True
    else:
        results.put((False, case, elapsed_time, stdout, stderr, timeout))
        return False
def run_tests(cases: List[TestCase], runner: str, parallelism: int) -> bool:
    """Run all cases in a process pool, streaming per-case results as they
    arrive; returns True iff every case passed."""
    pass_count = 0
    fail_count = 0
    start_time = time.time()
    def print_test_header(case: TestCase, success: bool, timeout: bool,
                          elapsed: float) -> None:
        status_str = "OK" if success else "TIMEOUT" if timeout else "FAILED"
        print(f"[{pass_count}/{len(cases)}] {case.name} "
              f"{status_str} in {elapsed:.2f}s", flush=True)
    def format_output(data: str) -> str:
        # Indent captured output so it stands apart from runner progress.
        return "\n".join(["\t" + line for line in data.split("\n")])
    with Pool(processes=parallelism) as pool:
        # Manager queue: results must cross process boundaries.
        manager = Manager()
        result_queue = manager.Queue()
        pool.starmap_async(run_single_test,
                           [(case, result_queue, runner) for case in cases])
        # Drain exactly one result tuple per scheduled case.
        while pass_count + fail_count < len(cases):
            success, case, elapsed_time, *args = result_queue.get()
            if success:
                pass_count += 1
                print_test_header(case, True, False, elapsed_time)
            else:
                fail_count += 1
                stdout, stderr, timeout = args
                print_test_header(case, False, timeout, elapsed_time)
                stdout_output = format_output(stdout)
                stderr_output = format_output(stderr)
                if stdout_output:
                    print(f"STDOUT FOR {case.name}:", flush=True)
                    print(stdout_output, flush=True)
                else:
                    print(f"NO STDOUT FROM TEST {case.name}", flush=True)
                if stderr_output:
                    print(f"STDERR FOR {case.name}:", flush=True)
                    print(stderr_output, flush=True)
                else:
                    print(f"NO STDERR FROM TEST {case.name}", flush=True)
        pool.close()
        pool.join()
    elapsed_time = time.time() - start_time
    print(f"SUMMARY: {pass_count}/{len(cases)} in {elapsed_time:.2f}s", end="")
    if not fail_count:
        print(" (ALL PASS!)")
    else:
        print(f" ({fail_count} FAILED)")
    return not fail_count
def test_relpath(*args: str) -> str:
    """Join path components onto this script's own directory."""
    return os.path.join(abs_path_to_current_dir(), *args)
def platform_name_for_binary(binary: str) -> str:
    """Append the platform's executable suffix (.exe on Windows only)."""
    suffix = ".exe" if platform.system() == "Windows" else ""
    return binary + suffix
def test_runner_binary() -> str:
    """Platform-specific filename of the main test runner."""
    return platform_name_for_binary("test-runner")
def barebones_test_runner_binary() -> str:
    """Platform-specific filename of the barebones test runner."""
    return platform_name_for_binary("barebones-test-runner")
def build_test_runner(bitness: int, watcom: bool) -> Tuple[str, str]:
    """Configure and build both runner binaries with CMake, returning
    (barebones_runner_path, runner_path).

    The build directory is keyed on platform/bitness/toolchain so variants
    coexist; Ninja is preferred on non-Windows hosts when available.
    """
    build_dir = f"build-{platform.system().lower()}-{bitness}bits"
    if watcom:
        build_dir = f"{build_dir}-watcom"
    runner_build_dir = test_relpath("runner", build_dir)
    runner_exe = os.path.join(runner_build_dir, test_runner_binary())
    barebones_runner_exe = os.path.join(
        runner_build_dir, barebones_test_runner_binary()
    )
    use_ninja = False
    if platform.system() != "Windows":
        try:
            subprocess.run(["ninja", "--version"], check=True,
                           stdout=subprocess.DEVNULL)
            use_ninja = True
        except FileNotFoundError:
            pass
    cmake_args: List[str] = ["cmake"]
    if watcom:
        cmake_args.extend(["-G", "Watcom WMake"])
    elif use_ninja:
        cmake_args.extend(["-G", "Ninja"])
    cmake_args.append("..")
    # Force a 32-bit build where requested (Watcom is always 32-bit).
    if not watcom and bitness == 32:
        if platform.system() == "Windows":
            cmake_args.extend(["-A", "Win32"])
        else:
            cmake_args.extend([
                "-DCMAKE_CXX_FLAGS=-m32",
                "-DCMAKE_C_FLAGS=-m32"
            ])
    # Only (re)configure when the build directory doesn't exist yet.
    if not os.path.isdir(runner_build_dir):
        os.makedirs(runner_build_dir, exist_ok=True)
        subprocess.run(cmake_args, cwd=runner_build_dir, check=True)
    subprocess.run(["cmake", "--build", "."], cwd=runner_build_dir, check=True)
    return barebones_runner_exe, runner_exe
def main() -> int:
    """Entry point: parse options, build runners if needed, then run the
    resource, AML, optional large and optional barebones suites in order,
    stopping at the first failing suite. Exits 0 on full success."""
    parser = argparse.ArgumentParser(description="Run uACPI tests")
    parser.add_argument("--asl-compiler",
                        help="Compiler to use to build test cases",
                        default="iasl")
    parser.add_argument("--acpi-extractor",
                        help="ACPI extractor utility to use for ACPI dumps",
                        default="acpixtract")
    parser.add_argument("--test-dir",
                        default=test_relpath("test-cases"),
                        help="The directory to run tests from, defaults to "
                             "'test-cases' in the same directory")
    parser.add_argument("--test-runner",
                        help="The test runner binary to invoke")
    parser.add_argument("--barebones-test-runner",
                        help="The barebones test runner binary to invoke")
    parser.add_argument("--binary-directory",
                        default=test_relpath("bin"),
                        help="The directory to store intermediate files in, "
                             "created & deleted automatically. Defaults to "
                             "'bin' in the same directory")
    parser.add_argument("--bitness", default=64, choices=[32, 64], type=int,
                        help="uACPI build bitness")
    parser.add_argument("--large", action="store_true",
                        help="Run the large test suite as well")
    parser.add_argument("--barebones", action="store_true",
                        help="Run the barebones test suite as well")
    parser.add_argument("--parallelism", type=int,
                        default=os.cpu_count() or 1,
                        help="Number of test runners to run in parallel")
    parser.add_argument("--watcom", action="store_true",
                        help="Use OpenWatcom to build test runners")
    args = parser.parse_args()
    # OpenWatcom builds are 32-bit only.
    if args.watcom:
        args.bitness = 32
    test_compiler = args.asl_compiler
    test_dir = args.test_dir
    test_runner = args.test_runner
    bare_test_runner = args.barebones_test_runner
    # Build whichever runner binaries the user didn't supply explicitly.
    if test_runner is None or (args.barebones and bare_test_runner is None):
        bare_runner_default, runner_default = build_test_runner(args.bitness,
                                                                args.watcom)
        if bare_test_runner is None:
            bare_test_runner = bare_runner_default
        if test_runner is None:
            test_runner = runner_default
    ret = run_resource_tests(test_runner)
    if ret != 0:
        sys.exit(ret)
    bin_dir = args.binary_directory
    os.makedirs(bin_dir, exist_ok=True)
    # Static .asl cases plus the generated buffer-field cases.
    test_cases = [
        os.path.join(test_dir, f)
        for f in os.listdir(test_dir)
        if os.path.splitext(f)[1] == ".asl"
    ]
    test_cases.extend(generate_test_cases(test_compiler, bin_dir))
    base_test_cases = compile_test_cases(
        test_cases, test_compiler, bin_dir
    )
    with TestHeaderFooter("AML Tests"):
        ret = run_tests(base_test_cases, test_runner, args.parallelism)
    if ret and args.large:
        large_test_cases = generate_large_test_cases(
            args.acpi_extractor, bin_dir
        )
        with TestHeaderFooter("Large AML Tests"):
            ret = run_tests(large_test_cases, test_runner, args.parallelism)
    if ret and args.barebones:
        bare_cases: List[TestCase] = [
            BarebonesTestCase("basic-operation"),
            BarebonesTestCase("table-installation"),
        ]
        with TestHeaderFooter("Barebones Mode Tests"):
            ret = run_tests(bare_cases, bare_test_runner, args.parallelism)
    # run_tests returns True on success; exit code must be 0 then.
    sys.exit(not ret)

View File

@ -0,0 +1 @@
build-*

View File

@ -0,0 +1,168 @@
# Build configuration for the uACPI test runner binaries. Pulls the uACPI
# sources/includes from the repository-level uacpi.cmake and defines two
# executables: the full test-runner and a barebones-mode runner.
cmake_minimum_required(VERSION 3.16)
project(TestRunner C)
set(CMAKE_C_STANDARD 17)
set(CMAKE_C_STANDARD_REQUIRED ON)
include(${CMAKE_CURRENT_SOURCE_DIR}/../../uacpi.cmake)
# Keep binaries in the build root for every multi-config configuration.
foreach(CONF_TYPE ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER ${CONF_TYPE} CONF_TYPE)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${CONF_TYPE} ${CMAKE_BINARY_DIR})
endforeach(CONF_TYPE ${CMAKE_CONFIGURATION_TYPES})
# Declares an executable target composed of the given sources plus the
# uACPI library sources, with per-toolchain warning/sanitizer flags.
macro (define_test_runner NAME)
add_executable(
    ${NAME}
    ${ARGN}
)
target_sources(
    ${NAME}
    PRIVATE
    ${UACPI_SOURCES}
)
target_include_directories(
    ${NAME}
    PRIVATE
    ${UACPI_INCLUDES}
)
if (WATCOM)
    # Address sanitizer doesn't exist on Watcom.
    target_compile_definitions(
        ${NAME}
        PRIVATE
        _LINUX_SOURCE
    )
    target_compile_options(
        ${NAME}
        PRIVATE
        -we -wx
    )
elseif (MSVC)
    # Address sanitizer on MSVC depends on a dynamic library that is not present in
    # PATH by default. Lets just not enable it here.
    target_compile_options(
        ${NAME}
        PRIVATE
        /W3 /WX
        /wd4200 /wd4267 /wd4244
    )
else ()
    target_compile_definitions(
        ${NAME}
        PRIVATE
        _GNU_SOURCE
    )
    target_compile_options(
        ${NAME}
        PRIVATE
        -fsanitize=address,undefined -g3 -Wall -Wextra -Werror
    )
    target_link_options(
        ${NAME}
        PRIVATE
        -fsanitize=address,undefined -g3
    )
    add_compile_options(
        $<$<COMPILE_LANGUAGE:C>:-Wstrict-prototypes>
    )
endif ()
endmacro ()
define_test_runner(
    test-runner
    test_runner.c
    helpers.c
    interface_impl.c
    resource_tests.c
    api_tests.c
)
find_package(Threads REQUIRED)
target_link_libraries(test-runner PRIVATE Threads::Threads)
# The barebones runner exercises UACPI_BAREBONES_MODE (table access only).
define_test_runner(
    barebones-test-runner
    helpers.c
    barebones_runner.c
)
target_compile_definitions(
    barebones-test-runner
    PRIVATE
    -DUACPI_BAREBONES_MODE
)
# Optional build variants toggled from the command line; each one maps a
# cache variable onto the matching uACPI compile definition.
if (NOT REDUCED_HARDWARE_BUILD)
    set(REDUCED_HARDWARE_BUILD 0)
endif()
if (REDUCED_HARDWARE_BUILD)
    target_compile_definitions(
        test-runner
        PRIVATE
        -DUACPI_REDUCED_HARDWARE
    )
endif ()
if (NOT DEFINED SIZED_FREES_BUILD)
    set(SIZED_FREES_BUILD 1)
endif()
if (SIZED_FREES_BUILD)
    target_compile_definitions(
        test-runner
        PRIVATE
        -DUACPI_SIZED_FREES
    )
endif ()
if (NOT FORMATTED_LOGGING_BUILD)
    set(FORMATTED_LOGGING_BUILD 0)
endif()
if (FORMATTED_LOGGING_BUILD)
    target_compile_definitions(
        test-runner
        PRIVATE
        -DUACPI_FORMATTED_LOGGING
    )
endif ()
if (NOT NATIVE_ALLOC_ZEROED)
    set(NATIVE_ALLOC_ZEROED 0)
endif()
if (NATIVE_ALLOC_ZEROED)
    target_compile_definitions(
        test-runner
        PRIVATE
        -DUACPI_NATIVE_ALLOC_ZEROED
    )
endif ()
if (NOT KERNEL_INITIALIZATION)
    set(KERNEL_INITIALIZATION 1)
endif()
if (KERNEL_INITIALIZATION)
    target_compile_definitions(
        test-runner
        PRIVATE
        -DUACPI_KERNEL_INITIALIZATION
    )
endif ()
if (NOT BUILTIN_STRING)
    set(BUILTIN_STRING 0)
endif()
if (BUILTIN_STRING)
    target_compile_definitions(
        test-runner
        PRIVATE
        -DUACPI_USE_BUILTIN_STRING
    )
endif ()

View File

@ -0,0 +1,475 @@
#include "helpers.h"
#include <inttypes.h>
#include <string.h>
#include <uacpi/opregion.h>
#include <uacpi/resources.h>
#include <uacpi/types.h>
/*
 * Evaluate the AML CHEK method with 'arr' and abort the test unless it
 * returns a non-zero integer. Also drops the reference to objects[1],
 * which every caller re-creates before the next check round.
 */
static void check_ok(uacpi_object **objects, uacpi_object_array *arr)
{
    uacpi_u64 ret;
    uacpi_status st = uacpi_eval_integer(UACPI_NULL, "CHEK", arr, &ret);
    ensure_ok_status(st);
    if (!ret)
        error("integer check failed");
    uacpi_object_unref(objects[1]);
}
/*
 * Exercise the public object creation/assignment API: integers (including
 * overflow rejection), strings, references, buffers and packages, each
 * verified by passing a (round-number, object) pair to the AML CHEK method.
 */
void test_object_api(void)
{
    uacpi_status st;
    uacpi_object_array arr;
    uacpi_object *objects[2];
    uacpi_data_view view;
    uacpi_object *tmp;
    uint8_t buffer[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
    uacpi_object *pkg[3];
    uacpi_object_array arr1;
    arr.objects = objects;
    arr.count = UACPI_ARRAY_SIZE(objects);
    // Round 1: integer creation, with overflow-checked creation expected
    // to fail for a value that doesn't fit a 32-bit integer.
    objects[0] = uacpi_object_create_integer(1);
    st = uacpi_object_create_integer_safe(
        0xDEADBEEFDEADBEEF, UACPI_OVERFLOW_DISALLOW, &objects[1]
    );
    if (st != UACPI_STATUS_INVALID_ARGUMENT)
        error("expected integer creation to fail");
    objects[1] = uacpi_object_create_integer(0xDEADBEEF);
    check_ok(objects, &arr);
    // Round 2: string objects, both C-string and explicit-length views.
    st = uacpi_object_assign_integer(objects[0], 2);
    ensure_ok_status(st);
    objects[1] = uacpi_object_create_cstring("Hello World");
    // Extra ref: check_ok unrefs once and we reuse the object below.
    uacpi_object_ref(objects[1]);
    check_ok(objects, &arr);
    view.const_text = "Hello World";
    // Don't include the null byte to check if this is accounted for
    view.length = 11;
    uacpi_object_assign_string(objects[1], view);
    check_ok(objects, &arr);
    // Round 3: a reference object wrapping a string.
    st = uacpi_object_assign_integer(objects[0], 3);
    ensure_ok_status(st);
    tmp = uacpi_object_create_cstring("XXXX");
    objects[1] = uacpi_object_create_reference(tmp);
    uacpi_object_unref(tmp);
    check_ok(objects, &arr);
    // Round 4: a raw byte buffer.
    st = uacpi_object_assign_integer(objects[0], 4);
    ensure_ok_status(st);
    view.const_bytes = buffer;
    view.length = sizeof(buffer);
    objects[1] = uacpi_object_create_buffer(view);
    check_ok(objects, &arr);
    // Round 5: a package of (string, integer, buffer) elements.
    st = uacpi_object_assign_integer(objects[0], 5);
    ensure_ok_status(st);
    pkg[0] = uacpi_object_create_uninitialized();
    view.const_text = "First Element";
    view.length = strlen(view.const_text);
    uacpi_object_assign_string(pkg[0], view);
    pkg[1] = uacpi_object_create_cstring("test");
    st = uacpi_object_assign_integer(pkg[1], 2);
    ensure_ok_status(st);
    buffer[0] = 1;
    buffer[1] = 2;
    buffer[2] = 3;
    view.const_bytes = buffer;
    view.length = 3;
    pkg[2] = uacpi_object_create_buffer(view);
    // NOTE(review): this status is never checked before being overwritten —
    // presumably intentional double-assign coverage in test code.
    st = uacpi_object_assign_buffer(pkg[2], view);
    arr1.objects = pkg;
    arr1.count = 3;
    objects[1] = uacpi_object_create_package(arr1);
    uacpi_object_assign_package(objects[1], arr1);
    check_ok(objects, &arr);
    // Release everything still owned by this function.
    uacpi_object_unref(pkg[0]);
    uacpi_object_unref(pkg[1]);
    uacpi_object_unref(pkg[2]);
    uacpi_object_unref(objects[0]);
}
// Test assertion helpers: fail with the current line number on a value or
// string mismatch.
#define CHECK_VALUE(x, y)                  \
    if ((x) != (y))                        \
        error("check at %d failed", __LINE__);
#define CHECK_STRING(x, y)                 \
    if (strcmp((x), (y)))                  \
        error("check at %d failed", __LINE__);
/*
 * Store `type` into the shared argument object and evaluate the CHEK
 * method with it; CHEK must return a non-zero integer on success.
 */
static void eval_one(uacpi_object *arg, uacpi_address_space type)
{
    uacpi_object_array args = { 0 };
    uacpi_u64 result;
    uacpi_status status;

    args.objects = &arg;
    args.count = 1;

    status = uacpi_object_assign_integer(arg, type);
    ensure_ok_status(status);

    status = uacpi_eval_integer(NULL, "CHEK", &args, &result);
    ensure_ok_status(status);

    if (result == 0)
        error("%s test failed", uacpi_address_space_to_string(type));
}
/*
 * Emulated IPMI operation region: expects one of two fixed command
 * strings and echoes the matching 32-bit response back into the
 * in/out message buffer.
 */
static uacpi_status ipmi_handler(uacpi_region_op op, uacpi_handle op_data)
{
    uacpi_region_ipmi_rw_data *rw = op_data;
    const char *cmd;
    uint64_t reply;

    switch (op) {
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
        return UACPI_STATUS_OK;
    default:
        break;
    }

    CHECK_VALUE(op, UACPI_REGION_OP_IPMI_COMMAND);
    CHECK_VALUE(rw->in_out_message.length, 66);

    cmd = rw->in_out_message.const_text;
    if (strcmp(cmd, "IPMICommandDEADBEE0") == 0)
        reply = 0xDEADBEE0;
    else if (strcmp(cmd, "IPMICommandDEADBEEF") == 0)
        reply = 0xDEADBEEF;
    else
        error("invalid IPMI command %s", cmd);

    CHECK_VALUE(rw->command, reply);
    memcpy(rw->in_out_message.data, &reply, sizeof(reply));
    return UACPI_STATUS_OK;
}
/*
 * Emulated GeneralPurposeIo operation region handler.
 *
 * Region context is a single uint64_t pin-state bitmap allocated at
 * ATTACH; the connection's GPIO controller is resolved from the
 * connection buffer and its _UID selects a 16-bit slice of the bitmap.
 */
static uacpi_status gpio_handler(uacpi_region_op op, uacpi_handle op_data)
{
    uacpi_region_gpio_rw_data *rw_data = op_data;
    uacpi_resource *res;
    uacpi_status ret;
    uacpi_resource_gpio_connection *gpio;
    uacpi_namespace_node *gpio_node;
    uacpi_u64 bit_offset;
    uacpi_u64 *state;
    uacpi_u64 i;

    switch (op) {
    case UACPI_REGION_OP_ATTACH: {
        uacpi_region_attach_data *att_data = op_data;

        CHECK_VALUE(att_data->gpio_info.num_pins, 6);
        /* One zeroed uint64_t of pin state shared by all accesses. */
        att_data->out_region_context = do_calloc(1, sizeof(uint64_t));
        return UACPI_STATUS_OK;
    }
    case UACPI_REGION_OP_DETACH: {
        uacpi_region_detach_data *det_data = op_data;

        free(det_data->region_context);
        return UACPI_STATUS_OK;
    }
    default:
        break;
    }

    /* Resolve the GPIO connection resource from the connection buffer. */
    ret = uacpi_get_resource_from_buffer(rw_data->connection, &res);
    ensure_ok_status(ret);
    CHECK_VALUE(res->type, UACPI_RESOURCE_TYPE_GPIO_CONNECTION);

    gpio = &res->gpio_connection;
    ret = uacpi_namespace_node_find(NULL, gpio->source.string, &gpio_node);
    ensure_ok_status(ret);

    /* _UID picks this controller's 16-bit slice of the state bitmap. */
    ret = uacpi_eval_simple_integer(gpio_node, "_UID", &bit_offset);
    ensure_ok_status(ret);
    bit_offset *= 16;

    state = rw_data->region_context;
    if (rw_data->num_pins == 0 || rw_data->num_pins > 3)
        error("bogus number of pins %d", rw_data->num_pins);

    if (op == UACPI_REGION_OP_GPIO_READ)
        rw_data->value = 0;

    for (i = 0; i < rw_data->num_pins; ++i) {
        uint64_t abs_pin = i + rw_data->pin_offset;
        bool value;

        if (op == UACPI_REGION_OP_GPIO_READ) {
            /*
             * NOTE(review): the read path tests bit (bit_offset + abs_pin)
             * of *state, while the write path below sets/clears bit
             * abs_pin without applying bit_offset. This asymmetry is only
             * consistent if write targets always have bit_offset == 0 —
             * confirm against the test AML before changing either side.
             */
            value = (*state >> bit_offset) & (1ull << abs_pin);
            if (value)
                rw_data->value |= (1ull << i);
        } else {
            unsigned long long mask = 1ull << abs_pin;

            CHECK_VALUE(op, UACPI_REGION_OP_GPIO_WRITE);
            value = rw_data->value & (1ull << i);
            if (value)
                *state |= mask;
            else
                *state &= ~mask;
        }
    }

    uacpi_free_resource(res);
    return UACPI_STATUS_OK;
}
/*
 * Emulated PCC (Platform Communications Channel) region: ATTACH pins the
 * shared buffer as region context; SEND verifies the "HELLO" message plus
 * a marker word at offset 12 and patches the marker with the reply value.
 */
static uacpi_status pcc_handler(uacpi_region_op op, uacpi_handle op_data)
{
    uacpi_region_pcc_send_data *send = op_data;
    uint32_t word;

    switch (op) {
    case UACPI_REGION_OP_ATTACH: {
        uacpi_region_attach_data *attach = op_data;

        CHECK_VALUE(attach->pcc_info.buffer.length, 0xFF);
        CHECK_VALUE(attach->pcc_info.subspace_id, 0xCA);
        attach->out_region_context = attach->pcc_info.buffer.data;
        return UACPI_STATUS_OK;
    }
    case UACPI_REGION_OP_DETACH:
        return UACPI_STATUS_OK;
    default:
        break;
    }

    CHECK_VALUE(op, UACPI_REGION_OP_PCC_SEND);
    CHECK_VALUE(send->buffer.data, send->region_context);
    CHECK_STRING(send->buffer.const_text, "HELLO");

    memcpy(&word, send->buffer.bytes + 12, sizeof(word));
    CHECK_VALUE(word, 0xDEADBEEF);

    word = 0xBEEFDEAD;
    memcpy(send->buffer.bytes + 12, &word, sizeof(word));
    return UACPI_STATUS_OK;
}
/*
 * Emulated PRM (Platform Runtime Mechanism) region: verifies the
 * "helloworld" request and overwrites it with "goodbyeworld".
 */
static uacpi_status prm_handler(uacpi_region_op op, uacpi_handle op_data)
{
    static const char reply[] = "goodbyeworld";
    uacpi_region_prm_rw_data *rw = op_data;

    switch (op) {
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
        return UACPI_STATUS_OK;
    default:
        break;
    }

    CHECK_VALUE(op, UACPI_REGION_OP_PRM_COMMAND);
    CHECK_VALUE(rw->in_out_message.length, 26);
    CHECK_STRING(rw->in_out_message.const_text, "helloworld");

    memcpy(rw->in_out_message.text, reply, sizeof(reply));
    return UACPI_STATUS_OK;
}
/*
 * Emulated FFixedHW region: ATTACH checks the generic address info,
 * COMMAND verifies the request GUID string and replies with "ok".
 */
static uacpi_status ffixedhw_handler(uacpi_region_op op, uacpi_handle op_data)
{
    static const char response[] = "ok";
    uacpi_region_ffixedhw_rw_data *rw_data = op_data;

    if (op == UACPI_REGION_OP_ATTACH) {
        uacpi_region_attach_data *att_data = op_data;

        CHECK_VALUE(att_data->generic_info.base, 0xCAFEBABE);
        CHECK_VALUE(att_data->generic_info.length, 0xFEFECACA);
        return UACPI_STATUS_OK;
    }
    if (op == UACPI_REGION_OP_DETACH)
        return UACPI_STATUS_OK;

    CHECK_VALUE(op, UACPI_REGION_OP_FFIXEDHW_COMMAND);
    CHECK_VALUE(rw_data->in_out_message.length, 256);
    CHECK_STRING(rw_data->in_out_message.const_text, "someguidandstuff");

    /*
     * Fix: copy from `response` itself instead of a duplicated "ok"
     * literal — previously the source literal and sizeof(response) could
     * silently desynchronize if the reply string ever changed.
     */
    memcpy(rw_data->in_out_message.text, response, sizeof(response));
    return UACPI_STATUS_OK;
}
/*
 * GenericSerialBus region handler: checks that the interpreter passes the
 * expected direction, buffer length, access attribute and (where used)
 * access length for each serial protocol exercised by the test AML.
 * rw_data->command encodes the sub-case; the connection's I2C controller
 * _UID distinguishes device 0 from device 1.
 */
static uacpi_status generic_serial_bus_handler(
    uacpi_region_op op, uacpi_handle op_data
)
{
    uacpi_region_serial_rw_data *rw_data = op_data;
    uacpi_resource *res;
    uacpi_status ret;
    uacpi_resource_i2c_connection *gpio; /* NOTE(review): misnamed — I2C, not GPIO */
    uacpi_namespace_node *i2c_node;
    uacpi_u64 i2c_offset;
    uacpi_u16 response;

    if (op == UACPI_REGION_OP_ATTACH || op == UACPI_REGION_OP_DETACH)
        return UACPI_STATUS_OK;

    CHECK_VALUE(
        true, (op == UACPI_REGION_OP_SERIAL_READ ||
               op == UACPI_REGION_OP_SERIAL_WRITE)
    );

    /* Resolve the I2C connection resource and its controller's _UID. */
    ret = uacpi_get_resource_from_buffer(rw_data->connection, &res);
    ensure_ok_status(ret);
    CHECK_VALUE(res->type, UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION);

    gpio = &res->i2c_connection;
    ret = uacpi_namespace_node_find(
        NULL, gpio->common.source.string, &i2c_node
    );
    ensure_ok_status(ret);

    ret = uacpi_eval_simple_integer(i2c_node, "_UID", &i2c_offset);
    ensure_ok_status(ret);

    /* Each sub-case pins direction, buffer length and access attribute. */
    switch ((int)rw_data->command) {
    case 0x111:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_WRITE);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 2);
        CHECK_VALUE(rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_QUICK);
        break;
    case 0x121:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_WRITE);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 3);
        CHECK_VALUE(
            rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE
        );
        break;
    case 0x122:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_WRITE);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 3);
        CHECK_VALUE(rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_BYTE);
        break;
    case 0x124:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_READ);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 4);
        CHECK_VALUE(rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_WORD);
        break;
    case 0x128:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_READ);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 257);
        CHECK_VALUE(rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_BLOCK);
        break;
    case 0x228:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_WRITE);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 4);
        CHECK_VALUE(
            rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL
        );
        break;
    case 0x229:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_READ);
        CHECK_VALUE(i2c_offset, 0);
        CHECK_VALUE(rw_data->in_out_buffer.length, 257);
        CHECK_VALUE(
            rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL
        );
        break;
    case 0x23B:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_WRITE);
        CHECK_VALUE(i2c_offset, 1);
        CHECK_VALUE(rw_data->in_out_buffer.length, 17);
        CHECK_VALUE(rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_BYTES);
        CHECK_VALUE(rw_data->access_length, 15);
        break;
    case 0x23C:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_READ);
        CHECK_VALUE(i2c_offset, 1);
        CHECK_VALUE(rw_data->in_out_buffer.length, 257);
        CHECK_VALUE(
            rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_RAW_BYTES
        );
        CHECK_VALUE(rw_data->access_length, 255);
        break;
    case 0x23D:
        CHECK_VALUE(op, UACPI_REGION_OP_SERIAL_READ);
        CHECK_VALUE(i2c_offset, 1);
        CHECK_VALUE(rw_data->in_out_buffer.length, 257);
        CHECK_VALUE(
            rw_data->access_attribute, UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES
        );
        CHECK_VALUE(rw_data->access_length, 123);
        break;
    default:
        error("bad serial command %" PRIu64, rw_data->command);
    }

    if (op == UACPI_REGION_OP_SERIAL_WRITE) {
        uacpi_u16 value;

        /* Writes carry the command value in the first two data bytes. */
        memcpy(&value, rw_data->in_out_buffer.const_bytes, sizeof(value));
        CHECK_VALUE(value, rw_data->command);
    }

    /* Reply with command + 1 so the AML can verify the round trip. */
    response = rw_data->command + 1;
    memcpy(rw_data->in_out_buffer.bytes, &response, sizeof(response));

    uacpi_free_resource(res);
    return UACPI_STATUS_OK;
}
void test_address_spaces(void)
{
uacpi_status st;
uacpi_object *arg;
arg = uacpi_object_create_integer(0);
st = uacpi_install_address_space_handler(
uacpi_namespace_root(), UACPI_ADDRESS_SPACE_IPMI, ipmi_handler, NULL
);
ensure_ok_status(st);
eval_one(arg, UACPI_ADDRESS_SPACE_IPMI);
st = uacpi_install_address_space_handler(
uacpi_namespace_root(), UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO,
gpio_handler, NULL
);
ensure_ok_status(st);
eval_one(arg, UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO);
st = uacpi_install_address_space_handler(
uacpi_namespace_root(), UACPI_ADDRESS_SPACE_PCC, pcc_handler, NULL
);
ensure_ok_status(st);
eval_one(arg, UACPI_ADDRESS_SPACE_PCC);
st = uacpi_install_address_space_handler(
uacpi_namespace_root(), UACPI_ADDRESS_SPACE_PRM, prm_handler, NULL
);
ensure_ok_status(st);
eval_one(arg, UACPI_ADDRESS_SPACE_PRM);
st = uacpi_install_address_space_handler(
uacpi_namespace_root(), UACPI_ADDRESS_SPACE_FFIXEDHW, ffixedhw_handler,
NULL
);
ensure_ok_status(st);
eval_one(arg, UACPI_ADDRESS_SPACE_FFIXEDHW);
st = uacpi_install_address_space_handler(
uacpi_namespace_root(), UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS,
generic_serial_bus_handler, NULL
);
ensure_ok_status(st);
eval_one(arg, UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS);
uacpi_object_unref(arg);
}

View File

@ -0,0 +1,219 @@
#pragma once
#include "helpers.h"
#include <stdbool.h>
#include <string.h>
/* Kinds of command-line arguments understood by the parser. */
typedef enum {
    ARG_FLAG,       /* boolean switch, takes no value */
    ARG_PARAM,      /* takes exactly one value */
    ARG_LIST,       /* takes one or more values */
    ARG_HELP,       /* prints usage and exits */
    ARG_POSITIONAL, /* matched by position, not by name */
} arg_type_t;

/* Static description of one argument plus its parsed state. */
typedef struct {
    const char *as_full;     /* long name (after "--"), or positional name */
    char as_short;           /* one-letter short form (after "-") */
    arg_type_t type;
    const char *description; /* shown by print_help() */
    bool is_optional;
    bool parsed;             /* set once the argument is seen */
    vector_t values;         /* collected value strings (not owned) */
} arg_spec_t;

/* Convenience initializers for each argument kind. */
#define ARG_POS(name, desc) \
    { \
        .as_full = name, \
        .type = ARG_POSITIONAL, \
        .description = desc, \
    }

#define ARG_LIST(name, short, desc) \
    { \
        .as_full = name, \
        .as_short = short, \
        .type = ARG_LIST, \
        .description = desc, \
        .is_optional = true, \
    }

#define ARG_FLAG(name, short, desc) \
    { \
        .as_full = name, \
        .as_short = short, \
        .type = ARG_FLAG, \
        .description = desc, \
        .is_optional = true, \
    }

#define ARG_PARAM(name, short, desc) \
    { \
        .as_full = name, \
        .as_short = short, \
        .type = ARG_PARAM, \
        .description = desc, \
        .is_optional = true, \
    }

#define ARG_HELP(name, short, desc) \
    { \
        .as_full = name, \
        .as_short = short, \
        .type = ARG_HELP, \
        .description = desc, \
        .is_optional = true, \
    }

/* Full argument specification: positionals first, then named options. */
typedef struct {
    arg_spec_t *const *positional_args;
    size_t num_positional_args;
    arg_spec_t *const *option_args;
    size_t num_option_args;
} arg_parser_t;
static inline void print_help(const arg_parser_t *parser)
{
size_t i;
printf("uACPI test runner:\n");
for (i = 0; i < parser->num_positional_args; i++)
printf(
" [%s] %s\n", parser->positional_args[i]->as_full,
parser->positional_args[i]->description
);
for (i = 0; i < parser->num_option_args; i++)
printf(
"%s [--%s/-%c] %s\n",
parser->option_args[i]->is_optional ? "(optional)" : " ",
parser->option_args[i]->as_full,
parser->option_args[i]->as_short,
parser->option_args[i]->description
);
}
/*
 * True when the string looks like a named option: "-x" (exactly two
 * characters starting with '-') or "--long" (three or more characters
 * starting with "--").
 */
static inline bool is_arg(const char *arg)
{
    size_t len = strlen(arg);

    if (len < 2)
        return false;
    if (len == 2)
        return arg[0] == '-';

    return arg[0] == '-' && arg[1] == '-';
}
/*
 * Parse argv against the parser spec. Positional arguments are consumed
 * first (argv[1..N]); everything after is matched as "--long" / "-s"
 * options, with following non-option words collected as their values.
 * Any error (or --help) prints usage and/or exits via error().
 */
static inline void parse_args(
    const arg_parser_t *parser, int argc, char *argv[]
)
{
    size_t num_args = argc;
    arg_spec_t *active_spec = NULL;
    size_t arg_index;

    if (num_args < 2) {
        print_help(parser);
        exit(1);
    }

    if (parser->num_positional_args) {
        if ((num_args - 1) < parser->num_positional_args)
            error(
                "expected at least %zu positional arguments",
                parser->num_positional_args
            );

        for (arg_index = 0; arg_index < parser->num_positional_args;
             ++arg_index)
            vector_add(
                &parser->positional_args[arg_index]->values,
                argv[1 + arg_index], 0
            );
    }

    for (arg_index = 1 + parser->num_positional_args; arg_index < num_args;
         ++arg_index) {
        char *current_arg = argv[arg_index];
        bool is_new_arg = is_arg(current_arg);
        arg_spec_t *new_spec = NULL;
        size_t length;

        if (active_spec) {
            if (!is_new_arg) {
                /* A value word: attach it to the currently open option. */
                if (active_spec->type == ARG_FLAG)
                    error("unexpected argument %s", current_arg);
                if (active_spec->type == ARG_PARAM &&
                    active_spec->values.count == 1)
                    error("too many arguments for %s", active_spec->as_full);

                vector_add(&active_spec->values, current_arg, 0);
                continue;
            }

            /* A new option begins: the previous one must have its value. */
            if ((active_spec->type == ARG_PARAM ||
                 active_spec->type == ARG_LIST) &&
                active_spec->values.count == 0)
                error("expected an argument for %s", active_spec->as_full);
        }

        /* Match "-s" by short name, anything else by its long name. */
        length = strlen(current_arg);
        if (length >= 2) {
            size_t i;

            for (i = 0; i < parser->num_option_args; i++) {
                arg_spec_t *spec = parser->option_args[i];

                if (length == 2 && spec->as_short == current_arg[1]) {
                    new_spec = spec;
                    break;
                } else if (strcmp(spec->as_full, &current_arg[2]) == 0) {
                    new_spec = spec;
                    break;
                }
            }
        }

        if (new_spec == NULL)
            error("unexpected argument %s", current_arg);
        active_spec = new_spec;

        if (active_spec->type == ARG_HELP) {
            print_help(parser);
            exit(1);
        }
        active_spec->parsed = true;
    }

    /*
     * Fix: the in-loop "expected an argument" check only fires when
     * another option follows, so a trailing value-taking option (e.g.
     * "prog pos --param" with no value) used to slip through silently.
     * Validate the last open option here as well.
     */
    if (active_spec &&
        (active_spec->type == ARG_PARAM || active_spec->type == ARG_LIST) &&
        active_spec->values.count == 0)
        error("expected an argument for %s", active_spec->as_full);
}
/* True if the option appeared on the command line. */
static inline bool is_set(const arg_spec_t *spec)
{
    return spec->parsed;
}

/* First value of the argument; fatal error if it has none. */
static inline const char *get(const arg_spec_t *spec)
{
    if (spec->values.count == 0)
        error("no argument provided for %s", spec->as_full);
    return spec->values.blobs[0].data;
}

/* First value parsed as a base-10 unsigned integer (no error checking). */
static inline uint64_t get_uint(const arg_spec_t *spec)
{
    return strtoull(get(spec), NULL, 10);
}

/* Like get_uint(), but returns default_value when the option is unset. */
static inline uint64_t get_uint_or(
    const arg_spec_t *spec, uint64_t default_value
)
{
    if (is_set(spec))
        return get_uint(spec);
    return default_value;
}

View File

@ -0,0 +1,170 @@
#include "argparser.h"
#include "helpers.h"
#include <stdio.h>
#include <string.h>
#include <uacpi/acpi.h>
#include <uacpi/kernel_api.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>
/* Forward uACPI log output to stdout; the text already ends in '\n'. */
void uacpi_kernel_log(enum uacpi_log_level lvl, const char *text)
{
    printf("[%s] %s", uacpi_log_level_to_string(lvl), text);
}

/* Identity "mapping": this runner hands uACPI real pointers as
 * physical addresses, so mapping is a cast. */
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len)
{
    UACPI_UNUSED(len);
    return (void*)((uintptr_t)addr);
}

void uacpi_kernel_unmap(void *ptr, uacpi_size len)
{
    /* Nothing to undo for the identity mapping above. */
    UACPI_UNUSED(ptr);
    UACPI_UNUSED(len);
}

/* Address of the fabricated RSDP, set up by main(). */
uacpi_phys_addr g_rsdp;

uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_addr)
{
    *out_addr = g_rsdp;
    return UACPI_STATUS_OK;
}
/*
 * Minimal hand-assembled DSDT: a 36-byte SDT header (the signature is
 * rewritten to "DSDT" by do_make_xsdt) followed by AML that appears to
 * define Name(VAL_, "TestRunner") — 0x08 NameOp, "VAL_", 0x0D string
 * prefix. NOTE(review): byte layout taken as-is; regenerate with iasl
 * if it ever needs to change.
 */
static uint8_t test_dsdt[] = {
    0x53, 0x53, 0x44, 0x54, 0x35, 0x00, 0x00, 0x00,
    0x01, 0xa1, 0x75, 0x54, 0x45, 0x53, 0x54, 0x00,
    0x4f, 0x56, 0x45, 0x52, 0x52, 0x49, 0x44, 0x45,
    0xf0, 0xf0, 0xf0, 0xf0, 0x49, 0x4e, 0x54, 0x4c,
    0x25, 0x09, 0x20, 0x20, 0x08, 0x56, 0x41, 0x4c,
    0x5f, 0x0d, 0x54, 0x65, 0x73, 0x74, 0x52, 0x75,
    0x6e, 0x6e, 0x65, 0x72, 0x00
};

/* Pre-built MCFG ("MCFG" signature, length 0x3C) used by the table
 * installation tests below. */
static uint8_t test_mcfg[] = {
    0x4d, 0x43, 0x46, 0x47, 0x3c, 0x00, 0x00, 0x00,
    0x01, 0x39, 0x48, 0x50, 0x51, 0x4f, 0x45, 0x4d,
    0x38, 0x35, 0x34, 0x39, 0x20, 0x20, 0x20, 0x20,
    0x01, 0x00, 0x00, 0x00, 0x48, 0x50, 0x20, 0x20,
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f,
    0x00, 0x00, 0x00, 0x00
};
/* Fail the test unless the table's 4-byte signature matches `signature`. */
static void ensure_signature_is(const char *signature, uacpi_table tbl)
{
    if (strncmp(tbl.hdr->signature, signature, 4) != 0)
        error(
            "incorrect table signature: expected %s got %.4s\n", signature,
            tbl.hdr->signature
        );
}
/* Look a table up by signature, verify it, log "XXXX OK", release it. */
static void find_one_table(const char *signature)
{
    uacpi_table tbl;
    uacpi_status status;

    status = uacpi_table_find_by_signature(signature, &tbl);
    ensure_ok_status(status);
    ensure_signature_is(signature, tbl);

    printf("%4.4s OK\n", signature);
    uacpi_table_unref(&tbl);
}
/* The FADT and DSDT must always be discoverable by signature. */
static void test_basic_operation(void)
{
    const char *signatures[] = { ACPI_FADT_SIGNATURE, ACPI_DSDT_SIGNATURE };
    size_t i;

    for (i = 0; i < sizeof(signatures) / sizeof(signatures[0]); ++i)
        find_one_table(signatures[i]);
}
/* Install the MCFG twice — by pointer and by physical address. */
static void test_table_installation(void)
{
    uacpi_table tbl;
    uacpi_status status;

    /* Install by virtual pointer, then confirm it can be looked up. */
    status = uacpi_table_install(test_mcfg, &tbl);
    ensure_ok_status(status);
    ensure_signature_is(ACPI_MCFG_SIGNATURE, tbl);
    uacpi_table_unref(&tbl);

    find_one_table(ACPI_MCFG_SIGNATURE);

    /* Installing the same table by physical address must also work. */
    status = uacpi_table_install_physical(
        (uacpi_phys_addr)((uintptr_t)test_mcfg), &tbl
    );
    ensure_ok_status(status);
    ensure_signature_is(ACPI_MCFG_SIGNATURE, tbl);
    uacpi_table_unref(&tbl);
}
/* Runnable test cases, selected by name via the positional argument. */
static struct {
    const char *name;
    void (*func)(void);
} test_cases[] = {
    { "basic-operation", test_basic_operation },
    { "table-installation", test_table_installation },
};

static arg_spec_t TEST_CASE_ARG = ARG_POS("test-case", "name of the test case");
static arg_spec_t HELP_ARG = ARG_HELP(
    "help", 'h', "Display this menu and exit"
);

static arg_spec_t *const POSITIONAL_ARGS[] = {
    &TEST_CASE_ARG,
};
static arg_spec_t *const OPTION_ARGS[] = {
    &HELP_ARG,
};

/* Parser wiring: one positional (the test case name) plus --help. */
static const arg_parser_t PARSER = {
    .positional_args = POSITIONAL_ARGS,
    .num_positional_args = UACPI_ARRAY_SIZE(POSITIONAL_ARGS),
    .option_args = OPTION_ARGS,
    .num_option_args = UACPI_ARRAY_SIZE(OPTION_ARGS),
};
int main(int argc, char *argv[])
{
    /* Early-table-access scratch buffer; static so it outlives setup. */
    static uint8_t early_table_buf[4096];
    struct acpi_rsdp rsdp = { 0 };
    struct full_xsdt *xsdt;
    uacpi_status st;
    const char *test_case;
    size_t i;

    parse_args(&PARSER, argc, argv);

    /* Fabricate an RSDP/XSDT chain around the built-in test DSDT. */
    xsdt = make_xsdt_blob(&rsdp, test_dsdt, sizeof(test_dsdt));
    g_rsdp = (uacpi_phys_addr)((uintptr_t)&rsdp);

    st = uacpi_setup_early_table_access(
        early_table_buf, sizeof(early_table_buf)
    );
    ensure_ok_status(st);

    /* Dispatch to the named test case, then tear everything down. */
    test_case = get(&TEST_CASE_ARG);
    for (i = 0; i < UACPI_ARRAY_SIZE(test_cases); i++) {
        if (strcmp(test_case, test_cases[i].name) == 0) {
            test_cases[i].func();
            /* Reset uACPI first so the XSDT can be freed safely. */
            uacpi_state_reset();
            delete_xsdt(xsdt, 0);
            return 0;
        }
    }

    error("unknown test case '%s'", test_case);
    /* Not reached: error() exits. */
    return 1;
}

View File

@ -0,0 +1,237 @@
#include "helpers.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uacpi/acpi.h>
/*
 * ACPI checksum: return the byte that makes the sum of all `size`
 * bytes of `table` equal 0 modulo 256.
 */
static uacpi_u8 gen_checksum(void *table, uacpi_size size)
{
    const uacpi_u8 *bytes = table;
    uacpi_u8 sum = 0;
    uacpi_size idx;

    for (idx = 0; idx < size; ++idx)
        sum += bytes[idx];

    return 256 - sum;
}
/* Stamp the fixed OEM ID used by every generated table. */
void set_oem(char (*oemid)[6])
{
    static const char id[6] = { 'u', 'O', 'E', 'M', 'I', 'D' };

    memcpy(oemid, id, sizeof(id));
}

/* Stamp the fixed OEM table ID used by every generated table. */
void set_oem_table_id(char (*oemid_table_id)[8])
{
    static const char id[8] = { 'u', 'T', 'E', 'S', 'T', 'T', 'B', 'L' };

    memcpy(oemid_table_id, id, sizeof(id));
}
/* Load a table blob from disk, requiring at least a full SDT header. */
static void get_table_path(blob_t *item, const char *path)
{
    *item = read_entire_file(path, sizeof(struct acpi_sdt_hdr));
}

/* Copy an in-memory table into a freshly allocated blob (caller frees). */
static void get_table_blob(blob_t *item, const void *data, size_t size)
{
    item->data = do_malloc(size);
    item->size = size;
    memcpy(item->data, data, size);
}
/*
 * Build a fake RSDP -> XSDT/RSDT -> {FADT+FACS, DSDT, SSDTs} chain on the
 * heap. tables[0] becomes the DSDT, the rest become SSDTs; their
 * signatures and checksums are rewritten IN PLACE, so the blobs must be
 * writable and at least hdr->length bytes long.
 */
static struct full_xsdt *do_make_xsdt(
    struct acpi_rsdp *rsdp, const blob_t *tables, size_t num_tables
)
{
    size_t xsdt_bytes = sizeof(struct full_xsdt);
    struct full_xsdt *xsdt;
    size_t i;
    struct acpi_fadt *fadt;
    struct acpi_facs *facs;
    struct acpi_sdt_hdr *dsdt;

    memcpy(
        &rsdp->signature, ACPI_RSDP_SIGNATURE,
        sizeof(ACPI_RSDP_SIGNATURE) - 1
    );
    set_oem(&rsdp->oemid);

    /* One flexible-array slot per SSDT (everything past tables[0]). */
    xsdt_bytes += (num_tables - 1) * sizeof(struct acpi_sdt_hdr*);
    xsdt = do_calloc(xsdt_bytes, 1);
    set_oem(&xsdt->hdr.oemid);
    set_oem_table_id(&xsdt->hdr.oem_table_id);

    for (i = 0; i < num_tables; ++i) {
        struct acpi_sdt_hdr *hdr = tables[i].data;
        char *signature = ACPI_DSDT_SIGNATURE;

        if (hdr->length > tables[i].size)
            error("invalid table %zu size", i);

        if (i > 0) {
            signature = ACPI_SSDT_SIGNATURE;
            xsdt->ssdts[i - 1] = hdr;
        }

        /* Force the expected signature and refresh the checksum. */
        memcpy(hdr, signature, sizeof(uacpi_object_name));
        hdr->checksum = 0;
        hdr->checksum = gen_checksum(hdr, hdr->length);
    }

    /* Fabricate a FADT with bogus-but-valid register blocks. */
    fadt = do_calloc(1, sizeof(*fadt));
    set_oem(&fadt->hdr.oemid);
    set_oem_table_id(&fadt->hdr.oem_table_id);
    fadt->hdr.length = sizeof(*fadt);
    fadt->hdr.revision = 6;
    fadt->pm1a_cnt_blk = 0xFFEE;
    fadt->pm1_cnt_len = 2;
    fadt->pm1a_evt_blk = 0xDEAD;
    fadt->pm1_evt_len = 4;
    fadt->pm2_cnt_blk = 0xCCDD;
    fadt->pm2_cnt_len = 1;
    fadt->gpe0_blk_len = 0x20;
    fadt->gpe0_blk = 0xDEAD;
    fadt->gpe1_base = 128;
    fadt->gpe1_blk = 0xBEEF;
    fadt->gpe1_blk_len = 0x20;
    fadt->x_dsdt = (uacpi_phys_addr)((uintptr_t)tables[0].data);
    memcpy(
        fadt->hdr.signature, ACPI_FADT_SIGNATURE,
        sizeof(ACPI_FADT_SIGNATURE) - 1
    );

    facs = do_calloc(1, sizeof(*facs));
    facs->length = sizeof(*facs);
    memcpy(
        facs->signature, ACPI_FACS_SIGNATURE,
        sizeof(ACPI_FACS_SIGNATURE) - 1
    );
    fadt->x_firmware_ctrl = (uintptr_t)facs;

    fadt->hdr.checksum = gen_checksum(fadt, sizeof(*fadt));
    xsdt->fadt = fadt;

    xsdt->hdr.length = sizeof(*xsdt) +
        sizeof(struct acpi_sdt_hdr*) * (num_tables - 1);

    /* Mirror the DSDT's revision/OEM info into the root table header. */
    dsdt = tables[0].data;
    xsdt->hdr.revision = dsdt->revision;
    memcpy(xsdt->hdr.oemid, dsdt->oemid, sizeof(dsdt->oemid));
    xsdt->hdr.oem_revision = dsdt->oem_revision;

    if (sizeof(void*) == 4) {
        /* 32-bit host: publish an RSDT via the ACPI 1.0 RSDP fields. */
        memcpy(
            xsdt->hdr.signature, ACPI_RSDT_SIGNATURE,
            sizeof(ACPI_XSDT_SIGNATURE) - 1
        );
        rsdp->rsdt_addr = (size_t)xsdt;
        rsdp->revision = 1;
        rsdp->checksum = gen_checksum(rsdp, offsetof(struct acpi_rsdp, length));
    } else {
        /* 64-bit host: publish an XSDT via the ACPI 2.0+ fields. */
        memcpy(
            xsdt->hdr.signature, ACPI_XSDT_SIGNATURE,
            sizeof(ACPI_XSDT_SIGNATURE) - 1
        );
        rsdp->xsdt_addr = (size_t)xsdt;
        rsdp->length = sizeof(*rsdp);
        rsdp->revision = 2;
        rsdp->checksum = gen_checksum(rsdp, offsetof(struct acpi_rsdp, length));
        rsdp->extended_checksum = gen_checksum(rsdp, sizeof(*rsdp));
    }

    xsdt->hdr.checksum = gen_checksum(xsdt, xsdt->hdr.length);
    return xsdt;
}
/*
 * Build the fake XSDT chain from a DSDT on disk plus optional SSDT
 * paths. Blob slot 0 is the DSDT; each SSDT path fills the next slot.
 */
struct full_xsdt *make_xsdt(
    struct acpi_rsdp *rsdp, const char *dsdt_path, const vector_t *ssdts
)
{
    struct full_xsdt *xsdt;
    vector_t blobs;
    size_t idx;

    vector_init(&blobs, ssdts->count + 1);
    get_table_path(&blobs.blobs[0], dsdt_path);
    for (idx = 0; idx < ssdts->count; ++idx)
        get_table_path(&blobs.blobs[idx + 1], ssdts->blobs[idx].data);

    xsdt = do_make_xsdt(rsdp, blobs.blobs, blobs.count);
    vector_cleanup(&blobs);
    return xsdt;
}
/* Wrap an in-memory DSDT into a single-table fake XSDT chain. */
struct full_xsdt *make_xsdt_blob(
    struct acpi_rsdp *rsdp, const void *dsdt, size_t dsdt_size
)
{
    blob_t dsdt_blob;

    get_table_blob(&dsdt_blob, dsdt, dsdt_size);
    return do_make_xsdt(rsdp, &dsdt_blob, 1);
}
/*
 * Free everything produced by make_xsdt*(). The DSDT and FACS are
 * reached through the FADT's extended pointers; num_tables is the
 * number of SSDT slots to release.
 */
void delete_xsdt(struct full_xsdt *xsdt, size_t num_tables)
{
    size_t idx;

    if (xsdt->fadt != NULL) {
        free((void*)((uintptr_t)xsdt->fadt->x_dsdt));
        free((struct acpi_facs*)((uintptr_t)xsdt->fadt->x_firmware_ctrl));
        free(xsdt->fadt);
    }

    for (idx = 0; idx < num_tables; idx++)
        free(xsdt->ssdts[idx]);

    free(xsdt);
}
/*
 * Read a whole file into a malloc'd blob; any failure (including the
 * file being smaller than min_size) aborts the test via error().
 */
blob_t read_entire_file(const char *path, size_t min_size)
{
    blob_t blob = { 0 };
    FILE *file;
    long file_size;
    void *contents;

    file = fopen(path, "rb");
    if (file == NULL)
        error("failed to open file %s", path);

    if (fseek(file, 0, SEEK_END) != 0)
        error("failed to seek file %s", path);

    file_size = ftell(file);
    if (file_size < 0)
        error("failed to get size of file %s", path);
    if (file_size < (long)min_size)
        error("file %s is too small", path);

    if (fseek(file, 0, SEEK_SET) != 0)
        error("failed to seek file %s", path);

    contents = do_malloc(file_size);
    if (fread(contents, file_size, 1, file) != 1)
        error("failed to read from %s", path);
    if (fclose(file) != 0)
        error("failed to close file %s", path);

    blob.data = contents;
    blob.size = file_size;
    return blob;
}

View File

@ -0,0 +1,294 @@
#pragma once
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <uacpi/acpi.h>
#include <uacpi/internal/helpers.h>
#include <uacpi/status.h>
#include <uacpi/uacpi.h>
/* Marks functions that never return (error() below); no-op elsewhere. */
#ifdef __GNUC__
#define NORETURN __attribute__((__noreturn__))
#else
#define NORETURN
#endif
/*
 * Print "unexpected error: <formatted message>\n" to stderr, reset
 * uACPI state, and terminate the process with exit code 1.
 */
NORETURN static inline void error(const char *format, ...)
{
    va_list args;

    va_start(args, format);
    fprintf(stderr, "unexpected error: ");
    vfprintf(stderr, format, args);
    fputc('\n', stderr);
    va_end(args);

    uacpi_state_reset();
    exit(1);
}
/* malloc that treats allocation failure as a fatal test error. */
static inline void *do_malloc(size_t size)
{
    void *mem = malloc(size);

    if (mem == NULL)
        error("failed to allocate %zu bytes of memory", size);
    return mem;
}

/* calloc that treats allocation failure as a fatal test error. */
static inline void *do_calloc(size_t nmemb, size_t size)
{
    void *mem = calloc(nmemb, size);

    if (mem == NULL)
        error("failed to allocate %zu bytes of memory", nmemb * size);
    return mem;
}

/* realloc that treats allocation failure as a fatal test error. */
static inline void *do_realloc(void *ptr, size_t size)
{
    void *mem = realloc(ptr, size);

    if (mem == NULL)
        error("failed to allocate %zu bytes of memory", size);
    return mem;
}
/* A (pointer, size) pair; ownership of `data` depends on the producer. */
typedef struct {
    void *data;
    size_t size;
} blob_t;

/* Growable array of blobs; see vector_init()/vector_add(). */
typedef struct {
    blob_t *blobs;
    size_t capacity;
    size_t count;
} vector_t;
/*
 * Pre-size the vector with `items` zeroed slots. Note that count is set
 * to `items` too: the slots count as populated so callers can fill them
 * by direct indexing (see make_xsdt()), not only via vector_add().
 */
static inline void vector_init(vector_t *vector, size_t items)
{
    vector->blobs = do_calloc(items, sizeof(*vector->blobs));
    vector->capacity = items;
    vector->count = items;
}
/* Append a (data, size) pair, growing geometrically (8 slots minimum). */
static inline void vector_add(vector_t *vector, void *data, size_t size)
{
    blob_t *slot;

    if (vector->count >= vector->capacity) {
        size_t new_capacity = vector->capacity ? vector->capacity * 2 : 8;

        vector->blobs = do_realloc(
            vector->blobs, new_capacity * sizeof(*vector->blobs)
        );
        vector->capacity = new_capacity;
    }

    slot = &vector->blobs[vector->count];
    slot->data = data;
    slot->size = size;
    vector->count += 1;
}
/*
 * Release the blob array (blob data itself is not owned by the vector).
 * Fix: also NULL the pointer — previously a vector_add() after cleanup
 * would realloc a dangling pointer (use-after-free).
 */
static inline void vector_cleanup(vector_t *vector)
{
    free(vector->blobs);
    vector->blobs = NULL;
    vector->capacity = 0;
    vector->count = 0;
}
/* Map a member pointer back to its enclosing struct; NULL passes through. */
static inline void *get_container(void *value, size_t offset)
{
    if (value == NULL)
        return NULL;
    return (char*)value - offset;
}

#define CONTAINER(type, field, value) \
    ((type*)get_container((value), offsetof(type, field)))
/* Intrusive chaining node; embed one in each struct stored in a table. */
typedef struct hash_node {
    struct hash_node *prev;
    struct hash_node *next;
    uint64_t key;
} hash_node_t;

/* Bucketed hash table of intrusive nodes keyed by uint64_t. */
typedef struct {
    hash_node_t **entries;
    size_t capacity;
    size_t count;
} hash_table_t;
/* 64-bit integer mixer: alternating multiply and xor-shift rounds. */
static inline uint64_t make_hash(uint64_t key)
{
    uint64_t h = key;

    h *= UINT64_C(0xe9770214b82cf957);
    h ^= h >> 47;
    h *= UINT64_C(0x2bdd9d20d060fc9b);
    h ^= h >> 44;
    h *= UINT64_C(0x65c487023b406173);
    return h;
}
/* Walk the bucket chain for `key`; NULL if absent (or table is empty). */
static inline hash_node_t *hash_table_find(hash_table_t *table, uint64_t key)
{
    hash_node_t *node;

    if (table->capacity == 0)
        return NULL;

    node = table->entries[make_hash(key) % table->capacity];
    while (node && node->key != key)
        node = node->next;
    return node;
}

#define HASH_TABLE_FIND(table, key, type, field) \
    CONTAINER(type, field, hash_table_find((table), (key)))
/*
 * Return the node with `key`, or allocate a zeroed host object of `size`
 * bytes whose hash_node_t lives at `offset`, insert it, and return it.
 * The table rehashes (capacity doubling, minimum 8) once it reaches 75%
 * load; the condition also triggers on the initial capacity == 0 state.
 */
static inline hash_node_t *hash_table_get_or_add(
    hash_table_t *table, uint64_t key, size_t size, size_t offset
)
{
    uint64_t hash = make_hash(key);
    void *value;
    hash_node_t *node;
    size_t bucket;

    /* Fast path: the key already exists. */
    if (table->capacity) {
        hash_node_t *current = table->entries[hash % table->capacity];

        while (current != NULL) {
            if (current->key == key)
                return current;
            current = current->next;
        }
    }

    /* Grow and relink every node into its new bucket. */
    if (table->count >= table->capacity - (table->capacity / 4)) {
        size_t new_cap = table->capacity ? table->capacity * 2 : 8;
        hash_node_t **new_entries = do_calloc(new_cap, sizeof(*table->entries));
        size_t i;

        for (i = 0; i < table->capacity; i++) {
            hash_node_t *current = table->entries[i];

            while (current != NULL) {
                hash_node_t *next = current->next;
                size_t bucket = make_hash(current->key) % new_cap;

                current->prev = NULL;
                current->next = new_entries[bucket];
                if (current->next)
                    current->next->prev = current;
                new_entries[bucket] = current;
                current = next;
            }
        }

        free(table->entries);
        table->entries = new_entries;
        table->capacity = new_cap;
    }

    /* Allocate the host object and push its node onto the bucket chain. */
    value = do_calloc(1, size);
    node = (void*)((char*)value + offset);
    node->key = key;

    bucket = hash % table->capacity;
    node->prev = NULL;
    node->next = table->entries[bucket];
    if (node->next)
        node->next->prev = node;
    table->entries[bucket] = node;

    table->count += 1;
    return node;
}

#define HASH_TABLE_GET_OR_ADD(table, key, type, field) \
    CONTAINER( \
        type, field, \
        hash_table_get_or_add( \
            (table), (key), sizeof(type), offsetof(type, field) \
        ) \
    )
/*
 * Unlink a node from its bucket's doubly-linked chain and free the host
 * object (which starts `offset` bytes before the node).
 */
static inline void hash_table_remove(
    hash_table_t *table, hash_node_t *node, size_t offset
)
{
    if (node->prev == NULL)
        table->entries[make_hash(node->key) % table->capacity] = node->next;
    else
        node->prev->next = node->next;

    if (node->next != NULL)
        node->next->prev = node->prev;

    table->count -= 1;
    free((char*)node - offset);
}

#define HASH_TABLE_REMOVE(table, value, type, field) \
    hash_table_remove((table), &(value)->field, offsetof(type, field))
/* True when the table holds no entries. */
static inline bool hash_table_empty(hash_table_t *table)
{
    return !table->count;
}
/*
 * Release the bucket array (nodes themselves are owned by their hosts).
 * Fix: also NULL `entries` — previously a later insert would index a
 * dangling bucket array (use-after-free).
 */
static inline void hash_table_cleanup(hash_table_t *table)
{
    free(table->entries);
    table->entries = NULL;
    table->capacity = 0;
    table->count = 0;
}
/* When false, uacpi_kernel_map() emulates physical mappings instead of
 * casting addresses to pointers — presumably toggled by a runner that
 * feeds uACPI real physical addresses; confirm against callers. */
extern bool g_expect_virtual_addresses;
/* Address of the fabricated RSDP handed to uACPI. */
extern uacpi_phys_addr g_rsdp;

/* XSDT with its pointer table inlined: the FADT first, then SSDTs. */
UACPI_PACKED(struct full_xsdt {
    struct acpi_sdt_hdr hdr;
    struct acpi_fadt *fadt;
    struct acpi_sdt_hdr *ssdts[];
})

void set_oem(char (*oemid)[6]);
void set_oem_table_id(char (*oemid_table_id)[8]);

/* Build a fake RSDP/XSDT chain from a DSDT file plus SSDT paths. */
struct full_xsdt *make_xsdt(
    struct acpi_rsdp *rsdp, const char *dsdt_path, const vector_t *ssdt_paths
);
/* Same, but the DSDT comes from an in-memory buffer. */
struct full_xsdt *make_xsdt_blob(
    struct acpi_rsdp *rsdp, const void *dsdt, size_t dsdt_size
);
/* Free a make_xsdt*() result; num_tables counts SSDT slots only. */
void delete_xsdt(struct full_xsdt *xsdt, size_t num_tables);
/* Read a whole file, aborting if it is smaller than min_size. */
blob_t read_entire_file(const char *path, size_t min_size);
/* Abort the test run unless the uACPI call succeeded. */
static inline void ensure_ok_status(uacpi_status st)
{
    if (st != UACPI_STATUS_OK)
        error("uACPI error: %s", uacpi_status_to_string(st));
}
/* Human-readable name for a uACPI log level; unknown levels abort. */
static inline const char *uacpi_log_level_to_string(uacpi_log_level lvl)
{
    switch (lvl) {
    case UACPI_LOG_DEBUG: return "DEBUG";
    case UACPI_LOG_TRACE: return "TRACE";
    case UACPI_LOG_INFO: return "INFO";
    case UACPI_LOG_WARN: return "WARN";
    case UACPI_LOG_ERROR: return "ERROR";
    default:
        abort();
        return NULL;
    }
}

View File

@ -0,0 +1,605 @@
#include "helpers.h"
#include "os.h"
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uacpi/kernel_api.h>
#include <uacpi/status.h>
#include <uacpi/types.h>
/* Address of the fabricated RSDP; set up by the runner before init. */
uacpi_phys_addr g_rsdp;

uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address)
{
    *out_rsdp_address = g_rsdp;
    return UACPI_STATUS_OK;
}

/* Emulated 64KiB port I/O space backing the io_read/io_write hooks. */
static uint8_t *io_space;

#ifdef UACPI_KERNEL_INITIALIZATION
uacpi_status uacpi_kernel_initialize(uacpi_init_level lvl)
{
    /* Allocate the fake port space once, at the earliest init stage. */
    if (lvl == UACPI_INIT_LEVEL_EARLY)
        io_space = do_malloc(UINT16_MAX + 1);
    return UACPI_STATUS_OK;
}

void uacpi_kernel_deinitialize(void)
{
    free(io_space);
    io_space = NULL;
}
#endif
/* The port base itself doubles as the I/O handle; nothing to map. */
uacpi_status uacpi_kernel_io_map(
    uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle
)
{
    UACPI_UNUSED(len);

    *out_handle = (uacpi_handle)((uintptr_t)base);
    return UACPI_STATUS_OK;
}

void uacpi_kernel_io_unmap(uacpi_handle handle)
{
    /* Nothing was mapped, so nothing to release. */
    UACPI_UNUSED(handle);
}
/*
 * Port I/O hooks: accesses inside the emulated 64KiB io_space buffer are
 * plain memory accesses; anything outside reads back all-ones and has
 * writes silently dropped.
 */
#define UACPI_IO_READ(bits) \
    uacpi_status uacpi_kernel_io_read##bits( \
        uacpi_handle handle, uacpi_size offset, uacpi_u##bits *out_value \
    ) \
    { \
        uacpi_io_addr addr = (uacpi_io_addr)((uintptr_t)handle) + offset; \
 \
        if (io_space && addr <= UINT16_MAX) \
            memcpy(out_value, &io_space[addr], bits / 8); \
        else \
            *out_value = (uacpi_u##bits)0xFFFFFFFFFFFFFFFF; \
 \
        return UACPI_STATUS_OK; \
    }

#define UACPI_IO_WRITE(bits) \
    uacpi_status uacpi_kernel_io_write##bits( \
        uacpi_handle handle, uacpi_size offset, uacpi_u##bits in_value \
    ) \
    { \
        uacpi_io_addr addr = (uacpi_io_addr)((uintptr_t)handle) + offset; \
 \
        if (io_space && addr <= UINT16_MAX) \
            memcpy(&io_space[addr], &in_value, bits / 8); \
 \
        return UACPI_STATUS_OK; \
    }

/* PCI config hooks: no devices — reads are all-ones, writes dropped. */
#define UACPI_PCI_READ(bits) \
    uacpi_status uacpi_kernel_pci_read##bits( \
        uacpi_handle handle, uacpi_size offset, uacpi_u##bits *value \
    ) \
    { \
        UACPI_UNUSED(handle); \
        UACPI_UNUSED(offset); \
 \
        *value = (uacpi_u##bits)0xFFFFFFFFFFFFFFFF; \
        return UACPI_STATUS_OK; \
    }

#define UACPI_PCI_WRITE(bits) \
    uacpi_status uacpi_kernel_pci_write##bits( \
        uacpi_handle handle, uacpi_size offset, uacpi_u##bits value \
    ) \
    { \
        UACPI_UNUSED(handle); \
        UACPI_UNUSED(offset); \
        UACPI_UNUSED(value); \
 \
        return UACPI_STATUS_OK; \
    }

/* Instantiate the 8/16/32-bit variants of each hook. */
UACPI_IO_READ(8)
UACPI_IO_READ(16)
UACPI_IO_READ(32)
UACPI_IO_WRITE(8)
UACPI_IO_WRITE(16)
UACPI_IO_WRITE(32)
UACPI_PCI_READ(8)
UACPI_PCI_READ(16)
UACPI_PCI_READ(32)
UACPI_PCI_WRITE(8)
UACPI_PCI_WRITE(16)
UACPI_PCI_WRITE(32)
/* No real PCI in the test runner: hand back a NULL dummy handle. */
uacpi_status uacpi_kernel_pci_device_open(
    uacpi_pci_address address, uacpi_handle *out_handle
)
{
    UACPI_UNUSED(address);

    *out_handle = NULL;
    return UACPI_STATUS_OK;
}

void uacpi_kernel_pci_device_close(uacpi_handle handle)
{
    /* Nothing was opened, so nothing to close. */
    UACPI_UNUSED(handle);
}
/* When true, "physical" addresses handed to uacpi_kernel_map() are
 * really virtual pointers and are cast straight through. */
bool g_expect_virtual_addresses = true;

/* One heap buffer standing in for a physical mapping. */
typedef struct {
    hash_node_t node;   /* keyed by virtual address */
    uint64_t phys;
    size_t references;  /* map/unmap refcount */
} virt_location_t;

/* One (length -> virtual buffer) entry under a physical address. */
typedef struct {
    hash_node_t node;   /* keyed by mapping length */
    void *virt;
} mapping_t;

/* Per-physical-address table of active mappings, keyed by length. */
typedef struct {
    hash_node_t node;   /* keyed by physical address */
    hash_table_t mappings;
} phys_location_t;

static hash_table_t virt_locations;
static hash_table_t phys_locations;
/*
 * Map a "physical" address. In virtual mode (default) the address is a
 * pointer already and is cast through. In physical-emulation mode each
 * distinct (addr, len) pair gets one zeroed, refcounted heap buffer so
 * repeated maps of the same pair share storage; mapping a known address
 * with a new length logs a warning and allocates a fresh buffer.
 */
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len)
{
    if (!g_expect_virtual_addresses) {
        phys_location_t *phys_location = HASH_TABLE_FIND(
            &phys_locations, addr, phys_location_t, node
        );
        void *virt;
        virt_location_t *location;
        mapping_t *mapping;

        if (phys_location != NULL) {
            /* Same (addr, len) as before: bump the refcount and share. */
            mapping = HASH_TABLE_FIND(
                &phys_location->mappings, len, mapping_t, node
            );
            if (mapping != NULL) {
                location = HASH_TABLE_FIND(
                    &virt_locations, (uintptr_t)mapping->virt, virt_location_t,
                    node
                );
                location->references += 1;
                return mapping->virt;
            }

            printf(
                "WARN: remapping physical 0x%016" PRIX64 " with size %zu\n",
                addr, len
            );
        }

        /* New mapping: allocate and register it in both directions. */
        virt = do_calloc(len, 1);
        location = HASH_TABLE_GET_OR_ADD(
            &virt_locations, (uintptr_t)virt, virt_location_t, node
        );
        location->phys = addr;
        location->references = 1;

        phys_location = HASH_TABLE_GET_OR_ADD(
            &phys_locations, addr, phys_location_t, node
        );
        mapping = HASH_TABLE_GET_OR_ADD(
            &phys_location->mappings, len, mapping_t, node
        );
        mapping->virt = virt;
        return virt;
    }

    return (void*)((uintptr_t)addr);
}
/*
 * Host hook: unmap a range previously returned by uacpi_kernel_map().
 *
 * Identity-mapped (virtual) addresses are not tracked and fall out at the
 * first lookup. Tracked buffers are only released once their reference
 * count drops to zero; the per-physical-address table entry is torn down
 * when its last length-mapping disappears.
 */
void uacpi_kernel_unmap(void *addr, uacpi_size len)
{
    virt_location_t *virt_location = HASH_TABLE_FIND(
        &virt_locations, (uintptr_t)addr, virt_location_t, node
    );
    phys_location_t *phys_location;
    mapping_t *mapping;

    if (!virt_location)
        return;
    if (--virt_location->references > 0)
        return;

    phys_location = HASH_TABLE_FIND(
        &phys_locations, virt_location->phys, phys_location_t, node
    );

    mapping = HASH_TABLE_FIND(&phys_location->mappings, len, mapping_t, node);
    if (!mapping) {
        /* Unmap with a length that was never mapped: warn and keep going. */
        printf(
            "WARN: cannot identify mapping virt=%p phys=0x%016" PRIX64 " with "
            "size %zu\n", addr, phys_location->node.key, len
        );
        return;
    }

    HASH_TABLE_REMOVE(&phys_location->mappings, mapping, mapping_t, node);
    if (hash_table_empty(&phys_location->mappings)) {
        hash_table_cleanup(&phys_location->mappings);
        HASH_TABLE_REMOVE(
            &phys_locations, phys_location, phys_location_t, node
        );
    }

    free((void*)((uintptr_t)virt_location->node.key));
    HASH_TABLE_REMOVE(&virt_locations, virt_location, virt_location_t, node);
}
/*
 * Tear down all mapping bookkeeping between test runs: walk every hash
 * bucket chain, destroy each per-physical-address mappings table, then
 * drop the two top-level tables themselves.
 */
void interface_cleanup(void)
{
    size_t i;

    for (i = 0; i < phys_locations.capacity; i++) {
        phys_location_t *location = CONTAINER(
            phys_location_t, node, phys_locations.entries[i]
        );

        while (location) {
            hash_table_cleanup(&location->mappings);
            location = CONTAINER(phys_location_t, node, location->node.next);
        }
    }

    hash_table_cleanup(&phys_locations);
    hash_table_cleanup(&virt_locations);
}
#ifdef UACPI_SIZED_FREES
/*
 * With sized frees enabled, every live heap allocation is tracked by
 * address so uacpi_kernel_free() can validate the size hint uACPI passes.
 */
typedef struct {
    hash_node_t node;
    size_t size;  /* byte count originally requested from uacpi_kernel_alloc() */
} allocation_t;

static hash_table_t allocations;
/*
 * Host hook: heap allocation (sized-frees variant).
 *
 * Records each successful allocation's size so uacpi_kernel_free() can
 * verify uACPI's size hint. Zero-byte requests are considered a uACPI bug.
 * Returns NULL on allocation failure.
 */
void *uacpi_kernel_alloc(uacpi_size size)
{
    void *ret;
    allocation_t *allocation;

    /*
     * Report zero-size requests with a diagnostic instead of a bare
     * abort(), matching the !UACPI_SIZED_FREES variant below.
     */
    if (size == 0)
        error("attempted to allocate zero bytes");

    ret = malloc(size);
    if (ret == NULL)
        return ret;

    allocation = HASH_TABLE_GET_OR_ADD(
        &allocations, (uintptr_t)ret, allocation_t, node
    );
    allocation->size = size;
    return ret;
}
/*
 * Host hook: release a heap allocation (sized-frees variant).
 *
 * Looks the pointer up in the tracking table and errors out on unknown
 * pointers or a size hint that disagrees with the recorded size —
 * exactly the bugs UACPI_SIZED_FREES exists to catch.
 */
void uacpi_kernel_free(void *mem, uacpi_size size_hint)
{
    allocation_t *allocation;

    if (mem == NULL)
        return;

    allocation = HASH_TABLE_FIND(
        &allocations, (uintptr_t)mem, allocation_t, node
    );
    if (!allocation)
        error("unable to find heap allocation %p\n", mem);
    if (allocation->size != size_hint)
        error(
            "invalid free size: originally allocated %zu bytes, freeing as %zu",
            allocation->size, size_hint
        );

    HASH_TABLE_REMOVE(&allocations, allocation, allocation_t, node);
    free(mem);
}
#else
/* Host hook: plain heap allocation; zero-byte requests are a uACPI bug. */
void *uacpi_kernel_alloc(uacpi_size size)
{
    if (size == 0)
        error("attempted to allocate zero bytes");
    return malloc(size);
}

/* Host hook: release an allocation; free(NULL) is a harmless no-op. */
void uacpi_kernel_free(void *mem)
{
    free(mem);
}
#endif
#ifdef UACPI_NATIVE_ALLOC_ZEROED
/*
 * Host hook: zeroed allocation, implemented on top of uacpi_kernel_alloc()
 * so the sized-frees bookkeeping (when enabled) still sees it.
 */
void *uacpi_kernel_alloc_zeroed(uacpi_size size)
{
    void *mem = uacpi_kernel_alloc(size);

    if (mem != NULL)
        memset(mem, 0, size);
    return mem;
}
#endif
#ifdef UACPI_FORMATTED_LOGGING
/* Host hook: formatted logging — prefix with level, then forward to vprintf. */
void uacpi_kernel_vlog(
    uacpi_log_level level, const uacpi_char *format, va_list args
)
{
    printf("[uACPI][%s] ", uacpi_log_level_to_string(level));
    vprintf(format, args);
}

/* Varargs front-end for uacpi_kernel_vlog(). */
void uacpi_kernel_log(uacpi_log_level level, const uacpi_char *format, ...)
{
    va_list args;

    va_start(args, format);
    uacpi_kernel_vlog(level, format, args);
    va_end(args);
}
#else
/* Host hook: pre-formatted logging — uACPI hands us a complete string. */
void uacpi_kernel_log(uacpi_log_level level, const uacpi_char *str)
{
    printf("[uACPI][%s] %s", uacpi_log_level_to_string(level), str);
}
#endif
/* Host hook: monotonic nanosecond clock, delegated to the platform helper. */
uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void)
{
    return get_nanosecond_timer();
}
/*
 * Host hook: short busy-wait (no scheduling) for up to 255 microseconds,
 * spinning on the monotonic clock until the deadline passes.
 */
void uacpi_kernel_stall(uacpi_u8 usec)
{
    uint64_t deadline = get_nanosecond_timer() + (uint64_t)usec * 1000;

    while (get_nanosecond_timer() < deadline)
        ;
}
/* Host hook: millisecond sleep (may yield), delegated to the helper. */
void uacpi_kernel_sleep(uacpi_u64 msec)
{
    millisecond_sleep(msec);
}
uacpi_handle uacpi_kernel_create_mutex(void)
{
mutex_t *mutex = do_malloc(sizeof(*mutex));
mutex_init(mutex);
return mutex;
}
/* Host hook: destroy a mutex created by uacpi_kernel_create_mutex(). */
void uacpi_kernel_free_mutex(uacpi_handle handle)
{
    mutex_free(handle);
    free(handle);
}

/* Host hook: identify the calling thread for reentrant AML mutexes. */
uacpi_thread_id uacpi_kernel_get_thread_id(void)
{
    return get_thread_id();
}
/*
 * Host hook: acquire a mutex with uACPI timeout semantics —
 * 0 means try once, 0xFFFF means wait forever, anything else is a
 * timeout in milliseconds (converted to nanoseconds for the helper).
 */
uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 timeout)
{
    switch (timeout) {
    case 0:
        return mutex_try_lock(handle) ? UACPI_STATUS_OK : UACPI_STATUS_TIMEOUT;
    case 0xFFFF:
        mutex_lock(handle);
        return UACPI_STATUS_OK;
    default:
        return mutex_lock_timeout(handle, timeout * 1000000ull) ?
            UACPI_STATUS_OK : UACPI_STATUS_TIMEOUT;
    }
}
/* Host hook: release a mutex acquired via uacpi_kernel_acquire_mutex(). */
void uacpi_kernel_release_mutex(uacpi_handle handle)
{
    mutex_unlock(handle);
}

/* Counting event: mutex-protected counter plus a condvar to wait on it. */
typedef struct {
    mutex_t mutex;
    condvar_t condvar;
    size_t counter;  /* number of pending (signaled but unconsumed) events */
} event_t;

/* Host hook: create an event with a zero (unsignaled) counter. */
uacpi_handle uacpi_kernel_create_event(void)
{
    event_t *event = do_calloc(1, sizeof(*event));

    mutex_init(&event->mutex);
    condvar_init(&event->condvar);
    return event;
}

/* Host hook: destroy an event and its synchronization primitives. */
void uacpi_kernel_free_event(uacpi_handle handle)
{
    event_t *event = handle;

    condvar_free(&event->condvar);
    mutex_free(&event->mutex);
    free(handle);
}
/* Condvar predicate: true once at least one event is pending. */
static bool event_pred(void *ptr)
{
    const event_t *event = ptr;

    return event->counter > 0;
}
/*
 * Host hook: wait for an event with uACPI timeout semantics
 * (0 = poll, 0xFFFF = forever, otherwise milliseconds).
 * A successful wait consumes exactly one pending signal.
 */
uacpi_bool uacpi_kernel_wait_for_event(uacpi_handle handle, uacpi_u16 timeout)
{
    event_t *event = handle;
    bool ok;

    mutex_lock(&event->mutex);

    /* Fast path: a signal is already pending, consume it. */
    if (event->counter > 0) {
        event->counter -= 1;
        mutex_unlock(&event->mutex);
        return UACPI_TRUE;
    }

    /* Poll-only: nothing pending, report failure immediately. */
    if (timeout == 0) {
        mutex_unlock(&event->mutex);
        return UACPI_FALSE;
    }

    /* Infinite wait: block until the predicate holds, then consume. */
    if (timeout == 0xFFFF) {
        condvar_wait(&event->condvar, &event->mutex, event_pred, event);
        event->counter -= 1;
        mutex_unlock(&event->mutex);
        return UACPI_TRUE;
    }

    /* Bounded wait: only consume a signal if the wait succeeded. */
    ok = condvar_wait_timeout(
        &event->condvar, &event->mutex, event_pred, event, timeout * 1000000ull
    );
    if (ok)
        event->counter -= 1;
    mutex_unlock(&event->mutex);
    return ok ? UACPI_TRUE : UACPI_FALSE;
}
/* Host hook: post one signal and wake a single waiter (if any). */
void uacpi_kernel_signal_event(uacpi_handle handle)
{
    event_t *event = handle;

    mutex_lock(&event->mutex);
    event->counter += 1;
    condvar_signal(&event->condvar);
    mutex_unlock(&event->mutex);
}

/* Host hook: discard all pending signals without waking anyone. */
void uacpi_kernel_reset_event(uacpi_handle handle)
{
    event_t *event = handle;

    mutex_lock(&event->mutex);
    event->counter = 0;
    mutex_unlock(&event->mutex);
}
/*
 * Host hook: AML-originated firmware requests. Breakpoints are logged and
 * skipped; Fatal() opcodes have their payload printed; anything else is a
 * hard error since uACPI defines no other request types we know of.
 */
uacpi_status uacpi_kernel_handle_firmware_request(uacpi_firmware_request *req)
{
    switch (req->type) {
    case UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT:
        printf("Ignoring breakpoint\n");
        break;
    case UACPI_FIRMWARE_REQUEST_TYPE_FATAL:
        printf(
            "Fatal firmware error: type: %" PRIx8 " code: %" PRIx32 " arg: "
            "%" PRIx64 "\n", req->fatal.type, req->fatal.code, req->fatal.arg
        );
        break;
    default:
        error("unknown firmware request type %d", req->type);
    }

    return UACPI_STATUS_OK;
}
/*
 * Host hook: interrupt handler installation. Interrupts are not wired up
 * in this port, so pretend installation succeeded; the handler is never
 * invoked.
 */
uacpi_status uacpi_kernel_install_interrupt_handler(
    uacpi_u32 irq, uacpi_interrupt_handler handler, uacpi_handle ctx,
    uacpi_handle *out_irq_handle
)
{
    UACPI_UNUSED(irq);
    UACPI_UNUSED(handler);
    UACPI_UNUSED(ctx);

    /*
     * Hand back a well-defined (if meaningless) handle instead of leaving
     * *out_irq_handle indeterminate — callers may store it and later pass
     * it to uacpi_kernel_uninstall_interrupt_handler(). Mirrors
     * uacpi_kernel_pci_device_open().
     */
    *out_irq_handle = NULL;
    return UACPI_STATUS_OK;
}
/* Host hook: interrupt handler removal — nothing was installed, so no-op. */
uacpi_status uacpi_kernel_uninstall_interrupt_handler(
    uacpi_interrupt_handler handler, uacpi_handle irq_handle
)
{
    UACPI_UNUSED(handler);
    UACPI_UNUSED(irq_handle);
    return UACPI_STATUS_OK;
}
/*
 * Spinlock hooks: this port backs spinlocks with regular mutexes, which is
 * sufficient since nothing here runs in real interrupt context. The
 * returned cpu_flags value is a dummy zero and is ignored on unlock.
 */
uacpi_handle uacpi_kernel_create_spinlock(void)
{
    return uacpi_kernel_create_mutex();
}

void uacpi_kernel_free_spinlock(uacpi_handle handle)
{
    uacpi_kernel_free_mutex(handle);
}

uacpi_cpu_flags uacpi_kernel_lock_spinlock(uacpi_handle handle)
{
    /* Infinite-timeout acquire; there is no interrupt state to save. */
    uacpi_kernel_acquire_mutex(handle, 0xFFFF);
    return 0;
}

void uacpi_kernel_unlock_spinlock(uacpi_handle handle, uacpi_cpu_flags flags)
{
    UACPI_UNUSED(flags);
    uacpi_kernel_release_mutex(handle);
}
/*
 * Host hook: deferred work. This port has no worker threads, so the
 * handler runs synchronously on the calling thread; consequently there is
 * never any outstanding work to wait for below.
 */
uacpi_status uacpi_kernel_schedule_work(
    uacpi_work_type type, uacpi_work_handler handler, uacpi_handle ctx
)
{
    UACPI_UNUSED(type);

    handler(ctx);
    return UACPI_STATUS_OK;
}

/* All work completed inline in schedule_work(), so nothing to wait on. */
uacpi_status uacpi_kernel_wait_for_work_completion(void)
{
    return UACPI_STATUS_OK;
}

View File

@ -0,0 +1,290 @@
#pragma once
#include "helpers.h"
#include <stdbool.h>
#include <stdint.h>
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#ifdef __WATCOMC__
#include <process.h> // provides gettid
#elif defined(__APPLE__)
#include <mach/mach_time.h>
#endif
#include <errno.h>
#include <pthread.h>
#include <time.h>
#include <unistd.h>
#endif
/*
 * Portable synchronization primitive aliases. HAVE_TIMED_WAIT marks
 * platforms where pthread_mutex_clocklock()/pthread_cond_clockwait() are
 * available; elsewhere the timed operations fall back to polling loops.
 */
#ifdef _WIN32
typedef CRITICAL_SECTION mutex_t;
typedef CONDITION_VARIABLE condvar_t;

#define HAVE_TIMED_WAIT 0
#else
typedef pthread_mutex_t mutex_t;
typedef pthread_cond_t condvar_t;

/* Watcom and macOS lack the glibc clocklock/clockwait extensions. */
#if defined(__WATCOMC__) || defined(__APPLE__)
#define HAVE_TIMED_WAIT 0
#else
#define HAVE_TIMED_WAIT 1
#endif
#endif
static inline uint64_t get_nanosecond_timer(void)
{
#ifdef _WIN32
static LARGE_INTEGER frequency;
LARGE_INTEGER counter;
if (frequency.QuadPart == 0)
if (!QueryPerformanceFrequency(&frequency))
error("QueryPerformanceFrequency failed");
if (!QueryPerformanceCounter(&counter))
error("QueryPerformanceCounter failed");
counter.QuadPart *= NANOSECONDS_PER_SECOND;
return counter.QuadPart / frequency.QuadPart;
#elif defined(__APPLE__)
static struct mach_timebase_info tb;
static bool initialized;
if (!initialized) {
if (mach_timebase_info(&tb) != KERN_SUCCESS)
error("mach_timebase_info failed");
initialized = true;
}
return (mach_absolute_time() * tb.numer) / tb.denom;
#else
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts))
error("clock_gettime failed");
return ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;
#endif
}
/*
 * Opaque per-thread identifier, returned as a pointer-sized cookie.
 * Values are only compared for equality, never dereferenced.
 */
static inline void *get_thread_id(void)
{
#ifdef _WIN32
    return (void*)((uintptr_t)GetCurrentThreadId());
#elif defined(__APPLE__)
    uint64_t id;

    if (pthread_threadid_np(NULL, &id))
        error("pthread_threadid_np failed");

    /* NOTE(review): truncates on 32-bit pointers — fine for equality use. */
    return (void*)id;
#else
    return (void*)((uintptr_t)gettid());
#endif
}
/*
 * Block the calling thread for at least the given number of milliseconds.
 */
static inline void millisecond_sleep(uint64_t milliseconds)
{
#ifdef _WIN32
    Sleep(milliseconds);
#else
    /*
     * usleep() is permitted by POSIX to fail with EINVAL for arguments of
     * 1000000 or more, so any sleep of one second or longer could abort
     * via error(). nanosleep() has no such limit; it also reports the
     * remaining time on EINTR, which we use to resume the sleep.
     */
    struct timespec ts = {
        .tv_sec = (time_t)(milliseconds / 1000),
        .tv_nsec = (long)(milliseconds % 1000) * 1000000L,
    };

    while (nanosleep(&ts, &ts) != 0) {
        if (errno != EINTR)
            error("nanosleep failed");
    }
#endif
}
/* Initialize a mutex; aborts via error() on failure. */
static inline void mutex_init(mutex_t *mutex)
{
#ifdef _WIN32
    InitializeCriticalSection(mutex);
#else
    if (pthread_mutex_init(mutex, NULL))
        error("pthread_mutex_init failed");
#endif
}

/* Destroy a mutex previously set up with mutex_init(). */
static inline void mutex_free(mutex_t *mutex)
{
#ifdef _WIN32
    DeleteCriticalSection(mutex);
#else
    if (pthread_mutex_destroy(mutex))
        error("pthread_mutex_destroy failed");
#endif
}

/*
 * Attempt to take the mutex without blocking.
 * Returns true on acquisition; false only when it was already held (EBUSY);
 * any other failure is fatal.
 */
static inline bool mutex_try_lock(mutex_t *mutex)
{
#ifdef _WIN32
    return TryEnterCriticalSection(mutex);
#else
    int err = pthread_mutex_trylock(mutex);

    if (err == 0)
        return true;

    if (err != EBUSY)
        error("pthread_mutex_trylock failed");

    return false;
#endif
}

/* Block until the mutex is acquired. */
static inline void mutex_lock(mutex_t *mutex)
{
#ifdef _WIN32
    EnterCriticalSection(mutex);
#else
    if (pthread_mutex_lock(mutex))
        error("pthread_mutex_lock failed");
#endif
}
/*
 * Acquire the mutex within timeout_ns nanoseconds.
 * Returns true on acquisition, false on timeout.
 *
 * Without native timed waits this degrades to a 1ms try-lock polling
 * loop; otherwise it uses pthread_mutex_clocklock (a glibc extension —
 * hence the HAVE_TIMED_WAIT gating above).
 */
static inline bool mutex_lock_timeout(mutex_t *mutex, uint64_t timeout_ns)
{
#if !HAVE_TIMED_WAIT
    uint64_t end = get_nanosecond_timer() + timeout_ns;

    do {
        if (mutex_try_lock(mutex))
            return true;

        millisecond_sleep(1);
    } while (get_nanosecond_timer() < end);

    return false;
#else
    struct timespec spec;
    int err;

    if (clock_gettime(CLOCK_MONOTONIC, &spec))
        error("clock_gettime failed");

    /* Convert the relative timeout into an absolute monotonic deadline. */
    spec.tv_nsec += timeout_ns;
    spec.tv_sec += spec.tv_nsec / NANOSECONDS_PER_SECOND;
    spec.tv_nsec %= NANOSECONDS_PER_SECOND;

    err = pthread_mutex_clocklock(mutex, CLOCK_MONOTONIC, &spec);
    if (err == 0)
        return true;

    if (err != ETIMEDOUT)
        error("pthread_mutex_clocklock failed");

    return false;
#endif
}
/* Release a mutex held by the calling thread. */
static inline void mutex_unlock(mutex_t *mutex)
{
#ifdef _WIN32
    LeaveCriticalSection(mutex);
#else
    if (pthread_mutex_unlock(mutex))
        error("pthread_mutex_unlock failed");
#endif
}

/* Initialize a condition variable; aborts via error() on failure. */
static inline void condvar_init(condvar_t *var)
{
#ifdef _WIN32
    InitializeConditionVariable(var);
#else
    if (pthread_cond_init(var, NULL))
        error("pthread_cond_init failed");
#endif
}

/* Destroy a condition variable (no-op on Windows, which needs none). */
static inline void condvar_free(condvar_t *var)
{
#ifdef _WIN32
    UACPI_UNUSED(var);
#else
    if (pthread_cond_destroy(var))
        error("pthread_cond_destroy failed");
#endif
}
/* Predicate evaluated under the mutex to decide whether to keep waiting. */
typedef bool (*condvar_pred_t)(void *ctx);

/*
 * Wait on the condition variable until pred(ctx) becomes true.
 * Must be called with the mutex held; the predicate loop guards against
 * spurious wakeups.
 */
static inline void condvar_wait(
    condvar_t *var, mutex_t *mutex, condvar_pred_t pred, void *ctx
)
{
    while (!pred(ctx))
#ifdef _WIN32
        if (!SleepConditionVariableCS(var, mutex, INFINITE))
            error("SleepConditionVariableCS failed");
#else
        if (pthread_cond_wait(var, mutex))
            error("pthread_cond_wait failed");
#endif
}
/*
 * Wait until pred(ctx) becomes true or timeout_ns nanoseconds elapse.
 * Must be called with the mutex held. Returns true if the predicate was
 * satisfied, false on timeout.
 *
 * Without native timed waits this polls: on Windows via a bounded
 * SleepConditionVariableCS, elsewhere by releasing the mutex and sleeping
 * 1ms per iteration (which loses wakeup notifications but still converges).
 */
static inline bool condvar_wait_timeout(
    condvar_t *var, mutex_t *mutex, condvar_pred_t pred, void *ctx,
    uint64_t timeout_ns
)
{
#if !HAVE_TIMED_WAIT
    uint64_t end = get_nanosecond_timer() + timeout_ns;

    while (!pred(ctx)) {
        uint64_t cur = get_nanosecond_timer();
#ifdef _WIN32
        DWORD milliseconds;
#endif

        if (cur >= end)
            return false;

#ifdef _WIN32
        /* Round the remaining time down to ms, but never wait 0ms. */
        milliseconds = (end - cur) / 1000;
        if (milliseconds == 0)
            milliseconds = 1;

        if (!SleepConditionVariableCS(var, mutex, milliseconds) &&
            GetLastError() != ERROR_TIMEOUT) {
            error("SleepConditionVariableCS failed");
        }
#else
        UACPI_UNUSED(var);

        mutex_unlock(mutex);
        millisecond_sleep(1);
        mutex_lock(mutex);
#endif
    }

    return true;
#else
    struct timespec spec;

    if (clock_gettime(CLOCK_MONOTONIC, &spec))
        error("clock_gettime failed");

    /* Absolute monotonic deadline shared by every wait iteration. */
    spec.tv_nsec += timeout_ns;
    spec.tv_sec += spec.tv_nsec / NANOSECONDS_PER_SECOND;
    spec.tv_nsec %= NANOSECONDS_PER_SECOND;

    while (!pred(ctx)) {
        int err = pthread_cond_clockwait(var, mutex, CLOCK_MONOTONIC, &spec);

        if (err == 0)
            continue;

        if (err != ETIMEDOUT)
            error("pthread_cond_clockwait failed");

        return false;
    }

    return true;
#endif
}
/* Wake one thread blocked on the condition variable, if any. */
static inline void condvar_signal(condvar_t *var)
{
#ifdef _WIN32
    WakeConditionVariable(var);
#else
    if (pthread_cond_signal(var))
        error("pthread_cond_signal failed");
#endif
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,570 @@
#include "argparser.h"
#include "helpers.h"
#include <inttypes.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uacpi/acpi.h>
#include <uacpi/context.h>
#include <uacpi/event.h>
#include <uacpi/log.h>
#include <uacpi/namespace.h>
#include <uacpi/notify.h>
#include <uacpi/opregion.h>
#include <uacpi/osi.h>
#include <uacpi/platform/types.h>
#include <uacpi/resources.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/types.h>
#include <uacpi/uacpi.h>
#include <uacpi/utilities.h>
/* Implemented in sibling test translation units / the interface port. */
void run_resource_tests(void);
void test_object_api(void);
void test_address_spaces(void);
void interface_cleanup(void);
/*
 * Map the --expect type keyword ("int"/"str") to its uACPI object type.
 * Any other keyword is a fatal usage error; the trailing return only
 * exists to satisfy the compiler.
 */
static uacpi_object_type string_to_object_type(const char *str)
{
    static const struct {
        const char *name;
        uacpi_object_type type;
    } kinds[] = {
        { "int", UACPI_OBJECT_INTEGER },
        { "str", UACPI_OBJECT_STRING },
    };
    size_t i;

    for (i = 0; i < sizeof(kinds) / sizeof(kinds[0]); i++) {
        if (strcmp(str, kinds[i].name) == 0)
            return kinds[i].type;
    }

    error("Unsupported type for validation: %s", str);
    return UACPI_OBJECT_UNINITIALIZED;
}
/*
 * Compare the object returned by \MAIN against the --expect type/value
 * pair; any mismatch is a fatal test failure via error(). Only integer
 * and string expectations are supported (enforced by
 * string_to_object_type), so other types hit abort().
 */
static void validate_ret_against_expected(
    uacpi_object *obj, uacpi_object_type expected_type, const char *expected_val
)
{
    uacpi_object_type type = uacpi_object_get_type(obj);

    if (type != expected_type)
        /*
         * Bug fix: the original message passed the arguments swapped
         * (printing the expected type as "returned" and vice versa) and
         * the format string was missing the closing quote after the
         * second %s.
         */
        error(
            "returned type '%s' doesn't match expected '%s'",
            uacpi_object_type_to_string(type),
            uacpi_object_type_to_string(expected_type)
        );

    switch (type) {
    case UACPI_OBJECT_INTEGER: {
        uacpi_u64 expected_int = strtoull(expected_val, NULL, 0);
        uacpi_u64 actual_int;

        uacpi_object_get_integer(obj, &actual_int);

        if (expected_int != actual_int)
            error(
                "returned value '%" PRIu64 "' doesn't match expected '%" PRIu64
                "'", actual_int, expected_int
            );
        break;
    }
    case UACPI_OBJECT_STRING: {
        uacpi_data_view view;
        const char *actual_str;

        uacpi_object_get_string_or_buffer(obj, &view);
        actual_str = view.text;

        if (strcmp(expected_val, actual_str) != 0)
            error(
                "returned value '%s' doesn't match expected '%s'",
                actual_str, expected_val
            );
        break;
    }
    default:
        abort();
    }
}
/* printf() with 4 spaces of leading indentation per namespace depth level. */
static void nested_printf(uacpi_u32 depth, const char *fmt, ...)
{
    va_list va;

    printf("%*s", (int)(depth * 4), "");

    va_start(va, fmt);
    vprintf(fmt, va);
    va_end(va);
}
/*
 * Evaluate a node's resource method (_CRS/_PRS) via the given uACPI
 * callback and print a summary line. A NOT_FOUND status means the node
 * simply has no such method and is silently ignored; other failures are
 * reported but non-fatal.
 */
static void dump_resources(
    uacpi_u32 depth, uacpi_namespace_node *node,
    uacpi_status (*cb)(uacpi_namespace_node *, uacpi_resources **),
    const char *name
)
{
    uacpi_resources *res;

    uacpi_status ret = cb(node, &res);
    if (ret == UACPI_STATUS_OK) {
        // TODO: dump resources here
        nested_printf(depth, "  %s: <%u bytes>\n", name, res->length);
        uacpi_free_resources(res);
    } else if (ret != UACPI_STATUS_NOT_FOUND)
        nested_printf(
            depth, "  %s: unable to evaluate (%s)\n", name,
            uacpi_status_to_string(ret)
        );
}
/*
 * Namespace-enumeration callback: print one node's absolute path, type,
 * and any identification/power info uACPI reports for it (gated by the
 * corresponding UACPI_NS_NODE_INFO_HAS_* flags). Always continues the
 * iteration; failure to fetch node info is fatal.
 */
static uacpi_iteration_decision dump_one_node(
    void *ptr, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    struct uacpi_namespace_node_info *info;
    uacpi_status ret = uacpi_get_namespace_node_info(node, &info);
    const char *path;

    UACPI_UNUSED(ptr);

    if (uacpi_unlikely_error(ret)) {
        uacpi_object_name name = uacpi_namespace_node_name(node);

        fprintf(
            stderr, "unable to get node %.4s info: %s\n", name.text,
            uacpi_status_to_string(ret)
        );
        exit(1);
    }

    path = uacpi_namespace_node_generate_absolute_path(node);
    nested_printf(
        depth, "%s [%s]", path, uacpi_object_type_to_string(info->type)
    );
    uacpi_free_absolute_path(path);

    if (info->type == UACPI_OBJECT_METHOD)
        printf(" (%d args)", info->num_params);

    /* Open an info block only when there is at least one flagged field. */
    if (info->flags)
        printf(" {\n");

    /*
     * Bug fix: _ADR used to be printed whenever *any* info flag was set
     * (`if (info->flags)`); gate it on the dedicated HAS_ADR flag like
     * every other field below.
     */
    if (info->flags & UACPI_NS_NODE_INFO_HAS_ADR)
        nested_printf(depth, "  _ADR: %016" PRIX64 "\n", info->adr);

    if (info->flags & UACPI_NS_NODE_INFO_HAS_HID)
        nested_printf(depth, "  _HID: %s\n", info->hid.value);
    if (info->flags & UACPI_NS_NODE_INFO_HAS_CID) {
        size_t i;

        nested_printf(depth, "  _CID: ");
        for (i = 0; i < info->cid.num_ids; ++i)
            printf("%s ", info->cid.ids[i].value);
        printf("\n");
    }
    if (info->flags & UACPI_NS_NODE_INFO_HAS_UID)
        nested_printf(depth, "  _UID: %s\n", info->uid.value);
    if (info->flags & UACPI_NS_NODE_INFO_HAS_CLS)
        nested_printf(depth, "  _CLS: %s\n", info->cls.value);
    if (info->flags & UACPI_NS_NODE_INFO_HAS_SXD)
        nested_printf(
            depth, "  _SxD: S1->D%d S2->D%d S3->D%d S4->D%d\n", info->sxd[0],
            info->sxd[1], info->sxd[2], info->sxd[3]
        );
    if (info->flags & UACPI_NS_NODE_INFO_HAS_SXW)
        nested_printf(
            depth, "  _SxW: S0->D%d S1->D%d S2->D%d S3->D%d S4->D%d\n",
            info->sxw[0], info->sxw[1], info->sxw[2], info->sxw[3], info->sxw[4]
        );

    if (info->flags) {
        if (info->type == UACPI_OBJECT_DEVICE) {
            dump_resources(depth, node, uacpi_get_current_resources, "_CRS");
            dump_resources(depth, node, uacpi_get_possible_resources, "_PRS");
        }

        nested_printf(depth, "}\n");
    } else
        printf("\n");

    uacpi_free_namespace_node_info(info);
    return UACPI_ITERATION_DECISION_CONTINUE;
}
/* Dump the root node, then every descendant, via dump_one_node(). */
static void enumerate_namespace(void)
{
    uacpi_namespace_node *root = uacpi_namespace_root();

    dump_one_node(NULL, root, 0);
    uacpi_namespace_for_each_child_simple(root, dump_one_node, NULL);
}
/*
 * DefinitionBlock ("x.aml", "SSDT", 1, "uTEST", "OVERRIDE", 0xF0F0F0F0)
 * {
 *     Name (VAL, "TestRunner")
 * }
 */
/* Pre-compiled AML for the table above: 36-byte SDT header followed by the
   Name(VAL, "TestRunner") definition. Served by handle_table_install() when
   a table with OEM id "OVERTABL" is being installed. */
static uint8_t table_override[] = {
    0x53, 0x53, 0x44, 0x54, 0x35, 0x00, 0x00, 0x00,
    0x01, 0xa1, 0x75, 0x54, 0x45, 0x53, 0x54, 0x00,
    0x4f, 0x56, 0x45, 0x52, 0x52, 0x49, 0x44, 0x45,
    0xf0, 0xf0, 0xf0, 0xf0, 0x49, 0x4e, 0x54, 0x4c,
    0x25, 0x09, 0x20, 0x20, 0x08, 0x56, 0x41, 0x4c,
    0x5f, 0x0d, 0x54, 0x65, 0x73, 0x74, 0x52, 0x75,
    0x6e, 0x6e, 0x65, 0x72, 0x00
};
/*
 * DefinitionBlock ("x.aml", "SSDT", 1, "uTEST", "RUNRIDTB", 0xF0F0F0F0)
 * {
 *     Name (\_SI.TID, "uACPI")
 *     Printf("TestRunner ID SSDT loaded!")
 * }
 */
/* Pre-compiled AML for the table above; installed in test mode so AML test
   cases can detect this runner by evaluating \_SI.TID. */
static uint8_t runner_id_table[] = {
    0x53, 0x53, 0x44, 0x54, 0x55, 0x00, 0x00, 0x00,
    0x01, 0x45, 0x75, 0x54, 0x45, 0x53, 0x54, 0x00,
    0x52, 0x55, 0x4e, 0x52, 0x49, 0x44, 0x54, 0x42,
    0xf0, 0xf0, 0xf0, 0xf0, 0x49, 0x4e, 0x54, 0x4c,
    0x25, 0x09, 0x20, 0x20, 0x08, 0x5c, 0x2e, 0x5f,
    0x53, 0x49, 0x5f, 0x54, 0x49, 0x44, 0x5f, 0x0d,
    0x75, 0x41, 0x43, 0x50, 0x49, 0x00, 0x70, 0x0d,
    0x54, 0x65, 0x73, 0x74, 0x52, 0x75, 0x6e, 0x6e,
    0x65, 0x72, 0x20, 0x49, 0x44, 0x20, 0x53, 0x53,
    0x44, 0x54, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x65,
    0x64, 0x21, 0x00, 0x5b, 0x31
};
/*
 * Table-installation hook exercised by the table tests: tables whose OEM
 * table id is "DENYTABL" are rejected, "OVERTABL" is replaced with the
 * table_override blob above, and everything else installs normally.
 */
static uacpi_table_installation_disposition handle_table_install(
    struct acpi_sdt_hdr *hdr, uacpi_u64 *out_override
)
{
    if (!strncmp(hdr->oem_table_id, "DENYTABL", sizeof(hdr->oem_table_id)))
        return UACPI_TABLE_INSTALLATION_DISPOSITON_DENY;

    if (strncmp(hdr->oem_table_id, "OVERTABL", sizeof(hdr->oem_table_id)))
        return UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW;

    /* The "physical" override address is really a host pointer here. */
    *out_override = (uacpi_virt_addr)table_override;
    return UACPI_TABLE_INSTALLATION_DISPOSITON_VIRTUAL_OVERRIDE;
}
/*
 * Notify() handler installed on the namespace root: log the notification
 * source path and value, then report success.
 */
static uacpi_status handle_notify(
    uacpi_handle handle, uacpi_namespace_node *node, uacpi_u64 value
)
{
    const char *path = uacpi_namespace_node_generate_absolute_path(node);

    UACPI_UNUSED(handle);

    printf("Received a notification from %s %" PRIx64 "\n", path, value);

    /*
     * Bug fix: the path is allocated by uACPI's own allocator; releasing
     * it with plain free() bypasses the UACPI_SIZED_FREES bookkeeping in
     * the interface port (and is wrong whenever uacpi_kernel_alloc() is
     * not plain malloc). Use the matching release API, as dump_one_node()
     * already does.
     */
    uacpi_free_absolute_path(path);
    return UACPI_STATUS_OK;
}
/*
 * Minimal EmbeddedController address-space handler: reads return 0 (the
 * READ case deliberately falls through to the common OK return), writes
 * and attach/detach are accepted, anything else is rejected.
 */
static uacpi_status handle_ec(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_READ: {
        uacpi_region_rw_data *rw_data = (uacpi_region_rw_data*)op_data;

        rw_data->value = 0;
        UACPI_FALLTHROUGH;
    }
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
    case UACPI_REGION_OP_WRITE:
        return UACPI_STATUS_OK;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}
/*
 * Dummy GPE handler used by the GPE install/enable/disable round-trip in
 * run_test(): claims the event and asks for the GPE to be re-enabled.
 */
static uacpi_interrupt_ret handle_gpe(
    uacpi_handle handle, uacpi_namespace_node *node, uint16_t idx
)
{
    UACPI_UNUSED(handle);
    UACPI_UNUSED(node);
    UACPI_UNUSED(idx);
    return UACPI_INTERRUPT_HANDLED | UACPI_GPE_REENABLE;
}
/*
 * Drive one full uACPI lifecycle against the given DSDT (plus optional
 * SSDTs): fabricate RSDP/XSDT tables in host memory, verify early table
 * access, initialize uACPI, install all test handlers/interfaces, load
 * and initialize the namespace, then — in test mode — evaluate \MAIN and
 * compare its result to the expected type/value. Always resets uACPI
 * state and frees the fabricated tables before returning.
 */
static void run_test(
    const char *dsdt_path, const vector_t *ssdt_paths,
    uacpi_object_type expected_type, const char *expected_value,
    bool dump_namespace
)
{
    static uint8_t early_table_buf[4096];
    struct acpi_rsdp rsdp = { 0 };
    struct full_xsdt *xsdt = make_xsdt(&rsdp, dsdt_path, ssdt_paths);
    uacpi_status st;
    uacpi_table tbl;
    bool is_test_mode;
    uacpi_object *ret = NULL;

    /* Point uACPI at the fabricated RSDP (a host address in disguise). */
    g_rsdp = (uacpi_phys_addr)((uintptr_t)&rsdp);

    st = uacpi_setup_early_table_access(
        early_table_buf, sizeof(early_table_buf)
    );
    ensure_ok_status(st);

    /* Sanity-check that early table access can see the DSDT. */
    st = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);
    ensure_ok_status(st);
    if (strncmp(tbl.hdr->signature, ACPI_DSDT_SIGNATURE, 4) != 0)
        error("broken early table access!");
    st = uacpi_table_unref(&tbl);
    ensure_ok_status(st);

    st = uacpi_initialize(UACPI_FLAG_NO_ACPI_MODE);
    ensure_ok_status(st);

    /*
     * Go through all AML tables and manually bump their reference counts here
     * so that they're mapped before the call to uacpi_namespace_load(). The
     * reason we need this is to disambiguate calls to uacpi_kernel_map() with
     * a synthetic physical address (that is actually a virtual address for
     * tables that we constructed earlier) or a real physical address that comes
     * from some operation region or any other AML code or action.
     */
    uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);

    st = uacpi_table_find_by_signature(ACPI_SSDT_SIGNATURE, &tbl);
    while (st == UACPI_STATUS_OK) {
        uacpi_table_ref(&tbl);
        st = uacpi_table_find_next_with_same_signature(&tbl);
    }

    /* From here on, unknown addresses are treated as real physical ones. */
    g_expect_virtual_addresses = false;

    st = uacpi_install_notify_handler(
        uacpi_namespace_root(), handle_notify, NULL
    );
    ensure_ok_status(st);

    st = uacpi_set_table_installation_handler(handle_table_install);
    ensure_ok_status(st);

    st = uacpi_install_interface("TestRunner", UACPI_INTERFACE_KIND_FEATURE);
    ensure_ok_status(st);

    /* Second uninstall of the same _OSI interface must report NOT_FOUND. */
    st = uacpi_uninstall_interface("Windows 2006");
    ensure_ok_status(st);

    st = uacpi_uninstall_interface("Windows 2006");
    if (st != UACPI_STATUS_NOT_FOUND)
        error("couldn't uninstall interface");

    st = uacpi_enable_host_interface(UACPI_HOST_INTERFACE_3_0_THERMAL_MODEL);
    ensure_ok_status(st);

    st = uacpi_enable_host_interface(UACPI_HOST_INTERFACE_MODULE_DEVICE);
    ensure_ok_status(st);

    is_test_mode = expected_type != UACPI_OBJECT_UNINITIALIZED;
    if (is_test_mode) {
        /* Inject the runner-ID SSDT so AML can detect this runner. */
        st = uacpi_table_install(runner_id_table, NULL);
        ensure_ok_status(st);
    }

    st = uacpi_namespace_load();
    ensure_ok_status(st);

    if (is_test_mode) {
        uacpi_object *runner_id = UACPI_NULL;
        uacpi_data_view view;

        /* Verify the injected \_SI.TID actually made it into the tree. */
        st = uacpi_eval_typed(
            UACPI_NULL, "\\_SI.TID", UACPI_NULL, UACPI_OBJECT_STRING_BIT,
            &runner_id
        );
        ensure_ok_status(st);

        st = uacpi_object_get_string_or_buffer(runner_id, &view);
        ensure_ok_status(st);

        if (strcmp(view.text, "uACPI") != 0)
            error("invalid test runner id");

        uacpi_object_unref(runner_id);
    }

    st = uacpi_install_address_space_handler(
        uacpi_namespace_root(), UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER,
        handle_ec, NULL
    );
    ensure_ok_status(st);

    /* Exercise the full GPE handler install/enable/disable/uninstall path. */
    st = uacpi_install_gpe_handler(
        UACPI_NULL, 123, UACPI_GPE_TRIGGERING_EDGE, handle_gpe, NULL
    );
    ensure_ok_status(st);

    st = uacpi_enable_gpe(UACPI_NULL, 123);
    ensure_ok_status(st);

    st = uacpi_disable_gpe(UACPI_NULL, 123);
    ensure_ok_status(st);

    st = uacpi_uninstall_gpe_handler(UACPI_NULL, 123, handle_gpe);
    ensure_ok_status(st);

    st = uacpi_namespace_initialize();
    ensure_ok_status(st);

    if (dump_namespace)
        enumerate_namespace();

    if (!is_test_mode)
        goto done;

    /* Special expected values select dedicated API test suites. */
    if (strcmp(expected_value, "check-object-api-works") == 0) {
        test_object_api();
        goto done;
    }
    if (strcmp(expected_value, "check-address-spaces-work") == 0) {
        test_address_spaces();
        goto done;
    }

    st = uacpi_eval(UACPI_NULL, "\\MAIN", UACPI_NULL, &ret);
    ensure_ok_status(st);

    if (ret == NULL)
        error("\\MAIN didn't return a value");

    validate_ret_against_expected(ret, expected_type, expected_value);
    uacpi_object_unref(ret);

done:
    uacpi_state_reset();
    delete_xsdt(xsdt, ssdt_paths->count);
    interface_cleanup();
}
/*
 * Map a --log-level keyword to the uACPI log level; an unknown keyword is
 * a fatal usage error (the trailing return only satisfies the compiler).
 */
static uacpi_log_level log_level_from_string(const char *arg)
{
    static struct {
        const char *str;
        uacpi_log_level level;
    } log_levels[] = {
        { "debug", UACPI_LOG_DEBUG },
        { "trace", UACPI_LOG_TRACE },
        { "info", UACPI_LOG_INFO },
        { "warning", UACPI_LOG_WARN },
        { "error", UACPI_LOG_ERROR },
    };
    size_t i;

    for (i = 0; i < UACPI_ARRAY_SIZE(log_levels); i++)
        if (strcmp(log_levels[i].str, arg) == 0)
            return log_levels[i].level;

    error("invalid log level %s", arg);
    return UACPI_LOG_INFO;
}
/* Command-line schema: one positional DSDT argument plus the option set
   below, wired into the shared argparser helper. */
static arg_spec_t DSDT_PATH_ARG = ARG_POS(
    "dsdt-path-or-keyword",
    "path to the DSDT to run or \"resource-tests\" to run the resource tests"
);
static arg_spec_t EXPECT_ARG = ARG_LIST(
    "expect", 'r',
    "test mode, evaluate \\MAIN and expect <expected-type> <expected-value>"
);
static arg_spec_t EXTRA_TABLES_ARG = ARG_LIST(
    "extra-tables", 'x', "a list of extra SSDTs to load"
);
static arg_spec_t ENUMERATE_NAMESPACE_ARG = ARG_FLAG(
    "enumerate-namespace", 'd', "dump the entire namespace after loading it"
);
static arg_spec_t WHILE_LOOP_TIMEOUT_ARG = ARG_PARAM(
    "while-loop-timeout", 't',
    "number of seconds to use for the while loop timeout"
);
static arg_spec_t LOG_LEVEL_ARG = ARG_PARAM(
    "log-level", 'l',
    "log level to set, one of: debug, trace, info, warning, error"
);
static arg_spec_t HELP_ARG = ARG_HELP(
    "help", 'h', "Display this menu and exit"
);

static arg_spec_t *const POSITIONAL_ARGS[] = {
    &DSDT_PATH_ARG,
};
static arg_spec_t *const OPTION_ARGS[] = {
    &EXPECT_ARG,
    &EXTRA_TABLES_ARG,
    &ENUMERATE_NAMESPACE_ARG,
    &WHILE_LOOP_TIMEOUT_ARG,
    &LOG_LEVEL_ARG,
    &HELP_ARG,
};

static const arg_parser_t PARSER = {
    .positional_args = POSITIONAL_ARGS,
    .num_positional_args = UACPI_ARRAY_SIZE(POSITIONAL_ARGS),
    .option_args = OPTION_ARGS,
    .num_option_args = UACPI_ARRAY_SIZE(OPTION_ARGS),
};
/*
 * Entry point: parse the command line, dispatch either to the standalone
 * resource tests or to run_test() with the requested DSDT, expectation,
 * namespace-dump and log-level settings.
 */
int main(int argc, char *argv[])
{
    const char *dsdt_path_or_keyword;
    const char *expected_value = NULL;
    uacpi_object_type expected_type = UACPI_OBJECT_UNINITIALIZED;
    bool dump_namespace;
    uacpi_log_level log_level;

    parse_args(&PARSER, argc, argv);

    /* AML while-loop watchdog: default 3 seconds unless overridden. */
    uacpi_context_set_loop_timeout(get_uint_or(&WHILE_LOOP_TIMEOUT_ARG, 3));

    dsdt_path_or_keyword = get(&DSDT_PATH_ARG);
    if (strcmp(dsdt_path_or_keyword, "resource-tests") == 0) {
        run_resource_tests();
        return 0;
    }

    if (is_set(&EXPECT_ARG)) {
        /* --expect takes exactly <type> <value>. */
        if (EXPECT_ARG.values.count != 2)
            error("bad --expect format");

        expected_type = string_to_object_type(EXPECT_ARG.values.blobs[0].data);
        expected_value = EXPECT_ARG.values.blobs[1].data;
    }

    dump_namespace = is_set(&ENUMERATE_NAMESPACE_ARG);
    // Don't spam the log with traces if enumeration is enabled
    log_level = dump_namespace ? UACPI_LOG_INFO : UACPI_LOG_TRACE;
    if (is_set(&LOG_LEVEL_ARG))
        log_level = log_level_from_string(get(&LOG_LEVEL_ARG));
    uacpi_context_set_log_level(log_level);

    run_test(
        dsdt_path_or_keyword, &EXTRA_TABLES_ARG.values, expected_type,
        expected_value, dump_namespace
    );
    return 0;
}

View File

@ -0,0 +1,415 @@
// Name: Support for various address spaces works
// Expect: str => check-address-spaces-work
DefinitionBlock ("x.aml", "SSDT", 1, "uTEST", "ASPTESTS", 0xF0F0F0F0)
{
    // Entry point evaluated by the runner; returning the expected string
    // here lets non-uACPI runners pass without exercising the suite.
    Method (MAIN) {
        // Skip for non-uacpi test runners
        Return ("check-address-spaces-work")
    }
    // IPMI opregion test: Arg0 selects which field (CMD0 at the region base,
    // CMD1 at base + 0xF) is exercised. Sends a request buffer and expects
    // a 66-byte response whose integer value matches the field's address.
    Method (DOIP, 1, Serialized) {
        If (Arg0 == 0) {
            Local0 = "DEADBEE0"
            Local1 = 0xDEADBEE0
        } Else {
            Local0 = "DEADBEEF"
            Local1 = 0xDEADBEEF
        }

        OperationRegion (IPMR, IPMI, 0xDEADBEE0, 10)
        Field (IPMR, BufferAcc, NoLock, Preserve) {
            CMD0, 120,
            // Offset = base + 0xF
            CMD1, 1234,
        }

        Name (REQ, Buffer (32) { })
        Name (RET, 0)

        REQ = Concatenate("IPMICommand", Local0)

        If (Arg0 == 0) {
            Local0 = CMD0 = REQ
        } Else {
            Local0 = CMD1 = REQ
        }

        If (SizeOf(Local0) != 66) {
            Printf("Unexpected IPMI response size %o", SizeOf(Local0))
            Return (Zero)
        }

        RET = Local0
        If (RET != Local1) {
            Printf("Unexpected IMPI response %o, expected %o", RET, Local1)
            Return (Zero)
        }

        Return (Ones)
    }
    // GPIO controller stand-ins referenced by the Connection() descriptors
    // in DGIO below.
    Device (GPO0)
    {
        Name (_HID, "INT33FC" /* Intel Baytrail GPIO Controller */)
        Name (_DDN, "ValleyView General Purpose Input/Output (GPIO) controller")
        Name (_UID, 0)
    }

    Device (GPO1)
    {
        Name (_HID, "INT33FC" /* Intel Baytrail GPIO Controller */)
        Name (_DDN, "ValleyView GPNCORE controller")
        Name (_UID, 1)
    }
    // GeneralPurposeIo opregion test: fields CCU0-CCU2 map pins on \GPO0,
    // CCU3 a pin on \GPO1. Writes values, reads them back and checks them
    // (CCU3 is never written and must read back 0).
    Method (DGIO, 0, Serialized) {
        OperationRegion (GPOP, GeneralPurposeIo, Zero, 0x06)
        Field (GPOP, ByteAcc, NoLock, Preserve)
        {
            Connection (
                GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly,
                    "\\GPO0", 0x00, ResourceConsumer, ,
                    )
                { // Pin list
                    0x0002, 0x0003, 0x0004, 0x0005, 0x0006
                }
            ),
            CCU0, 1,
            CCU1, 3,
            CCU2, 1,
            Connection (
                GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly,
                    "\\GPO1", 0x00, ResourceConsumer, ,
                    )
                { // Pin list
                    0x005F
                }
            ),
            CCU3, 1
        }

        CCU0 = 1
        CCU1 = 2
        CCU2 = 0

        Local0 = CCU0
        If (Local0 != 1) {
            Printf("Bad CCU0 return %o", Local0)
            Return (Zero)
        }

        Local0 = CCU1
        If (Local0 != 2) {
            Printf ("Bad CCU1 return %o", Local0)
            Return (Zero)
        }

        Local0 = CCU2
        If (Local0 != 0) {
            Printf ("Bad CCU2 return %o", Local0)
            Return (Zero)
        }

        Local0 = CCU3
        if (Local0 != 0) {
            Printf ("Bad CCU3 value %o", Local0)
            Return (Zero)
        }

        Return (Ones)
    }
    // PCC opregion test: spells "HELLO" into the shared buffer one byte at a
    // time, re-reads it through an overlapping 48-bit field, then writes a
    // command value that the runner's handler is expected to byte-swap.
    Method (DPCC, 0, Serialized) {
        OperationRegion (GPOP, PCC, 0xCA, 0xFF)
        Field (GPOP, DWordAcc, NoLock, Preserve)
        {
            H, 8,
            E, 8,
            L0, 8,
            L1, 8,
            O, 8,
            Offset(12),
            CMD, 32,
        }

        // Overlapping view over the same first bytes of the region.
        Field (GPOP, DWordAcc, NoLock, Preserve)
        {
            HELL, 48,
        }

        H = "H"
        E = "E"
        L0 = "L"
        L1 = "L"
        O = "O"

        If (ToString(HELL) != "HELLO") {
            Printf ("Unexpected HELL value %o", ToString(HELL))
            Return (Zero)
        }

        // Invoke the test runner handler
        CMD = 0xDEADBEEF

        // We expect it to modify the CMD field as a response
        If (CMD != 0xBEEFDEAD) {
            Printf ("Unexpected CMD value %o", CMD)
            Return (Zero)
        }

        Return (Ones)
    }
    // PlatformRtMechanism opregion test: sends "helloworld" through a
    // BufferAcc field and expects the runner to answer with a 26-byte
    // buffer reading "goodbyeworld".
    Method (DPRM, 0, Serialized) {
        OperationRegion (GPOP, PlatformRtMechanism, 0x00, 0xFF)
        Field (GPOP, BufferAcc, NoLock, Preserve)
        {
            DEAD, 80,
        }

        Local0 = DEAD = "helloworld"
        Printf("Got a PRM response: %o", Local0)

        If (SizeOf(Local0) != 26) {
            Printf ("Unexpected Local0 size %o", SizeOf(Local0))
            Return (Zero)
        }

        If (ToString(Local0) != "goodbyeworld") {
            Printf ("Unexpected Local0 value %o", ToString(Local0))
            Return (Zero)
        }

        Return (Ones)
    }
    // FFixedHW opregion test: writes a GUID-carrying request buffer and
    // expects a 256-byte response whose string value is "ok".
    Method (DFHW, 0, Serialized) {
        OperationRegion (GPOP, FFixedHW, 0xCAFEBABE, 0xFEFECACA)
        Field (GPOP, BufferAcc, NoLock, Preserve)
        {
            X, 1,
        }

        Local0 = X = "someguidandstuff"
        Printf("Got a FFixedHW response: %o", Local0)

        If (SizeOf(Local0) != 256) {
            Printf ("Unexpected Local0 size %o", SizeOf(Local0))
            Return (Zero)
        }

        If (ToString(Local0) != "ok") {
            Printf ("Unexpected Local0 value %o", ToString(Local0))
            Return (Zero)
        }

        Return (Ones)
    }
    // I2C controller stand-ins plus the serial-bus resource descriptors
    // used as Connection() targets in DGSB below.
    Scope (_SB) {
        Device (I2C0)
        {
            Name (_HID, "INT34B2")
            Name (_UID, 0)
        }

        Device (I2C1)
        {
            Name (_HID, "80860F41" /* Intel Baytrail I2C Host Controller */)
            Name (_CID, "80860F41" /* Intel Baytrail I2C Host Controller */)
            Name (_DDN, "Intel(R) I2C Controller #5 - 80860F45")
            Name (_UID, 1)
        }
    }

    Name (RES1, ResourceTemplate ()
    {
        I2cSerialBusV2 (0x0008, ControllerInitiated, 0x00061A80,
            AddressingMode7Bit, "\\_SB.I2C0",
            0x00, ResourceConsumer, , Exclusive,
            )
    })

    Name (RES2, ResourceTemplate ()
    {
        I2cSerialBusV2 (0x0040, ControllerInitiated, 0x00061A80,
            AddressingMode7Bit, "\\_SB.I2C1",
            0x00, ResourceConsumer, , Exclusive,
            )
    })
    // GenericSerialBus opregion test: exercises every SMBus access
    // attribute (Quick through RawProcessBytes) against the I2C resources
    // above. The runner is expected to reply with value+1 for each command;
    // the helper CHEK validates both response length and value.
    Method (DGSB, 0, Serialized) {
        // Arg0 = response buffer, Arg1 = expected size, Arg2 = expected value
        Method (CHEK, 3) {
            If (SizeOf(Arg0) != Arg1) {
                Printf(
                    "Bad resulting buffer length %o, expected %o",
                    SizeOf(Arg0), Arg1
                )
                Return (Zero)
            }

            Name (INT, 0)
            INT = Arg0

            If (INT != Arg2) {
                Printf("Unexpected response %o, expected %o", INT, Arg2)
                Return (Zero)
            }

            Return (Ones)
        }

        OperationRegion (RCH1, GenericSerialBus, 0x100, 0x0100)
        Field (RCH1, BufferAcc, NoLock, Preserve)
        {
            Connection (RES1),
            Offset (0x11),

            // Command == 0x111
            AccessAs (BufferAcc, AttribQuick),
            CMD0, 128,

            // Command == 0x121
            AccessAs (BufferAcc, AttribSendReceive),
            CMD1, 8,

            // Command == 0x122
            AccessAs (BufferAcc, AttribByte),
            CMD2, 16,

            // Command == 0x124
            AccessAs (BufferAcc, AttribWord),
            CMD3, 32,

            // Command == 0x128
            AccessAs (BufferAcc, AttribBlock),
            CMD4, 2048,

            // Command == 0x228
            AccessAs (BufferAcc, AttribProcessCall),
            CMD5, 8,

            // Command == 0x229
            AccessAs (BufferAcc, AttribBlockProcessCall),
            CMD6, 144,

            Connection (RES2),

            // Command == 0x23B
            AccessAs (BufferAcc, AttribBytes(15)),
            CMD7, 8,

            // Command == 0x23C
            AccessAs (BufferAcc, AttribRawBytes(255)),
            CMD8, 8,

            // Command == 0x23D
            AccessAs (BufferAcc, AttribRawProcessBytes(123)),
            CMD9, 8,
        }

        Local0 = CMD0 = 0x111
        If (CHEK(Local0, 2, 0x112) != Ones) {
            Return (Zero)
        }

        Local0 = 0x121
        Local0 = CMD1 = Local0
        If (CHEK(Local0, 3, 0x122) != Ones) {
            Return (Zero)
        }

        Local0 = CMD2 = 0x122
        If (CHEK(Local0, 3, 0x123) != Ones) {
            Return (Zero)
        }

        Local0 = CMD3
        If (CHEK(Local0, 4, 0x125) != Ones) {
            Return (Zero)
        }

        Local0 = CMD4
        If (CHEK(Local0, 257, 0x129) != Ones) {
            Return (Zero)
        }

        Local0 = CMD5 = 0x228
        If (CHEK(Local0, 4, 0x229) != Ones) {
            Return (Zero)
        }

        Local0 = CMD6
        If (CHEK(Local0, 257, 0x22A) != Ones) {
            Return (Zero)
        }

        Local0 = CMD7 = 0x23B
        If (CHEK(Local0, 15 + 2, 0x23C) != Ones) {
            Return (Zero)
        }

        Local0 = CMD8
        If (CHEK(Local0, 255 + 2, 0x23D) != Ones) {
            Return (Zero)
        }

        Local0 = CMD9
        If (CHEK(Local0, 255 + 2, 0x23E) != Ones) {
            Return (Zero)
        }

        Return (Ones)
    }
/*
 * Dispatch the smoke test matching an ACPI operation region space ID.
 *
 * Arg0 -> The address space type (ACPI-defined region space ID)
 * Return -> Ones on success, Zero on failure
 */
Method (CHEK, 1, Serialized) {
Switch (Arg0) {
Case (7) { // IPMI
// IPMI is exercised twice with different arguments;
// DOIP is defined elsewhere in this table.
Local0 = DOIP(0)
If (Local0 != Ones) {
Break
}
Local0 = DOIP(1)
Break
}
Case (8) { // General Purpose IO
Local0 = DGIO()
Break
}
Case (9) { // Generic Serial Bus
Local0 = DGSB()
Break
}
Case (0x0A) { // PCC
Local0 = DPCC()
Break
}
Case (0x0B) { // PRM
Local0 = DPRM()
Break
}
Case (0x7F) { // FFixedHW
Local0 = DFHW()
Break
}
}
// NOTE(review): if Arg0 matches none of the cases above, Local0 is read
// uninitialized here — callers are presumably restricted to the listed
// IDs; confirm against the test runner.
If (Local0 != Ones) {
Printf("Address space %o failed: expected '%o', got '%o'!",
Arg0, Ones, Local0)
Return (Zero)
}
Printf("Address space %o OK", ToHexString(Arg0))
Return (Ones)
}
}

View File

@ -0,0 +1,71 @@
// Name: Nested Package Doesn't Leak Memory
// Expect: int => 1
DefinitionBlock ("", "DSDT", 2, "uTEST", "TESTTABL", 0xF0F0F0F0)
{
// Builds a deeply nested package mixing fixed-size packages, runtime-sized
// packages (Package (Local1)), strings, integers and buffers, then returns
// it. The point of the test is object-lifetime bookkeeping: the harness
// verifies the interpreter releases every nested element (no leaks) after
// the returned package goes out of scope.
Method(GPKG) {
Local1 = 10
// Runtime-sized package: 10 slots, only 8 initialized — the remaining
// elements stay Uninitialized, which the interpreter must still track.
Local0 = Package (Local1) {
0x123,
0x321,
Package {
0x321,
"123",
Package {
0x321,
Package {
0x321,
"123",
Package {
0x321,
"123",
Package {
0x321,
Package {
0x321,
"123",
// Another runtime-sized, partially-initialized package
Package (Local1) {
0x321,
"123",
999,
},
999,
},
"123",
999,
},
999,
},
999,
},
"123",
999,
},
999,
},
"Hello world",
Package {
0x321,
"Hello",
},
Package {
0x321,
"World",
},
Package {
// 10-byte buffer with a single initializer; the rest is zero-filled
Buffer (Local1) { 0xFF },
0xDEADBEEF,
},
// Buffer with size inferred from its initializer list
Buffer { 1, 2, 3 }
}
Return (Local0)
}
// Entry point: materialize the package, dump it to the Debug object, then
// drop it and return 1 (the value the "Expect" directive above checks).
Method (MAIN) {
Local0 = GPKG()
Debug = Local0
Local0 = 1
Return (Local0)
}
}

View File

@ -0,0 +1,127 @@
// Name: Concatenate Resources
// Expect: int => 1
DefinitionBlock ("", "DSDT", 2, "uTEST", "TESTTABL", 0xF0F0F0F0)
{
// Verifies ConcatenateResTemplate semantics: the 2-byte end tag
// (0x79 + checksum) of the first template is dropped and the second
// template — which keeps its own end tag — is appended verbatim.
// A mix of large (word/dword address) and small (IO) descriptors.
Name (BUF0, ResourceTemplate ()
{
WordBusNumber (ResourceProducer, MinFixed, MaxFixed, PosDecode,
0x0000,
0x0000,
0x00FF,
0x0000,
0x0100,
,, _Y00)
DWordIO (ResourceProducer, MinFixed, MaxFixed, PosDecode, EntireRange,
0x00000000,
0x00000000,
0x00000CF7,
0x00000000,
0x00000CF8,
1, "\\SOME.PATH",, TypeStatic, DenseTranslation)
IO (Decode16,
0x0CF8,
0x0CF8,
0x01,
0x08,
)
DWordMemory (ResourceProducer, PosDecode, MinFixed, MaxFixed, Cacheable, ReadWrite,
0x00000000,
0x000A0000,
0x000BFFFF,
0x00000000,
0x00020000,
123, "^^^^^^^^^ANOT.ER.PATH", , AddressRangeMemory, TypeStatic)
})
// Single-descriptor template used as the second operand of the first concat
Name (IIC0, ResourceTemplate ()
{
I2cSerialBusV2 (0x0000, ControllerInitiated, 0x00061A80,
AddressingMode7Bit, "\\_SB.PCI0.I2C0",
0x00, ResourceConsumer, _Y10, Exclusive,
)
})
// Larger mixed template (serial bus + GPIO descriptors) for the second concat
Name (RBUF, ResourceTemplate ()
{
I2cSerialBusV2 (0x0029, ControllerInitiated, 0x00061A80,
AddressingMode7Bit, "\\_SB.PCI0.I2C0",
0x00, ResourceConsumer, , Exclusive,
)
GpioInt (Level, ActiveHigh, Exclusive, PullNone, 0x0000,
"\\_SB.PCI0.GPI0", 0x00, ResourceConsumer, ,
)
{
0x012A
}
GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly,
"\\_SB.PCI0.GPI0", 0x00, ResourceConsumer, ,
)
{
0x002F
}
GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly,
"\\_SB.PCI0.GPI0", 0x00, ResourceConsumer, ,
)
{
0x0124
}
})
// Validate a concatenation result byte-for-byte.
// Arg0 = first source template, Arg1 = second source template,
// Arg2 = ConcatenateResTemplate(Arg0, Arg1) result.
// Returns 1 on match, 0 on any size or byte mismatch.
// src0, src1, dst
Method (CHEK, 3)
{
// Expected size: both sources minus the first template's 2-byte end tag
Local0 = (SizeOf(Arg0) + SizeOf(Arg1)) - 2
If (Local0 != SizeOf(Arg2)) {
Printf("Invalid final buffer size: %o, expected %o",
Local0, SizeOf(Arg2))
Return (0)
}
Local0 = 0
Local1 = 0
// Compare src0's bytes excluding its end tag (last 2 bytes)...
While (Local0 < (SizeOf(Arg0) - 2)) {
Local2 = DerefOf(Arg0[Local0])
Local3 = DerefOf(Arg2[Local1])
If (Local2 != Local3) {
Printf("Byte src=%o (dst=%o) mismatch, expected %o got %o",
Local0, Local1, ToHexString(Local2), ToHexString(Local3))
Return (0)
}
Local0 += 1
Local1 += 1
}
Local0 = 0
// ...then all of src1 (including its end tag), continuing at offset Local1
While (Local0 < SizeOf(Arg1)) {
Local2 = DerefOf(Arg1[Local0])
Local3 = DerefOf(Arg2[Local1])
If (Local2 != Local3) {
Printf("Byte src=%o (dst=%o) mismatch, expected %o got %o",
Local0, Local1, ToHexString(Local2), ToHexString(Local3))
Return (0)
}
Local0 += 1
Local1 += 1
}
Return (1)
}
// Entry point: chain two concatenations (the second consumes the first's
// output) and validate each against its sources.
Method (MAIN, 0, NotSerialized)
{
Local0 = ConcatenateResTemplate(BUF0, IIC0)
If (CHEK(BUF0, IIC0, Local0) != 1) {
Return (0)
}
Local1 = ConcatenateResTemplate(Local0, RBUF)
Return(CHEK(Local0, RBUF, Local1))
}
}

Some files were not shown because too many files have changed in this diff Show More