Compare commits

..

63 Commits

SHA1        Date                         Message
(CI status: every "Build documentation / build-and-deploy (push)" run shown for this range was successful.)
38e26a9c12  2026-01-30 14:05:47 +01:00   Implement argument_ptr () syscall for handling process arguments
124aa12f5b  2026-01-30 02:36:27 +01:00   Redesign scheduling points
d2f5c032d9  2026-01-29 18:18:24 +01:00   Fix TLS alignment issues, works on BOCHS now too!
73e42588fb  2026-01-29 15:04:06 +01:00   Fix BOCHS clock
e78bfb9984  2026-01-29 01:52:18 +01:00   Move suspension q code into proc/suspension_q.c
d2a88b3641  2026-01-29 01:43:01 +01:00   Move suspension q's cleanup to proc/suspension_q.c
fdda2e2df8  2026-01-29 01:38:44 +01:00   Unlock mutexes on process death
388418a718  2026-01-29 00:08:54 +01:00   Nice wrappers around process management
1c64d608bd  2026-01-28 23:57:28 +01:00   Rename make/libc.mk -> make/libmsl.mk
3d23187acf  2026-01-28 23:52:48 +01:00   Implement userspace TLS, remove RW Locks
a3b62ebd3d  2026-01-27 19:03:03 +01:00   Clean up AMD64 memory management code, remove dependency on pd.lock
8bda300f6a  2026-01-27 18:05:02 +01:00   Fix sys_clone () wrong argument bug
cf51600c6a  2026-01-27 17:34:43 +01:00   Cleanup syscalls
b388b30b24  2026-01-27 17:04:08 +01:00   Redesign userspace memory management
600886a7ee  2026-01-27 14:18:05 +01:00   Organize resources into process groups
67b66f2b39  2026-01-25 23:10:12 +01:00   Implement proper mutex cleanup
18f791222e  2026-01-25 22:39:29 +01:00   Remove dead process from it's suspension queues
5e16bb647c  2026-01-25 22:10:04 +01:00   Multiple process suspension queues
a68373e4ee  2026-01-25 20:39:51 +01:00   Dynamically assign cpu upon mutex unlock
8650010992  2026-01-25 17:39:34 +01:00   Fix user CPU context saving
95f590fb3b  2026-01-25 15:54:00 +01:00   multi-cpu scheduling WIP
7bb3b77ede  2026-01-22 19:32:15 +01:00   Disable kernel preemption, fix requesting rescheduling
c26fd3cb2b  2026-01-22 15:59:29 +01:00   Fix scheduler locking hierarchy
fea0999726  2026-01-22 11:54:52 +01:00   Fix scheduler starvation, use lists for scheduling
7eceecf6e3  2026-01-20 22:18:43 +01:00   Add mutex syscalls
fff51321bc  2026-01-20 20:46:34 +01:00   Redesign syscalls
a29233f853  2026-01-19 22:01:44 +01:00   Rename proc_spawn_thread to proc_clone
38a43b59b0  2026-01-19 01:51:34 +01:00   Resolve strange IRQ issues which cause the scheduler to behave weirdly (IRQ mapping)
ddafc4eb19  2026-01-18 20:50:45 +01:00   Rewrite resource subsystem
4f7077d458  2026-01-16 22:13:17 +01:00   Move mutex and mem create/cleanup functions into mutex.c and mem.c respectively
9a7dbf0594  2026-01-16 22:09:16 +01:00   Properly implement liballoc_free ()
ab8093cc6c  2026-01-16 20:28:26 +01:00   CI install pymdown-extensions from pip
ddbb66b5e4  2026-01-16 20:26:23 +01:00   Docs processes overview
11a1eb52aa  2026-01-16 19:07:32 +01:00   Move status codes into a separate header
a054257336  2026-01-16 18:50:40 +01:00   Port liballoc to userspace
9fc8521e63  2026-01-16 00:28:46 +01:00   sys_proc_mutex_unlock () automatically reschedule at the end
711da8aeab  2026-01-16 00:26:37 +01:00   Implement proc_spawn_thread syscall, fix proc_resume and proc_suspend
ebd9f0cac6  2026-01-14 23:19:39 +01:00   Let the user application decide upon the resource ID (RID)
7cd5623d36  2026-01-14 23:11:06 +01:00   Use reference counting to track filetime of process PD
270ff507d4  2026-01-14 22:11:56 +01:00   Implement lock IRQ nesting via stack variables/contexts
55166f9d5f  2026-01-14 21:21:20 +01:00   syscall doesn't need RPL 3 bits on kernel code
e5cc3a64d3  2026-01-14 20:58:00 +01:00   Fix syscall return value - preserve RAX register
2ab308d678  2026-01-14 20:56:09 +01:00   Drop m_ prefix from libmsl
d1d772cb42  2026-01-14 19:51:18 +01:00   Fix user apps randomly crashing (APIC, GDT layout, syscall entry)
0d8f9e565f  2026-01-11 12:07:17 +01:00   Fix missing CPU_REQUEST_SCHED IDT entry
f80a26e5eb  2026-01-11 03:45:32 +01:00   Load kernel CR3
5bf10c1218  2026-01-11 03:42:15 +01:00   Extra compiler flags for AMD64
41a458b925  2026-01-10 00:12:42 +01:00   Implement Mutexes and supporting syscalls, cleanup/optimize scheduler
6a474c21a0  2026-01-09 19:53:08 +01:00   Use RW spin locks
a5283283f6  2026-01-09 00:00:18 +01:00   Hold proc->lock while killing the process
79768d94e6  2026-01-08 23:06:32 +01:00   Preserve syscall return value in RAX
0555ddd041  2026-01-08 22:05:11 +01:00   Clean up IOAPIC and LAPIC implementations
ebb026b807  2026-01-07 23:09:13 +01:00   proc_cleanup_resources () drop instead of immediate removal
d7b734306f  2026-01-07 22:47:30 +01:00   Introduce concept of Process Resources (PR_MEM), implement necessary syscalls
28aef30f77  2026-01-06 23:32:11 +01:00   Implement proc_map () and proc_unmap () syscalls
9f107a1a5e  2026-01-06 17:47:21 +01:00   Implement proc_unmap ()
e50f8940a9  2026-01-06 16:38:42 +01:00   Redesign linked list
d09e4d97ad  2026-01-06 03:08:13 +01:00   Fix missing headers, generate compile db with bear
7915986902  2026-01-06 02:04:32 +01:00   Remove Doxygen-style comments, change formatting to wrap comments
902682ac11  2026-01-06 01:41:07 +01:00   Remove doxygen infra
7747e5e0aa  2026-01-06 01:37:51 +01:00   Docs update theme
a8423fe657  2026-01-06 01:19:11 +01:00   Better proc_kill () and process cleanup
6538fd8023  2026-01-05 20:24:26 +01:00   Generate new PIDs for processes
122 changed files with 3167 additions and 8378 deletions

View File

@@ -50,7 +50,7 @@ AlignOperands: false
 SortIncludes: true
 # Comments
-ReflowComments: false
+ReflowComments: true
 CommentPragmas: '^ IWYU pragma:'
 # Misc

View File

@@ -25,7 +25,7 @@ jobs:
 - name: Install mkdocs
   run: |
     pip install --upgrade pip
-    pip install mkdocs mkdocs-material
+    pip install mkdocs mkdocs-material pymdown-extensions
 - name: Build
   run: make docs

View File

@@ -4,4 +4,4 @@ include make/apps.mk
 include make/kernel.mk
 include make/dist.mk
 include make/docs.mk
-include make/libc.mk
+include make/libmsl.mk

View File

@@ -4,7 +4,8 @@ cflags += --target=x86_64-pc-none-elf \
 -mno-avx \
 -mno-mmx \
 -mno-80387 \
--mno-red-zone
+-mno-red-zone \
+-mcmodel=large
 ldflags += --target=x86_64-pc-none-elf \
 -Wl,-zmax-page-size=0x1000

View File

@@ -6,6 +6,8 @@ PHDRS {
     text PT_LOAD;
     rodata PT_LOAD;
     data PT_LOAD;
+    bss PT_LOAD;
+    tls PT_TLS;
 }
 SECTIONS {
@@ -13,32 +15,53 @@ SECTIONS {
     .text : {
         *(.text .text.*)
+        *(.ltext .ltext.*)
     } :text
-    . = ALIGN(CONSTANT(MAXPAGESIZE));
+    . = ALIGN(0x1000);
     .rodata : {
         *(.rodata .rodata.*)
     } :rodata
-    .note.gnu.build-id : {
-        *(.note.gnu.build-id)
-    } :rodata
-    . = ALIGN(CONSTANT(MAXPAGESIZE));
+    . = ALIGN(0x1000);
     .data : {
         *(.data .data.*)
+        *(.ldata .ldata.*)
     } :data
+    . = ALIGN(0x1000);
     __bss_start = .;
     .bss : {
         *(.bss .bss.*)
-    } :data
+        *(.lbss .lbss.*)
+    } :bss
     __bss_end = .;
+    . = ALIGN(0x1000);
+    __tdata_start = .;
+    .tdata : {
+        *(.tdata .tdata.*)
+    } :tls
+    __tdata_end = .;
+    __tbss_start = .;
+    .tbss : {
+        *(.tbss .tbss.*)
+    } :tls
+    __tbss_end = .;
+    __tls_size = __tbss_end - __tdata_start;
     /DISCARD/ : {
         *(.eh_frame*)
         *(.note .note.*)
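
The new __tdata_start/__tbss_end/__tls_size symbols suggest that a per-process TLS block is built by copying the .tdata image and zeroing .tbss. The sketch below only illustrates how such linker symbols are typically consumed; the helper name, the allocator callback, and the layout policy are assumptions, not code from this changeset.

```c
#include <stddef.h>
#include <string.h>

/* Symbols provided by the linker script above. */
extern char __tdata_start[], __tdata_end[], __tbss_start[], __tbss_end[];

/* Hypothetical helper: build a TLS block for a new process.
 * The alloc callback stands in for whatever allocator the kernel/libc uses. */
static void *tls_make_block (void *(*alloc) (size_t)) {
    size_t tdata_size = (size_t)(__tdata_end - __tdata_start);
    size_t tls_size   = (size_t)(__tbss_end - __tdata_start);

    char *block = alloc (tls_size);
    if (block == NULL)
        return NULL;

    memcpy (block, __tdata_start, tdata_size);              /* initialized TLS (.tdata) */
    memset (block + tdata_size, 0, tls_size - tdata_size);  /* .tbss is zero-filled     */
    return block;
}
```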

View File

@@ -1,4 +1,4 @@
-cpu: model=p4_prescott_celeron_336
+cpu: model=p4_prescott_celeron_336, ips=200000000
 memory: guest=4096 host=2048
@@ -9,6 +9,7 @@ ata0: enabled=1
 ata0-master: type=cdrom, path=mop3.iso, status=inserted
 com1: enabled=1, mode=file, dev=bochs-com1.txt
 pci: enabled=1, chipset=i440fx
+clock: sync=realtime, time0=local
 boot: cdrom

1
docs/.gitignore vendored
View File

@@ -1 +0,0 @@
-doxygen/

Binary file added (not shown). Size: 118 KiB

Binary file added (not shown). Size: 51 KiB

View File

@@ -2,7 +2,3 @@
 MOP3 is a hobby OS project of mine ;).
-# Kernel documentation
-- [Doxygen docs](kernel/doxygen/html/index.html)
-- [Building](building_kernel/index.html)

View File

@@ -0,0 +1,30 @@
# Overview of processes in MOP3
## What is a process?
A process is a structure that represents the internal state of a user application's environment. This includes
the necessary stacks, code, data and other resources. A process (usually) has its own address space, but in certain
circumstances may share it with another process.
## Only processes vs. processes-threads model
### Overview
MOP3 doesn't have a process-thread separation. Usually in operating systems you'd have a "process", which consists
of multiple worker threads; for example, a single-threaded application is a process consisting of one worker. In MOP3
we do things a little differently. We only have processes, but some processes may work within the same pool of (generally speaking)
"resources", such as a shared address space, shared memory allocations, mutexes and so on. An application then consists
not of threads, but of processes, which are loosely tied together via shared data.
#### Processes-threads model diagram
![Processes-threads model](assets/images/processes-threads.png)
#### Only processes model diagram
![Only processes model](assets/images/only-processes.png)
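
As a rough structural illustration of the model above, a process might simply point at a shared resource group instead of belonging to a parent process. The definitions below are purely illustrative assumptions, not the real MOP3 structures:

```c
#include <stdint.h>

/* Illustrative only: not the actual MOP3 definitions. */
struct resource_group {
    int   refcount;        /* how many processes share this pool */
    void *address_space;   /* shared page tables                 */
    void *mutexes;         /* mutexes owned by the group         */
    void *mappings;        /* shared memory allocations          */
};

struct process {
    int                    pid;
    uintptr_t              stack_top;  /* per-process stack              */
    struct resource_group *group;      /* resources shared with siblings */
};
```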
## Scheduling
MOP3 uses a round-robin based scheduler. For now priorities are left unimplemented, i.e. every process has
equal priority, but this may change in the future.
A good explanation of round-robin scheduling can be found on the OSDev wiki: [Scheduling Algorithms](https://wiki.osdev.org/Scheduling_Algorithms#Round_Robin)
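
For readers unfamiliar with round-robin, the textbook form of "pick the next runnable process in a circular list" looks roughly like the sketch below. This is a generic illustration, not MOP3's actual scheduler code:

```c
#include <stddef.h>

/* Textbook round-robin: every runnable process gets the CPU in turn. */
struct rr_proc {
    int             pid;
    int             runnable;
    struct rr_proc *next;   /* circular run queue */
};

static struct rr_proc *rr_pick_next (struct rr_proc *current) {
    struct rr_proc *p = current->next;
    while (p != current) {              /* walk the ring once          */
        if (p->runnable)
            return p;                   /* first runnable process wins */
        p = p->next;
    }
    return current->runnable ? current : NULL; /* nothing else to run  */
}
```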

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 - 2023 jothepro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,207 +0,0 @@
# Doxygen Awesome
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/jothepro/doxygen-awesome-css)](https://github.com/jothepro/doxygen-awesome-css/releases/latest)
[![GitHub](https://img.shields.io/github/license/jothepro/doxygen-awesome-css)](https://github.com/jothepro/doxygen-awesome-css/blob/main/LICENSE)
![GitHub Repo stars](https://img.shields.io/github/stars/jothepro/doxygen-awesome-css)
<div class="title_screenshot">
![Screenshot of Doxygen Awesome CSS](img/screenshot.png)
</div>
**Doxygen Awesome** is a custom CSS theme for Doxygen HTML documentation with many customization options.
## Motivation
I really like how the Doxygen HTML documentation is structured, but IMHO it looks a bit outdated.
This theme is an attempt to modernize the visuals of Doxygen without changing its overall layout too much.
## Features
- 🌈 Clean, modern design
- 🚀 Highly customizable by adjusting CSS variables
- 🧩 No changes to the HTML structure of Doxygen are required
- 📱 Improved mobile usability
- 🌘 Dark mode support!
- 🥇 Works best with **Doxygen 1.9.1** - **1.9.4** and **1.9.6** - **1.14.0**
## Examples
Some websites using this theme:
- [Documentation of this repository](https://jothepro.github.io/doxygen-awesome-css/)
- [wxWidgets](https://docs.wxwidgets.org/3.2/)
- [OpenCV 5.x](https://docs.opencv.org/5.x/)
- [Zephyr](https://docs.zephyrproject.org/latest/doxygen/html/index.html)
- [Spatial Audio Framework (SAF)](https://leomccormack.github.io/Spatial_Audio_Framework/index.html)
- [Randolf Richardson's C++ classes](https://www.randolf.ca/c++/docs/)
- [libsl3](https://a4z.github.io/libsl3/)
- [DuMu<sup>x</sup>](https://dumux.org/docs/doxygen/master/)
- [OpenRemise](https://openremise.at/)
## Installation
To use the theme when generating your documentation, bring the required CSS and JS files from this repository into your project.
This can be done in several ways:
- manually copying the files
- adding the project as a Git submodule
- downloading the project with CMake FetchContent
- adding the project as an npm/xpm dependency
- installing the theme system-wide
All theme files are located in the root of this repository and start with the prefix `doxygen-awesome-`. You may not need all of them. Follow the installation instructions to determine which files are required for your setup.
### Git submodule
For projects that use Git, add the repository as a submodule and check out the desired release:
```sh
git submodule add https://github.com/jothepro/doxygen-awesome-css.git
cd doxygen-awesome-css
git checkout v2.4.1
```
### CMake with FetchContent
For projects that build with CMake, the `FetchContent` module can be used to download the repository at configuration time.
Add the following snippet to your `CMakeLists.txt`:
```cmake
include(FetchContent)
FetchContent_Declare(
doxygen-awesome-css
URL https://github.com/jothepro/doxygen-awesome-css/archive/refs/heads/main.zip
)
FetchContent_MakeAvailable(doxygen-awesome-css)
# Save the location the files were cloned into
# This allows us to get the path to doxygen-awesome.css
FetchContent_GetProperties(doxygen-awesome-css SOURCE_DIR AWESOME_CSS_DIR)
# Generate the Doxyfile
set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/doc/Doxyfile.in)
set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY)
```
This downloads the latest main (but any other revision could be used) and unpacks in the build folder. The `Doxyfile.in` can reference this location in the `HTML_EXTRA_STYLESHEET` field
```text
HTML_EXTRA_STYLESHEET = @AWESOME_CSS_DIR@/doxygen-awesome.css
```
When the configure stage of CMake is run, the `Doxyfile.in` is rendered to Doxyfile and Doxygen can be run as usual.
### npm/xpm dependency
In the npm ecosystem, this project can be added as a development dependency
to your project:
```sh
cd your-project
npm install https://github.com/jothepro/doxygen-awesome-css#v2.4.1 --save-dev
ls -l node_modules/@jothepro/doxygen-awesome-css
```
Similarly, in the [xPack](https://xpack.github.io) ecosystem, this project can be added
as a development dependency to an [`xpm`](https://xpack.github.io/xpm/)
managed project.
### System-wide
You can even install the theme system-wide by running `make install`.
The files will be installed to `/usr/local/share/` by default,
but you can customize the install location with `make PREFIX=/my/custom/path install`.
### Choosing a layout
There are two layout options. Choose one of them and configure Doxygen accordingly:
<div class="tabbed">
- <b class="tab-title">Base Theme</b><div class="darkmode_inverted_image">
![](img/theme-variants-base.drawio.svg)
</div>
Comes with the typical Doxygen titlebar. Optionally the treeview in the sidebar can be enabled.
Required files: `doxygen-awesome.css`
Required `Doxyfile` configuration:
```
GENERATE_TREEVIEW = YES # optional. Also works without treeview
DISABLE_INDEX = NO
FULL_SIDEBAR = NO
HTML_EXTRA_STYLESHEET = doxygen-awesome-css/doxygen-awesome.css
HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5
```
- <b class="tab-title">Sidebar-Only Theme</b><div class="darkmode_inverted_image">
![](img/theme-variants-sidebar-only.drawio.svg)
</div>
Hides the top titlebar to give more space to the content. The treeview must be enabled in order for this theme to work.
Required files: `doxygen-awesome.css`, `doxygen-awesome-sidebar-only.css`
Required `Doxyfile` configuration:
```
GENERATE_TREEVIEW = YES # required!
DISABLE_INDEX = NO
FULL_SIDEBAR = NO
HTML_EXTRA_STYLESHEET = doxygen-awesome-css/doxygen-awesome.css \
doxygen-awesome-css/doxygen-awesome-sidebar-only.css
HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5
```
</div>
<br>
@warning
- This theme is not compatible with the `FULL_SIDEBAR = YES` option provided by Doxygen!
- `HTML_COLORSTYLE` must be set to `LIGHT` since Doxygen 1.9.5!
### Further installation instructions
- [Installing extensions](docs/extensions.md)
- [Customizing the theme (colors, spacing, border-radius, ...)](docs/customization.md)
- [Tips and Tricks for further configuration](docs/tricks.md)
## Browser support
Tested with
- Chrome 140, Chrome 140 for Android, Chrome 141 for iOS
- Safari 26, Safari for iOS 26
- Firefox 143, Firefox 142 for Android, Firefox 143 for iOS
- Edge 140
- Opera One 122
The theme does not strive to be backward compatible with (significantly) older browser versions.
## Credits
Thanks for all the bug reports and inspiring feedback on GitHub!
Special thanks to all the contributors:
<br><br>
<a href="https://github.com/jothepro/doxygen-awesome-css/graphs/contributors">
<img src="https://contrib.rocks/image?repo=jothepro/doxygen-awesome-css" />
</a>
<div class="section_buttons">
| Read Next |
|---------------------------------:|
| [Extensions](docs/extensions.md) |
</div>

View File

@@ -1,138 +0,0 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
*/
class DoxygenAwesomeDarkModeToggle extends HTMLElement {
// SVG icons from https://fonts.google.com/icons
// Licensed under the Apache 2.0 license:
// https://www.apache.org/licenses/LICENSE-2.0.html
static lightModeIcon = `<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24px" viewBox="0 0 24 24" width="24px" fill="#FCBF00"><rect fill="none" height="24" width="24"/><circle cx="12" cy="12" opacity=".3" r="3"/><path d="M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z"/></svg>`
static darkModeIcon = `<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24px" viewBox="0 0 24 24" width="24px" fill="#FE9700"><rect fill="none" height="24" width="24"/><path d="M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27 C17.45,17.19,14.93,19,12,19c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z" opacity=".3"/><path d="M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z"/></svg>`
static title = "Toggle Light/Dark Mode"
static prefersLightModeInDarkModeKey = "prefers-light-mode-in-dark-mode"
static prefersDarkModeInLightModeKey = "prefers-dark-mode-in-light-mode"
static _staticConstructor = function() {
DoxygenAwesomeDarkModeToggle.enableDarkMode(DoxygenAwesomeDarkModeToggle.userPreference)
// Update the color scheme when the browsers preference changes
// without user interaction on the website.
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', event => {
DoxygenAwesomeDarkModeToggle.onSystemPreferenceChanged()
})
// Update the color scheme when the tab is made visible again.
// It is possible that the appearance was changed in another tab
// while this tab was in the background.
document.addEventListener("visibilitychange", visibilityState => {
if (document.visibilityState === 'visible') {
DoxygenAwesomeDarkModeToggle.onSystemPreferenceChanged()
}
});
}()
static init() {
$(function() {
$(document).ready(function() {
const toggleButton = document.createElement('doxygen-awesome-dark-mode-toggle')
toggleButton.title = DoxygenAwesomeDarkModeToggle.title
toggleButton.updateIcon()
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', event => {
toggleButton.updateIcon()
})
document.addEventListener("visibilitychange", visibilityState => {
if (document.visibilityState === 'visible') {
toggleButton.updateIcon()
}
});
$(document).ready(function(){
document.getElementById("MSearchBox").parentNode.appendChild(toggleButton)
})
$(window).resize(function(){
document.getElementById("MSearchBox").parentNode.appendChild(toggleButton)
})
})
})
}
constructor() {
super();
this.onclick=this.toggleDarkMode
}
/**
* @returns `true` for dark-mode, `false` for light-mode system preference
*/
static get systemPreference() {
return window.matchMedia('(prefers-color-scheme: dark)').matches
}
/**
* @returns `true` for dark-mode, `false` for light-mode user preference
*/
static get userPreference() {
return (!DoxygenAwesomeDarkModeToggle.systemPreference && localStorage.getItem(DoxygenAwesomeDarkModeToggle.prefersDarkModeInLightModeKey)) ||
(DoxygenAwesomeDarkModeToggle.systemPreference && !localStorage.getItem(DoxygenAwesomeDarkModeToggle.prefersLightModeInDarkModeKey))
}
static set userPreference(userPreference) {
DoxygenAwesomeDarkModeToggle.darkModeEnabled = userPreference
if(!userPreference) {
if(DoxygenAwesomeDarkModeToggle.systemPreference) {
localStorage.setItem(DoxygenAwesomeDarkModeToggle.prefersLightModeInDarkModeKey, true)
} else {
localStorage.removeItem(DoxygenAwesomeDarkModeToggle.prefersDarkModeInLightModeKey)
}
} else {
if(!DoxygenAwesomeDarkModeToggle.systemPreference) {
localStorage.setItem(DoxygenAwesomeDarkModeToggle.prefersDarkModeInLightModeKey, true)
} else {
localStorage.removeItem(DoxygenAwesomeDarkModeToggle.prefersLightModeInDarkModeKey)
}
}
DoxygenAwesomeDarkModeToggle.onUserPreferenceChanged()
}
static enableDarkMode(enable) {
if(enable) {
DoxygenAwesomeDarkModeToggle.darkModeEnabled = true
document.documentElement.classList.add("dark-mode")
document.documentElement.classList.remove("light-mode")
} else {
DoxygenAwesomeDarkModeToggle.darkModeEnabled = false
document.documentElement.classList.remove("dark-mode")
document.documentElement.classList.add("light-mode")
}
}
static onSystemPreferenceChanged() {
DoxygenAwesomeDarkModeToggle.darkModeEnabled = DoxygenAwesomeDarkModeToggle.userPreference
DoxygenAwesomeDarkModeToggle.enableDarkMode(DoxygenAwesomeDarkModeToggle.darkModeEnabled)
}
static onUserPreferenceChanged() {
DoxygenAwesomeDarkModeToggle.enableDarkMode(DoxygenAwesomeDarkModeToggle.darkModeEnabled)
}
toggleDarkMode() {
DoxygenAwesomeDarkModeToggle.userPreference = !DoxygenAwesomeDarkModeToggle.userPreference
this.updateIcon()
}
updateIcon() {
if(DoxygenAwesomeDarkModeToggle.darkModeEnabled) {
this.innerHTML = DoxygenAwesomeDarkModeToggle.darkModeIcon
} else {
this.innerHTML = DoxygenAwesomeDarkModeToggle.lightModeIcon
}
}
}
customElements.define("doxygen-awesome-dark-mode-toggle", DoxygenAwesomeDarkModeToggle);

View File

@@ -1,66 +0,0 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2022 - 2025 jothepro
*/
class DoxygenAwesomeFragmentCopyButton extends HTMLElement {
constructor() {
super();
this.onclick=this.copyContent
}
static title = "Copy to clipboard"
static copyIcon = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M23.04,10.322c0,-2.582 -2.096,-4.678 -4.678,-4.678l-6.918,-0c-2.582,-0 -4.678,2.096 -4.678,4.678c0,-0 0,8.04 0,8.04c0,2.582 2.096,4.678 4.678,4.678c0,-0 6.918,-0 6.918,-0c2.582,-0 4.678,-2.096 4.678,-4.678c0,-0 0,-8.04 0,-8.04Zm-2.438,-0l-0,8.04c-0,1.236 -1.004,2.24 -2.24,2.24l-6.918,-0c-1.236,-0 -2.239,-1.004 -2.239,-2.24l-0,-8.04c-0,-1.236 1.003,-2.24 2.239,-2.24c0,0 6.918,0 6.918,0c1.236,0 2.24,1.004 2.24,2.24Z"/><path d="M5.327,16.748c-0,0.358 -0.291,0.648 -0.649,0.648c0,0 0,0 0,0c-2.582,0 -4.678,-2.096 -4.678,-4.678c0,0 0,-8.04 0,-8.04c0,-2.582 2.096,-4.678 4.678,-4.678l6.918,0c2.168,0 3.994,1.478 4.523,3.481c0.038,0.149 0.005,0.306 -0.09,0.428c-0.094,0.121 -0.239,0.191 -0.392,0.191c-0.451,0.005 -1.057,0.005 -1.457,0.005c-0.238,0 -0.455,-0.14 -0.553,-0.357c-0.348,-0.773 -1.128,-1.31 -2.031,-1.31c-0,0 -6.918,0 -6.918,0c-1.236,0 -2.24,1.004 -2.24,2.24l0,8.04c0,1.236 1.004,2.24 2.24,2.24l0,-0c0.358,-0 0.649,0.29 0.649,0.648c-0,0.353 -0,0.789 -0,1.142Z" style="fill-opacity:0.6;"/></svg>`
static successIcon = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path d="M8.084,16.111c-0.09,0.09 -0.212,0.141 -0.34,0.141c-0.127,-0 -0.249,-0.051 -0.339,-0.141c-0.746,-0.746 -2.538,-2.538 -3.525,-3.525c-0.375,-0.375 -0.983,-0.375 -1.357,0c-0.178,0.178 -0.369,0.369 -0.547,0.547c-0.375,0.375 -0.375,0.982 -0,1.357c1.135,1.135 3.422,3.422 4.75,4.751c0.27,0.27 0.637,0.421 1.018,0.421c0.382,0 0.749,-0.151 1.019,-0.421c2.731,-2.732 10.166,-10.167 12.454,-12.455c0.375,-0.375 0.375,-0.982 -0,-1.357c-0.178,-0.178 -0.369,-0.369 -0.547,-0.547c-0.375,-0.375 -0.982,-0.375 -1.357,0c-2.273,2.273 -9.567,9.567 -11.229,11.229Z"/></svg>`
static successDuration = 980
static init() {
$(function() {
$(document).ready(function() {
if(navigator.clipboard) {
const fragments = document.getElementsByClassName("fragment")
for(const fragment of fragments) {
const fragmentWrapper = document.createElement("div")
fragmentWrapper.className = "doxygen-awesome-fragment-wrapper"
const fragmentCopyButton = document.createElement("doxygen-awesome-fragment-copy-button")
fragmentCopyButton.innerHTML = DoxygenAwesomeFragmentCopyButton.copyIcon
fragmentCopyButton.title = DoxygenAwesomeFragmentCopyButton.title
fragment.parentNode.replaceChild(fragmentWrapper, fragment)
fragmentWrapper.appendChild(fragment)
fragmentWrapper.appendChild(fragmentCopyButton)
}
}
})
})
}
copyContent() {
const content = this.previousSibling.cloneNode(true)
// filter out line number from file listings
content.querySelectorAll(".lineno, .ttc").forEach((node) => {
node.remove()
})
let textContent = content.textContent
// remove trailing newlines that appear in file listings
let numberOfTrailingNewlines = 0
while(textContent.charAt(textContent.length - (numberOfTrailingNewlines + 1)) == '\n') {
numberOfTrailingNewlines++;
}
textContent = textContent.substring(0, textContent.length - numberOfTrailingNewlines)
navigator.clipboard.writeText(textContent);
this.classList.add("success")
this.innerHTML = DoxygenAwesomeFragmentCopyButton.successIcon
window.setTimeout(() => {
this.classList.remove("success")
this.innerHTML = DoxygenAwesomeFragmentCopyButton.copyIcon
}, DoxygenAwesomeFragmentCopyButton.successDuration);
}
}
customElements.define("doxygen-awesome-fragment-copy-button", DoxygenAwesomeFragmentCopyButton)

View File

@@ -1,72 +0,0 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2022 - 2025 jothepro
*/
class DoxygenAwesomeInteractiveToc {
static topOffset = 38
static hideMobileMenu = true
static headers = []
static init() {
window.addEventListener("load", () => {
let toc = document.querySelector(".contents > .toc")
if(toc) {
toc.classList.add("interactive")
if(!DoxygenAwesomeInteractiveToc.hideMobileMenu) {
toc.classList.add("open")
}
document.querySelector(".contents > .toc > h3")?.addEventListener("click", () => {
if(toc.classList.contains("open")) {
toc.classList.remove("open")
} else {
toc.classList.add("open")
}
})
document.querySelectorAll(".contents > .toc > ul a").forEach((node) => {
let id = node.getAttribute("href").substring(1)
DoxygenAwesomeInteractiveToc.headers.push({
node: node,
headerNode: document.getElementById(id)
})
document.getElementById("doc-content")?.addEventListener("scroll",this.throttle(DoxygenAwesomeInteractiveToc.update, 100))
})
DoxygenAwesomeInteractiveToc.update()
}
})
}
static update() {
let active = DoxygenAwesomeInteractiveToc.headers[0]?.node
DoxygenAwesomeInteractiveToc.headers.forEach((header) => {
let position = header.headerNode.getBoundingClientRect().top
header.node.classList.remove("active")
header.node.classList.remove("aboveActive")
if(position < DoxygenAwesomeInteractiveToc.topOffset) {
active = header.node
active?.classList.add("aboveActive")
}
})
active?.classList.add("active")
active?.classList.remove("aboveActive")
}
static throttle(func, delay) {
let lastCall = 0;
return function (...args) {
const now = new Date().getTime();
if (now - lastCall < delay) {
return;
}
lastCall = now;
return setTimeout(() => {func(...args)}, delay);
};
}
}

View File

@@ -1,32 +0,0 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2022 - 2025 jothepro
*/
class DoxygenAwesomeParagraphLink {
// Icon from https://fonts.google.com/icons
// Licensed under the Apache 2.0 license:
// https://www.apache.org/licenses/LICENSE-2.0.html
static icon = `<svg xmlns="http://www.w3.org/2000/svg" height="20px" viewBox="0 0 24 24" width="20px"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M17 7h-4v2h4c1.65 0 3 1.35 3 3s-1.35 3-3 3h-4v2h4c2.76 0 5-2.24 5-5s-2.24-5-5-5zm-6 8H7c-1.65 0-3-1.35-3-3s1.35-3 3-3h4V7H7c-2.76 0-5 2.24-5 5s2.24 5 5 5h4v-2zm-3-4h8v2H8z"/></svg>`
static title = "Permanent Link"
static init() {
$(function() {
$(document).ready(function() {
document.querySelectorAll(".contents a.anchor[id], .contents .groupheader > a[id]").forEach((node) => {
let anchorlink = document.createElement("a")
anchorlink.setAttribute("href", `#${node.getAttribute("id")}`)
anchorlink.setAttribute("title", DoxygenAwesomeParagraphLink.title)
anchorlink.classList.add("anchorlink")
node.classList.add("anchor")
anchorlink.innerHTML = DoxygenAwesomeParagraphLink.icon
node.parentElement.appendChild(anchorlink)
})
})
})
}
}

View File

@@ -1,20 +0,0 @@
/* SPDX-License-Identifier: MIT */
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
*/
@media screen and (min-width: 768px) {
#MSearchBox {
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - var(--searchbar-height) - 1px);
}
#MSearchField {
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - 66px - var(--searchbar-height));
}
}

View File

@@ -1,105 +0,0 @@
/* SPDX-License-Identifier: MIT */
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2021 - 2025 jothepro
*/
html {
/* side nav width. MUST be = `TREEVIEW_WIDTH`.
* Make sure it is wide enough to contain the page title (logo + title + version)
*/
--side-nav-fixed-width: 335px;
--menu-display: none;
--top-height: 120px;
--toc-sticky-top: -25px;
--toc-max-height: calc(100vh - 2 * var(--spacing-medium) - 25px);
}
#projectname {
white-space: nowrap;
}
@media screen and (min-width: 768px) {
html {
--searchbar-background: var(--page-background-color);
}
#side-nav {
min-width: var(--side-nav-fixed-width);
max-width: var(--side-nav-fixed-width);
top: var(--top-height);
overflow: visible;
}
#nav-tree, #side-nav {
height: calc(100vh - var(--top-height)) !important;
}
#top {
display: block;
border-bottom: none;
height: var(--top-height);
margin-bottom: calc(0px - var(--top-height));
max-width: var(--side-nav-fixed-width);
overflow: hidden;
background: var(--side-nav-background);
}
#main-nav {
float: left;
padding-right: 0;
}
.ui-resizable-handle {
display: none;
}
.ui-resizable-e {
width: 0;
}
#nav-path {
position: fixed;
right: 0;
left: calc(var(--side-nav-fixed-width) + 1px);
bottom: 0;
width: auto;
}
#doc-content {
height: calc(100vh - 31px) !important;
padding-bottom: calc(3 * var(--spacing-large));
padding-top: calc(var(--top-height) - 80px);
box-sizing: border-box;
margin-left: var(--side-nav-fixed-width) !important;
}
#MSearchBox {
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)));
}
#MSearchField {
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - 65px);
}
#MSearchResultsWindow {
left: var(--spacing-medium) !important;
right: auto;
}
#nav-sync {
bottom: 4px;
right: auto;
left: 300px;
width: 35px;
top: auto !important;
user-select: none;
position: fixed
}
}

View File

@@ -1,71 +0,0 @@
// SPDX-License-Identifier: MIT
/**
Doxygen Awesome
https://github.com/jothepro/doxygen-awesome-css
Copyright (c) 2023 - 2025 jothepro
*/
class DoxygenAwesomeTabs {
static init() {
window.addEventListener("load", () => {
document.querySelectorAll(".tabbed:not(:empty)").forEach((tabbed, tabbedIndex) => {
let tabLinkList = []
tabbed.querySelectorAll(":scope > ul > li").forEach((tab, tabIndex) => {
tab.id = "tab_" + tabbedIndex + "_" + tabIndex
let header = tab.querySelector(".tab-title")
let tabLink = document.createElement("button")
tabLink.classList.add("tab-button")
tabLink.appendChild(header)
header.title = header.textContent
tabLink.addEventListener("click", () => {
tabbed.querySelectorAll(":scope > ul > li").forEach((tab) => {
tab.classList.remove("selected")
})
tabLinkList.forEach((tabLink) => {
tabLink.classList.remove("active")
})
tab.classList.add("selected")
tabLink.classList.add("active")
})
tabLinkList.push(tabLink)
if(tabIndex == 0) {
tab.classList.add("selected")
tabLink.classList.add("active")
}
})
let tabsOverview = document.createElement("div")
tabsOverview.classList.add("tabs-overview")
let tabsOverviewContainer = document.createElement("div")
tabsOverviewContainer.classList.add("tabs-overview-container")
tabLinkList.forEach((tabLink) => {
tabsOverview.appendChild(tabLink)
})
tabsOverviewContainer.appendChild(tabsOverview)
tabbed.before(tabsOverviewContainer)
function resize() {
let maxTabHeight = 0
tabbed.querySelectorAll(":scope > ul > li").forEach((tab, tabIndex) => {
let visibility = tab.style.display
tab.style.display = "block"
maxTabHeight = Math.max(tab.offsetHeight, maxTabHeight)
tab.style.display = visibility
})
tabbed.style.height = `${maxTabHeight + 10}px`
}
resize()
new ResizeObserver(resize).observe(tabbed)
})
})
}
static resize(tabbed) {
}
}

File diff suppressed because it is too large.

View File

@@ -5,7 +5,9 @@ cflags += -nostdinc \
 -std=c11 \
 -pedantic \
 -Wall \
--Wextra
+-Wextra \
+-ffunction-sections \
+-fdata-sections
 cflags += -isystem ../include
@@ -13,4 +15,7 @@ ldflags += -ffreestanding \
 -nostdlib \
 -fno-builtin \
 -fuse-ld=lld \
--static
+-static \
+-Wl,--gc-sections \
+-Wl,--strip-all \
+-flto

13
include/m/status.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef _M_STATUS_H
#define _M_STATUS_H
#define ST_OK 0
#define ST_SYSCALL_NOT_FOUND 1
#define ST_UNALIGNED 2
#define ST_OOM_ERROR 3
#define ST_NOT_FOUND 4
#define ST_BAD_ADDRESS_SPACE 5
#define ST_PERMISSION_ERROR 6
#define ST_BAD_RESOURCE 7
#endif // _M_STATUS_H
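
A typical call site would compare a syscall's return value against these codes. The wrapper name in the example below is made up for illustration; only the ST_* constants come from the header:

```c
#include <m/status.h>

/* Hypothetical syscall wrapper: map_something() is a placeholder, not a real MOP3 API. */
extern int map_something (void *addr);

static int try_map (void *addr) {
    int st = map_something (addr);
    if (st == ST_UNALIGNED || st == ST_BAD_ADDRESS_SPACE)
        return -1;   /* caller passed a bad address      */
    if (st == ST_OOM_ERROR)
        return -2;   /* kernel could not allocate memory */
    return st == ST_OK ? 0 : -3;
}
```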

View File

@@ -1,10 +1,16 @@
 #ifndef _M_SYSCALL_DEFS_H
 #define _M_SYSCALL_DEFS_H
-#define SYS_PROC_QUIT 1
-#define SYS_PROC_TEST 2
+#define SYS_QUIT 1
+#define SYS_TEST 2
+#define SYS_MAP 3
+#define SYS_UNMAP 4
+#define SYS_CLONE 5
+#define SYS_SCHED 6
+#define SYS_MUTEX_CREATE 7
+#define SYS_MUTEX_DELETE 8
+#define SYS_MUTEX_LOCK 9
+#define SYS_MUTEX_UNLOCK 10
+#define SYS_ARGUMENT_PTR 11
-#define SR_OK 0
-#define SR_SYSCALL_NOT_FOUND 1
 #endif // _M_SYSCALL_DEFS_H
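
For reference, a userspace wrapper for these numbers might look roughly like the following. The register convention (number and return value in RAX, arguments in RDI/RSI) and the include path are assumptions based on the surrounding commits ("Preserve syscall return value in RAX"), not something this diff specifies:

```c
#include <stdint.h>
#include <m/syscall_defs.h>   /* assumed install path of the header above */

/* Hypothetical wrapper: issue a syscall with up to two arguments. */
static inline int64_t sys2 (uint64_t num, uint64_t a0, uint64_t a1) {
    int64_t ret;
    __asm__ volatile ("syscall"
                      : "=a"(ret)
                      : "a"(num), "D"(a0), "S"(a1)
                      : "rcx", "r11", "memory");  /* syscall clobbers RCX/R11 */
    return ret;
}

static inline int64_t sys_mutex_lock (uint64_t rid) {
    return sys2 (SYS_MUTEX_LOCK, rid, 0);
}
```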

View File

@@ -1,9 +1,46 @@
 #include <limits.h>
-#include <m/proc.h>
+#include <proc/local.h>
+#include <proc/proc.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string/string.h>
+
+#define MUTEX 2000
+
+LOCAL volatile char letter = 'c';
+
+void app_proc (void) {
+    char arg_letter = (char)(uintptr_t)argument_ptr ();
+    letter = arg_letter;
+
+    for (;;) {
+        mutex_lock (MUTEX);
+        for (int i = 0; i < 3; i++)
+            test (letter);
+        mutex_unlock (MUTEX);
+    }
+
+    process_quit ();
+}

 void app_main (void) {
-    m_proc_test ();
-    m_proc_test ();
-    m_proc_test ();
-    m_proc_test ();
+    mutex_create (MUTEX);
+    letter = 'a';
+
+    process_spawn (&app_proc, (void*)'a');
+    process_spawn (&app_proc, (void*)'b');
+    process_spawn (&app_proc, (void*)'c');
+
+    for (;;) {
+        mutex_lock (MUTEX);
+        for (int i = 0; i < 3; i++)
+            test (letter);
+        mutex_unlock (MUTEX);
+    }
 }

View File

@@ -1,3 +1,3 @@
-S += init.S
+c += init.c
 o += init.o

2
kernel/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.json
.cache

File diff suppressed because it is too large.

View File

@@ -32,8 +32,4 @@ format:
 ':!uACPI/tests/**' \
 ':!libk/printf*')
-doxygen:
-	mkdir -p ../docs/kernel/doxygen
-	doxygen
-.PHONY: all clean format doxygen
+.PHONY: all clean format

View File

@@ -4,6 +4,7 @@
 #include <amd64/msr.h>
 #include <libk/std.h>
 #include <limine/requests.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
 #include <sys/spin.h>
@@ -16,86 +17,94 @@
 #define IOAPICS_MAX 24
 #define INTERRUPT_SRC_OVERRIDES_MAX 24
-/// ID of Local APIC
+/* ID of Local APIC */
 #define LAPIC_ID 0x20
-/// End of interrupt register
+/* End of interrupt register */
 #define LAPIC_EOI 0xB0
-/// Spurious interrupt vector register
+/* Spurious interrupt vector register */
 #define LAPIC_SIVR 0xF0
-/// Interrupt command register
+/* Interrupt command register */
 #define LAPIC_ICR 0x300
-/// LVT timer register
+/* LVT timer register */
 #define LAPIC_LVTTR 0x320
-/// Timer initial count register
+/* Timer initial count register */
 #define LAPIC_TIMICT 0x380
-/// Timer current count register
+/* Timer current count register */
 #define LAPIC_TIMCCT 0x390
-/// Divide config register
+/* Divide config register */
 #define LAPIC_DCR 0x3E0
-/// Table of IOAPICS
-static struct acpi_madt_ioapic apics[IOAPICS_MAX];
+#define DIVIDER_VALUE 0x0B
+struct ioapic {
+    struct acpi_madt_ioapic table_data;
+    spin_lock_t lock;
+    uintptr_t mmio_base;
+};
+/* Table of IOAPICS */
+static struct ioapic ioapics[IOAPICS_MAX];
+/* Table of interrupt source overrides */
 /* clang-format off */
-/// Table of interrupt source overrides
 static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
 /* clang-format on */
-/// Count of actual IOAPIC entries
+/* Count of actual IOAPIC entries */
 static size_t ioapic_entries = 0;
-/// Count of actual interrupt source overrides
+/* Count of actual interrupt source overrides */
 static size_t intr_src_override_entries = 0;
-/// Local APIC MMIO base address. It comes from MSR_APIC_BASE
-static uintptr_t lapic_mmio_base = 0;
-/// Read IOAPIC
-static uint32_t amd64_ioapic_read (uintptr_t vaddr, uint32_t reg) {
-    *(volatile uint32_t*)vaddr = reg;
-    return *(volatile uint32_t*)(vaddr + 0x10);
+static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
+/* Read IOAPIC */
+static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
+    spin_lock_ctx_t ctxioar;
+    spin_lock (&ioapic->lock, &ctxioar);
+    *(volatile uint32_t*)ioapic->mmio_base = reg;
+    uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
+    spin_unlock (&ioapic->lock, &ctxioar);
+    return ret;
 }
-/// Write IOAPIC
-static void amd64_ioapic_write (uintptr_t vaddr, uint32_t reg, uint32_t value) {
-    *(volatile uint32_t*)vaddr = reg;
-    *(volatile uint32_t*)(vaddr + 0x10) = value;
+/* Write IOAPIC */
+static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
+    spin_lock_ctx_t ctxioaw;
+    spin_lock (&ioapic->lock, &ctxioaw);
+    *(volatile uint32_t*)ioapic->mmio_base = reg;
+    *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
+    spin_unlock (&ioapic->lock, &ctxioaw);
 }
-/// Find an IOAPIC corresposting to provided IRQ
-static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
-    struct acpi_madt_ioapic* apic = NULL;
-    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+/* Find an IOAPIC corresposting to provided IRQ */
+static struct ioapic* amd64_ioapic_find (uint32_t irq) {
+    struct ioapic* ioapic = NULL;
     for (size_t i = 0; i < ioapic_entries; i++) {
-        apic = &apics[i];
-        uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, 1);
+        ioapic = &ioapics[i];
+        uint32_t version = amd64_ioapic_read (ioapic, 1);
         uint32_t max = ((version >> 16) & 0xFF);
-        if ((irq >= apic->gsi_base) && (irq <= (apic->gsi_base + max)))
-            return apic;
+        if ((irq >= ioapic->table_data.gsi_base) && (irq <= (ioapic->table_data.gsi_base + max)))
+            return ioapic;
     }
     return NULL;
 }
-/**
- * @brief Route IRQ to an IDT entry of a given Local APIC.
- *
- * @param vec
- *        Interrupt vector number, which will be delivered to the CPU
- *
- * @param irq
- *        Legacy IRQ number to be routed. Can be changed by an interrupt source override
- *        into a different GSI.
- *
- * @param flags
- *        IOAPIC redirection flags.
- *
- * @param lapic_id
- *        Local APIC that will receive the interrupt.
- */
-void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
-    struct acpi_madt_ioapic* apic = NULL;
+/*
+ * Route IRQ to an IDT entry of a given Local APIC.
+ *
+ * vec - Interrupt vector number, which will be delivered to the CPU.
+ * irq - Legacy IRQ number to be routed. Can be changed by an interrupt source override
+ *       into a different GSI.
+ * flags - IOAPIC redirection flags.
+ * lapic_id - Local APIC that will receive the interrupt.
+ */
+void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
+    struct ioapic* ioapic = NULL;
     struct acpi_madt_interrupt_source_override* override;
     bool found_override = false;
-    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     for (size_t i = 0; i < intr_src_override_entries; i++) {
         override = &intr_src_overrides[i];
@@ -108,65 +117,26 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
     uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);
     if (found_override) {
-        uint8_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
-        uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
+        uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
+        uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
         calc_flags |= (uint64_t)mode << 15;
         calc_flags |= (uint64_t)polarity << 13;
-        calc_flags |= flags;
-    } else {
-        calc_flags |= flags;
     }
-    apic = amd64_ioapic_find (irq);
-    if (apic == NULL)
+    uint32_t gsi = found_override ? override->gsi : irq;
+    ioapic = amd64_ioapic_find (gsi);
+    if (ioapic == NULL)
         return;
-    uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;
-    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
-                        (uint32_t)calc_flags);
-    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg + 1,
-                        (uint32_t)(calc_flags >> 32));
+    uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;
+    amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
+    amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
 }
-/// Mask a given IRQ
-void amd64_ioapic_mask (uint8_t irq) {
-    struct acpi_madt_ioapic* apic;
-    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    apic = amd64_ioapic_find (irq);
-    if (apic == NULL)
-        return;
-    uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;
-    uint32_t value = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg);
-    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
-                        value | (1 << 16));
-}
-/// Unmask a given IRQ
-void amd64_ioapic_unmask (uint8_t irq) {
-    struct acpi_madt_ioapic* apic;
-    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    apic = amd64_ioapic_find (irq);
-    if (apic == NULL)
-        return;
-    uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;
-    uint32_t value = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg);
-    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
-                        value & ~(1 << 16));
-}
-/// Find and initialize the IOAPIC
+/* Find and initialize the IOAPIC */
 void amd64_ioapic_init (void) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -187,11 +157,15 @@ void amd64_ioapic_init (void) {
         switch (current->type) {
             case ACPI_MADT_ENTRY_TYPE_IOAPIC: {
-                struct acpi_madt_ioapic* ioapic = (struct acpi_madt_ioapic*)current;
-                mm_map_kernel_page ((uintptr_t)ioapic->address,
-                                    (uintptr_t)hhdm->offset + (uintptr_t)ioapic->address,
-                                    MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
-                apics[ioapic_entries++] = *ioapic;
+                struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
+                mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
+                                    (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
+                                    MM_PG_PRESENT | MM_PG_RW);
+                ioapics[ioapic_entries++] = (struct ioapic){
+                    .lock = SPIN_LOCK_INIT,
+                    .table_data = *ioapic_table_data,
+                    .mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
+                };
             } break;
             case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
                 struct acpi_madt_interrupt_source_override* override =
@@ -204,99 +178,94 @@ void amd64_ioapic_init (void) {
     }
 }
-/// Get MMIO base of Local APIC
-static uintptr_t amd64_lapic_base (void) { return lapic_mmio_base; }
+/* Get MMIO base of Local APIC */
+static uintptr_t amd64_lapic_base (void) { return thiscpu->lapic_mmio_base; }
-/// Write Local APIC
+/* Write Local APIC */
 static void amd64_lapic_write (uint32_t reg, uint32_t value) {
     *(volatile uint32_t*)(amd64_lapic_base () + reg) = value;
 }
-/// Read Local APIC
+/* Read Local APIC */
 static uint32_t amd64_lapic_read (uint32_t reg) {
     return *(volatile uint32_t*)(amd64_lapic_base () + reg);
 }
-/// Get ID of Local APIC
+/* Get ID of Local APIC */
 uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }
-/// Send End of interrupt command to Local APIC
+/* Send End of interrupt command to Local APIC */
 void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
-/// Set initial counter value in Local APIC timer
-void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick); }
-/**
- * @brief Calibrate Local APIC to send interrupts in a set interval.
- *
- * @param us
- *        Period length in microseconds
- *
- * @return amount of ticsk in a given period
- */
+/*
+ * Calibrate Local APIC to send interrupts in a set interval.
+ *
+ * us - Period length in microseconds
+ */
 static uint32_t amd64_lapic_calibrate (uint32_t us) {
-    amd64_lapic_write (LAPIC_DCR, 0x0B);
+    spin_lock_ctx_t ctxlacb;
+    spin_lock (&lapic_calibration_lock, &ctxlacb);
+    amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
     amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
     amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);
     sleep_micro (us);
-    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
     uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
+    DEBUG ("timer ticks = %u\n", ticks);
+    spin_unlock (&lapic_calibration_lock, &ctxlacb);
     return ticks;
 }
-/**
- * @brief Starts a Local APIC, configures LVT timer to
- * send interrupts at \ref SCHED_PREEMPT_TIMER.
- *
- * @param ticks
- *        Initial tick count
- */
+/*
+ * Starts a Local APIC, configures LVT timer to send interrupts at SCHED_PREEMPT_TIMER.
+ *
+ * ticks - Initial tick count
+ */
 static void amd64_lapic_start (uint32_t ticks) {
-    amd64_lapic_write (LAPIC_DCR, 0x0B);
-    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
+    amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
     amd64_lapic_write (LAPIC_TIMICT, ticks);
+    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
 }
-/**
- * @brief Initialize Local APIC, configure to send timer interrupts
- * at a given period. See \ref amd64_lapic_calibrate and \ref amd64_lapic_start.
- */
-uint64_t amd64_lapic_init (uint32_t us) {
+/*
+ * Initialize Local APIC, configure to send timer interrupts at a given period. See
+ * amd64_lapic_calibrate and amd64_lapic_start.
+ */
+void amd64_lapic_init (uint32_t us) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11));
     uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
-    lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
-    mm_map_kernel_page (lapic_paddr, lapic_mmio_base,
-                        MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
+    thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
+    mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);
     amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
-    uint32_t ticks = amd64_lapic_calibrate (us);
-    amd64_lapic_start (ticks);
-    return ticks;
+    thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
+    amd64_lapic_start (thiscpu->lapic_ticks);
 }
-/**
- * @brief Send an IPI to a given Local APIC. This till invoke an IDT stub located at vec.
- *
- * @param lapic_id
- *        Target Local APIC
- *
- * @param vec
- *        Interrupt vector/IDT stub, which will be invoked by the IPI.
- */
-void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
+/*
+ * Send an IPI to a given Local APIC. This till invoke an IDT stub located at vec.
+ *
+ * lapic_id - Target Local APIC
+ * vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
+ */
+void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
+    /* wait for previous IPI to finish */
+    while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) {
+        __asm__ volatile ("pause");
+    }
     amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
-    amd64_lapic_write (LAPIC_ICR, vec);
+    amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
 }

View File

@@ -3,15 +3,12 @@
#include <libk/std.h> #include <libk/std.h>
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id); void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_mask (uint8_t irq);
void amd64_ioapic_unmask (uint8_t irq);
void amd64_ioapic_init (void); void amd64_ioapic_init (void);
uint32_t amd64_lapic_id (void); uint32_t amd64_lapic_id (void);
void amd64_lapic_tick (uint32_t tick);
void amd64_lapic_eoi (void); void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec); void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
uint64_t amd64_lapic_init (uint32_t us); void amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H #endif // _KERNEL_AMD64_APIC_H
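The header above also exposes the IO APIC routing entry point. A device driver would typically pin its legacy IRQ to one of the vectors that already has an IDT stub (32-47 in this kernel) on the boot CPU's LAPIC; this is only a sketch, and it assumes that a flags value of 0 selects the default edge-triggered, active-high delivery:

#include <amd64/apic.h>

/* Hypothetical: deliver legacy IRQ 4 (COM1) as vector 0x24 (32 + IRQ) to the calling CPU.
 * A handler would still have to be registered for that vector via irq_attach (). */
static void serial_irq_route_example (void) {
    amd64_ioapic_route_irq (0x24, 4, 0, amd64_lapic_id ());
}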

View File

@@ -1,5 +1,3 @@
/** @file */
#include <amd64/apic.h> #include <amd64/apic.h>
#include <amd64/debug.h> #include <amd64/debug.h>
#include <amd64/hpet.h> #include <amd64/hpet.h>
@@ -11,6 +9,7 @@
#include <irq/irq.h> #include <irq/irq.h>
#include <libk/std.h> #include <libk/std.h>
#include <limine/limine.h> #include <limine/limine.h>
#include <limine/requests.h>
#include <mm/liballoc.h> #include <mm/liballoc.h>
#include <mm/pmm.h> #include <mm/pmm.h>
#include <proc/proc.h> #include <proc/proc.h>
@@ -24,17 +23,16 @@
#define UACPI_MEMORY_BUFFER_MAX 4096 #define UACPI_MEMORY_BUFFER_MAX 4096
/** @cond DOXYGEN_IGNORE */
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX]; ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
/** @endcond */
/** /*
* @brief The kernel starts booting here. This is the entry point after * The kernel starts booting here. This is the entry point after Limine hands control. We set up all
* Limine hands control. We set up all the necessary platform-dependent * the necessary platform-dependent subsystems/drivers and jump into the init app.
* subsystems/drivers and jump into the init app.
*/ */
void bootmain (void) { void bootmain (void) {
struct cpu* bsp_cpu = cpu_make (); struct limine_mp_response* mp = limine_mp_request.response;
struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);
amd64_init (bsp_cpu, false); amd64_init (bsp_cpu, false);
syscall_init (); syscall_init ();
@@ -51,8 +49,6 @@ void bootmain (void) {
smp_init (); smp_init ();
mm_init2 ();
proc_init (); proc_init ();
for (;;) for (;;)

View File

@@ -6,32 +6,37 @@
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/debug.h> #include <sys/debug.h>
/// Port for printing to serial /* Port for printing to serial */
/* TODO: Make this configurable */
#define PORT_COM1 0x03F8 #define PORT_COM1 0x03F8
/// \ref debugprintf buffer size /* debugprintf buffer size */
#define BUFFER_SIZE 1024 #define BUFFER_SIZE 1024
/// Lock, which ensures that prints to the serial port are atomic /*
 * Lock, which ensures that prints to the serial port are atomic (i.e. each debugprintf call is
 * written out as a single, uninterrupted unit).
*/
static spin_lock_t serial_lock = SPIN_LOCK_INIT; static spin_lock_t serial_lock = SPIN_LOCK_INIT;
static bool debug_init = false; static bool debug_init = false;
/// Block until TX buffer is empty /* Block until TX buffer is empty */
static bool amd64_debug_serial_tx_empty (void) { static bool amd64_debug_serial_tx_empty (void) {
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20); return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
} }
/// Write a single character to serial /* Write a single character to serial */
static void amd64_debug_serial_write (char x) { static void amd64_debug_serial_write (char x) {
while (!amd64_debug_serial_tx_empty ()) while (!amd64_debug_serial_tx_empty ())
; ;
amd64_io_outb (PORT_COM1, (uint8_t)x); amd64_io_outb (PORT_COM1, (uint8_t)x);
} }
/** /*
* @brief Formatted printing to serial. \ref serial_lock ensures that * Formatted printing to serial. serial_lock ensures that all prints are atomic.
* all prints are atomic.
*/ */
void debugprintf (const char* fmt, ...) { void debugprintf (const char* fmt, ...) {
spin_lock_ctx_t ctxdbgp;
if (!debug_init) if (!debug_init)
return; return;
@@ -47,17 +52,17 @@ void debugprintf (const char* fmt, ...) {
const char* p = buffer; const char* p = buffer;
spin_lock (&serial_lock); spin_lock (&serial_lock, &ctxdbgp);
while (*p) { while (*p) {
amd64_debug_serial_write (*p); amd64_debug_serial_write (*p);
p++; p++;
} }
spin_unlock (&serial_lock); spin_unlock (&serial_lock, &ctxdbgp);
} }
/// Initialize serial /* Initialize serial */
void amd64_debug_init (void) { void amd64_debug_init (void) {
amd64_io_outb (PORT_COM1 + 1, 0x00); amd64_io_outb (PORT_COM1 + 1, 0x00);
amd64_io_outb (PORT_COM1 + 3, 0x80); amd64_io_outb (PORT_COM1 + 3, 0x80);
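The reworked debugprintf () above also shows the locking convention this change set adopts everywhere: the saved IRQ state travels in a caller-supplied spin_lock_ctx_t instead of a per-CPU nesting counter. A minimal usage sketch, assuming the spin_lock ()/spin_unlock () signatures used in this diff; stats_lock, stats_counter and stats_bump () are hypothetical:

#include <libk/std.h>
#include <sync/spin_lock.h>

static spin_lock_t stats_lock = SPIN_LOCK_INIT; /* hypothetical shared lock */
static uint64_t stats_counter;                  /* hypothetical shared data */

void stats_bump (void) {
    spin_lock_ctx_t ctx;             /* carries this caller's saved RFLAGS */
    spin_lock (&stats_lock, &ctx);   /* disables IRQs and records the prior IF state */
    stats_counter++;
    spin_unlock (&stats_lock, &ctx); /* re-enables IRQs only if they were enabled before */
}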

View File

@@ -7,17 +7,15 @@
#define GDT_KCODE 0x08 #define GDT_KCODE 0x08
#define GDT_KDATA 0x10 #define GDT_KDATA 0x10
#define GDT_UCODE 0x18 #define GDT_UDATA 0x18
#define GDT_UDATA 0x20 #define GDT_UCODE 0x20
#define GDT_TSS 0x28 #define GDT_TSS 0x28
/// Size of kernel stack /* Size of kernel stack */
#define KSTACK_SIZE (32 * 1024) #define KSTACK_SIZE (32 * 1024)
/** /*
* @file * 64-bit GDT structure. For more info see:
*
* @brief 64-bit GDT structure. For more info see:
* - https://wiki.osdev.org/Global_Descriptor_Table * - https://wiki.osdev.org/Global_Descriptor_Table
* - https://wiki.osdev.org/GDT_Tutorial * - https://wiki.osdev.org/GDT_Tutorial
*/ */
@@ -31,11 +29,13 @@ struct gdt_entry {
uint8_t basehigh; uint8_t basehigh;
} PACKED; } PACKED;
/* Struct that gets loaded into GDTR */
struct gdt_ptr { struct gdt_ptr {
uint16_t limit; uint16_t limit;
uint64_t base; uint64_t base;
} PACKED; } PACKED;
/* New, extended GDT (we need to extend Limine's GDT) */
struct gdt_extended { struct gdt_extended {
struct gdt_entry old[5]; struct gdt_entry old[5];
struct gdt_entry tsslow; struct gdt_entry tsslow;

View File

@@ -10,71 +10,113 @@
#include <uacpi/tables.h> #include <uacpi/tables.h>
#include <uacpi/uacpi.h> #include <uacpi/uacpi.h>
/** /*
* @file * HPET (High Precision Event Timer) driver code. See more at https://wiki.osdev.org/HPET
*
* @brief HPET (High Precision Event Timer) driver code.
* See more at https://wiki.osdev.org/HPET
*/ */
/// HPET Main Counter Value Register /* HPET Main Counter Value Register */
#define HPET_MCVR 0xF0 #define HPET_MCVR 0xF0
/// HPET General Configuration Register /* HPET General Configuration Register */
#define HPET_GCR 0x10 #define HPET_GCR 0x10
/// HPET General Capabilities and ID Register /* HPET General Capabilities and ID Register */
#define HPET_GCIDR 0x00 #define HPET_GCIDR 0x00
/// Set whether we should use 32-bit or 64-bit reads/writes /* Set whether we should use 32-bit or 64-bit reads/writes */
static bool hpet_32bits = 1; static bool hpet_32bits = 1;
/// Physical address for HPET MMIO /* Physical address for HPET MMIO */
static uintptr_t hpet_paddr; static uintptr_t hpet_paddr;
/// HPET period in femtoseconds /* HPET period in femtoseconds */
static uint64_t hpet_period_fs; static uint64_t hpet_period_fs;
/// Lock, which protects concurrent access. See \ref amd64/smp.c /* Lock, which protects concurrent access. See amd64/smp.c */
static spin_lock_t hpet_lock = SPIN_LOCK_INIT; static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
/// Read a HPET register. Assumes caller holds \ref hpet_lock /* Read a HPET register. Assumes caller holds hpet_lock */
static uint64_t amd64_hpet_read (uint32_t reg) { static uint64_t amd64_hpet_read64 (uint32_t reg) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
return (hpet_32bits ? *(volatile uint32_t*)(hpet_vaddr + reg) return *(volatile uint64_t*)(hpet_vaddr + reg);
: *(volatile uint64_t*)(hpet_vaddr + reg));
} }
/// Write a HPET register. Assumes caller holds \ref hpet_lock static uint32_t amd64_hpet_read32 (uint32_t reg) {
static void amd64_hpet_write (uint32_t reg, uint64_t value) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
return *(volatile uint32_t*)(hpet_vaddr + reg);
}
/* Write a HPET register. Assumes caller holds hpet_lock */
static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
if (hpet_32bits)
*(volatile uint32_t*)(hpet_vaddr + reg) = (value & 0xFFFFFFFF);
else
*(volatile uint64_t*)(hpet_vaddr + reg) = value; *(volatile uint64_t*)(hpet_vaddr + reg) = value;
} }
/// Read current value of \ref HPET_MCVR register. static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); } struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
*(volatile uint32_t*)(hpet_vaddr + reg) = value;
}
/// Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being held. /* Read current value of HPET_MCVR register. */
static uint64_t amd64_hpet_read_counter (void) {
uint64_t value;
spin_lock_ctx_t ctxhrc;
spin_lock (&hpet_lock, &ctxhrc);
if (!hpet_32bits)
value = amd64_hpet_read64 (HPET_MCVR);
else {
uint32_t hi1, lo, hi2;
do {
hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
lo = amd64_hpet_read32 (HPET_MCVR + 0);
hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
} while (hi1 != hi2);
value = ((uint64_t)hi1 << 32) | lo;
}
spin_unlock (&hpet_lock, &ctxhrc);
return value;
}
static void amd64_hpet_write_counter (uint64_t value) {
spin_lock_ctx_t ctxhwc;
spin_lock (&hpet_lock, &ctxhwc);
if (!hpet_32bits)
amd64_hpet_write64 (HPET_MCVR, value);
else {
amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
}
spin_unlock (&hpet_lock, &ctxhwc);
}
/* Sleep for a given number of microseconds. The sleep can take longer than requested because each
 * counter read briefly takes hpet_lock. */
void amd64_hpet_sleep_micro (uint64_t us) { void amd64_hpet_sleep_micro (uint64_t us) {
spin_lock (&hpet_lock); if (hpet_period_fs == 0)
return;
uint64_t start = amd64_hpet_timestamp (); uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL);
uint64_t target_fs = us * 1000000000ULL; uint64_t start = amd64_hpet_read_counter ();
for (;;) { for (;;) {
uint64_t current = amd64_hpet_timestamp (); uint64_t now = amd64_hpet_read_counter ();
uint64_t dt = current - start;
if ((dt * hpet_period_fs) >= target_fs) if ((now - start) >= ticks_to_wait)
break; break;
__asm__ volatile ("pause" ::: "memory"); __asm__ volatile ("pause" ::: "memory");
} }
spin_unlock (&hpet_lock);
} }
/// Initialize HPET /* Initialize HPET */
void amd64_hpet_init (void) { void amd64_hpet_init (void) {
struct uacpi_table hpet_table; struct uacpi_table hpet_table;
uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table); uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);
@@ -87,22 +129,14 @@ void amd64_hpet_init (void) {
hpet_paddr = (uintptr_t)hpet->address.address; hpet_paddr = (uintptr_t)hpet->address.address;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1; uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
hpet_32bits = (caps & (1 << 13)) ? 0 : 1;
/* reset */ hpet_period_fs = (uint32_t)(caps >> 32);
amd64_hpet_write (HPET_GCR, 0);
amd64_hpet_write (HPET_MCVR, 0);
amd64_hpet_write (HPET_GCR, 1);
uint64_t gcidr = amd64_hpet_read (HPET_GCIDR); amd64_hpet_write64 (HPET_GCR, 0);
if (hpet_32bits) { amd64_hpet_write_counter (0);
uint32_t low = (uint32_t)gcidr; amd64_hpet_write64 (HPET_GCR, 1);
uint32_t high = (uint32_t)amd64_hpet_read (HPET_GCIDR + 4);
gcidr = (((uint64_t)high << 32) | low);
}
hpet_period_fs = (gcidr >> 32);
} }
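To make the sleep arithmetic concrete: bits 63:32 of HPET_GCIDR give the counter period in femtoseconds, so a microsecond delay converts to counter ticks as ticks = us * 10^9 / period_fs, which amd64_hpet_sleep_micro () computes as (us * 1000) / (period_fs / 1000000) to stay within 64-bit intermediates. For a 10 MHz HPET (period_fs = 100,000,000), a 1000 us sleep therefore waits (1000 * 1000) / (100,000,000 / 1,000,000) = 10,000 ticks of 100 ns each, i.e. exactly 1 ms.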

View File

@@ -9,7 +9,7 @@
#define TSS 0x80 #define TSS 0x80
#define TSS_PRESENT 0x89 #define TSS_PRESENT 0x89
/// Set a GDT entry /* Set a GDT entry */
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit, static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
uint8_t acc, uint8_t gran) { uint8_t acc, uint8_t gran) {
ent->baselow = (base & 0xFFFF); ent->baselow = (base & 0xFFFF);
@@ -20,7 +20,7 @@ static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32
ent->access = acc; ent->access = acc;
} }
/// Initialize GDT and TSS structures for a given CPU /* Initialize GDT and TSS structures for a given CPU */
static void amd64_gdt_init (struct cpu* cpu) { static void amd64_gdt_init (struct cpu* cpu) {
volatile struct tss* tss = &cpu->tss; volatile struct tss* tss = &cpu->tss;
volatile struct gdt_extended* gdt = &cpu->gdt; volatile struct gdt_extended* gdt = &cpu->gdt;
@@ -39,8 +39,8 @@ static void amd64_gdt_init (struct cpu* cpu) {
amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0); amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0); amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0); amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xFA, 0xA0); amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xF2, 0xC0); amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0); amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
uint32_t tssbasehigh = (tssbase >> 32); uint32_t tssbasehigh = (tssbase >> 32);
@@ -51,11 +51,13 @@ static void amd64_gdt_init (struct cpu* cpu) {
gdt->tsshigh.access = 0; gdt->tsshigh.access = 0;
gdt->tsshigh.gran = 0; gdt->tsshigh.gran = 0;
/* Load GDTR */
struct gdt_ptr gdtr; struct gdt_ptr gdtr;
gdtr.limit = sizeof (*gdt) - 1; gdtr.limit = sizeof (*gdt) - 1;
gdtr.base = (uint64_t)gdt; gdtr.base = (uint64_t)gdt;
__asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory"); __asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");
/* Reload CS */
__asm__ volatile ("pushq %[kcode]\n" __asm__ volatile ("pushq %[kcode]\n"
"lea 1f(%%rip), %%rax\n" "lea 1f(%%rip), %%rax\n"
"pushq %%rax\n" "pushq %%rax\n"
@@ -72,11 +74,10 @@ static void amd64_gdt_init (struct cpu* cpu) {
__asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS)); __asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
} }
/** /*
* @brief Initialize essentials (GDT, TSS, IDT) for a given CPU * Initialize essentials (GDT, TSS, IDT) for a given CPU
* *
* @param load_idt * load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on
* Tell whether the IDT needs to be loaded. It only has to be loaded once on
* the BSP * the BSP
*/ */
void amd64_init (struct cpu* cpu, bool load_idt) { void amd64_init (struct cpu* cpu, bool load_idt) {

View File

@@ -7,10 +7,12 @@
#include <irq/irq.h> #include <irq/irq.h>
#include <libk/std.h> #include <libk/std.h>
#include <libk/string.h> #include <libk/string.h>
#include <m/syscall_defs.h>
#include <sys/debug.h> #include <sys/debug.h>
#include <sys/irq.h> #include <sys/irq.h>
#include <sys/smp.h> #include <sys/smp.h>
#include <sys/spin.h> #include <sys/spin.h>
#include <syscall/syscall.h>
/* 8259 PIC defs. */ /* 8259 PIC defs. */
#define PIC1 0x20 #define PIC1 0x20
@@ -39,7 +41,7 @@
#define IDT_ENTRIES_MAX 256 #define IDT_ENTRIES_MAX 256
/// 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table /* 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table */
struct idt_entry { struct idt_entry {
uint16_t intrlow; uint16_t intrlow;
uint16_t kernel_cs; uint16_t kernel_cs;
@@ -55,18 +57,14 @@ struct idt {
uint64_t base; uint64_t base;
} PACKED; } PACKED;
/** @cond DOXYGEN_IGNORE */
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX]; ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
/** @endcond */
static volatile struct idt idt; static volatile struct idt idt;
/// Remaps and disables old 8259 PIC, since we'll be using APIC. /* Remaps and disables old 8259 PIC, since we'll be using APIC. */
static void amd64_init_pic (void) { static void amd64_init_pic (void) {
/** @cond DOXYGEN_IGNORE */
#define IO_OP(fn, ...) \ #define IO_OP(fn, ...) \
fn (__VA_ARGS__); \ fn (__VA_ARGS__); \
amd64_io_wait () amd64_io_wait ()
/** @endcond */
IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4)); IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4)); IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
@@ -87,7 +85,7 @@ static void amd64_init_pic (void) {
#undef IO_OP #undef IO_OP
} }
/// Set IDT entry /* Set IDT entry */
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags, static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
uint8_t ist) { uint8_t ist) {
ent->intrlow = (handler & 0xFFFF); ent->intrlow = (handler & 0xFFFF);
@@ -99,18 +97,16 @@ static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uin
ent->resv = 0; ent->resv = 0;
} }
/// Load the IDT /* Load the IDT */
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); } void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
/// Initialize IDT entries /* Initialize IDT entries */
static void amd64_idt_init (void) { static void amd64_idt_init (void) {
memset ((void*)idt_entries, 0, sizeof (idt_entries)); memset ((void*)idt_entries, 0, sizeof (idt_entries));
/** @cond DOXYGEN_IGNORE */
#define IDT_ENTRY(n, ist) \ #define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \ extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist)) amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
/** @endcond */
/* clang-format off */ /* clang-format off */
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0); IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0); IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
@@ -127,6 +123,8 @@ static void amd64_idt_init (void) {
IDT_ENTRY (SCHED_PREEMPT_TIMER, 1); IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
IDT_ENTRY (TLB_SHOOTDOWN, 1); IDT_ENTRY (TLB_SHOOTDOWN, 1);
IDT_ENTRY (CPU_REQUEST_SCHED, 1);
IDT_ENTRY (CPU_SPURIOUS, 1);
/* clang-format on */ /* clang-format on */
#undef IDT_ENTRY #undef IDT_ENTRY
@@ -136,13 +134,7 @@ static void amd64_idt_init (void) {
amd64_load_idt (); amd64_load_idt ();
} }
/** /* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */
* @brief Handle CPU exception and dump registers. If incoming CS has CPL3, kill the
* process.
*
* @param regs
* saved registers
*/
static void amd64_intr_exception (struct saved_regs* regs) { static void amd64_intr_exception (struct saved_regs* regs) {
DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error); DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);
@@ -171,10 +163,23 @@ static void amd64_intr_exception (struct saved_regs* regs) {
} }
} }
/// Handle incoming interrupt, dispatch IRQ handlers. /* Handle incoming interrupt, dispatch IRQ handlers. */
void amd64_intr_handler (void* stack_ptr) { void amd64_intr_handler (void* stack_ptr) {
spin_lock_ctx_t ctxcpu, ctxpr;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr; struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* proc_current = thiscpu->proc_current;
spin_lock (&proc_current->lock, &ctxpr);
memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&proc_current->lock, &ctxpr);
spin_unlock (&thiscpu->lock, &ctxcpu);
if (regs->trap <= 31) { if (regs->trap <= 31) {
amd64_intr_exception (regs); amd64_intr_exception (regs);
} else { } else {
@@ -183,18 +188,12 @@ void amd64_intr_handler (void* stack_ptr) {
struct irq* irq = irq_find (regs->trap); struct irq* irq = irq_find (regs->trap);
if (irq != NULL) { if (irq != NULL) {
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("sti");
irq->func (irq->arg, stack_ptr); irq->func (irq->arg, stack_ptr);
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("cli");
} }
} }
} }
/// Initialize interrupts /* Initialize interrupts */
void amd64_intr_init (void) { void amd64_intr_init (void) {
amd64_init_pic (); amd64_init_pic ();
amd64_idt_init (); amd64_idt_init ();
@@ -202,39 +201,21 @@ void amd64_intr_init (void) {
/* Aux. */ /* Aux. */
/// Save RFLAGS of the current CPU /* Save RFLAGS of the current CPU */
static uint64_t amd64_irq_save_flags (void) { static uint64_t amd64_irq_save_flags (void) {
uint64_t rflags; uint64_t rflags;
__asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc"); __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
return rflags; return rflags;
} }
/// Restore interrupts (IF bit) from RFLAGS /* Restore interrupts (IF bit) from RFLAGS */
static void amd64_irq_restore_flags (uint64_t rflags) { static void amd64_irq_restore_flags (uint64_t rflags) {
if (rflags & (1ULL << 9)) if (rflags & (1ULL << 9))
__asm__ volatile ("sti"); __asm__ volatile ("sti");
} }
/// Save current interrupt state /* Save current interrupt state */
void irq_save (void) { void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 0)
thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
}
/// Restore interrupt state /* Restore interrupt state */
void irq_restore (void) { void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
/// Map custom IRQ mappings to legacy IRQs
uint8_t amd64_resolve_irq (uint8_t irq) {
static const uint8_t mappings[] = {
[SCHED_PREEMPT_TIMER] = 0,
[TLB_SHOOTDOWN] = 1,
};
return mappings[irq];
}
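With the nesting counter removed, raw interrupt state now round-trips through the same spin_lock_ctx_t as the spin-lock API, so nested critical sections each keep their own saved RFLAGS. A minimal sketch of direct use — touch_percpu_state () is hypothetical, and the header names are taken from the include lists in this diff:

#include <sync/spin_lock.h>
#include <sys/irq.h>

void touch_percpu_state (void) {
    spin_lock_ctx_t outer, inner;  /* each nesting level keeps its own RFLAGS copy */
    irq_save (&outer);             /* cli; remembers whether IF was set on entry */
    /* ... outer critical section ... */
    irq_save (&inner);             /* IF is already clear here, and that is what gets saved */
    /* ... inner critical section ... */
    irq_restore (&inner);          /* no sti: the inner snapshot had IF clear */
    irq_restore (&outer);          /* sti only if interrupts were enabled originally */
}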

View File

@@ -32,7 +32,6 @@ struct saved_regs {
} PACKED; } PACKED;
void amd64_load_idt (void); void amd64_load_idt (void);
uint8_t amd64_resolve_irq (uint8_t irq);
void amd64_intr_init (void); void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H #endif // _KERNEL_AMD64_INTR_H

View File

@@ -1,13 +1,12 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H #ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H #define _KERNEL_AMD64_INTR_DEFS_H
/** /* Definitions for custom, nonstandard IDT vectors used by the kernel (preemption timer, TLB
 * @file * shootdown, cross-CPU reschedule request, spurious interrupt). */
* Definitions for custom, nonstandard IDT entries. They have to be remapped
* by \ref amd64_resolve_irq into legacy IRQs.
*/
#define SCHED_PREEMPT_TIMER 80 #define SCHED_PREEMPT_TIMER 80
#define TLB_SHOOTDOWN 81 #define TLB_SHOOTDOWN 81
#define CPU_REQUEST_SCHED 82
#define CPU_SPURIOUS 255
#endif // _KERNEL_AMD64_INTR_DEFS_H #endif // _KERNEL_AMD64_INTR_DEFS_H

View File

@@ -15,25 +15,33 @@
amd64_intr ## n:; \ amd64_intr ## n:; \
x(n); \ x(n); \
cli; \ cli; \
;\ ; \
push_regs; \ push_regs; \
;\ ; \
movw $0x10, %ax; \
movw %ax, %ds; \
movw %ax, %es; \
; \
cld; \ cld; \
;\ ; \
movq %rsp, %rdi; \ movq %rsp, %rdi; \
;\ ; \
movq %cr3, %rax; pushq %rax; \
; \
movq %rsp, %rbp; \ movq %rsp, %rbp; \
;\ ; \
subq $8, %rsp; \ subq $8, %rsp; \
andq $~0xF, %rsp; \ andq $-16, %rsp; \
;\ ; \
callq amd64_intr_handler; \ callq amd64_intr_handler; \
;\ ; \
movq %rbp, %rsp; \ movq %rbp, %rsp; \
;\ ; \
popq %rax; movq %rax, %cr3; \
; \
pop_regs; \ pop_regs; \
addq $16, %rsp; \ addq $16, %rsp; \
;\ ; \
iretq; iretq;
@@ -88,3 +96,5 @@ make_intr_stub(no_err, 47)
make_intr_stub(no_err, SCHED_PREEMPT_TIMER) make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN) make_intr_stub(no_err, TLB_SHOOTDOWN)
make_intr_stub(no_err, CPU_REQUEST_SCHED)
make_intr_stub(no_err, CPU_SPURIOUS)

View File

@@ -11,36 +11,43 @@
#include <sys/mm.h> #include <sys/mm.h>
#include <sys/smp.h> #include <sys/smp.h>
/// Present flag
#define AMD64_PG_PRESENT (1 << 0) #define AMD64_PG_PRESENT (1 << 0)
/// Writable flag
#define AMD64_PG_RW (1 << 1) #define AMD64_PG_RW (1 << 1)
/// User-accessible flag
#define AMD64_PG_USER (1 << 2) #define AMD64_PG_USER (1 << 2)
#define AMD64_PG_HUGE (1 << 7)
/// Auxiliary struct for page directory walking /* Auxiliary struct for page directory walking */
struct pg_index { struct pg_index {
uint16_t pml4, pml3, pml2, pml1; uint16_t pml4, pml3, pml2, pml1;
} PACKED; } PACKED;
/// Kernel page directory /* Kernel page directory */
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT}; static struct pd kernel_pd;
/// Lock needed to sync between map/unmap operations and TLB shootdown static spin_lock_t kernel_pd_lock;
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
/// Get current value of CR3 register void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
/* Get current value of CR3 register */
static uintptr_t amd64_current_cr3 (void) { static uintptr_t amd64_current_cr3 (void) {
uintptr_t cr3; uintptr_t cr3;
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory"); __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
return cr3; return cr3;
} }
/// Load kernel CR3 as current CR3 /* Load kernel CR3 as current CR3 */
void amd64_load_kernel_cr3 (void) { void amd64_load_kernel_cr3 (void) {
uintptr_t cr3 = amd64_current_cr3 ();
if (cr3 != kernel_pd.cr3_paddr) {
__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory"); __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
}
} }
/// Extract PML info from virtual address struct pd* mm_get_kernel_pd (void) { return &kernel_pd; }
/* Extract PML info from virtual address */
static struct pg_index amd64_mm_page_index (uint64_t vaddr) { static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
struct pg_index ret; struct pg_index ret;
@@ -52,16 +59,19 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
return ret; return ret;
} }
/// Walk paging tables and allocate necessary structures along the way /* Walk paging tables and allocate necessary structures along the way */
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) { static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
uint64_t entry = table[entry_idx]; uint64_t entry = table[entry_idx];
physaddr_t paddr; physaddr_t paddr;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
if (entry & AMD64_PG_PRESENT) if (entry & AMD64_PG_PRESENT) {
if (entry & AMD64_PG_HUGE)
return NULL;
paddr = entry & ~0xFFFULL; paddr = entry & ~0xFFFULL;
else { } else {
if (!alloc) if (!alloc)
return NULL; return NULL;
@@ -85,7 +95,7 @@ static bool amd64_mm_is_table_empty (uint64_t* table) {
return true; return true;
} }
/// Convert generic memory management subsystem flags into AMD64-specific flags /* Convert generic memory management subsystem flags into AMD64-specific flags */
static uint64_t amd64_mm_resolve_flags (uint32_t generic) { static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
uint64_t flags = 0; uint64_t flags = 0;
@@ -96,24 +106,15 @@ static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
return flags; return flags;
} }
/// Reload the current CR3 value ON A LOCAL CPU /* Reload the current CR3 value ON A LOCAL CPU */
static void amd64_reload_cr3 (void) { static void amd64_reload_cr3 (void) {
uint64_t cr3; uint64_t cr3;
__asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory"); __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
} }
/** /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
* @brief Map physical address to virtual address with flags. TLB needs to be flushed
* afterwards.
*/
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t amd64_flags = amd64_mm_resolve_flags (flags); uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
@@ -122,67 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true); uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
if (pml3 == NULL) if (pml3 == NULL)
goto done; return;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true); uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
if (pml2 == NULL) if (pml2 == NULL)
goto done; return;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true); uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
if (pml1 == NULL) if (pml1 == NULL)
goto done; return;
uint64_t* pte = &pml1[pg_index.pml1]; uint64_t* pte = &pml1[pg_index.pml1];
*pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL)); *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
do_reload = true;
done:
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
} }
/// Map a page into kernel page directory /* Map a page into kernel page directory */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
mm_map_page (&kernel_pd, paddr, vaddr, flags); mm_map_page (&kernel_pd, paddr, vaddr, flags);
amd64_reload_cr3 ();
} }
/// Unmap a virtual address. TLB needs to be flushed afterwards /* Unmap a virtual address. TLB needs to be flushed afterwards */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) { void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr); struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false); uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL) if (pml3 == NULL)
goto done; return;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false); uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL) if (pml2 == NULL)
goto done; return;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false); uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL) if (pml1 == NULL)
goto done; return;
uint64_t* pte = &pml1[pg_index.pml1]; uint64_t* pte = &pml1[pg_index.pml1];
if ((*pte) & AMD64_PG_PRESENT) { if ((*pte) & AMD64_PG_PRESENT)
*pte = 0; *pte = 0;
do_reload = true;
}
if (amd64_mm_is_table_empty (pml1)) { if (amd64_mm_is_table_empty (pml1)) {
uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL; uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -201,29 +185,15 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
} }
} }
} }
}
done: /* Unmap a page from kernel page directory */
if (do_reload && (flags & MM_PD_RELOAD)) void mm_unmap_kernel_page (uintptr_t vaddr) {
mm_unmap_page (&kernel_pd, vaddr);
amd64_reload_cr3 (); amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
} }
/// Unmap a page from kernel page directory /* Allocate a userspace-ready page directory */
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
mm_unmap_page (&kernel_pd, vaddr, flags);
}
/// Lock kernel page directory
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
/// Unlock kernel page directory
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
/// Allocate a userspace-ready page directory
uintptr_t mm_alloc_user_pd_phys (void) { uintptr_t mm_alloc_user_pd_phys (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -241,37 +211,111 @@ uintptr_t mm_alloc_user_pd_phys (void) {
return cr3; return cr3;
} }
/** bool mm_validate (struct pd* pd, uintptr_t vaddr) {
* @brief Reload after map/unmap operation was performed. This function does the TLB struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
* shootdown. bool ret = false;
*/
void mm_reload (void) {
spin_lock (&mm_lock);
struct limine_mp_response* mp = limine_mp_request.response; uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
for (size_t i = 0; i < mp->cpu_count; i++) { uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN); if (pml3 == NULL)
goto done;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
uint64_t pte = pml1[pg_index.pml1];
ret = (pte & AMD64_PG_PRESENT) != 0;
done:
return ret;
}
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
bool ok = true;
for (size_t i = 0; i < size; i++) {
ok = mm_validate (pd, vaddr + i);
if (!ok)
goto done;
} }
spin_unlock (&mm_lock); done:
return ok;
} }
/// TLB shootdown IRQ handler uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
static void amd64_tlb_shootdown_irq (void* arg, void* regs) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
(void)arg, (void)regs; uintptr_t ret = 0;
amd64_reload_cr3 (); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
for (size_t i4 = 0; i4 < 512; i4++) {
if (!(pml4[i4] & AMD64_PG_PRESENT))
continue;
uint64_t* pml3 = (uint64_t*)((uintptr_t)hhdm->offset + (pml4[i4] & ~0xFFFULL));
for (size_t i3 = 0; i3 < 512; i3++) {
if (!(pml3[i3] & AMD64_PG_PRESENT))
continue;
uint64_t* pml2 = (uint64_t*)((uintptr_t)hhdm->offset + (pml3[i3] & ~0xFFFULL));
for (size_t i2 = 0; i2 < 512; i2++) {
if (!(pml2[i2] & AMD64_PG_PRESENT))
continue;
uint64_t* pml1 = (uint64_t*)((uintptr_t)hhdm->offset + (pml2[i2] & ~0xFFFULL));
for (size_t i1 = 0; i1 < 512; i1++) {
if ((pml1[i1] & AMD64_PG_PRESENT) && ((pml1[i1] & ~0xFFFULL) == (paddr & ~0xFFFULL))) {
struct pg_index idx = {i4, i3, i2, i1};
ret = (((uint64_t)idx.pml4 << 39) | ((uint64_t)idx.pml3 << 30) |
((uint64_t)idx.pml2 << 21) | ((uint64_t)idx.pml1 << 12) | (paddr & 0xFFFULL));
goto done;
}
}
}
}
}
done:
return ret;
} }
/** uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
* @brief Continue initializing memory management subsystem for AMD64 after the struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
* essential parts were initialized uintptr_t ret = 0;
*/
void mm_init2 (void) { uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE); struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
uint64_t pte = pml1[pg_index.pml1];
if (!(pte & AMD64_PG_PRESENT))
goto done;
ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
done:
return ret;
} }
/// Initialize essentials for the AMD64 memory management subsystem /* Initialize essentials for the AMD64 memory management subsystem */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); } void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
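mm_validate () and mm_validate_buffer () walk the caller's page tables without allocating, which makes them the natural gate before the kernel trusts a user-supplied pointer. A minimal sketch of that use — sys_poke_example () and its error value are hypothetical; only the struct pd access path and the mm_* signatures come from this diff:

/* Hypothetical syscall-side check: refuse the call if any byte of [uptr, uptr + len)
 * is unmapped in the calling process group's page directory. */
uintptr_t sys_poke_example (struct proc* caller, uintptr_t uptr, size_t len) {
    if (!mm_validate_buffer (&caller->procgroup->pd, uptr, len))
        return (uintptr_t)-1; /* hypothetical "bad pointer" status */
    /* ... now safe to translate with mm_v2p () and copy through the HHDM window ... */
    return 0;
}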

View File

@@ -7,11 +7,9 @@
#define PAGE_SIZE 4096 #define PAGE_SIZE 4096
struct pd { struct pd {
spin_lock_t lock;
uintptr_t cr3_paddr; uintptr_t cr3_paddr;
}; };
void amd64_load_kernel_cr3 (void); void amd64_load_kernel_cr3 (void);
void mm_init2 (void);
#endif // _KERNEL_AMD64_MM_H #endif // _KERNEL_AMD64_MM_H

View File

@@ -85,7 +85,7 @@ patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and The precise terms and conditions for copying, distribution and
modification follow. modification follow.
GNU GENERAL PUBLIC LICENSE GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
@@ -140,7 +140,7 @@ above, provided that you also meet all of these conditions:
License. (Exception: if the Program itself is interactive but License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on does not normally print such an announcement, your work based on
the Program is not required to print an announcement.) the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program, identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in and can be reasonably considered independent and separate works in
@@ -198,7 +198,7 @@ access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not distribution of the source code, even though third parties are not
compelled to copy the source along with the object code. compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program 4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is otherwise to copy, modify, sublicense or distribute the Program is
@@ -255,7 +255,7 @@ impose that choice.
This section is intended to make thoroughly clear what is believed to This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License. be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in 8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License original copyright holder who places the Program under this License
@@ -308,7 +308,7 @@ PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES. POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest If you develop a new program, and you want it to be of the greatest

View File

@@ -1,13 +1,23 @@
#include <amd64/gdt.h> #include <amd64/gdt.h>
#include <amd64/proc.h>
#include <aux/elf.h> #include <aux/elf.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h> #include <libk/std.h>
#include <libk/string.h> #include <libk/string.h>
#include <limine/requests.h> #include <limine/requests.h>
#include <mm/liballoc.h> #include <mm/liballoc.h>
#include <mm/pmm.h> #include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h> #include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/debug.h> #include <sys/debug.h>
#include <sys/proc.h>
static atomic_int pids = 0;
struct proc* proc_from_elf (uint8_t* elf_contents) { struct proc* proc_from_elf (uint8_t* elf_contents) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -18,32 +28,24 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
memset (proc, 0, sizeof (*proc)); memset (proc, 0, sizeof (*proc));
proc->pd.lock = SPIN_LOCK_INIT; proc->lock = SPIN_LOCK_INIT;
proc->pd.cr3_paddr = mm_alloc_user_pd_phys (); atomic_store (&proc->state, PROC_READY);
if (proc->pd.cr3_paddr == 0) { proc->pid = atomic_fetch_add (&pids, 1);
proc->procgroup = procgroup_create ();
if (proc->procgroup == NULL) {
free (proc); free (proc);
return NULL; return NULL;
} }
procgroup_attach (proc->procgroup, proc);
proc->pdata.kernel_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE); uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
if (proc->pdata.kernel_stack == PMM_ALLOC_ERR) { proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
free (proc);
return NULL;
}
uintptr_t kernel_stack = proc->pdata.kernel_stack;
proc->pdata.kernel_stack += (uintptr_t)hhdm->offset + KSTACK_SIZE;
proc->pdata.user_stack = pmm_alloc (USTACK_SIZE / PAGE_SIZE); procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
if (proc->pdata.user_stack == PMM_ALLOC_ERR) { MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
free (proc);
pmm_free (kernel_stack, USTACK_SIZE / PAGE_SIZE);
return NULL;
}
uintptr_t user_stack = proc->pdata.user_stack;
proc->pdata.user_stack += USTACK_SIZE;
proc_map (proc, user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE, proc->flags |= PROC_USTK_PREALLOC;
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
struct elf_aux aux = proc_load_segments (proc, elf_contents); struct elf_aux aux = proc_load_segments (proc, elf_contents);
@@ -52,8 +54,85 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
proc->pdata.regs.rflags = 0x202; proc->pdata.regs.rflags = 0x202;
proc->pdata.regs.cs = GDT_UCODE | 0x03; proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = aux.entry; proc->pdata.regs.rip = aux.entry;
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);
return proc; return proc;
} }
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
uintptr_t argument_ptr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxprt;
struct proc* proc = malloc (sizeof (*proc));
if (proc == NULL)
return NULL;
memset (proc, 0, sizeof (*proc));
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);
proc->pid = atomic_fetch_add (&pids, 1);
spin_lock (&proto->lock, &ctxprt);
proc->procgroup = proto->procgroup;
procgroup_attach (proc->procgroup, proc);
spin_unlock (&proto->lock, &ctxprt);
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
proc->pdata.regs.ss = GDT_UDATA | 0x03;
proc->pdata.regs.rsp = (uint64_t)vstack_top;
proc->pdata.regs.rflags = 0x202;
proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = (uint64_t)entry;
proc->uvaddr_argument = argument_ptr;
proc_init_tls (proc);
return proc;
}
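proc_clone () deliberately shares the prototype's procgroup — and with it the page directory and TLS template — while giving the new thread its own kernel stack, register frame and TLS block, so a clone-style syscall only needs the user-chosen stack top, entry point and argument pointer. A minimal sketch of such a handler; sys_clone_example () and its return convention are hypothetical, while proc_register (), cpu_find_lightest () and cpu_request_sched () appear elsewhere in this diff:

long sys_clone_example (struct proc* caller, uintptr_t stack_top, uintptr_t entry, uintptr_t arg) {
    struct proc* child = proc_clone (caller, stack_top, entry, arg);
    if (child == NULL)
        return -1; /* hypothetical error code */
    /* Place the new thread on the least loaded CPU and kick its scheduler. */
    struct cpu* target = cpu_find_lightest ();
    proc_register (child, target);
    cpu_request_sched (target);
    return child->pid;
}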
void proc_cleanup (struct proc* proc) {
 struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
 proc_sqs_cleanup (proc);
 proc_mutexes_cleanup (proc);
 /* kernel_stack stores the HHDM-mapped stack top; convert it back to the physical base for pmm_free () */
 pmm_free (proc->pdata.kernel_stack - KSTACK_SIZE - (uintptr_t)hhdm->offset, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
procgroup_detach (proc->procgroup, proc);
/* clean the process */
free (proc);
}
void proc_init_tls (struct proc* proc) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
if (proc->procgroup->tls.tls_tmpl == NULL)
return;
size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
size_t pages = proc->procgroup->tls.tls_tmpl_pages;
uintptr_t tls_paddr;
uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
uintptr_t ktcb = k_tls_addr + tls_size;
uintptr_t utcb = tls_vaddr + tls_size;
*(uintptr_t*)ktcb = utcb;
proc->pdata.fs_base = utcb;
proc->pdata.tls_vaddr = tls_vaddr;
}
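The block built by proc_init_tls () follows the x86-64 variant II TLS layout: the template image sits just below the thread control block, FS.base points at the TCB, and the first word of the TCB is its own userspace address, so %fs:0 yields a usable self-pointer. A small userspace-side sketch of what that enables, assuming a compiler that lowers __thread accesses to FS-relative loads; per_thread_counter and tcb_self () are hypothetical:

#include <stdint.h>

__thread int per_thread_counter; /* ends up in the per-thread copy of the TLS template */

/* Read back the TCB self-pointer the kernel planted at %fs:0. */
static inline uintptr_t tcb_self (void) {
    uintptr_t self;
    __asm__ volatile ("movq %%fs:0, %0" : "=r"(self));
    return self;
}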

View File

@@ -4,17 +4,19 @@
#include <amd64/intr.h> #include <amd64/intr.h>
#include <libk/std.h> #include <libk/std.h>
/// Top of userspace process' stack /* Top of userspace process' stack */
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL #define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
/// Size of userspace process' stack /* Size of userspace process' stack */
#define USTACK_SIZE (256 * PAGE_SIZE) #define USTACK_SIZE (256 * PAGE_SIZE)
/* proc_map () base address */
#define PROC_MAP_BASE 0x0000700000000000
/// Platform-dependent process data /* Platform-dependent process data */
struct proc_platformdata { struct proc_platformdata {
struct saved_regs regs; struct saved_regs regs;
uintptr_t user_stack;
uintptr_t kernel_stack; uintptr_t kernel_stack;
uint64_t gs_base; uint64_t fs_base;
uintptr_t tls_vaddr;
}; };
#endif // _KERNEL_AMD64_PROC_H #endif // _KERNEL_AMD64_PROC_H

13 kernel/amd64/procgroup.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef _KERNEL_AMD64_PROCGROUP_H
#define _KERNEL_AMD64_PROCGROUP_H
#include <libk/std.h>
struct procgroup_tls {
uint8_t* tls_tmpl;
size_t tls_tmpl_size;
size_t tls_tmpl_total_size;
size_t tls_tmpl_pages;
};
#endif // _KERNEL_AMD64_PROCGROUP_H

View File

@@ -35,4 +35,21 @@
popq % rcx; \ popq % rcx; \
popq % rax; popq % rax;
#define pop_regs_skip_rax \
popq % r15; \
popq % r14; \
popq % r13; \
popq % r12; \
popq % r11; \
popq % r10; \
popq % r9; \
popq % r8; \
popq % rbx; \
popq % rbp; \
popq % rdi; \
popq % rsi; \
popq % rdx; \
popq % rcx; \
addq $8, % rsp
#endif // _KERNEL_AMD64_REGSASM_H #endif // _KERNEL_AMD64_REGSASM_H

View File

@@ -2,9 +2,8 @@
.global amd64_do_sched .global amd64_do_sched
amd64_do_sched: amd64_do_sched:
cli
movq %rsi, %cr3 movq %rsi, %cr3
movq %rdi, %rsp movq %rdi, %rsp
pop_regs pop_regs
add $16, %rsp addq $16, %rsp
iretq iretq

View File

@@ -3,12 +3,21 @@
#include <amd64/sched.h> #include <amd64/sched.h>
#include <libk/std.h> #include <libk/std.h>
#include <proc/proc.h> #include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/mm.h> #include <sys/mm.h>
#include <sys/smp.h> #include <sys/smp.h>
void do_sched (struct proc* proc) { void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
spin_lock_ctx_t ctxpr;
spin_lock (&proc->lock, &ctxpr);
thiscpu->tss.rsp0 = proc->pdata.kernel_stack; thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack; thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd.cr3_paddr); spin_unlock (&proc->lock, &ctxpr);
spin_unlock (cpu_lock, ctxcpu);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
} }

View File

@@ -1,5 +1,6 @@
#include <amd64/apic.h> #include <amd64/apic.h>
#include <amd64/init.h> #include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h> #include <amd64/mm.h>
#include <amd64/msr-index.h> #include <amd64/msr-index.h>
#include <amd64/msr.h> #include <amd64/msr.h>
@@ -7,8 +8,10 @@
#include <libk/string.h> #include <libk/string.h>
#include <limine/requests.h> #include <limine/requests.h>
#include <mm/liballoc.h> #include <mm/liballoc.h>
#include <proc/proc.h>
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/debug.h> #include <sys/debug.h>
#include <sys/sched.h>
#include <sys/smp.h> #include <sys/smp.h>
#include <sys/syscall.h> #include <sys/syscall.h>
@@ -20,7 +23,7 @@ static struct cpu cpus[CPUS_MAX];
static atomic_int cpu_init_count; static atomic_int cpu_init_count;
/// Allocate a CPU structure /// Allocate a CPU structure
struct cpu* cpu_make (void) { struct cpu* cpu_make (uint64_t lapic_id) {
int id = atomic_fetch_add (&cpu_counter, 1); int id = atomic_fetch_add (&cpu_counter, 1);
struct cpu* cpu = &cpus[id]; struct cpu* cpu = &cpus[id];
@@ -28,7 +31,7 @@ struct cpu* cpu_make (void) {
memset (cpu, 0, sizeof (*cpu)); memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT; cpu->lock = SPIN_LOCK_INIT;
cpu->id = id; cpu->id = id;
cpu->self = cpu; cpu->lapic_id = lapic_id;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu); amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
@@ -40,38 +43,65 @@ struct cpu* cpu_get (void) {
return ptr; return ptr;
} }
void cpu_request_sched (struct cpu* cpu) {
if (cpu == thiscpu) {
proc_sched ();
return;
}
amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
}
struct cpu* cpu_find_lightest (void) {
struct cpu* cpu = &cpus[0];
int load = atomic_load (&cpu->proc_run_q_count);
for (unsigned int i = 1; i < cpu_counter; i++) {
struct cpu* new_cpu = &cpus[i];
int new_load = atomic_load (&new_cpu->proc_run_q_count);
if (new_load < load) {
load = new_load;
cpu = new_cpu;
}
}
return cpu;
}
/// Bootstrap code for non-BSP CPUs /// Bootstrap code for non-BSP CPUs
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) { static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_load_kernel_cr3 (); amd64_load_kernel_cr3 ();
struct cpu* cpu = cpu_make (); struct cpu* cpu = cpu_make (mp_info->lapic_id);
amd64_init (cpu, true); /* gdt + idt */ amd64_init (cpu, true); /* gdt + idt */
syscall_init (); syscall_init ();
thiscpu->lapic_ticks = amd64_lapic_init (10000); amd64_lapic_init (1000);
amd64_lapic_tick (thiscpu->lapic_ticks);
DEBUG ("CPU %u is online!\n", thiscpu->id); DEBUG ("CPU %u is online!\n", thiscpu->id);
__asm__ volatile ("sti");
atomic_fetch_sub (&cpu_init_count, 1); atomic_fetch_sub (&cpu_init_count, 1);
for (;;) struct proc* spin_proc = proc_spawn_rd ("spin.exe");
; proc_register (spin_proc, thiscpu);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
} }
/// Initialize SMP subsystem for AMD64. Start AP CPUs /// Initialize SMP subsystem for AMD64. Start AP CPUs
void smp_init (void) { void smp_init (void) {
thiscpu->lapic_ticks = amd64_lapic_init (10000); amd64_lapic_init (1000);
struct limine_mp_response* mp = limine_mp_request.response; struct limine_mp_response* mp = limine_mp_request.response;
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */ cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
for (size_t i = 0; i < mp->cpu_count; i++) { for (size_t i = 0; i < mp->cpu_count; i++) {
if (mp->cpus[i]->lapic_id != thiscpu->id) { if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id); DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
mp->cpus[i]->goto_address = &amd64_smp_bootstrap; mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
} }

View File

@@ -2,11 +2,13 @@
#define _KERNEL_AMD64_SMP_H #define _KERNEL_AMD64_SMP_H
#include <amd64/gdt.h> #include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/tss.h> #include <amd64/tss.h>
#include <aux/compiler.h> #include <aux/compiler.h>
#include <libk/rbtree.h> #include <libk/rbtree.h>
#include <libk/std.h> #include <libk/std.h>
#include <proc/proc.h> #include <proc/proc.h>
#include <sync/spin_lock.h>
#define CPUS_MAX 32 #define CPUS_MAX 32
@@ -14,30 +16,28 @@ struct cpu {
/* for syscall instruction */ /* for syscall instruction */
uintptr_t syscall_user_stack; uintptr_t syscall_user_stack;
uintptr_t syscall_kernel_stack; uintptr_t syscall_kernel_stack;
struct cpu* self;
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16); volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16); volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16); volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
volatile struct gdt_extended gdt ALIGNED (16); volatile struct gdt_extended gdt ALIGNED (16);
volatile struct tss tss; volatile struct tss tss;
uintptr_t lapic_mmio_base;
uint64_t lapic_ticks; uint64_t lapic_ticks;
uint64_t lapic_id;
uint32_t id; uint32_t id;
struct {
uint64_t rflags;
atomic_int nesting;
} irq_ctx;
spin_lock_t lock; spin_lock_t lock;
struct rb_node_link* proc_run_q; struct list_node_link* proc_run_q;
struct proc* proc_current; struct proc* proc_current;
atomic_int proc_run_q_count;
}; };
struct cpu* cpu_make (void); struct cpu* cpu_make (uint64_t lapic_id);
struct cpu* cpu_get (void); struct cpu* cpu_get (void);
void amd64_thiscpu_set_init (void); void cpu_request_sched (struct cpu* cpu);
struct cpu* cpu_find_lightest (void);
#define thiscpu (cpu_get ()) #define thiscpu (cpu_get ())

View File

@@ -1,3 +1,4 @@
.global amd64_spin .global amd64_spin
amd64_spin: amd64_spin:
hlt
jmp amd64_spin jmp amd64_spin

View File

@@ -3,6 +3,8 @@
#include <amd64/mm.h> #include <amd64/mm.h>
#include <amd64/msr-index.h> #include <amd64/msr-index.h>
#include <amd64/msr.h> #include <amd64/msr.h>
#include <libk/string.h>
#include <m/status.h>
#include <m/syscall_defs.h> #include <m/syscall_defs.h>
#include <proc/proc.h> #include <proc/proc.h>
#include <sys/debug.h> #include <sys/debug.h>
@@ -11,28 +13,33 @@
extern void amd64_syscall_entry (void); extern void amd64_syscall_entry (void);
int amd64_syscall_dispatch (void* stack_ptr) { uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
spin_lock_ctx_t ctxcpu, ctxpr;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr; struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* caller = thiscpu->proc_current;
spin_lock (&caller->lock, &ctxpr);
memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&caller->lock, &ctxpr);
spin_unlock (&thiscpu->lock, &ctxcpu);
int syscall_num = regs->rax; int syscall_num = regs->rax;
syscall_handler_func_t func = syscall_find_handler (syscall_num); syscall_handler_func_t func = syscall_find_handler (syscall_num);
if (func == NULL) if (func == NULL) {
return -SR_SYSCALL_NOT_FOUND; return -ST_SYSCALL_NOT_FOUND;
}
struct proc* caller = thiscpu->proc_current; return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
__asm__ volatile ("sti");
int result = func (caller, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
__asm__ volatile ("cli");
return result;
} }
void syscall_init (void) { void syscall_init (void) {
amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_UCODE - 16) << 48)); amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry); amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9)); amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE); amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
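On the user side the dispatcher above implies the usual SysV-style convention: the syscall number travels in rax, the six arguments in rdi, rsi, rdx, r10, r8 and r9, and the syscall instruction itself clobbers rcx and r11. A minimal libc-side wrapper sketch under that assumption; syscall3 () is hypothetical:

#include <stdint.h>

/* Hypothetical three-argument syscall stub matching the register layout read by
 * amd64_syscall_dispatch () above. */
static inline uintptr_t syscall3 (uintptr_t num, uintptr_t a1, uintptr_t a2, uintptr_t a3) {
    uintptr_t ret;
    __asm__ volatile ("syscall"
                      : "=a"(ret)
                      : "a"(num), "D"(a1), "S"(a2), "d"(a3)
                      : "rcx", "r11", "memory");
    return ret;
}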

View File

@@ -9,30 +9,39 @@ amd64_syscall_entry:
movq %rsp, %gs:0 movq %rsp, %gs:0
movq %gs:8, %rsp movq %gs:8, %rsp
pushq $0x23 pushq $0x1b
pushq %gs:0 pushq %gs:0
pushq %r11 pushq %r11
pushq $0x1b pushq $0x23
pushq %rcx pushq %rcx
pushq $0 pushq $0
pushq $0 pushq $0
push_regs push_regs
movw $0x10, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
cld cld
movq %rsp, %rdi movq %rsp, %rdi
movq %cr3, %rax; pushq %rax
movq %rsp, %rbp movq %rsp, %rbp
subq $8, %rsp subq $8, %rsp
andq $~0xF, %rsp andq $-16, %rsp
callq amd64_syscall_dispatch callq amd64_syscall_dispatch
movq %rbp, %rsp movq %rbp, %rsp
pop_regs popq %rbx; movq %rbx, %cr3
pop_regs_skip_rax
addq $56, %rsp addq $56, %rsp
movq %gs:0, %rsp movq %gs:0, %rsp

View File

@@ -391,10 +391,10 @@ typedef struct {
#define SHN_LORESERVE 0xff00 /* Start of reserved indices */ #define SHN_LORESERVE 0xff00 /* Start of reserved indices */
#define SHN_LOPROC 0xff00 /* Start of processor-specific */ #define SHN_LOPROC 0xff00 /* Start of processor-specific */
#define SHN_BEFORE \ #define SHN_BEFORE \
0xff00 /* Order section before all others 0xff00 /* Order section before all others \
(Solaris). */ (Solaris). */
#define SHN_AFTER \ #define SHN_AFTER \
0xff01 /* Order section after all others 0xff01 /* Order section after all others \
(Solaris). */ (Solaris). */
#define SHN_HIPROC 0xff1f /* End of processor-specific */ #define SHN_HIPROC 0xff1f /* End of processor-specific */
#define SHN_LOOS 0xff20 /* Start of OS-specific */ #define SHN_LOOS 0xff20 /* Start of OS-specific */
@@ -454,7 +454,7 @@ typedef struct {
#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */ #define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */
#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */ #define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */
#define SHF_OS_NONCONFORMING \ #define SHF_OS_NONCONFORMING \
(1 << 8) /* Non-standard OS specific handling (1 << 8) /* Non-standard OS specific handling \
required */ required */
#define SHF_GROUP (1 << 9) /* Section is member of a group. */ #define SHF_GROUP (1 << 9) /* Section is member of a group. */
#define SHF_TLS (1 << 10) /* Section hold thread-local data. */ #define SHF_TLS (1 << 10) /* Section hold thread-local data. */
@@ -463,10 +463,10 @@ typedef struct {
#define SHF_MASKPROC 0xf0000000 /* Processor-specific */ #define SHF_MASKPROC 0xf0000000 /* Processor-specific */
#define SHF_GNU_RETAIN (1 << 21) /* Not to be GCed by linker. */ #define SHF_GNU_RETAIN (1 << 21) /* Not to be GCed by linker. */
#define SHF_ORDERED \ #define SHF_ORDERED \
(1 << 30) /* Special ordering requirement (1 << 30) /* Special ordering requirement \
(Solaris). */ (Solaris). */
#define SHF_EXCLUDE \ #define SHF_EXCLUDE \
(1U << 31) /* Section is excluded unless (1U << 31) /* Section is excluded unless \
referenced or allocated (Solaris).*/ referenced or allocated (Solaris).*/
/* Section compression header. Used when SHF_COMPRESSED is set. */ /* Section compression header. Used when SHF_COMPRESSED is set. */
@@ -538,7 +538,7 @@ typedef struct {
#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-through symbol for translator */ #define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-through symbol for translator */
#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */ #define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */
#define SYMINFO_FLG_LAZYLOAD \ #define SYMINFO_FLG_LAZYLOAD \
0x0008 /* Symbol bound to object to be lazy 0x0008 /* Symbol bound to object to be lazy \
loaded */ loaded */
/* Syminfo version values. */ /* Syminfo version values. */
#define SYMINFO_NONE 0 #define SYMINFO_NONE 0
@@ -716,7 +716,7 @@ typedef struct {
#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */ #define NT_PRSTATUS 1 /* Contains copy of prstatus struct */
#define NT_PRFPREG \ #define NT_PRFPREG \
2 /* Contains copy of fpregset 2 /* Contains copy of fpregset \
struct. */ struct. */
#define NT_FPREGSET 2 /* Contains copy of fpregset struct */ #define NT_FPREGSET 2 /* Contains copy of fpregset struct */
#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */ #define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */
@@ -734,10 +734,10 @@ typedef struct {
#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */ #define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */
#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct */ #define NT_PRFPXREG 20 /* Contains copy of fprxregset struct */
#define NT_SIGINFO \ #define NT_SIGINFO \
0x53494749 /* Contains copy of siginfo_t, 0x53494749 /* Contains copy of siginfo_t, \
size might increase */ size might increase */
#define NT_FILE \ #define NT_FILE \
0x46494c45 /* Contains information about mapped 0x46494c45 /* Contains information about mapped \
files */ files */
#define NT_PRXFPREG 0x46e62b7f /* Contains copy of user_fxsr_struct */ #define NT_PRXFPREG 0x46e62b7f /* Contains copy of user_fxsr_struct */
#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ #define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
@@ -754,16 +754,16 @@ typedef struct {
#define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */ #define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */
#define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */ #define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */
#define NT_PPC_TM_CTAR \ #define NT_PPC_TM_CTAR \
0x10d /* TM checkpointed Target Address 0x10d /* TM checkpointed Target Address \
Register */ Register */
#define NT_PPC_TM_CPPR \ #define NT_PPC_TM_CPPR \
0x10e /* TM checkpointed Program Priority 0x10e /* TM checkpointed Program Priority \
Register */ Register */
#define NT_PPC_TM_CDSCR \ #define NT_PPC_TM_CDSCR \
0x10f /* TM checkpointed Data Stream Control 0x10f /* TM checkpointed Data Stream Control \
Register */ Register */
#define NT_PPC_PKEY \ #define NT_PPC_PKEY \
0x110 /* Memory Protection Keys 0x110 /* Memory Protection Keys \
registers. */ registers. */
#define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers. */ #define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers. */
#define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register. */ #define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register. */
@@ -782,12 +782,12 @@ typedef struct {
#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ #define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */ #define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
#define NT_S390_VXRS_LOW \ #define NT_S390_VXRS_LOW \
0x309 /* s390 vector registers 0-15 0x309 /* s390 vector registers 0-15 \
upper half. */ upper half. */
#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31. */ #define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31. */
#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers. */ #define NT_S390_GS_CB 0x30b /* s390 guarded storage registers. */
#define NT_S390_GS_BC \ #define NT_S390_GS_BC \
0x30c /* s390 guarded storage 0x30c /* s390 guarded storage \
broadcast control block. */ broadcast control block. */
#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation. */ #define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation. */
#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data. */ #define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data. */
@@ -797,22 +797,22 @@ typedef struct {
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE \ #define NT_ARM_SVE \
0x405 /* ARM Scalable Vector Extension 0x405 /* ARM Scalable Vector Extension \
registers */ registers */
#define NT_ARM_PAC_MASK \ #define NT_ARM_PAC_MASK \
0x406 /* ARM pointer authentication 0x406 /* ARM pointer authentication \
code masks. */ code masks. */
#define NT_ARM_PACA_KEYS \ #define NT_ARM_PACA_KEYS \
0x407 /* ARM pointer authentication 0x407 /* ARM pointer authentication \
address keys. */ address keys. */
#define NT_ARM_PACG_KEYS \ #define NT_ARM_PACG_KEYS \
0x408 /* ARM pointer authentication 0x408 /* ARM pointer authentication \
generic key. */ generic key. */
#define NT_ARM_TAGGED_ADDR_CTRL \ #define NT_ARM_TAGGED_ADDR_CTRL \
0x409 /* AArch64 tagged address 0x409 /* AArch64 tagged address \
control. */ control. */
#define NT_ARM_PAC_ENABLED_KEYS \ #define NT_ARM_PAC_ENABLED_KEYS \
0x40a /* AArch64 pointer authentication 0x40a /* AArch64 pointer authentication \
enabled keys. */ enabled keys. */
#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers. */ #define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers. */
#define NT_ARM_ZA 0x40c /* ARM SME ZA registers. */ #define NT_ARM_ZA 0x40c /* ARM SME ZA registers. */
@@ -827,20 +827,20 @@ typedef struct {
#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */ #define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ #define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_RISCV_TAGGED_ADDR_CTRL \ #define NT_RISCV_TAGGED_ADDR_CTRL \
0x902 /* RISC-V tagged 0x902 /* RISC-V tagged \
address control */ address control */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers. */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers. */
#define NT_LOONGARCH_CSR \ #define NT_LOONGARCH_CSR \
0xa01 /* LoongArch control and 0xa01 /* LoongArch control and \
status registers. */ status registers. */
#define NT_LOONGARCH_LSX \ #define NT_LOONGARCH_LSX \
0xa02 /* LoongArch Loongson SIMD 0xa02 /* LoongArch Loongson SIMD \
Extension registers. */ Extension registers. */
#define NT_LOONGARCH_LASX \ #define NT_LOONGARCH_LASX \
0xa03 /* LoongArch Loongson Advanced 0xa03 /* LoongArch Loongson Advanced \
SIMD Extension registers. */ SIMD Extension registers. */
#define NT_LOONGARCH_LBT \ #define NT_LOONGARCH_LBT \
0xa04 /* LoongArch Loongson Binary 0xa04 /* LoongArch Loongson Binary \
Translation registers. */ Translation registers. */
#define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */ #define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */
#define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */ #define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */
@@ -927,7 +927,7 @@ typedef struct {
#define DT_MOVESZ 0x6ffffdfb #define DT_MOVESZ 0x6ffffdfb
#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */ #define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */
#define DT_POSFLAG_1 \ #define DT_POSFLAG_1 \
0x6ffffdfd /* Flags for DT_* entries, effecting 0x6ffffdfd /* Flags for DT_* entries, effecting \
the following DT_* entry. */ the following DT_* entry. */
#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */ #define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */
#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */ #define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */
@@ -966,11 +966,11 @@ typedef struct {
/* These were chosen by Sun. */ /* These were chosen by Sun. */
#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */ #define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */
#define DT_VERDEF \ #define DT_VERDEF \
0x6ffffffc /* Address of version definition 0x6ffffffc /* Address of version definition \
table */ table */
#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */ #define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */
#define DT_VERNEED \ #define DT_VERNEED \
0x6ffffffe /* Address of table with needed 0x6ffffffe /* Address of table with needed \
versions */ versions */
#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */ #define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */
#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */ #define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */
@@ -1031,7 +1031,7 @@ typedef struct {
/* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry. */ /* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry. */
#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */ #define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */
#define DF_P1_GROUPPERM \ #define DF_P1_GROUPPERM \
0x00000002 /* Symbols from next object are not 0x00000002 /* Symbols from next object are not \
generally available. */ generally available. */
/* Version definition sections. */ /* Version definition sections. */
@@ -1066,7 +1066,7 @@ typedef struct {
/* Legal values for vd_flags (version information flags). */ /* Legal values for vd_flags (version information flags). */
#define VER_FLG_BASE 0x1 /* Version definition of file itself */ #define VER_FLG_BASE 0x1 /* Version definition of file itself */
#define VER_FLG_WEAK \ #define VER_FLG_WEAK \
0x2 /* Weak version identifier. Also 0x2 /* Weak version identifier. Also \
used by vna_flags below. */ used by vna_flags below. */
/* Versym symbol index values. */ /* Versym symbol index values. */
@@ -1187,7 +1187,7 @@ typedef struct {
/* Some more special a_type values describing the hardware. */ /* Some more special a_type values describing the hardware. */
#define AT_PLATFORM 15 /* String identifying platform. */ #define AT_PLATFORM 15 /* String identifying platform. */
#define AT_HWCAP \ #define AT_HWCAP \
16 /* Machine-dependent hints about 16 /* Machine-dependent hints about \
processor capabilities. */ processor capabilities. */
/* This entry gives some information about the FPU initialization /* This entry gives some information about the FPU initialization
@@ -1210,7 +1210,7 @@ typedef struct {
#define AT_RANDOM 25 /* Address of 16 random bytes. */ #define AT_RANDOM 25 /* Address of 16 random bytes. */
#define AT_HWCAP2 \ #define AT_HWCAP2 \
26 /* More machine-dependent hints about 26 /* More machine-dependent hints about \
processor capabilities. */ processor capabilities. */
#define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size. */ #define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size. */
@@ -1465,13 +1465,13 @@ typedef struct {
#define R_68K_TLS_IE16 35 /* 16 bit GOT offset for IE */ #define R_68K_TLS_IE16 35 /* 16 bit GOT offset for IE */
#define R_68K_TLS_IE8 36 /* 8 bit GOT offset for IE */ #define R_68K_TLS_IE8 36 /* 8 bit GOT offset for IE */
#define R_68K_TLS_LE32 \ #define R_68K_TLS_LE32 \
37 /* 32 bit offset relative to 37 /* 32 bit offset relative to \
static TLS block */ static TLS block */
#define R_68K_TLS_LE16 \ #define R_68K_TLS_LE16 \
38 /* 16 bit offset relative to 38 /* 16 bit offset relative to \
static TLS block */ static TLS block */
#define R_68K_TLS_LE8 \ #define R_68K_TLS_LE8 \
39 /* 8 bit offset relative to 39 /* 8 bit offset relative to \
static TLS block */ static TLS block */
#define R_68K_TLS_DTPMOD32 40 /* 32 bit module number */ #define R_68K_TLS_DTPMOD32 40 /* 32 bit module number */
#define R_68K_TLS_DTPREL32 41 /* 32 bit module-relative offset */ #define R_68K_TLS_DTPREL32 41 /* 32 bit module-relative offset */
@@ -1497,47 +1497,47 @@ typedef struct {
#define R_386_32PLT 11 #define R_386_32PLT 11
#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */ #define R_386_TLS_TPOFF 14 /* Offset in static TLS block */
#define R_386_TLS_IE \ #define R_386_TLS_IE \
15 /* Address of GOT entry for static TLS 15 /* Address of GOT entry for static TLS \
block offset */ block offset */
#define R_386_TLS_GOTIE \ #define R_386_TLS_GOTIE \
16 /* GOT entry for static TLS block 16 /* GOT entry for static TLS block \
offset */ offset */
#define R_386_TLS_LE \ #define R_386_TLS_LE \
17 /* Offset relative to static TLS 17 /* Offset relative to static TLS \
block */ block */
#define R_386_TLS_GD \ #define R_386_TLS_GD \
18 /* Direct 32 bit for GNU version of 18 /* Direct 32 bit for GNU version of \
general dynamic thread local data */ general dynamic thread local data */
#define R_386_TLS_LDM \ #define R_386_TLS_LDM \
19 /* Direct 32 bit for GNU version of 19 /* Direct 32 bit for GNU version of \
local dynamic thread local data local dynamic thread local data \
in LE code */ in LE code */
#define R_386_16 20 #define R_386_16 20
#define R_386_PC16 21 #define R_386_PC16 21
#define R_386_8 22 #define R_386_8 22
#define R_386_PC8 23 #define R_386_PC8 23
#define R_386_TLS_GD_32 \ #define R_386_TLS_GD_32 \
24 /* Direct 32 bit for general dynamic 24 /* Direct 32 bit for general dynamic \
thread local data */ thread local data */
#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */ #define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */
#define R_386_TLS_GD_CALL \ #define R_386_TLS_GD_CALL \
26 /* Relocation for call to 26 /* Relocation for call to \
__tls_get_addr() */ __tls_get_addr() */
#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */ #define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */
#define R_386_TLS_LDM_32 \ #define R_386_TLS_LDM_32 \
28 /* Direct 32 bit for local dynamic 28 /* Direct 32 bit for local dynamic \
thread local data in LE code */ thread local data in LE code */
#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */ #define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */
#define R_386_TLS_LDM_CALL \ #define R_386_TLS_LDM_CALL \
30 /* Relocation for call to 30 /* Relocation for call to \
__tls_get_addr() in LDM code */ __tls_get_addr() in LDM code */
#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */ #define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */
#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */ #define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */
#define R_386_TLS_IE_32 \ #define R_386_TLS_IE_32 \
33 /* GOT entry for negated static TLS 33 /* GOT entry for negated static TLS \
block offset */ block offset */
#define R_386_TLS_LE_32 \ #define R_386_TLS_LE_32 \
34 /* Negated offset relative to static 34 /* Negated offset relative to static \
TLS block */ TLS block */
#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */ #define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */
#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */ #define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */
@@ -1545,17 +1545,17 @@ typedef struct {
#define R_386_SIZE32 38 /* 32-bit symbol size */ #define R_386_SIZE32 38 /* 32-bit symbol size */
#define R_386_TLS_GOTDESC 39 /* GOT offset for TLS descriptor. */ #define R_386_TLS_GOTDESC 39 /* GOT offset for TLS descriptor. */
#define R_386_TLS_DESC_CALL \ #define R_386_TLS_DESC_CALL \
40 /* Marker of call through TLS 40 /* Marker of call through TLS \
descriptor for descriptor for \
relaxation. */ relaxation. */
#define R_386_TLS_DESC \ #define R_386_TLS_DESC \
41 /* TLS descriptor containing 41 /* TLS descriptor containing \
pointer to code and to pointer to code and to \
argument, returning the TLS argument, returning the TLS \
offset for the symbol. */ offset for the symbol. */
#define R_386_IRELATIVE 42 /* Adjust indirectly by program base */ #define R_386_IRELATIVE 42 /* Adjust indirectly by program base */
#define R_386_GOT32X \ #define R_386_GOT32X \
43 /* Load from 32 bit GOT entry, 43 /* Load from 32 bit GOT entry, \
relaxable. */ relaxable. */
/* Keep this the last entry. */ /* Keep this the last entry. */
#define R_386_NUM 44 #define R_386_NUM 44
@@ -1698,26 +1698,26 @@ typedef struct {
#define EF_MIPS_ABI2 32 #define EF_MIPS_ABI2 32
#define EF_MIPS_ABI_ON32 64 #define EF_MIPS_ABI_ON32 64
#define EF_MIPS_OPTIONS_FIRST \ #define EF_MIPS_OPTIONS_FIRST \
0x00000080 /* Process the .MIPS.options 0x00000080 /* Process the .MIPS.options \
section first by ld. */ section first by ld. */
#define EF_MIPS_32BITMODE \ #define EF_MIPS_32BITMODE \
0x00000100 /* Indicates code compiled for 0x00000100 /* Indicates code compiled for \
a 64-bit machine in 32-bit a 64-bit machine in 32-bit \
mode (regs are 32-bits mode (regs are 32-bits \
wide). */ wide). */
#define EF_MIPS_FP64 512 /* Uses FP64 (12 callee-saved). */ #define EF_MIPS_FP64 512 /* Uses FP64 (12 callee-saved). */
#define EF_MIPS_NAN2008 1024 /* Uses IEEE 754-2008 NaN encoding. */ #define EF_MIPS_NAN2008 1024 /* Uses IEEE 754-2008 NaN encoding. */
#define EF_MIPS_ARCH_ASE \ #define EF_MIPS_ARCH_ASE \
0x0f000000 /* Architectural Extensions 0x0f000000 /* Architectural Extensions \
used by this file. */ used by this file. */
#define EF_MIPS_ARCH_ASE_MDMX \ #define EF_MIPS_ARCH_ASE_MDMX \
0x08000000 /* Use MDMX multimedia 0x08000000 /* Use MDMX multimedia \
extensions. */ extensions. */
#define EF_MIPS_ARCH_ASE_M16 \ #define EF_MIPS_ARCH_ASE_M16 \
0x04000000 /* Use MIPS-16 ISA 0x04000000 /* Use MIPS-16 ISA \
extensions. */ extensions. */
#define EF_MIPS_ARCH_ASE_MICROMIPS \ #define EF_MIPS_ARCH_ASE_MICROMIPS \
0x02000000 /* Use MICROMIPS ISA 0x02000000 /* Use MICROMIPS ISA \
extensions. */ extensions. */
#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level. */ #define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level. */
@@ -1735,11 +1735,11 @@ typedef struct {
#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */ #define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */
#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */ #define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */
#define EF_MIPS_ABI \ #define EF_MIPS_ABI \
0x0000F000 /* The ABI of the file. Also 0x0000F000 /* The ABI of the file. Also \
see EF_MIPS_ABI2 above. */ see EF_MIPS_ABI2 above. */
#define EF_MIPS_ABI_O32 0x00001000 /* The original o32 abi. */ #define EF_MIPS_ABI_O32 0x00001000 /* The original o32 abi. */
#define EF_MIPS_ABI_O64 \ #define EF_MIPS_ABI_O64 \
0x00002000 /* O32 extended to work on 0x00002000 /* O32 extended to work on \
64 bit architectures. */ 64 bit architectures. */
#define EF_MIPS_ABI_EABI32 0x00003000 /* EABI in 32 bit mode. */ #define EF_MIPS_ABI_EABI32 0x00003000 /* EABI in 32 bit mode. */
#define EF_MIPS_ABI_EABI64 0x00004000 /* EABI in 64 bit mode. */ #define EF_MIPS_ABI_EABI64 0x00004000 /* EABI in 64 bit mode. */
@@ -2083,27 +2083,27 @@ typedef struct {
#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */ #define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */
#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */ #define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */
#define DT_MIPS_DELTA_CLASS_NO \ #define DT_MIPS_DELTA_CLASS_NO \
0x70000018 /* Number of entries in 0x70000018 /* Number of entries in \
DT_MIPS_DELTA_CLASS. */ DT_MIPS_DELTA_CLASS. */
#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */ #define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */
#define DT_MIPS_DELTA_INSTANCE_NO \ #define DT_MIPS_DELTA_INSTANCE_NO \
0x7000001a /* Number of entries in 0x7000001a /* Number of entries in \
DT_MIPS_DELTA_INSTANCE. */ DT_MIPS_DELTA_INSTANCE. */
#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */ #define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */
#define DT_MIPS_DELTA_RELOC_NO \ #define DT_MIPS_DELTA_RELOC_NO \
0x7000001c /* Number of entries in 0x7000001c /* Number of entries in \
DT_MIPS_DELTA_RELOC. */ DT_MIPS_DELTA_RELOC. */
#define DT_MIPS_DELTA_SYM \ #define DT_MIPS_DELTA_SYM \
0x7000001d /* Delta symbols that Delta 0x7000001d /* Delta symbols that Delta \
relocations refer to. */ relocations refer to. */
#define DT_MIPS_DELTA_SYM_NO \ #define DT_MIPS_DELTA_SYM_NO \
0x7000001e /* Number of entries in 0x7000001e /* Number of entries in \
DT_MIPS_DELTA_SYM. */ DT_MIPS_DELTA_SYM. */
#define DT_MIPS_DELTA_CLASSSYM \ #define DT_MIPS_DELTA_CLASSSYM \
0x70000020 /* Delta symbols that hold the 0x70000020 /* Delta symbols that hold the \
class declaration. */ class declaration. */
#define DT_MIPS_DELTA_CLASSSYM_NO \ #define DT_MIPS_DELTA_CLASSSYM_NO \
0x70000021 /* Number of entries in 0x70000021 /* Number of entries in \
DT_MIPS_DELTA_CLASSSYM. */ DT_MIPS_DELTA_CLASSSYM. */
#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */ #define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */
#define DT_MIPS_PIXIE_INIT 0x70000023 #define DT_MIPS_PIXIE_INIT 0x70000023
@@ -2117,10 +2117,10 @@ typedef struct {
#define DT_MIPS_DYNSTR_ALIGN 0x7000002b #define DT_MIPS_DYNSTR_ALIGN 0x7000002b
#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */ #define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */
#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR \ #define DT_MIPS_RLD_TEXT_RESOLVE_ADDR \
0x7000002d /* Address of rld_text_rsolve 0x7000002d /* Address of rld_text_rsolve \
function stored in GOT. */ function stored in GOT. */
#define DT_MIPS_PERF_SUFFIX \ #define DT_MIPS_PERF_SUFFIX \
0x7000002e /* Default suffix of dso to be added 0x7000002e /* Default suffix of dso to be added \
by rld on dlopen() calls. */ by rld on dlopen() calls. */
#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */ #define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */
#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */ #define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */
@@ -2293,7 +2293,7 @@ enum {
#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */ #define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */ #define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
#define EF_PARISC_NO_KABP \ #define EF_PARISC_NO_KABP \
0x00100000 /* No kernel assisted branch 0x00100000 /* No kernel assisted branch \
prediction. */ prediction. */
#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */ #define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */ #define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
@@ -2307,7 +2307,7 @@ enum {
/* Additional section indices. */ /* Additional section indices. */
#define SHN_PARISC_ANSI_COMMON \ #define SHN_PARISC_ANSI_COMMON \
0xff00 /* Section for tentatively declared 0xff00 /* Section for tentatively declared \
symbols in ANSI C. */ symbols in ANSI C. */
#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */ #define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
@@ -2555,7 +2555,7 @@ enum {
/* Cygnus local bits below */ /* Cygnus local bits below */
#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/ #define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/
#define EF_PPC_RELOCATABLE_LIB \ #define EF_PPC_RELOCATABLE_LIB \
0x00008000 /* PowerPC -mrelocatable-lib 0x00008000 /* PowerPC -mrelocatable-lib \
flag */ flag */
/* PowerPC relocations defined by the ABIs */ /* PowerPC relocations defined by the ABIs */
@@ -2877,12 +2877,12 @@ enum {
/* ARM-specific values for sh_flags */ /* ARM-specific values for sh_flags */
#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */ #define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */
#define SHF_ARM_COMDEF \ #define SHF_ARM_COMDEF \
0x80000000 /* Section may be multiply defined 0x80000000 /* Section may be multiply defined \
in the input to a link step. */ in the input to a link step. */
/* ARM-specific program header flags */ /* ARM-specific program header flags */
#define PF_ARM_SB \ #define PF_ARM_SB \
0x10000000 /* Segment contains the location 0x10000000 /* Segment contains the location \
addressed by the static base. */ addressed by the static base. */
#define PF_ARM_PI 0x20000000 /* Position-independent segment. */ #define PF_ARM_PI 0x20000000 /* Position-independent segment. */
#define PF_ARM_ABS 0x40000000 /* Absolute segment. */ #define PF_ARM_ABS 0x40000000 /* Absolute segment. */
@@ -3038,7 +3038,7 @@ enum {
#define R_ARM_NONE 0 /* No reloc */ #define R_ARM_NONE 0 /* No reloc */
#define R_ARM_PC24 \ #define R_ARM_PC24 \
1 /* Deprecated PC relative 26 1 /* Deprecated PC relative 26 \
bit branch. */ bit branch. */
#define R_ARM_ABS32 2 /* Direct 32 bit */ #define R_ARM_ABS32 2 /* Direct 32 bit */
#define R_ARM_REL32 3 /* PC relative 32 bit */ #define R_ARM_REL32 3 /* PC relative 32 bit */
@@ -3050,7 +3050,7 @@ enum {
#define R_ARM_SBREL32 9 #define R_ARM_SBREL32 9
#define R_ARM_THM_PC22 10 /* PC relative 24 bit (Thumb32 BL). */ #define R_ARM_THM_PC22 10 /* PC relative 24 bit (Thumb32 BL). */
#define R_ARM_THM_PC8 \ #define R_ARM_THM_PC8 \
11 /* PC relative & 0x3FC 11 /* PC relative & 0x3FC \
(Thumb16 LDR, ADD, ADR). */ (Thumb16 LDR, ADD, ADR). */
#define R_ARM_AMP_VCALL9 12 #define R_ARM_AMP_VCALL9 12
#define R_ARM_SWI24 13 /* Obsolete static relocation. */ #define R_ARM_SWI24 13 /* Obsolete static relocation. */
@@ -3071,7 +3071,7 @@ enum {
#define R_ARM_PLT32 27 /* Deprecated, 32 bit PLT address. */ #define R_ARM_PLT32 27 /* Deprecated, 32 bit PLT address. */
#define R_ARM_CALL 28 /* PC relative 24 bit (BL, BLX). */ #define R_ARM_CALL 28 /* PC relative 24 bit (BL, BLX). */
#define R_ARM_JUMP24 \ #define R_ARM_JUMP24 \
29 /* PC relative 24 bit 29 /* PC relative 24 bit \
(B, BL<cond>). */ (B, BL<cond>). */
#define R_ARM_THM_JUMP24 30 /* PC relative 24 bit (Thumb32 B.W). */ #define R_ARM_THM_JUMP24 30 /* PC relative 24 bit (Thumb32 B.W). */
#define R_ARM_BASE_ABS 31 /* Adjust by program base. */ #define R_ARM_BASE_ABS 31 /* Adjust by program base. */
@@ -3092,25 +3092,25 @@ enum {
#define R_ARM_MOVT_PREL 46 /* PC relative (MOVT). */ #define R_ARM_MOVT_PREL 46 /* PC relative (MOVT). */
#define R_ARM_THM_MOVW_ABS_NC 47 /* Direct 16 bit (Thumb32 MOVW). */ #define R_ARM_THM_MOVW_ABS_NC 47 /* Direct 16 bit (Thumb32 MOVW). */
#define R_ARM_THM_MOVT_ABS \ #define R_ARM_THM_MOVT_ABS \
48 /* Direct high 16 bit 48 /* Direct high 16 bit \
(Thumb32 MOVT). */ (Thumb32 MOVT). */
#define R_ARM_THM_MOVW_PREL_NC \ #define R_ARM_THM_MOVW_PREL_NC \
49 /* PC relative 16 bit 49 /* PC relative 16 bit \
(Thumb32 MOVW). */ (Thumb32 MOVW). */
#define R_ARM_THM_MOVT_PREL \ #define R_ARM_THM_MOVT_PREL \
50 /* PC relative high 16 bit 50 /* PC relative high 16 bit \
(Thumb32 MOVT). */ (Thumb32 MOVT). */
#define R_ARM_THM_JUMP19 \ #define R_ARM_THM_JUMP19 \
51 /* PC relative 20 bit 51 /* PC relative 20 bit \
(Thumb32 B<cond>.W). */ (Thumb32 B<cond>.W). */
#define R_ARM_THM_JUMP6 \ #define R_ARM_THM_JUMP6 \
52 /* PC relative X & 0x7E 52 /* PC relative X & 0x7E \
(Thumb16 CBZ, CBNZ). */ (Thumb16 CBZ, CBNZ). */
#define R_ARM_THM_ALU_PREL_11_0 \ #define R_ARM_THM_ALU_PREL_11_0 \
53 /* PC relative 12 bit 53 /* PC relative 12 bit \
(Thumb32 ADR.W). */ (Thumb32 ADR.W). */
#define R_ARM_THM_PC12 \ #define R_ARM_THM_PC12 \
54 /* PC relative 12 bit 54 /* PC relative 12 bit \
(Thumb32 LDR{D,SB,H,SH}). */ (Thumb32 LDR{D,SB,H,SH}). */
#define R_ARM_ABS32_NOI 55 /* Direct 32-bit. */ #define R_ARM_ABS32_NOI 55 /* Direct 32-bit. */
#define R_ARM_REL32_NOI 56 /* PC relative 32-bit. */ #define R_ARM_REL32_NOI 56 /* PC relative 32-bit. */
@@ -3122,13 +3122,13 @@ enum {
#define R_ARM_LDR_PC_G1 62 /* PC relative (LDR,STR,LDRB,STRB). */ #define R_ARM_LDR_PC_G1 62 /* PC relative (LDR,STR,LDRB,STRB). */
#define R_ARM_LDR_PC_G2 63 /* PC relative (LDR,STR,LDRB,STRB). */ #define R_ARM_LDR_PC_G2 63 /* PC relative (LDR,STR,LDRB,STRB). */
#define R_ARM_LDRS_PC_G0 \ #define R_ARM_LDRS_PC_G0 \
64 /* PC relative (STR{D,H}, 64 /* PC relative (STR{D,H}, \
LDR{D,SB,H,SH}). */ LDR{D,SB,H,SH}). */
#define R_ARM_LDRS_PC_G1 \ #define R_ARM_LDRS_PC_G1 \
65 /* PC relative (STR{D,H}, 65 /* PC relative (STR{D,H}, \
LDR{D,SB,H,SH}). */ LDR{D,SB,H,SH}). */
#define R_ARM_LDRS_PC_G2 \ #define R_ARM_LDRS_PC_G2 \
66 /* PC relative (STR{D,H}, 66 /* PC relative (STR{D,H}, \
LDR{D,SB,H,SH}). */ LDR{D,SB,H,SH}). */
#define R_ARM_LDC_PC_G0 67 /* PC relative (LDC, STC). */ #define R_ARM_LDC_PC_G0 67 /* PC relative (LDC, STC). */
#define R_ARM_LDC_PC_G1 68 /* PC relative (LDC, STC). */ #define R_ARM_LDC_PC_G1 68 /* PC relative (LDC, STC). */
@@ -3139,43 +3139,43 @@ enum {
#define R_ARM_ALU_SB_G1 73 /* Program base relative (ADD,SUB). */ #define R_ARM_ALU_SB_G1 73 /* Program base relative (ADD,SUB). */
#define R_ARM_ALU_SB_G2 74 /* Program base relative (ADD,SUB). */ #define R_ARM_ALU_SB_G2 74 /* Program base relative (ADD,SUB). */
#define R_ARM_LDR_SB_G0 \ #define R_ARM_LDR_SB_G0 \
75 /* Program base relative (LDR, 75 /* Program base relative (LDR, \
STR, LDRB, STRB). */ STR, LDRB, STRB). */
#define R_ARM_LDR_SB_G1 \ #define R_ARM_LDR_SB_G1 \
76 /* Program base relative 76 /* Program base relative \
(LDR, STR, LDRB, STRB). */ (LDR, STR, LDRB, STRB). */
#define R_ARM_LDR_SB_G2 \ #define R_ARM_LDR_SB_G2 \
77 /* Program base relative 77 /* Program base relative \
(LDR, STR, LDRB, STRB). */ (LDR, STR, LDRB, STRB). */
#define R_ARM_LDRS_SB_G0 \ #define R_ARM_LDRS_SB_G0 \
78 /* Program base relative 78 /* Program base relative \
(LDR, STR, LDRB, STRB). */ (LDR, STR, LDRB, STRB). */
#define R_ARM_LDRS_SB_G1 \ #define R_ARM_LDRS_SB_G1 \
79 /* Program base relative 79 /* Program base relative \
(LDR, STR, LDRB, STRB). */ (LDR, STR, LDRB, STRB). */
#define R_ARM_LDRS_SB_G2 \ #define R_ARM_LDRS_SB_G2 \
80 /* Program base relative 80 /* Program base relative \
(LDR, STR, LDRB, STRB). */ (LDR, STR, LDRB, STRB). */
#define R_ARM_LDC_SB_G0 81 /* Program base relative (LDC,STC). */ #define R_ARM_LDC_SB_G0 81 /* Program base relative (LDC,STC). */
#define R_ARM_LDC_SB_G1 82 /* Program base relative (LDC,STC). */ #define R_ARM_LDC_SB_G1 82 /* Program base relative (LDC,STC). */
#define R_ARM_LDC_SB_G2 83 /* Program base relative (LDC,STC). */ #define R_ARM_LDC_SB_G2 83 /* Program base relative (LDC,STC). */
#define R_ARM_MOVW_BREL_NC \ #define R_ARM_MOVW_BREL_NC \
84 /* Program base relative 16 84 /* Program base relative 16 \
bit (MOVW). */ bit (MOVW). */
#define R_ARM_MOVT_BREL \ #define R_ARM_MOVT_BREL \
85 /* Program base relative high 85 /* Program base relative high \
16 bit (MOVT). */ 16 bit (MOVT). */
#define R_ARM_MOVW_BREL \ #define R_ARM_MOVW_BREL \
86 /* Program base relative 16 86 /* Program base relative 16 \
bit (MOVW). */ bit (MOVW). */
#define R_ARM_THM_MOVW_BREL_NC \ #define R_ARM_THM_MOVW_BREL_NC \
87 /* Program base relative 16 87 /* Program base relative 16 \
bit (Thumb32 MOVW). */ bit (Thumb32 MOVW). */
#define R_ARM_THM_MOVT_BREL \ #define R_ARM_THM_MOVT_BREL \
88 /* Program base relative high 88 /* Program base relative high \
16 bit (Thumb32 MOVT). */ 16 bit (Thumb32 MOVT). */
#define R_ARM_THM_MOVW_BREL \ #define R_ARM_THM_MOVW_BREL \
89 /* Program base relative 16 89 /* Program base relative 16 \
bit (Thumb32 MOVW). */ bit (Thumb32 MOVW). */
#define R_ARM_TLS_GOTDESC 90 #define R_ARM_TLS_GOTDESC 90
#define R_ARM_TLS_CALL 91 #define R_ARM_TLS_CALL 91
@@ -3185,48 +3185,48 @@ enum {
#define R_ARM_GOT_ABS 95 /* GOT entry. */ #define R_ARM_GOT_ABS 95 /* GOT entry. */
#define R_ARM_GOT_PREL 96 /* PC relative GOT entry. */ #define R_ARM_GOT_PREL 96 /* PC relative GOT entry. */
#define R_ARM_GOT_BREL12 \ #define R_ARM_GOT_BREL12 \
97 /* GOT entry relative to GOT 97 /* GOT entry relative to GOT \
origin (LDR). */ origin (LDR). */
#define R_ARM_GOTOFF12 \ #define R_ARM_GOTOFF12 \
98 /* 12 bit, GOT entry relative 98 /* 12 bit, GOT entry relative \
to GOT origin (LDR, STR). */ to GOT origin (LDR, STR). */
#define R_ARM_GOTRELAX 99 #define R_ARM_GOTRELAX 99
#define R_ARM_GNU_VTENTRY 100 #define R_ARM_GNU_VTENTRY 100
#define R_ARM_GNU_VTINHERIT 101 #define R_ARM_GNU_VTINHERIT 101
#define R_ARM_THM_PC11 102 /* PC relative & 0xFFE (Thumb16 B). */ #define R_ARM_THM_PC11 102 /* PC relative & 0xFFE (Thumb16 B). */
#define R_ARM_THM_PC9 \ #define R_ARM_THM_PC9 \
103 /* PC relative & 0x1FE 103 /* PC relative & 0x1FE \
(Thumb16 B/B<cond>). */ (Thumb16 B/B<cond>). */
#define R_ARM_TLS_GD32 \ #define R_ARM_TLS_GD32 \
104 /* PC-rel 32 bit for global dynamic 104 /* PC-rel 32 bit for global dynamic \
thread local data */ thread local data */
#define R_ARM_TLS_LDM32 \ #define R_ARM_TLS_LDM32 \
105 /* PC-rel 32 bit for local dynamic 105 /* PC-rel 32 bit for local dynamic \
thread local data */ thread local data */
#define R_ARM_TLS_LDO32 \ #define R_ARM_TLS_LDO32 \
106 /* 32 bit offset relative to TLS 106 /* 32 bit offset relative to TLS \
block */ block */
#define R_ARM_TLS_IE32 \ #define R_ARM_TLS_IE32 \
107 /* PC-rel 32 bit for GOT entry of 107 /* PC-rel 32 bit for GOT entry of \
static TLS block offset */ static TLS block offset */
#define R_ARM_TLS_LE32 \ #define R_ARM_TLS_LE32 \
108 /* 32 bit offset relative to static 108 /* 32 bit offset relative to static \
TLS block */ TLS block */
#define R_ARM_TLS_LDO12 \ #define R_ARM_TLS_LDO12 \
109 /* 12 bit relative to TLS 109 /* 12 bit relative to TLS \
block (LDR, STR). */ block (LDR, STR). */
#define R_ARM_TLS_LE12 \ #define R_ARM_TLS_LE12 \
110 /* 12 bit relative to static 110 /* 12 bit relative to static \
TLS block (LDR, STR). */ TLS block (LDR, STR). */
#define R_ARM_TLS_IE12GP \ #define R_ARM_TLS_IE12GP \
111 /* 12 bit GOT entry relative 111 /* 12 bit GOT entry relative \
to GOT origin (LDR). */ to GOT origin (LDR). */
#define R_ARM_ME_TOO 128 /* Obsolete. */ #define R_ARM_ME_TOO 128 /* Obsolete. */
#define R_ARM_THM_TLS_DESCSEQ 129 #define R_ARM_THM_TLS_DESCSEQ 129
#define R_ARM_THM_TLS_DESCSEQ16 129 #define R_ARM_THM_TLS_DESCSEQ16 129
#define R_ARM_THM_TLS_DESCSEQ32 130 #define R_ARM_THM_TLS_DESCSEQ32 130
#define R_ARM_THM_GOT_BREL12 \ #define R_ARM_THM_GOT_BREL12 \
131 /* GOT entry relative to GOT 131 /* GOT entry relative to GOT \
origin, 12 bit (Thumb32 LDR). */ origin, 12 bit (Thumb32 LDR). */
#define R_ARM_IRELATIVE 160 #define R_ARM_IRELATIVE 160
#define R_ARM_RXPC25 249 #define R_ARM_RXPC25 249
@@ -3540,63 +3540,63 @@ enum {
#define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */ #define R_390_PLTOFF64 36 /* 16 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ #define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL \ #define R_390_TLS_GDCALL \
38 /* Tag for function call in general 38 /* Tag for function call in general \
dynamic TLS code. */ dynamic TLS code. */
#define R_390_TLS_LDCALL \ #define R_390_TLS_LDCALL \
39 /* Tag for function call in local 39 /* Tag for function call in local \
dynamic TLS code. */ dynamic TLS code. */
#define R_390_TLS_GD32 \ #define R_390_TLS_GD32 \
40 /* Direct 32 bit for general dynamic 40 /* Direct 32 bit for general dynamic \
thread local data. */ thread local data. */
#define R_390_TLS_GD64 \ #define R_390_TLS_GD64 \
41 /* Direct 64 bit for general dynamic 41 /* Direct 64 bit for general dynamic \
thread local data. */ thread local data. */
#define R_390_TLS_GOTIE12 \ #define R_390_TLS_GOTIE12 \
42 /* 12 bit GOT offset for static TLS 42 /* 12 bit GOT offset for static TLS \
block offset. */ block offset. */
#define R_390_TLS_GOTIE32 \ #define R_390_TLS_GOTIE32 \
43 /* 32 bit GOT offset for static TLS 43 /* 32 bit GOT offset for static TLS \
block offset. */ block offset. */
#define R_390_TLS_GOTIE64 \ #define R_390_TLS_GOTIE64 \
44 /* 64 bit GOT offset for static TLS 44 /* 64 bit GOT offset for static TLS \
block offset. */ block offset. */
#define R_390_TLS_LDM32 \ #define R_390_TLS_LDM32 \
45 /* Direct 32 bit for local dynamic 45 /* Direct 32 bit for local dynamic \
thread local data in LE code. */ thread local data in LE code. */
#define R_390_TLS_LDM64 \ #define R_390_TLS_LDM64 \
46 /* Direct 64 bit for local dynamic 46 /* Direct 64 bit for local dynamic \
thread local data in LE code. */ thread local data in LE code. */
#define R_390_TLS_IE32 \ #define R_390_TLS_IE32 \
47 /* 32 bit address of GOT entry for 47 /* 32 bit address of GOT entry for \
negated static TLS block offset. */ negated static TLS block offset. */
#define R_390_TLS_IE64 \ #define R_390_TLS_IE64 \
48 /* 64 bit address of GOT entry for 48 /* 64 bit address of GOT entry for \
negated static TLS block offset. */ negated static TLS block offset. */
#define R_390_TLS_IEENT \ #define R_390_TLS_IEENT \
49 /* 32 bit rel. offset to GOT entry for 49 /* 32 bit rel. offset to GOT entry for \
negated static TLS block offset. */ negated static TLS block offset. */
#define R_390_TLS_LE32 \ #define R_390_TLS_LE32 \
50 /* 32 bit negated offset relative to 50 /* 32 bit negated offset relative to \
static TLS block. */ static TLS block. */
#define R_390_TLS_LE64 \ #define R_390_TLS_LE64 \
51 /* 64 bit negated offset relative to 51 /* 64 bit negated offset relative to \
static TLS block. */ static TLS block. */
#define R_390_TLS_LDO32 \ #define R_390_TLS_LDO32 \
52 /* 32 bit offset relative to TLS 52 /* 32 bit offset relative to TLS \
block. */ block. */
#define R_390_TLS_LDO64 \ #define R_390_TLS_LDO64 \
53 /* 64 bit offset relative to TLS 53 /* 64 bit offset relative to TLS \
block. */ block. */
#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ #define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ #define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
#define R_390_TLS_TPOFF \ #define R_390_TLS_TPOFF \
56 /* Negated offset in static TLS 56 /* Negated offset in static TLS \
block. */ block. */
#define R_390_20 57 /* Direct 20 bit. */ #define R_390_20 57 /* Direct 20 bit. */
#define R_390_GOT20 58 /* 20 bit GOT offset. */ #define R_390_GOT20 58 /* 20 bit GOT offset. */
#define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */ #define R_390_GOTPLT20 59 /* 20 bit offset to jump slot. */
#define R_390_TLS_GOTIE20 \ #define R_390_TLS_GOTIE20 \
60 /* 20 bit GOT offset for static TLS 60 /* 20 bit GOT offset for static TLS \
block offset. */ block offset. */
#define R_390_IRELATIVE 61 /* STT_GNU_IFUNC relocation. */ #define R_390_IRELATIVE 61 /* STT_GNU_IFUNC relocation. */
/* Keep this the last entry. */ /* Keep this the last entry. */
@@ -3637,7 +3637,7 @@ enum {
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */ #define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL \ #define R_X86_64_GOTPCREL \
9 /* 32 bit signed PC relative 9 /* 32 bit signed PC relative \
offset to GOT */ offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */ #define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ #define R_X86_64_32S 11 /* Direct 32 bit sign extended */
@@ -3649,35 +3649,35 @@ enum {
#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */ #define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */
#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */ #define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */
#define R_X86_64_TLSGD \ #define R_X86_64_TLSGD \
19 /* 32 bit signed PC relative offset 19 /* 32 bit signed PC relative offset \
to two GOT entries for GD symbol */ to two GOT entries for GD symbol */
#define R_X86_64_TLSLD \ #define R_X86_64_TLSLD \
20 /* 32 bit signed PC relative offset 20 /* 32 bit signed PC relative offset \
to two GOT entries for LD symbol */ to two GOT entries for LD symbol */
#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */ #define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
#define R_X86_64_GOTTPOFF \ #define R_X86_64_GOTTPOFF \
22 /* 32 bit signed PC relative offset 22 /* 32 bit signed PC relative offset \
to GOT entry for IE symbol */ to GOT entry for IE symbol */
#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */ #define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */
#define R_X86_64_PC64 24 /* PC relative 64 bit */ #define R_X86_64_PC64 24 /* PC relative 64 bit */
#define R_X86_64_GOTOFF64 25 /* 64 bit offset to GOT */ #define R_X86_64_GOTOFF64 25 /* 64 bit offset to GOT */
#define R_X86_64_GOTPC32 \ #define R_X86_64_GOTPC32 \
26 /* 32 bit signed pc relative 26 /* 32 bit signed pc relative \
offset to GOT */ offset to GOT */
#define R_X86_64_GOT64 27 /* 64-bit GOT entry offset */ #define R_X86_64_GOT64 27 /* 64-bit GOT entry offset */
#define R_X86_64_GOTPCREL64 \ #define R_X86_64_GOTPCREL64 \
28 /* 64-bit PC relative offset 28 /* 64-bit PC relative offset \
to GOT entry */ to GOT entry */
#define R_X86_64_GOTPC64 29 /* 64-bit PC relative offset to GOT */ #define R_X86_64_GOTPC64 29 /* 64-bit PC relative offset to GOT */
#define R_X86_64_GOTPLT64 30 /* like GOT64, says PLT entry needed */ #define R_X86_64_GOTPLT64 30 /* like GOT64, says PLT entry needed */
#define R_X86_64_PLTOFF64 \ #define R_X86_64_PLTOFF64 \
31 /* 64-bit GOT relative offset 31 /* 64-bit GOT relative offset \
to PLT entry */ to PLT entry */
#define R_X86_64_SIZE32 32 /* Size of symbol plus 32-bit addend */ #define R_X86_64_SIZE32 32 /* Size of symbol plus 32-bit addend */
#define R_X86_64_SIZE64 33 /* Size of symbol plus 64-bit addend */ #define R_X86_64_SIZE64 33 /* Size of symbol plus 64-bit addend */
#define R_X86_64_GOTPC32_TLSDESC 34 /* GOT offset for TLS descriptor. */ #define R_X86_64_GOTPC32_TLSDESC 34 /* GOT offset for TLS descriptor. */
#define R_X86_64_TLSDESC_CALL \ #define R_X86_64_TLSDESC_CALL \
35 /* Marker for call through TLS 35 /* Marker for call through TLS \
descriptor. */ descriptor. */
#define R_X86_64_TLSDESC 36 /* TLS descriptor. */ #define R_X86_64_TLSDESC 36 /* TLS descriptor. */
#define R_X86_64_IRELATIVE 37 /* Adjust indirectly by program base */ #define R_X86_64_IRELATIVE 37 /* Adjust indirectly by program base */
@@ -3685,12 +3685,12 @@ enum {
/* 39 Reserved was R_X86_64_PC32_BND */ /* 39 Reserved was R_X86_64_PC32_BND */
/* 40 Reserved was R_X86_64_PLT32_BND */ /* 40 Reserved was R_X86_64_PLT32_BND */
#define R_X86_64_GOTPCRELX \ #define R_X86_64_GOTPCRELX \
41 /* Load from 32 bit signed pc relative 41 /* Load from 32 bit signed pc relative \
offset to GOT entry without REX offset to GOT entry without REX \
prefix, relaxable. */ prefix, relaxable. */
#define R_X86_64_REX_GOTPCRELX \ #define R_X86_64_REX_GOTPCRELX \
42 /* Load from 32 bit signed pc relative 42 /* Load from 32 bit signed pc relative \
offset to GOT entry with REX prefix, offset to GOT entry with REX prefix, \
relaxable. */ relaxable. */
#define R_X86_64_NUM 43 #define R_X86_64_NUM 43
@@ -3732,22 +3732,22 @@ enum {
#define R_MN10300_TLS_LD 25 /* 32-bit offset for local dynamic. */ #define R_MN10300_TLS_LD 25 /* 32-bit offset for local dynamic. */
#define R_MN10300_TLS_LDO 26 /* Module-relative offset. */ #define R_MN10300_TLS_LDO 26 /* Module-relative offset. */
#define R_MN10300_TLS_GOTIE \ #define R_MN10300_TLS_GOTIE \
27 /* GOT offset for static TLS block 27 /* GOT offset for static TLS block \
offset. */ offset. */
#define R_MN10300_TLS_IE \ #define R_MN10300_TLS_IE \
28 /* GOT address for static TLS block 28 /* GOT address for static TLS block \
offset. */ offset. */
#define R_MN10300_TLS_LE \ #define R_MN10300_TLS_LE \
29 /* Offset relative to static TLS 29 /* Offset relative to static TLS \
block. */ block. */
#define R_MN10300_TLS_DTPMOD 30 /* ID of module containing symbol. */ #define R_MN10300_TLS_DTPMOD 30 /* ID of module containing symbol. */
#define R_MN10300_TLS_DTPOFF 31 /* Offset in module TLS block. */ #define R_MN10300_TLS_DTPOFF 31 /* Offset in module TLS block. */
#define R_MN10300_TLS_TPOFF 32 /* Offset in static TLS block. */ #define R_MN10300_TLS_TPOFF 32 /* Offset in static TLS block. */
#define R_MN10300_SYM_DIFF \ #define R_MN10300_SYM_DIFF \
33 /* Adjustment for next reloc as needed 33 /* Adjustment for next reloc as needed \
by linker relaxation. */ by linker relaxation. */
#define R_MN10300_ALIGN \ #define R_MN10300_ALIGN \
34 /* Alignment requirement for linker 34 /* Alignment requirement for linker \
relaxation. */ relaxation. */
#define R_MN10300_NUM 35 #define R_MN10300_NUM 35
@@ -3789,26 +3789,26 @@ enum {
#define R_M32R_GOTOFF 54 /* 24 bit offset to GOT */ #define R_M32R_GOTOFF 54 /* 24 bit offset to GOT */
#define R_M32R_GOTPC24 55 /* 24 bit PC relative offset to GOT */ #define R_M32R_GOTPC24 55 /* 24 bit PC relative offset to GOT */
#define R_M32R_GOT16_HI_ULO \ #define R_M32R_GOT16_HI_ULO \
56 /* High 16 bit GOT entry with unsigned 56 /* High 16 bit GOT entry with unsigned \
low */ low */
#define R_M32R_GOT16_HI_SLO \ #define R_M32R_GOT16_HI_SLO \
57 /* High 16 bit GOT entry with signed 57 /* High 16 bit GOT entry with signed \
low */ low */
#define R_M32R_GOT16_LO 58 /* Low 16 bit GOT entry */ #define R_M32R_GOT16_LO 58 /* Low 16 bit GOT entry */
#define R_M32R_GOTPC_HI_ULO \ #define R_M32R_GOTPC_HI_ULO \
59 /* High 16 bit PC relative offset to 59 /* High 16 bit PC relative offset to \
GOT with unsigned low */ GOT with unsigned low */
#define R_M32R_GOTPC_HI_SLO \ #define R_M32R_GOTPC_HI_SLO \
60 /* High 16 bit PC relative offset to 60 /* High 16 bit PC relative offset to \
GOT with signed low */ GOT with signed low */
#define R_M32R_GOTPC_LO \ #define R_M32R_GOTPC_LO \
61 /* Low 16 bit PC relative offset to 61 /* Low 16 bit PC relative offset to \
GOT */ GOT */
#define R_M32R_GOTOFF_HI_ULO \ #define R_M32R_GOTOFF_HI_ULO \
62 /* High 16 bit offset to GOT 62 /* High 16 bit offset to GOT \
with unsigned low */ with unsigned low */
#define R_M32R_GOTOFF_HI_SLO \ #define R_M32R_GOTOFF_HI_SLO \
63 /* High 16 bit offset to GOT 63 /* High 16 bit offset to GOT \
with signed low */ with signed low */
#define R_M32R_GOTOFF_LO 64 /* Low 16 bit offset to GOT */ #define R_M32R_GOTOFF_LO 64 /* Low 16 bit offset to GOT */
#define R_M32R_NUM 256 /* Keep this the last entry. */ #define R_M32R_NUM 256 /* Keep this the last entry. */
@@ -3871,7 +3871,7 @@ enum {
#define R_NIOS2_CJMP 19 /* Conditional branch. */ #define R_NIOS2_CJMP 19 /* Conditional branch. */
#define R_NIOS2_CALLR 20 /* Indirect call through register. */ #define R_NIOS2_CALLR 20 /* Indirect call through register. */
#define R_NIOS2_ALIGN \ #define R_NIOS2_ALIGN \
21 /* Alignment requirement for 21 /* Alignment requirement for \
linker relaxation. */ linker relaxation. */
#define R_NIOS2_GOT16 22 /* 16 bit GOT entry. */ #define R_NIOS2_GOT16 22 /* 16 bit GOT entry. */
#define R_NIOS2_CALL16 23 /* 16 bit GOT entry for function. */ #define R_NIOS2_CALL16 23 /* 16 bit GOT entry for function. */
@@ -4195,7 +4195,7 @@ enum {
/* RISC-V specific values for the st_other field. */ /* RISC-V specific values for the st_other field. */
#define STO_RISCV_VARIANT_CC \ #define STO_RISCV_VARIANT_CC \
0x80 /* Function uses variant calling 0x80 /* Function uses variant calling \
convention */ convention */
/* RISC-V specific values for the sh_type field. */ /* RISC-V specific values for the sh_type field. */

View File

@@ -1,269 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<doxygenlayout version="2.0">
<!-- Generated by doxygen 1.15.0 -->
<!-- Navigation index tabs for HTML output -->
<navindex>
<tab type="mainpage" visible="yes" title=""/>
<tab type="pages" visible="yes" title="" intro=""/>
<tab type="topics" visible="yes" title="" intro=""/>
<tab type="modules" visible="yes" title="" intro="">
<tab type="modulelist" visible="yes" title="" intro=""/>
<tab type="modulemembers" visible="yes" title="" intro=""/>
</tab>
<tab type="namespaces" visible="yes" title="">
<tab type="namespacelist" visible="yes" title="" intro=""/>
<tab type="namespacemembers" visible="yes" title="" intro=""/>
</tab>
<tab type="concepts" visible="yes" title="">
</tab>
<tab type="interfaces" visible="yes" title="">
<tab type="interfacelist" visible="yes" title="" intro=""/>
<tab type="interfaceindex" visible="$ALPHABETICAL_INDEX" title=""/>
<tab type="interfacehierarchy" visible="yes" title="" intro=""/>
</tab>
<tab type="classes" visible="yes" title="">
<tab type="classlist" visible="yes" title="" intro=""/>
<tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
<tab type="hierarchy" visible="yes" title="" intro=""/>
<tab type="classmembers" visible="yes" title="" intro=""/>
</tab>
<tab type="structs" visible="yes" title="">
<tab type="structlist" visible="yes" title="" intro=""/>
<tab type="structindex" visible="$ALPHABETICAL_INDEX" title=""/>
</tab>
<tab type="exceptions" visible="yes" title="">
<tab type="exceptionlist" visible="yes" title="" intro=""/>
<tab type="exceptionindex" visible="$ALPHABETICAL_INDEX" title=""/>
<tab type="exceptionhierarchy" visible="yes" title="" intro=""/>
</tab>
<tab type="files" visible="yes" title="">
<tab type="filelist" visible="yes" title="" intro=""/>
<tab type="globals" visible="yes" title="" intro=""/>
</tab>
<tab type="examples" visible="yes" title="" intro=""/>
</navindex>
<!-- Layout definition for a class page -->
<class>
<briefdescription visible="yes"/>
<includes visible="$SHOW_HEADERFILE"/>
<inheritancegraph visible="yes"/>
<collaborationgraph visible="yes"/>
<memberdecl>
<nestedclasses visible="yes" title=""/>
<publictypes visible="yes" title=""/>
<services visible="yes" title=""/>
<interfaces visible="yes" title=""/>
<publicslots visible="yes" title=""/>
<signals visible="yes" title=""/>
<publicmethods visible="yes" title=""/>
<publicstaticmethods visible="yes" title=""/>
<publicattributes visible="yes" title=""/>
<publicstaticattributes visible="yes" title=""/>
<protectedtypes visible="yes" title=""/>
<protectedslots visible="yes" title=""/>
<protectedmethods visible="yes" title=""/>
<protectedstaticmethods visible="yes" title=""/>
<protectedattributes visible="yes" title=""/>
<protectedstaticattributes visible="yes" title=""/>
<packagetypes visible="yes" title=""/>
<packagemethods visible="yes" title=""/>
<packagestaticmethods visible="yes" title=""/>
<packageattributes visible="yes" title=""/>
<packagestaticattributes visible="yes" title=""/>
<properties visible="yes" title=""/>
<events visible="yes" title=""/>
<privatetypes visible="yes" title=""/>
<privateslots visible="yes" title=""/>
<privatemethods visible="yes" title=""/>
<privatestaticmethods visible="yes" title=""/>
<privateattributes visible="yes" title=""/>
<privatestaticattributes visible="yes" title=""/>
<friends visible="yes" title=""/>
<related visible="yes" title="" subtitle=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription visible="yes" title=""/>
<memberdef>
<inlineclasses visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<enums visible="yes" title=""/>
<services visible="yes" title=""/>
<interfaces visible="yes" title=""/>
<constructors visible="yes" title=""/>
<functions visible="yes" title=""/>
<related visible="yes" title=""/>
<variables visible="yes" title=""/>
<properties visible="yes" title=""/>
<events visible="yes" title=""/>
</memberdef>
<allmemberslink visible="yes"/>
<usedfiles visible="$SHOW_USED_FILES"/>
<authorsection visible="yes"/>
</class>
<!-- Layout definition for a namespace page -->
<namespace>
<briefdescription visible="yes"/>
<memberdecl>
<nestednamespaces visible="yes" title=""/>
<constantgroups visible="yes" title=""/>
<interfaces visible="yes" title=""/>
<classes visible="yes" title=""/>
<concepts visible="yes" title=""/>
<structs visible="yes" title=""/>
<exceptions visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<sequences visible="yes" title=""/>
<dictionaries visible="yes" title=""/>
<enums visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<properties visible="yes" title=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription visible="yes" title=""/>
<memberdef>
<inlineclasses visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<sequences visible="yes" title=""/>
<dictionaries visible="yes" title=""/>
<enums visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<properties visible="yes" title=""/>
</memberdef>
<authorsection visible="yes"/>
</namespace>
<!-- Layout definition for a concept page -->
<concept>
<briefdescription visible="yes"/>
<includes visible="$SHOW_HEADERFILE"/>
<definition visible="yes" title=""/>
<detaileddescription visible="yes" title=""/>
<authorsection visible="yes"/>
</concept>
<!-- Layout definition for a file page -->
<file>
<briefdescription visible="yes"/>
<includes visible="$SHOW_INCLUDE_FILES"/>
<includegraph visible="yes"/>
<includedbygraph visible="yes"/>
<sourcelink visible="yes"/>
<memberdecl>
<interfaces visible="yes" title=""/>
<classes visible="yes" title=""/>
<structs visible="yes" title=""/>
<exceptions visible="yes" title=""/>
<namespaces visible="yes" title=""/>
<concepts visible="yes" title=""/>
<constantgroups visible="yes" title=""/>
<defines visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<sequences visible="yes" title=""/>
<dictionaries visible="yes" title=""/>
<enums visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<properties visible="yes" title=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription visible="yes" title=""/>
<memberdef>
<inlineclasses visible="yes" title=""/>
<defines visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<sequences visible="yes" title=""/>
<dictionaries visible="yes" title=""/>
<enums visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<properties visible="yes" title=""/>
</memberdef>
<authorsection/>
</file>
<!-- Layout definition for a group page -->
<group>
<briefdescription visible="yes"/>
<groupgraph visible="yes"/>
<memberdecl>
<nestedgroups visible="yes" title=""/>
<modules visible="yes" title=""/>
<dirs visible="yes" title=""/>
<files visible="yes" title=""/>
<namespaces visible="yes" title=""/>
<concepts visible="yes" title=""/>
<classes visible="yes" title=""/>
<defines visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<sequences visible="yes" title=""/>
<dictionaries visible="yes" title=""/>
<enums visible="yes" title=""/>
<enumvalues visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<signals visible="yes" title=""/>
<publicslots visible="yes" title=""/>
<protectedslots visible="yes" title=""/>
<privateslots visible="yes" title=""/>
<events visible="yes" title=""/>
<properties visible="yes" title=""/>
<friends visible="yes" title=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription visible="yes" title=""/>
<memberdef>
<pagedocs/>
<inlineclasses visible="yes" title=""/>
<defines visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<sequences visible="yes" title=""/>
<dictionaries visible="yes" title=""/>
<enums visible="yes" title=""/>
<enumvalues visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<signals visible="yes" title=""/>
<publicslots visible="yes" title=""/>
<protectedslots visible="yes" title=""/>
<privateslots visible="yes" title=""/>
<events visible="yes" title=""/>
<properties visible="yes" title=""/>
<friends visible="yes" title=""/>
</memberdef>
<authorsection visible="yes"/>
</group>
<!-- Layout definition for a C++20 module page -->
<module>
<briefdescription visible="yes"/>
<exportedmodules visible="yes"/>
<memberdecl>
<concepts visible="yes" title=""/>
<classes visible="yes" title=""/>
<enums visible="yes" title=""/>
<typedefs visible="yes" title=""/>
<functions visible="yes" title=""/>
<variables visible="yes" title=""/>
<membergroups visible="yes" title=""/>
</memberdecl>
<detaileddescription visible="yes" title=""/>
<memberdecl>
<files visible="yes"/>
</memberdecl>
</module>
<!-- Layout definition for a directory page -->
<directory>
<briefdescription visible="yes"/>
<directorygraph visible="yes"/>
<memberdecl>
<dirs visible="yes"/>
<files visible="yes"/>
</memberdecl>
<detaileddescription visible="yes" title=""/>
</directory>
</doxygenlayout>

View File

@@ -3,18 +3,20 @@
#include <libk/std.h> #include <libk/std.h>
#include <mm/liballoc.h> #include <mm/liballoc.h>
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/debug.h>
#if defined(__x86_64__) #if defined(__x86_64__)
#include <amd64/apic.h> #include <amd64/apic.h>
#include <amd64/intr.h> #include <amd64/intr.h>
#endif #endif
/* TODO: figure out a generic way to work with IRQs */ struct irq* irq_table[0x100];
static struct irq* irqs = NULL; static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
static spin_lock_t irqs_lock;
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
spin_lock_ctx_t ctxiqa;
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
struct irq* irq = malloc (sizeof (*irq)); struct irq* irq = malloc (sizeof (*irq));
if (irq == NULL) { if (irq == NULL) {
return false; return false;
@@ -23,44 +25,22 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
irq->func = func; irq->func = func;
irq->arg = arg; irq->arg = arg;
irq->irq_num = irq_num; irq->irq_num = irq_num;
irq->flags = flags;
spin_lock (&irqs_lock); spin_lock (&irqs_lock, &ctxiqa);
linklist_append (struct irq*, irqs, irq); irq_table[irq_num] = irq;
spin_unlock (&irqs_lock); spin_unlock (&irqs_lock, &ctxiqa);
#if defined(__x86_64__)
uint8_t resolution = amd64_resolve_irq (irq_num);
amd64_ioapic_route_irq (irq_num, resolution, 0, amd64_lapic_id ());
#endif
return true; return true;
} }
void irq_detach (void (*func) (void*, void*)) {
spin_lock (&irqs_lock);
struct irq *irq, *irq_tmp;
linklist_foreach (irqs, irq, irq_tmp) {
if ((uintptr_t)irq->func == (uintptr_t)func)
linklist_remove (struct irq*, irqs, irq);
}
spin_unlock (&irqs_lock);
}
struct irq* irq_find (uint32_t irq_num) { struct irq* irq_find (uint32_t irq_num) {
spin_lock (&irqs_lock); spin_lock_ctx_t ctxiqa;
spin_lock (&irqs_lock, &ctxiqa);
struct irq* irq = irq_table[irq_num];
spin_unlock (&irqs_lock, &ctxiqa);
struct irq *irq, *irq_tmp;
linklist_foreach (irqs, irq, irq_tmp) {
if (irq->irq_num == irq_num) {
spin_unlock (&irqs_lock);
return irq; return irq;
}
}
spin_unlock (&irqs_lock);
return NULL;
} }

View File

@@ -1,24 +1,20 @@
#ifndef _KERNEL_IRQ_IRQ_H #ifndef _KERNEL_IRQ_IRQ_H
#define _KERNEL_IRQ_IRQ_H #define _KERNEL_IRQ_IRQ_H
#include <libk/list.h>
#include <libk/std.h> #include <libk/std.h>
#define IRQ_INTERRUPT_SAFE (1 << 0)
#define IRQ_INTERRUPT_UNSAFE (1 << 1)
typedef void (*irq_func_t) (void* arg, void* regs); typedef void (*irq_func_t) (void* arg, void* regs);
struct irq { struct irq {
struct irq* next; struct list_node_link irqs_link;
irq_func_t func; irq_func_t func;
void* arg; void* arg;
uint32_t irq_num; uint32_t irq_num;
uint32_t flags;
}; };
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags); bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
void irq_detach (irq_func_t func);
struct irq* irq_find (uint32_t irq_num); struct irq* irq_find (uint32_t irq_num);
#endif // _KERNEL_IRQ_IRQ_H #endif // _KERNEL_IRQ_IRQ_H
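
For orientation, a minimal registration sketch against the new irq_attach ()/irq_find () API (illustrative only, not part of this change set; timer_tick, TIMER_IRQ_NUM and the header path are placeholders):

#include <irq/irq.h> /* assumed header path */

#define TIMER_IRQ_NUM 0x20 /* placeholder vector number */

/* placeholder handler matching irq_func_t */
static void timer_tick (void* arg, void* regs) {
    (void)arg, (void)regs;
    /* handle the interrupt */
}

static void example_irq_init (void) {
    irq_attach (&timer_tick, NULL, TIMER_IRQ_NUM);
    struct irq* irq = irq_find (TIMER_IRQ_NUM);
    (void)irq;
}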

15
kernel/libk/assert.h Normal file
View File

@@ -0,0 +1,15 @@
#ifndef _KERNEL_LIBK_ASSERT_H
#define _KERNEL_LIBK_ASSERT_H
#include <sys/spin.h>
#define assert(x) \
do { \
if (!(x)) { \
DEBUG ("%s ssertion failed\n", #x); \
spin (); \
__builtin_unreachable (); \
} \
} while (0)
#endif // _KERNEL_LIBK_ASSERT_H

View File

@@ -1,12 +1,19 @@
#ifndef _KERNEL_LIBK_LIST_H #ifndef _KERNEL_LIBK_LIST_H
#define _KERNEL_LIBK_LIST_H #define _KERNEL_LIBK_LIST_H
#define dlinklist_append(type, head, new) \ struct list_node_link {
struct list_node_link* next;
struct list_node_link* prev;
};
#define list_entry(ptr, type, member) ((type*)((char*)(ptr) - offsetof (type, member)))
#define list_append(head, new) \
do { \ do { \
if ((new) != NULL) { \ if ((new) != NULL) { \
(new)->next = NULL; \ (new)->next = NULL; \
if ((head) != NULL) { \ if ((head) != NULL) { \
type __tmp = (head); \ struct list_node_link* __tmp = (head); \
while (__tmp->next != NULL) { \ while (__tmp->next != NULL) { \
__tmp = __tmp->next; \ __tmp = __tmp->next; \
} \ } \
@@ -19,7 +26,7 @@
} \ } \
} while (0) } while (0)
#define dlinklist_prepend(head, new) \ #define list_prepend(head, new) \
do { \ do { \
if ((new) != NULL) { \ if ((new) != NULL) { \
(new)->prev = NULL; \ (new)->prev = NULL; \
@@ -31,7 +38,7 @@
} \ } \
} while (0) } while (0)
#define dlinklist_remove(head, ele) \ #define list_remove(head, ele) \
do { \ do { \
if ((ele) != NULL) { \ if ((ele) != NULL) { \
if ((ele)->prev != NULL) { \ if ((ele)->prev != NULL) { \
@@ -47,10 +54,10 @@
} \ } \
} while (0) } while (0)
#define dlinklist_find(type, head, out, propname, propvalue) \ #define list_find(head, out, propname, propvalue) \
do { \ do { \
(out) = NULL; \ (out) = NULL; \
type __tmp = (head); \ struct list_node_link* __tmp = (head); \
while (__tmp) { \ while (__tmp) { \
if (__tmp->propname == (propvalue)) { \ if (__tmp->propname == (propvalue)) { \
(out) = __tmp; \ (out) = __tmp; \
@@ -60,23 +67,23 @@
} \ } \
} while (0) } while (0)
#define dlinklist_foreach(head, var, tmp) \ #define list_foreach(head, var, tmp) \
for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \ for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
var = tmp, tmp = (var ? var->next : NULL)) var = tmp, tmp = (var ? var->next : NULL))
#define dlinklist_foreach_index(head, var, tmp, idx) \ #define list_foreach_index(head, var, tmp, idx) \
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \ for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
var = tmp, tmp = (var ? var->next : NULL), (idx)++) var = tmp, tmp = (var ? var->next : NULL), (idx)++)
#define dlinklist_foreach_index_limit(head, var, tmp, idx, max) \ #define list_foreach_index_limit(head, var, tmp, idx, max) \
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \ for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
var = tmp, tmp = (var ? var->next : NULL), (idx)++) var = tmp, tmp = (var ? var->next : NULL), (idx)++)
#define dlinklist_back(type, head, out) \ #define list_back(head, out) \
do { \ do { \
(out) = NULL; \ (out) = NULL; \
if ((head) != NULL) { \ if ((head) != NULL) { \
type __tmp = (head); \ struct list_node_link* __tmp = (head); \
while (__tmp->next != NULL) { \ while (__tmp->next != NULL) { \
__tmp = __tmp->next; \ __tmp = __tmp->next; \
} \ } \
@@ -84,11 +91,11 @@
} \ } \
} while (0) } while (0)
#define dlinklist_front(type, head, out) \ #define list_front(head, out) \
do { \ do { \
(out) = NULL; \ (out) = NULL; \
if ((head) != NULL) { \ if ((head) != NULL) { \
type __tmp = (head); \ struct list_node_link* __tmp = (head); \
while (__tmp->prev != NULL) { \ while (__tmp->prev != NULL) { \
__tmp = __tmp->prev; \ __tmp = __tmp->prev; \
} \ } \
@@ -96,7 +103,7 @@
} \ } \
} while (0) } while (0)
#define dlinklist_insert_after(head, pos, new) \ #define list_insert_after(head, pos, new) \
do { \ do { \
if ((pos) != NULL && (new) != NULL) { \ if ((pos) != NULL && (new) != NULL) { \
(new)->prev = (pos); \ (new)->prev = (pos); \
@@ -112,7 +119,7 @@
} \ } \
} while (0) } while (0)
#define dlinklist_insert_before(head, pos, new) \ #define list_insert_before(head, pos, new) \
do { \ do { \
if ((pos) != NULL && (new) != NULL) { \ if ((pos) != NULL && (new) != NULL) { \
(new)->next = (pos); \ (new)->next = (pos); \
@@ -130,11 +137,11 @@
} \ } \
} while (0) } while (0)
#define dlinklist_index_of(type, head, ele, out_idx) \ #define list_index_of(head, ele, out_idx) \
do { \ do { \
(out_idx) = -1; \ (out_idx) = -1; \
int __idx = 0; \ int __idx = 0; \
type __tmp = (head); \ struct list_node_link* __tmp = (head); \
while (__tmp != NULL) { \ while (__tmp != NULL) { \
if (__tmp == (ele)) { \ if (__tmp == (ele)) { \
(out_idx) = __idx; \ (out_idx) = __idx; \
@@ -145,11 +152,11 @@
} \ } \
} while (0) } while (0)
#define dlinklist_index_of_prop(type, head, propname, propvalue, out_idx) \ #define list_index_of_prop(head, propname, propvalue, out_idx) \
do { \ do { \
(out_idx) = -1; \ (out_idx) = -1; \
int __idx = 0; \ int __idx = 0; \
type __tmp = (head); \ struct list_node_link* __tmp = (head); \
while (__tmp != NULL) { \ while (__tmp != NULL) { \
if (__tmp->propname == (propvalue)) { \ if (__tmp->propname == (propvalue)) { \
(out_idx) = __idx; \ (out_idx) = __idx; \
@@ -160,109 +167,4 @@
} \ } \
} while (0) } while (0)
#define linklist_index_of(type, head, ele, out_idx) \
do { \
(out_idx) = -1; \
int __idx = 0; \
type __tmp = (head); \
while (__tmp != NULL) { \
if (__tmp == (ele)) { \
(out_idx) = __idx; \
break; \
} \
__tmp = __tmp->next; \
__idx++; \
} \
} while (0)
#define linklist_index_of_prop(type, head, propname, propvalue, out_idx) \
do { \
(out_idx) = -1; \
int __idx = 0; \
type __tmp = (head); \
while (__tmp != NULL) { \
if (__tmp->propname == (propvalue)) { \
(out_idx) = __idx; \
break; \
} \
__tmp = __tmp->next; \
__idx++; \
} \
} while (0)
#define linklist_append(type, head, new) \
do { \
if ((new) != NULL) { \
if ((head) != NULL) { \
type __tmp; \
(new)->next = NULL; \
__tmp = (head); \
while (__tmp->next != NULL) { \
__tmp = __tmp->next; \
} \
__tmp->next = (new); \
} else { \
(new)->next = NULL; \
(head) = (new); \
} \
} \
} while (0)
#define linklist_remove(type, head, ele) \
do { \
if ((head) != NULL && (ele) != NULL) { \
type __cur = (head); \
type __prev = NULL; \
while (__cur != NULL && __cur != (ele)) { \
__prev = __cur; \
__cur = __cur->next; \
} \
if (__cur == (ele)) { \
if (__prev != NULL) { \
__prev->next = __cur->next; \
} else { \
(head) = __cur->next; \
} \
(ele)->next = NULL; \
} \
} \
} while (0)
#define linklist_find(type, head, out, propname, propvalue) \
do { \
(out) = NULL; \
type __tmp = (head); \
while (__tmp) { \
if (__tmp->propname == (propvalue)) { \
(out) = __tmp; \
break; \
} \
__tmp = __tmp->next; \
} \
} while (0)
#define linklist_foreach(head, var, tmp) \
for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
var = tmp, tmp = (var ? var->next : NULL))
#define linklist_foreach_index(head, var, tmp, idx) \
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
#define linklist_foreach_index_limit(head, var, tmp, idx, max) \
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
#define linklist_back(type, head, out) \
do { \
(out) = NULL; \
if ((head) != NULL) { \
type __tmp = (head); \
while (__tmp->next != NULL) { \
__tmp = __tmp->next; \
} \
(out) = __tmp; \
} \
} while (0)
#endif // _KERNEL_LIBK_LIST_H #endif // _KERNEL_LIBK_LIST_H
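
A brief usage sketch of the new intrusive list API (illustrative only, not part of this change set; struct item is a placeholder type): elements embed a struct list_node_link, the head is a plain struct list_node_link*, and list_entry () recovers the containing object, mirroring how proc.c links processes into a CPU run queue.

#include <libk/list.h>

struct item {
    int value;
    struct list_node_link link; /* embedded link node */
};

static struct list_node_link* items = NULL; /* list head */

static void example_list_usage (struct item* it) {
    list_append (items, &it->link);

    struct list_node_link *node, *tmp;
    list_foreach (items, node, tmp) {
        struct item* cur = list_entry (node, struct item, link);
        (void)cur->value;
    }

    list_remove (items, &it->link);
}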

View File

@@ -16,42 +16,44 @@ struct rb_node_link {
#define rbtree_right(x) ((x)->right) #define rbtree_right(x) ((x)->right)
#define rbtree_color(x) ((x)->color) #define rbtree_color(x) ((x)->color)
#define rbtree_container_of(ptr, type, member) ((type*)((char*)(ptr) - offsetof (type, member))) #define rbtree_entry(node, type, member) ((type*)((char*)(node) - offsetof (type, member)))
#define rbtree_entry(node, type, member) rbtree_container_of (node, type, member) #define rbtree_node_color(x) ((x) ? (x)->color : RBTREE_BLACK)
#define rbtree_rotate_left(root_ptr, x) \ #define rbtree_rotate_left(root_ptr, x_node) \
do { \ do { \
struct rb_node_link* __y = (x)->right; \ struct rb_node_link* __x = (x_node); \
(x)->right = __y->left; \ struct rb_node_link* __y = __x->right; \
__x->right = __y->left; \
if (__y->left) \ if (__y->left) \
__y->left->parent = (x); \ __y->left->parent = __x; \
__y->parent = (x)->parent; \ __y->parent = __x->parent; \
if (!(x)->parent) \ if (!__x->parent) \
*(root_ptr) = __y; \ *(root_ptr) = __y; \
else if ((x) == (x)->parent->left) \ else if (__x == __x->parent->left) \
(x)->parent->left = __y; \ __x->parent->left = __y; \
else \ else \
(x)->parent->right = __y; \ __x->parent->right = __y; \
__y->left = (x); \ __y->left = __x; \
(x)->parent = __y; \ __x->parent = __y; \
} while (0) } while (0)
#define rbtree_rotate_right(root_ptr, y) \ #define rbtree_rotate_right(root_ptr, y_node) \
do { \ do { \
struct rb_node_link* __x = (y)->left; \ struct rb_node_link* __y = (y_node); \
(y)->left = __x->right; \ struct rb_node_link* __x = __y->left; \
__y->left = __x->right; \
if (__x->right) \ if (__x->right) \
__x->right->parent = (y); \ __x->right->parent = __y; \
__x->parent = (y)->parent; \ __x->parent = __y->parent; \
if (!(y)->parent) \ if (!__y->parent) \
*(root_ptr) = __x; \ *(root_ptr) = __x; \
else if ((y) == (y)->parent->right) \ else if (__y == __y->parent->right) \
(y)->parent->right = __x; \ __y->parent->right = __x; \
else \ else \
(y)->parent->left = __x; \ __y->parent->left = __x; \
__x->right = (y); \ __x->right = __y; \
(y)->parent = __x; \ __y->parent = __x; \
} while (0) } while (0)
#define rbtree_insert_fixup(root_ptr, z_node) \ #define rbtree_insert_fixup(root_ptr, z_node) \
@@ -60,7 +62,7 @@ struct rb_node_link {
while (__z->parent && __z->parent->color == RBTREE_RED) { \ while (__z->parent && __z->parent->color == RBTREE_RED) { \
if (__z->parent == __z->parent->parent->left) { \ if (__z->parent == __z->parent->parent->left) { \
struct rb_node_link* __y = __z->parent->parent->right; \ struct rb_node_link* __y = __z->parent->parent->right; \
if (__y && __y->color == RBTREE_RED) { \ if (rbtree_node_color (__y) == RBTREE_RED) { \
__z->parent->color = RBTREE_BLACK; \ __z->parent->color = RBTREE_BLACK; \
__y->color = RBTREE_BLACK; \ __y->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \ __z->parent->parent->color = RBTREE_RED; \
@@ -68,15 +70,15 @@ struct rb_node_link {
} else { \ } else { \
if (__z == __z->parent->right) { \ if (__z == __z->parent->right) { \
__z = __z->parent; \ __z = __z->parent; \
rbtree_rotate_left ((root_ptr), __z); \ rbtree_rotate_left (root_ptr, __z); \
} \ } \
__z->parent->color = RBTREE_BLACK; \ __z->parent->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \ __z->parent->parent->color = RBTREE_RED; \
rbtree_rotate_right ((root_ptr), __z->parent->parent); \ rbtree_rotate_right (root_ptr, __z->parent->parent); \
} \ } \
} else { \ } else { \
struct rb_node_link* __y = __z->parent->parent->left; \ struct rb_node_link* __y = __z->parent->parent->left; \
if (__y && __y->color == RBTREE_RED) { \ if (rbtree_node_color (__y) == RBTREE_RED) { \
__z->parent->color = RBTREE_BLACK; \ __z->parent->color = RBTREE_BLACK; \
__y->color = RBTREE_BLACK; \ __y->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \ __z->parent->parent->color = RBTREE_RED; \
@@ -84,11 +86,11 @@ struct rb_node_link {
} else { \ } else { \
if (__z == __z->parent->left) { \ if (__z == __z->parent->left) { \
__z = __z->parent; \ __z = __z->parent; \
rbtree_rotate_right ((root_ptr), __z); \ rbtree_rotate_right (root_ptr, __z); \
} \ } \
__z->parent->color = RBTREE_BLACK; \ __z->parent->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \ __z->parent->parent->color = RBTREE_RED; \
rbtree_rotate_left ((root_ptr), __z->parent->parent); \ rbtree_rotate_left (root_ptr, __z->parent->parent); \
} \ } \
} \ } \
} \ } \
@@ -110,8 +112,7 @@ struct rb_node_link {
__link = &((*__link)->right); \ __link = &((*__link)->right); \
} \ } \
__new->parent = __parent; \ __new->parent = __parent; \
__new->left = NULL; \ __new->left = __new->right = NULL; \
__new->right = NULL; \
__new->color = RBTREE_RED; \ __new->color = RBTREE_RED; \
*__link = __new; \ *__link = __new; \
rbtree_insert_fixup (root_ptr, __new); \ rbtree_insert_fixup (root_ptr, __new); \
@@ -124,7 +125,7 @@ struct rb_node_link {
while (__cur) { \ while (__cur) { \
type* __obj = rbtree_entry (__cur, type, member); \ type* __obj = rbtree_entry (__cur, type, member); \
if ((keyval) == __obj->keyfield) { \ if ((keyval) == __obj->keyfield) { \
(out) = __cur; \ (out) = rbtree_entry (__cur, type, member); \
break; \ break; \
} else if ((keyval) < __obj->keyfield) \ } else if ((keyval) < __obj->keyfield) \
__cur = __cur->left; \ __cur = __cur->left; \
@@ -136,91 +137,145 @@ struct rb_node_link {
#define rbtree_min(node, out) \ #define rbtree_min(node, out) \
do { \ do { \
(out) = NULL; \ (out) = NULL; \
if ((node)) { \
struct rb_node_link* __n = (node); \ struct rb_node_link* __n = (node); \
while (__n->left) \ while (__n && __n->left) \
__n = __n->left; \ __n = __n->left; \
(out) = __n; \ (out) = __n; \
} \
} while (0) } while (0)
#define rbtree_transplant(root_ptr, u, v) \ #define rbtree_max(node, out) \
do { \ do { \
if (!(u)->parent) \ (out) = NULL; \
*(root_ptr) = (v); \ struct rb_node_link* __n = (node); \
else if ((u) == (u)->parent->left) \ while (__n && __n->right) \
(u)->parent->left = (v); \ __n = __n->right; \
(out) = __n; \
} while (0)
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
#define rbtree_last(root_ptr, out) rbtree_max (*(root_ptr), out)
#define rbtree_transplant(root_ptr, u_node, v_node) \
do { \
struct rb_node_link* __u = (u_node); \
struct rb_node_link* __v = (v_node); \
if (!__u->parent) \
*(root_ptr) = __v; \
else if (__u == __u->parent->left) \
__u->parent->left = __v; \
else \ else \
(u)->parent->right = (v); \ __u->parent->right = __v; \
if (v) \ if (__v) \
(v)->parent = (u)->parent; \ __v->parent = __u->parent; \
} while (0) } while (0)
#define rbtree_delete_fixup(root_ptr, x_node, xparent_node) \ #define rbtree_delete_fixup(root_ptr, x_node, xparent_node) \
do { \ do { \
struct rb_node_link* __x = (x_node); \ struct rb_node_link* __rdf_x = (x_node); \
struct rb_node_link* __xparent = (xparent_node); \ struct rb_node_link* __rdf_xp = (xparent_node); \
while (__x != *(root_ptr) && (__x == NULL || __x->color == RBTREE_BLACK)) { \ while (__rdf_xp && (__rdf_x == NULL || __rdf_x->color == RBTREE_BLACK)) { \
if (__x == __xparent->left) { \ if (__rdf_x == __rdf_xp->left) { \
struct rb_node_link* __w = __xparent->right; \ struct rb_node_link* __w = __rdf_xp->right; \
if (__w && __w->color == RBTREE_RED) { \ if (rbtree_node_color (__w) == RBTREE_RED) { \
__w->color = RBTREE_BLACK; \ __w->color = RBTREE_BLACK; \
__xparent->color = RBTREE_RED; \ __rdf_xp->color = RBTREE_RED; \
rbtree_rotate_left (root_ptr, __xparent); \ rbtree_rotate_left (root_ptr, __rdf_xp); \
__w = __xparent->right; \ __w = __rdf_xp->right; \
} \ } \
if ((!__w->left || __w->left->color == RBTREE_BLACK) && \ if (rbtree_node_color (__w->left) == RBTREE_BLACK && \
(!__w->right || __w->right->color == RBTREE_BLACK)) { \ rbtree_node_color (__w->right) == RBTREE_BLACK) { \
if (__w) \
__w->color = RBTREE_RED; \ __w->color = RBTREE_RED; \
__x = __xparent; \ __rdf_x = __rdf_xp; \
__xparent = __x->parent; \ __rdf_xp = __rdf_x->parent; \
} else { \ } else { \
if (!__w->right || __w->right->color == RBTREE_BLACK) { \ if (rbtree_node_color (__w->right) == RBTREE_BLACK) { \
if (__w->left) \ if (__w->left) \
__w->left->color = RBTREE_BLACK; \ __w->left->color = RBTREE_BLACK; \
__w->color = RBTREE_RED; \ __w->color = RBTREE_RED; \
rbtree_rotate_right (root_ptr, __w); \ rbtree_rotate_right (root_ptr, __w); \
__w = __xparent->right; \ __w = __rdf_xp->right; \
} \ } \
__w->color = __xparent->color; \ __w->color = __rdf_xp->color; \
__xparent->color = RBTREE_BLACK; \ __rdf_xp->color = RBTREE_BLACK; \
if (__w->right) \ if (__w->right) \
__w->right->color = RBTREE_BLACK; \ __w->right->color = RBTREE_BLACK; \
rbtree_rotate_left (root_ptr, __xparent); \ rbtree_rotate_left (root_ptr, __rdf_xp); \
__x = *(root_ptr); \ __rdf_x = *(root_ptr); \
break; \
} \ } \
} else { \ } else { \
struct rb_node_link* __w = __xparent->left; \ struct rb_node_link* __w = __rdf_xp->left; \
if (__w && __w->color == RBTREE_RED) { \ if (rbtree_node_color (__w) == RBTREE_RED) { \
__w->color = RBTREE_BLACK; \ __w->color = RBTREE_BLACK; \
__xparent->color = RBTREE_RED; \ __rdf_xp->color = RBTREE_RED; \
rbtree_rotate_right (root_ptr, __xparent); \ rbtree_rotate_right (root_ptr, __rdf_xp); \
__w = __xparent->left; \ __w = __rdf_xp->left; \
} \ } \
if ((!__w->right || __w->right->color == RBTREE_BLACK) && \ if (rbtree_node_color (__w->right) == RBTREE_BLACK && \
(!__w->left || __w->left->color == RBTREE_BLACK)) { \ rbtree_node_color (__w->left) == RBTREE_BLACK) { \
if (__w) \
__w->color = RBTREE_RED; \ __w->color = RBTREE_RED; \
__x = __xparent; \ __rdf_x = __rdf_xp; \
__xparent = __x->parent; \ __rdf_xp = __rdf_x->parent; \
} else { \ } else { \
if (!__w->left || __w->left->color == RBTREE_BLACK) { \ if (rbtree_node_color (__w->left) == RBTREE_BLACK) { \
if (__w->right) \ if (__w->right) \
__w->right->color = RBTREE_BLACK; \ __w->right->color = RBTREE_BLACK; \
__w->color = RBTREE_RED; \ __w->color = RBTREE_RED; \
rbtree_rotate_left (root_ptr, __w); \ rbtree_rotate_left (root_ptr, __w); \
__w = __xparent->left; \ __w = __rdf_xp->left; \
} \ } \
__w->color = __xparent->color; \ __w->color = __rdf_xp->color; \
__xparent->color = RBTREE_BLACK; \ __rdf_xp->color = RBTREE_BLACK; \
if (__w->left) \ if (__w->left) \
__w->left->color = RBTREE_BLACK; \ __w->left->color = RBTREE_BLACK; \
rbtree_rotate_right (root_ptr, __xparent); \ rbtree_rotate_right (root_ptr, __rdf_xp); \
__x = *(root_ptr); \ __rdf_x = *(root_ptr); \
break; \
} \ } \
} \ } \
} \ } \
if (__x) \ if (__rdf_x) \
__x->color = RBTREE_BLACK; \ __rdf_x->color = RBTREE_BLACK; \
} while (0)
#define rbtree_delete(root_ptr, z_node) \
do { \
struct rb_node_link* __rd_z = (z_node); \
struct rb_node_link* __rd_y = __rd_z; \
struct rb_node_link* __rd_x = NULL; \
struct rb_node_link* __rd_xp = NULL; \
int __rd_y_orig_color = __rd_y->color; \
if (!__rd_z->left) { \
__rd_x = __rd_z->right; \
__rd_xp = __rd_z->parent; \
rbtree_transplant (root_ptr, __rd_z, __rd_z->right); \
} else if (!__rd_z->right) { \
__rd_x = __rd_z->left; \
__rd_xp = __rd_z->parent; \
rbtree_transplant (root_ptr, __rd_z, __rd_z->left); \
} else { \
rbtree_min (__rd_z->right, __rd_y); \
__rd_y_orig_color = __rd_y->color; \
__rd_x = __rd_y->right; \
if (__rd_y->parent == __rd_z) { \
__rd_xp = __rd_y; \
if (__rd_x) \
__rd_x->parent = __rd_y; \
} else { \
__rd_xp = __rd_y->parent; \
rbtree_transplant (root_ptr, __rd_y, __rd_y->right); \
__rd_y->right = __rd_z->right; \
__rd_y->right->parent = __rd_y; \
} \
rbtree_transplant (root_ptr, __rd_z, __rd_y); \
__rd_y->left = __rd_z->left; \
__rd_y->left->parent = __rd_y; \
__rd_y->color = __rd_z->color; \
} \
if (__rd_y_orig_color == RBTREE_BLACK) \
rbtree_delete_fixup (root_ptr, __rd_x, __rd_xp); \
} while (0) } while (0)
#define rbtree_next(node, out) \ #define rbtree_next(node, out) \
@@ -265,17 +320,4 @@ struct rb_node_link {
} \ } \
} while (0) } while (0)
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
#define rbtree_last(root_ptr, out) \
do { \
(out) = NULL; \
struct rb_node_link* __n = *(root_ptr); \
if (__n) { \
while (__n->right) \
__n = __n->right; \
(out) = __n; \
} \
} while (0)
#endif // _KERNEL_LIBK_RBTREE_H #endif // _KERNEL_LIBK_RBTREE_H
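
Likewise, a short sketch of the intrusive rbtree macros (illustrative only, not part of this change set; struct task is a placeholder), following the calling convention used by proc_register () and proc_find_pid ():

#include <libk/rbtree.h>

struct task {
    int id;
    struct rb_node_link tree_link; /* embedded tree node */
};

static struct rb_node_link* task_tree = NULL; /* tree root */

static void example_rbtree_usage (struct task* t) {
    /* insert keyed on the 'id' field */
    rbtree_insert (struct task, &task_tree, &t->tree_link, tree_link, id);

    /* lookup now yields the containing object directly */
    struct task* found = NULL;
    rbtree_find (struct task, &task_tree, t->id, found, tree_link, id);

    if (found)
        rbtree_delete (&task_tree, &found->tree_link);
}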

View File

@@ -1,6 +1,8 @@
#ifndef _KERNEL_LIBK_STRING_H #ifndef _KERNEL_LIBK_STRING_H
#define _KERNEL_LIBK_STRING_H #define _KERNEL_LIBK_STRING_H
#include <libk/std.h>
size_t memset (void* dst, uint8_t b, size_t n); size_t memset (void* dst, uint8_t b, size_t n);
size_t memcpy (void* dst, const void* src, size_t n); size_t memcpy (void* dst, const void* src, size_t n);
void strncpy (char* dst, const char* src, size_t n); void strncpy (char* dst, const char* src, size_t n);

View File

@@ -20,3 +20,4 @@ DECL_REQ (memmap, MEMMAP);
DECL_REQ (rsdp, RSDP); DECL_REQ (rsdp, RSDP);
DECL_REQ (mp, MP); DECL_REQ (mp, MP);
DECL_REQ (module, MODULE); DECL_REQ (module, MODULE);
DECL_REQ (framebuffer, FRAMEBUFFER);

View File

@@ -10,5 +10,6 @@ EXTERN_REQ (memmap);
EXTERN_REQ (rsdp); EXTERN_REQ (rsdp);
EXTERN_REQ (mp); EXTERN_REQ (mp);
EXTERN_REQ (module); EXTERN_REQ (module);
EXTERN_REQ (framebuffer);
#endif // _KERNEL_LIMINE_REQUESTS_H #endif // _KERNEL_LIMINE_REQUESTS_H

View File

@@ -11,13 +11,13 @@
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT; spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;
int liballoc_lock (void) { int liballoc_lock (void* ctx) {
spin_lock (&_liballoc_lock); spin_lock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
return 0; return 0;
} }
int liballoc_unlock (void) { int liballoc_unlock (void* ctx) {
spin_unlock (&_liballoc_lock); spin_unlock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
return 0; return 0;
} }
@@ -45,7 +45,7 @@ int liballoc_free (void* ptr, int pages) {
/** Durand's Ridiculously Amazing Super Duper Memory functions. */ /** Durand's Ridiculously Amazing Super Duper Memory functions. */
//#define DEBUG // #define DEBUG
#define LIBALLOC_MAGIC 0xc001c0de #define LIBALLOC_MAGIC 0xc001c0de
#define MAXCOMPLETE 5 #define MAXCOMPLETE 5
@@ -243,8 +243,9 @@ void* malloc (size_t size) {
int index; int index;
void* ptr; void* ptr;
struct boundary_tag* tag = NULL; struct boundary_tag* tag = NULL;
spin_lock_ctx_t ctxliba;
liballoc_lock (); liballoc_lock (&ctxliba);
if (l_initialized == 0) { if (l_initialized == 0) {
for (index = 0; index < MAXEXP; index++) { for (index = 0; index < MAXEXP; index++) {
@@ -272,7 +273,7 @@ void* malloc (size_t size) {
// No page found. Make one. // No page found. Make one.
if (tag == NULL) { if (tag == NULL) {
if ((tag = allocate_new_tag (size)) == NULL) { if ((tag = allocate_new_tag (size)) == NULL) {
liballoc_unlock (); liballoc_unlock (&ctxliba);
return NULL; return NULL;
} }
@@ -305,23 +306,24 @@ void* malloc (size_t size) {
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag)); ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
liballoc_unlock (); liballoc_unlock (&ctxliba);
return ptr; return ptr;
} }
void free (void* ptr) { void free (void* ptr) {
int index; int index;
struct boundary_tag* tag; struct boundary_tag* tag;
spin_lock_ctx_t ctxliba;
if (ptr == NULL) if (ptr == NULL)
return; return;
liballoc_lock (); liballoc_lock (&ctxliba);
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag)); tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
if (tag->magic != LIBALLOC_MAGIC) { if (tag->magic != LIBALLOC_MAGIC) {
liballoc_unlock (); // release the lock liballoc_unlock (&ctxliba); // release the lock
return; return;
} }
@@ -354,7 +356,7 @@ void free (void* ptr) {
liballoc_free (tag, pages); liballoc_free (tag, pages);
liballoc_unlock (); liballoc_unlock (&ctxliba);
return; return;
} }
@@ -365,7 +367,7 @@ void free (void* ptr) {
insert_tag (tag, index); insert_tag (tag, index);
liballoc_unlock (); liballoc_unlock (&ctxliba);
} }
void* calloc (size_t nobj, size_t size) { void* calloc (size_t nobj, size_t size) {
@@ -385,6 +387,7 @@ void* realloc (void* p, size_t size) {
void* ptr; void* ptr;
struct boundary_tag* tag; struct boundary_tag* tag;
int real_size; int real_size;
spin_lock_ctx_t ctxliba;
if (size == 0) { if (size == 0) {
free (p); free (p);
@@ -394,11 +397,11 @@ void* realloc (void* p, size_t size) {
return malloc (size); return malloc (size);
if (&liballoc_lock != NULL) if (&liballoc_lock != NULL)
liballoc_lock (); // lockit liballoc_lock (&ctxliba); // lockit
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag)); tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
real_size = tag->size; real_size = tag->size;
if (&liballoc_unlock != NULL) if (&liballoc_unlock != NULL)
liballoc_unlock (); liballoc_unlock (&ctxliba);
if ((size_t)real_size > size) if ((size_t)real_size > size)
real_size = size; real_size = size;

View File

@@ -47,7 +47,7 @@ struct boundary_tag {
* \return 0 if the lock was acquired successfully. Anything else is * \return 0 if the lock was acquired successfully. Anything else is
* failure. * failure.
*/ */
extern int liballoc_lock (); extern int liballoc_lock (void* ctx);
/** This function unlocks what was previously locked by the liballoc_lock /** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it * function. If it disabled interrupts, it enables interrupts. If it
@@ -55,7 +55,7 @@ extern int liballoc_lock ();
* *
* \return 0 if the lock was successfully released. * \return 0 if the lock was successfully released.
*/ */
extern int liballoc_unlock (); extern int liballoc_unlock (void* ctx);
/** This is the hook into the local system which allocates pages. It /** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages * accepts an integer parameter which is the number of pages

View File

@@ -38,8 +38,8 @@ void pmm_init (void) {
struct pmm_region* pmm_region = &pmm.regions[region]; struct pmm_region* pmm_region = &pmm.regions[region];
/* /*
* We need to calculate sizes for the pmm region and the bitmap. The bitmap MUSTN'T include its * We need to calculate sizes for the pmm region and the bitmap. The bitmap MUSTN'T include
* own region within the bit range. * its own region within the bit range.
* */ * */
size_t size = align_down (entry->length, PAGE_SIZE); size_t size = align_down (entry->length, PAGE_SIZE);
@@ -100,6 +100,8 @@ static size_t pmm_find_free_space (struct pmm_region* pmm_region, size_t nblks)
} }
physaddr_t pmm_alloc (size_t nblks) { physaddr_t pmm_alloc (size_t nblks) {
spin_lock_ctx_t ctxpmmr;
for (size_t region = 0; region < PMM_REGIONS_MAX; region++) { for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
struct pmm_region* pmm_region = &pmm.regions[region]; struct pmm_region* pmm_region = &pmm.regions[region];
@@ -107,7 +109,7 @@ physaddr_t pmm_alloc (size_t nblks) {
if (!(pmm_region->flags & PMM_REGION_ACTIVE)) if (!(pmm_region->flags & PMM_REGION_ACTIVE))
continue; continue;
spin_lock (&pmm_region->lock); spin_lock (&pmm_region->lock, &ctxpmmr);
/* Find starting bit of the free bit range */ /* Find starting bit of the free bit range */
size_t bit = pmm_find_free_space (pmm_region, nblks); size_t bit = pmm_find_free_space (pmm_region, nblks);
@@ -116,18 +118,19 @@ physaddr_t pmm_alloc (size_t nblks) {
if (bit != (size_t)-1) { if (bit != (size_t)-1) {
/* Mark it */ /* Mark it */
bm_set_region (&pmm_region->bm, bit, nblks); bm_set_region (&pmm_region->bm, bit, nblks);
spin_unlock (&pmm_region->lock); spin_unlock (&pmm_region->lock, &ctxpmmr);
return pmm_region->membase + bit * PAGE_SIZE; return pmm_region->membase + bit * PAGE_SIZE;
} }
spin_unlock (&pmm_region->lock); spin_unlock (&pmm_region->lock, &ctxpmmr);
} }
return PMM_ALLOC_ERR; return PMM_ALLOC_ERR;
} }
void pmm_free (physaddr_t p_addr, size_t nblks) { void pmm_free (physaddr_t p_addr, size_t nblks) {
spin_lock_ctx_t ctxpmmr;
/* Round down to nearest page boundary */ /* Round down to nearest page boundary */
physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE); physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE);
@@ -145,11 +148,11 @@ void pmm_free (physaddr_t p_addr, size_t nblks) {
size_t bit = div_align_up (addr, PAGE_SIZE); size_t bit = div_align_up (addr, PAGE_SIZE);
spin_lock (&pmm_region->lock); spin_lock (&pmm_region->lock, &ctxpmmr);
bm_clear_region (&pmm_region->bm, bit, nblks); bm_clear_region (&pmm_region->bm, bit, nblks);
spin_unlock (&pmm_region->lock); spin_unlock (&pmm_region->lock, &ctxpmmr);
break; break;
} }

10
kernel/proc/locks.txt Normal file
View File

@@ -0,0 +1,10 @@
Lock hierarchy for process scheduling:
1. proc_tree_lock
2. cpu->lock
3. procgroup->lock
4. proc->lock
5. sq->lock
1. procgroup_tree_lock
2. procgroup->lock
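
A minimal sketch of what the first ordering means in practice (illustrative only, not part of this change set), modeled on proc_register () and proc_sq_suspend (): outer locks in the hierarchy are always taken before inner ones and released in reverse, so two CPUs can never acquire the same pair of locks in opposite orders.

spin_lock_ctx_t ctxprtr, ctxcpu, ctxpr;

spin_lock (&proc_tree_lock, &ctxprtr); /* 1. */
spin_lock (&cpu->lock, &ctxcpu);       /* 2. */
spin_lock (&proc->lock, &ctxpr);       /* 4. (levels may be skipped, never reordered) */
/* ... manipulate the run queue / process state ... */
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
spin_unlock (&proc_tree_lock, &ctxprtr);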

130
kernel/proc/mutex.c Normal file
View File

@@ -0,0 +1,130 @@
#include <libk/assert.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <mm/liballoc.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
void proc_mutexes_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxrs;
spin_lock (&proc->procgroup->lock, &ctxpg);
struct rb_node_link* rnode;
rbtree_first (&proc->procgroup->resource_tree, rnode);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
rnode = next;
spin_lock (&resource->lock, &ctxrs);
if (resource->type != PR_MUTEX) {
spin_unlock (&resource->lock, &ctxrs);
continue;
}
if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
spin_unlock (&resource->lock, &ctxrs);
proc_mutex_unlock (proc, &resource->u.mutex);
}
}
spin_unlock (&proc->procgroup->lock, &ctxpg);
}
bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
struct proc_mutex* mutex = &resource->u.mutex;
spin_lock_ctx_t ctxmt, ctxsq;
spin_lock (&mutex->resource->lock, &ctxmt);
spin_lock (&mutex->suspension_q.lock, &ctxsq);
bool reschedule = PROC_NO_RESCHEDULE;
while (mutex->suspension_q.proc_list != NULL) {
struct list_node_link* node = mutex->suspension_q.proc_list;
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
struct proc* suspended_proc = sq_entry->proc;
/* we will relock during resume */
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);
/* reacquire */
spin_lock (&mutex->resource->lock, &ctxmt);
spin_lock (&mutex->suspension_q.lock, &ctxsq);
}
mutex->locked = false;
mutex->owner = NULL;
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
return reschedule;
}
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
spin_lock_ctx_t ctxmt;
spin_lock (&mutex->resource->lock, &ctxmt);
if (!mutex->locked || mutex->owner == proc) {
mutex->locked = true;
mutex->owner = proc;
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NO_RESCHEDULE;
}
return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
}
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
spin_lock_ctx_t ctxmt, ctxsq;
spin_lock (&mutex->resource->lock, &ctxmt);
if (mutex->owner != proc) {
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NO_RESCHEDULE;
}
spin_lock (&mutex->suspension_q.lock, &ctxsq);
struct list_node_link* node = mutex->suspension_q.proc_list;
if (node) {
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
struct proc* resumed_proc = sq_entry->proc;
mutex->owner = resumed_proc;
mutex->locked = true;
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
return proc_sq_resume (resumed_proc, sq_entry);
}
mutex->locked = false;
mutex->owner = NULL;
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NEED_RESCHEDULE;
}

23
kernel/proc/mutex.h Normal file
View File

@@ -0,0 +1,23 @@
#ifndef _KERNEL_PROC_MUTEX_H
#define _KERNEL_PROC_MUTEX_H
#include <libk/std.h>
#include <proc/suspension_q.h>
struct proc;
struct proc_resource;
struct proc_mutex {
struct proc_resource* resource;
bool locked;
struct proc_suspension_q suspension_q;
struct proc* owner;
};
bool proc_cleanup_resource_mutex (struct proc_resource* resource);
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
void proc_mutexes_cleanup (struct proc* proc);
#endif // _KERNEL_PROC_MUTEX_H
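
A hypothetical caller-side sketch (not part of this change set; sys_mutex_lock_example is a placeholder name) showing how the return value is meant to drive rescheduling: proc_mutex_lock () suspends the caller on the mutex's suspension queue when contended and reports PROC_NEED_RESCHEDULE.

#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/resource.h>

static long sys_mutex_lock_example (struct proc* proc, int rid) {
    struct proc_resource* resource = proc_create_resource_mutex (proc->procgroup, rid);
    if (resource == NULL)
        return -1;

    /* lock the mutex; pick a new process to run if the caller was suspended */
    if (proc_mutex_lock (proc, &resource->u.mutex) == PROC_NEED_RESCHEDULE)
        proc_sched ();

    return 0;
}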

View File

@@ -10,6 +10,8 @@
#include <mm/liballoc.h> #include <mm/liballoc.h>
#include <mm/pmm.h> #include <mm/pmm.h>
#include <proc/proc.h> #include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <rd/rd.h> #include <rd/rd.h>
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/debug.h> #include <sys/debug.h>
@@ -23,36 +25,19 @@
#include <amd64/intr_defs.h> #include <amd64/intr_defs.h>
#endif #endif
#define SCHED_REAP_FREQ 10
static struct rb_node_link* proc_tree = NULL; static struct rb_node_link* proc_tree = NULL;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT; static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
static atomic_int sched_cycles = 0;
static bool proc_check_elf (uint8_t* elf) { static bool proc_check_elf (uint8_t* elf) {
if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F'))) if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
return false; return false;
return true; return true;
} }
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags) {
struct proc_mapping* mapping = malloc (sizeof (*mapping));
mapping->paddr = start_paddr;
mapping->vaddr = start_vaddr;
mapping->size = pages * PAGE_SIZE;
flags &= ~MM_PD_LOCK; /* clear LOCK flag if present, because we lock manualy */
spin_lock (&proc->pd.lock);
linklist_append (struct proc_mapping*, proc->mappings, mapping);
for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
mm_map_page (&proc->pd, ppage, vpage, flags);
}
spin_unlock (&proc->pd.lock);
}
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) { struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
struct elf_aux aux; struct elf_aux aux;
@@ -77,19 +62,37 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE); size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
uintptr_t p_addr = pmm_alloc (blks);
if (p_addr == PMM_ALLOC_ERR)
DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT; uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
if (phdr->p_flags & PF_W) if (phdr->p_flags & PF_W)
pg_flags |= MM_PG_RW; pg_flags |= MM_PG_RW;
proc_map (proc, p_addr, v_addr, blks, pg_flags); uintptr_t p_addr;
procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
} break;
case PT_TLS: {
#if defined(__x86_64__)
if (phdr->p_memsz > 0) {
size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
size_t tls_size = align_up (phdr->p_memsz, tls_align);
size_t tls_total_needed = tls_size + sizeof (uintptr_t);
size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
proc->procgroup->tls.tls_tmpl_pages = blks;
proc->procgroup->tls.tls_tmpl_size = tls_size;
proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
phdr->p_filesz);
proc_init_tls (proc);
}
#endif
} break; } break;
} }
} }
@@ -97,11 +100,10 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
return aux; return aux;
} }
static struct proc* proc_spawn_rd (char* name) { struct proc* proc_spawn_rd (char* name) {
struct rd_file* rd_file = rd_get_file (name); struct rd_file* rd_file = rd_get_file (name);
bool ok = proc_check_elf (rd_file->content); bool ok = proc_check_elf (rd_file->content);
DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
if (!ok) if (!ok)
return NULL; return NULL;
@@ -109,92 +111,173 @@ static struct proc* proc_spawn_rd (char* name) {
return proc_from_elf (rd_file->content); return proc_from_elf (rd_file->content);
} }
static void proc_register (struct proc* proc, struct cpu* cpu) { struct proc* proc_find_pid (int pid) {
proc->cpu = cpu; spin_lock_ctx_t ctxprtr;
struct proc* proc = NULL;
spin_lock (&proc_tree_lock); spin_lock (&proc_tree_lock, &ctxprtr);
rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
spin_unlock (&proc_tree_lock, &ctxprtr);
spin_lock (&cpu->lock); return proc;
}
void proc_register (struct proc* proc, struct cpu* cpu1) {
spin_lock_ctx_t ctxcpu, ctxprtr;
proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
struct cpu* cpu = proc->cpu;
spin_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid); rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == NULL) if (cpu->proc_current == NULL)
cpu->proc_current = proc; cpu->proc_current = proc;
spin_unlock (&cpu->lock); spin_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&cpu->lock, &ctxcpu);
spin_unlock (&proc_tree_lock);
} }
static struct proc* proc_find_sched (void) { /* caller holds cpu->lock */
struct rb_node_link* node = NULL; static struct proc* proc_find_sched (struct cpu* cpu) {
struct proc* start = thiscpu->proc_current; if (!cpu->proc_run_q)
struct proc* proc = NULL; return NULL;
if (start) struct list_node_link *current, *start;
node = &start->cpu_run_q_link;
if (!node) if (cpu->proc_current)
rbtree_first (&thiscpu->proc_run_q, node); current = cpu->proc_current->cpu_run_q_link.next;
else
current = cpu->proc_run_q;
struct rb_node_link* first = node; if (!current)
while (node) { current = cpu->proc_run_q;
proc = rbtree_entry (node, struct proc, cpu_run_q_link);
start = current;
do {
struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
if (atomic_load (&proc->state) == PROC_READY) if (atomic_load (&proc->state) == PROC_READY)
return proc; return proc;
rbtree_next (node, node); current = current->next ? current->next : cpu->proc_run_q;
} while (current != start);
if (node == first)
break;
}
return NULL; return NULL;
} }
void proc_sched (void) { static void proc_reap (void) {
struct proc* next = NULL; struct proc* proc = NULL;
struct list_node_link* reap_list = NULL;
spin_lock_ctx_t ctxprtr;
spin_lock_ctx_t ctxpr;
spin_lock (&thiscpu->lock); spin_lock (&proc_tree_lock, &ctxprtr);
if (thiscpu->proc_run_q == NULL || thiscpu->proc_current == NULL) { struct rb_node_link* node;
spin_unlock (&thiscpu->lock); rbtree_first (&proc_tree, node);
goto idle;
while (node) {
struct rb_node_link* next;
rbtree_next (node, next);
proc = rbtree_entry (node, struct proc, proc_tree_link);
if (atomic_load (&proc->state) == PROC_DEAD) {
spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
list_append (reap_list, &proc->reap_link);
spin_unlock (&proc->lock, &ctxpr);
} }
next = proc_find_sched (); node = next;
}
if (next != NULL) spin_unlock (&proc_tree_lock, &ctxprtr);
thiscpu->proc_current = next;
spin_unlock (&thiscpu->lock); struct list_node_link *reap_link, *reap_link_tmp;
list_foreach (reap_list, reap_link, reap_link_tmp) {
proc = list_entry (reap_link, struct proc, reap_link);
if (next != NULL && atomic_load (&next->state) == PROC_READY) list_remove (reap_list, &proc->reap_link);
do_sched (next); DEBUG ("cleanup PID %d\n", proc->pid);
proc_cleanup (proc);
}
}
void proc_sched (void) {
spin_lock_ctx_t ctxcpu;
int s_cycles = atomic_fetch_add (&sched_cycles, 1);
if (s_cycles % SCHED_REAP_FREQ == 0)
proc_reap ();
struct proc* next = NULL;
struct cpu* cpu = thiscpu;
spin_lock (&cpu->lock, &ctxcpu);
next = proc_find_sched (cpu);
if (next) {
cpu->proc_current = next;
do_sched (next, &cpu->lock, &ctxcpu);
} else {
cpu->proc_current = NULL;
spin_unlock (&cpu->lock, &ctxcpu);
idle:
spin (); spin ();
}
} }
void proc_kill (struct proc* proc) { void proc_kill (struct proc* proc) {
/* mark for garbage collection */ spin_lock_ctx_t ctxpr, ctxcpu;
struct cpu* cpu = proc->cpu;
spin_lock (&proc->lock, &ctxpr);
atomic_store (&proc->state, PROC_DEAD); atomic_store (&proc->state, PROC_DEAD);
proc->cpu = NULL;
spin_unlock (&proc->lock, &ctxpr);
spin_lock (&cpu->lock, &ctxcpu);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
spin_unlock (&cpu->lock, &ctxcpu);
DEBUG ("killed PID %d\n", proc->pid);
cpu_request_sched (cpu);
} }
static void proc_irq_sched (void* arg, void* regs) { static void proc_irq_sched (void* arg, void* regs) {
(void)arg, (void)regs; (void)arg;
proc_sched (); proc_sched ();
} }
void proc_init (void) { void proc_init (void) {
struct proc* init = proc_spawn_rd ("init.exe");
proc_register (init, thiscpu);
#if defined(__x86_64__) #if defined(__x86_64__)
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE); irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
#endif #endif
do_sched (init); struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);
struct proc* init = proc_spawn_rd ("init.exe");
proc_register (init, NULL);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
} }

View File

@@ -3,8 +3,12 @@
#include <aux/compiler.h> #include <aux/compiler.h>
#include <aux/elf.h> #include <aux/elf.h>
#include <libk/list.h>
#include <libk/rbtree.h> #include <libk/rbtree.h>
#include <libk/std.h> #include <libk/std.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/mm.h> #include <sys/mm.h>
@@ -13,51 +17,41 @@
#include <amd64/proc.h> /* USTACK_SIZE */ #include <amd64/proc.h> /* USTACK_SIZE */
#endif #endif
/// Process is ready to run #define PROC_NEED_RESCHEDULE true
#define PROC_NO_RESCHEDULE false
/* process states */
#define PROC_READY 0 #define PROC_READY 0
/// Process marked garbage collection
#define PROC_DEAD 1 #define PROC_DEAD 1
#define PROC_SUSPENDED 2
/* process flags */
#define PROC_USTK_PREALLOC (1 << 0)
struct cpu; struct cpu;
struct proc_mapping {
struct proc_mapping* next;
uintptr_t paddr;
uintptr_t vaddr;
size_t size;
} PACKED;
struct procw;
struct proc { struct proc {
int pid; int pid;
struct rb_node_link proc_tree_link; struct rb_node_link proc_tree_link;
struct rb_node_link cpu_run_q_link; struct rb_node_link procgroup_memb_tree_link;
struct list_node_link cpu_run_q_link;
struct proc_mapping* mappings; /* pd.lock implicitly protects this field */ struct list_node_link reap_link;
struct list_node_link* sq_entries;
struct procgroup* procgroup;
struct proc_platformdata pdata; struct proc_platformdata pdata;
struct pd pd; uint32_t flags;
spin_lock_t lock; spin_lock_t lock;
struct cpu* cpu; struct cpu* cpu;
// struct procw* procw; /* link to it's global struct */
atomic_int state; atomic_int state;
uintptr_t uvaddr_argument;
}; };
/*
* struct proc is a member of a CPU's proc_run_q.
* struct procw is a process wrapper that is a member of
* a global process list.
*/
/* struct procw { */
/* struct procw* next; */
/* struct proc* proc; */
/* }; */
void proc_sched (void); void proc_sched (void);
void proc_kill (struct proc* proc); void proc_kill (struct proc* proc);
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags);
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf); struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
void proc_register (struct proc* proc, struct cpu* cpu);
struct proc* proc_find_pid (int pid);
struct proc* proc_spawn_rd (char* name);
void proc_init (void); void proc_init (void);
#endif // _KERNEL_PROC_PROC_H #endif // _KERNEL_PROC_PROC_H

218
kernel/proc/procgroup.c Normal file
View File

@@ -0,0 +1,218 @@
#include <libk/rbtree.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
static struct rb_node_link* procgroup_tree = NULL;
static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
static atomic_int pgids = 0;
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr) {
spin_lock_ctx_t ctxpg;
spin_lock (&procgroup->lock, &ctxpg);
vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
struct proc_mapping* mapping = malloc (sizeof (*mapping));
if (mapping == NULL) {
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}
uintptr_t paddr = pmm_alloc (pages);
if (paddr == PMM_ALLOC_ERR) {
free (mapping);
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}
if (out_paddr != NULL)
*out_paddr = paddr;
mapping->paddr = paddr;
mapping->vaddr = vaddr;
mapping->size = pages * PAGE_SIZE;
procgroup->map_base += pages * PAGE_SIZE;
list_append (procgroup->mappings, &mapping->proc_mappings_link);
for (uintptr_t vpage = vaddr, ppage = paddr; vpage < vaddr + pages * PAGE_SIZE;
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
mm_map_page (&procgroup->pd, ppage, vpage, flags);
}
spin_unlock (&procgroup->lock, &ctxpg);
return vaddr;
}
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false;
spin_lock_ctx_t ctxpg;
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;
spin_lock (&procgroup->lock, &ctxpg);
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
uintptr_t m_start = mapping->vaddr;
uintptr_t m_end = mapping->vaddr + mapping->size;
/* check overlap */
if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
uintptr_t free_vend = (end_vaddr < m_end) ? end_vaddr : m_end;
size_t free_size = free_vend - free_vstart;
uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);
pmm_free (ppage_to_free, free_size / PAGE_SIZE);
/* split in the middle */
if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
tail_mapping->vaddr = end_vaddr;
tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
tail_mapping->size = m_end - end_vaddr;
mapping->size = start_vaddr - m_start;
list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
&tail_mapping->proc_mappings_link);
used_tail_mapping = true;
break;
} else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) { /* shrink left */
size_t diff = end_vaddr - m_start;
mapping->vaddr += diff;
mapping->paddr += diff;
mapping->size -= diff;
} else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) { /* shrink right */
mapping->size = start_vaddr - m_start;
} else { /* full overlap */
list_remove (procgroup->mappings, &mapping->proc_mappings_link);
free (mapping);
}
}
}
if (!used_tail_mapping)
free (tail_mapping);
for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
mm_unmap_page (&procgroup->pd, vpage);
}
spin_unlock (&procgroup->lock, &ctxpg);
return true;
}
struct procgroup* procgroup_create (void) {
spin_lock_ctx_t ctxpgtr;
struct procgroup* procgroup = malloc (sizeof (*procgroup));
if (procgroup == NULL) {
return NULL;
}
procgroup->refs = 0;
procgroup->memb_proc_tree = NULL;
procgroup->lock = SPIN_LOCK_INIT;
procgroup->pgid = atomic_fetch_add (&pgids, 1);
procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
procgroup->map_base = PROC_MAP_BASE;
spin_lock (&procgroup_tree_lock, &ctxpgtr);
rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
procgroup_tree_link, pgid);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
return procgroup;
}
void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxpr;
spin_lock (&procgroup->lock, &ctxpg);
spin_lock (&proc->lock, &ctxpr);
rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
procgroup_memb_tree_link, pid);
atomic_fetch_add (&procgroup->refs, 1);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
}
void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr;
spin_lock (&procgroup->lock, &ctxpg);
spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
int refs = atomic_fetch_sub (&procgroup->refs, 1);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
if (refs == 1) {
spin_lock (&procgroup_tree_lock, &ctxpgtr);
spin_lock (&procgroup->lock, &ctxpg);
rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
spin_unlock (&procgroup->lock, &ctxpg);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
/* delete resources */
struct rb_node_link* rnode;
rbtree_first (&procgroup->resource_tree, rnode);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
struct proc_resource* resource =
rbtree_entry (rnode, struct proc_resource, resource_tree_link);
rnode = next;
proc_delete_resource (resource);
}
struct list_node_link *mapping_link, *mapping_link_tmp;
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
free (mapping);
}
pmm_free (procgroup->pd.cr3_paddr, 1);
free (procgroup->tls.tls_tmpl);
free (procgroup);
}
}

43
kernel/proc/procgroup.h Normal file
View File

@@ -0,0 +1,43 @@
#ifndef _KERNEL_PROC_PROCGROUP_H
#define _KERNEL_PROC_PROCGROUP_H
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/procgroup.h>
struct proc;
struct proc_mapping {
struct list_node_link proc_mappings_link;
uintptr_t paddr;
uintptr_t vaddr;
size_t size;
};
struct procgroup {
int pgid;
struct rb_node_link procgroup_tree_link;
struct rb_node_link* memb_proc_tree;
spin_lock_t lock;
atomic_int refs;
struct rb_node_link* resource_tree;
atomic_int sys_rids;
struct pd pd;
struct list_node_link* mappings;
uintptr_t map_base;
struct procgroup_tls tls;
};
struct procgroup* procgroup_create (void);
void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr);
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
#endif // _KERNEL_PROC_PROCGROUP_H
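
A hypothetical sketch of how these pieces fit together (illustrative only, not part of this change set; the surrounding proc is assumed to exist): create a group, attach a process to it, and let procgroup_map () pick a user virtual address from map_base by passing vaddr == 0.

struct procgroup* pg = procgroup_create ();
if (pg != NULL) {
    procgroup_attach (pg, proc);

    /* map 4 anonymous user pages; the group chooses the vaddr */
    uintptr_t paddr;
    uintptr_t vaddr = procgroup_map (pg, 0, 4, MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, &paddr);
    (void)vaddr;
}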

59
kernel/proc/resource.c Normal file
View File

@@ -0,0 +1,59 @@
#include <libk/assert.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid) {
spin_lock_ctx_t ctxpg;
struct proc_resource* resource = NULL;
spin_lock (&procgroup->lock, &ctxpg);
rbtree_find (struct proc_resource, &procgroup->resource_tree, rid, resource, resource_tree_link,
rid);
spin_unlock (&procgroup->lock, &ctxpg);
return resource;
}
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) {
spin_lock_ctx_t ctxpg;
struct proc_resource* resource;
resource = proc_find_resource (procgroup, rid);
if (resource != NULL)
return resource;
resource = malloc (sizeof (*resource));
if (resource == NULL)
return NULL;
memset (resource, 0, sizeof (*resource));
resource->lock = SPIN_LOCK_INIT;
resource->ops.cleanup = &proc_cleanup_resource_mutex;
resource->u.mutex.resource = resource;
resource->rid = rid;
resource->type = PR_MUTEX;
spin_lock (&procgroup->lock, &ctxpg);
rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
resource_tree_link, rid);
spin_unlock (&procgroup->lock, &ctxpg);
return resource;
}
bool proc_delete_resource (struct proc_resource* resource) {
bool reschedule = resource->ops.cleanup (resource);
free (resource);
return reschedule;
}

kernel/proc/resource.h Normal file

@@ -0,0 +1,32 @@
#ifndef _KERNEL_PROC_RESOURCE_H
#define _KERNEL_PROC_RESOURCE_H
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/mutex.h>
#include <sync/spin_lock.h>
#define PR_MUTEX 1
struct proc;
struct procgroup;
struct proc_resource {
int type;
int rid;
spin_lock_t lock;
struct rb_node_link resource_tree_link;
union {
struct proc_mutex mutex;
} u;
struct {
bool (*cleanup) (struct proc_resource* resource);
} ops;
};
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
bool proc_delete_resource (struct proc_resource* resource);
#endif // _KERNEL_PROC_RESOURCE_H
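The union keyed by type together with the ops.cleanup hook is what keeps proc_delete_resource () type-agnostic. A small illustrative helper for type-checked access to the union (find_mutex is not part of the diff):

#include <proc/procgroup.h>
#include <proc/resource.h>

/* Sketch: look up a resource by rid and only hand out the mutex payload
 * if the type tag matches. */
struct proc_mutex* find_mutex (struct procgroup* procgroup, int rid) {
  struct proc_resource* resource = proc_find_resource (procgroup, rid);
  if (resource == NULL || resource->type != PR_MUTEX)
    return NULL;                   /* absent, or a different resource type */
  return &resource->u.mutex;
}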


@@ -1,3 +1,11 @@
-c += proc/proc.c
+c += proc/proc.c \
+  proc/resource.c \
+  proc/mutex.c \
+  proc/procgroup.c \
+  proc/suspension_q.c
-o += proc/proc.o
+o += proc/proc.o \
+  proc/resource.o \
+  proc/mutex.o \
+  proc/procgroup.o \
+  proc/suspension_q.o

kernel/proc/suspension_q.c Normal file

@@ -0,0 +1,111 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return PROC_NO_RESCHEDULE;
}
sq_entry->proc = proc;
sq_entry->sq = sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
spin_unlock (resource_lock, ctxrl);
atomic_store (&proc->state, PROC_SUSPENDED);
/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);
/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
proc->cpu = NULL;
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
return PROC_NEED_RESCHEDULE;
}
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
proc->cpu = cpu;
if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
free (sq_entry);
return PROC_NEED_RESCHEDULE;
}
void proc_sqs_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;
spin_lock (&proc->lock, &ctxpr);
/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
spin_unlock (&sq->lock, &ctxsq);
free (sq_entry);
}
spin_unlock (&proc->lock, &ctxpr);
}


@@ -0,0 +1,26 @@
#ifndef _KERNEL_PROC_SUSPENSION_Q_H
#define _KERNEL_PROC_SUSPENSION_Q_H
#include <libk/list.h>
#include <sync/spin_lock.h>
struct proc;
struct proc_suspension_q {
struct list_node_link* proc_list;
spin_lock_t lock;
};
struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};
void proc_sqs_cleanup (struct proc* proc);
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl);
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
#endif // _KERNEL_PROC_SUSPENSION_Q_H
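The calling convention is visible from the suspend signature: the caller already holds the spin lock of the resource it blocks on and hands it in so proc_sq_suspend () can release it only after the CPU, process and queue locks are taken, and the bool result tells the caller whether to reschedule. A sketch of a blocking primitive built this way; wait_gate is purely illustrative (the real proc_mutex_lock in proc/mutex.c is not shown here) and PROC_NO_RESCHEDULE/PROC_NEED_RESCHEDULE are assumed to come from proc/proc.h:

#include <proc/proc.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>

struct wait_gate {
  spin_lock_t lock;               /* protects 'taken' */
  bool taken;
  struct proc_suspension_q sq;    /* procs parked on the gate */
};

bool gate_acquire (struct proc* proc, struct wait_gate* gate) {
  spin_lock_ctx_t ctx;
  spin_lock (&gate->lock, &ctx);
  if (!gate->taken) {
    gate->taken = true;
    spin_unlock (&gate->lock, &ctx);
    return PROC_NO_RESCHEDULE;    /* fast path, nothing to wait for */
  }
  /* releases gate->lock internally, parks proc on gate->sq and pulls it
   * off its CPU run queue; the caller reschedules on PROC_NEED_RESCHEDULE */
  return proc_sq_suspend (proc, &gate->sq, &gate->lock, &ctx);
}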


@@ -3,15 +3,15 @@
 #include <sys/irq.h>
 #include <sys/spin_lock.h>
-void spin_lock (spin_lock_t* sl) {
-  irq_save ();
+void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
+  irq_save (ctx);
   while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire))
     spin_lock_relax ();
 }
-void spin_unlock (spin_lock_t* sl) {
+void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
   atomic_flag_clear_explicit (sl, memory_order_release);
-  irq_restore ();
+  irq_restore (ctx);
 }


@@ -2,12 +2,13 @@
 #define _KERNEL_SYNC_SPIN_LOCK_H
 #include <libk/std.h>
+#include <sys/spin_lock.h>
 #define SPIN_LOCK_INIT ATOMIC_FLAG_INIT
 typedef atomic_flag spin_lock_t;
-void spin_lock (spin_lock_t* sl);
-void spin_unlock (spin_lock_t* sl);
+void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
+void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
 #endif // _KERNEL_SYNC_SPIN_LOCK_H
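Each acquisition now carries its own saved-IRQ context instead of implicit global state, which is what allows the nested lock pairs used in procgroup.c and suspension_q.c. A minimal usage sketch, assuming irq_save () stashes the interrupt-flag state in the context:

#include <sync/spin_lock.h>

static spin_lock_t lock_a = SPIN_LOCK_INIT;
static spin_lock_t lock_b = SPIN_LOCK_INIT;

void nested_critical_section (void) {
  spin_lock_ctx_t ctxa, ctxb;
  spin_lock (&lock_a, &ctxa);     /* IRQ state saved into ctxa */
  spin_lock (&lock_b, &ctxb);
  /* ... work under both locks ... */
  spin_unlock (&lock_b, &ctxb);
  spin_unlock (&lock_a, &ctxa);   /* restores the state that was saved first */
}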


@@ -1,7 +1,9 @@
 #ifndef _KERNEL_SYS_IRQ_H
 #define _KERNEL_SYS_IRQ_H
-void irq_save (void);
-void irq_restore (void);
+#include <sys/spin_lock.h>
+void irq_save (spin_lock_ctx_t* ctx);
+void irq_restore (spin_lock_ctx_t* ctx);
 #endif // _KERNEL_SYS_IRQ_H
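For reference, a sketch of what the amd64 side of these declarations could look like; the project's actual irq_save/irq_restore live in the arch tree and are not part of this diff:

#include <sys/spin_lock.h>

/* Sketch: save RFLAGS into the per-call-site context and disable IRQs,
 * then restore exactly what was saved. */
void irq_save (spin_lock_ctx_t* ctx) {
  __asm__ volatile ("pushfq\n\tpopq %0\n\tcli" : "=r"(*ctx) : : "memory");
}
void irq_restore (spin_lock_ctx_t* ctx) {
  __asm__ volatile ("pushq %0\n\tpopfq" : : "r"(*ctx) : "memory", "cc");
}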


@@ -2,6 +2,7 @@
 #define _KERNEL_SYS_MM_H
 #include <libk/std.h>
+#include <sync/spin_lock.h>
 #if defined(__x86_64__)
 #include <amd64/mm.h>
@@ -10,17 +11,19 @@
 #define MM_PG_PRESENT (1 << 0)
 #define MM_PG_RW (1 << 1)
 #define MM_PG_USER (1 << 2)
-#define MM_PD_RELOAD (1 << 30)
-#define MM_PD_LOCK (1 << 31)
 uintptr_t mm_alloc_user_pd_phys (void);
-void mm_reload (void);
+void mm_kernel_lock (spin_lock_ctx_t* ctx);
+void mm_kernel_unlock (spin_lock_ctx_t* ctx);
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
 void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
-void mm_lock_kernel (void);
-void mm_unlock_kernel (void);
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
+void mm_unmap_kernel_page (uintptr_t vaddr);
+bool mm_validate (struct pd* pd, uintptr_t vaddr);
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
+struct pd* mm_get_kernel_pd (void);
 void mm_init (void);
 #endif // _KERNEL_SYS_MM_H


@@ -1,8 +1,14 @@
 #ifndef _KERNEL_SYS_PROC_H
 #define _KERNEL_SYS_PROC_H
+#include <libk/std.h>
 struct proc;
 struct proc* proc_from_elf (uint8_t* elf_contents);
+struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+                         uintptr_t argument_ptr);
+void proc_cleanup (struct proc* proc);
+void proc_init_tls (struct proc* proc);
 #endif // _KERNEL_SYS_PROC_H

kernel/sys/procgroup.h Normal file

@@ -0,0 +1,8 @@
#ifndef _KERNEL_SYS_PROCGROUP_H
#define _KERNEL_SYS_PROCGROUP_H
#if defined(__x86_64__)
#include <amd64/procgroup.h>
#endif
#endif // _KERNEL_SYS_PROCGROUP_H


@@ -4,6 +4,6 @@
 #include <libk/std.h>
 #include <proc/proc.h>
-void do_sched (struct proc* proc);
+void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu);
 #endif // _KERNEL_SYS_SCHED_H


@@ -1,6 +1,12 @@
 #ifndef _KERNEL_SYS_SPIN_LOCK_H
 #define _KERNEL_SYS_SPIN_LOCK_H
+#include <libk/std.h>
+#if defined(__x86_64__)
+typedef uint64_t spin_lock_ctx_t;
+#endif
 void spin_lock_relax (void);
 #endif // _KERNEL_SYS_SPIN_LOCK_H


@@ -1,28 +1,180 @@
 #include <aux/compiler.h>
+#include <libk/assert.h>
 #include <libk/std.h>
+#include <limine/requests.h>
+#include <m/status.h>
 #include <m/syscall_defs.h>
+#include <mm/pmm.h>
+#include <proc/mutex.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
+#include <proc/resource.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/mm.h>
+#include <sys/proc.h>
 #include <syscall/syscall.h>
 #define DEFINE_SYSCALL(name) \
-  int name (struct proc* proc, uintptr_t UNUSED a1, uintptr_t UNUSED a2, uintptr_t UNUSED a3, \
-            uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
-DEFINE_SYSCALL (sys_proc_quit) {
-  proc_kill (proc);
-  proc_sched ();
-  return SR_OK;
+  uintptr_t name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, \
+                  uintptr_t UNUSED a2, uintptr_t UNUSED a3, uintptr_t UNUSED a4, \
+                  uintptr_t UNUSED a5, uintptr_t UNUSED a6)
+#define SYSRESULT(x) ((uintptr_t)(x))
+static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
+  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+  spin_lock_ctx_t ctxpg;
+  spin_lock (&proc->procgroup->lock, &ctxpg);
+  if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
+    spin_unlock (&proc->procgroup->lock, &ctxpg);
+    return NULL;
+  }
+  uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);
+  spin_unlock (&proc->procgroup->lock, &ctxpg);
+  uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;
+  return (void*)out_kvaddr;
 }
-DEFINE_SYSCALL (sys_proc_test) {
-  DEBUG ("test syscall message!\n");
-  return SR_OK;
+/* int quit (void) */
+DEFINE_SYSCALL (sys_quit) {
+  proc_kill (proc);
+  return SYSRESULT (ST_OK);
+}
+/* int test (void) */
+DEFINE_SYSCALL (sys_test) {
+  char c = (char)a1;
+  DEBUG ("test syscall from %d! %c\n", proc->pid, c);
+  return SYSRESULT (ST_OK);
+}
+/* int map (uintptr_t vaddr, size_t pages, uint32_t flags) */
+DEFINE_SYSCALL (sys_map) {
+  uintptr_t vaddr = a1;
+  size_t pages = (size_t)a2;
+  uint32_t flags = (uint32_t)a3;
+  if (vaddr % PAGE_SIZE != 0)
+    return SYSRESULT (-ST_UNALIGNED);
+  return SYSRESULT (procgroup_map (proc->procgroup, vaddr, pages, flags, NULL));
+}
+/* int unmap (uintptr_t vaddr, size_t pages) */
+DEFINE_SYSCALL (sys_unmap) {
+  uintptr_t vaddr = a1;
+  size_t pages = (size_t)a2;
+  if (vaddr % PAGE_SIZE != 0)
+    return SYSRESULT (-ST_UNALIGNED);
+  return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
+}
+/* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
+DEFINE_SYSCALL (sys_clone) {
+  uintptr_t vstack_top = a1;
+  uintptr_t entry = a2;
+  uintptr_t argument_ptr = a3;
+  struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);
+  if (new == NULL) {
+    return SYSRESULT (-ST_OOM_ERROR);
+  }
+  int pid = new->pid;
+  proc_register (new, NULL);
+  return SYSRESULT (pid);
+}
+/* void* argument_ptr (void) */
+DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
+/* int sched (void) */
+DEFINE_SYSCALL (sys_sched) {
+  proc_sched ();
+  return SYSRESULT (ST_OK);
+}
+/* int mutex_create (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_create) {
+  int mutex_rid = (int)a1;
+  struct proc_resource* mutex_resource = proc_create_resource_mutex (proc->procgroup, mutex_rid);
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_OOM_ERROR);
+  return SYSRESULT (mutex_resource->rid);
+}
+/* int mutex_delete (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_delete) {
+  int mutex_rid = (int)a1;
+  struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_NOT_FOUND);
+  if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+  return SYSRESULT (ST_OK);
+}
+/* int mutex_lock (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_lock) {
+  int mutex_rid = (int)a1;
+  struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_NOT_FOUND);
+  if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+  return SYSRESULT (ST_OK);
+}
+/* int mutex_unlock (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_unlock) {
+  int mutex_rid = (int)a1;
+  struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_NOT_FOUND);
+  if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+  return SYSRESULT (ST_OK);
 }
 static syscall_handler_func_t handler_table[] = {
-  [SYS_PROC_QUIT] = &sys_proc_quit,
-  [SYS_PROC_TEST] = &sys_proc_test,
+  [SYS_QUIT] = &sys_quit,
+  [SYS_TEST] = &sys_test,
+  [SYS_MAP] = &sys_map,
+  [SYS_UNMAP] = &sys_unmap,
+  [SYS_CLONE] = &sys_clone,
+  [SYS_ARGUMENT_PTR] = &sys_argument_ptr,
+  [SYS_SCHED] = &sys_sched,
+  [SYS_MUTEX_CREATE] = &sys_mutex_create,
+  [SYS_MUTEX_DELETE] = &sys_mutex_delete,
+  [SYS_MUTEX_LOCK] = &sys_mutex_lock,
+  [SYS_MUTEX_UNLOCK] = &sys_mutex_unlock,
 };
 syscall_handler_func_t syscall_find_handler (int syscall_num) {


@@ -4,8 +4,9 @@
 #include <libk/std.h>
 #include <proc/proc.h>
-typedef int (*syscall_handler_func_t) (struct proc* proc, uintptr_t a1, uintptr_t a2, uintptr_t a3,
-                                       uintptr_t a4, uintptr_t a5, uintptr_t a6);
+typedef uintptr_t (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1,
+                                             uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5,
+                                             uintptr_t a6);
 syscall_handler_func_t syscall_find_handler (int syscall_num);

libmsl/alloc/.gitignore vendored Normal file

@@ -0,0 +1 @@
*.o

libmsl/alloc/liballoc.c Normal file

@@ -0,0 +1,386 @@
/* liballoc breaks when optimized too aggressively, e.g. with clang's -Oz */
#pragma clang optimize off
#include <alloc/liballoc.h>
#include <m/system.h>
#define LIBALLOC_MUTEX 500
void liballoc_init (void) { mutex_create (LIBALLOC_MUTEX); }
void liballoc_deinit (void) { mutex_delete (LIBALLOC_MUTEX); }
int liballoc_lock (void) { return mutex_lock (LIBALLOC_MUTEX); }
int liballoc_unlock (void) { return mutex_unlock (LIBALLOC_MUTEX); }
void* liballoc_alloc (int pages) { return map (0, pages, MAP_FLAGS | MAP_RW); }
int liballoc_free (void* ptr, int pages) { return unmap ((uintptr_t)ptr, pages); }
/** Durand's Ridiculously Amazing Super Duper Memory functions. */
// #define DEBUG
#define LIBALLOC_MAGIC 0xc001c0de
#define MAXCOMPLETE 5
#define MAXEXP 32
#define MINEXP 8
#define MODE_BEST 0
#define MODE_INSTANT 1
#define MODE MODE_BEST
struct boundary_tag* l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
static int l_initialized = 0; //< Flag to indicate initialization.
static int l_pageSize = PAGE_SIZE; //< Individual page size
static int l_pageCount = 16; //< Minimum number of pages to allocate.
// *********** HELPER FUNCTIONS *******************************
/** Returns the exponent required to manage 'size' amount of memory.
*
* Returns n where 2^n <= size < 2^(n+1)
*/
static inline int getexp (unsigned int size) {
if (size < (1 << MINEXP)) {
return -1; // Smaller than the quantum.
}
int shift = MINEXP;
while (shift < MAXEXP) {
if ((1 << shift) > size)
break;
shift += 1;
}
return shift - 1;
}
static void* liballoc_memset (void* s, int c, size_t n) {
size_t i;
for (i = 0; i < n; i++)
((char*)s)[i] = c;
return s;
}
static void* liballoc_memcpy (void* s1, const void* s2, size_t n) {
char* cdest;
char* csrc;
unsigned int* ldest = (unsigned int*)s1;
unsigned int* lsrc = (unsigned int*)s2;
while (n >= sizeof (unsigned int)) {
*ldest++ = *lsrc++;
n -= sizeof (unsigned int);
}
cdest = (char*)ldest;
csrc = (char*)lsrc;
while (n > 0) {
*cdest++ = *csrc++;
n -= 1;
}
return s1;
}
static inline void insert_tag (struct boundary_tag* tag, int index) {
int realIndex;
if (index < 0) {
realIndex = getexp (tag->real_size - sizeof (struct boundary_tag));
if (realIndex < MINEXP)
realIndex = MINEXP;
} else
realIndex = index;
tag->index = realIndex;
if (l_freePages[realIndex] != NULL) {
l_freePages[realIndex]->prev = tag;
tag->next = l_freePages[realIndex];
}
l_freePages[realIndex] = tag;
}
static inline void remove_tag (struct boundary_tag* tag) {
if (l_freePages[tag->index] == tag)
l_freePages[tag->index] = tag->next;
if (tag->prev != NULL)
tag->prev->next = tag->next;
if (tag->next != NULL)
tag->next->prev = tag->prev;
tag->next = NULL;
tag->prev = NULL;
tag->index = -1;
}
static inline struct boundary_tag* melt_left (struct boundary_tag* tag) {
struct boundary_tag* left = tag->split_left;
left->real_size += tag->real_size;
left->split_right = tag->split_right;
if (tag->split_right != NULL)
tag->split_right->split_left = left;
return left;
}
static inline struct boundary_tag* absorb_right (struct boundary_tag* tag) {
struct boundary_tag* right = tag->split_right;
remove_tag (right); // Remove right from free pages.
tag->real_size += right->real_size;
tag->split_right = right->split_right;
if (right->split_right != NULL)
right->split_right->split_left = tag;
return tag;
}
static inline struct boundary_tag* split_tag (struct boundary_tag* tag) {
unsigned int remainder = tag->real_size - sizeof (struct boundary_tag) - tag->size;
struct boundary_tag* new_tag =
(struct boundary_tag*)((uintptr_t)tag + sizeof (struct boundary_tag) + tag->size);
new_tag->magic = LIBALLOC_MAGIC;
new_tag->real_size = remainder;
new_tag->next = NULL;
new_tag->prev = NULL;
new_tag->split_left = tag;
new_tag->split_right = tag->split_right;
if (new_tag->split_right != NULL)
new_tag->split_right->split_left = new_tag;
tag->split_right = new_tag;
tag->real_size -= new_tag->real_size;
insert_tag (new_tag, -1);
return new_tag;
}
// ***************************************************************
static struct boundary_tag* allocate_new_tag (unsigned int size) {
unsigned int pages;
unsigned int usage;
struct boundary_tag* tag;
// This is how much space is required.
usage = size + sizeof (struct boundary_tag);
// Perfect amount of space
pages = usage / l_pageSize;
if ((usage % l_pageSize) != 0)
pages += 1;
// Make sure it's >= the minimum size.
if (pages < (unsigned int)l_pageCount)
pages = l_pageCount;
tag = (struct boundary_tag*)liballoc_alloc (pages);
if (tag == NULL)
return NULL; // uh oh, we ran out of memory.
tag->magic = LIBALLOC_MAGIC;
tag->size = size;
tag->real_size = pages * l_pageSize;
tag->index = -1;
tag->next = NULL;
tag->prev = NULL;
tag->split_left = NULL;
tag->split_right = NULL;
return tag;
}
void* malloc (size_t size) {
int index;
void* ptr;
struct boundary_tag* tag = NULL;
liballoc_lock ();
if (l_initialized == 0) {
for (index = 0; index < MAXEXP; index++) {
l_freePages[index] = NULL;
l_completePages[index] = 0;
}
l_initialized = 1;
}
index = getexp (size) + MODE;
if (index < MINEXP)
index = MINEXP;
// Find one big enough.
tag = l_freePages[index]; // Start at the front of the list.
while (tag != NULL) {
// If there's enough space in this tag.
if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
break;
}
tag = tag->next;
}
// No page found. Make one.
if (tag == NULL) {
if ((tag = allocate_new_tag (size)) == NULL) {
liballoc_unlock ();
return NULL;
}
index = getexp (tag->real_size - sizeof (struct boundary_tag));
} else {
remove_tag (tag);
if ((tag->split_left == NULL) && (tag->split_right == NULL))
l_completePages[index] -= 1;
}
// We have a free page. Remove it from the free pages list.
tag->size = size;
// Removed... see if we can re-use the excess space.
unsigned int remainder =
tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder
if (((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
int childIndex = getexp (remainder);
if (childIndex >= 0) {
struct boundary_tag* new_tag = split_tag (tag);
(void)new_tag;
}
}
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
liballoc_unlock ();
return ptr;
}
void free (void* ptr) {
int index;
struct boundary_tag* tag;
if (ptr == NULL)
return;
liballoc_lock ();
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
if (tag->magic != LIBALLOC_MAGIC) {
liballoc_unlock (); // release the lock
return;
}
// MELT LEFT...
while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
tag = melt_left (tag);
remove_tag (tag);
}
// MELT RIGHT...
while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
tag = absorb_right (tag);
}
// Where is it going back to?
index = getexp (tag->real_size - sizeof (struct boundary_tag));
if (index < MINEXP)
index = MINEXP;
// A whole, empty block?
if ((tag->split_left == NULL) && (tag->split_right == NULL)) {
if (l_completePages[index] == MAXCOMPLETE) {
// Too many standing by to keep. Free this one.
unsigned int pages = tag->real_size / l_pageSize;
if ((tag->real_size % l_pageSize) != 0)
pages += 1;
if (pages < (unsigned int)l_pageCount)
pages = l_pageCount;
liballoc_free (tag, pages);
liballoc_unlock ();
return;
}
l_completePages[index] += 1; // Increase the count of complete pages.
}
// ..........
insert_tag (tag, index);
liballoc_unlock ();
}
void* calloc (size_t nobj, size_t size) {
int real_size;
void* p;
real_size = nobj * size;
p = malloc (real_size);
liballoc_memset (p, 0, real_size);
return p;
}
void* realloc (void* p, size_t size) {
void* ptr;
struct boundary_tag* tag;
int real_size;
if (size == 0) {
free (p);
return NULL;
}
if (p == NULL)
return malloc (size);
if (&liballoc_lock != NULL)
liballoc_lock (); // lockit
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
real_size = tag->size;
if (&liballoc_unlock != NULL)
liballoc_unlock ();
if ((size_t)real_size > size)
real_size = size;
ptr = malloc (size);
liballoc_memcpy (ptr, p, real_size);
free (p);
return ptr;
}
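For reference, getexp () above returns n with 2^n <= size < 2^(n+1), clamped below at MINEXP and returning -1 under the 2^MINEXP quantum; that exponent is the free-list bucket index. A small host-side check of the invariant (getexp_ref mirrors the function and is not part of liballoc):

#include <assert.h>

/* mirrors getexp () with MINEXP = 8, MAXEXP = 32 */
static int getexp_ref (unsigned int size) {
  if (size < (1u << 8))
    return -1;
  int shift = 8;
  while (shift < 32 && (1u << shift) <= size)
    shift += 1;
  return shift - 1;
}

int main (void) {
  assert (getexp_ref (255) == -1);   /* below the quantum              */
  assert (getexp_ref (256) == 8);    /* exactly 2^8                    */
  assert (getexp_ref (4096) == 12);  /* one page: 2^12 <= 4096 < 2^13  */
  assert (getexp_ref (4097) == 12);
  return 0;
}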

libmsl/alloc/liballoc.h Normal file

@@ -0,0 +1,94 @@
#ifndef _LIBALLOC_H
#define _LIBALLOC_H
#include <stddef.h>
#include <stdint.h>
#define _ALLOC_SKIP_DEFINE
// If we are told to not define our own size_t, then we
// skip the define.
#ifndef _ALLOC_SKIP_DEFINE
#ifndef _HAVE_SIZE_T
#define _HAVE_SIZE_T
typedef unsigned int size_t;
#endif
#ifndef NULL
#define NULL 0
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** This is a boundary tag which is prepended to the
* page or section of a page which we have allocated. It is
* used to identify valid memory blocks that the
* application is trying to free.
*/
struct boundary_tag {
unsigned int magic; //< It's a kind of ...
unsigned int size; //< Requested size.
unsigned int real_size; //< Actual size.
int index; //< Location in the page table.
struct boundary_tag* split_left; //< Linked-list info for broken pages.
struct boundary_tag* split_right; //< The same.
struct boundary_tag* next; //< Linked list info.
struct boundary_tag* prev; //< Linked list info.
};
/** This function is supposed to lock the memory data structures. It
* could be as simple as disabling interrupts or acquiring a spinlock.
* It's up to you to decide.
*
* \return 0 if the lock was acquired successfully. Anything else is
* failure.
*/
extern int liballoc_lock (void);
/** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it
* had acquired a spinlock, it releases the spinlock, etc.
*
* \return 0 if the lock was successfully released.
*/
extern int liballoc_unlock (void);
/** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages
* required. The page size was set up in the liballoc_init function.
*
* \return NULL if the pages were not allocated.
* \return A pointer to the allocated memory.
*/
extern void* liballoc_alloc (int pages);
/** This frees previously allocated memory. The void* parameter passed
* to the function is the exact same value returned from a previous
* liballoc_alloc call.
*
* The integer value is the number of pages to free.
*
* \return 0 if the memory was successfully freed.
*/
extern int liballoc_free (void* ptr, int pages);
void* malloc (size_t); //< The standard function.
void* realloc (void*, size_t); //< The standard function.
void* calloc (size_t, size_t); //< The standard function.
void free (void*); //< The standard function.
void liballoc_init (void);
void liballoc_deinit (void);
#ifdef __cplusplus
}
#endif
#endif

libmsl/alloc/src.mk Normal file

@@ -0,0 +1,3 @@
c += alloc/liballoc.c
o += alloc/liballoc.o


@@ -2,21 +2,16 @@
 #include <stddef.h>
 #include <stdint.h>
-int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
-                       uintptr_t a5, uintptr_t a6) {
-  uint64_t result;
-  __asm__ volatile ("movq %1, %%rax\n"
-                    "movq %2, %%rdi\n"
-                    "movq %3, %%rsi\n"
-                    "movq %4, %%rdx\n"
-                    "movq %5, %%r10\n"
-                    "movq %6, %%r8\n"
-                    "movq %7, %%r9\n"
-                    "syscall\n"
-                    "movq %%rax, %0\n"
-                    : "=r"(result)
-                    : "r"((uint64_t)syscall_num), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
-                      "r"(a6)
-                    : "memory", "cc", "rcx", "r11");
-  return (int)result;
-}
+uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
+                         uintptr_t a5, uintptr_t a6) {
+  uint64_t result;
+  __asm__ volatile ("movq %[a4], %%r10\n"
+                    "movq %[a5], %%r8\n"
+                    "movq %[a6], %%r9\n"
+                    "syscall\n"
+                    : "=a"(result)
+                    : "a"(syscall_num), "D"(a1), "S"(a2),
+                      "d"(a3), [a4] "r"(a4), [a5] "r"(a5), [a6] "r"(a6)
+                    : "r10", "r8", "r9", "r11", "rcx", "cc", "memory");
+  return result;
+}
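The rewritten stub leans on register constraints for the convention the kernel expects (rax carries the syscall number, rdi/rsi/rdx the first three arguments), moves only a4-a6 into r10/r8/r9 by hand, and clobbers rcx/r11 because the syscall instruction overwrites them. A hedged sketch of how a libmsl wrapper sits on top of it; m/system.c itself is not shown in this diff, so this exact signature is an assumption:

#include <m/syscall.h>        /* #define syscall amd64_syscall on x86_64 */
#include <m/syscall_defs.h>   /* syscall numbers such as SYS_MUTEX_LOCK  */
#include <stdint.h>

int mutex_lock (int mutex_rid) {
  /* the kernel hands back ST_OK or a negative status in rax */
  return (int)syscall (SYS_MUTEX_LOCK, (uintptr_t)mutex_rid, 0, 0, 0, 0, 0);
}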


@@ -3,7 +3,7 @@
 #include <stdint.h>
-int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
-                       uintptr_t a5, uintptr_t a6);
+uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
+                         uintptr_t a5, uintptr_t a6);
 #endif // _LIBMSL_AMD64_SYSCALL_H


@@ -1,4 +1,5 @@
-#include <m/proc.h>
+#include <alloc/liballoc.h>
+#include <m/system.h>
 #include <stdint.h>
 extern volatile uint8_t __bss_start[];
@@ -6,7 +7,7 @@ extern volatile uint8_t __bss_end[];
 extern void app_main (void);
-static void msl_clear_bss (void) {
+static void clear_bss (void) {
   uint8_t* p = (uint8_t*)__bss_start;
   while (p < __bss_end) {
     *p++ = 0;
@@ -14,9 +15,9 @@ static void msl_clear_bss (void) {
 }
 void __premain (void) {
-  msl_clear_bss ();
-  m_proc_quit ();
+  clear_bss ();
+  liballoc_init ();
   app_main ();
+  liballoc_deinit ();
+  quit ();
 }


@@ -1,6 +0,0 @@
#include <m/syscall.h>
#include <m/syscall_defs.h>
int m_proc_quit (void) { return m_syscall (SYS_PROC_QUIT, 0, 0, 0, 0, 0, 0); }
int m_proc_test (void) { return m_syscall (SYS_PROC_TEST, 0, 0, 0, 0, 0, 0); }


@@ -1,8 +0,0 @@
#ifndef _LIBMSL_M_PROC_H
#define _LIBMSL_M_PROC_H
int m_proc_quit (void);
int m_proc_test (void);
#endif // _LIBMSL_M_PROC_H


@@ -1,3 +1,3 @@
-c += m/proc.c
-o += m/proc.o
+c += m/system.c
+o += m/system.o


@@ -5,7 +5,7 @@
 #if defined(__x86_64__)
 #include <amd64/syscall.h>
-#define m_syscall msl_amd64_syscall
+#define syscall amd64_syscall
 #endif
 #endif // _LIBMSL_M_SYSCALL_H

Some files were not shown because too many files have changed in this diff.