Compare commits
65 Commits
bba36ef057...master
| SHA1 | Author | Date | |
|---|---|---|---|
| 38e26a9c12 | |||
| 124aa12f5b | |||
| d2f5c032d9 | |||
| 73e42588fb | |||
| e78bfb9984 | |||
| d2a88b3641 | |||
| fdda2e2df8 | |||
| 388418a718 | |||
| 1c64d608bd | |||
| 3d23187acf | |||
| a3b62ebd3d | |||
| 8bda300f6a | |||
| cf51600c6a | |||
| b388b30b24 | |||
| 600886a7ee | |||
| 67b66f2b39 | |||
| 18f791222e | |||
| 5e16bb647c | |||
| a68373e4ee | |||
| 8650010992 | |||
| 95f590fb3b | |||
| 7bb3b77ede | |||
| c26fd3cb2b | |||
| fea0999726 | |||
| 7eceecf6e3 | |||
| fff51321bc | |||
| a29233f853 | |||
| 38a43b59b0 | |||
| ddafc4eb19 | |||
| 4f7077d458 | |||
| 9a7dbf0594 | |||
| ab8093cc6c | |||
| ddbb66b5e4 | |||
| 11a1eb52aa | |||
| a054257336 | |||
| 9fc8521e63 | |||
| 711da8aeab | |||
| ebd9f0cac6 | |||
| 7cd5623d36 | |||
| 270ff507d4 | |||
| 55166f9d5f | |||
| e5cc3a64d3 | |||
| 2ab308d678 | |||
| d1d772cb42 | |||
| 0d8f9e565f | |||
| f80a26e5eb | |||
| 5bf10c1218 | |||
| 41a458b925 | |||
| 6a474c21a0 | |||
| a5283283f6 | |||
| 79768d94e6 | |||
| 0555ddd041 | |||
| ebb026b807 | |||
| d7b734306f | |||
| 28aef30f77 | |||
| 9f107a1a5e | |||
| e50f8940a9 | |||
| d09e4d97ad | |||
| 7915986902 | |||
| 902682ac11 | |||
| 7747e5e0aa | |||
| a8423fe657 | |||
| 6538fd8023 | |||
| fcd5658a80 | |||
| b1579e4ac1 |
@@ -50,7 +50,7 @@ AlignOperands: false
SortIncludes: true

# Comments
ReflowComments: false
ReflowComments: true
CommentPragmas: '^ IWYU pragma:'

# Misc

@@ -25,7 +25,7 @@ jobs:
      - name: Install mkdocs
        run: |
          pip install --upgrade pip
          pip install mkdocs mkdocs-material
          pip install mkdocs mkdocs-material pymdown-extensions

      - name: Build
        run: make docs

Makefile (2 changes)
@@ -4,4 +4,4 @@ include make/apps.mk
include make/kernel.mk
include make/dist.mk
include make/docs.mk
include make/libc.mk
include make/libmsl.mk

@@ -4,7 +4,8 @@ cflags += --target=x86_64-pc-none-elf \
          -mno-avx \
          -mno-mmx \
          -mno-80387 \
          -mno-red-zone
          -mno-red-zone \
          -mcmodel=large

ldflags += --target=x86_64-pc-none-elf \
           -Wl,-zmax-page-size=0x1000

@@ -6,6 +6,8 @@ PHDRS {
    text PT_LOAD;
    rodata PT_LOAD;
    data PT_LOAD;
    bss PT_LOAD;
    tls PT_TLS;
}

SECTIONS {
@@ -13,32 +15,53 @@ SECTIONS {

    .text : {
        *(.text .text.*)
        *(.ltext .ltext.*)
    } :text

    . = ALIGN(CONSTANT(MAXPAGESIZE));
    . = ALIGN(0x1000);

    .rodata : {
        *(.rodata .rodata.*)
    } :rodata

    .note.gnu.build-id : {
        *(.note.gnu.build-id)
    } :rodata

    . = ALIGN(CONSTANT(MAXPAGESIZE));
    . = ALIGN(0x1000);

    .data : {
        *(.data .data.*)
        *(.ldata .ldata.*)
    } :data

    . = ALIGN(0x1000);

    __bss_start = .;

    .bss : {
        *(.bss .bss.*)
    } :data
        *(.lbss .lbss.*)
    } :bss

    __bss_end = .;

    . = ALIGN(0x1000);

    __tdata_start = .;

    .tdata : {
        *(.tdata .tdata.*)
    } :tls

    __tdata_end = .;

    __tbss_start = .;

    .tbss : {
        *(.tbss .tbss.*)
    } :tls

    __tbss_end = .;

    __tls_size = __tbss_end - __tdata_start;

    /DISCARD/ : {
        *(.eh_frame*)
        *(.note .note.*)

@@ -1,4 +1,4 @@
cpu: model=p4_prescott_celeron_336
cpu: model=p4_prescott_celeron_336, ips=200000000

memory: guest=4096 host=2048

@@ -9,6 +9,7 @@ ata0: enabled=1
ata0-master: type=cdrom, path=mop3.iso, status=inserted
com1: enabled=1, mode=file, dev=bochs-com1.txt
pci: enabled=1, chipset=i440fx
clock: sync=realtime, time0=local

boot: cdrom

docs/.gitignore (vendored, 1 change)
@@ -1 +0,0 @@
doxygen/

docs/assets/images/only-processes.png (new binary file, 118 KiB; not shown)
docs/assets/images/processes-threads.png (new binary file, 51 KiB; not shown)

@@ -2,7 +2,3 @@

MOP3 is a hobby OS project of mine ;).

# Kernel documentation

- [Doxygen docs](kernel/doxygen/html/index.html)
- [Building](building_kernel/index.html)

docs/processes_overview.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Overview of processes in MOP3

## What is a process?

A process is a structure that represents the internal state of a user application's environment. This includes
the necessary stacks, code, data and other resources. A process usually has its own address space, but in certain
circumstances it may share it with another process.

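To make this concrete, the sketch below shows the kind of bookkeeping such a structure might hold. It is illustrative only: the field names and types are invented for this example and are not taken from the MOP3 sources.

```c
/* Hypothetical process descriptor matching the description above:
 * stacks, mappings and other resources, plus scheduler bookkeeping. */
struct process_sketch {
    int    id;                     /* process identifier */
    void*  address_space;          /* page tables; may be shared with another process */
    void*  kernel_stack;           /* per-process kernel stack */
    void*  user_stack;             /* user-mode stack */
    int    state;                  /* e.g. runnable, blocked, finished */
    struct process_sketch* next;   /* link in the scheduler's run queue */
};
```
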
## Only processes vs. processes-threads model

### Overview

MOP3 doesn't have a process-thread separation. Usually in operating systems you'd have a "process", which consists
of multiple worker threads. For example, a single-threaded application is a process that consists of one worker. In MOP3
we do things a little differently. We only have processes, but some processes may work within the same pool of (generally speaking)
"resources", such as a shared address space, shared memory allocations, mutexes and so on. An application then consists
not of threads, but of processes, which are loosely tied together via shared data (a user-space sketch of this pattern
follows the diagrams below).

#### Processes-threads model diagram

#### Only processes model diagram

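The calls used below (`process_spawn`, `mutex_create`, `mutex_lock`/`mutex_unlock`, `argument_ptr`, `test`, `process_quit`) are taken from `init/init.c` as it appears later in this comparison; the includes, the mutex ID and the surrounding program are only an illustration of the model, not code from the tree.

```c
#include <proc/proc.h>
#include <stdint.h>

#define SHARED_MUTEX 2000

/* One "worker" process; several of these make up an application. */
static void worker (void) {
    char letter = (char)(uintptr_t)argument_ptr ();  /* per-process argument */

    for (;;) {
        mutex_lock (SHARED_MUTEX);    /* serialize access to the shared resource */
        test (letter);                /* do some work on shared state */
        mutex_unlock (SHARED_MUTEX);
    }

    process_quit ();
}

void example_main (void) {
    mutex_create (SHARED_MUTEX);

    /* An "application" is just a group of processes tied together by shared data. */
    process_spawn (&worker, (void*)'a');
    process_spawn (&worker, (void*)'b');
}
```
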
## Scheduling

MOP3 uses a round-robin based scheduler. For now priorities are left unimplemented, i.e. every process has
equal priority, but this may change in the future.

A good explanation of round-robin scheduling can be found in [the OSDev wiki article](https://wiki.osdev.org/Scheduling_Algorithms#Round_Robin).

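For illustration only, a minimal round-robin "pick next" routine in the spirit of the OSDev description could look like the sketch below. This is a generic example with invented types; it is not MOP3's scheduler.

```c
/* Generic round-robin sketch: every runnable process gets the CPU in turn.
 * Assumes at least one process is always runnable (e.g. an idle process). */
struct rr_proc {
    int             runnable;   /* non-zero if the process can be scheduled */
    struct rr_proc* next;       /* circular run queue */
};

static struct rr_proc* rr_current;

/* Called when the current time slice expires (e.g. from a timer interrupt). */
struct rr_proc* rr_pick_next (void) {
    struct rr_proc* p = rr_current->next;

    while (!p->runnable)        /* skip processes that cannot run right now */
        p = p->next;

    rr_current = p;
    return p;
}
```
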
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2021 - 2023 jothepro

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,207 +0,0 @@
# Doxygen Awesome

[](https://github.com/jothepro/doxygen-awesome-css/releases/latest)
[](https://github.com/jothepro/doxygen-awesome-css/blob/main/LICENSE)


<div class="title_screenshot">



</div>

**Doxygen Awesome** is a custom CSS theme for Doxygen HTML documentation with many customization options.

## Motivation

I really like how the Doxygen HTML documentation is structured, but IMHO it looks a bit outdated.

This theme is an attempt to modernize the visuals of Doxygen without changing its overall layout too much.

## Features

- 🌈 Clean, modern design
- 🚀 Highly customizable by adjusting CSS variables
- 🧩 No changes to the HTML structure of Doxygen are required
- 📱 Improved mobile usability
- 🌘 Dark mode support!
- 🥇 Works best with **Doxygen 1.9.1** - **1.9.4** and **1.9.6** - **1.14.0**

## Examples

Some websites using this theme:

- [Documentation of this repository](https://jothepro.github.io/doxygen-awesome-css/)
- [wxWidgets](https://docs.wxwidgets.org/3.2/)
- [OpenCV 5.x](https://docs.opencv.org/5.x/)
- [Zephyr](https://docs.zephyrproject.org/latest/doxygen/html/index.html)
- [Spatial Audio Framework (SAF)](https://leomccormack.github.io/Spatial_Audio_Framework/index.html)
- [Randolf Richardson's C++ classes](https://www.randolf.ca/c++/docs/)
- [libsl3](https://a4z.github.io/libsl3/)
- [DuMu<sup>x</sup>](https://dumux.org/docs/doxygen/master/)
- [OpenRemise](https://openremise.at/)

## Installation

To use the theme when generating your documentation, bring the required CSS and JS files from this repository into your project.

This can be done in several ways:

- manually copying the files
- adding the project as a Git submodule
- downloading the project with CMake FetchContent
- adding the project as an npm/xpm dependency
- installing the theme system-wide

All theme files are located in the root of this repository and start with the prefix `doxygen-awesome-`. You may not need all of them. Follow the installation instructions to determine which files are required for your setup.

### Git submodule

For projects that use Git, add the repository as a submodule and check out the desired release:

```sh
git submodule add https://github.com/jothepro/doxygen-awesome-css.git
cd doxygen-awesome-css
git checkout v2.4.1
```

### CMake with FetchContent

For projects that build with CMake, the `FetchContent` module can be used to download the repository at configuration time.

Add the following snippet to your `CMakeLists.txt`:

```cmake
include(FetchContent)
FetchContent_Declare(
    doxygen-awesome-css
    URL https://github.com/jothepro/doxygen-awesome-css/archive/refs/heads/main.zip
)
FetchContent_MakeAvailable(doxygen-awesome-css)

# Save the location the files were cloned into
# This allows us to get the path to doxygen-awesome.css
FetchContent_GetProperties(doxygen-awesome-css SOURCE_DIR AWESOME_CSS_DIR)

# Generate the Doxyfile
set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/doc/Doxyfile.in)
set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY)
```

This downloads the latest main (but any other revision could be used) and unpacks in the build folder. The `Doxyfile.in` can reference this location in the `HTML_EXTRA_STYLESHEET` field

```text
HTML_EXTRA_STYLESHEET = @AWESOME_CSS_DIR@/doxygen-awesome.css
```

When the configure stage of CMake is run, the `Doxyfile.in` is rendered to Doxyfile and Doxygen can be run as usual.

### npm/xpm dependency

In the npm ecosystem, this project can be added as a development dependency
to your project:

```sh
cd your-project
npm install https://github.com/jothepro/doxygen-awesome-css#v2.4.1 --save-dev

ls -l node_modules/@jothepro/doxygen-awesome-css
```

Similarly, in the [xPack](https://xpack.github.io) ecosystem, this project can be added
as a development dependency to an [`xpm`](https://xpack.github.io/xpm/)
managed project.

### System-wide

You can even install the theme system-wide by running `make install`.
The files will be installed to `/usr/local/share/` by default,
but you can customize the install location with `make PREFIX=/my/custom/path install`.

### Choosing a layout

There are two layout options. Choose one of them and configure Doxygen accordingly:

<div class="tabbed">

- <b class="tab-title">Base Theme</b><div class="darkmode_inverted_image">

</div>
Comes with the typical Doxygen titlebar. Optionally the treeview in the sidebar can be enabled.

Required files: `doxygen-awesome.css`

Required `Doxyfile` configuration:
```
GENERATE_TREEVIEW      = YES # optional. Also works without treeview
DISABLE_INDEX          = NO
FULL_SIDEBAR           = NO
HTML_EXTRA_STYLESHEET  = doxygen-awesome-css/doxygen-awesome.css
HTML_COLORSTYLE        = LIGHT # required with Doxygen >= 1.9.5
```

- <b class="tab-title">Sidebar-Only Theme</b><div class="darkmode_inverted_image">

</div>
Hides the top titlebar to give more space to the content. The treeview must be enabled in order for this theme to work.

Required files: `doxygen-awesome.css`, `doxygen-awesome-sidebar-only.css`

Required `Doxyfile` configuration:
```
GENERATE_TREEVIEW      = YES # required!
DISABLE_INDEX          = NO
FULL_SIDEBAR           = NO
HTML_EXTRA_STYLESHEET  = doxygen-awesome-css/doxygen-awesome.css \
                         doxygen-awesome-css/doxygen-awesome-sidebar-only.css
HTML_COLORSTYLE        = LIGHT # required with Doxygen >= 1.9.5
```

</div>

<br>

@warning
- This theme is not compatible with the `FULL_SIDEBAR = YES` option provided by Doxygen!
- `HTML_COLORSTYLE` must be set to `LIGHT` since Doxygen 1.9.5!

### Further installation instructions

- [Installing extensions](docs/extensions.md)
- [Customizing the theme (colors, spacing, border-radius, ...)](docs/customization.md)
- [Tips and Tricks for further configuration](docs/tricks.md)

## Browser support

Tested with

- Chrome 140, Chrome 140 for Android, Chrome 141 for iOS
- Safari 26, Safari for iOS 26
- Firefox 143, Firefox 142 for Android, Firefox 143 for iOS
- Edge 140
- Opera One 122

The theme does not strive to be backward compatible with (significantly) older browser versions.

## Credits

Thanks for all the bug reports and inspiring feedback on GitHub!

Special thanks to all the contributors:
<br><br>
<a href="https://github.com/jothepro/doxygen-awesome-css/graphs/contributors">
<img src="https://contrib.rocks/image?repo=jothepro/doxygen-awesome-css" />
</a>

<div class="section_buttons">

| Read Next |
|---------------------------------:|
| [Extensions](docs/extensions.md) |

</div>

@@ -1,138 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2021 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
class DoxygenAwesomeDarkModeToggle extends HTMLElement {
|
||||
// SVG icons from https://fonts.google.com/icons
|
||||
// Licensed under the Apache 2.0 license:
|
||||
// https://www.apache.org/licenses/LICENSE-2.0.html
|
||||
static lightModeIcon = `<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24px" viewBox="0 0 24 24" width="24px" fill="#FCBF00"><rect fill="none" height="24" width="24"/><circle cx="12" cy="12" opacity=".3" r="3"/><path d="M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z"/></svg>`
|
||||
static darkModeIcon = `<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24px" viewBox="0 0 24 24" width="24px" fill="#FE9700"><rect fill="none" height="24" width="24"/><path d="M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27 C17.45,17.19,14.93,19,12,19c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z" opacity=".3"/><path d="M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z"/></svg>`
|
||||
static title = "Toggle Light/Dark Mode"
|
||||
|
||||
static prefersLightModeInDarkModeKey = "prefers-light-mode-in-dark-mode"
|
||||
static prefersDarkModeInLightModeKey = "prefers-dark-mode-in-light-mode"
|
||||
|
||||
static _staticConstructor = function() {
|
||||
DoxygenAwesomeDarkModeToggle.enableDarkMode(DoxygenAwesomeDarkModeToggle.userPreference)
|
||||
// Update the color scheme when the browsers preference changes
|
||||
// without user interaction on the website.
|
||||
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', event => {
|
||||
DoxygenAwesomeDarkModeToggle.onSystemPreferenceChanged()
|
||||
})
|
||||
// Update the color scheme when the tab is made visible again.
|
||||
// It is possible that the appearance was changed in another tab
|
||||
// while this tab was in the background.
|
||||
document.addEventListener("visibilitychange", visibilityState => {
|
||||
if (document.visibilityState === 'visible') {
|
||||
DoxygenAwesomeDarkModeToggle.onSystemPreferenceChanged()
|
||||
}
|
||||
});
|
||||
}()
|
||||
|
||||
static init() {
|
||||
$(function() {
|
||||
$(document).ready(function() {
|
||||
const toggleButton = document.createElement('doxygen-awesome-dark-mode-toggle')
|
||||
toggleButton.title = DoxygenAwesomeDarkModeToggle.title
|
||||
toggleButton.updateIcon()
|
||||
|
||||
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', event => {
|
||||
toggleButton.updateIcon()
|
||||
})
|
||||
document.addEventListener("visibilitychange", visibilityState => {
|
||||
if (document.visibilityState === 'visible') {
|
||||
toggleButton.updateIcon()
|
||||
}
|
||||
});
|
||||
|
||||
$(document).ready(function(){
|
||||
document.getElementById("MSearchBox").parentNode.appendChild(toggleButton)
|
||||
})
|
||||
$(window).resize(function(){
|
||||
document.getElementById("MSearchBox").parentNode.appendChild(toggleButton)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
constructor() {
|
||||
super();
|
||||
this.onclick=this.toggleDarkMode
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns `true` for dark-mode, `false` for light-mode system preference
|
||||
*/
|
||||
static get systemPreference() {
|
||||
return window.matchMedia('(prefers-color-scheme: dark)').matches
|
||||
}
|
||||
|
||||
/**
|
||||
* @returns `true` for dark-mode, `false` for light-mode user preference
|
||||
*/
|
||||
static get userPreference() {
|
||||
return (!DoxygenAwesomeDarkModeToggle.systemPreference && localStorage.getItem(DoxygenAwesomeDarkModeToggle.prefersDarkModeInLightModeKey)) ||
|
||||
(DoxygenAwesomeDarkModeToggle.systemPreference && !localStorage.getItem(DoxygenAwesomeDarkModeToggle.prefersLightModeInDarkModeKey))
|
||||
}
|
||||
|
||||
static set userPreference(userPreference) {
|
||||
DoxygenAwesomeDarkModeToggle.darkModeEnabled = userPreference
|
||||
if(!userPreference) {
|
||||
if(DoxygenAwesomeDarkModeToggle.systemPreference) {
|
||||
localStorage.setItem(DoxygenAwesomeDarkModeToggle.prefersLightModeInDarkModeKey, true)
|
||||
} else {
|
||||
localStorage.removeItem(DoxygenAwesomeDarkModeToggle.prefersDarkModeInLightModeKey)
|
||||
}
|
||||
} else {
|
||||
if(!DoxygenAwesomeDarkModeToggle.systemPreference) {
|
||||
localStorage.setItem(DoxygenAwesomeDarkModeToggle.prefersDarkModeInLightModeKey, true)
|
||||
} else {
|
||||
localStorage.removeItem(DoxygenAwesomeDarkModeToggle.prefersLightModeInDarkModeKey)
|
||||
}
|
||||
}
|
||||
DoxygenAwesomeDarkModeToggle.onUserPreferenceChanged()
|
||||
}
|
||||
|
||||
static enableDarkMode(enable) {
|
||||
if(enable) {
|
||||
DoxygenAwesomeDarkModeToggle.darkModeEnabled = true
|
||||
document.documentElement.classList.add("dark-mode")
|
||||
document.documentElement.classList.remove("light-mode")
|
||||
} else {
|
||||
DoxygenAwesomeDarkModeToggle.darkModeEnabled = false
|
||||
document.documentElement.classList.remove("dark-mode")
|
||||
document.documentElement.classList.add("light-mode")
|
||||
}
|
||||
}
|
||||
|
||||
static onSystemPreferenceChanged() {
|
||||
DoxygenAwesomeDarkModeToggle.darkModeEnabled = DoxygenAwesomeDarkModeToggle.userPreference
|
||||
DoxygenAwesomeDarkModeToggle.enableDarkMode(DoxygenAwesomeDarkModeToggle.darkModeEnabled)
|
||||
}
|
||||
|
||||
static onUserPreferenceChanged() {
|
||||
DoxygenAwesomeDarkModeToggle.enableDarkMode(DoxygenAwesomeDarkModeToggle.darkModeEnabled)
|
||||
}
|
||||
|
||||
toggleDarkMode() {
|
||||
DoxygenAwesomeDarkModeToggle.userPreference = !DoxygenAwesomeDarkModeToggle.userPreference
|
||||
this.updateIcon()
|
||||
}
|
||||
|
||||
updateIcon() {
|
||||
if(DoxygenAwesomeDarkModeToggle.darkModeEnabled) {
|
||||
this.innerHTML = DoxygenAwesomeDarkModeToggle.darkModeIcon
|
||||
} else {
|
||||
this.innerHTML = DoxygenAwesomeDarkModeToggle.lightModeIcon
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
customElements.define("doxygen-awesome-dark-mode-toggle", DoxygenAwesomeDarkModeToggle);
|
||||
@@ -1,66 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2022 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
class DoxygenAwesomeFragmentCopyButton extends HTMLElement {
|
||||
constructor() {
|
||||
super();
|
||||
this.onclick=this.copyContent
|
||||
}
|
||||
static title = "Copy to clipboard"
|
||||
static copyIcon = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M23.04,10.322c0,-2.582 -2.096,-4.678 -4.678,-4.678l-6.918,-0c-2.582,-0 -4.678,2.096 -4.678,4.678c0,-0 0,8.04 0,8.04c0,2.582 2.096,4.678 4.678,4.678c0,-0 6.918,-0 6.918,-0c2.582,-0 4.678,-2.096 4.678,-4.678c0,-0 0,-8.04 0,-8.04Zm-2.438,-0l-0,8.04c-0,1.236 -1.004,2.24 -2.24,2.24l-6.918,-0c-1.236,-0 -2.239,-1.004 -2.239,-2.24l-0,-8.04c-0,-1.236 1.003,-2.24 2.239,-2.24c0,0 6.918,0 6.918,0c1.236,0 2.24,1.004 2.24,2.24Z"/><path d="M5.327,16.748c-0,0.358 -0.291,0.648 -0.649,0.648c0,0 0,0 0,0c-2.582,0 -4.678,-2.096 -4.678,-4.678c0,0 0,-8.04 0,-8.04c0,-2.582 2.096,-4.678 4.678,-4.678l6.918,0c2.168,0 3.994,1.478 4.523,3.481c0.038,0.149 0.005,0.306 -0.09,0.428c-0.094,0.121 -0.239,0.191 -0.392,0.191c-0.451,0.005 -1.057,0.005 -1.457,0.005c-0.238,0 -0.455,-0.14 -0.553,-0.357c-0.348,-0.773 -1.128,-1.31 -2.031,-1.31c-0,0 -6.918,0 -6.918,0c-1.236,0 -2.24,1.004 -2.24,2.24l0,8.04c0,1.236 1.004,2.24 2.24,2.24l0,-0c0.358,-0 0.649,0.29 0.649,0.648c-0,0.353 -0,0.789 -0,1.142Z" style="fill-opacity:0.6;"/></svg>`
|
||||
static successIcon = `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path d="M8.084,16.111c-0.09,0.09 -0.212,0.141 -0.34,0.141c-0.127,-0 -0.249,-0.051 -0.339,-0.141c-0.746,-0.746 -2.538,-2.538 -3.525,-3.525c-0.375,-0.375 -0.983,-0.375 -1.357,0c-0.178,0.178 -0.369,0.369 -0.547,0.547c-0.375,0.375 -0.375,0.982 -0,1.357c1.135,1.135 3.422,3.422 4.75,4.751c0.27,0.27 0.637,0.421 1.018,0.421c0.382,0 0.749,-0.151 1.019,-0.421c2.731,-2.732 10.166,-10.167 12.454,-12.455c0.375,-0.375 0.375,-0.982 -0,-1.357c-0.178,-0.178 -0.369,-0.369 -0.547,-0.547c-0.375,-0.375 -0.982,-0.375 -1.357,0c-2.273,2.273 -9.567,9.567 -11.229,11.229Z"/></svg>`
|
||||
static successDuration = 980
|
||||
static init() {
|
||||
$(function() {
|
||||
$(document).ready(function() {
|
||||
if(navigator.clipboard) {
|
||||
const fragments = document.getElementsByClassName("fragment")
|
||||
for(const fragment of fragments) {
|
||||
const fragmentWrapper = document.createElement("div")
|
||||
fragmentWrapper.className = "doxygen-awesome-fragment-wrapper"
|
||||
const fragmentCopyButton = document.createElement("doxygen-awesome-fragment-copy-button")
|
||||
fragmentCopyButton.innerHTML = DoxygenAwesomeFragmentCopyButton.copyIcon
|
||||
fragmentCopyButton.title = DoxygenAwesomeFragmentCopyButton.title
|
||||
|
||||
fragment.parentNode.replaceChild(fragmentWrapper, fragment)
|
||||
fragmentWrapper.appendChild(fragment)
|
||||
fragmentWrapper.appendChild(fragmentCopyButton)
|
||||
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
copyContent() {
|
||||
const content = this.previousSibling.cloneNode(true)
|
||||
// filter out line number from file listings
|
||||
content.querySelectorAll(".lineno, .ttc").forEach((node) => {
|
||||
node.remove()
|
||||
})
|
||||
let textContent = content.textContent
|
||||
// remove trailing newlines that appear in file listings
|
||||
let numberOfTrailingNewlines = 0
|
||||
while(textContent.charAt(textContent.length - (numberOfTrailingNewlines + 1)) == '\n') {
|
||||
numberOfTrailingNewlines++;
|
||||
}
|
||||
textContent = textContent.substring(0, textContent.length - numberOfTrailingNewlines)
|
||||
navigator.clipboard.writeText(textContent);
|
||||
this.classList.add("success")
|
||||
this.innerHTML = DoxygenAwesomeFragmentCopyButton.successIcon
|
||||
window.setTimeout(() => {
|
||||
this.classList.remove("success")
|
||||
this.innerHTML = DoxygenAwesomeFragmentCopyButton.copyIcon
|
||||
}, DoxygenAwesomeFragmentCopyButton.successDuration);
|
||||
}
|
||||
}
|
||||
|
||||
customElements.define("doxygen-awesome-fragment-copy-button", DoxygenAwesomeFragmentCopyButton)
|
||||
@@ -1,72 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2022 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
class DoxygenAwesomeInteractiveToc {
|
||||
static topOffset = 38
|
||||
static hideMobileMenu = true
|
||||
static headers = []
|
||||
|
||||
static init() {
|
||||
window.addEventListener("load", () => {
|
||||
let toc = document.querySelector(".contents > .toc")
|
||||
if(toc) {
|
||||
toc.classList.add("interactive")
|
||||
if(!DoxygenAwesomeInteractiveToc.hideMobileMenu) {
|
||||
toc.classList.add("open")
|
||||
}
|
||||
document.querySelector(".contents > .toc > h3")?.addEventListener("click", () => {
|
||||
if(toc.classList.contains("open")) {
|
||||
toc.classList.remove("open")
|
||||
} else {
|
||||
toc.classList.add("open")
|
||||
}
|
||||
})
|
||||
|
||||
document.querySelectorAll(".contents > .toc > ul a").forEach((node) => {
|
||||
let id = node.getAttribute("href").substring(1)
|
||||
DoxygenAwesomeInteractiveToc.headers.push({
|
||||
node: node,
|
||||
headerNode: document.getElementById(id)
|
||||
})
|
||||
|
||||
document.getElementById("doc-content")?.addEventListener("scroll",this.throttle(DoxygenAwesomeInteractiveToc.update, 100))
|
||||
})
|
||||
DoxygenAwesomeInteractiveToc.update()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
static update() {
|
||||
let active = DoxygenAwesomeInteractiveToc.headers[0]?.node
|
||||
DoxygenAwesomeInteractiveToc.headers.forEach((header) => {
|
||||
let position = header.headerNode.getBoundingClientRect().top
|
||||
header.node.classList.remove("active")
|
||||
header.node.classList.remove("aboveActive")
|
||||
if(position < DoxygenAwesomeInteractiveToc.topOffset) {
|
||||
active = header.node
|
||||
active?.classList.add("aboveActive")
|
||||
}
|
||||
})
|
||||
active?.classList.add("active")
|
||||
active?.classList.remove("aboveActive")
|
||||
}
|
||||
|
||||
static throttle(func, delay) {
|
||||
let lastCall = 0;
|
||||
return function (...args) {
|
||||
const now = new Date().getTime();
|
||||
if (now - lastCall < delay) {
|
||||
return;
|
||||
}
|
||||
lastCall = now;
|
||||
return setTimeout(() => {func(...args)}, delay);
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2022 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
class DoxygenAwesomeParagraphLink {
|
||||
// Icon from https://fonts.google.com/icons
|
||||
// Licensed under the Apache 2.0 license:
|
||||
// https://www.apache.org/licenses/LICENSE-2.0.html
|
||||
static icon = `<svg xmlns="http://www.w3.org/2000/svg" height="20px" viewBox="0 0 24 24" width="20px"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M17 7h-4v2h4c1.65 0 3 1.35 3 3s-1.35 3-3 3h-4v2h4c2.76 0 5-2.24 5-5s-2.24-5-5-5zm-6 8H7c-1.65 0-3-1.35-3-3s1.35-3 3-3h4V7H7c-2.76 0-5 2.24-5 5s2.24 5 5 5h4v-2zm-3-4h8v2H8z"/></svg>`
|
||||
static title = "Permanent Link"
|
||||
static init() {
|
||||
$(function() {
|
||||
$(document).ready(function() {
|
||||
document.querySelectorAll(".contents a.anchor[id], .contents .groupheader > a[id]").forEach((node) => {
|
||||
let anchorlink = document.createElement("a")
|
||||
anchorlink.setAttribute("href", `#${node.getAttribute("id")}`)
|
||||
anchorlink.setAttribute("title", DoxygenAwesomeParagraphLink.title)
|
||||
anchorlink.classList.add("anchorlink")
|
||||
node.classList.add("anchor")
|
||||
anchorlink.innerHTML = DoxygenAwesomeParagraphLink.icon
|
||||
node.parentElement.appendChild(anchorlink)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2021 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
@media screen and (min-width: 768px) {
|
||||
|
||||
#MSearchBox {
|
||||
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - var(--searchbar-height) - 1px);
|
||||
}
|
||||
|
||||
#MSearchField {
|
||||
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - 66px - var(--searchbar-height));
|
||||
}
|
||||
}
|
||||
@@ -1,105 +0,0 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2021 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
html {
|
||||
/* side nav width. MUST be = `TREEVIEW_WIDTH`.
|
||||
* Make sure it is wide enough to contain the page title (logo + title + version)
|
||||
*/
|
||||
--side-nav-fixed-width: 335px;
|
||||
--menu-display: none;
|
||||
|
||||
--top-height: 120px;
|
||||
--toc-sticky-top: -25px;
|
||||
--toc-max-height: calc(100vh - 2 * var(--spacing-medium) - 25px);
|
||||
}
|
||||
|
||||
#projectname {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
|
||||
@media screen and (min-width: 768px) {
|
||||
html {
|
||||
--searchbar-background: var(--page-background-color);
|
||||
}
|
||||
|
||||
#side-nav {
|
||||
min-width: var(--side-nav-fixed-width);
|
||||
max-width: var(--side-nav-fixed-width);
|
||||
top: var(--top-height);
|
||||
overflow: visible;
|
||||
}
|
||||
|
||||
#nav-tree, #side-nav {
|
||||
height: calc(100vh - var(--top-height)) !important;
|
||||
}
|
||||
|
||||
#top {
|
||||
display: block;
|
||||
border-bottom: none;
|
||||
height: var(--top-height);
|
||||
margin-bottom: calc(0px - var(--top-height));
|
||||
max-width: var(--side-nav-fixed-width);
|
||||
overflow: hidden;
|
||||
background: var(--side-nav-background);
|
||||
}
|
||||
|
||||
#main-nav {
|
||||
float: left;
|
||||
padding-right: 0;
|
||||
}
|
||||
|
||||
.ui-resizable-handle {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.ui-resizable-e {
|
||||
width: 0;
|
||||
}
|
||||
|
||||
#nav-path {
|
||||
position: fixed;
|
||||
right: 0;
|
||||
left: calc(var(--side-nav-fixed-width) + 1px);
|
||||
bottom: 0;
|
||||
width: auto;
|
||||
}
|
||||
|
||||
#doc-content {
|
||||
height: calc(100vh - 31px) !important;
|
||||
padding-bottom: calc(3 * var(--spacing-large));
|
||||
padding-top: calc(var(--top-height) - 80px);
|
||||
box-sizing: border-box;
|
||||
margin-left: var(--side-nav-fixed-width) !important;
|
||||
}
|
||||
|
||||
#MSearchBox {
|
||||
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)));
|
||||
}
|
||||
|
||||
#MSearchField {
|
||||
width: calc(var(--side-nav-fixed-width) - calc(2 * var(--spacing-medium)) - 65px);
|
||||
}
|
||||
|
||||
#MSearchResultsWindow {
|
||||
left: var(--spacing-medium) !important;
|
||||
right: auto;
|
||||
}
|
||||
|
||||
#nav-sync {
|
||||
bottom: 4px;
|
||||
right: auto;
|
||||
left: 300px;
|
||||
width: 35px;
|
||||
top: auto !important;
|
||||
user-select: none;
|
||||
position: fixed
|
||||
}
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
// SPDX-License-Identifier: MIT
|
||||
/**
|
||||
|
||||
Doxygen Awesome
|
||||
https://github.com/jothepro/doxygen-awesome-css
|
||||
|
||||
Copyright (c) 2023 - 2025 jothepro
|
||||
|
||||
*/
|
||||
|
||||
class DoxygenAwesomeTabs {
|
||||
|
||||
static init() {
|
||||
window.addEventListener("load", () => {
|
||||
document.querySelectorAll(".tabbed:not(:empty)").forEach((tabbed, tabbedIndex) => {
|
||||
let tabLinkList = []
|
||||
tabbed.querySelectorAll(":scope > ul > li").forEach((tab, tabIndex) => {
|
||||
tab.id = "tab_" + tabbedIndex + "_" + tabIndex
|
||||
let header = tab.querySelector(".tab-title")
|
||||
let tabLink = document.createElement("button")
|
||||
tabLink.classList.add("tab-button")
|
||||
tabLink.appendChild(header)
|
||||
header.title = header.textContent
|
||||
tabLink.addEventListener("click", () => {
|
||||
tabbed.querySelectorAll(":scope > ul > li").forEach((tab) => {
|
||||
tab.classList.remove("selected")
|
||||
})
|
||||
tabLinkList.forEach((tabLink) => {
|
||||
tabLink.classList.remove("active")
|
||||
})
|
||||
tab.classList.add("selected")
|
||||
tabLink.classList.add("active")
|
||||
})
|
||||
tabLinkList.push(tabLink)
|
||||
if(tabIndex == 0) {
|
||||
tab.classList.add("selected")
|
||||
tabLink.classList.add("active")
|
||||
}
|
||||
})
|
||||
let tabsOverview = document.createElement("div")
|
||||
tabsOverview.classList.add("tabs-overview")
|
||||
let tabsOverviewContainer = document.createElement("div")
|
||||
tabsOverviewContainer.classList.add("tabs-overview-container")
|
||||
tabLinkList.forEach((tabLink) => {
|
||||
tabsOverview.appendChild(tabLink)
|
||||
})
|
||||
tabsOverviewContainer.appendChild(tabsOverview)
|
||||
tabbed.before(tabsOverviewContainer)
|
||||
|
||||
function resize() {
|
||||
let maxTabHeight = 0
|
||||
tabbed.querySelectorAll(":scope > ul > li").forEach((tab, tabIndex) => {
|
||||
let visibility = tab.style.display
|
||||
tab.style.display = "block"
|
||||
maxTabHeight = Math.max(tab.offsetHeight, maxTabHeight)
|
||||
tab.style.display = visibility
|
||||
})
|
||||
tabbed.style.height = `${maxTabHeight + 10}px`
|
||||
}
|
||||
|
||||
resize()
|
||||
new ResizeObserver(resize).observe(tabbed)
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
static resize(tabbed) {
|
||||
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -5,7 +5,9 @@ cflags += -nostdinc \
          -std=c11 \
          -pedantic \
          -Wall \
          -Wextra
          -Wextra \
          -ffunction-sections \
          -fdata-sections

cflags += -isystem ../include

@@ -13,4 +15,7 @@ ldflags += -ffreestanding \
           -nostdlib \
           -fno-builtin \
           -fuse-ld=lld \
           -static
           -static \
           -Wl,--gc-sections \
           -Wl,--strip-all \
           -flto

include/m/status.h (new file, 13 lines)
@@ -0,0 +1,13 @@
#ifndef _M_STATUS_H
#define _M_STATUS_H

#define ST_OK 0
#define ST_SYSCALL_NOT_FOUND 1
#define ST_UNALIGNED 2
#define ST_OOM_ERROR 3
#define ST_NOT_FOUND 4
#define ST_BAD_ADDRESS_SPACE 5
#define ST_PERMISSION_ERROR 6
#define ST_BAD_RESOURCE 7

#endif // _M_STATUS_H

@@ -1,10 +1,16 @@
#ifndef _M_SYSCALL_DEFS_H
#define _M_SYSCALL_DEFS_H

#define SYS_PROC_QUIT 1
#define SYS_PROC_TEST 2

#define SR_OK 0
#define SR_SYSCALL_NOT_FOUND 1
#define SYS_QUIT 1
#define SYS_TEST 2
#define SYS_MAP 3
#define SYS_UNMAP 4
#define SYS_CLONE 5
#define SYS_SCHED 6
#define SYS_MUTEX_CREATE 7
#define SYS_MUTEX_DELETE 8
#define SYS_MUTEX_LOCK 9
#define SYS_MUTEX_UNLOCK 10
#define SYS_ARGUMENT_PTR 11

#endif // _M_SYSCALL_DEFS_H

init/init.c (47 changes)
@@ -1,9 +1,46 @@
#include <limits.h>
#include <m/proc.h>
#include <proc/local.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
#include <string/string.h>

#define MUTEX 2000

LOCAL volatile char letter = 'c';

void app_proc (void) {
    char arg_letter = (char)(uintptr_t)argument_ptr ();

    letter = arg_letter;

    for (;;) {
        mutex_lock (MUTEX);

        for (int i = 0; i < 3; i++)
            test (letter);

        mutex_unlock (MUTEX);
    }

    process_quit ();
}

void app_main (void) {
    m_proc_test ();
    m_proc_test ();
    m_proc_test ();
    m_proc_test ();
    mutex_create (MUTEX);

    letter = 'a';

    process_spawn (&app_proc, (void*)'a');
    process_spawn (&app_proc, (void*)'b');
    process_spawn (&app_proc, (void*)'c');

    for (;;) {
        mutex_lock (MUTEX);

        for (int i = 0; i < 3; i++)
            test (letter);

        mutex_unlock (MUTEX);
    }
}

@@ -1,3 +1,3 @@
S += init.S
c += init.c

o += init.o

kernel/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*.json
.cache

kernel/Doxyfile (3021 changes; file diff suppressed because it is too large)

@@ -32,8 +32,4 @@ format:
        ':!uACPI/tests/**' \
        ':!libk/printf*')

doxygen:
        mkdir -p ../docs/kernel/doxygen
        doxygen

.PHONY: all clean format doxygen
.PHONY: all clean format

@@ -4,6 +4,7 @@
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
@@ -16,86 +17,94 @@
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24

/// ID of Local APIC
/* ID of Local APIC */
#define LAPIC_ID 0x20
/// End of interrupt register
/* End of interrupt register */
#define LAPIC_EOI 0xB0
/// Spurious interrupt vector register
/* Spurious interrupt vector register */
#define LAPIC_SIVR 0xF0
/// Interrupt command register
/* Interrupt command register */
#define LAPIC_ICR 0x300
/// LVT timer register
/* LVT timer register */
#define LAPIC_LVTTR 0x320
/// Timer initial count register
/* Timer initial count register */
#define LAPIC_TIMICT 0x380
/// Timer current count register
/* Timer current count register */
#define LAPIC_TIMCCT 0x390
/// Divide config register
/* Divide config register */
#define LAPIC_DCR 0x3E0

/// Table of IOAPICS
static struct acpi_madt_ioapic apics[IOAPICS_MAX];
#define DIVIDER_VALUE 0x0B

struct ioapic {
    struct acpi_madt_ioapic table_data;
    spin_lock_t lock;
    uintptr_t mmio_base;
};

/* Table of IOAPICS */
static struct ioapic ioapics[IOAPICS_MAX];
/* Table of interrupt source overrides */
/* clang-format off */
/// Table of interrupt source overrides
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
/* clang-format on */
/// Count of actual IOAPIC entries
/* Count of actual IOAPIC entries */
static size_t ioapic_entries = 0;
/// Count of actual interrupt source overrides
/* Count of actual interrupt source overrides */
static size_t intr_src_override_entries = 0;
/// Local APIC MMIO base address. It comes from MSR_APIC_BASE
static uintptr_t lapic_mmio_base = 0;

/// Read IOAPIC
static uint32_t amd64_ioapic_read (uintptr_t vaddr, uint32_t reg) {
    *(volatile uint32_t*)vaddr = reg;
    return *(volatile uint32_t*)(vaddr + 0x10);
static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;

/* Read IOAPIC */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
    spin_lock_ctx_t ctxioar;

    spin_lock (&ioapic->lock, &ctxioar);
    *(volatile uint32_t*)ioapic->mmio_base = reg;
    uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
    spin_unlock (&ioapic->lock, &ctxioar);
    return ret;
}

/// Write IOAPIC
static void amd64_ioapic_write (uintptr_t vaddr, uint32_t reg, uint32_t value) {
    *(volatile uint32_t*)vaddr = reg;
    *(volatile uint32_t*)(vaddr + 0x10) = value;
/* Write IOAPIC */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
    spin_lock_ctx_t ctxioaw;

    spin_lock (&ioapic->lock, &ctxioaw);
    *(volatile uint32_t*)ioapic->mmio_base = reg;
    *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
    spin_unlock (&ioapic->lock, &ctxioaw);
}

/// Find an IOAPIC corresposting to provided IRQ
static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
    struct acpi_madt_ioapic* apic = NULL;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
/* Find an IOAPIC corresposting to provided IRQ */
static struct ioapic* amd64_ioapic_find (uint32_t irq) {
    struct ioapic* ioapic = NULL;

    for (size_t i = 0; i < ioapic_entries; i++) {
        apic = &apics[i];
        uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, 1);
        ioapic = &ioapics[i];
        uint32_t version = amd64_ioapic_read (ioapic, 1);
        uint32_t max = ((version >> 16) & 0xFF);

        if ((irq >= apic->gsi_base) && (irq <= (apic->gsi_base + max)))
            return apic;
        if ((irq >= ioapic->table_data.gsi_base) && (irq <= (ioapic->table_data.gsi_base + max)))
            return ioapic;
    }

    return NULL;
}

/**
 * @brief Route IRQ to an IDT entry of a given Local APIC.
/*
 * Route IRQ to an IDT entry of a given Local APIC.
 *
 * @param vec
 * Interrupt vector number, which will be delivered to the CPU
 *
 * @param irq
 * Legacy IRQ number to be routed. Can be changed by an interrupt source override
 * vec - Interrupt vector number, which will be delivered to the CPU.
 * irq -Legacy IRQ number to be routed. Can be changed by an interrupt source override
 * into a different GSI.
 *
 * @param flags
 * IOAPIC redirection flags.
 *
 * @param lapic_id
 * Local APIC that will receive the interrupt.
 * flags - IOAPIC redirection flags.
 * lapic_id - Local APIC that will receive the interrupt.
 */
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
    struct acpi_madt_ioapic* apic = NULL;
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
    struct ioapic* ioapic = NULL;
    struct acpi_madt_interrupt_source_override* override;
    bool found_override = false;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    for (size_t i = 0; i < intr_src_override_entries; i++) {
        override = &intr_src_overrides[i];
@@ -108,65 +117,26 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
    uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);

    if (found_override) {
        uint8_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
        uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
        uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
        uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
        calc_flags |= (uint64_t)mode << 15;
        calc_flags |= (uint64_t)polarity << 13;

        calc_flags |= flags;
    } else {
        calc_flags |= flags;
    }

    apic = amd64_ioapic_find (irq);
    uint32_t gsi = found_override ? override->gsi : irq;

    if (apic == NULL)
    ioapic = amd64_ioapic_find (gsi);

    if (ioapic == NULL)
        return;

    uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;
    uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;

    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
                        (uint32_t)calc_flags);

    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg + 1,
                        (uint32_t)(calc_flags >> 32));
    amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
    amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
}

/// Mask a given IRQ
void amd64_ioapic_mask (uint8_t irq) {
    struct acpi_madt_ioapic* apic;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    apic = amd64_ioapic_find (irq);

    if (apic == NULL)
        return;

    uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;

    uint32_t value = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg);
    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
                        value | (1 << 16));
}

/// Unmask a given IRQ
void amd64_ioapic_unmask (uint8_t irq) {
    struct acpi_madt_ioapic* apic;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    apic = amd64_ioapic_find (irq);

    if (apic == NULL)
        return;

    uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;

    uint32_t value = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg);
    amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
                        value & ~(1 << 16));
}

/// Find and initialize the IOAPIC
/* Find and initialize the IOAPIC */
void amd64_ioapic_init (void) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

@@ -187,11 +157,15 @@ void amd64_ioapic_init (void) {

    switch (current->type) {
    case ACPI_MADT_ENTRY_TYPE_IOAPIC: {
        struct acpi_madt_ioapic* ioapic = (struct acpi_madt_ioapic*)current;
        mm_map_kernel_page ((uintptr_t)ioapic->address,
                            (uintptr_t)hhdm->offset + (uintptr_t)ioapic->address,
                            MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
        apics[ioapic_entries++] = *ioapic;
        struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
        mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
                            (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
                            MM_PG_PRESENT | MM_PG_RW);
        ioapics[ioapic_entries++] = (struct ioapic){
            .lock = SPIN_LOCK_INIT,
            .table_data = *ioapic_table_data,
            .mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
        };
    } break;
    case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
        struct acpi_madt_interrupt_source_override* override =
@@ -204,99 +178,94 @@ void amd64_ioapic_init (void) {
    }
}

/// Get MMIO base of Local APIC
static uintptr_t amd64_lapic_base (void) { return lapic_mmio_base; }
/* Get MMIO base of Local APIC */
static uintptr_t amd64_lapic_base (void) { return thiscpu->lapic_mmio_base; }

/// Write Local APIC
/* Write Local APIC */
static void amd64_lapic_write (uint32_t reg, uint32_t value) {
    *(volatile uint32_t*)(amd64_lapic_base () + reg) = value;
}

/// Read Local APIC
/* Read Local APIC */
static uint32_t amd64_lapic_read (uint32_t reg) {
    return *(volatile uint32_t*)(amd64_lapic_base () + reg);
}

/// Get ID of Local APIC
/* Get ID of Local APIC */
uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }

/// Send End of interrupt command to Local APIC
/* Send End of interrupt command to Local APIC */
void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }

/// Set initial counter value in Local APIC timer
void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick); }

/**
 * @brief Calibrate Local APIC to send interrupts in a set interval.
/*
 * Calibrate Local APIC to send interrupts in a set interval.
 *
 * @param us
 * Period length in microseconds
 *
 * @return amount of ticsk in a given period
 * us - Period length in microseconds
 */
static uint32_t amd64_lapic_calibrate (uint32_t us) {
    amd64_lapic_write (LAPIC_DCR, 0x0B);
    spin_lock_ctx_t ctxlacb;

    spin_lock (&lapic_calibration_lock, &ctxlacb);

    amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);

    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));

    amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);

    sleep_micro (us);

    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
    uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
    DEBUG ("timer ticks = %u\n", ticks);

    spin_unlock (&lapic_calibration_lock, &ctxlacb);

    return ticks;
}

/**
 * @brief Starts a Local APIC, configures LVT timer to
 * send interrupts at \ref SCHED_PREEMPT_TIMER.
/*
 * Starts a Local APIC, configures LVT timer to send interrupts at SCHED_PREEMPT_TIMER.
 *
 * @param ticks
 * Initial tick count
 * ticks - Initial tick count
 */
static void amd64_lapic_start (uint32_t ticks) {
    amd64_lapic_write (LAPIC_DCR, 0x0B);

    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));

    amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
    amd64_lapic_write (LAPIC_TIMICT, ticks);
    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
}

/**
 * @brief Initialize Local APIC, configure to send timer interrupts
 * at a given period. See \ref amd64_lapic_calibrate and \ref amd64_lapic_start.
/*
 * Initialize Local APIC, configure to send timer interrupts at a given period. See
 * amd64_lapic_calibrate and amd64_lapic_start.
 */
uint64_t amd64_lapic_init (uint32_t us) {
void amd64_lapic_init (uint32_t us) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11));

    uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
    lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
    thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;

    mm_map_kernel_page (lapic_paddr, lapic_mmio_base,
                        MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
    mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);

    amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));

    uint32_t ticks = amd64_lapic_calibrate (us);

    amd64_lapic_start (ticks);

    return ticks;
    thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
    amd64_lapic_start (thiscpu->lapic_ticks);
}

/**
 * @brief Send an IPI to a given Local APIC. This till invoke an IDT stub located at vec.
/*
 * Send an IPI to a given Local APIC. This till invoke an IDT stub located at vec.
 *
 * @param lapic_id
 * Target Local APIC
 *
 * @param vec
 * Interrupt vector/IDT stub, which will be invoked by the IPI.
 * lapic_id - Target Local APIC
 * vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
 */
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
    /* wait for previous IPI to finish */
    while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) {
        __asm__ volatile ("pause");
    }

    amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
    amd64_lapic_write (LAPIC_ICR, vec);
    amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
}

@@ -3,15 +3,12 @@

#include <libk/std.h>

void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_mask (uint8_t irq);
void amd64_ioapic_unmask (uint8_t irq);
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_init (void);

uint32_t amd64_lapic_id (void);
void amd64_lapic_tick (uint32_t tick);
void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec);
uint64_t amd64_lapic_init (uint32_t us);
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
void amd64_lapic_init (uint32_t us);

#endif // _KERNEL_AMD64_APIC_H

@@ -1,5 +1,3 @@
/** @file */

#include <amd64/apic.h>
#include <amd64/debug.h>
#include <amd64/hpet.h>
@@ -11,6 +9,7 @@
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
@@ -24,17 +23,16 @@

#define UACPI_MEMORY_BUFFER_MAX 4096

/** @cond DOXYGEN_IGNORE */
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
/** @endcond */

/**
 * @brief The kernel starts booting here. This is the entry point after
 * Limine hands control. We set up all the necessary platform-dependent
 * subsystems/drivers and jump into the init app.
/*
 * The kernel starts booting here. This is the entry point after Limine hands control. We set up all
 * the necessary platform-dependent subsystems/drivers and jump into the init app.
 */
void bootmain (void) {
    struct cpu* bsp_cpu = cpu_make ();
    struct limine_mp_response* mp = limine_mp_request.response;

    struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);

    amd64_init (bsp_cpu, false);
    syscall_init ();
@@ -51,8 +49,6 @@ void bootmain (void) {

    smp_init ();

    mm_init2 ();

    proc_init ();

    for (;;)

@@ -6,32 +6,37 @@
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
|
||||
/// Port for printing to serial
|
||||
/* Port for printing to serial */
|
||||
/* TODO: Make this configurable */
|
||||
#define PORT_COM1 0x03F8
|
||||
/// \ref debugprintf buffer size
|
||||
/* debugprintf buffer size */
|
||||
#define BUFFER_SIZE 1024
|
||||
/// Lock, which ensures that prints to the serial port are atomic
|
||||
/*
|
||||
* Lock, which ensures that prints to the serial port are atomic (ie. one debugprintf is atomic in
|
||||
* itself).
|
||||
*/
|
||||
static spin_lock_t serial_lock = SPIN_LOCK_INIT;
|
||||
|
||||
static bool debug_init = false;
|
||||
|
||||
/// Block until TX buffer is empty
|
||||
/* Block until TX buffer is empty */
|
||||
static bool amd64_debug_serial_tx_empty (void) {
|
||||
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
|
||||
}
|
||||
|
||||
/// Write a single character to serial
|
||||
/* Write a single character to serial */
|
||||
static void amd64_debug_serial_write (char x) {
|
||||
while (!amd64_debug_serial_tx_empty ())
|
||||
;
|
||||
amd64_io_outb (PORT_COM1, (uint8_t)x);
|
||||
}
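The TX-empty check above reads `PORT_COM1 + 5`, which on a 16550-style UART is the Line Status Register; bit 5 (mask 0x20) signals that the transmitter holding register is empty. A small reference sketch with illustrative names:

```c
/* Illustrative 16550 names for the test in amd64_debug_serial_tx_empty (). */
#define UART_LSR_OFFSET 5      /* PORT_COM1 + 5: Line Status Register */
#define UART_LSR_THRE   0x20   /* bit 5: transmitter holding register empty */
```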
|
||||
|
||||
/**
|
||||
* @brief Formatted printing to serial. \ref serial_lock ensures that
|
||||
* all prints are atomic.
|
||||
/*
|
||||
* Formatted printing to serial. serial_lock ensures that all prints are atomic.
|
||||
*/
|
||||
void debugprintf (const char* fmt, ...) {
|
||||
spin_lock_ctx_t ctxdbgp;
|
||||
|
||||
if (!debug_init)
|
||||
return;
|
||||
|
||||
@@ -47,17 +52,17 @@ void debugprintf (const char* fmt, ...) {
|
||||
|
||||
const char* p = buffer;
|
||||
|
||||
spin_lock (&serial_lock);
|
||||
spin_lock (&serial_lock, &ctxdbgp);
|
||||
|
||||
while (*p) {
|
||||
amd64_debug_serial_write (*p);
|
||||
p++;
|
||||
}
|
||||
|
||||
spin_unlock (&serial_lock);
|
||||
spin_unlock (&serial_lock, &ctxdbgp);
|
||||
}
|
||||
|
||||
/// Initialize serial
|
||||
/* Initialize serial */
|
||||
void amd64_debug_init (void) {
|
||||
amd64_io_outb (PORT_COM1 + 1, 0x00);
|
||||
amd64_io_outb (PORT_COM1 + 3, 0x80);
|
||||
|
||||
@@ -7,17 +7,15 @@
|
||||
|
||||
#define GDT_KCODE 0x08
|
||||
#define GDT_KDATA 0x10
|
||||
#define GDT_UCODE 0x18
|
||||
#define GDT_UDATA 0x20
|
||||
#define GDT_UDATA 0x18
|
||||
#define GDT_UCODE 0x20
|
||||
#define GDT_TSS 0x28
|
||||
|
||||
/// Size of kernel stack
|
||||
/* Size of kernel stack */
|
||||
#define KSTACK_SIZE (32 * 1024)
|
||||
|
||||
/**
|
||||
* @file
|
||||
*
|
||||
* @brief 64-bit GDT structure. For more info see:
|
||||
/*
|
||||
* 64-bit GDT structure. For more info see:
|
||||
* - https://wiki.osdev.org/Global_Descriptor_Table
|
||||
* - https://wiki.osdev.org/GDT_Tutorial
|
||||
*/
|
||||
@@ -31,11 +29,13 @@ struct gdt_entry {
|
||||
uint8_t basehigh;
|
||||
} PACKED;
|
||||
|
||||
/* Struct that gets loaded into GDTR */
|
||||
struct gdt_ptr {
|
||||
uint16_t limit;
|
||||
uint64_t base;
|
||||
} PACKED;
|
||||
|
||||
/* New, extended GDT (we need to extend Limine's GDT) */
|
||||
struct gdt_extended {
|
||||
struct gdt_entry old[5];
|
||||
struct gdt_entry tsslow;
|
||||
|
||||
@@ -10,71 +10,113 @@
|
||||
#include <uacpi/tables.h>
|
||||
#include <uacpi/uacpi.h>
|
||||
|
||||
/**
|
||||
* @file
|
||||
*
|
||||
* @brief HPET (High Precision Event Timer) driver code.
|
||||
* See more at https://wiki.osdev.org/HPET
|
||||
/*
|
||||
* HPET (High Precision Event Timer) driver code. See more at https://wiki.osdev.org/HPET
|
||||
*/
|
||||
|
||||
/// HPET Main Counter Value Register
|
||||
/* HPET Main Counter Value Register */
|
||||
#define HPET_MCVR 0xF0
|
||||
/// HPET General Configuration Register
|
||||
/* HPET General Configuration Register */
|
||||
#define HPET_GCR 0x10
|
||||
/// HPET General Capabilities and ID Register
|
||||
/* HPET General Capabilities and ID Register */
|
||||
#define HPET_GCIDR 0x00
|
||||
|
||||
/// Set whether we should use 32-bit or 64-bit reads/writes
/* Set whether we should use 32-bit or 64-bit reads/writes */
|
||||
static bool hpet_32bits = 1;
|
||||
/// Physical address for HPET MMIO
|
||||
/* Physical address for HPET MMIO */
|
||||
static uintptr_t hpet_paddr;
|
||||
/// HPET period in femtoseconds
|
||||
/* HPET period in femtoseconds */
|
||||
static uint64_t hpet_period_fs;
|
||||
/// Lock, which protects concurrent access. See \ref amd64/smp.c
|
||||
/* Lock, which protects concurrent access. See amd64/smp.c */
|
||||
static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
|
||||
|
||||
/// Read a HPET register. Assumes caller holds \ref hpet_lock
|
||||
static uint64_t amd64_hpet_read (uint32_t reg) {
|
||||
/* Read a HPET register. Assumes caller holds hpet_lock */
|
||||
static uint64_t amd64_hpet_read64 (uint32_t reg) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
|
||||
return (hpet_32bits ? *(volatile uint32_t*)(hpet_vaddr + reg)
|
||||
: *(volatile uint64_t*)(hpet_vaddr + reg));
|
||||
return *(volatile uint64_t*)(hpet_vaddr + reg);
|
||||
}
|
||||
|
||||
/// Write a HPET register. Assumes caller holds \ref hpet_lock
|
||||
static void amd64_hpet_write (uint32_t reg, uint64_t value) {
|
||||
static uint32_t amd64_hpet_read32 (uint32_t reg) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
|
||||
if (hpet_32bits)
|
||||
*(volatile uint32_t*)(hpet_vaddr + reg) = (value & 0xFFFFFFFF);
|
||||
else
|
||||
*(volatile uint64_t*)(hpet_vaddr + reg) = value;
|
||||
return *(volatile uint32_t*)(hpet_vaddr + reg);
|
||||
}
|
||||
|
||||
/// Read current value of \ref HPET_MCVR register.
|
||||
static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }
|
||||
/* Write a HPET register. Assumes caller holds hpet_lock */
|
||||
static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
|
||||
*(volatile uint64_t*)(hpet_vaddr + reg) = value;
|
||||
}
|
||||
|
||||
/// Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being held.
|
||||
static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
|
||||
*(volatile uint32_t*)(hpet_vaddr + reg) = value;
|
||||
}
|
||||
|
||||
/* Read current value of HPET_MCVR register. */
|
||||
|
||||
static uint64_t amd64_hpet_read_counter (void) {
|
||||
uint64_t value;
|
||||
spin_lock_ctx_t ctxhrc;
|
||||
|
||||
spin_lock (&hpet_lock, &ctxhrc);
|
||||
|
||||
if (!hpet_32bits)
|
||||
value = amd64_hpet_read64 (HPET_MCVR);
|
||||
else {
|
||||
uint32_t hi1, lo, hi2;
|
||||
do {
|
||||
hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
|
||||
lo = amd64_hpet_read32 (HPET_MCVR + 0);
|
||||
hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
|
||||
} while (hi1 != hi2);
|
||||
|
||||
value = ((uint64_t)hi1 << 32) | lo;
|
||||
}
|
||||
|
||||
spin_unlock (&hpet_lock, &ctxhrc);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
static void amd64_hpet_write_counter (uint64_t value) {
|
||||
spin_lock_ctx_t ctxhwc;
|
||||
|
||||
spin_lock (&hpet_lock, &ctxhwc);
|
||||
|
||||
if (!hpet_32bits)
|
||||
amd64_hpet_write64 (HPET_MCVR, value);
|
||||
else {
|
||||
amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
|
||||
amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
|
||||
}
|
||||
|
||||
spin_unlock (&hpet_lock, &ctxhwc);
|
||||
}
|
||||
|
||||
/* Sleep for a given number of microseconds. This time can last longer due to hpet_lock being
* held. */
void amd64_hpet_sleep_micro (uint64_t us) {
spin_lock (&hpet_lock);
if (hpet_period_fs == 0)
return;

uint64_t start = amd64_hpet_timestamp ();
uint64_t target_fs = us * 1000000000ULL;
uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL);
uint64_t start = amd64_hpet_read_counter ();

for (;;) {
uint64_t current = amd64_hpet_timestamp ();
uint64_t dt = current - start;
uint64_t now = amd64_hpet_read_counter ();

if ((dt * hpet_period_fs) >= target_fs)
if ((now - start) >= ticks_to_wait)
break;

__asm__ volatile ("pause" ::: "memory");
}

spin_unlock (&hpet_lock);
}
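A quick sanity check of the tick conversion used above, with illustrative numbers (a 10 MHz HPET reports a 100 ns period, i.e. 100,000,000 fs per tick):

```c
#include <assert.h>
#include <stdint.h>

int main (void) {
    uint64_t hpet_period_fs = 100000000ULL; /* 100 ns per tick, illustrative */
    uint64_t us = 10000ULL;                 /* 10 ms */

    /* Same expression as in amd64_hpet_sleep_micro: (us * 1000) ns divided by the period in ns. */
    uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL);

    assert (ticks_to_wait == 100000ULL);    /* 10 ms / 100 ns = 100,000 ticks */
    return 0;
}
```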
|
||||
|
||||
/// Initialize HPET
|
||||
/* Initialize HPET */
|
||||
void amd64_hpet_init (void) {
|
||||
struct uacpi_table hpet_table;
|
||||
uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);
|
||||
@@ -87,22 +129,14 @@ void amd64_hpet_init (void) {
|
||||
hpet_paddr = (uintptr_t)hpet->address.address;
|
||||
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
|
||||
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
|
||||
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
|
||||
|
||||
hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1;
|
||||
uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
|
||||
hpet_32bits = (caps & (1 << 13)) ? 0 : 1;
|
||||
|
||||
/* reset */
|
||||
amd64_hpet_write (HPET_GCR, 0);
|
||||
amd64_hpet_write (HPET_MCVR, 0);
|
||||
amd64_hpet_write (HPET_GCR, 1);
|
||||
hpet_period_fs = (uint32_t)(caps >> 32);
|
||||
|
||||
uint64_t gcidr = amd64_hpet_read (HPET_GCIDR);
|
||||
if (hpet_32bits) {
|
||||
uint32_t low = (uint32_t)gcidr;
|
||||
uint32_t high = (uint32_t)amd64_hpet_read (HPET_GCIDR + 4);
|
||||
gcidr = (((uint64_t)high << 32) | low);
|
||||
}
|
||||
|
||||
hpet_period_fs = (gcidr >> 32);
|
||||
amd64_hpet_write64 (HPET_GCR, 0);
|
||||
amd64_hpet_write_counter (0);
|
||||
amd64_hpet_write64 (HPET_GCR, 1);
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
#define TSS 0x80
|
||||
#define TSS_PRESENT 0x89
|
||||
|
||||
/// Set a GDT entry
|
||||
/* Set a GDT entry */
|
||||
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
|
||||
uint8_t acc, uint8_t gran) {
|
||||
ent->baselow = (base & 0xFFFF);
|
||||
@@ -20,7 +20,7 @@ static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32
|
||||
ent->access = acc;
|
||||
}
|
||||
|
||||
/// Initialize GDT and TSS structures for a given CPU
|
||||
/* Initialize GDT and TSS structures for a given CPU */
|
||||
static void amd64_gdt_init (struct cpu* cpu) {
|
||||
volatile struct tss* tss = &cpu->tss;
|
||||
volatile struct gdt_extended* gdt = &cpu->gdt;
|
||||
@@ -39,8 +39,8 @@ static void amd64_gdt_init (struct cpu* cpu) {
|
||||
amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
|
||||
amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
|
||||
amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
|
||||
amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xFA, 0xA0);
|
||||
amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xF2, 0xC0);
|
||||
amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
|
||||
amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
|
||||
amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
|
||||
|
||||
uint32_t tssbasehigh = (tssbase >> 32);
|
||||
@@ -51,11 +51,13 @@ static void amd64_gdt_init (struct cpu* cpu) {
|
||||
gdt->tsshigh.access = 0;
|
||||
gdt->tsshigh.gran = 0;
|
||||
|
||||
/* Load GDTR */
|
||||
struct gdt_ptr gdtr;
|
||||
gdtr.limit = sizeof (*gdt) - 1;
|
||||
gdtr.base = (uint64_t)gdt;
|
||||
__asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");
|
||||
|
||||
/* Reload CS */
|
||||
__asm__ volatile ("pushq %[kcode]\n"
|
||||
"lea 1f(%%rip), %%rax\n"
|
||||
"pushq %%rax\n"
|
||||
@@ -72,11 +74,10 @@ static void amd64_gdt_init (struct cpu* cpu) {
|
||||
__asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Initialize essentials (GDT, TSS, IDT) for a given CPU
|
||||
/*
|
||||
* Initialize essentials (GDT, TSS, IDT) for a given CPU
|
||||
*
|
||||
* @param load_idt
|
||||
* Tell whether the IDT needs to be loaded. It only has to be loaded once on
|
||||
* load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on
|
||||
* the BSP
|
||||
*/
|
||||
void amd64_init (struct cpu* cpu, bool load_idt) {
|
||||
|
||||
@@ -7,10 +7,12 @@
|
||||
#include <irq/irq.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <m/syscall_defs.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/irq.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/spin.h>
|
||||
#include <syscall/syscall.h>
|
||||
|
||||
/* 8259 PIC defs. */
|
||||
#define PIC1 0x20
|
||||
@@ -39,7 +41,7 @@
|
||||
|
||||
#define IDT_ENTRIES_MAX 256
|
||||
|
||||
/// 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table
/* 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table */
|
||||
struct idt_entry {
|
||||
uint16_t intrlow;
|
||||
uint16_t kernel_cs;
|
||||
@@ -55,18 +57,14 @@ struct idt {
|
||||
uint64_t base;
|
||||
} PACKED;
|
||||
|
||||
/** @cond DOXYGEN_IGNORE */
|
||||
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
|
||||
/** @endcond */
|
||||
static volatile struct idt idt;
|
||||
|
||||
/// Remaps and disables old 8259 PIC, since we'll be using APIC.
|
||||
/* Remaps and disables old 8259 PIC, since we'll be using APIC. */
|
||||
static void amd64_init_pic (void) {
|
||||
/** @cond DOXYGEN_IGNORE */
|
||||
#define IO_OP(fn, ...) \
|
||||
fn (__VA_ARGS__); \
|
||||
amd64_io_wait ()
|
||||
/** @endcond */
|
||||
|
||||
IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
|
||||
IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
|
||||
@@ -87,7 +85,7 @@ static void amd64_init_pic (void) {
|
||||
#undef IO_OP
|
||||
}
|
||||
|
||||
/// Set IDT entry
|
||||
/* Set IDT entry */
|
||||
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
|
||||
uint8_t ist) {
|
||||
ent->intrlow = (handler & 0xFFFF);
|
||||
@@ -99,18 +97,16 @@ static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uin
|
||||
ent->resv = 0;
|
||||
}
|
||||
|
||||
/// Load the IDT
|
||||
/* Load the IDT */
|
||||
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
|
||||
|
||||
/// Initialize IDT entries
|
||||
/* Initialize IDT entries */
|
||||
static void amd64_idt_init (void) {
|
||||
memset ((void*)idt_entries, 0, sizeof (idt_entries));
|
||||
|
||||
/** @cond DOXYGEN_IGNORE */
|
||||
#define IDT_ENTRY(n, ist) \
|
||||
extern void amd64_intr##n (void); \
|
||||
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
|
||||
/** @endcond */
|
||||
/* clang-format off */
|
||||
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
|
||||
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
|
||||
@@ -127,6 +123,8 @@ static void amd64_idt_init (void) {
|
||||
|
||||
IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
|
||||
IDT_ENTRY (TLB_SHOOTDOWN, 1);
|
||||
IDT_ENTRY (CPU_REQUEST_SCHED, 1);
|
||||
IDT_ENTRY (CPU_SPURIOUS, 1);
|
||||
/* clang-format on */
|
||||
#undef IDT_ENTRY
|
||||
|
||||
@@ -136,13 +134,7 @@ static void amd64_idt_init (void) {
|
||||
amd64_load_idt ();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Handle CPU exception and dump registers. If incoming CS has CPL3, kill the
|
||||
* process.
|
||||
*
|
||||
* @param regs
|
||||
* saved registers
|
||||
*/
|
||||
/* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */
|
||||
static void amd64_intr_exception (struct saved_regs* regs) {
|
||||
DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);
|
||||
|
||||
@@ -171,10 +163,23 @@ static void amd64_intr_exception (struct saved_regs* regs) {
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle incoming interrupt, dispatch IRQ handlers.
|
||||
/* Handle incoming interrupt, dispatch IRQ handlers. */
|
||||
void amd64_intr_handler (void* stack_ptr) {
|
||||
spin_lock_ctx_t ctxcpu, ctxpr;
|
||||
|
||||
amd64_load_kernel_cr3 ();
|
||||
|
||||
struct saved_regs* regs = stack_ptr;
|
||||
|
||||
spin_lock (&thiscpu->lock, &ctxcpu);
|
||||
struct proc* proc_current = thiscpu->proc_current;
|
||||
spin_lock (&proc_current->lock, &ctxpr);
|
||||
|
||||
memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));
|
||||
|
||||
spin_unlock (&proc_current->lock, &ctxpr);
|
||||
spin_unlock (&thiscpu->lock, &ctxcpu);
|
||||
|
||||
if (regs->trap <= 31) {
|
||||
amd64_intr_exception (regs);
|
||||
} else {
|
||||
@@ -183,18 +188,12 @@ void amd64_intr_handler (void* stack_ptr) {
|
||||
struct irq* irq = irq_find (regs->trap);
|
||||
|
||||
if (irq != NULL) {
|
||||
if ((irq->flags & IRQ_INTERRUPT_SAFE))
|
||||
__asm__ volatile ("sti");
|
||||
|
||||
irq->func (irq->arg, stack_ptr);
|
||||
|
||||
if ((irq->flags & IRQ_INTERRUPT_SAFE))
|
||||
__asm__ volatile ("cli");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initialize interrupts
|
||||
/* Initialize interrupts */
|
||||
void amd64_intr_init (void) {
|
||||
amd64_init_pic ();
|
||||
amd64_idt_init ();
|
||||
@@ -202,39 +201,21 @@ void amd64_intr_init (void) {
|
||||
|
||||
/* Aux. */
|
||||
|
||||
/// Save RFLAGS of the current CPU
|
||||
/* Save RFLAGS of the current CPU */
|
||||
static uint64_t amd64_irq_save_flags (void) {
|
||||
uint64_t rflags;
|
||||
__asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
|
||||
return rflags;
|
||||
}
|
||||
|
||||
/// Restore interrupts (IF bit) from RFLAGS
|
||||
/* Restore interrupts (IF bit) from RFLAGS */
|
||||
static void amd64_irq_restore_flags (uint64_t rflags) {
|
||||
if (rflags & (1ULL << 9))
|
||||
__asm__ volatile ("sti");
|
||||
}
|
||||
|
||||
/// Save current interrupt state
|
||||
void irq_save (void) {
|
||||
int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
|
||||
if (prev == 0)
|
||||
thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
|
||||
}
|
||||
/* Save current interrupt state */
|
||||
void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
|
||||
|
||||
/// Restore interrupt state
|
||||
void irq_restore (void) {
|
||||
int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
|
||||
if (prev == 1)
|
||||
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
|
||||
}
|
||||
|
||||
/// Map custom IRQ mappings to legacy IRQs
|
||||
uint8_t amd64_resolve_irq (uint8_t irq) {
|
||||
static const uint8_t mappings[] = {
|
||||
[SCHED_PREEMPT_TIMER] = 0,
|
||||
[TLB_SHOOTDOWN] = 1,
|
||||
};
|
||||
|
||||
return mappings[irq];
|
||||
}
|
||||
/* Restore interrupt state */
|
||||
void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
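The rest of this diff migrates callers from the old per-CPU nesting counter to this caller-owned context. A minimal usage sketch, assuming `spin_lock`/`spin_unlock` forward the context to `irq_save`/`irq_restore` the way the call sites in this diff suggest (that forwarding is an assumption, not shown here):

```c
/* Sketch of the caller-owned IRQ-state pattern used throughout this diff. */
static void example_critical_section (spin_lock_t* lock) {
    spin_lock_ctx_t ctx;       /* saved RFLAGS now lives with the caller, not in thiscpu */

    spin_lock (lock, &ctx);    /* presumably does irq_save (&ctx) before spinning */
    /* ... critical section ... */
    spin_unlock (lock, &ctx);  /* presumably does irq_restore (&ctx) after releasing */
}
```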
|
||||
|
||||
@@ -32,7 +32,6 @@ struct saved_regs {
|
||||
} PACKED;
|
||||
|
||||
void amd64_load_idt (void);
|
||||
uint8_t amd64_resolve_irq (uint8_t irq);
|
||||
void amd64_intr_init (void);
|
||||
|
||||
#endif // _KERNEL_AMD64_INTR_H
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
#ifndef _KERNEL_AMD64_INTR_DEFS_H
|
||||
#define _KERNEL_AMD64_INTR_DEFS_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Definitions for custom, nonstandard IDT entries. They have to be remapped
|
||||
* by \ref amd64_resolve_irq into legacy IRQs.
|
||||
*/
|
||||
/* Definitions for custom, nonstandard IDT entries. They have to be remapped by amd64_resolve_irq
|
||||
* into legacy IRQs. */
|
||||
|
||||
#define SCHED_PREEMPT_TIMER 80
|
||||
#define TLB_SHOOTDOWN 81
|
||||
#define CPU_REQUEST_SCHED 82
|
||||
#define CPU_SPURIOUS 255
|
||||
|
||||
#endif // _KERNEL_AMD64_INTR_DEFS_H
|
||||
|
||||
@@ -7,33 +7,41 @@
|
||||
pushq $z;
|
||||
|
||||
#define no_err(z) \
|
||||
pushq $0; \
|
||||
pushq $0; \
|
||||
pushq $z;
|
||||
|
||||
#define make_intr_stub(x, n) \
|
||||
.global amd64_intr ## n; \
|
||||
amd64_intr ## n:; \
|
||||
x(n); \
|
||||
cli; \
|
||||
;\
|
||||
push_regs; \
|
||||
;\
|
||||
cld; \
|
||||
;\
|
||||
movq %rsp, %rdi; \
|
||||
;\
|
||||
movq %rsp, %rbp; \
|
||||
;\
|
||||
subq $8, %rsp; \
|
||||
andq $~0xF, %rsp; \
|
||||
;\
|
||||
callq amd64_intr_handler; \
|
||||
;\
|
||||
movq %rbp, %rsp; \
|
||||
;\
|
||||
pop_regs; \
|
||||
addq $16, %rsp; \
|
||||
;\
|
||||
#define make_intr_stub(x, n) \
|
||||
.global amd64_intr ## n; \
|
||||
amd64_intr ## n:; \
|
||||
x(n); \
|
||||
cli; \
|
||||
; \
|
||||
push_regs; \
|
||||
; \
|
||||
movw $0x10, %ax; \
|
||||
movw %ax, %ds; \
|
||||
movw %ax, %es; \
|
||||
; \
|
||||
cld; \
|
||||
; \
|
||||
movq %rsp, %rdi; \
|
||||
; \
|
||||
movq %cr3, %rax; pushq %rax; \
|
||||
; \
|
||||
movq %rsp, %rbp; \
|
||||
; \
|
||||
subq $8, %rsp; \
|
||||
andq $-16, %rsp; \
|
||||
; \
|
||||
callq amd64_intr_handler; \
|
||||
; \
|
||||
movq %rbp, %rsp; \
|
||||
; \
|
||||
popq %rax; movq %rax, %cr3; \
|
||||
; \
|
||||
pop_regs; \
|
||||
addq $16, %rsp; \
|
||||
; \
|
||||
iretq;
|
||||
|
||||
|
||||
@@ -88,3 +96,5 @@ make_intr_stub(no_err, 47)
|
||||
|
||||
make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
|
||||
make_intr_stub(no_err, TLB_SHOOTDOWN)
|
||||
make_intr_stub(no_err, CPU_REQUEST_SCHED)
|
||||
make_intr_stub(no_err, CPU_SPURIOUS)
|
||||
|
||||
@@ -11,36 +11,43 @@
|
||||
#include <sys/mm.h>
|
||||
#include <sys/smp.h>
|
||||
|
||||
/// Present flag
|
||||
#define AMD64_PG_PRESENT (1 << 0)
|
||||
/// Writable flag
|
||||
#define AMD64_PG_RW (1 << 1)
|
||||
/// User-accessible flag
|
||||
#define AMD64_PG_USER (1 << 2)
|
||||
#define AMD64_PG_RW (1 << 1)
|
||||
#define AMD64_PG_USER (1 << 2)
|
||||
#define AMD64_PG_HUGE (1 << 7)
|
||||
|
||||
/// Auxiliary struct for page directory walking
/* Auxiliary struct for page directory walking */
|
||||
struct pg_index {
|
||||
uint16_t pml4, pml3, pml2, pml1;
|
||||
} PACKED;
|
||||
|
||||
/// Kernel page directory
|
||||
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
|
||||
/// Lock needed to sync between map/unmap operations and TLB shootdown
|
||||
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
|
||||
/* Kernel page directory */
|
||||
static struct pd kernel_pd;
|
||||
static spin_lock_t kernel_pd_lock;
|
||||
|
||||
/// Get current value of CR3 register
|
||||
void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
|
||||
|
||||
void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
|
||||
|
||||
/* Get current value of CR3 register */
|
||||
static uintptr_t amd64_current_cr3 (void) {
|
||||
uintptr_t cr3;
|
||||
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
|
||||
return cr3;
|
||||
}
|
||||
|
||||
/// Load kernel CR3 as current CR3
|
||||
/* Load kernel CR3 as current CR3 */
|
||||
void amd64_load_kernel_cr3 (void) {
|
||||
__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
|
||||
uintptr_t cr3 = amd64_current_cr3 ();
|
||||
|
||||
if (cr3 != kernel_pd.cr3_paddr) {
|
||||
__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract PML info from virtual address
|
||||
struct pd* mm_get_kernel_pd (void) { return &kernel_pd; }
|
||||
|
||||
/* Extract PML info from virtual address */
|
||||
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
|
||||
struct pg_index ret;
|
||||
|
||||
@@ -52,16 +59,19 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
|
||||
return ret;
|
||||
}
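The body of `amd64_mm_page_index` is elided by this hunk; for the standard x86-64 4-level, 4 KiB layout (the same shift constants that `mm_p2v` below uses to rebuild a virtual address) the indices fall out of the address like this. A sketch only, not the kernel's exact code:

```c
#include <stdint.h>

/* Sketch: standard 4-level index split for 4 KiB pages. */
struct pg_index_sketch { uint16_t pml4, pml3, pml2, pml1; };

static struct pg_index_sketch page_index_sketch (uint64_t vaddr) {
    struct pg_index_sketch ret;
    ret.pml4 = (vaddr >> 39) & 0x1FF;   /* bits 47:39 */
    ret.pml3 = (vaddr >> 30) & 0x1FF;   /* bits 38:30 */
    ret.pml2 = (vaddr >> 21) & 0x1FF;   /* bits 29:21 */
    ret.pml1 = (vaddr >> 12) & 0x1FF;   /* bits 20:12 */
    return ret;
}
```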
|
||||
|
||||
/// Walk paging tables and allocate necessary structures along the way
|
||||
/* Walk paging tables and allocate necessary structures along the way */
|
||||
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
|
||||
uint64_t entry = table[entry_idx];
|
||||
physaddr_t paddr;
|
||||
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
if (entry & AMD64_PG_PRESENT)
|
||||
if (entry & AMD64_PG_PRESENT) {
|
||||
if (entry & AMD64_PG_HUGE)
|
||||
return NULL;
|
||||
|
||||
paddr = entry & ~0xFFFULL;
|
||||
else {
|
||||
} else {
|
||||
if (!alloc)
|
||||
return NULL;
|
||||
|
||||
@@ -77,7 +87,15 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool
|
||||
return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
|
||||
}
|
||||
|
||||
/// Convert generic memory management subsystem flags into AMD64-specific flags
|
||||
static bool amd64_mm_is_table_empty (uint64_t* table) {
|
||||
for (size_t i = 0; i < 512; i++) {
|
||||
if (table[i] & AMD64_PG_PRESENT)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Convert generic memory management subsystem flags into AMD64-specific flags */
|
||||
static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
|
||||
uint64_t flags = 0;
|
||||
|
||||
@@ -88,24 +106,15 @@ static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
|
||||
return flags;
|
||||
}
|
||||
|
||||
/// Reload the current CR3 value ON A LOCAL CPU
|
||||
/* Reload the current CR3 value ON A LOCAL CPU */
|
||||
static void amd64_reload_cr3 (void) {
|
||||
uint64_t cr3;
|
||||
__asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Map physical address to virtual address with flags. TLB needs to be flushed
|
||||
* afterwards.
|
||||
*/
|
||||
/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
|
||||
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
|
||||
spin_lock (&mm_lock);
|
||||
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
bool do_reload = false;
|
||||
|
||||
if (flags & MM_PD_LOCK)
|
||||
spin_lock (&pd->lock);
|
||||
|
||||
uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
|
||||
|
||||
@@ -114,88 +123,77 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
|
||||
if (pml3 == NULL)
|
||||
goto done;
|
||||
return;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
|
||||
if (pml2 == NULL)
|
||||
goto done;
|
||||
return;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
|
||||
if (pml1 == NULL)
|
||||
goto done;
|
||||
return;
|
||||
|
||||
uint64_t* pte = &pml1[pg_index.pml1];
|
||||
|
||||
*pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
|
||||
do_reload = true;
|
||||
|
||||
done:
|
||||
if (do_reload && (flags & MM_PD_RELOAD))
|
||||
amd64_reload_cr3 ();
|
||||
|
||||
if (flags & MM_PD_LOCK)
|
||||
spin_unlock (&pd->lock);
|
||||
|
||||
spin_unlock (&mm_lock);
|
||||
}
|
||||
|
||||
/// Map a page into kernel page directory
|
||||
/* Map a page into kernel page directory */
|
||||
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
|
||||
mm_map_page (&kernel_pd, paddr, vaddr, flags);
|
||||
amd64_reload_cr3 ();
|
||||
}
|
||||
|
||||
/// Unmap a virtual address. TLB needs to be flushed afterwards
|
||||
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
|
||||
spin_lock (&mm_lock);
|
||||
|
||||
/* Unmap a virtual address. TLB needs to be flushed afterwards */
|
||||
void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
bool do_reload = false;
|
||||
|
||||
if (flags & MM_PD_LOCK)
|
||||
spin_lock (&pd->lock);
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
|
||||
if (pml3 == NULL)
|
||||
goto done;
|
||||
return;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
|
||||
if (pml2 == NULL)
|
||||
goto done;
|
||||
return;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
|
||||
if (pml1 == NULL)
|
||||
goto done;
|
||||
return;
|
||||
|
||||
uint64_t* pte = &pml1[pg_index.pml1];
|
||||
|
||||
*pte &= ~AMD64_PG_PRESENT;
|
||||
do_reload = true;
|
||||
if ((*pte) & AMD64_PG_PRESENT)
|
||||
*pte = 0;
|
||||
|
||||
done:
|
||||
if (do_reload && (flags & MM_PD_RELOAD))
|
||||
amd64_reload_cr3 ();
|
||||
if (amd64_mm_is_table_empty (pml1)) {
|
||||
uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
|
||||
pmm_free (pml1_phys, 1);
|
||||
pml2[pg_index.pml2] = 0;
|
||||
|
||||
if (flags & MM_PD_LOCK)
|
||||
spin_unlock (&pd->lock);
|
||||
if (amd64_mm_is_table_empty (pml2)) {
|
||||
uintptr_t pml2_phys = pml3[pg_index.pml3] & ~0xFFFULL;
|
||||
pmm_free (pml2_phys, 1);
|
||||
pml3[pg_index.pml3] = 0;
|
||||
|
||||
spin_unlock (&mm_lock);
|
||||
if (amd64_mm_is_table_empty (pml3)) {
|
||||
uintptr_t pml3_phys = pml4[pg_index.pml4] & ~0xFFFULL;
|
||||
pmm_free (pml3_phys, 1);
|
||||
pml4[pg_index.pml4] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Unmap a page from kernel page directory
|
||||
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
|
||||
mm_unmap_page (&kernel_pd, vaddr, flags);
|
||||
/* Unmap a page from kernel page directory */
|
||||
void mm_unmap_kernel_page (uintptr_t vaddr) {
|
||||
mm_unmap_page (&kernel_pd, vaddr);
|
||||
amd64_reload_cr3 ();
|
||||
}
|
||||
|
||||
/// Lock kernel page directory
|
||||
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
|
||||
|
||||
/// Unlock kernel page directory
|
||||
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
|
||||
|
||||
/// Allocate a userspace-ready page directory
|
||||
/* Allocate a userspace-ready page directory */
|
||||
uintptr_t mm_alloc_user_pd_phys (void) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
@@ -213,37 +211,111 @@ uintptr_t mm_alloc_user_pd_phys (void) {
|
||||
return cr3;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reload after map/unmap operation was performed. This function does the TLB
|
||||
* shootdown.
|
||||
*/
|
||||
void mm_reload (void) {
|
||||
spin_lock (&mm_lock);
|
||||
bool mm_validate (struct pd* pd, uintptr_t vaddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
bool ret = false;
|
||||
|
||||
struct limine_mp_response* mp = limine_mp_request.response;
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
for (size_t i = 0; i < mp->cpu_count; i++) {
|
||||
amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
|
||||
if (pml3 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
|
||||
if (pml2 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
|
||||
if (pml1 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t pte = pml1[pg_index.pml1];
|
||||
ret = (pte & AMD64_PG_PRESENT) != 0;
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
|
||||
bool ok = true;
|
||||
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
ok = mm_validate (pd, vaddr + i);
|
||||
if (!ok)
|
||||
goto done;
|
||||
}
|
||||
|
||||
spin_unlock (&mm_lock);
|
||||
done:
|
||||
return ok;
|
||||
}
|
||||
|
||||
/// TLB shootdown IRQ handler
|
||||
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
|
||||
(void)arg, (void)regs;
|
||||
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t ret = 0;
|
||||
|
||||
amd64_reload_cr3 ();
|
||||
DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
|
||||
for (size_t i4 = 0; i4 < 512; i4++) {
|
||||
if (!(pml4[i4] & AMD64_PG_PRESENT))
|
||||
continue;
|
||||
|
||||
uint64_t* pml3 = (uint64_t*)((uintptr_t)hhdm->offset + (pml4[i4] & ~0xFFFULL));
|
||||
for (size_t i3 = 0; i3 < 512; i3++) {
|
||||
if (!(pml3[i3] & AMD64_PG_PRESENT))
|
||||
continue;
|
||||
|
||||
uint64_t* pml2 = (uint64_t*)((uintptr_t)hhdm->offset + (pml3[i3] & ~0xFFFULL));
|
||||
for (size_t i2 = 0; i2 < 512; i2++) {
|
||||
if (!(pml2[i2] & AMD64_PG_PRESENT))
|
||||
continue;
|
||||
|
||||
uint64_t* pml1 = (uint64_t*)((uintptr_t)hhdm->offset + (pml2[i2] & ~0xFFFULL));
|
||||
for (size_t i1 = 0; i1 < 512; i1++) {
|
||||
if ((pml1[i1] & AMD64_PG_PRESENT) && ((pml1[i1] & ~0xFFFULL) == (paddr & ~0xFFFULL))) {
|
||||
struct pg_index idx = {i4, i3, i2, i1};
|
||||
ret = (((uint64_t)idx.pml4 << 39) | ((uint64_t)idx.pml3 << 30) |
|
||||
((uint64_t)idx.pml2 << 21) | ((uint64_t)idx.pml1 << 12) | (paddr & 0xFFFULL));
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Continue initializing memory management subsystem for AMD64 after the
|
||||
* essential parts were initialized
|
||||
*/
|
||||
void mm_init2 (void) {
|
||||
irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
|
||||
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t ret = 0;
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
|
||||
if (pml3 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
|
||||
if (pml2 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
|
||||
if (pml1 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t pte = pml1[pg_index.pml1];
|
||||
|
||||
if (!(pte & AMD64_PG_PRESENT))
|
||||
goto done;
|
||||
|
||||
ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/// Initialize essentials for the AMD64 memory management subsystem
|
||||
/* Initialize essentials for the AMD64 memory management subsystem */
|
||||
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
|
||||
|
||||
@@ -7,11 +7,9 @@
|
||||
#define PAGE_SIZE 4096
|
||||
|
||||
struct pd {
|
||||
spin_lock_t lock;
|
||||
uintptr_t cr3_paddr;
|
||||
};
|
||||
|
||||
void amd64_load_kernel_cr3 (void);
|
||||
void mm_init2 (void);
|
||||
|
||||
#endif // _KERNEL_AMD64_MM_H
|
||||
|
||||
@@ -28,15 +28,15 @@ Usage-Guide:
|
||||
|
||||
License-Text:
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
|
||||
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
@@ -85,8 +85,8 @@ patent must be licensed for everyone's free use or not licensed at all.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License applies to any program or other work which contains
|
||||
@@ -140,7 +140,7 @@ above, provided that you also meet all of these conditions:
|
||||
License. (Exception: if the Program itself is interactive but
|
||||
does not normally print such an announcement, your work based on
|
||||
the Program is not required to print an announcement.)
|
||||
|
||||
|
||||
These requirements apply to the modified work as a whole. If
|
||||
identifiable sections of that work are not derived from the Program,
|
||||
and can be reasonably considered independent and separate works in
|
||||
@@ -198,7 +198,7 @@ access to copy from a designated place, then offering equivalent
|
||||
access to copy the source code from the same place counts as
|
||||
distribution of the source code, even though third parties are not
|
||||
compelled to copy the source along with the object code.
|
||||
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program
|
||||
except as expressly provided under this License. Any attempt
|
||||
otherwise to copy, modify, sublicense or distribute the Program is
|
||||
@@ -255,7 +255,7 @@ impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
@@ -285,7 +285,7 @@ make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
@@ -307,9 +307,9 @@ YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
|
||||
@@ -1,13 +1,23 @@
|
||||
#include <amd64/gdt.h>
|
||||
#include <amd64/proc.h>
|
||||
#include <aux/elf.h>
|
||||
#include <libk/align.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <limine/requests.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/proc.h>
|
||||
|
||||
static atomic_int pids = 0;
|
||||
|
||||
struct proc* proc_from_elf (uint8_t* elf_contents) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
@@ -18,32 +28,24 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
|
||||
|
||||
memset (proc, 0, sizeof (*proc));
|
||||
|
||||
proc->pd.lock = SPIN_LOCK_INIT;
|
||||
proc->pd.cr3_paddr = mm_alloc_user_pd_phys ();
|
||||
if (proc->pd.cr3_paddr == 0) {
|
||||
proc->lock = SPIN_LOCK_INIT;
|
||||
atomic_store (&proc->state, PROC_READY);
|
||||
proc->pid = atomic_fetch_add (&pids, 1);
|
||||
|
||||
proc->procgroup = procgroup_create ();
|
||||
if (proc->procgroup == NULL) {
|
||||
free (proc);
|
||||
return NULL;
|
||||
}
|
||||
procgroup_attach (proc->procgroup, proc);
|
||||
|
||||
proc->pdata.kernel_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
|
||||
if (proc->pdata.kernel_stack == PMM_ALLOC_ERR) {
|
||||
free (proc);
|
||||
return NULL;
|
||||
}
|
||||
uintptr_t kernel_stack = proc->pdata.kernel_stack;
|
||||
proc->pdata.kernel_stack += (uintptr_t)hhdm->offset + KSTACK_SIZE;
|
||||
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
|
||||
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
|
||||
|
||||
proc->pdata.user_stack = pmm_alloc (USTACK_SIZE / PAGE_SIZE);
|
||||
if (proc->pdata.user_stack == PMM_ALLOC_ERR) {
|
||||
free (proc);
|
||||
pmm_free (kernel_stack, USTACK_SIZE / PAGE_SIZE);
|
||||
return NULL;
|
||||
}
|
||||
uintptr_t user_stack = proc->pdata.user_stack;
|
||||
proc->pdata.user_stack += USTACK_SIZE;
|
||||
procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
|
||||
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
|
||||
|
||||
proc_map (proc, user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
|
||||
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
|
||||
proc->flags |= PROC_USTK_PREALLOC;
|
||||
|
||||
struct elf_aux aux = proc_load_segments (proc, elf_contents);
|
||||
|
||||
@@ -52,8 +54,85 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
|
||||
proc->pdata.regs.rflags = 0x202;
|
||||
proc->pdata.regs.cs = GDT_UCODE | 0x03;
|
||||
proc->pdata.regs.rip = aux.entry;
|
||||
proc->lock = SPIN_LOCK_INIT;
|
||||
atomic_store (&proc->state, PROC_READY);
|
||||
|
||||
return proc;
|
||||
}
|
||||
|
||||
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
|
||||
uintptr_t argument_ptr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
spin_lock_ctx_t ctxprt;
|
||||
|
||||
struct proc* proc = malloc (sizeof (*proc));
|
||||
if (proc == NULL)
|
||||
return NULL;
|
||||
|
||||
memset (proc, 0, sizeof (*proc));
|
||||
|
||||
proc->lock = SPIN_LOCK_INIT;
|
||||
atomic_store (&proc->state, PROC_READY);
|
||||
proc->pid = atomic_fetch_add (&pids, 1);
|
||||
|
||||
spin_lock (&proto->lock, &ctxprt);
|
||||
|
||||
proc->procgroup = proto->procgroup;
|
||||
procgroup_attach (proc->procgroup, proc);
|
||||
|
||||
spin_unlock (&proto->lock, &ctxprt);
|
||||
|
||||
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
|
||||
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
|
||||
|
||||
proc->pdata.regs.ss = GDT_UDATA | 0x03;
|
||||
proc->pdata.regs.rsp = (uint64_t)vstack_top;
|
||||
proc->pdata.regs.rflags = 0x202;
|
||||
proc->pdata.regs.cs = GDT_UCODE | 0x03;
|
||||
proc->pdata.regs.rip = (uint64_t)entry;
|
||||
|
||||
proc->uvaddr_argument = argument_ptr;
|
||||
|
||||
proc_init_tls (proc);
|
||||
|
||||
return proc;
|
||||
}
|
||||
|
||||
void proc_cleanup (struct proc* proc) {
|
||||
proc_sqs_cleanup (proc);
|
||||
proc_mutexes_cleanup (proc);
|
||||
|
||||
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
|
||||
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
|
||||
|
||||
procgroup_detach (proc->procgroup, proc);
|
||||
|
||||
/* clean the process */
|
||||
free (proc);
|
||||
}
|
||||
|
||||
void proc_init_tls (struct proc* proc) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
if (proc->procgroup->tls.tls_tmpl == NULL)
|
||||
return;
|
||||
|
||||
size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
|
||||
size_t pages = proc->procgroup->tls.tls_tmpl_pages;
|
||||
|
||||
uintptr_t tls_paddr;
|
||||
uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
|
||||
|
||||
uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
|
||||
|
||||
uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
|
||||
|
||||
memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
|
||||
memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
|
||||
|
||||
uintptr_t ktcb = k_tls_addr + tls_size;
|
||||
uintptr_t utcb = tls_vaddr + tls_size;
|
||||
|
||||
*(uintptr_t*)ktcb = utcb;
|
||||
|
||||
proc->pdata.fs_base = utcb;
|
||||
proc->pdata.tls_vaddr = tls_vaddr;
|
||||
}
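`proc_init_tls` follows the usual x86-64 TLS "variant II" layout: FS.base points at the TCB, and the first word of the TCB holds the TCB's own user address. Under that assumption, userspace can recover its thread pointer with a single %fs-relative load. A hypothetical userspace-side sketch:

```c
#include <stdint.h>

/* Hypothetical userspace helper: with FS.base = utcb and *(uintptr_t*)utcb == utcb
 * (as proc_init_tls sets up), %fs:0 yields the thread pointer. */
static inline uintptr_t read_thread_pointer (void) {
    uintptr_t tp;
    __asm__ volatile ("movq %%fs:0, %0" : "=r"(tp));
    return tp;
}
```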
|
||||
|
||||
@@ -4,17 +4,19 @@
|
||||
#include <amd64/intr.h>
|
||||
#include <libk/std.h>
|
||||
|
||||
/// Top of userspace process' stack
|
||||
/* Top of userspace process' stack */
|
||||
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
|
||||
/// Size of userspace process' stack
|
||||
/* Size of userspace process' stack */
|
||||
#define USTACK_SIZE (256 * PAGE_SIZE)
|
||||
/* proc_map () base address */
|
||||
#define PROC_MAP_BASE 0x0000700000000000
|
||||
|
||||
/// Platform-dependent process data
|
||||
/* Platform-dependent process data */
|
||||
struct proc_platformdata {
|
||||
struct saved_regs regs;
|
||||
uintptr_t user_stack;
|
||||
uintptr_t kernel_stack;
|
||||
uint64_t gs_base;
|
||||
uint64_t fs_base;
|
||||
uintptr_t tls_vaddr;
|
||||
};
|
||||
|
||||
#endif // _KERNEL_AMD64_PROC_H
|
||||
|
||||
13
kernel/amd64/procgroup.h
Normal file
@@ -0,0 +1,13 @@
|
||||
#ifndef _KERNEL_AMD64_PROCGRPUP_H
|
||||
#define _KERNEL_AMD64_PROCGRPUP_H
|
||||
|
||||
#include <libk/std.h>
|
||||
|
||||
struct procgroup_tls {
|
||||
uint8_t* tls_tmpl;
|
||||
size_t tls_tmpl_size;
|
||||
size_t tls_tmpl_total_size;
|
||||
size_t tls_tmpl_pages;
|
||||
};
|
||||
|
||||
#endif // _KERNEL_AMD64_PROCGRPUP_H
|
||||
@@ -35,4 +35,21 @@
|
||||
popq % rcx; \
|
||||
popq % rax;
|
||||
|
||||
#define pop_regs_skip_rax \
|
||||
popq % r15; \
|
||||
popq % r14; \
|
||||
popq % r13; \
|
||||
popq % r12; \
|
||||
popq % r11; \
|
||||
popq % r10; \
|
||||
popq % r9; \
|
||||
popq % r8; \
|
||||
popq % rbx; \
|
||||
popq % rbp; \
|
||||
popq % rdi; \
|
||||
popq % rsi; \
|
||||
popq % rdx; \
|
||||
popq % rcx; \
|
||||
addq $8, % rsp
|
||||
|
||||
#endif // _KERNEL_AMD64_REGSASM_H
|
||||
|
||||
@@ -2,9 +2,8 @@
|
||||
|
||||
.global amd64_do_sched
|
||||
amd64_do_sched:
|
||||
cli
|
||||
movq %rsi, %cr3
|
||||
movq %rdi, %rsp
|
||||
pop_regs
|
||||
add $16, %rsp
|
||||
addq $16, %rsp
|
||||
iretq
|
||||
|
||||
@@ -3,12 +3,21 @@
|
||||
#include <amd64/sched.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/mm.h>
|
||||
#include <sys/smp.h>
|
||||
|
||||
void do_sched (struct proc* proc) {
|
||||
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
|
||||
spin_lock_ctx_t ctxpr;
|
||||
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
|
||||
thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
|
||||
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
|
||||
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
|
||||
|
||||
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd.cr3_paddr);
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
spin_unlock (cpu_lock, ctxcpu);
|
||||
|
||||
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#include <amd64/apic.h>
|
||||
#include <amd64/init.h>
|
||||
#include <amd64/intr_defs.h>
|
||||
#include <amd64/mm.h>
|
||||
#include <amd64/msr-index.h>
|
||||
#include <amd64/msr.h>
|
||||
@@ -7,8 +8,10 @@
|
||||
#include <libk/string.h>
|
||||
#include <limine/requests.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/sched.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/syscall.h>
|
||||
|
||||
@@ -20,7 +23,7 @@ static struct cpu cpus[CPUS_MAX];
|
||||
static atomic_int cpu_init_count;
|
||||
|
||||
/// Allocate a CPU structure
|
||||
struct cpu* cpu_make (void) {
|
||||
struct cpu* cpu_make (uint64_t lapic_id) {
|
||||
int id = atomic_fetch_add (&cpu_counter, 1);
|
||||
|
||||
struct cpu* cpu = &cpus[id];
|
||||
@@ -28,7 +31,7 @@ struct cpu* cpu_make (void) {
|
||||
memset (cpu, 0, sizeof (*cpu));
|
||||
cpu->lock = SPIN_LOCK_INIT;
|
||||
cpu->id = id;
|
||||
cpu->self = cpu;
|
||||
cpu->lapic_id = lapic_id;
|
||||
|
||||
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
|
||||
|
||||
@@ -40,38 +43,65 @@ struct cpu* cpu_get (void) {
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void cpu_request_sched (struct cpu* cpu) {
|
||||
if (cpu == thiscpu) {
|
||||
proc_sched ();
|
||||
return;
|
||||
}
|
||||
|
||||
amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
|
||||
}
|
||||
|
||||
struct cpu* cpu_find_lightest (void) {
|
||||
struct cpu* cpu = &cpus[0];
|
||||
|
||||
int load = atomic_load (&cpu->proc_run_q_count);
|
||||
|
||||
for (unsigned int i = 1; i < cpu_counter; i++) {
|
||||
struct cpu* new_cpu = &cpus[i];
|
||||
int new_load = atomic_load (&new_cpu->proc_run_q_count);
|
||||
if (new_load < load) {
|
||||
load = new_load;
|
||||
cpu = new_cpu;
|
||||
}
|
||||
}
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
/// Bootstrap code for non-BSP CPUs
|
||||
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
|
||||
amd64_load_kernel_cr3 ();
|
||||
|
||||
struct cpu* cpu = cpu_make ();
|
||||
struct cpu* cpu = cpu_make (mp_info->lapic_id);
|
||||
|
||||
amd64_init (cpu, true); /* gdt + idt */
|
||||
syscall_init ();
|
||||
|
||||
thiscpu->lapic_ticks = amd64_lapic_init (10000);
|
||||
amd64_lapic_tick (thiscpu->lapic_ticks);
|
||||
amd64_lapic_init (1000);
|
||||
|
||||
DEBUG ("CPU %u is online!\n", thiscpu->id);
|
||||
|
||||
__asm__ volatile ("sti");
|
||||
|
||||
atomic_fetch_sub (&cpu_init_count, 1);
|
||||
|
||||
for (;;)
|
||||
;
|
||||
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
|
||||
proc_register (spin_proc, thiscpu);
|
||||
|
||||
spin_lock_ctx_t ctxcpu;
|
||||
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
|
||||
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
|
||||
}
|
||||
|
||||
/// Initialize SMP subsystem for AMD64. Start AP CPUs
|
||||
void smp_init (void) {
|
||||
thiscpu->lapic_ticks = amd64_lapic_init (10000);
|
||||
amd64_lapic_init (1000);
|
||||
|
||||
struct limine_mp_response* mp = limine_mp_request.response;
|
||||
|
||||
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
|
||||
|
||||
for (size_t i = 0; i < mp->cpu_count; i++) {
|
||||
if (mp->cpus[i]->lapic_id != thiscpu->id) {
|
||||
if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
|
||||
DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
|
||||
mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
|
||||
}
|
||||
|
||||
@@ -2,10 +2,13 @@
|
||||
#define _KERNEL_AMD64_SMP_H
|
||||
|
||||
#include <amd64/gdt.h>
|
||||
#include <amd64/intr.h>
|
||||
#include <amd64/tss.h>
|
||||
#include <aux/compiler.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
|
||||
#define CPUS_MAX 32
|
||||
|
||||
@@ -13,30 +16,28 @@ struct cpu {
|
||||
/* for syscall instruction */
|
||||
uintptr_t syscall_user_stack;
|
||||
uintptr_t syscall_kernel_stack;
|
||||
struct cpu* self;
|
||||
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
|
||||
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
|
||||
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
|
||||
volatile struct gdt_extended gdt ALIGNED (16);
|
||||
volatile struct tss tss;
|
||||
|
||||
uintptr_t lapic_mmio_base;
|
||||
uint64_t lapic_ticks;
|
||||
uint64_t lapic_id;
|
||||
uint32_t id;
|
||||
|
||||
struct {
|
||||
uint64_t rflags;
|
||||
atomic_int nesting;
|
||||
} irq_ctx;
|
||||
|
||||
spin_lock_t lock;
|
||||
|
||||
struct proc* proc_run_q;
|
||||
struct list_node_link* proc_run_q;
|
||||
struct proc* proc_current;
|
||||
} PACKED;
|
||||
atomic_int proc_run_q_count;
|
||||
};
|
||||
|
||||
struct cpu* cpu_make (void);
|
||||
struct cpu* cpu_make (uint64_t lapic_id);
|
||||
struct cpu* cpu_get (void);
|
||||
void amd64_thiscpu_set_init (void);
|
||||
void cpu_request_sched (struct cpu* cpu);
|
||||
struct cpu* cpu_find_lightest (void);
|
||||
|
||||
#define thiscpu (cpu_get ())
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
.global amd64_spin
|
||||
amd64_spin:
|
||||
hlt
|
||||
jmp amd64_spin
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
#include <amd64/mm.h>
|
||||
#include <amd64/msr-index.h>
|
||||
#include <amd64/msr.h>
|
||||
#include <libk/string.h>
|
||||
#include <m/status.h>
|
||||
#include <m/syscall_defs.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sys/debug.h>
|
||||
@@ -11,28 +13,33 @@
|
||||
|
||||
extern void amd64_syscall_entry (void);
|
||||
|
||||
int amd64_syscall_dispatch (void* stack_ptr) {
|
||||
uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
|
||||
spin_lock_ctx_t ctxcpu, ctxpr;
|
||||
|
||||
amd64_load_kernel_cr3 ();
|
||||
struct saved_regs* regs = stack_ptr;
|
||||
|
||||
spin_lock (&thiscpu->lock, &ctxcpu);
|
||||
struct proc* caller = thiscpu->proc_current;
|
||||
spin_lock (&caller->lock, &ctxpr);
|
||||
|
||||
memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
|
||||
|
||||
spin_unlock (&caller->lock, &ctxpr);
|
||||
spin_unlock (&thiscpu->lock, &ctxcpu);
|
||||
|
||||
int syscall_num = regs->rax;
|
||||
syscall_handler_func_t func = syscall_find_handler (syscall_num);
|
||||
|
||||
if (func == NULL)
|
||||
return -SR_SYSCALL_NOT_FOUND;
|
||||
if (func == NULL) {
|
||||
return -ST_SYSCALL_NOT_FOUND;
|
||||
}
|
||||
|
||||
struct proc* caller = thiscpu->proc_current;
|
||||
|
||||
__asm__ volatile ("sti");
|
||||
|
||||
int result = func (caller, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
|
||||
|
||||
__asm__ volatile ("cli");
|
||||
|
||||
return result;
|
||||
return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
|
||||
}
|
||||
|
||||
void syscall_init (void) {
|
||||
amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_UCODE - 16) << 48));
|
||||
amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
|
||||
amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
|
||||
amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
|
||||
amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
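The new STAR[63:48] value (`GDT_KDATA | 0x03`) only works together with the GDT reorder earlier in this diff: on SYSRET the CPU loads SS from STAR[63:48] + 8 and, for 64-bit mode, CS from STAR[63:48] + 16, so user data (0x18) has to sit directly below user code (0x20). A small sketch of that arithmetic using the selector values from this diff (the helper itself is illustrative):

```c
#include <stdint.h>

static inline void star_layout_example (void) {
    uint16_t star_hi   = 0x10 | 0x03;   /* GDT_KDATA | RPL3 = 0x13, written to STAR[63:48] */
    uint16_t sysret_ss = star_hi + 8;   /* 0x1B = GDT_UDATA | 3 */
    uint16_t sysret_cs = star_hi + 16;  /* 0x23 = GDT_UCODE | 3 */
    (void)sysret_ss;
    (void)sysret_cs;                    /* matches the $0x1b / $0x23 pushes in amd64_syscall_entry */
}
```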
|
||||
|
||||
@@ -9,30 +9,39 @@ amd64_syscall_entry:
|
||||
movq %rsp, %gs:0
|
||||
movq %gs:8, %rsp
|
||||
|
||||
pushq $0x23
|
||||
pushq $0x1b
|
||||
pushq %gs:0
|
||||
pushq %r11
|
||||
pushq $0x1b
|
||||
pushq $0x23
|
||||
pushq %rcx
|
||||
pushq $0
|
||||
pushq $0
|
||||
|
||||
push_regs
|
||||
|
||||
movw $0x10, %ax
|
||||
movw %ax, %ds
|
||||
movw %ax, %es
|
||||
movw %ax, %ss
|
||||
|
||||
cld
|
||||
|
||||
movq %rsp, %rdi
|
||||
|
||||
movq %cr3, %rax; pushq %rax
|
||||
|
||||
movq %rsp, %rbp
|
||||
|
||||
subq $8, %rsp
|
||||
andq $~0xF, %rsp
|
||||
andq $-16, %rsp
|
||||
|
||||
callq amd64_syscall_dispatch
|
||||
|
||||
movq %rbp, %rsp
|
||||
|
||||
pop_regs
|
||||
popq %rbx; movq %rbx, %cr3
|
||||
|
||||
pop_regs_skip_rax
|
||||
|
||||
addq $56, %rsp
|
||||
movq %gs:0, %rsp
|
||||
|
||||
652
kernel/aux/elf.h
File diff suppressed because it is too large
@@ -1,269 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<doxygenlayout version="2.0">
|
||||
<!-- Generated by doxygen 1.15.0 -->
|
||||
<!-- Navigation index tabs for HTML output -->
|
||||
<navindex>
|
||||
<tab type="mainpage" visible="yes" title=""/>
|
||||
<tab type="pages" visible="yes" title="" intro=""/>
|
||||
<tab type="topics" visible="yes" title="" intro=""/>
|
||||
<tab type="modules" visible="yes" title="" intro="">
|
||||
<tab type="modulelist" visible="yes" title="" intro=""/>
|
||||
<tab type="modulemembers" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="namespaces" visible="yes" title="">
|
||||
<tab type="namespacelist" visible="yes" title="" intro=""/>
|
||||
<tab type="namespacemembers" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="concepts" visible="yes" title="">
|
||||
</tab>
|
||||
<tab type="interfaces" visible="yes" title="">
|
||||
<tab type="interfacelist" visible="yes" title="" intro=""/>
|
||||
<tab type="interfaceindex" visible="$ALPHABETICAL_INDEX" title=""/>
|
||||
<tab type="interfacehierarchy" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="classes" visible="yes" title="">
|
||||
<tab type="classlist" visible="yes" title="" intro=""/>
|
||||
<tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
|
||||
<tab type="hierarchy" visible="yes" title="" intro=""/>
|
||||
<tab type="classmembers" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="structs" visible="yes" title="">
|
||||
<tab type="structlist" visible="yes" title="" intro=""/>
|
||||
<tab type="structindex" visible="$ALPHABETICAL_INDEX" title=""/>
|
||||
</tab>
|
||||
<tab type="exceptions" visible="yes" title="">
|
||||
<tab type="exceptionlist" visible="yes" title="" intro=""/>
|
||||
<tab type="exceptionindex" visible="$ALPHABETICAL_INDEX" title=""/>
|
||||
<tab type="exceptionhierarchy" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="files" visible="yes" title="">
|
||||
<tab type="filelist" visible="yes" title="" intro=""/>
|
||||
<tab type="globals" visible="yes" title="" intro=""/>
|
||||
</tab>
|
||||
<tab type="examples" visible="yes" title="" intro=""/>
|
||||
</navindex>
|
||||
|
||||
<!-- Layout definition for a class page -->
|
||||
<class>
|
||||
<briefdescription visible="yes"/>
|
||||
<includes visible="$SHOW_HEADERFILE"/>
|
||||
<inheritancegraph visible="yes"/>
|
||||
<collaborationgraph visible="yes"/>
|
||||
<memberdecl>
|
||||
<nestedclasses visible="yes" title=""/>
|
||||
<publictypes visible="yes" title=""/>
|
||||
<services visible="yes" title=""/>
|
||||
<interfaces visible="yes" title=""/>
|
||||
<publicslots visible="yes" title=""/>
|
||||
<signals visible="yes" title=""/>
|
||||
<publicmethods visible="yes" title=""/>
|
||||
<publicstaticmethods visible="yes" title=""/>
|
||||
<publicattributes visible="yes" title=""/>
|
||||
<publicstaticattributes visible="yes" title=""/>
|
||||
<protectedtypes visible="yes" title=""/>
|
||||
<protectedslots visible="yes" title=""/>
|
||||
<protectedmethods visible="yes" title=""/>
|
||||
<protectedstaticmethods visible="yes" title=""/>
|
||||
<protectedattributes visible="yes" title=""/>
|
||||
<protectedstaticattributes visible="yes" title=""/>
|
||||
<packagetypes visible="yes" title=""/>
|
||||
<packagemethods visible="yes" title=""/>
|
||||
<packagestaticmethods visible="yes" title=""/>
|
||||
<packageattributes visible="yes" title=""/>
|
||||
<packagestaticattributes visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
<events visible="yes" title=""/>
|
||||
<privatetypes visible="yes" title=""/>
|
||||
<privateslots visible="yes" title=""/>
|
||||
<privatemethods visible="yes" title=""/>
|
||||
<privatestaticmethods visible="yes" title=""/>
|
||||
<privateattributes visible="yes" title=""/>
|
||||
<privatestaticattributes visible="yes" title=""/>
|
||||
<friends visible="yes" title=""/>
|
||||
<related visible="yes" title="" subtitle=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
<memberdef>
|
||||
<inlineclasses visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<services visible="yes" title=""/>
|
||||
<interfaces visible="yes" title=""/>
|
||||
<constructors visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<related visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
<events visible="yes" title=""/>
|
||||
</memberdef>
|
||||
<allmemberslink visible="yes"/>
|
||||
<usedfiles visible="$SHOW_USED_FILES"/>
|
||||
<authorsection visible="yes"/>
|
||||
</class>
|
||||
|
||||
<!-- Layout definition for a namespace page -->
|
||||
<namespace>
|
||||
<briefdescription visible="yes"/>
|
||||
<memberdecl>
|
||||
<nestednamespaces visible="yes" title=""/>
|
||||
<constantgroups visible="yes" title=""/>
|
||||
<interfaces visible="yes" title=""/>
|
||||
<classes visible="yes" title=""/>
|
||||
<concepts visible="yes" title=""/>
|
||||
<structs visible="yes" title=""/>
|
||||
<exceptions visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<sequences visible="yes" title=""/>
|
||||
<dictionaries visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
<memberdef>
|
||||
<inlineclasses visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<sequences visible="yes" title=""/>
|
||||
<dictionaries visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
</memberdef>
|
||||
<authorsection visible="yes"/>
|
||||
</namespace>
|
||||
|
||||
<!-- Layout definition for a concept page -->
|
||||
<concept>
|
||||
<briefdescription visible="yes"/>
|
||||
<includes visible="$SHOW_HEADERFILE"/>
|
||||
<definition visible="yes" title=""/>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
<authorsection visible="yes"/>
|
||||
</concept>
|
||||
|
||||
<!-- Layout definition for a file page -->
|
||||
<file>
|
||||
<briefdescription visible="yes"/>
|
||||
<includes visible="$SHOW_INCLUDE_FILES"/>
|
||||
<includegraph visible="yes"/>
|
||||
<includedbygraph visible="yes"/>
|
||||
<sourcelink visible="yes"/>
|
||||
<memberdecl>
|
||||
<interfaces visible="yes" title=""/>
|
||||
<classes visible="yes" title=""/>
|
||||
<structs visible="yes" title=""/>
|
||||
<exceptions visible="yes" title=""/>
|
||||
<namespaces visible="yes" title=""/>
|
||||
<concepts visible="yes" title=""/>
|
||||
<constantgroups visible="yes" title=""/>
|
||||
<defines visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<sequences visible="yes" title=""/>
|
||||
<dictionaries visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
<memberdef>
|
||||
<inlineclasses visible="yes" title=""/>
|
||||
<defines visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<sequences visible="yes" title=""/>
|
||||
<dictionaries visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
</memberdef>
|
||||
<authorsection/>
|
||||
</file>
|
||||
|
||||
<!-- Layout definition for a group page -->
|
||||
<group>
|
||||
<briefdescription visible="yes"/>
|
||||
<groupgraph visible="yes"/>
|
||||
<memberdecl>
|
||||
<nestedgroups visible="yes" title=""/>
|
||||
<modules visible="yes" title=""/>
|
||||
<dirs visible="yes" title=""/>
|
||||
<files visible="yes" title=""/>
|
||||
<namespaces visible="yes" title=""/>
|
||||
<concepts visible="yes" title=""/>
|
||||
<classes visible="yes" title=""/>
|
||||
<defines visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<sequences visible="yes" title=""/>
|
||||
<dictionaries visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<enumvalues visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<signals visible="yes" title=""/>
|
||||
<publicslots visible="yes" title=""/>
|
||||
<protectedslots visible="yes" title=""/>
|
||||
<privateslots visible="yes" title=""/>
|
||||
<events visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
<friends visible="yes" title=""/>
|
||||
<membergroups visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
<memberdef>
|
||||
<pagedocs/>
|
||||
<inlineclasses visible="yes" title=""/>
|
||||
<defines visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<sequences visible="yes" title=""/>
|
||||
<dictionaries visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<enumvalues visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<signals visible="yes" title=""/>
|
||||
<publicslots visible="yes" title=""/>
|
||||
<protectedslots visible="yes" title=""/>
|
||||
<privateslots visible="yes" title=""/>
|
||||
<events visible="yes" title=""/>
|
||||
<properties visible="yes" title=""/>
|
||||
<friends visible="yes" title=""/>
|
||||
</memberdef>
|
||||
<authorsection visible="yes"/>
|
||||
</group>
|
||||
|
||||
<!-- Layout definition for a C++20 module page -->
|
||||
<module>
|
||||
<briefdescription visible="yes"/>
|
||||
<exportedmodules visible="yes"/>
|
||||
<memberdecl>
|
||||
<concepts visible="yes" title=""/>
|
||||
<classes visible="yes" title=""/>
|
||||
<enums visible="yes" title=""/>
|
||||
<typedefs visible="yes" title=""/>
|
||||
<functions visible="yes" title=""/>
|
||||
<variables visible="yes" title=""/>
|
||||
<membergroups visible="yes" title=""/>
|
||||
</memberdecl>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
<memberdecl>
|
||||
<files visible="yes"/>
|
||||
</memberdecl>
|
||||
</module>
|
||||
|
||||
<!-- Layout definition for a directory page -->
|
||||
<directory>
|
||||
<briefdescription visible="yes"/>
|
||||
<directorygraph visible="yes"/>
|
||||
<memberdecl>
|
||||
<dirs visible="yes"/>
|
||||
<files visible="yes"/>
|
||||
</memberdecl>
|
||||
<detaileddescription visible="yes" title=""/>
|
||||
</directory>
|
||||
</doxygenlayout>
|
||||
@@ -3,18 +3,20 @@
|
||||
#include <libk/std.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
|
||||
#if defined(__x86_64__)
|
||||
#include <amd64/apic.h>
|
||||
#include <amd64/intr.h>
|
||||
#endif
|
||||
|
||||
/* TODO: figure out a generic way to work with IRQs */
|
||||
struct irq* irq_table[0x100];
|
||||
|
||||
static struct irq* irqs = NULL;
|
||||
static spin_lock_t irqs_lock;
|
||||
static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
|
||||
|
||||
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
|
||||
spin_lock_ctx_t ctxiqa;
|
||||
|
||||
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
|
||||
struct irq* irq = malloc (sizeof (*irq));
|
||||
if (irq == NULL) {
|
||||
return false;
|
||||
@@ -23,44 +25,22 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
|
||||
irq->func = func;
|
||||
irq->arg = arg;
|
||||
irq->irq_num = irq_num;
|
||||
irq->flags = flags;
|
||||
|
||||
spin_lock (&irqs_lock);
|
||||
linklist_append (struct irq*, irqs, irq);
|
||||
spin_unlock (&irqs_lock);
|
||||
|
||||
#if defined(__x86_64__)
|
||||
uint8_t resolution = amd64_resolve_irq (irq_num);
|
||||
amd64_ioapic_route_irq (irq_num, resolution, 0, amd64_lapic_id ());
|
||||
#endif
|
||||
spin_lock (&irqs_lock, &ctxiqa);
|
||||
irq_table[irq_num] = irq;
|
||||
spin_unlock (&irqs_lock, &ctxiqa);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void irq_detach (void (*func) (void*, void*)) {
|
||||
spin_lock (&irqs_lock);
|
||||
|
||||
struct irq *irq, *irq_tmp;
|
||||
linklist_foreach (irqs, irq, irq_tmp) {
|
||||
if ((uintptr_t)irq->func == (uintptr_t)func)
|
||||
linklist_remove (struct irq*, irqs, irq);
|
||||
}
|
||||
|
||||
spin_unlock (&irqs_lock);
|
||||
}
|
||||
|
||||
struct irq* irq_find (uint32_t irq_num) {
|
||||
spin_lock (&irqs_lock);
|
||||
spin_lock_ctx_t ctxiqa;
|
||||
|
||||
struct irq *irq, *irq_tmp;
|
||||
linklist_foreach (irqs, irq, irq_tmp) {
|
||||
if (irq->irq_num == irq_num) {
|
||||
spin_unlock (&irqs_lock);
|
||||
return irq;
|
||||
}
|
||||
}
|
||||
spin_lock (&irqs_lock, &ctxiqa);
|
||||
|
||||
spin_unlock (&irqs_lock);
|
||||
struct irq* irq = irq_table[irq_num];
|
||||
|
||||
return NULL;
|
||||
spin_unlock (&irqs_lock, &ctxiqa);
|
||||
|
||||
return irq;
|
||||
}
|
||||
|
||||
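The hunks above and below migrate callers from spin_lock(&lock) to a context-carrying variant. A minimal usage sketch of the assumed convention; the ctx presumably captures saved interrupt state so the matching unlock can restore it:

static spin_lock_t my_lock = SPIN_LOCK_INIT;   /* hypothetical lock, not from the diff */

void touch_shared_state (void) {
  spin_lock_ctx_t ctx;

  spin_lock (&my_lock, &ctx);   /* save IRQ state into ctx, then acquire */
  /* ... critical section ... */
  spin_unlock (&my_lock, &ctx); /* release, then restore state from ctx  */
}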
@@ -1,24 +1,20 @@
|
||||
#ifndef _KERNEL_IRQ_IRQ_H
|
||||
#define _KERNEL_IRQ_IRQ_H
|
||||
|
||||
#include <libk/list.h>
|
||||
#include <libk/std.h>
|
||||
|
||||
#define IRQ_INTERRUPT_SAFE (1 << 0)
|
||||
#define IRQ_INTERRUPT_UNSAFE (1 << 1)
|
||||
|
||||
typedef void (*irq_func_t) (void* arg, void* regs);
|
||||
|
||||
struct irq {
|
||||
struct irq* next;
|
||||
struct list_node_link irqs_link;
|
||||
|
||||
irq_func_t func;
|
||||
void* arg;
|
||||
uint32_t irq_num;
|
||||
uint32_t flags;
|
||||
};
|
||||
|
||||
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags);
|
||||
void irq_detach (irq_func_t func);
|
||||
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
|
||||
struct irq* irq_find (uint32_t irq_num);
|
||||
|
||||
#endif // _KERNEL_IRQ_IRQ_H
|
||||
|
||||
kernel/libk/assert.h (new file, 15 lines)
@@ -0,0 +1,15 @@
#ifndef _KERNEL_LIBK_ASSERT_H
#define _KERNEL_LIBK_ASSERT_H

#include <sys/spin.h>

#define assert(x)                          \
  do {                                     \
    if (!(x)) {                            \
      DEBUG ("%s assertion failed\n", #x); \
      spin ();                             \
      __builtin_unreachable ();            \
    }                                      \
  } while (0)

#endif // _KERNEL_LIBK_ASSERT_H
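A quick usage sketch of the new macro; the call site below is hypothetical and only illustrates the failure path:

#include <libk/assert.h>

void example_mark_used (size_t nblks) {
  assert (nblks > 0); /* on failure: prints "nblks > 0 assertion failed", then spins forever */
  /* ... */
}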
@@ -1,12 +1,19 @@
|
||||
#ifndef _KERNEL_LIBK_LIST_H
|
||||
#define _KERNEL_LIBK_LIST_H
|
||||
|
||||
#define dlinklist_append(type, head, new) \
|
||||
struct list_node_link {
|
||||
struct list_node_link* next;
|
||||
struct list_node_link* prev;
|
||||
};
|
||||
|
||||
#define list_entry(ptr, type, member) ((type*)((char*)(ptr) - offsetof (type, member)))
|
||||
|
||||
#define list_append(head, new) \
|
||||
do { \
|
||||
if ((new) != NULL) { \
|
||||
(new)->next = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
type __tmp = (head); \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp->next != NULL) { \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
@@ -19,7 +26,7 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_prepend(head, new) \
|
||||
#define list_prepend(head, new) \
|
||||
do { \
|
||||
if ((new) != NULL) { \
|
||||
(new)->prev = NULL; \
|
||||
@@ -31,7 +38,7 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_remove(head, ele) \
|
||||
#define list_remove(head, ele) \
|
||||
do { \
|
||||
if ((ele) != NULL) { \
|
||||
if ((ele)->prev != NULL) { \
|
||||
@@ -47,10 +54,10 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_find(type, head, out, propname, propvalue) \
|
||||
#define list_find(head, out, propname, propvalue) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
type __tmp = (head); \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp) { \
|
||||
if (__tmp->propname == (propvalue)) { \
|
||||
(out) = __tmp; \
|
||||
@@ -60,23 +67,23 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_foreach(head, var, tmp) \
|
||||
#define list_foreach(head, var, tmp) \
|
||||
for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
|
||||
var = tmp, tmp = (var ? var->next : NULL))
|
||||
|
||||
#define dlinklist_foreach_index(head, var, tmp, idx) \
|
||||
#define list_foreach_index(head, var, tmp, idx) \
|
||||
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
|
||||
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
|
||||
|
||||
#define dlinklist_foreach_index_limit(head, var, tmp, idx, max) \
|
||||
#define list_foreach_index_limit(head, var, tmp, idx, max) \
|
||||
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
|
||||
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
|
||||
|
||||
#define dlinklist_back(type, head, out) \
|
||||
#define list_back(head, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
type __tmp = (head); \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp->next != NULL) { \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
@@ -84,11 +91,11 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_front(type, head, out) \
|
||||
#define list_front(head, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
type __tmp = (head); \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp->prev != NULL) { \
|
||||
__tmp = __tmp->prev; \
|
||||
} \
|
||||
@@ -96,7 +103,7 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_insert_after(head, pos, new) \
|
||||
#define list_insert_after(head, pos, new) \
|
||||
do { \
|
||||
if ((pos) != NULL && (new) != NULL) { \
|
||||
(new)->prev = (pos); \
|
||||
@@ -112,7 +119,7 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_insert_before(head, pos, new) \
|
||||
#define list_insert_before(head, pos, new) \
|
||||
do { \
|
||||
if ((pos) != NULL && (new) != NULL) { \
|
||||
(new)->next = (pos); \
|
||||
@@ -130,11 +137,11 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_index_of(type, head, ele, out_idx) \
|
||||
#define list_index_of(head, ele, out_idx) \
|
||||
do { \
|
||||
(out_idx) = -1; \
|
||||
int __idx = 0; \
|
||||
type __tmp = (head); \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp != NULL) { \
|
||||
if (__tmp == (ele)) { \
|
||||
(out_idx) = __idx; \
|
||||
@@ -145,11 +152,11 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define dlinklist_index_of_prop(type, head, propname, propvalue, out_idx) \
|
||||
#define list_index_of_prop(head, propname, propvalue, out_idx) \
|
||||
do { \
|
||||
(out_idx) = -1; \
|
||||
int __idx = 0; \
|
||||
type __tmp = (head); \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp != NULL) { \
|
||||
if (__tmp->propname == (propvalue)) { \
|
||||
(out_idx) = __idx; \
|
||||
@@ -160,109 +167,4 @@
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define linklist_index_of(type, head, ele, out_idx) \
|
||||
do { \
|
||||
(out_idx) = -1; \
|
||||
int __idx = 0; \
|
||||
type __tmp = (head); \
|
||||
while (__tmp != NULL) { \
|
||||
if (__tmp == (ele)) { \
|
||||
(out_idx) = __idx; \
|
||||
break; \
|
||||
} \
|
||||
__tmp = __tmp->next; \
|
||||
__idx++; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define linklist_index_of_prop(type, head, propname, propvalue, out_idx) \
|
||||
do { \
|
||||
(out_idx) = -1; \
|
||||
int __idx = 0; \
|
||||
type __tmp = (head); \
|
||||
while (__tmp != NULL) { \
|
||||
if (__tmp->propname == (propvalue)) { \
|
||||
(out_idx) = __idx; \
|
||||
break; \
|
||||
} \
|
||||
__tmp = __tmp->next; \
|
||||
__idx++; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define linklist_append(type, head, new) \
|
||||
do { \
|
||||
if ((new) != NULL) { \
|
||||
if ((head) != NULL) { \
|
||||
type __tmp; \
|
||||
(new)->next = NULL; \
|
||||
__tmp = (head); \
|
||||
while (__tmp->next != NULL) { \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
__tmp->next = (new); \
|
||||
} else { \
|
||||
(new)->next = NULL; \
|
||||
(head) = (new); \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define linklist_remove(type, head, ele) \
|
||||
do { \
|
||||
if ((head) != NULL && (ele) != NULL) { \
|
||||
type __cur = (head); \
|
||||
type __prev = NULL; \
|
||||
while (__cur != NULL && __cur != (ele)) { \
|
||||
__prev = __cur; \
|
||||
__cur = __cur->next; \
|
||||
} \
|
||||
if (__cur == (ele)) { \
|
||||
if (__prev != NULL) { \
|
||||
__prev->next = __cur->next; \
|
||||
} else { \
|
||||
(head) = __cur->next; \
|
||||
} \
|
||||
(ele)->next = NULL; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define linklist_find(type, head, out, propname, propvalue) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
type __tmp = (head); \
|
||||
while (__tmp) { \
|
||||
if (__tmp->propname == (propvalue)) { \
|
||||
(out) = __tmp; \
|
||||
break; \
|
||||
} \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define linklist_foreach(head, var, tmp) \
|
||||
for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
|
||||
var = tmp, tmp = (var ? var->next : NULL))
|
||||
|
||||
#define linklist_foreach_index(head, var, tmp, idx) \
|
||||
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
|
||||
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
|
||||
|
||||
#define linklist_foreach_index_limit(head, var, tmp, idx, max) \
|
||||
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
|
||||
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
|
||||
|
||||
#define linklist_back(type, head, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
type __tmp = (head); \
|
||||
while (__tmp->next != NULL) { \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
(out) = __tmp; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif // _KERNEL_LIBK_LIST_H
|
||||
|
||||
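The list macros now work on an embedded struct list_node_link rather than a typed head, with list_entry recovering the container; a minimal sketch under that assumption, mirroring how proc.c uses the API elsewhere in this diff:

struct job {
  int id;
  struct list_node_link link;              /* intrusive node embedded in the container */
};

static struct list_node_link* job_q = NULL;

void job_push (struct job* j) {
  list_append (job_q, &j->link);           /* append the embedded node */
}

struct job* job_find (int id) {
  struct list_node_link *n, *tmp;
  list_foreach (job_q, n, tmp) {
    struct job* j = list_entry (n, struct job, link); /* node -> container */
    if (j->id == id)
      return j;
  }
  return NULL;
}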
kernel/libk/rbtree.h (new file, 323 lines)
@@ -0,0 +1,323 @@
|
||||
#ifndef _KERNEL_LIBK_RBTREE_H
|
||||
#define _KERNEL_LIBK_RBTREE_H
|
||||
|
||||
struct rb_node_link {
|
||||
struct rb_node_link* left;
|
||||
struct rb_node_link* right;
|
||||
struct rb_node_link* parent;
|
||||
int color;
|
||||
};
|
||||
|
||||
#define RBTREE_RED 0
|
||||
#define RBTREE_BLACK 1
|
||||
|
||||
#define rbtree_parent(x) ((x)->parent)
|
||||
#define rbtree_left(x) ((x)->left)
|
||||
#define rbtree_right(x) ((x)->right)
|
||||
#define rbtree_color(x) ((x)->color)
|
||||
|
||||
#define rbtree_entry(node, type, member) ((type*)((char*)(node) - offsetof (type, member)))
|
||||
|
||||
#define rbtree_node_color(x) ((x) ? (x)->color : RBTREE_BLACK)
|
||||
|
||||
#define rbtree_rotate_left(root_ptr, x_node) \
|
||||
do { \
|
||||
struct rb_node_link* __x = (x_node); \
|
||||
struct rb_node_link* __y = __x->right; \
|
||||
__x->right = __y->left; \
|
||||
if (__y->left) \
|
||||
__y->left->parent = __x; \
|
||||
__y->parent = __x->parent; \
|
||||
if (!__x->parent) \
|
||||
*(root_ptr) = __y; \
|
||||
else if (__x == __x->parent->left) \
|
||||
__x->parent->left = __y; \
|
||||
else \
|
||||
__x->parent->right = __y; \
|
||||
__y->left = __x; \
|
||||
__x->parent = __y; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_rotate_right(root_ptr, y_node) \
|
||||
do { \
|
||||
struct rb_node_link* __y = (y_node); \
|
||||
struct rb_node_link* __x = __y->left; \
|
||||
__y->left = __x->right; \
|
||||
if (__x->right) \
|
||||
__x->right->parent = __y; \
|
||||
__x->parent = __y->parent; \
|
||||
if (!__y->parent) \
|
||||
*(root_ptr) = __x; \
|
||||
else if (__y == __y->parent->right) \
|
||||
__y->parent->right = __x; \
|
||||
else \
|
||||
__y->parent->left = __x; \
|
||||
__x->right = __y; \
|
||||
__y->parent = __x; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_insert_fixup(root_ptr, z_node) \
|
||||
do { \
|
||||
struct rb_node_link* __z = (z_node); \
|
||||
while (__z->parent && __z->parent->color == RBTREE_RED) { \
|
||||
if (__z->parent == __z->parent->parent->left) { \
|
||||
struct rb_node_link* __y = __z->parent->parent->right; \
|
||||
if (rbtree_node_color (__y) == RBTREE_RED) { \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__y->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
__z = __z->parent->parent; \
|
||||
} else { \
|
||||
if (__z == __z->parent->right) { \
|
||||
__z = __z->parent; \
|
||||
rbtree_rotate_left (root_ptr, __z); \
|
||||
} \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
rbtree_rotate_right (root_ptr, __z->parent->parent); \
|
||||
} \
|
||||
} else { \
|
||||
struct rb_node_link* __y = __z->parent->parent->left; \
|
||||
if (rbtree_node_color (__y) == RBTREE_RED) { \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__y->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
__z = __z->parent->parent; \
|
||||
} else { \
|
||||
if (__z == __z->parent->left) { \
|
||||
__z = __z->parent; \
|
||||
rbtree_rotate_right (root_ptr, __z); \
|
||||
} \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
rbtree_rotate_left (root_ptr, __z->parent->parent); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
(*(root_ptr))->color = RBTREE_BLACK; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_insert(type, root_ptr, node, member, keyfield) \
|
||||
do { \
|
||||
struct rb_node_link** __link = (root_ptr); \
|
||||
struct rb_node_link* __parent = NULL; \
|
||||
struct rb_node_link* __new = (node); \
|
||||
type* __nobj = rbtree_entry (__new, type, member); \
|
||||
while (*__link) { \
|
||||
__parent = *__link; \
|
||||
type* __xobj = rbtree_entry (*__link, type, member); \
|
||||
if (__nobj->keyfield < __xobj->keyfield) \
|
||||
__link = &((*__link)->left); \
|
||||
else \
|
||||
__link = &((*__link)->right); \
|
||||
} \
|
||||
__new->parent = __parent; \
|
||||
__new->left = __new->right = NULL; \
|
||||
__new->color = RBTREE_RED; \
|
||||
*__link = __new; \
|
||||
rbtree_insert_fixup (root_ptr, __new); \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_find(type, root_ptr, keyval, out, member, keyfield) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct rb_node_link* __cur = *(root_ptr); \
|
||||
while (__cur) { \
|
||||
type* __obj = rbtree_entry (__cur, type, member); \
|
||||
if ((keyval) == __obj->keyfield) { \
|
||||
(out) = rbtree_entry (__cur, type, member); \
|
||||
break; \
|
||||
} else if ((keyval) < __obj->keyfield) \
|
||||
__cur = __cur->left; \
|
||||
else \
|
||||
__cur = __cur->right; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_min(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct rb_node_link* __n = (node); \
|
||||
while (__n && __n->left) \
|
||||
__n = __n->left; \
|
||||
(out) = __n; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_max(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct rb_node_link* __n = (node); \
|
||||
while (__n && __n->right) \
|
||||
__n = __n->right; \
|
||||
(out) = __n; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
|
||||
#define rbtree_last(root_ptr, out) rbtree_max (*(root_ptr), out)
|
||||
|
||||
#define rbtree_transplant(root_ptr, u_node, v_node) \
|
||||
do { \
|
||||
struct rb_node_link* __u = (u_node); \
|
||||
struct rb_node_link* __v = (v_node); \
|
||||
if (!__u->parent) \
|
||||
*(root_ptr) = __v; \
|
||||
else if (__u == __u->parent->left) \
|
||||
__u->parent->left = __v; \
|
||||
else \
|
||||
__u->parent->right = __v; \
|
||||
if (__v) \
|
||||
__v->parent = __u->parent; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_delete_fixup(root_ptr, x_node, xparent_node) \
|
||||
do { \
|
||||
struct rb_node_link* __rdf_x = (x_node); \
|
||||
struct rb_node_link* __rdf_xp = (xparent_node); \
|
||||
while (__rdf_xp && (__rdf_x == NULL || __rdf_x->color == RBTREE_BLACK)) { \
|
||||
if (__rdf_x == __rdf_xp->left) { \
|
||||
struct rb_node_link* __w = __rdf_xp->right; \
|
||||
if (rbtree_node_color (__w) == RBTREE_RED) { \
|
||||
__w->color = RBTREE_BLACK; \
|
||||
__rdf_xp->color = RBTREE_RED; \
|
||||
rbtree_rotate_left (root_ptr, __rdf_xp); \
|
||||
__w = __rdf_xp->right; \
|
||||
} \
|
||||
if (rbtree_node_color (__w->left) == RBTREE_BLACK && \
|
||||
rbtree_node_color (__w->right) == RBTREE_BLACK) { \
|
||||
if (__w) \
|
||||
__w->color = RBTREE_RED; \
|
||||
__rdf_x = __rdf_xp; \
|
||||
__rdf_xp = __rdf_x->parent; \
|
||||
} else { \
|
||||
if (rbtree_node_color (__w->right) == RBTREE_BLACK) { \
|
||||
if (__w->left) \
|
||||
__w->left->color = RBTREE_BLACK; \
|
||||
__w->color = RBTREE_RED; \
|
||||
rbtree_rotate_right (root_ptr, __w); \
|
||||
__w = __rdf_xp->right; \
|
||||
} \
|
||||
__w->color = __rdf_xp->color; \
|
||||
__rdf_xp->color = RBTREE_BLACK; \
|
||||
if (__w->right) \
|
||||
__w->right->color = RBTREE_BLACK; \
|
||||
rbtree_rotate_left (root_ptr, __rdf_xp); \
|
||||
__rdf_x = *(root_ptr); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
struct rb_node_link* __w = __rdf_xp->left; \
|
||||
if (rbtree_node_color (__w) == RBTREE_RED) { \
|
||||
__w->color = RBTREE_BLACK; \
|
||||
__rdf_xp->color = RBTREE_RED; \
|
||||
rbtree_rotate_right (root_ptr, __rdf_xp); \
|
||||
__w = __rdf_xp->left; \
|
||||
} \
|
||||
if (rbtree_node_color (__w->right) == RBTREE_BLACK && \
|
||||
rbtree_node_color (__w->left) == RBTREE_BLACK) { \
|
||||
if (__w) \
|
||||
__w->color = RBTREE_RED; \
|
||||
__rdf_x = __rdf_xp; \
|
||||
__rdf_xp = __rdf_x->parent; \
|
||||
} else { \
|
||||
if (rbtree_node_color (__w->left) == RBTREE_BLACK) { \
|
||||
if (__w->right) \
|
||||
__w->right->color = RBTREE_BLACK; \
|
||||
__w->color = RBTREE_RED; \
|
||||
rbtree_rotate_left (root_ptr, __w); \
|
||||
__w = __rdf_xp->left; \
|
||||
} \
|
||||
__w->color = __rdf_xp->color; \
|
||||
__rdf_xp->color = RBTREE_BLACK; \
|
||||
if (__w->left) \
|
||||
__w->left->color = RBTREE_BLACK; \
|
||||
rbtree_rotate_right (root_ptr, __rdf_xp); \
|
||||
__rdf_x = *(root_ptr); \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
if (__rdf_x) \
|
||||
__rdf_x->color = RBTREE_BLACK; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_delete(root_ptr, z_node) \
|
||||
do { \
|
||||
struct rb_node_link* __rd_z = (z_node); \
|
||||
struct rb_node_link* __rd_y = __rd_z; \
|
||||
struct rb_node_link* __rd_x = NULL; \
|
||||
struct rb_node_link* __rd_xp = NULL; \
|
||||
int __rd_y_orig_color = __rd_y->color; \
|
||||
if (!__rd_z->left) { \
|
||||
__rd_x = __rd_z->right; \
|
||||
__rd_xp = __rd_z->parent; \
|
||||
rbtree_transplant (root_ptr, __rd_z, __rd_z->right); \
|
||||
} else if (!__rd_z->right) { \
|
||||
__rd_x = __rd_z->left; \
|
||||
__rd_xp = __rd_z->parent; \
|
||||
rbtree_transplant (root_ptr, __rd_z, __rd_z->left); \
|
||||
} else { \
|
||||
rbtree_min (__rd_z->right, __rd_y); \
|
||||
__rd_y_orig_color = __rd_y->color; \
|
||||
__rd_x = __rd_y->right; \
|
||||
if (__rd_y->parent == __rd_z) { \
|
||||
__rd_xp = __rd_y; \
|
||||
if (__rd_x) \
|
||||
__rd_x->parent = __rd_y; \
|
||||
} else { \
|
||||
__rd_xp = __rd_y->parent; \
|
||||
rbtree_transplant (root_ptr, __rd_y, __rd_y->right); \
|
||||
__rd_y->right = __rd_z->right; \
|
||||
__rd_y->right->parent = __rd_y; \
|
||||
} \
|
||||
rbtree_transplant (root_ptr, __rd_z, __rd_y); \
|
||||
__rd_y->left = __rd_z->left; \
|
||||
__rd_y->left->parent = __rd_y; \
|
||||
__rd_y->color = __rd_z->color; \
|
||||
} \
|
||||
if (__rd_y_orig_color == RBTREE_BLACK) \
|
||||
rbtree_delete_fixup (root_ptr, __rd_x, __rd_xp); \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_next(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if (node) { \
|
||||
if ((node)->right) { \
|
||||
struct rb_node_link* __n = (node)->right; \
|
||||
while (__n->left) \
|
||||
__n = __n->left; \
|
||||
(out) = __n; \
|
||||
} else { \
|
||||
struct rb_node_link* __n = (node); \
|
||||
struct rb_node_link* __p = (node)->parent; \
|
||||
while (__p && __n == __p->right) { \
|
||||
__n = __p; \
|
||||
__p = __p->parent; \
|
||||
} \
|
||||
(out) = __p; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_prev(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if (node) { \
|
||||
if ((node)->left) { \
|
||||
struct rb_node_link* __n = (node)->left; \
|
||||
while (__n->right) \
|
||||
__n = __n->right; \
|
||||
(out) = __n; \
|
||||
} else { \
|
||||
struct rb_node_link* __n = (node); \
|
||||
struct rb_node_link* __p = (node)->parent; \
|
||||
while (__p && __n == __p->left) { \
|
||||
__n = __p; \
|
||||
__p = __p->parent; \
|
||||
} \
|
||||
(out) = __p; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif // _KERNEL_LIBK_RBTREE_H
|
||||
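The red-black tree is intrusive in the same way: rbtree_insert and rbtree_find take the container type, the name of the embedded rb_node_link member, and the key field. A small sketch under those assumptions (struct session is hypothetical):

struct session {
  int id;                                  /* key                */
  struct rb_node_link tree_link;           /* embedded tree node */
};

static struct rb_node_link* session_tree = NULL;

void session_add (struct session* s) {
  /* keyed on 'id', linked through 'tree_link' */
  rbtree_insert (struct session, &session_tree, &s->tree_link, tree_link, id);
}

struct session* session_get (int id) {
  struct session* out = NULL;
  rbtree_find (struct session, &session_tree, id, out, tree_link, id);
  return out;                              /* NULL if no node carries this key */
}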
@@ -1,6 +1,8 @@
|
||||
#ifndef _KERNEL_LIBK_STRING_H
|
||||
#define _KERNEL_LIBK_STRING_H
|
||||
|
||||
#include <libk/std.h>
|
||||
|
||||
size_t memset (void* dst, uint8_t b, size_t n);
|
||||
size_t memcpy (void* dst, const void* src, size_t n);
|
||||
void strncpy (char* dst, const char* src, size_t n);
|
||||
|
||||
@@ -20,3 +20,4 @@ DECL_REQ (memmap, MEMMAP);
|
||||
DECL_REQ (rsdp, RSDP);
|
||||
DECL_REQ (mp, MP);
|
||||
DECL_REQ (module, MODULE);
|
||||
DECL_REQ (framebuffer, FRAMEBUFFER);
|
||||
|
||||
@@ -10,5 +10,6 @@ EXTERN_REQ (memmap);
|
||||
EXTERN_REQ (rsdp);
|
||||
EXTERN_REQ (mp);
|
||||
EXTERN_REQ (module);
|
||||
EXTERN_REQ (framebuffer);
|
||||
|
||||
#endif // _KERNEL_LIMINE_REQUESTS_H
|
||||
|
||||
@@ -11,13 +11,13 @@
|
||||
|
||||
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;
|
||||
|
||||
int liballoc_lock (void) {
|
||||
spin_lock (&_liballoc_lock);
|
||||
int liballoc_lock (void* ctx) {
|
||||
spin_lock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int liballoc_unlock (void) {
|
||||
spin_unlock (&_liballoc_lock);
|
||||
int liballoc_unlock (void* ctx) {
|
||||
spin_unlock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ int liballoc_free (void* ptr, int pages) {
|
||||
|
||||
/** Durand's Ridiculously Amazing Super Duper Memory functions. */
|
||||
|
||||
//#define DEBUG
|
||||
// #define DEBUG
|
||||
|
||||
#define LIBALLOC_MAGIC 0xc001c0de
|
||||
#define MAXCOMPLETE 5
|
||||
@@ -243,8 +243,9 @@ void* malloc (size_t size) {
|
||||
int index;
|
||||
void* ptr;
|
||||
struct boundary_tag* tag = NULL;
|
||||
spin_lock_ctx_t ctxliba;
|
||||
|
||||
liballoc_lock ();
|
||||
liballoc_lock (&ctxliba);
|
||||
|
||||
if (l_initialized == 0) {
|
||||
for (index = 0; index < MAXEXP; index++) {
|
||||
@@ -272,7 +273,7 @@ void* malloc (size_t size) {
|
||||
// No page found. Make one.
|
||||
if (tag == NULL) {
|
||||
if ((tag = allocate_new_tag (size)) == NULL) {
|
||||
liballoc_unlock ();
|
||||
liballoc_unlock (&ctxliba);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -305,23 +306,24 @@ void* malloc (size_t size) {
|
||||
|
||||
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
|
||||
|
||||
liballoc_unlock ();
|
||||
liballoc_unlock (&ctxliba);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void free (void* ptr) {
|
||||
int index;
|
||||
struct boundary_tag* tag;
|
||||
spin_lock_ctx_t ctxliba;
|
||||
|
||||
if (ptr == NULL)
|
||||
return;
|
||||
|
||||
liballoc_lock ();
|
||||
liballoc_lock (&ctxliba);
|
||||
|
||||
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
|
||||
|
||||
if (tag->magic != LIBALLOC_MAGIC) {
|
||||
liballoc_unlock (); // release the lock
|
||||
liballoc_unlock (&ctxliba); // release the lock
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -354,7 +356,7 @@ void free (void* ptr) {
|
||||
|
||||
liballoc_free (tag, pages);
|
||||
|
||||
liballoc_unlock ();
|
||||
liballoc_unlock (&ctxliba);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -365,7 +367,7 @@ void free (void* ptr) {
|
||||
|
||||
insert_tag (tag, index);
|
||||
|
||||
liballoc_unlock ();
|
||||
liballoc_unlock (&ctxliba);
|
||||
}
|
||||
|
||||
void* calloc (size_t nobj, size_t size) {
|
||||
@@ -385,6 +387,7 @@ void* realloc (void* p, size_t size) {
|
||||
void* ptr;
|
||||
struct boundary_tag* tag;
|
||||
int real_size;
|
||||
spin_lock_ctx_t ctxliba;
|
||||
|
||||
if (size == 0) {
|
||||
free (p);
|
||||
@@ -394,11 +397,11 @@ void* realloc (void* p, size_t size) {
|
||||
return malloc (size);
|
||||
|
||||
if (&liballoc_lock != NULL)
|
||||
liballoc_lock (); // lockit
|
||||
liballoc_lock (&ctxliba); // lockit
|
||||
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
|
||||
real_size = tag->size;
|
||||
if (&liballoc_unlock != NULL)
|
||||
liballoc_unlock ();
|
||||
liballoc_unlock (&ctxliba);
|
||||
|
||||
if ((size_t)real_size > size)
|
||||
real_size = size;
|
||||
|
||||
@@ -47,7 +47,7 @@ struct boundary_tag {
|
||||
* \return 0 if the lock was acquired successfully. Anything else is
|
||||
* failure.
|
||||
*/
|
||||
extern int liballoc_lock ();
|
||||
extern int liballoc_lock (void* ctx);
|
||||
|
||||
/** This function unlocks what was previously locked by the liballoc_lock
|
||||
* function. If it disabled interrupts, it enables interrupts. If it
|
||||
@@ -55,7 +55,7 @@ extern int liballoc_lock ();
|
||||
*
|
||||
* \return 0 if the lock was successfully released.
|
||||
*/
|
||||
extern int liballoc_unlock ();
|
||||
extern int liballoc_unlock (void* ctx);
|
||||
|
||||
/** This is the hook into the local system which allocates pages. It
|
||||
* accepts an integer parameter which is the number of pages
|
||||
|
||||
@@ -38,8 +38,8 @@ void pmm_init (void) {
|
||||
struct pmm_region* pmm_region = &pmm.regions[region];
|
||||
|
||||
/*
|
||||
* We need to calculate sizes for the pmm region and the bitmap. The bitmap MUSTN'T include it's
|
||||
* own region within the bit range.
|
||||
* We need to calculate sizes for the pmm region and the bitmap. The bitmap MUSTN'T include
|
||||
* its own region within the bit range.
|
||||
* */
|
||||
|
||||
size_t size = align_down (entry->length, PAGE_SIZE);
|
||||
@@ -100,6 +100,8 @@ static size_t pmm_find_free_space (struct pmm_region* pmm_region, size_t nblks)
|
||||
}
|
||||
|
||||
physaddr_t pmm_alloc (size_t nblks) {
|
||||
spin_lock_ctx_t ctxpmmr;
|
||||
|
||||
for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
|
||||
struct pmm_region* pmm_region = &pmm.regions[region];
|
||||
|
||||
@@ -107,7 +109,7 @@ physaddr_t pmm_alloc (size_t nblks) {
|
||||
if (!(pmm_region->flags & PMM_REGION_ACTIVE))
|
||||
continue;
|
||||
|
||||
spin_lock (&pmm_region->lock);
|
||||
spin_lock (&pmm_region->lock, &ctxpmmr);
|
||||
|
||||
/* Find starting bit of the free bit range */
|
||||
size_t bit = pmm_find_free_space (pmm_region, nblks);
|
||||
@@ -116,18 +118,19 @@ physaddr_t pmm_alloc (size_t nblks) {
|
||||
if (bit != (size_t)-1) {
|
||||
/* Mark it */
|
||||
bm_set_region (&pmm_region->bm, bit, nblks);
|
||||
spin_unlock (&pmm_region->lock);
|
||||
spin_unlock (&pmm_region->lock, &ctxpmmr);
|
||||
|
||||
return pmm_region->membase + bit * PAGE_SIZE;
|
||||
}
|
||||
|
||||
spin_unlock (&pmm_region->lock);
|
||||
spin_unlock (&pmm_region->lock, &ctxpmmr);
|
||||
}
|
||||
|
||||
return PMM_ALLOC_ERR;
|
||||
}
|
||||
|
||||
void pmm_free (physaddr_t p_addr, size_t nblks) {
|
||||
spin_lock_ctx_t ctxpmmr;
|
||||
/* Round down to nearest page boundary */
|
||||
physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE);
|
||||
|
||||
@@ -145,11 +148,11 @@ void pmm_free (physaddr_t p_addr, size_t nblks) {
|
||||
|
||||
size_t bit = div_align_up (addr, PAGE_SIZE);
|
||||
|
||||
spin_lock (&pmm_region->lock);
|
||||
spin_lock (&pmm_region->lock, &ctxpmmr);
|
||||
|
||||
bm_clear_region (&pmm_region->bm, bit, nblks);
|
||||
|
||||
spin_unlock (&pmm_region->lock);
|
||||
spin_unlock (&pmm_region->lock, &ctxpmmr);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
kernel/proc/locks.txt (new file, 10 lines)
@@ -0,0 +1,10 @@
Lock hierarchy for process scheduling:

1. proc_tree_lock
2. cpu->lock
3. procgroup->lock
4. proc->lock
5. sq->lock

1. procgroup_tree_lock
2. procgroup->lock
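A minimal sketch of what this ordering implies for a path that needs both a CPU run queue and a process at once; the lock names come from this diff, the call site itself is hypothetical:

/* Acquire in hierarchy order, release in reverse, so two CPUs can never
 * hold the same pair of locks in opposite order (ABBA deadlock).
 */
spin_lock_ctx_t ctxtree, ctxcpu, ctxpr;

spin_lock (&proc_tree_lock, &ctxtree);  /* 1. proc_tree_lock */
spin_lock (&cpu->lock, &ctxcpu);        /* 2. cpu->lock      */
spin_lock (&proc->lock, &ctxpr);        /* 4. proc->lock     */

/* ... migrate or inspect the process ... */

spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
spin_unlock (&proc_tree_lock, &ctxtree);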
kernel/proc/mutex.c (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
#include <libk/assert.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/suspension_q.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/spin_lock.h>
|
||||
|
||||
void proc_mutexes_cleanup (struct proc* proc) {
|
||||
spin_lock_ctx_t ctxpg, ctxrs;
|
||||
|
||||
spin_lock (&proc->procgroup->lock, &ctxpg);
|
||||
|
||||
struct rb_node_link* rnode;
|
||||
rbtree_first (&proc->procgroup->resource_tree, rnode);
|
||||
|
||||
while (rnode) {
|
||||
struct rb_node_link* next;
|
||||
rbtree_next (rnode, next);
|
||||
|
||||
struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
|
||||
|
||||
rnode = next;
|
||||
|
||||
spin_lock (&resource->lock, &ctxrs);
|
||||
|
||||
if (resource->type != PR_MUTEX) {
|
||||
spin_unlock (&resource->lock, &ctxrs);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
|
||||
spin_unlock (&resource->lock, &ctxrs);
|
||||
|
||||
proc_mutex_unlock (proc, &resource->u.mutex);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock (&proc->procgroup->lock, &ctxpg);
|
||||
}
|
||||
|
||||
bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
|
||||
struct proc_mutex* mutex = &resource->u.mutex;
|
||||
spin_lock_ctx_t ctxmt, ctxsq;
|
||||
|
||||
spin_lock (&mutex->resource->lock, &ctxmt);
|
||||
spin_lock (&mutex->suspension_q.lock, &ctxsq);
|
||||
|
||||
bool reschedule = PROC_NO_RESCHEDULE;
|
||||
|
||||
while (mutex->suspension_q.proc_list != NULL) {
|
||||
struct list_node_link* node = mutex->suspension_q.proc_list;
|
||||
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
|
||||
struct proc* suspended_proc = sq_entry->proc;
|
||||
|
||||
/* we will relock during resume */
|
||||
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
|
||||
spin_unlock (&mutex->resource->lock, &ctxmt);
|
||||
|
||||
/* call resume unconditionally; '||' alone would short-circuit once reschedule
 * is true and skip waking the remaining waiters */
bool resumed = proc_sq_resume (suspended_proc, sq_entry);
reschedule = reschedule || resumed;
|
||||
|
||||
/* reacquire */
|
||||
spin_lock (&mutex->resource->lock, &ctxmt);
|
||||
spin_lock (&mutex->suspension_q.lock, &ctxsq);
|
||||
}
|
||||
|
||||
mutex->locked = false;
|
||||
mutex->owner = NULL;
|
||||
|
||||
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
|
||||
spin_unlock (&mutex->resource->lock, &ctxmt);
|
||||
|
||||
return reschedule;
|
||||
}
|
||||
|
||||
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
|
||||
spin_lock_ctx_t ctxmt;
|
||||
|
||||
spin_lock (&mutex->resource->lock, &ctxmt);
|
||||
|
||||
if (!mutex->locked || mutex->owner == proc) {
|
||||
mutex->locked = true;
|
||||
mutex->owner = proc;
|
||||
spin_unlock (&mutex->resource->lock, &ctxmt);
|
||||
return PROC_NO_RESCHEDULE;
|
||||
}
|
||||
|
||||
return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
|
||||
}
|
||||
|
||||
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
|
||||
spin_lock_ctx_t ctxmt, ctxsq;
|
||||
|
||||
spin_lock (&mutex->resource->lock, &ctxmt);
|
||||
|
||||
if (mutex->owner != proc) {
|
||||
spin_unlock (&mutex->resource->lock, &ctxmt);
|
||||
return PROC_NO_RESCHEDULE;
|
||||
}
|
||||
|
||||
spin_lock (&mutex->suspension_q.lock, &ctxsq);
|
||||
|
||||
struct list_node_link* node = mutex->suspension_q.proc_list;
|
||||
|
||||
if (node) {
|
||||
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
|
||||
struct proc* resumed_proc = sq_entry->proc;
|
||||
|
||||
mutex->owner = resumed_proc;
|
||||
mutex->locked = true;
|
||||
|
||||
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
|
||||
spin_unlock (&mutex->resource->lock, &ctxmt);
|
||||
|
||||
return proc_sq_resume (resumed_proc, sq_entry);
|
||||
}
|
||||
|
||||
mutex->locked = false;
|
||||
mutex->owner = NULL;
|
||||
|
||||
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
|
||||
spin_unlock (&mutex->resource->lock, &ctxmt);
|
||||
|
||||
return PROC_NEED_RESCHEDULE;
|
||||
}
|
||||
kernel/proc/mutex.h (new file, 23 lines)
@@ -0,0 +1,23 @@
#ifndef _KERNEL_PROC_MUTEX_H
#define _KERNEL_PROC_MUTEX_H

#include <libk/std.h>
#include <proc/suspension_q.h>

struct proc;
struct proc_resource;

struct proc_mutex {
  struct proc_resource* resource;

  bool locked;
  struct proc_suspension_q suspension_q;
  struct proc* owner;
};

bool proc_cleanup_resource_mutex (struct proc_resource* resource);
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
void proc_mutexes_cleanup (struct proc* proc);

#endif // _KERNEL_PROC_MUTEX_H
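A hedged sketch of how a caller might drive this primitive; only proc_mutex_lock/unlock and the PROC_*RESCHEDULE values come from the diff, the wrapper below and the decision to call proc_sched directly are assumptions:

void with_mutex (struct proc* p, struct proc_mutex* m) {
  bool need1 = proc_mutex_lock (p, m);   /* may suspend p until the owner releases    */
  /* ... section owned by p ... */
  bool need2 = proc_mutex_unlock (p, m); /* wakes one waiter and hands ownership over */

  if (need1 == PROC_NEED_RESCHEDULE || need2 == PROC_NEED_RESCHEDULE)
    proc_sched ();                       /* assumed reaction; the real call site may differ */
}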
@@ -3,12 +3,15 @@
|
||||
#include <irq/irq.h>
|
||||
#include <libk/align.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <limine/requests.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
#include <rd/rd.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
@@ -22,8 +25,12 @@
|
||||
#include <amd64/intr_defs.h>
|
||||
#endif
|
||||
|
||||
static struct procw* procs;
|
||||
static spin_lock_t procs_lock = SPIN_LOCK_INIT;
|
||||
#define SCHED_REAP_FREQ 10
|
||||
|
||||
static struct rb_node_link* proc_tree = NULL;
|
||||
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
|
||||
|
||||
static atomic_int sched_cycles = 0;
|
||||
|
||||
static bool proc_check_elf (uint8_t* elf) {
|
||||
if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
|
||||
@@ -31,27 +38,6 @@ static bool proc_check_elf (uint8_t* elf) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
|
||||
uint32_t flags) {
|
||||
struct proc_mapping* mapping = malloc (sizeof (*mapping));
|
||||
mapping->paddr = start_paddr;
|
||||
mapping->vaddr = start_vaddr;
|
||||
mapping->size = pages * PAGE_SIZE;
|
||||
|
||||
flags &= ~MM_PD_LOCK; /* clear LOCK flag if present, because we lock manualy */
|
||||
|
||||
spin_lock (&proc->pd.lock);
|
||||
|
||||
linklist_append (struct proc_mapping*, proc->mappings, mapping);
|
||||
|
||||
for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
|
||||
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
|
||||
mm_map_page (&proc->pd, ppage, vpage, flags);
|
||||
}
|
||||
|
||||
spin_unlock (&proc->pd.lock);
|
||||
}
|
||||
|
||||
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
|
||||
struct elf_aux aux;
|
||||
|
||||
@@ -76,19 +62,37 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
|
||||
|
||||
size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
|
||||
|
||||
uintptr_t p_addr = pmm_alloc (blks);
|
||||
if (p_addr == PMM_ALLOC_ERR)
|
||||
DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
|
||||
|
||||
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
|
||||
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
|
||||
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
|
||||
|
||||
uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
|
||||
if (phdr->p_flags & PF_W)
|
||||
pg_flags |= MM_PG_RW;
|
||||
|
||||
proc_map (proc, p_addr, v_addr, blks, pg_flags);
|
||||
uintptr_t p_addr;
|
||||
procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
|
||||
|
||||
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
|
||||
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
|
||||
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
|
||||
} break;
|
||||
case PT_TLS: {
|
||||
#if defined(__x86_64__)
|
||||
if (phdr->p_memsz > 0) {
|
||||
size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
|
||||
size_t tls_size = align_up (phdr->p_memsz, tls_align);
|
||||
size_t tls_total_needed = tls_size + sizeof (uintptr_t);
|
||||
size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
|
||||
proc->procgroup->tls.tls_tmpl_pages = blks;
|
||||
proc->procgroup->tls.tls_tmpl_size = tls_size;
|
||||
proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
|
||||
|
||||
proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
|
||||
memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
|
||||
|
||||
memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
|
||||
phdr->p_filesz);
|
||||
|
||||
proc_init_tls (proc);
|
||||
}
|
||||
#endif
|
||||
} break;
|
||||
}
|
||||
}
|
||||
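For the PT_TLS case above, the extra sizeof(uintptr_t) is presumably the slot for the TCB self-pointer that x86-64 TLS (variant 2) keeps at the thread pointer. A worked sizing example under that assumption:

/* Example: phdr->p_memsz = 0x30, phdr->p_align = 0x10
 *   tls_align        = 0x10
 *   tls_size         = align_up (0x30, 0x10)       = 0x30
 *   tls_total_needed = 0x30 + sizeof (uintptr_t)   = 0x38
 *   blks             = div_align_up (0x38, 0x1000) = 1 page for the template
 */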
@@ -96,11 +100,10 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
|
||||
return aux;
|
||||
}
|
||||
|
||||
static struct proc* proc_spawn_rd (char* name) {
|
||||
struct proc* proc_spawn_rd (char* name) {
|
||||
struct rd_file* rd_file = rd_get_file (name);
|
||||
|
||||
bool ok = proc_check_elf (rd_file->content);
|
||||
DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
|
||||
|
||||
if (!ok)
|
||||
return NULL;
|
||||
@@ -108,93 +111,173 @@ static struct proc* proc_spawn_rd (char* name) {
|
||||
return proc_from_elf (rd_file->content);
|
||||
}
|
||||
|
||||
static void proc_register_for_cpu (struct proc* proc, struct cpu* cpu) {
|
||||
/* make available globally. */
|
||||
struct procw* procw = malloc (sizeof (*procw));
|
||||
if (procw == NULL)
|
||||
return;
|
||||
procw->proc = proc;
|
||||
proc->procw = procw;
|
||||
proc->cpu = cpu;
|
||||
struct proc* proc_find_pid (int pid) {
|
||||
spin_lock_ctx_t ctxprtr;
|
||||
struct proc* proc = NULL;
|
||||
|
||||
spin_lock (&procs_lock);
|
||||
spin_lock (&proc_tree_lock, &ctxprtr);
|
||||
rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
|
||||
spin_unlock (&proc_tree_lock, &ctxprtr);
|
||||
|
||||
spin_lock (&cpu->lock);
|
||||
return proc;
|
||||
}
|
||||
|
||||
linklist_append (struct procw*, procs, procw);
|
||||
linklist_append (struct proc*, cpu->proc_run_q, proc);
|
||||
void proc_register (struct proc* proc, struct cpu* cpu1) {
|
||||
spin_lock_ctx_t ctxcpu, ctxprtr;
|
||||
|
||||
proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
|
||||
|
||||
struct cpu* cpu = proc->cpu;
|
||||
|
||||
spin_lock (&proc_tree_lock, &ctxprtr);
|
||||
spin_lock (&cpu->lock, &ctxcpu);
|
||||
|
||||
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
|
||||
|
||||
atomic_fetch_add (&cpu->proc_run_q_count, 1);
|
||||
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
|
||||
if (cpu->proc_current == NULL)
|
||||
cpu->proc_current = proc;
|
||||
|
||||
spin_unlock (&cpu->lock);
|
||||
|
||||
spin_unlock (&procs_lock);
|
||||
spin_unlock (&proc_tree_lock, &ctxprtr);
|
||||
spin_unlock (&cpu->lock, &ctxcpu);
|
||||
}
|
||||
|
||||
static struct proc* proc_find_sched (void) {
|
||||
struct proc* start = thiscpu->proc_current;
|
||||
struct proc* proc = start->next;
|
||||
/* caller holds cpu->lock */
|
||||
static struct proc* proc_find_sched (struct cpu* cpu) {
|
||||
if (!cpu->proc_run_q)
|
||||
return NULL;
|
||||
|
||||
for (;;) {
|
||||
if (proc == NULL) {
|
||||
proc = thiscpu->proc_run_q;
|
||||
}
|
||||
struct list_node_link *current, *start;
|
||||
|
||||
if (atomic_load (&proc->state) == PROC_READY) {
|
||||
if (cpu->proc_current)
|
||||
current = cpu->proc_current->cpu_run_q_link.next;
|
||||
else
|
||||
current = cpu->proc_run_q;
|
||||
|
||||
if (!current)
|
||||
current = cpu->proc_run_q;
|
||||
|
||||
start = current;
|
||||
|
||||
do {
|
||||
struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
|
||||
|
||||
if (atomic_load (&proc->state) == PROC_READY)
|
||||
return proc;
|
||||
|
||||
current = current->next ? current->next : cpu->proc_run_q;
|
||||
} while (current != start);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void proc_reap (void) {
|
||||
struct proc* proc = NULL;
|
||||
struct list_node_link* reap_list = NULL;
|
||||
spin_lock_ctx_t ctxprtr;
|
||||
spin_lock_ctx_t ctxpr;
|
||||
|
||||
spin_lock (&proc_tree_lock, &ctxprtr);
|
||||
|
||||
struct rb_node_link* node;
|
||||
rbtree_first (&proc_tree, node);
|
||||
|
||||
while (node) {
|
||||
struct rb_node_link* next;
|
||||
rbtree_next (node, next);
|
||||
proc = rbtree_entry (node, struct proc, proc_tree_link);
|
||||
|
||||
if (atomic_load (&proc->state) == PROC_DEAD) {
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
rbtree_delete (&proc_tree, &proc->proc_tree_link);
|
||||
list_append (reap_list, &proc->reap_link);
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
}
|
||||
|
||||
/* No runnable processes found. */
|
||||
if (proc == start) {
|
||||
return NULL;
|
||||
}
|
||||
node = next;
|
||||
}
|
||||
|
||||
proc = proc->next;
|
||||
spin_unlock (&proc_tree_lock, &ctxprtr);
|
||||
|
||||
struct list_node_link *reap_link, *reap_link_tmp;
|
||||
list_foreach (reap_list, reap_link, reap_link_tmp) {
|
||||
proc = list_entry (reap_link, struct proc, reap_link);
|
||||
|
||||
list_remove (reap_list, &proc->reap_link);
|
||||
DEBUG ("cleanup PID %d\n", proc->pid);
|
||||
proc_cleanup (proc);
|
||||
}
|
||||
}
|
||||
|
||||
void proc_sched (void) {
|
||||
spin_lock_ctx_t ctxcpu;
|
||||
|
||||
int s_cycles = atomic_fetch_add (&sched_cycles, 1);
|
||||
|
||||
if (s_cycles % SCHED_REAP_FREQ == 0)
|
||||
proc_reap ();
|
||||
|
||||
struct proc* next = NULL;
|
||||
struct cpu* cpu = thiscpu;
|
||||
|
||||
spin_lock (&thiscpu->lock);
|
||||
spin_lock (&cpu->lock, &ctxcpu);
|
||||
|
||||
if (thiscpu->proc_run_q == NULL || thiscpu->proc_current == NULL) {
|
||||
spin_unlock (&thiscpu->lock);
|
||||
goto idle;
|
||||
next = proc_find_sched (cpu);
|
||||
|
||||
if (next) {
|
||||
cpu->proc_current = next;
|
||||
|
||||
do_sched (next, &cpu->lock, &ctxcpu);
|
||||
} else {
|
||||
cpu->proc_current = NULL;
|
||||
spin_unlock (&cpu->lock, &ctxcpu);
|
||||
|
||||
spin ();
|
||||
}
|
||||
|
||||
next = proc_find_sched ();
|
||||
|
||||
if (next != NULL)
|
||||
thiscpu->proc_current = next;
|
||||
|
||||
spin_unlock (&thiscpu->lock);
|
||||
|
||||
if (next != NULL && atomic_load (&next->state) == PROC_READY)
|
||||
do_sched (next);
|
||||
|
||||
idle:
|
||||
spin ();
|
||||
}
|
||||
|
||||
void proc_kill (struct proc* proc) {
|
||||
/* mark for garbage collection */
|
||||
spin_lock_ctx_t ctxpr, ctxcpu;
|
||||
struct cpu* cpu = proc->cpu;
|
||||
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
atomic_store (&proc->state, PROC_DEAD);
|
||||
proc->cpu = NULL;
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
|
||||
spin_lock (&cpu->lock, &ctxcpu);
|
||||
|
||||
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
|
||||
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
|
||||
if (cpu->proc_current == proc)
|
||||
cpu->proc_current = NULL;
|
||||
|
||||
spin_unlock (&cpu->lock, &ctxcpu);
|
||||
|
||||
DEBUG ("killed PID %d\n", proc->pid);
|
||||
|
||||
cpu_request_sched (cpu);
|
||||
}
|
||||
|
||||
static void proc_irq_sched (void* arg, void* regs) {
|
||||
(void)arg, (void)regs;
|
||||
(void)arg;
|
||||
proc_sched ();
|
||||
}
|
||||
|
||||
void proc_init (void) {
|
||||
struct proc* init = proc_spawn_rd ("init.exe");
|
||||
proc_register_for_cpu (init, thiscpu);
|
||||
|
||||
#if defined(__x86_64__)
|
||||
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
|
||||
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
|
||||
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
|
||||
#endif
|
||||
|
||||
do_sched (init);
|
||||
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
|
||||
proc_register (spin_proc, thiscpu);
|
||||
|
||||
struct proc* init = proc_spawn_rd ("init.exe");
|
||||
proc_register (init, NULL);
|
||||
|
||||
spin_lock_ctx_t ctxcpu;
|
||||
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
|
||||
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
|
||||
}
|
||||
|
||||
@@ -3,7 +3,12 @@
|
||||
|
||||
#include <aux/compiler.h>
|
||||
#include <aux/elf.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
#include <proc/suspension_q.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/mm.h>
|
||||
|
||||
@@ -12,48 +17,41 @@
|
||||
#include <amd64/proc.h> /* USTACK_SIZE */
|
||||
#endif
|
||||
|
||||
/// Process is ready to run
|
||||
#define PROC_READY 0
|
||||
/// Process marked garbage collection
|
||||
#define PROC_DEAD 1
|
||||
#define PROC_NEED_RESCHEDULE true
|
||||
#define PROC_NO_RESCHEDULE false
|
||||
|
||||
/* process states */
|
||||
#define PROC_READY 0
|
||||
#define PROC_DEAD 1
|
||||
#define PROC_SUSPENDED 2
|
||||
|
||||
/* process flags */
|
||||
#define PROC_USTK_PREALLOC (1 << 0)
|
||||
|
||||
struct cpu;
|
||||
|
||||
struct proc_mapping {
|
||||
struct proc_mapping* next;
|
||||
uintptr_t paddr;
|
||||
uintptr_t vaddr;
|
||||
size_t size;
|
||||
} PACKED;
|
||||
|
||||
struct procw;
|
||||
|
||||
struct proc {
|
||||
struct proc* next;
|
||||
struct proc_mapping* mappings; /* pd.lock implicitly protects this field */
|
||||
int pid;
|
||||
struct rb_node_link proc_tree_link;
|
||||
struct rb_node_link procgroup_memb_tree_link;
|
||||
struct list_node_link cpu_run_q_link;
|
||||
struct list_node_link reap_link;
|
||||
struct list_node_link* sq_entries;
|
||||
struct procgroup* procgroup;
|
||||
struct proc_platformdata pdata;
|
||||
struct pd pd;
|
||||
uint32_t flags;
|
||||
spin_lock_t lock;
|
||||
struct cpu* cpu;
|
||||
struct procw* procw; /* link to it's global struct */
|
||||
atomic_int state;
|
||||
};
|
||||
|
||||
/*
|
||||
* struct proc is a member of a CPU's proc_run_q.
|
||||
* struct procw is a process wrapper that is a member of
|
||||
* a global process list.
|
||||
*/
|
||||
struct procw {
|
||||
struct procw* next;
|
||||
struct proc* proc;
|
||||
uintptr_t uvaddr_argument;
|
||||
};
|
||||
|
||||
void proc_sched (void);
|
||||
void proc_kill (struct proc* proc);
|
||||
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
|
||||
uint32_t flags);
|
||||
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
|
||||
void proc_register (struct proc* proc, struct cpu* cpu);
|
||||
struct proc* proc_find_pid (int pid);
|
||||
struct proc* proc_spawn_rd (char* name);
|
||||
void proc_init (void);
|
||||
|
||||
#endif // _KERNEL_PROC_PROC_H
|
||||
|
||||
kernel/proc/procgroup.c (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/mm.h>
|
||||
|
||||
static struct rb_node_link* procgroup_tree = NULL;
|
||||
static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
|
||||
static atomic_int pgids = 0;
|
||||
|
||||
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
|
||||
uintptr_t* out_paddr) {
|
||||
spin_lock_ctx_t ctxpg;
|
||||
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
|
||||
vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
|
||||
|
||||
struct proc_mapping* mapping = malloc (sizeof (*mapping));
|
||||
|
||||
if (mapping == NULL) {
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uintptr_t paddr = pmm_alloc (pages);
|
||||
|
||||
if (paddr == PMM_ALLOC_ERR) {
|
||||
free (mapping);
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (out_paddr != NULL)
|
||||
*out_paddr = paddr;
|
||||
|
||||
mapping->paddr = paddr;
|
||||
mapping->vaddr = vaddr;
|
||||
mapping->size = pages * PAGE_SIZE;
|
||||
|
||||
procgroup->map_base += pages * PAGE_SIZE;
|
||||
|
||||
list_append (procgroup->mappings, &mapping->proc_mappings_link);
|
||||
|
||||
for (uintptr_t vpage = vaddr, ppage = paddr; vpage < vaddr + pages * PAGE_SIZE;
|
||||
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
|
||||
mm_map_page (&procgroup->pd, ppage, vpage, flags);
|
||||
}
|
||||
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
|
||||
size_t unmap_size = pages * PAGE_SIZE;
|
||||
uintptr_t end_vaddr = start_vaddr + unmap_size;
|
||||
|
||||
struct list_node_link *mapping_link, *mapping_link_tmp;
|
||||
|
||||
bool used_tail_mapping = false;
|
||||
spin_lock_ctx_t ctxpg;
|
||||
|
||||
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
|
||||
if (tail_mapping == NULL)
|
||||
return false;
|
||||
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
|
||||
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
|
||||
struct proc_mapping* mapping =
|
||||
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
|
||||
|
||||
uintptr_t m_start = mapping->vaddr;
|
||||
uintptr_t m_end = mapping->vaddr + mapping->size;
|
||||
|
||||
/* check overlap */
|
||||
if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
|
||||
uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
|
||||
uintptr_t free_vend = (end_vaddr < m_end) ? end_vaddr : m_end;
|
||||
size_t free_size = free_vend - free_vstart;
|
||||
|
||||
uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);
|
||||
pmm_free (ppage_to_free, free_size / PAGE_SIZE);
|
||||
|
||||
/* split in the middle */
|
||||
if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
|
||||
tail_mapping->vaddr = end_vaddr;
|
||||
tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
|
||||
tail_mapping->size = m_end - end_vaddr;
|
||||
|
||||
mapping->size = start_vaddr - m_start;
|
||||
|
||||
list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
|
||||
&tail_mapping->proc_mappings_link);
|
||||
|
||||
used_tail_mapping = true;
|
||||
|
||||
break;
|
||||
} else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) { /* shrink left */
|
||||
size_t diff = end_vaddr - m_start;
|
||||
mapping->vaddr += diff;
|
||||
mapping->paddr += diff;
|
||||
mapping->size -= diff;
|
||||
} else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) { /* shrink right */
|
||||
mapping->size = start_vaddr - m_start;
|
||||
} else { /* full overlap */
|
||||
list_remove (procgroup->mappings, &mapping->proc_mappings_link);
|
||||
free (mapping);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!used_tail_mapping)
|
||||
free (tail_mapping);
|
||||
|
||||
for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
|
||||
mm_unmap_page (&procgroup->pd, vpage);
|
||||
}
|
||||
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
struct procgroup* procgroup_create (void) {
|
||||
spin_lock_ctx_t ctxpgtr;
|
||||
|
||||
struct procgroup* procgroup = malloc (sizeof (*procgroup));
|
||||
if (procgroup == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
procgroup->refs = 0;
|
||||
procgroup->memb_proc_tree = NULL;
|
||||
procgroup->lock = SPIN_LOCK_INIT;
|
||||
procgroup->pgid = atomic_fetch_add (&pgids, 1);
|
||||
procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
|
||||
procgroup->map_base = PROC_MAP_BASE;
|
||||
|
||||
spin_lock (&procgroup_tree_lock, &ctxpgtr);
|
||||
rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
|
||||
procgroup_tree_link, pgid);
|
||||
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
|
||||
|
||||
return procgroup;
|
||||
}
|
||||
|
||||
void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
|
||||
spin_lock_ctx_t ctxpg, ctxpr;
|
||||
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
|
||||
rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
|
||||
procgroup_memb_tree_link, pid);
|
||||
atomic_fetch_add (&procgroup->refs, 1);
|
||||
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
}
|
||||
|
||||
void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
|
||||
spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr;
|
||||
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
|
||||
rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
|
||||
int refs = atomic_fetch_sub (&procgroup->refs, 1);
|
||||
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
|
||||
if (refs == 1) {
|
||||
spin_lock (&procgroup_tree_lock, &ctxpgtr);
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
|
||||
rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
|
||||
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
|
||||
|
||||
/* delete resources */
|
||||
struct rb_node_link* rnode;
|
||||
rbtree_first (&procgroup->resource_tree, rnode);
|
||||
while (rnode) {
|
||||
struct rb_node_link* next;
|
||||
rbtree_next (rnode, next);
|
||||
|
||||
struct proc_resource* resource =
|
||||
rbtree_entry (rnode, struct proc_resource, resource_tree_link);
|
||||
|
||||
rnode = next;
|
||||
|
||||
proc_delete_resource (resource);
|
||||
}
|
||||
|
||||
struct list_node_link *mapping_link, *mapping_link_tmp;
|
||||
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
|
||||
struct proc_mapping* mapping =
|
||||
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
|
||||
|
||||
pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
|
||||
free (mapping);
|
||||
}
|
||||
|
||||
pmm_free (procgroup->pd.cr3_paddr, 1);
|
||||
|
||||
free (procgroup->tls.tls_tmpl);
|
||||
|
||||
free (procgroup);
|
||||
}
|
||||
}
|
||||
kernel/proc/procgroup.h (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
#ifndef _KERNEL_PROC_PROCGROUP_H
|
||||
#define _KERNEL_PROC_PROCGROUP_H
|
||||
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/resource.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/mm.h>
|
||||
#include <sys/procgroup.h>
|
||||
|
||||
struct proc;
|
||||
|
||||
struct proc_mapping {
|
||||
struct list_node_link proc_mappings_link;
|
||||
|
||||
uintptr_t paddr;
|
||||
uintptr_t vaddr;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
struct procgroup {
|
||||
int pgid;
|
||||
struct rb_node_link procgroup_tree_link;
|
||||
struct rb_node_link* memb_proc_tree;
|
||||
spin_lock_t lock;
|
||||
atomic_int refs;
|
||||
struct rb_node_link* resource_tree;
|
||||
atomic_int sys_rids;
|
||||
struct pd pd;
|
||||
struct list_node_link* mappings;
|
||||
uintptr_t map_base;
|
||||
struct procgroup_tls tls;
|
||||
};
|
||||
|
||||
struct procgroup* procgroup_create (void);
|
||||
void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
|
||||
void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
|
||||
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
|
||||
uintptr_t* out_paddr);
|
||||
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
|
||||
|
||||
#endif // _KERNEL_PROC_PROCGROUP_H
|
||||
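Taken together, the procgroup API above covers the whole lifetime of a shared address space. A minimal sketch of typical usage, assuming the MM_PG_* flag bits from sys/mm.h and a proc obtained from proc_spawn_rd() or proc_clone(); the function name is illustrative only:

static void example_procgroup_lifetime (struct proc* proc) {
    struct procgroup* pg = procgroup_create ();
    if (pg == NULL)
        return;

    procgroup_attach (pg, proc);

    /* Back an 8-page user region at an address picked from the group's map_base. */
    uintptr_t uvaddr = procgroup_map (pg, 0, 8,
                                      MM_PG_PRESENT | MM_PG_RW | MM_PG_USER, NULL);
    if (uvaddr != 0)
        procgroup_unmap (pg, uvaddr, 8);

    procgroup_detach (pg, proc); /* dropping the last reference frees the group */
}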
kernel/proc/resource.c (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
#include <libk/assert.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
|
||||
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid) {
|
||||
spin_lock_ctx_t ctxpg;
|
||||
struct proc_resource* resource = NULL;
|
||||
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
rbtree_find (struct proc_resource, &procgroup->resource_tree, rid, resource, resource_tree_link,
|
||||
rid);
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
|
||||
return resource;
|
||||
}
|
||||
|
||||
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) {
|
||||
spin_lock_ctx_t ctxpg;
|
||||
struct proc_resource* resource;
|
||||
|
||||
resource = proc_find_resource (procgroup, rid);
|
||||
if (resource != NULL)
|
||||
return resource;
|
||||
|
||||
resource = malloc (sizeof (*resource));
|
||||
if (resource == NULL)
|
||||
return NULL;
|
||||
|
||||
memset (resource, 0, sizeof (*resource));
|
||||
resource->lock = SPIN_LOCK_INIT;
|
||||
resource->ops.cleanup = &proc_cleanup_resource_mutex;
|
||||
resource->u.mutex.resource = resource;
|
||||
resource->rid = rid;
|
||||
resource->type = PR_MUTEX;
|
||||
|
||||
spin_lock (&procgroup->lock, &ctxpg);
|
||||
rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
|
||||
resource_tree_link, rid);
|
||||
spin_unlock (&procgroup->lock, &ctxpg);
|
||||
|
||||
return resource;
|
||||
}
|
||||
|
||||
bool proc_delete_resource (struct proc_resource* resource) {
|
||||
bool reschedule = resource->ops.cleanup (resource);
|
||||
free (resource);
|
||||
|
||||
return reschedule;
|
||||
}
|
||||
kernel/proc/resource.h (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
#ifndef _KERNEL_PROC_RESOURCE_H
|
||||
#define _KERNEL_PROC_RESOURCE_H
|
||||
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <sync/spin_lock.h>
|
||||
|
||||
#define PR_MUTEX 1
|
||||
|
||||
struct proc;
|
||||
struct procgroup;
|
||||
|
||||
struct proc_resource {
|
||||
int type;
|
||||
int rid;
|
||||
spin_lock_t lock;
|
||||
struct rb_node_link resource_tree_link;
|
||||
union {
|
||||
struct proc_mutex mutex;
|
||||
} u;
|
||||
struct {
|
||||
bool (*cleanup) (struct proc_resource* resource);
|
||||
} ops;
|
||||
};
|
||||
|
||||
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
|
||||
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
|
||||
bool proc_delete_resource (struct proc_resource* resource);
|
||||
|
||||
#endif // _KERNEL_PROC_RESOURCE_H
|
||||
@@ -1,3 +1,11 @@
c += proc/proc.c
c += proc/proc.c \
proc/resource.c \
proc/mutex.c \
proc/procgroup.c \
proc/suspension_q.c

o += proc/proc.o
o += proc/proc.o \
proc/resource.o \
proc/mutex.o \
proc/procgroup.o \
proc/suspension_q.o

kernel/proc/suspension_q.c (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
#include <libk/list.h>
|
||||
#include <libk/std.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/resource.h>
|
||||
#include <proc/suspension_q.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/spin_lock.h>
|
||||
|
||||
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
|
||||
spin_lock_ctx_t* ctxrl) {
|
||||
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
|
||||
struct cpu* cpu = proc->cpu;
|
||||
|
||||
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
|
||||
if (!sq_entry) {
|
||||
spin_unlock (resource_lock, ctxrl);
|
||||
return PROC_NO_RESCHEDULE;
|
||||
}
|
||||
|
||||
sq_entry->proc = proc;
|
||||
sq_entry->sq = sq;
|
||||
|
||||
spin_lock (&cpu->lock, &ctxcpu);
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
spin_lock (&sq->lock, &ctxsq);
|
||||
|
||||
spin_unlock (resource_lock, ctxrl);
|
||||
|
||||
atomic_store (&proc->state, PROC_SUSPENDED);
|
||||
|
||||
/* append to sq's list */
|
||||
list_append (sq->proc_list, &sq_entry->sq_link);
|
||||
|
||||
/* append to proc's list */
|
||||
list_append (proc->sq_entries, &sq_entry->proc_link);
|
||||
|
||||
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
|
||||
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
|
||||
|
||||
if (cpu->proc_current == proc)
|
||||
cpu->proc_current = NULL;
|
||||
|
||||
proc->cpu = NULL;
|
||||
|
||||
spin_unlock (&sq->lock, &ctxsq);
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
spin_unlock (&cpu->lock, &ctxcpu);
|
||||
|
||||
return PROC_NEED_RESCHEDULE;
|
||||
}
|
||||
|
||||
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
|
||||
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
|
||||
struct cpu* cpu = cpu_find_lightest ();
|
||||
struct proc_suspension_q* sq = sq_entry->sq;
|
||||
|
||||
spin_lock (&cpu->lock, &ctxcpu);
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
spin_lock (&sq->lock, &ctxsq);
|
||||
|
||||
/* remove from sq's list */
|
||||
list_remove (sq->proc_list, &sq_entry->sq_link);
|
||||
|
||||
/* remove from proc's list */
|
||||
list_remove (proc->sq_entries, &sq_entry->proc_link);
|
||||
|
||||
proc->cpu = cpu;
|
||||
|
||||
if (proc->sq_entries == NULL)
|
||||
atomic_store (&proc->state, PROC_READY);
|
||||
|
||||
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
|
||||
atomic_fetch_add (&cpu->proc_run_q_count, 1);
|
||||
|
||||
spin_unlock (&sq->lock, &ctxsq);
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
spin_unlock (&cpu->lock, &ctxcpu);
|
||||
|
||||
free (sq_entry);
|
||||
|
||||
return PROC_NEED_RESCHEDULE;
|
||||
}
|
||||
|
||||
void proc_sqs_cleanup (struct proc* proc) {
|
||||
spin_lock_ctx_t ctxsq, ctxpr;
|
||||
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
|
||||
/* clean suspension queue entries */
|
||||
struct list_node_link *sq_link, *sq_link_tmp;
|
||||
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
|
||||
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
|
||||
struct proc_suspension_q* sq = sq_entry->sq;
|
||||
|
||||
spin_lock (&sq->lock, &ctxsq);
|
||||
|
||||
/* remove from sq's list */
|
||||
list_remove (sq->proc_list, &sq_entry->sq_link);
|
||||
|
||||
/* remove from proc's list */
|
||||
list_remove (proc->sq_entries, &sq_entry->proc_link);
|
||||
|
||||
spin_unlock (&sq->lock, &ctxsq);
|
||||
|
||||
free (sq_entry);
|
||||
}
|
||||
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
}
|
||||
kernel/proc/suspension_q.h (new file, 26 lines)
@@ -0,0 +1,26 @@
#ifndef _KERNEL_PROC_SUSPENSION_Q_H
#define _KERNEL_PROC_SUSPENSION_Q_H

#include <libk/list.h>
#include <sync/spin_lock.h>

struct proc;

struct proc_suspension_q {
struct list_node_link* proc_list;
spin_lock_t lock;
};

struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};

void proc_sqs_cleanup (struct proc* proc);
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl);
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);

#endif // _KERNEL_PROC_SUSPENSION_Q_H
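A suspension queue parks a process off its CPU run queue until something resumes it. The sketch below shows how a blocking primitive such as the (not shown) proc_mutex_lock() might use this API; the names are illustrative and the locking around the queue inspection is elided:

static bool example_block_on (struct proc* proc, struct proc_suspension_q* waiters,
                              spin_lock_t* owner_lock, spin_lock_ctx_t* ctx) {
    /* The caller already holds owner_lock; proc_sq_suspend() releases it once the
     * process is safely parked and reports whether a reschedule is required. */
    return proc_sq_suspend (proc, waiters, owner_lock, ctx);
}

static void example_wake_first (struct proc_suspension_q* waiters) {
    struct list_node_link* link = waiters->proc_list; /* queue lock elided for brevity */
    if (link == NULL)
        return;

    struct proc_sq_entry* entry = list_entry (link, struct proc_sq_entry, sq_link);
    if (proc_sq_resume (entry->proc, entry) == PROC_NEED_RESCHEDULE)
        proc_sched ();
}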
@@ -3,15 +3,15 @@
#include <sys/irq.h>
#include <sys/spin_lock.h>

void spin_lock (spin_lock_t* sl) {
irq_save ();
void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
irq_save (ctx);

while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire))
spin_lock_relax ();
}

void spin_unlock (spin_lock_t* sl) {
void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
atomic_flag_clear_explicit (sl, memory_order_release);

irq_restore ();
irq_restore (ctx);
}

@@ -2,12 +2,13 @@
#define _KERNEL_SYNC_SPIN_LOCK_H

#include <libk/std.h>
#include <sys/spin_lock.h>

#define SPIN_LOCK_INIT ATOMIC_FLAG_INIT

typedef atomic_flag spin_lock_t;

void spin_lock (spin_lock_t* sl);
void spin_unlock (spin_lock_t* sl);
void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx);

#endif // _KERNEL_SYNC_SPIN_LOCK_H

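The lock API now threads a caller-provided spin_lock_ctx_t through irq_save()/irq_restore(), so the saved interrupt state travels with each acquisition instead of living in a single global. A minimal sketch of the new calling convention, assuming any structure guarded by a spin_lock_t (the struct cpu used elsewhere in this diff stands in here):

static void example_with_cpu_lock (struct cpu* cpu) {
    spin_lock_ctx_t ctx;            /* holds the saved IRQ state (a uint64_t on amd64) */

    spin_lock (&cpu->lock, &ctx);   /* disables IRQs and records the previous state in ctx */
    /* ... critical section ... */
    spin_unlock (&cpu->lock, &ctx); /* releases the lock, then restores the saved IRQ state */
}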
@@ -1,7 +1,9 @@
#ifndef _KERNEL_SYS_IRQ_H
#define _KERNEL_SYS_IRQ_H

void irq_save (void);
void irq_restore (void);
#include <sys/spin_lock.h>

void irq_save (spin_lock_ctx_t* ctx);
void irq_restore (spin_lock_ctx_t* ctx);

#endif // _KERNEL_SYS_IRQ_H

@@ -2,6 +2,7 @@
#define _KERNEL_SYS_MM_H

#include <libk/std.h>
#include <sync/spin_lock.h>

#if defined(__x86_64__)
#include <amd64/mm.h>
@@ -10,17 +11,19 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
#define MM_PD_RELOAD (1 << 30)
#define MM_PD_LOCK (1 << 31)

uintptr_t mm_alloc_user_pd_phys (void);
void mm_reload (void);
void mm_kernel_lock (spin_lock_ctx_t* ctx);
void mm_kernel_unlock (spin_lock_ctx_t* ctx);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
void mm_lock_kernel (void);
void mm_unlock_kernel (void);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
void mm_unmap_kernel_page (uintptr_t vaddr);
bool mm_validate (struct pd* pd, uintptr_t vaddr);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
struct pd* mm_get_kernel_pd (void);
void mm_init (void);

#endif // _KERNEL_SYS_MM_H

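The MM_PG_* bits above are what the rest of this diff passes to mm_map_page() and, via procgroup_map(), to the map() syscall. A small sketch of mapping one freshly allocated physical page into a user page directory, assuming pmm_alloc()/PMM_ALLOC_ERR from mm/pmm.h as used elsewhere in this changeset:

static bool example_map_user_page (struct pd* pd, uintptr_t uvaddr) {
    uintptr_t paddr = pmm_alloc (1);             /* one physical page */
    if (paddr == PMM_ALLOC_ERR)
        return false;

    mm_map_page (pd, paddr, uvaddr, MM_PG_PRESENT | MM_PG_RW | MM_PG_USER);
    return true;
}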
@@ -1,8 +1,14 @@
#ifndef _KERNEL_SYS_PROC_H
#define _KERNEL_SYS_PROC_H

#include <libk/std.h>

struct proc;

struct proc* proc_from_elf (uint8_t* elf_contents);
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
uintptr_t argument_ptr);
void proc_cleanup (struct proc* proc);
void proc_init_tls (struct proc* proc);

#endif // _KERNEL_SYS_PROC_H

kernel/sys/procgroup.h (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
#ifndef _KERNEL_SYS_PROCGROUP_H
|
||||
#define _KERNEL_SYS_PROCGROUP_H
|
||||
|
||||
#if defined(__x86_64__)
|
||||
#include <amd64/procgroup.h>
|
||||
#endif
|
||||
|
||||
#endif // _KERNEL_SYS_PROCGROUP_H
|
||||
@@ -4,6 +4,6 @@
#include <libk/std.h>
#include <proc/proc.h>

void do_sched (struct proc* proc);
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu);

#endif // _KERNEL_SYS_SCHED_H

@@ -1,6 +1,12 @@
#ifndef _KERNEL_SYS_SPIN_LOCK_H
#define _KERNEL_SYS_SPIN_LOCK_H

#include <libk/std.h>

#if defined(__x86_64__)
typedef uint64_t spin_lock_ctx_t;
#endif

void spin_lock_relax (void);

#endif // _KERNEL_SYS_SPIN_LOCK_H

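On x86_64 the context is a bare uint64_t, which suggests it carries the saved RFLAGS. The amd64 implementation of irq_save()/irq_restore() is not part of this diff; the following is only a plausible sketch of what it might look like:

/* Not part of this diff: a plausible amd64 implementation of the context-based
 * IRQ save/restore that spin_lock()/spin_unlock() rely on. */
void irq_save (spin_lock_ctx_t* ctx) {
    uint64_t rflags;
    __asm__ volatile ("pushfq\n\tpopq %0\n\tcli" : "=r"(rflags) : : "memory");
    *ctx = rflags; /* remember whether IF was set before interrupts were disabled */
}

void irq_restore (spin_lock_ctx_t* ctx) {
    __asm__ volatile ("pushq %0\n\tpopfq" : : "r"(*ctx) : "memory", "cc");
}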
@@ -1,28 +1,180 @@
|
||||
#include <aux/compiler.h>
|
||||
#include <libk/assert.h>
|
||||
#include <libk/std.h>
|
||||
#include <limine/requests.h>
|
||||
#include <m/status.h>
|
||||
#include <m/syscall_defs.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/mm.h>
|
||||
#include <sys/proc.h>
|
||||
#include <syscall/syscall.h>
|
||||
|
||||
#define DEFINE_SYSCALL(name) \
|
||||
int name (struct proc* proc, uintptr_t UNUSED a1, uintptr_t UNUSED a2, uintptr_t UNUSED a3, \
|
||||
uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
|
||||
uintptr_t name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, \
|
||||
uintptr_t UNUSED a2, uintptr_t UNUSED a3, uintptr_t UNUSED a4, \
|
||||
uintptr_t UNUSED a5, uintptr_t UNUSED a6)
|
||||
|
||||
DEFINE_SYSCALL (sys_proc_quit) {
|
||||
proc_kill (proc);
|
||||
proc_sched ();
|
||||
return SR_OK;
|
||||
#define SYSRESULT(x) ((uintptr_t)(x))
|
||||
|
||||
static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
spin_lock_ctx_t ctxpg;
|
||||
|
||||
spin_lock (&proc->procgroup->lock, &ctxpg);
|
||||
|
||||
if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
|
||||
spin_unlock (&proc->procgroup->lock, &ctxpg);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);
|
||||
|
||||
spin_unlock (&proc->procgroup->lock, &ctxpg);
|
||||
|
||||
uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;
|
||||
|
||||
return (void*)out_kvaddr;
|
||||
}
|
||||
|
||||
DEFINE_SYSCALL (sys_proc_test) {
|
||||
DEBUG ("test syscall message!\n");
|
||||
return SR_OK;
|
||||
/* int quit (void) */
|
||||
DEFINE_SYSCALL (sys_quit) {
|
||||
proc_kill (proc);
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
/* int test (void) */
|
||||
DEFINE_SYSCALL (sys_test) {
|
||||
char c = (char)a1;
|
||||
DEBUG ("test syscall from %d! %c\n", proc->pid, c);
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
/* int map (uintptr_t vaddr, size_t pages, uint32_t flags) */
|
||||
DEFINE_SYSCALL (sys_map) {
|
||||
uintptr_t vaddr = a1;
|
||||
size_t pages = (size_t)a2;
|
||||
uint32_t flags = (uint32_t)a3;
|
||||
|
||||
if (vaddr % PAGE_SIZE != 0)
|
||||
return SYSRESULT (-ST_UNALIGNED);
|
||||
|
||||
return SYSRESULT (procgroup_map (proc->procgroup, vaddr, pages, flags, NULL));
|
||||
}
|
||||
|
||||
/* int unmap (uintptr_t vaddr, size_t pages) */
|
||||
DEFINE_SYSCALL (sys_unmap) {
|
||||
uintptr_t vaddr = a1;
|
||||
size_t pages = (size_t)a2;
|
||||
|
||||
if (vaddr % PAGE_SIZE != 0)
|
||||
return SYSRESULT (-ST_UNALIGNED);
|
||||
|
||||
return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
|
||||
}
|
||||
|
||||
/* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
|
||||
DEFINE_SYSCALL (sys_clone) {
|
||||
uintptr_t vstack_top = a1;
|
||||
uintptr_t entry = a2;
|
||||
uintptr_t argument_ptr = a3;
|
||||
|
||||
struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);
|
||||
|
||||
if (new == NULL) {
|
||||
return SYSRESULT (-ST_OOM_ERROR);
|
||||
}
|
||||
|
||||
int pid = new->pid;
|
||||
|
||||
proc_register (new, NULL);
|
||||
|
||||
return SYSRESULT (pid);
|
||||
}
|
||||
|
||||
/* void* argument_ptr (void) */
|
||||
DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
|
||||
|
||||
/* int sched (void) */
|
||||
DEFINE_SYSCALL (sys_sched) {
|
||||
proc_sched ();
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
/* int mutex_create (int mutex_rid) */
|
||||
DEFINE_SYSCALL (sys_mutex_create) {
|
||||
int mutex_rid = (int)a1;
|
||||
|
||||
struct proc_resource* mutex_resource = proc_create_resource_mutex (proc->procgroup, mutex_rid);
|
||||
|
||||
if (mutex_resource == NULL)
|
||||
return SYSRESULT (-ST_OOM_ERROR);
|
||||
|
||||
return SYSRESULT (mutex_resource->rid);
|
||||
}
|
||||
|
||||
/* int mutex_delete (int mutex_rid) */
|
||||
DEFINE_SYSCALL (sys_mutex_delete) {
|
||||
int mutex_rid = (int)a1;
|
||||
|
||||
struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
|
||||
|
||||
if (mutex_resource == NULL)
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
|
||||
if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
|
||||
proc_sched ();
|
||||
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
/* int mutex_lock (int mutex_rid) */
|
||||
DEFINE_SYSCALL (sys_mutex_lock) {
|
||||
int mutex_rid = (int)a1;
|
||||
|
||||
struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
|
||||
|
||||
if (mutex_resource == NULL)
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
|
||||
if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
|
||||
proc_sched ();
|
||||
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
/* int mutex_unlock (int mutex_rid) */
|
||||
DEFINE_SYSCALL (sys_mutex_unlock) {
|
||||
int mutex_rid = (int)a1;
|
||||
|
||||
struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
|
||||
|
||||
if (mutex_resource == NULL)
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
|
||||
if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
|
||||
proc_sched ();
|
||||
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
static syscall_handler_func_t handler_table[] = {
|
||||
[SYS_PROC_QUIT] = &sys_proc_quit,
|
||||
[SYS_PROC_TEST] = &sys_proc_test,
|
||||
[SYS_QUIT] = &sys_quit,
|
||||
[SYS_TEST] = &sys_test,
|
||||
[SYS_MAP] = &sys_map,
|
||||
[SYS_UNMAP] = &sys_unmap,
|
||||
[SYS_CLONE] = &sys_clone,
|
||||
[SYS_ARGUMENT_PTR] = &sys_argument_ptr,
|
||||
[SYS_SCHED] = &sys_sched,
|
||||
[SYS_MUTEX_CREATE] = &sys_mutex_create,
|
||||
[SYS_MUTEX_DELETE] = &sys_mutex_delete,
|
||||
[SYS_MUTEX_LOCK] = &sys_mutex_lock,
|
||||
[SYS_MUTEX_UNLOCK] = &sys_mutex_unlock,
|
||||
};
|
||||
|
||||
syscall_handler_func_t syscall_find_handler (int syscall_num) {
|
||||
|
||||
@@ -4,8 +4,9 @@
#include <libk/std.h>
#include <proc/proc.h>

typedef int (*syscall_handler_func_t) (struct proc* proc, uintptr_t a1, uintptr_t a2, uintptr_t a3,
uintptr_t a4, uintptr_t a5, uintptr_t a6);
typedef uintptr_t (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1,
uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5,
uintptr_t a6);

syscall_handler_func_t syscall_find_handler (int syscall_num);

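With the new signature every handler also receives the trap frame and returns a uintptr_t that the entry stub writes back to the caller. How the architecture-specific entry path dispatches through this table is not shown in this diff; a plausible sketch, with the error convention for unknown syscall numbers assumed:

uintptr_t example_syscall_dispatch (struct proc* proc, void* regs, int num,
                                    uintptr_t a1, uintptr_t a2, uintptr_t a3,
                                    uintptr_t a4, uintptr_t a5, uintptr_t a6) {
    syscall_handler_func_t handler = syscall_find_handler (num);

    if (handler == NULL)
        return (uintptr_t)-1; /* error convention for unknown syscalls assumed */

    return handler (proc, regs, a1, a2, a3, a4, a5, a6);
}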
libmsl/alloc/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
*.o

libmsl/alloc/liballoc.c (new file, 386 lines)
@@ -0,0 +1,386 @@
|
||||
/* liballoc breaks when optimized too aggressively, e.g. with clang's -Oz */
|
||||
#pragma clang optimize off
|
||||
|
||||
#include <alloc/liballoc.h>
|
||||
#include <m/system.h>
|
||||
|
||||
#define LIBALLOC_MUTEX 500
|
||||
|
||||
void liballoc_init (void) { mutex_create (LIBALLOC_MUTEX); }
|
||||
|
||||
void liballoc_deinit (void) { mutex_delete (LIBALLOC_MUTEX); }
|
||||
|
||||
int liballoc_lock (void) { return mutex_lock (LIBALLOC_MUTEX); }
|
||||
|
||||
int liballoc_unlock (void) { return mutex_unlock (LIBALLOC_MUTEX); }
|
||||
|
||||
void* liballoc_alloc (int pages) { return map (0, pages, MAP_FLAGS | MAP_RW); }
|
||||
|
||||
int liballoc_free (void* ptr, int pages) { return unmap ((uintptr_t)ptr, pages); }
|
||||
|
||||
/** Durand's Ridiculously Amazing Super Duper Memory functions. */
|
||||
|
||||
// #define DEBUG
|
||||
|
||||
#define LIBALLOC_MAGIC 0xc001c0de
|
||||
#define MAXCOMPLETE 5
|
||||
#define MAXEXP 32
|
||||
#define MINEXP 8
|
||||
|
||||
#define MODE_BEST 0
|
||||
#define MODE_INSTANT 1
|
||||
|
||||
#define MODE MODE_BEST
|
||||
|
||||
struct boundary_tag* l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
|
||||
int l_completePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
|
||||
|
||||
static int l_initialized = 0; //< Flag to indicate initialization.
|
||||
static int l_pageSize = PAGE_SIZE; //< Individual page size
|
||||
static int l_pageCount = 16; //< Minimum number of pages to allocate.
|
||||
|
||||
// *********** HELPER FUNCTIONS *******************************
|
||||
|
||||
/** Returns the exponent required to manage 'size' amount of memory.
|
||||
*
|
||||
* Returns n where 2^n <= size < 2^(n+1)
|
||||
*/
|
||||
static inline int getexp (unsigned int size) {
|
||||
if (size < (1 << MINEXP)) {
|
||||
return -1; // Smaller than the quantum.
|
||||
}
|
||||
|
||||
int shift = MINEXP;
|
||||
|
||||
while (shift < MAXEXP) {
|
||||
if ((1 << shift) > size)
|
||||
break;
|
||||
shift += 1;
|
||||
}
|
||||
|
||||
return shift - 1;
|
||||
}
|
||||
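Worked values for getexp() with MINEXP = 8, to make the bucket boundaries concrete (these examples are not part of the diff):

/*   getexp (200)  -> -1   (below the 2^8 = 256 byte quantum)
 *   getexp (256)  ->  8   (2^8  <= 256  < 2^9)
 *   getexp (4096) -> 12   (2^12 <= 4096 < 2^13)
 *   getexp (5000) -> 12   (2^12 <= 5000 < 2^13)
 */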
|
||||
static void* liballoc_memset (void* s, int c, size_t n) {
|
||||
size_t i;
|
||||
for (i = 0; i < n; i++)
|
||||
((char*)s)[i] = c;
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
static void* liballoc_memcpy (void* s1, const void* s2, size_t n) {
|
||||
char* cdest;
|
||||
char* csrc;
|
||||
unsigned int* ldest = (unsigned int*)s1;
|
||||
unsigned int* lsrc = (unsigned int*)s2;
|
||||
|
||||
while (n >= sizeof (unsigned int)) {
|
||||
*ldest++ = *lsrc++;
|
||||
n -= sizeof (unsigned int);
|
||||
}
|
||||
|
||||
cdest = (char*)ldest;
|
||||
csrc = (char*)lsrc;
|
||||
|
||||
while (n > 0) {
|
||||
*cdest++ = *csrc++;
|
||||
n -= 1;
|
||||
}
|
||||
|
||||
return s1;
|
||||
}
|
||||
|
||||
static inline void insert_tag (struct boundary_tag* tag, int index) {
|
||||
int realIndex;
|
||||
|
||||
if (index < 0) {
|
||||
realIndex = getexp (tag->real_size - sizeof (struct boundary_tag));
|
||||
if (realIndex < MINEXP)
|
||||
realIndex = MINEXP;
|
||||
} else
|
||||
realIndex = index;
|
||||
|
||||
tag->index = realIndex;
|
||||
|
||||
if (l_freePages[realIndex] != NULL) {
|
||||
l_freePages[realIndex]->prev = tag;
|
||||
tag->next = l_freePages[realIndex];
|
||||
}
|
||||
|
||||
l_freePages[realIndex] = tag;
|
||||
}
|
||||
|
||||
static inline void remove_tag (struct boundary_tag* tag) {
|
||||
if (l_freePages[tag->index] == tag)
|
||||
l_freePages[tag->index] = tag->next;
|
||||
|
||||
if (tag->prev != NULL)
|
||||
tag->prev->next = tag->next;
|
||||
if (tag->next != NULL)
|
||||
tag->next->prev = tag->prev;
|
||||
|
||||
tag->next = NULL;
|
||||
tag->prev = NULL;
|
||||
tag->index = -1;
|
||||
}
|
||||
|
||||
static inline struct boundary_tag* melt_left (struct boundary_tag* tag) {
|
||||
struct boundary_tag* left = tag->split_left;
|
||||
|
||||
left->real_size += tag->real_size;
|
||||
left->split_right = tag->split_right;
|
||||
|
||||
if (tag->split_right != NULL)
|
||||
tag->split_right->split_left = left;
|
||||
|
||||
return left;
|
||||
}
|
||||
|
||||
static inline struct boundary_tag* absorb_right (struct boundary_tag* tag) {
|
||||
struct boundary_tag* right = tag->split_right;
|
||||
|
||||
remove_tag (right); // Remove right from free pages.
|
||||
|
||||
tag->real_size += right->real_size;
|
||||
|
||||
tag->split_right = right->split_right;
|
||||
if (right->split_right != NULL)
|
||||
right->split_right->split_left = tag;
|
||||
|
||||
return tag;
|
||||
}
|
||||
|
||||
static inline struct boundary_tag* split_tag (struct boundary_tag* tag) {
|
||||
unsigned int remainder = tag->real_size - sizeof (struct boundary_tag) - tag->size;
|
||||
|
||||
struct boundary_tag* new_tag =
|
||||
(struct boundary_tag*)((uintptr_t)tag + sizeof (struct boundary_tag) + tag->size);
|
||||
|
||||
new_tag->magic = LIBALLOC_MAGIC;
|
||||
new_tag->real_size = remainder;
|
||||
|
||||
new_tag->next = NULL;
|
||||
new_tag->prev = NULL;
|
||||
|
||||
new_tag->split_left = tag;
|
||||
new_tag->split_right = tag->split_right;
|
||||
|
||||
if (new_tag->split_right != NULL)
|
||||
new_tag->split_right->split_left = new_tag;
|
||||
tag->split_right = new_tag;
|
||||
|
||||
tag->real_size -= new_tag->real_size;
|
||||
|
||||
insert_tag (new_tag, -1);
|
||||
|
||||
return new_tag;
|
||||
}
|
||||
|
||||
// ***************************************************************
|
||||
|
||||
static struct boundary_tag* allocate_new_tag (unsigned int size) {
|
||||
unsigned int pages;
|
||||
unsigned int usage;
|
||||
struct boundary_tag* tag;
|
||||
|
||||
// This is how much space is required.
|
||||
usage = size + sizeof (struct boundary_tag);
|
||||
|
||||
// Perfect amount of space
|
||||
pages = usage / l_pageSize;
|
||||
if ((usage % l_pageSize) != 0)
|
||||
pages += 1;
|
||||
|
||||
// Make sure it's >= the minimum size.
|
||||
if (pages < (unsigned int)l_pageCount)
|
||||
pages = l_pageCount;
|
||||
|
||||
tag = (struct boundary_tag*)liballoc_alloc (pages);
|
||||
|
||||
if (tag == NULL)
|
||||
return NULL; // uh oh, we ran out of memory.
|
||||
|
||||
tag->magic = LIBALLOC_MAGIC;
|
||||
tag->size = size;
|
||||
tag->real_size = pages * l_pageSize;
|
||||
tag->index = -1;
|
||||
|
||||
tag->next = NULL;
|
||||
tag->prev = NULL;
|
||||
tag->split_left = NULL;
|
||||
tag->split_right = NULL;
|
||||
|
||||
return tag;
|
||||
}
|
||||
|
||||
void* malloc (size_t size) {
|
||||
int index;
|
||||
void* ptr;
|
||||
struct boundary_tag* tag = NULL;
|
||||
|
||||
liballoc_lock ();
|
||||
|
||||
if (l_initialized == 0) {
|
||||
for (index = 0; index < MAXEXP; index++) {
|
||||
l_freePages[index] = NULL;
|
||||
l_completePages[index] = 0;
|
||||
}
|
||||
l_initialized = 1;
|
||||
}
|
||||
|
||||
index = getexp (size) + MODE;
|
||||
if (index < MINEXP)
|
||||
index = MINEXP;
|
||||
|
||||
// Find one big enough.
|
||||
tag = l_freePages[index]; // Start at the front of the list.
|
||||
while (tag != NULL) {
|
||||
// If there's enough space in this tag.
|
||||
if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
|
||||
break;
|
||||
}
|
||||
|
||||
tag = tag->next;
|
||||
}
|
||||
|
||||
// No page found. Make one.
|
||||
if (tag == NULL) {
|
||||
if ((tag = allocate_new_tag (size)) == NULL) {
|
||||
liballoc_unlock ();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
index = getexp (tag->real_size - sizeof (struct boundary_tag));
|
||||
} else {
|
||||
remove_tag (tag);
|
||||
|
||||
if ((tag->split_left == NULL) && (tag->split_right == NULL))
|
||||
l_completePages[index] -= 1;
|
||||
}
|
||||
|
||||
// We have a free page. Remove it from the free pages list.
|
||||
|
||||
tag->size = size;
|
||||
|
||||
// Removed... see if we can re-use the excess space.
|
||||
|
||||
unsigned int remainder =
|
||||
tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder
|
||||
|
||||
if (((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
|
||||
int childIndex = getexp (remainder);
|
||||
|
||||
if (childIndex >= 0) {
|
||||
struct boundary_tag* new_tag = split_tag (tag);
|
||||
|
||||
(void)new_tag;
|
||||
}
|
||||
}
|
||||
|
||||
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
|
||||
|
||||
liballoc_unlock ();
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void free (void* ptr) {
|
||||
int index;
|
||||
struct boundary_tag* tag;
|
||||
|
||||
if (ptr == NULL)
|
||||
return;
|
||||
|
||||
liballoc_lock ();
|
||||
|
||||
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
|
||||
|
||||
if (tag->magic != LIBALLOC_MAGIC) {
|
||||
liballoc_unlock (); // release the lock
|
||||
return;
|
||||
}
|
||||
|
||||
// MELT LEFT...
|
||||
while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
|
||||
tag = melt_left (tag);
|
||||
remove_tag (tag);
|
||||
}
|
||||
|
||||
// MELT RIGHT...
|
||||
while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
|
||||
tag = absorb_right (tag);
|
||||
}
|
||||
|
||||
// Where is it going back to?
|
||||
index = getexp (tag->real_size - sizeof (struct boundary_tag));
|
||||
if (index < MINEXP)
|
||||
index = MINEXP;
|
||||
|
||||
// A whole, empty block?
|
||||
if ((tag->split_left == NULL) && (tag->split_right == NULL)) {
|
||||
if (l_completePages[index] == MAXCOMPLETE) {
|
||||
// Too many standing by to keep. Free this one.
|
||||
unsigned int pages = tag->real_size / l_pageSize;
|
||||
|
||||
if ((tag->real_size % l_pageSize) != 0)
|
||||
pages += 1;
|
||||
if (pages < (unsigned int)l_pageCount)
|
||||
pages = l_pageCount;
|
||||
|
||||
liballoc_free (tag, pages);
|
||||
|
||||
liballoc_unlock ();
|
||||
return;
|
||||
}
|
||||
|
||||
l_completePages[index] += 1; // Increase the count of complete pages.
|
||||
}
|
||||
|
||||
// ..........
|
||||
|
||||
insert_tag (tag, index);
|
||||
|
||||
liballoc_unlock ();
|
||||
}
|
||||
|
||||
void* calloc (size_t nobj, size_t size) {
|
||||
int real_size;
|
||||
void* p;
|
||||
|
||||
real_size = nobj * size;
|
||||
|
||||
p = malloc (real_size);
|
||||
|
||||
liballoc_memset (p, 0, real_size);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
void* realloc (void* p, size_t size) {
|
||||
void* ptr;
|
||||
struct boundary_tag* tag;
|
||||
int real_size;
|
||||
|
||||
if (size == 0) {
|
||||
free (p);
|
||||
return NULL;
|
||||
}
|
||||
if (p == NULL)
|
||||
return malloc (size);
|
||||
|
||||
if (&liballoc_lock != NULL)
|
||||
liballoc_lock (); // lockit
|
||||
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
|
||||
real_size = tag->size;
|
||||
if (&liballoc_unlock != NULL)
|
||||
liballoc_unlock ();
|
||||
|
||||
if ((size_t)real_size > size)
|
||||
real_size = size;
|
||||
|
||||
ptr = malloc (size);
|
||||
liballoc_memcpy (ptr, p, real_size);
|
||||
free (p);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
libmsl/alloc/liballoc.h (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
#ifndef _LIBALLOC_H
|
||||
#define _LIBALLOC_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define _ALLOC_SKIP_DEFINE
|
||||
|
||||
// If we are told to not define our own size_t, then we
|
||||
// skip the define.
|
||||
#ifndef _ALLOC_SKIP_DEFINE
|
||||
|
||||
#ifndef _HAVE_SIZE_T
|
||||
#define _HAVE_SIZE_T
|
||||
typedef unsigned int size_t;
|
||||
#endif
|
||||
|
||||
#ifndef NULL
|
||||
#define NULL 0
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/** This is a boundary tag which is prepended to the
|
||||
* page or section of a page which we have allocated. It is
|
||||
* used to identify valid memory blocks that the
|
||||
* application is trying to free.
|
||||
*/
|
||||
struct boundary_tag {
|
||||
unsigned int magic; //< It's a kind of ...
|
||||
unsigned int size; //< Requested size.
|
||||
unsigned int real_size; //< Actual size.
|
||||
int index; //< Location in the page table.
|
||||
|
||||
struct boundary_tag* split_left; //< Linked-list info for broken pages.
|
||||
struct boundary_tag* split_right; //< The same.
|
||||
|
||||
struct boundary_tag* next; //< Linked list info.
|
||||
struct boundary_tag* prev; //< Linked list info.
|
||||
};
|
||||
|
||||
/** This function is supposed to lock the memory data structures. It
|
||||
* could be as simple as disabling interrupts or acquiring a spinlock.
|
||||
* It's up to you to decide.
|
||||
*
|
||||
* \return 0 if the lock was acquired successfully. Anything else is
|
||||
* failure.
|
||||
*/
|
||||
extern int liballoc_lock (void);
|
||||
|
||||
/** This function unlocks what was previously locked by the liballoc_lock
|
||||
* function. If it disabled interrupts, it enables interrupts. If it
|
||||
* had acquired a spinlock, it releases the spinlock, etc.
|
||||
*
|
||||
* \return 0 if the lock was successfully released.
|
||||
*/
|
||||
extern int liballoc_unlock (void);
|
||||
|
||||
/** This is the hook into the local system which allocates pages. It
|
||||
* accepts an integer parameter which is the number of pages
|
||||
* required. The page size was set up in the liballoc_init function.
|
||||
*
|
||||
* \return NULL if the pages were not allocated.
|
||||
* \return A pointer to the allocated memory.
|
||||
*/
|
||||
extern void* liballoc_alloc (int pages);
|
||||
|
||||
/** This frees previously allocated memory. The void* parameter passed
|
||||
* to the function is the exact same value returned from a previous
|
||||
* liballoc_alloc call.
|
||||
*
|
||||
* The integer value is the number of pages to free.
|
||||
*
|
||||
* \return 0 if the memory was successfully freed.
|
||||
*/
|
||||
extern int liballoc_free (void* ptr, int pages);
|
||||
|
||||
void* malloc (size_t); //< The standard function.
|
||||
void* realloc (void*, size_t); //< The standard function.
|
||||
void* calloc (size_t, size_t); //< The standard function.
|
||||
void free (void*); //< The standard function.
|
||||
|
||||
void liballoc_init (void);
|
||||
void liballoc_deinit (void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
libmsl/alloc/src.mk (new file, 3 lines)
@@ -0,0 +1,3 @@
c += alloc/liballoc.c

o += alloc/liballoc.o
@@ -2,21 +2,16 @@
#include <stddef.h>
#include <stdint.h>

int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6) {
uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6) {
uint64_t result;
__asm__ volatile ("movq %1, %%rax\n"
"movq %2, %%rdi\n"
"movq %3, %%rsi\n"
"movq %4, %%rdx\n"
"movq %5, %%r10\n"
"movq %6, %%r8\n"
"movq %7, %%r9\n"
__asm__ volatile ("movq %[a4], %%r10\n"
"movq %[a5], %%r8\n"
"movq %[a6], %%r9\n"
"syscall\n"
"movq %%rax, %0\n"
: "=r"(result)
: "r"((uint64_t)syscall_num), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
"r"(a6)
: "memory", "cc", "rcx", "r11");
return (int)result;
: "=a"(result)
: "a"(syscall_num), "D"(a1), "S"(a2),
"d"(a3), [a4] "r"(a4), [a5] "r"(a5), [a6] "r"(a6)
: "r10", "r8", "r9", "r11", "rcx", "cc", "memory");
return result;
}

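The rewritten stub lets the compiler place the syscall number and the first three arguments directly in rax/rdi/rsi/rdx and only moves the remaining ones into r10/r8/r9, matching what the kernel-side handlers above expect. A sketch of a thin userspace wrapper built on it; SYS_TEST and the single character argument mirror the kernel's sys_test handler, and the wrapper name is illustrative:

#include <m/syscall.h>      /* maps the syscall macro to amd64_syscall on x86_64 */
#include <m/syscall_defs.h> /* SYS_TEST and friends */

static inline uintptr_t example_test (char c) {
    return syscall (SYS_TEST, (uintptr_t)c, 0, 0, 0, 0, 0);
}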
@@ -3,7 +3,7 @@

#include <stdint.h>

int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6);
uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6);

#endif // _LIBMSL_AMD64_SYSCALL_H

@@ -1,4 +1,5 @@
#include <m/proc.h>
#include <alloc/liballoc.h>
#include <m/system.h>
#include <stdint.h>

extern volatile uint8_t __bss_start[];
@@ -6,7 +7,7 @@ extern volatile uint8_t __bss_end[];

extern void app_main (void);

static void msl_clear_bss (void) {
static void clear_bss (void) {
uint8_t* p = (uint8_t*)__bss_start;
while (p < __bss_end) {
*p++ = 0;
@@ -14,9 +15,9 @@ static void msl_clear_bss (void) {
}

void __premain (void) {
msl_clear_bss ();

clear_bss ();
liballoc_init ();
app_main ();

m_proc_quit ();
liballoc_deinit ();
quit ();
}

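With __premain() clearing .bss, bringing up the allocator and only then calling app_main(), an application can rely on malloc()/free() and the m/system.h wrappers from its first line. A minimal sketch of such an app_main(); the mutex resource id is arbitrary (liballoc reserves 500 for itself):

#include <alloc/liballoc.h>
#include <m/system.h>

void app_main (void) {
    char* buf = malloc (64);
    if (buf != NULL) {
        buf[0] = 'a';
        free (buf);
    }

    /* The same mutex wrappers liballoc uses are available to applications. */
    mutex_create (7);
    mutex_lock (7);
    mutex_unlock (7);
    mutex_delete (7);
}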
@@ -1,6 +0,0 @@
#include <m/syscall.h>
#include <m/syscall_defs.h>

int m_proc_quit (void) { return m_syscall (SYS_PROC_QUIT, 0, 0, 0, 0, 0, 0); }

int m_proc_test (void) { return m_syscall (SYS_PROC_TEST, 0, 0, 0, 0, 0, 0); }
@@ -1,8 +0,0 @@
#ifndef _LIBMSL_M_PROC_H
#define _LIBMSL_M_PROC_H

int m_proc_quit (void);

int m_proc_test (void);

#endif // _LIBMSL_M_PROC_H
@@ -1,3 +1,3 @@
c += m/proc.c
c += m/system.c

o += m/proc.o
o += m/system.o

@@ -5,7 +5,7 @@

#if defined(__x86_64__)
#include <amd64/syscall.h>
#define m_syscall msl_amd64_syscall
#define syscall amd64_syscall
#endif

#endif // _LIBMSL_M_SYSCALL_H

Some files were not shown because too many files have changed in this diff.