Compare commits: c3123192d8...master (98 commits)
58 .clang-format Normal file
@@ -0,0 +1,58 @@
BasedOnStyle: LLVM
Language: C

# Indentation
IndentWidth: 2
TabWidth: 2
UseTab: Never

# Braces and blocks
BreakBeforeBraces: Attach
BraceWrapping:
  AfterFunction: false
  AfterControlStatement: false
  AfterStruct: false
  AfterEnum: false
  AfterUnion: false
  BeforeElse: false

# Control statements
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortBlocksOnASingleLine: Never

# Line breaking
ColumnLimit: 100
BreakBeforeBinaryOperators: None
BreakBeforeTernaryOperators: true
BreakStringLiterals: false

# Spacing
SpaceBeforeParens: Always
SpaceBeforeAssignmentOperators: true
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1

# Pointer alignment
PointerAlignment: Left
DerivePointerAlignment: false

# Alignment
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignOperands: false

# Includes
SortIncludes: true

# Comments
ReflowComments: true
CommentPragmas: '^ IWYU pragma:'

# Misc
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
37 .editorconfig Normal file
@@ -0,0 +1,37 @@
root = true

# Default for all files
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

# C / header files
[*.{c,h}]
indent_style = space
indent_size = 2
tab_width = 2
max_line_length = 80

# Assembly (if present; usually tab-sensitive)
[*.S]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false

# Makefiles (MUST use tabs)
[Makefile]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false

[*.mk]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false

# Markdown (avoid wrapping conflicts)
[*.md]
trim_trailing_whitespace = false
max_line_length = off
43 .gitea/workflows/docs.yaml Normal file
@@ -0,0 +1,43 @@
name: Build documentation

on:
  push:
    branches:
      - master

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Install software
        run: |
          sudo apt-get update
          sudo apt-get install -y doxygen make rsync

      - name: Set up python3
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Install mkdocs
        run: |
          pip install --upgrade pip
          pip install mkdocs mkdocs-material pymdown-extensions

      - name: Build
        run: make docs

      - name: Deploy
        env:
          SSH_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
          REMOTE_IP: ${{ vars.DEPLOY_REMOTE_IP }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_KEY" > ~/.ssh/id_rsa
          chmod 600 ~/.ssh/id_rsa
          ssh-keyscan -H "$REMOTE_IP" >> ~/.ssh/known_hosts
          chmod -R 777 site
          rsync -az --delete site/ webuser@"$REMOTE_IP":/home/webuser/mop/
2 .gitignore vendored
@@ -2,3 +2,5 @@ iso_root
mop3.iso
bochs-log.txt
bochs-com1.txt
mop3dist.tar
site/
12 Makefile
@@ -1,9 +1,7 @@
platform ?= amd64

all_kernel:
	make -C kernel platform=$(platform) all

clean_kernel:
	make -C kernel platform=$(platform) clean

.PHONY: all_kernel clean_kernel
include make/apps.mk
include make/kernel.mk
include make/dist.mk
include make/docs.mk
include make/libmsl.mk
11 amd64/flags.mk Normal file
@@ -0,0 +1,11 @@
cflags += --target=x86_64-pc-none-elf \
          -mno-sse \
          -mno-sse2 \
          -mno-avx \
          -mno-mmx \
          -mno-80387 \
          -mno-red-zone \
          -mcmodel=large

ldflags += --target=x86_64-pc-none-elf \
           -Wl,-zmax-page-size=0x1000
69 amd64/link.ld Normal file
@@ -0,0 +1,69 @@
OUTPUT_FORMAT(elf64-x86-64)

ENTRY(_start)

PHDRS {
  text PT_LOAD;
  rodata PT_LOAD;
  data PT_LOAD;
  bss PT_LOAD;
  tls PT_TLS;
}

SECTIONS {
  . = 0x0000500000000000;

  .text : {
    *(.text .text.*)
    *(.ltext .ltext.*)
  } :text

  . = ALIGN(0x1000);

  .rodata : {
    *(.rodata .rodata.*)
  } :rodata

  . = ALIGN(0x1000);

  .data : {
    *(.data .data.*)
    *(.ldata .ldata.*)
  } :data

  . = ALIGN(0x1000);

  __bss_start = .;

  .bss : {
    *(.bss .bss.*)
    *(.lbss .lbss.*)
  } :bss

  __bss_end = .;

  . = ALIGN(0x1000);

  __tdata_start = .;

  .tdata : {
    *(.tdata .tdata.*)
  } :tls

  __tdata_end = .;

  __tbss_start = .;

  .tbss : {
    *(.tbss .tbss.*)
  } :tls

  __tbss_end = .;

  __tls_size = __tbss_end - __tdata_start;

  /DISCARD/ : {
    *(.eh_frame*)
    *(.note .note.*)
  }
}
@@ -1,13 +1,15 @@
cpu: model=p4_prescott_celeron_336
cpu: model=p4_prescott_celeron_336, ips=200000000

memory: guest=4096 host=2048

romimage: file=/usr/share/bochs/BIOS-bochs-latest, options=fastboot
romimage: file=/usr/share/bochs/BIOS-bochs-latest
vgaromimage: file=/usr/share/bochs/VGABIOS-lgpl-latest.bin

ata0: enabled=1
ata0-master: type=cdrom, path=mop3.iso, status=inserted
com1: enabled=1, mode=file, dev=bochs-com1.txt
pci: enabled=1, chipset=i440fx
clock: sync=realtime, time0=local

boot: cdrom
14 aux/devel.sh Executable file
@@ -0,0 +1,14 @@
#!/bin/sh

set -xe

if [ "$1" = "debug" ]; then
  make -B all_kernel buildtype=debug
else
  make -B all_kernel
fi

make -B all_libmsl
make -B all_apps
make -B all_dist
./aux/limine_iso_amd64.sh
7 aux/format.sh Executable file
@@ -0,0 +1,7 @@
#!/bin/sh

set -x

make -B format_kernel
make -B format_libmsl
make -B format_apps
@@ -10,6 +10,8 @@ cp -v boot/limine/limine-bios.sys boot/limine/limine-bios-cd.bin \

cp -v boot/limine/BOOTX64.EFI boot/limine/BOOTIA32.EFI iso_root/EFI/BOOT

cp -v mop3dist.tar iso_root/boot

xorriso -as mkisofs -R -r -J -b boot/limine/limine-bios-cd.bin \
  -no-emul-boot -boot-load-size 4 -boot-info-table -hfsplus \
  -apm-block-size 2048 --efi-boot boot/limine/limine-uefi-cd.bin \
5 aux/qemu_amd64.sh Executable file
@@ -0,0 +1,5 @@
#!/bin/sh

set -x

qemu-system-x86_64 -M q35 -m 4G -serial stdio -enable-kvm -cdrom mop3.iso -smp 4 $@
5 aux/qemu_amd64_debug.sh Executable file
@@ -0,0 +1,5 @@
#!/bin/sh

set -x

qemu-system-x86_64 -M q35 -m 4G -serial stdio -cdrom mop3.iso -smp 4 -s -S $@
@@ -3,3 +3,4 @@ timeout: 10
/mop3
protocol: limine
path: boot():/boot/kernel.elf
module_path: boot():/boot/mop3dist.tar
BIN docs/assets/images/only-processes.png Normal file
Binary file not shown. After Width: | Height: | Size: 118 KiB

BIN docs/assets/images/processes-threads.png Normal file
Binary file not shown. After Width: | Height: | Size: 51 KiB
44 docs/building_kernel.md Normal file
@@ -0,0 +1,44 @@
# Building the MOP3 kernel

This article describes how to build the kernel, how the build system works, and the prerequisites.

## Prerequisites

- POSIX host system (tested on Linux, may break on other systems)
- Git
- GNU make
- LLVM toolchain/Clang C compiler
- Xorriso

## Build steps

cd into the root of the MOP3 source tree.

Build the kernel:
```
make -B all_kernel buildtype=<debug|release>
```

Build essential system applications:
```
make -B all_apps
```

Prepare the ramdisk:
```
make -B all_dist
```

Build the ISO image:
```
./aux/limine_iso_amd64.sh
```

Now you have an ISO image, which can be run by QEMU or burned onto a CD.

For the convenience of the developer, there's a magic "do all" script located in `aux`:
```
./aux/devel.sh # optionally "./aux/devel.sh debug" for debugging
```

It does all the previous steps, just packed into a single script.
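Editor's note: once `mop3.iso` has been produced by the steps above, the helper scripts added later in this change boot it directly, for example:
```
./aux/qemu_amd64.sh
```
This simply wraps the `qemu-system-x86_64 ... -cdrom mop3.iso` invocation shown in `aux/qemu_amd64.sh`.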
4 docs/index.md Normal file
@@ -0,0 +1,4 @@
# MOP3 operating system documentation

MOP3 is a hobby OS project of mine ;).
30 docs/processes_overview.md Normal file
@@ -0,0 +1,30 @@
# Overview of processes in MOP3

## What is a process?

A process is a structure defined to represent the internal state of a user application's environment. This includes
the necessary stacks, code, data and other resources. A process (usually) has its own address space, but in certain
circumstances may share it with another process.

## Only processes vs. processes-threads model

### Overview

MOP3 doesn't have a process-thread separation. Usually in operating systems you'd have a "process", which consists
of multiple worker threads. For example, a single-threaded application is a process which consists of one worker. In MOP3
we do things a little differently. We only have processes, but some processes may work within the same pool of (generally speaking)
"resources", such as a shared address space, shared memory allocations, mutexes and so on. An application then consists
not of threads, but of processes, which are loosely tied together via shared data.

#### Processes-threads model diagram

#### Only processes model diagram


## Scheduling

MOP3 uses a round-robin based scheduler. For now priorities are left unimplemented, i.e. every process has
equal priority, but this may change in the future.

A good explanation of round-robin scheduling can be found on the OSDev wiki: [the article](https://wiki.osdev.org/Scheduling_Algorithms#Round_Robin)
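Editor's note: as a rough illustration of the round-robin idea described above, here is a minimal sketch in C. The `struct proc` fields, the fixed process table, and the function name are hypothetical; they are not taken from MOP3's actual scheduler code.
```c
#include <stddef.h>

/* Hypothetical process states and a fixed process table. */
enum proc_state { PROC_UNUSED, PROC_READY, PROC_RUNNING };

struct proc {
  enum proc_state state;
};

#define PROC_MAX 64
static struct proc proc_table[PROC_MAX];

/* Round-robin: scan forward from the last scheduled slot and pick the next
 * READY process; every process gets a turn, all with equal priority. */
static struct proc* sched_pick_next (size_t* last) {
  for (size_t i = 1; i <= PROC_MAX; i++) {
    size_t idx = (*last + i) % PROC_MAX;
    if (proc_table[idx].state == PROC_READY) {
      *last = idx;
      return &proc_table[idx];
    }
  }
  return NULL; /* nothing runnable */
}
```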
21 generic/flags.mk Normal file
@@ -0,0 +1,21 @@
cflags += -nostdinc \
          -nostdlib \
          -ffreestanding \
          -fno-builtin \
          -std=c11 \
          -pedantic \
          -Wall \
          -Wextra \
          -ffunction-sections \
          -fdata-sections

cflags += -isystem ../include

ldflags += -ffreestanding \
           -nostdlib \
           -fno-builtin \
           -fuse-ld=lld \
           -static \
           -Wl,--gc-sections \
           -Wl,--strip-all \
           -flto
13 include/m/status.h Normal file
@@ -0,0 +1,13 @@
#ifndef _M_STATUS_H
#define _M_STATUS_H

#define ST_OK 0
#define ST_SYSCALL_NOT_FOUND 1
#define ST_UNALIGNED 2
#define ST_OOM_ERROR 3
#define ST_NOT_FOUND 4
#define ST_BAD_ADDRESS_SPACE 5
#define ST_PERMISSION_ERROR 6
#define ST_BAD_RESOURCE 7

#endif // _M_STATUS_H
16 include/m/syscall_defs.h Normal file
@@ -0,0 +1,16 @@
#ifndef _M_SYSCALL_DEFS_H
#define _M_SYSCALL_DEFS_H

#define SYS_QUIT 1
#define SYS_TEST 2
#define SYS_MAP 3
#define SYS_UNMAP 4
#define SYS_CLONE 5
#define SYS_SCHED 6
#define SYS_MUTEX_CREATE 7
#define SYS_MUTEX_DELETE 8
#define SYS_MUTEX_LOCK 9
#define SYS_MUTEX_UNLOCK 10
#define SYS_ARGUMENT_PTR 11

#endif // _M_SYSCALL_DEFS_H
2 init/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
*.o
*.exe
1 init/Makefile Normal file
@@ -0,0 +1 @@
include ../make/user.mk
1 init/app.mk Normal file
@@ -0,0 +1 @@
app := init.exe
46 init/init.c Normal file
@@ -0,0 +1,46 @@
#include <limits.h>
#include <proc/local.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
#include <string/string.h>

#define MUTEX 2000

LOCAL volatile char letter = 'c';

void app_proc (void) {
  char arg_letter = (char)(uintptr_t)argument_ptr ();

  letter = arg_letter;

  for (;;) {
    mutex_lock (MUTEX);

    for (int i = 0; i < 3; i++)
      test (letter);

    mutex_unlock (MUTEX);
  }

  process_quit ();
}

void app_main (void) {
  mutex_create (MUTEX);

  letter = 'a';

  process_spawn (&app_proc, (void*)'a');
  process_spawn (&app_proc, (void*)'b');
  process_spawn (&app_proc, (void*)'c');

  for (;;) {
    mutex_lock (MUTEX);

    for (int i = 0; i < 3; i++)
      test (letter);

    mutex_unlock (MUTEX);
  }
}
3 init/src.mk Normal file
@@ -0,0 +1,3 @@
c += init.c

o += init.o
2 kernel/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
*.json
.cache
@@ -5,6 +5,7 @@ ldflags :=
cflags :=
buildtype ?= release

include vars.mk
include flags.mk
include src.mk

@@ -22,4 +23,13 @@ build/kernel.elf: $(o)
clean:
	rm -f $(o) build/kernel.elf

.PHONY: all clean
format:
	clang-format -i $$(git ls-files '*.c' '*.h' \
		':!limine/limine.h' \
		':!c_headers/include/**' \
		':!uACPI/source/**' \
		':!uACPI/include/**' \
		':!uACPI/tests/**' \
		':!libk/printf*')

.PHONY: all clean format
271 kernel/amd64/apic.c Normal file
@@ -0,0 +1,271 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
#include <sys/time.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>

#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24

/* ID of Local APIC */
#define LAPIC_ID 0x20
/* End of interrupt register */
#define LAPIC_EOI 0xB0
/* Spurious interrupt vector register */
#define LAPIC_SIVR 0xF0
/* Interrupt command register */
#define LAPIC_ICR 0x300
/* LVT timer register */
#define LAPIC_LVTTR 0x320
/* Timer initial count register */
#define LAPIC_TIMICT 0x380
/* Timer current count register */
#define LAPIC_TIMCCT 0x390
/* Divide config register */
#define LAPIC_DCR 0x3E0

#define DIVIDER_VALUE 0x0B

struct ioapic {
  struct acpi_madt_ioapic table_data;
  spin_lock_t lock;
  uintptr_t mmio_base;
};

/* Table of IOAPICS */
static struct ioapic ioapics[IOAPICS_MAX];
/* Table of interrupt source overrides */
/* clang-format off */
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
/* clang-format on */
/* Count of actual IOAPIC entries */
static size_t ioapic_entries = 0;
/* Count of actual interrupt source overrides */
static size_t intr_src_override_entries = 0;

static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;

/* Read IOAPIC */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
  spin_lock_ctx_t ctxioar;

  spin_lock (&ioapic->lock, &ctxioar);
  *(volatile uint32_t*)ioapic->mmio_base = reg;
  uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
  spin_unlock (&ioapic->lock, &ctxioar);
  return ret;
}

/* Write IOAPIC */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
  spin_lock_ctx_t ctxioaw;

  spin_lock (&ioapic->lock, &ctxioaw);
  *(volatile uint32_t*)ioapic->mmio_base = reg;
  *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
  spin_unlock (&ioapic->lock, &ctxioaw);
}

/* Find an IOAPIC corresponding to the provided IRQ */
static struct ioapic* amd64_ioapic_find (uint32_t irq) {
  struct ioapic* ioapic = NULL;

  for (size_t i = 0; i < ioapic_entries; i++) {
    ioapic = &ioapics[i];
    uint32_t version = amd64_ioapic_read (ioapic, 1);
    uint32_t max = ((version >> 16) & 0xFF);

    if ((irq >= ioapic->table_data.gsi_base) && (irq <= (ioapic->table_data.gsi_base + max)))
      return ioapic;
  }

  return NULL;
}

/*
 * Route IRQ to an IDT entry of a given Local APIC.
 *
 * vec - Interrupt vector number, which will be delivered to the CPU.
 * irq - Legacy IRQ number to be routed. Can be changed by an interrupt source override
 *       into a different GSI.
 * flags - IOAPIC redirection flags.
 * lapic_id - Local APIC that will receive the interrupt.
 */
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
  struct ioapic* ioapic = NULL;
  struct acpi_madt_interrupt_source_override* override;
  bool found_override = false;

  for (size_t i = 0; i < intr_src_override_entries; i++) {
    override = &intr_src_overrides[i];
    if (override->source == irq) {
      found_override = true;
      break;
    }
  }

  uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);

  if (found_override) {
    uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
    uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
    calc_flags |= (uint64_t)mode << 15;
    calc_flags |= (uint64_t)polarity << 13;
  }

  uint32_t gsi = found_override ? override->gsi : irq;

  ioapic = amd64_ioapic_find (gsi);

  if (ioapic == NULL)
    return;

  uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;

  amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
  amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
}

/* Find and initialize the IOAPIC */
void amd64_ioapic_init (void) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  struct uacpi_table apic_table;
  uacpi_status status = uacpi_table_find_by_signature (ACPI_MADT_SIGNATURE, &apic_table);
  if (status != UACPI_STATUS_OK) {
    DEBUG ("Could not find MADT table!\n");
    spin ();
  }

  struct acpi_madt* apic = (struct acpi_madt*)apic_table.virt_addr;
  struct acpi_entry_hdr* current = (struct acpi_entry_hdr*)apic->entries;

  for (;;) {
    if ((uintptr_t)current >=
        ((uintptr_t)apic->entries + apic->hdr.length - sizeof (struct acpi_madt)))
      break;

    switch (current->type) {
      case ACPI_MADT_ENTRY_TYPE_IOAPIC: {
        struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
        mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
                            (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
                            MM_PG_PRESENT | MM_PG_RW);
        ioapics[ioapic_entries++] = (struct ioapic){
          .lock = SPIN_LOCK_INIT,
          .table_data = *ioapic_table_data,
          .mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
        };
      } break;
      case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
        struct acpi_madt_interrupt_source_override* override =
          (struct acpi_madt_interrupt_source_override*)current;
        intr_src_overrides[intr_src_override_entries++] = *override;
      } break;
    }

    current = (struct acpi_entry_hdr*)((uintptr_t)current + current->length);
  }
}

/* Get MMIO base of Local APIC */
static uintptr_t amd64_lapic_base (void) { return thiscpu->lapic_mmio_base; }

/* Write Local APIC */
static void amd64_lapic_write (uint32_t reg, uint32_t value) {
  *(volatile uint32_t*)(amd64_lapic_base () + reg) = value;
}

/* Read Local APIC */
static uint32_t amd64_lapic_read (uint32_t reg) {
  return *(volatile uint32_t*)(amd64_lapic_base () + reg);
}

/* Get ID of Local APIC */
uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }

/* Send End of interrupt command to Local APIC */
void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }

/*
 * Calibrate Local APIC to send interrupts in a set interval.
 *
 * us - Period length in microseconds
 */
static uint32_t amd64_lapic_calibrate (uint32_t us) {
  spin_lock_ctx_t ctxlacb;

  spin_lock (&lapic_calibration_lock, &ctxlacb);

  amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);

  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
  amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);

  sleep_micro (us);

  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
  uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
  DEBUG ("timer ticks = %u\n", ticks);

  spin_unlock (&lapic_calibration_lock, &ctxlacb);

  return ticks;
}

/*
 * Starts a Local APIC, configures LVT timer to send interrupts at SCHED_PREEMPT_TIMER.
 *
 * ticks - Initial tick count
 */
static void amd64_lapic_start (uint32_t ticks) {
  amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
  amd64_lapic_write (LAPIC_TIMICT, ticks);
  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
}

/*
 * Initialize Local APIC, configure to send timer interrupts at a given period. See
 * amd64_lapic_calibrate and amd64_lapic_start.
 */
void amd64_lapic_init (uint32_t us) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11));

  uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
  thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;

  mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);

  amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));

  thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
  amd64_lapic_start (thiscpu->lapic_ticks);
}

/*
 * Send an IPI to a given Local APIC. This will invoke an IDT stub located at vec.
 *
 * lapic_id - Target Local APIC
 * vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
 */
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
  /* wait for previous IPI to finish */
  while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) {
    __asm__ volatile ("pause");
  }

  amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
  amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
}
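Editor's note: a brief worked example of what the calibration above measures (illustrative numbers, not taken from real hardware). Assuming DIVIDER_VALUE 0x0B selects divide-by-1 and the LAPIC timer counts at roughly 100 MHz, amd64_lapic_calibrate (10000) lets the masked one-shot counter run down for 10 ms while sleep_micro waits, so ticks = 0xFFFFFFFF - TIMCCT is on the order of 1,000,000. Reloading TIMICT with that value in amd64_lapic_start then produces one SCHED_PREEMPT_TIMER interrupt roughly every 10 ms.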
14 kernel/amd64/apic.h Normal file
@@ -0,0 +1,14 @@
#ifndef _KERNEL_AMD64_APIC_H
#define _KERNEL_AMD64_APIC_H

#include <libk/std.h>

void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_init (void);

uint32_t amd64_lapic_id (void);
void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
void amd64_lapic_init (uint32_t us);

#endif // _KERNEL_AMD64_APIC_H
@@ -1,20 +1,56 @@
#include <limine/limine.h>
#include <amd64/apic.h>
#include <amd64/debug.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <sys/debug.h>
#include <mm/pmm.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <uacpi/uacpi.h>

#define UACPI_MEMORY_BUFFER_MAX 4096

ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];

/*
 * The kernel starts booting here. This is the entry point after Limine hands control. We set up all
 * the necessary platform-dependent subsystems/drivers and jump into the init app.
 */
void bootmain (void) {
  amd64_init();
  DEBUG("Hello from amd64!\n");
  struct limine_mp_response* mp = limine_mp_request.response;

  struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);

  amd64_init (bsp_cpu, false);
  syscall_init ();
  amd64_debug_init ();
  pmm_init ();
  mm_init ();

  int *a = malloc(sizeof(int));
  *a = 6969;
  DEBUG("a=%p, *a=%d\n", a, *a);
  rd_init ();

  *(volatile int *)0 = 123;
  uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer));

  for (;;);
  amd64_ioapic_init ();
  amd64_hpet_init ();

  smp_init ();

  proc_init ();

  for (;;)
    ;
}
@@ -1,23 +1,45 @@
#include <libk/std.h>
#include <libk/string.h>
#include <libk/printf.h>
#include <sys/debug.h>
#include <amd64/debug.h>
#include <amd64/io.h>
#include <libk/printf.h>
#include <libk/std.h>
#include <libk/string.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>

/* Port for printing to serial */
/* TODO: Make this configurable */
#define PORT_COM1 0x03F8
/* debugprintf buffer size */
#define BUFFER_SIZE 1024
/*
 * Lock, which ensures that prints to the serial port are atomic (ie. one debugprintf is atomic in
 * itself).
 */
static spin_lock_t serial_lock = SPIN_LOCK_INIT;

static bool debug_init = false;

/* Block until TX buffer is empty */
static bool amd64_debug_serial_tx_empty (void) {
  return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
}

/* Write a single character to serial */
static void amd64_debug_serial_write (char x) {
  while (!amd64_debug_serial_tx_empty());
  while (!amd64_debug_serial_tx_empty ())
    ;
  amd64_io_outb (PORT_COM1, (uint8_t)x);
}

/*
 * Formatted printing to serial. serial_lock ensures that all prints are atomic.
 */
void debugprintf (const char* fmt, ...) {
  spin_lock_ctx_t ctxdbgp;

  if (!debug_init)
    return;

  char buffer[BUFFER_SIZE];
  memset (buffer, 0, sizeof (buffer));

@@ -29,12 +51,18 @@ void debugprintf(const char *fmt, ...) {
  buffer[sizeof (buffer) - 1] = '\0';

  const char* p = buffer;

  spin_lock (&serial_lock, &ctxdbgp);

  while (*p) {
    amd64_debug_serial_write (*p);
    p++;
  }

  spin_unlock (&serial_lock, &ctxdbgp);
}

/* Initialize serial */
void amd64_debug_init (void) {
  amd64_io_outb (PORT_COM1 + 1, 0x00);
  amd64_io_outb (PORT_COM1 + 3, 0x80);
@@ -43,4 +71,6 @@ void amd64_debug_init(void) {
  amd64_io_outb (PORT_COM1 + 3, 0x03);
  amd64_io_outb (PORT_COM1 + 2, 0xC7);
  amd64_io_outb (PORT_COM1 + 4, 0x0B);

  debug_init = true;
}
@@ -1,6 +1,11 @@
cflags += --target=x86_64-pc-none-elf \
          -mno-sse \
          -mno-avx
          -mno-sse2 \
          -mno-avx \
          -mno-mmx \
          -mno-80387 \
          -mno-red-zone \
          -fno-omit-frame-pointer

ldflags += --target=x86_64-pc-none-elf \
           -Wl,-zmax-page-size=0x1000
45 kernel/amd64/gdt.h Normal file
@@ -0,0 +1,45 @@
#ifndef _KERNEL_AMD64_GDT_H
#define _KERNEL_AMD64_GDT_H

#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>

#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UDATA 0x18
#define GDT_UCODE 0x20
#define GDT_TSS 0x28

/* Size of kernel stack */
#define KSTACK_SIZE (32 * 1024)

/*
 * 64-bit GDT structure. For more info see:
 * - https://wiki.osdev.org/Global_Descriptor_Table
 * - https://wiki.osdev.org/GDT_Tutorial
 */

struct gdt_entry {
  uint16_t limitlow;
  uint16_t baselow;
  uint8_t basemid;
  uint8_t access;
  uint8_t gran;
  uint8_t basehigh;
} PACKED;

/* Struct that gets loaded into GDTR */
struct gdt_ptr {
  uint16_t limit;
  uint64_t base;
} PACKED;

/* New, extended GDT (we need to extend Limine's GDT) */
struct gdt_extended {
  struct gdt_entry old[5];
  struct gdt_entry tsslow;
  struct gdt_entry tsshigh;
} PACKED;

#endif // _KERNEL_AMD64_GDT_H
142 kernel/amd64/hpet.c Normal file
@@ -0,0 +1,142 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>

/*
 * HPET (High Precision Event Timer) driver code. See more at https://wiki.osdev.org/HPET
 */

/* HPET Main Counter Value Register */
#define HPET_MCVR 0xF0
/* HPET General Configuration Register */
#define HPET_GCR 0x10
/* HPET General Capabilities and ID Register */
#define HPET_GCIDR 0x00

/* Set whether we should use 32-bit or 64-bit reads/writes */
static bool hpet_32bits = 1;
/* Physical address for HPET MMIO */
static uintptr_t hpet_paddr;
/* HPET period in femtoseconds */
static uint64_t hpet_period_fs;
/* Lock, which protects concurrent access. See amd64/smp.c */
static spin_lock_t hpet_lock = SPIN_LOCK_INIT;

/* Read a HPET register. Assumes caller holds hpet_lock */
static uint64_t amd64_hpet_read64 (uint32_t reg) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
  return *(volatile uint64_t*)(hpet_vaddr + reg);
}

static uint32_t amd64_hpet_read32 (uint32_t reg) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
  return *(volatile uint32_t*)(hpet_vaddr + reg);
}

/* Write a HPET register. Assumes caller holds hpet_lock */
static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
  *(volatile uint64_t*)(hpet_vaddr + reg) = value;
}

static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
  *(volatile uint32_t*)(hpet_vaddr + reg) = value;
}

/* Read current value of HPET_MCVR register. */

static uint64_t amd64_hpet_read_counter (void) {
  uint64_t value;
  spin_lock_ctx_t ctxhrc;

  spin_lock (&hpet_lock, &ctxhrc);

  if (!hpet_32bits)
    value = amd64_hpet_read64 (HPET_MCVR);
  else {
    uint32_t hi1, lo, hi2;
    do {
      hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
      lo = amd64_hpet_read32 (HPET_MCVR + 0);
      hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
    } while (hi1 != hi2);

    value = ((uint64_t)hi1 << 32) | lo;
  }

  spin_unlock (&hpet_lock, &ctxhrc);

  return value;
}

static void amd64_hpet_write_counter (uint64_t value) {
  spin_lock_ctx_t ctxhwc;

  spin_lock (&hpet_lock, &ctxhwc);

  if (!hpet_32bits)
    amd64_hpet_write64 (HPET_MCVR, value);
  else {
    amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
    amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
  }

  spin_unlock (&hpet_lock, &ctxhwc);
}

/* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being
 * held. */
void amd64_hpet_sleep_micro (uint64_t us) {
  if (hpet_period_fs == 0)
    return;

  uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL);
  uint64_t start = amd64_hpet_read_counter ();

  for (;;) {
    uint64_t now = amd64_hpet_read_counter ();

    if ((now - start) >= ticks_to_wait)
      break;

    __asm__ volatile ("pause" ::: "memory");
  }
}

/* Initialize HPET */
void amd64_hpet_init (void) {
  struct uacpi_table hpet_table;
  uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);
  if (status != UACPI_STATUS_OK) {
    DEBUG ("Could not find HPET table!\n");
    spin ();
  }

  struct acpi_hpet* hpet = (struct acpi_hpet*)hpet_table.virt_addr;
  hpet_paddr = (uintptr_t)hpet->address.address;

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);

  uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
  hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

  hpet_period_fs = (uint32_t)(caps >> 32);

  amd64_hpet_write64 (HPET_GCR, 0);
  amd64_hpet_write_counter (0);
  amd64_hpet_write64 (HPET_GCR, 1);
}
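Editor's note: a worked example of the tick conversion in amd64_hpet_sleep_micro above (illustrative hardware values). For a 10 MHz HPET the main-counter period is 100 ns, i.e. hpet_period_fs = 100,000,000, so hpet_period_fs / 1000000 = 100 ns per tick; sleeping us = 1000 microseconds gives ticks_to_wait = (1000 * 1000) / 100 = 10,000 main-counter ticks before the busy-wait loop exits.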
9 kernel/amd64/hpet.h Normal file
@@ -0,0 +1,9 @@
#ifndef _KERNEL_AMD64_HPET_H
#define _KERNEL_AMD64_HPET_H

#include <libk/std.h>

void amd64_hpet_sleep_micro (uint64_t us);
void amd64_hpet_init (void);

#endif // _KERNEL_AMD64_HPET_H
@@ -1,46 +1,17 @@
#include <amd64/gdt.h>
#include <amd64/init.h>
#include <amd64/intr.h>
#include <amd64/smp.h>
#include <aux/compiler.h>
#include <libk/std.h>
#include <libk/string.h>
#include <amd64/init.h>
#include <amd64/tss.h>
#include <amd64/debug.h>
#include <amd64/intr.h>

#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UCODE 0x18
#define GDT_UDATA 0x20
#define GDT_TSS 0x28

#define TSS 0x80
#define TSS_PRESENT 0x89

#define KSTACK_SIZE (8*1024)

struct gdt_entry {
  uint16_t limitlow;
  uint16_t baselow;
  uint8_t basemid;
  uint8_t access;
  uint8_t gran;
  uint8_t basehigh;
} __attribute__((packed));

struct gdt_ptr {
  uint16_t limit;
  uint64_t base;
} __attribute__((packed));

struct gdt_extended {
  struct gdt_entry old[5];
  struct gdt_entry tsslow;
  struct gdt_entry tsshigh;
} __attribute__((packed));

__attribute__((aligned(16))) static volatile uint8_t kernel_stack[KSTACK_SIZE];
__attribute__((aligned(16))) static volatile struct gdt_extended gdt;

static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
                          uint32_t limit, uint8_t acc, uint8_t gran) {
/* Set a GDT entry */
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
                           uint8_t acc, uint8_t gran) {
  ent->baselow = (base & 0xFFFF);
  ent->basemid = (base >> 16) & 0xFF;
  ent->basehigh = (base >> 24) & 0xFF;
@@ -49,41 +20,45 @@ static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
  ent->access = acc;
}

static void amd64_gdt_init(void) {
  volatile struct tss *tss = amd64_get_tss();
/* Initialize GDT and TSS structures for a given CPU */
static void amd64_gdt_init (struct cpu* cpu) {
  volatile struct tss* tss = &cpu->tss;
  volatile struct gdt_extended* gdt = &cpu->gdt;

  memset((void *)&gdt, 0, sizeof(gdt));
  memset((void *)kernel_stack, 0, sizeof(kernel_stack));
  memset ((void*)gdt, 0, sizeof (*gdt));
  memset ((void*)tss, 0, sizeof (*tss));

  tss->iopb_off = sizeof (*tss);
  tss->rsp0 = (uint64_t)((uintptr_t)kernel_stack + sizeof(kernel_stack));
  tss->rsp0 = (uint64_t)((uintptr_t)cpu->kernel_stack + sizeof (cpu->kernel_stack));
  tss->ist[0] = (uint64_t)((uintptr_t)cpu->except_stack + sizeof (cpu->except_stack));
  tss->ist[1] = (uint64_t)((uintptr_t)cpu->irq_stack + sizeof (cpu->irq_stack));

  uint64_t tssbase = (uint64_t)&tss;
  uint64_t tssbase = (uint64_t)tss;
  uint64_t tsslimit = sizeof (*tss) - 1;

  amd64_gdt_set(&gdt.old[0], 0, 0, 0, 0);
  amd64_gdt_set(&gdt.old[1], 0, 0xFFFFF, 0x9A, 0xA0);
  amd64_gdt_set(&gdt.old[2], 0, 0xFFFFF, 0x92, 0xC0);
  amd64_gdt_set(&gdt.old[3], 0, 0xFFFFF, 0xFA, 0xA0);
  amd64_gdt_set(&gdt.old[4], 0, 0xFFFFF, 0xF2, 0xC0);
  amd64_gdt_set(&gdt.tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
  amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
  amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
  amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
  amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
  amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
  amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);

  uint32_t tssbasehigh = (tssbase >> 32);
  gdt.tsshigh.limitlow = (tssbasehigh & 0xFFFF);
  gdt.tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
  gdt.tsshigh.basemid = 0;
  gdt.tsshigh.basehigh = 0;
  gdt.tsshigh.access = 0;
  gdt.tsshigh.gran = 0;
  gdt->tsshigh.limitlow = (tssbasehigh & 0xFFFF);
  gdt->tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
  gdt->tsshigh.basemid = 0;
  gdt->tsshigh.basehigh = 0;
  gdt->tsshigh.access = 0;
  gdt->tsshigh.gran = 0;

  /* Load GDTR */
  struct gdt_ptr gdtr;
  gdtr.limit = sizeof(gdt) - 1;
  gdtr.base = (uint64_t)&gdt;
  gdtr.limit = sizeof (*gdt) - 1;
  gdtr.base = (uint64_t)gdt;
  __asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");

  __asm__ volatile(
      "pushq %[kcode]\n"
  /* Reload CS */
  __asm__ volatile ("pushq %[kcode]\n"
                    "lea 1f(%%rip), %%rax\n"
                    "pushq %%rax\n"
                    "lretq\n"
@@ -94,14 +69,21 @@ static void amd64_gdt_init(void) {
                    "movw %%ax, %%ss\n"
                    :
                    : [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
                    : "rax", "memory"
  );
                    : "rax", "memory");

  __asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
}

void amd64_init(void) {
  amd64_gdt_init();
  amd64_debug_init();
/*
 * Initialize essentials (GDT, TSS, IDT) for a given CPU
 *
 * load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on
 *            the BSP
 */
void amd64_init (struct cpu* cpu, bool load_idt) {
  amd64_gdt_init (cpu);
  if (load_idt)
    amd64_load_idt ();
  else
    amd64_intr_init ();
}
@@ -1,6 +1,8 @@
#ifndef _KERNEL_AMD64_INIT_H
#define _KERNEL_AMD64_INIT_H

void amd64_init(void);
#include <amd64/smp.h>

void amd64_init (struct cpu* cpu, bool load_idt);

#endif // _KERNEL_AMD64_INIT_H
@@ -1,8 +1,18 @@
#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <m/syscall_defs.h>
#include <sys/debug.h>
#include <amd64/intr.h>
#include <amd64/io.h>
#include <sys/irq.h>
#include <sys/smp.h>
#include <sys/spin.h>
#include <syscall/syscall.h>

/* 8259 PIC defs. */
#define PIC1 0x20
@@ -31,6 +41,7 @@

#define IDT_ENTRIES_MAX 256

/* 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table */
struct idt_entry {
  uint16_t intrlow;
  uint16_t kernel_cs;
@@ -39,21 +50,21 @@ struct idt_entry {
  uint16_t intrmid;
  uint32_t intrhigh;
  uint32_t resv;
} __attribute__((packed));
} PACKED;

struct idt {
  uint16_t limit;
  uint64_t base;
} __attribute__((packed));
} PACKED;

__attribute__((aligned(16))) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
static volatile struct idt idt;

extern void amd64_spin(void);

/* Remaps and disables old 8259 PIC, since we'll be using APIC. */
static void amd64_init_pic (void) {
#define IO_OP(fn, ...) fn(__VA_ARGS__); amd64_io_wait()
#define IO_OP(fn, ...) \
  fn (__VA_ARGS__);    \
  amd64_io_wait ()

  IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
  IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
@@ -74,43 +85,56 @@ static void amd64_init_pic(void) {
#undef IO_OP
}

static void amd64_idt_set(volatile struct idt_entry *ent, uint64_t handler, uint8_t flags) {
/* Set IDT entry */
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
                           uint8_t ist) {
  ent->intrlow = (handler & 0xFFFF);
  ent->kernel_cs = 0x08; // GDT_KCODE (init.c)
  ent->ist = 0;
  ent->kernel_cs = GDT_KCODE;
  ent->ist = ist;
  ent->attrs = flags;
  ent->intrmid = ((handler >> 16) & 0xFFFF);
  ent->intrhigh = ((handler >> 32) & 0xFFFFFFFF);
  ent->resv = 0;
}

/* Load the IDT */
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }

/* Initialize IDT entries */
static void amd64_idt_init (void) {
  memset ((void*)idt_entries, 0, sizeof (idt_entries));

#define IDT_ENTRY(n) \
#define IDT_ENTRY(n, ist) \
  extern void amd64_intr##n (void); \
  amd64_idt_set(&idt_entries[(n)], (uint64_t)&amd64_intr ## n, 0x8E)
  IDT_ENTRY(0); IDT_ENTRY(1); IDT_ENTRY(2); IDT_ENTRY(3);
  IDT_ENTRY(4); IDT_ENTRY(5); IDT_ENTRY(6); IDT_ENTRY(7);
  IDT_ENTRY(8); IDT_ENTRY(9); IDT_ENTRY(10); IDT_ENTRY(11);
  IDT_ENTRY(12); IDT_ENTRY(13); IDT_ENTRY(14); IDT_ENTRY(15);
  IDT_ENTRY(16); IDT_ENTRY(17); IDT_ENTRY(18); IDT_ENTRY(19);
  IDT_ENTRY(20); IDT_ENTRY(21); IDT_ENTRY(22); IDT_ENTRY(23);
  IDT_ENTRY(24); IDT_ENTRY(25); IDT_ENTRY(26); IDT_ENTRY(27);
  IDT_ENTRY(28); IDT_ENTRY(29); IDT_ENTRY(30); IDT_ENTRY(31);
  IDT_ENTRY(32); IDT_ENTRY(33); IDT_ENTRY(34); IDT_ENTRY(35);
  IDT_ENTRY(36); IDT_ENTRY(37); IDT_ENTRY(38); IDT_ENTRY(39);
  IDT_ENTRY(40); IDT_ENTRY(41); IDT_ENTRY(42); IDT_ENTRY(43);
  IDT_ENTRY(44); IDT_ENTRY(45); IDT_ENTRY(46); IDT_ENTRY(47);
  amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
  /* clang-format off */
  IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
  IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
  IDT_ENTRY (8, 0); IDT_ENTRY (9, 0); IDT_ENTRY (10, 0); IDT_ENTRY (11, 0);
  IDT_ENTRY (12, 0); IDT_ENTRY (13, 0); IDT_ENTRY (14, 0); IDT_ENTRY (15, 0);
  IDT_ENTRY (16, 0); IDT_ENTRY (17, 0); IDT_ENTRY (18, 0); IDT_ENTRY (19, 0);
  IDT_ENTRY (20, 0); IDT_ENTRY (21, 0); IDT_ENTRY (22, 0); IDT_ENTRY (23, 0);
  IDT_ENTRY (24, 0); IDT_ENTRY (25, 0); IDT_ENTRY (26, 0); IDT_ENTRY (27, 0);
  IDT_ENTRY (28, 0); IDT_ENTRY (29, 0); IDT_ENTRY (30, 0); IDT_ENTRY (31, 0);
  IDT_ENTRY (32, 1); IDT_ENTRY (33, 1); IDT_ENTRY (34, 1); IDT_ENTRY (35, 1);
  IDT_ENTRY (36, 1); IDT_ENTRY (37, 1); IDT_ENTRY (38, 1); IDT_ENTRY (39, 1);
  IDT_ENTRY (40, 1); IDT_ENTRY (41, 1); IDT_ENTRY (42, 1); IDT_ENTRY (43, 1);
  IDT_ENTRY (44, 1); IDT_ENTRY (45, 1); IDT_ENTRY (46, 1); IDT_ENTRY (47, 1);

  IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
  IDT_ENTRY (TLB_SHOOTDOWN, 1);
  IDT_ENTRY (CPU_REQUEST_SCHED, 1);
  IDT_ENTRY (CPU_SPURIOUS, 1);
  /* clang-format on */
#undef IDT_ENTRY

  idt.limit = sizeof (idt_entries) - 1;
  idt.base = (uint64_t)idt_entries;

  __asm__ volatile("lidt %0" :: "m"(idt));
  __asm__ volatile("sti");
  amd64_load_idt ();
}

/* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */
static void amd64_intr_exception (struct saved_regs* regs) {
  DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);

@@ -119,8 +143,7 @@ static void amd64_intr_exception(struct saved_regs *regs) {
  uint64_t cr3;
  __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3));

  debugprintf(
      "r15=%016lx r14=%016lx r13=%016lx\n"
  debugprintf ("r15=%016lx r14=%016lx r13=%016lx\n"
               "r12=%016lx r11=%016lx r10=%016lx\n"
               "r9 =%016lx r8 =%016lx rbp=%016lx\n"
               "rdi=%016lx rsi=%016lx rdx=%016lx\n"
@@ -128,30 +151,71 @@ static void amd64_intr_exception(struct saved_regs *regs) {
               "err=%016lx rip=%016lx cs =%016lx\n"
               "rfl=%016lx rsp=%016lx ss =%016lx\n"
               "cr2=%016lx cr3=%016lx rbx=%016lx\n",
      regs->r15, regs->r14, regs->r13,
      regs->r12, regs->r11, regs->r10,
      regs->r9, regs->r8, regs->rbp,
      regs->rdi, regs->rsi, regs->rdx,
      regs->rcx, regs->rax, regs->trap,
      regs->error, regs->rip, regs->cs,
      regs->rflags, regs->rsp, regs->ss,
      cr2, cr3, regs->rbx
  );
               regs->r15, regs->r14, regs->r13, regs->r12, regs->r11, regs->r10, regs->r9, regs->r8,
               regs->rbp, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->rax, regs->trap,
               regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
               regs->rbx);

  amd64_spin();
  if (regs->cs == (GDT_UCODE | 0x03)) {
    proc_kill (thiscpu->proc_current);
  } else {
    spin ();
  }
}

/* Handle incoming interrupt, dispatch IRQ handlers. */
void amd64_intr_handler (void* stack_ptr) {
  spin_lock_ctx_t ctxcpu, ctxpr;

  amd64_load_kernel_cr3 ();

  struct saved_regs* regs = stack_ptr;

  spin_lock (&thiscpu->lock, &ctxcpu);
  struct proc* proc_current = thiscpu->proc_current;
  spin_lock (&proc_current->lock, &ctxpr);

  memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));

  spin_unlock (&proc_current->lock, &ctxpr);
  spin_unlock (&thiscpu->lock, &ctxcpu);

  if (regs->trap <= 31) {
    amd64_intr_exception (regs);
  } else {
    DEBUG("unknown trap %lu\n", regs->trap);
    amd64_lapic_eoi ();

    struct irq* irq = irq_find (regs->trap);

    if (irq != NULL) {
      irq->func (irq->arg, stack_ptr);
    }
  }
}

/* Initialize interrupts */
void amd64_intr_init (void) {
  amd64_init_pic ();
  amd64_idt_init ();
}

/* Aux. */

/* Save RFLAGS of the current CPU */
static uint64_t amd64_irq_save_flags (void) {
  uint64_t rflags;
  __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
  return rflags;
}

/* Restore interrupts (IF bit) from RFLAGS */
static void amd64_irq_restore_flags (uint64_t rflags) {
  if (rflags & (1ULL << 9))
    __asm__ volatile ("sti");
}

/* Save current interrupt state */
void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }

/* Restore interrupt state */
void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
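Editor's note: the dispatch path above calls `irq->func (irq->arg, stack_ptr)` for vectors above 31. A minimal sketch of what a matching handler could look like follows; the registration call `irq_register` and the handler name are hypothetical, only the callback shape is taken from the dispatch code.
```c
/* Hypothetical handler: signature mirrors the irq->func(arg, stack_ptr) call above. */
static void timer_irq_handler (void* arg, void* stack_ptr) {
  (void)arg;
  (void)stack_ptr;
  /* e.g. bump a tick counter, then let the scheduler decide on preemption */
}

/* Hypothetical registration against the SCHED_PREEMPT_TIMER vector (80):
 * irq_register (SCHED_PREEMPT_TIMER, timer_irq_handler, NULL);
 */
```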
@@ -1,6 +1,7 @@
#ifndef _KERNEL_AMD64_INTR_H
#define _KERNEL_AMD64_INTR_H

#include <aux/compiler.h>
#include <libk/std.h>

struct saved_regs {
@@ -28,8 +29,9 @@ struct saved_regs {
  uint64_t rflags;
  uint64_t rsp;
  uint64_t ss;
} __attribute__((packed));
} PACKED;

void amd64_load_idt (void);
void amd64_intr_init (void);

#endif // _KERNEL_AMD64_INTR_H
12 kernel/amd64/intr_defs.h Normal file
@@ -0,0 +1,12 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H

/* Definitions for custom, nonstandard IDT entries. They have to be remapped by amd64_resolve_irq
 * into legacy IRQs. */

#define SCHED_PREEMPT_TIMER 80
#define TLB_SHOOTDOWN 81
#define CPU_REQUEST_SCHED 82
#define CPU_SPURIOUS 255

#endif // _KERNEL_AMD64_INTR_DEFS_H
@@ -1,7 +1,7 @@
.extern amd64_intr_handler
#include <amd64/intr_defs.h>
#include <amd64/regsasm.h>

dupa:
  jmp dupa
.extern amd64_intr_handler

#define err(z) \
  pushq $z;
@@ -10,53 +10,38 @@ dupa:
  pushq $0; \
  pushq $z;

#define push_regs \
  pushq %rax; \
  pushq %rcx; \
  pushq %rdx; \
  pushq %rsi; \
  pushq %rdi; \
  pushq %rbp; \
  pushq %rbx; \
  pushq %r8; \
  pushq %r9; \
  pushq %r10; \
  pushq %r11; \
  pushq %r12; \
  pushq %r13; \
  pushq %r14; \
  pushq %r15;

#define pop_regs \
  popq %r15; \
  popq %r14; \
  popq %r13; \
  popq %r12; \
  popq %r11; \
  popq %r10; \
  popq %r9; \
  popq %r8; \
  pushq %rbx; \
  popq %rbp; \
  popq %rdi; \
  popq %rsi; \
  popq %rdx; \
  popq %rcx; \
  popq %rax;

#define make_intr_stub(x, n) \
  .global amd64_intr ## n; \
  amd64_intr ## n:; \
  x(n); \
  cli; \
  ; \
  push_regs; \
  ; \
  movw $0x10, %ax; \
  movw %ax, %ds; \
  movw %ax, %es; \
  ; \
  cld; \
  ; \
  movq %rsp, %rdi; \
  andq $~0xF, %rsp; \
  ; \
  movq %cr3, %rax; pushq %rax; \
  ; \
  movq %rsp, %rbp; \
  ; \
  subq $8, %rsp; \
  andq $-16, %rsp; \
  ; \
  callq amd64_intr_handler; \
  movq %rdi, %rsp; \
  ; \
  movq %rbp, %rsp; \
  ; \
  popq %rax; movq %rax, %cr3; \
  ; \
  pop_regs; \
  addq $16, %rsp; \
  ; \
  iretq;


@@ -108,3 +93,8 @@ make_intr_stub(no_err, 44)
make_intr_stub(no_err, 45)
make_intr_stub(no_err, 46)
make_intr_stub(no_err, 47)

make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN)
make_intr_stub(no_err, CPU_REQUEST_SCHED)
make_intr_stub(no_err, CPU_SPURIOUS)
@@ -1,54 +1,51 @@
#include <libk/std.h>
#include <amd64/io.h>
#include <libk/std.h>

/// Perform outb instruction (send 8-bit int)
void amd64_io_outb (uint16_t port, uint8_t v) {
  __asm__ volatile ("outb %1, %0" ::"dN"(port), "a"(v));
}

/// Perform outw instruction (send 16-bit int)
void amd64_io_outw (uint16_t port, uint16_t v) {
  __asm__ volatile ("outw %%ax, %%dx" ::"a"(v), "d"(port));
}

/// Perform outl instruction (send 32-bit int)
void amd64_io_outl (uint16_t port, uint32_t v) {
  __asm__ volatile ("outl %%eax, %%dx" ::"d"(port), "a"(v));
}

/// Perform outsw instruction (send a string)
void amd64_io_outsw (uint16_t port, const void* addr, int cnt) {
  __asm__ volatile(
      "cld; rep outsw"
      : "+S"(addr), "+c"(cnt)
      : "d"(port)
      : "memory", "cc"
  );
  __asm__ volatile ("cld; rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}

/// Perform inb instruction (receive 8-bit int)
uint8_t amd64_io_inb (uint16_t port) {
  uint8_t r;
  __asm__ volatile ("inb %1, %0" : "=a"(r) : "dN"(port));
  return r;
}

/// Perform inw instruction (receive 16-bit int)
uint16_t amd64_io_inw (uint16_t port) {
  uint16_t r;
  __asm__ volatile ("inw %%dx, %%ax" : "=a"(r) : "d"(port));
  return r;
}

/// Perform inl instruction (receive 32-bit int)
uint32_t amd64_io_inl (uint16_t port) {
  uint32_t r;
  __asm__ volatile ("inl %%dx, %%eax" : "=a"(r) : "d"(port));
  return r;
}

/// Perform insw instruction (receive a string)
void amd64_io_insw (uint16_t port, void* addr, int cnt) {
  __asm__ volatile(
      "cld; rep insw"
      : "+D"(addr), "+c"(cnt)
      : "d"(port)
      : "memory", "cc"
  );
  __asm__ volatile ("cld; rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}

void amd64_io_wait(void) {
  amd64_io_outb(0x80, 0);
}
/// output a byte on port 0x80, which does a small IO delay
void amd64_io_wait (void) { amd64_io_outb (0x80, 0); }
321
kernel/amd64/mm.c
Normal file
@@ -0,0 +1,321 @@
|
||||
#include <amd64/apic.h>
|
||||
#include <amd64/intr_defs.h>
|
||||
#include <aux/compiler.h>
|
||||
#include <irq/irq.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <limine/requests.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/mm.h>
|
||||
#include <sys/smp.h>
|
||||
|
||||
#define AMD64_PG_PRESENT (1 << 0)
|
||||
#define AMD64_PG_RW (1 << 1)
|
||||
#define AMD64_PG_USER (1 << 2)
|
||||
#define AMD64_PG_HUGE (1 << 7)
|
||||
|
||||
/* Auxiliary struct for page directory walking */
|
||||
struct pg_index {
|
||||
uint16_t pml4, pml3, pml2, pml1;
|
||||
} PACKED;
|
||||
|
||||
/* Kernel page directory */
|
||||
static struct pd kernel_pd;
|
||||
static spin_lock_t kernel_pd_lock;
|
||||
|
||||
void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }

void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
|
||||
/* Get current value of CR3 register */
|
||||
static uintptr_t amd64_current_cr3 (void) {
|
||||
uintptr_t cr3;
|
||||
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
|
||||
return cr3;
|
||||
}
|
||||
|
||||
/* Load kernel CR3 as current CR3 */
|
||||
void amd64_load_kernel_cr3 (void) {
|
||||
uintptr_t cr3 = amd64_current_cr3 ();
|
||||
|
||||
if (cr3 != kernel_pd.cr3_paddr) {
|
||||
__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
|
||||
}
|
||||
}
|
||||
|
||||
struct pd* mm_get_kernel_pd (void) { return &kernel_pd; }
|
||||
|
||||
/* Extract PML info from virtual address */
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
  struct pg_index ret;

  ret.pml4 = ((vaddr >> 39) & 0x1FF);
  ret.pml3 = ((vaddr >> 30) & 0x1FF);
  ret.pml2 = ((vaddr >> 21) & 0x1FF);
  ret.pml1 = ((vaddr >> 12) & 0x1FF);

  return ret;
}
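As a quick check of the shift/mask logic above, a higher-half address decomposes as follows (values worked out by hand; the address itself is only an example):

/* vaddr = 0xFFFFFFFF80201000
 *   pml4 = (vaddr >> 39) & 0x1FF = 511
 *   pml3 = (vaddr >> 30) & 0x1FF = 510
 *   pml2 = (vaddr >> 21) & 0x1FF = 1
 *   pml1 = (vaddr >> 12) & 0x1FF = 1
 *   page offset = vaddr & 0xFFF  = 0x000 */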
/* Walk paging tables and allocate necessary structures along the way */
|
||||
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
|
||||
uint64_t entry = table[entry_idx];
|
||||
physaddr_t paddr;
|
||||
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
if (entry & AMD64_PG_PRESENT) {
|
||||
if (entry & AMD64_PG_HUGE)
|
||||
return NULL;
|
||||
|
||||
paddr = entry & ~0xFFFULL;
|
||||
} else {
|
||||
if (!alloc)
|
||||
return NULL;
|
||||
|
||||
paddr = pmm_alloc (1);
|
||||
|
||||
if (paddr == PMM_ALLOC_ERR)
|
||||
return NULL;
|
||||
|
||||
memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
|
||||
table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW | AMD64_PG_USER;
|
||||
}
|
||||
|
||||
return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
|
||||
}
|
||||
|
||||
static bool amd64_mm_is_table_empty (uint64_t* table) {
|
||||
for (size_t i = 0; i < 512; i++) {
|
||||
if (table[i] & AMD64_PG_PRESENT)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Convert generic memory management subsystem flags into AMD64-specific flags */
|
||||
static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
|
||||
uint64_t flags = 0;
|
||||
|
||||
flags |= ((generic & MM_PG_PRESENT) ? AMD64_PG_PRESENT : 0);
|
||||
flags |= ((generic & MM_PG_RW) ? AMD64_PG_RW : 0);
|
||||
flags |= ((generic & MM_PG_USER) ? AMD64_PG_USER : 0);
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
/* Reload the current CR3 value ON A LOCAL CPU */
|
||||
static void amd64_reload_cr3 (void) {
|
||||
uint64_t cr3;
|
||||
__asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
|
||||
}
|
||||
|
||||
/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
|
||||
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
|
||||
if (pml3 == NULL)
|
||||
return;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
|
||||
if (pml2 == NULL)
|
||||
return;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
|
||||
if (pml1 == NULL)
|
||||
return;
|
||||
|
||||
uint64_t* pte = &pml1[pg_index.pml1];
|
||||
|
||||
*pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
|
||||
}
|
||||
|
||||
/* Map a page into kernel page directory */
|
||||
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
|
||||
mm_map_page (&kernel_pd, paddr, vaddr, flags);
|
||||
amd64_reload_cr3 ();
|
||||
}
|
||||
|
||||
/* Unmap a virtual address. TLB needs to be flushed afterwards */
|
||||
void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
|
||||
if (pml3 == NULL)
|
||||
return;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
|
||||
if (pml2 == NULL)
|
||||
return;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
|
||||
if (pml1 == NULL)
|
||||
return;
|
||||
|
||||
uint64_t* pte = &pml1[pg_index.pml1];
|
||||
|
||||
if ((*pte) & AMD64_PG_PRESENT)
|
||||
*pte = 0;
|
||||
|
||||
if (amd64_mm_is_table_empty (pml1)) {
|
||||
uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
|
||||
pmm_free (pml1_phys, 1);
|
||||
pml2[pg_index.pml2] = 0;
|
||||
|
||||
if (amd64_mm_is_table_empty (pml2)) {
|
||||
uintptr_t pml2_phys = pml3[pg_index.pml3] & ~0xFFFULL;
|
||||
pmm_free (pml2_phys, 1);
|
||||
pml3[pg_index.pml3] = 0;
|
||||
|
||||
if (amd64_mm_is_table_empty (pml3)) {
|
||||
uintptr_t pml3_phys = pml4[pg_index.pml4] & ~0xFFFULL;
|
||||
pmm_free (pml3_phys, 1);
|
||||
pml4[pg_index.pml4] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Unmap a page from kernel page directory */
|
||||
void mm_unmap_kernel_page (uintptr_t vaddr) {
|
||||
mm_unmap_page (&kernel_pd, vaddr);
|
||||
amd64_reload_cr3 ();
|
||||
}
|
||||
|
||||
/* Allocate a userspace-ready page directory */
|
||||
uintptr_t mm_alloc_user_pd_phys (void) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
physaddr_t cr3 = pmm_alloc (1);
|
||||
if (cr3 == PMM_ALLOC_ERR)
|
||||
return 0;
|
||||
|
||||
uint8_t* vu_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + cr3);
|
||||
memset ((void*)vu_cr3, 0, PAGE_SIZE / 2);
|
||||
|
||||
uint8_t* vk_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + (uintptr_t)kernel_pd.cr3_paddr);
|
||||
|
||||
memcpy (&vu_cr3[PAGE_SIZE / 2], &vk_cr3[PAGE_SIZE / 2], PAGE_SIZE / 2);
|
||||
|
||||
return cr3;
|
||||
}
|
||||
|
||||
bool mm_validate (struct pd* pd, uintptr_t vaddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
bool ret = false;
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
|
||||
if (pml3 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
|
||||
if (pml2 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
|
||||
if (pml1 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t pte = pml1[pg_index.pml1];
|
||||
ret = (pte & AMD64_PG_PRESENT) != 0;
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
|
||||
bool ok = true;
|
||||
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
ok = mm_validate (pd, vaddr + i);
|
||||
if (!ok)
|
||||
goto done;
|
||||
}
|
||||
|
||||
done:
|
||||
return ok;
|
||||
}
|
||||
|
||||
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t ret = 0;
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
|
||||
for (size_t i4 = 0; i4 < 512; i4++) {
|
||||
if (!(pml4[i4] & AMD64_PG_PRESENT))
|
||||
continue;
|
||||
|
||||
uint64_t* pml3 = (uint64_t*)((uintptr_t)hhdm->offset + (pml4[i4] & ~0xFFFULL));
|
||||
for (size_t i3 = 0; i3 < 512; i3++) {
|
||||
if (!(pml3[i3] & AMD64_PG_PRESENT))
|
||||
continue;
|
||||
|
||||
uint64_t* pml2 = (uint64_t*)((uintptr_t)hhdm->offset + (pml3[i3] & ~0xFFFULL));
|
||||
for (size_t i2 = 0; i2 < 512; i2++) {
|
||||
if (!(pml2[i2] & AMD64_PG_PRESENT))
|
||||
continue;
|
||||
|
||||
uint64_t* pml1 = (uint64_t*)((uintptr_t)hhdm->offset + (pml2[i2] & ~0xFFFULL));
|
||||
for (size_t i1 = 0; i1 < 512; i1++) {
|
||||
if ((pml1[i1] & AMD64_PG_PRESENT) && ((pml1[i1] & ~0xFFFULL) == (paddr & ~0xFFFULL))) {
|
||||
struct pg_index idx = {i4, i3, i2, i1};
|
||||
ret = (((uint64_t)idx.pml4 << 39) | ((uint64_t)idx.pml3 << 30) |
|
||||
((uint64_t)idx.pml2 << 21) | ((uint64_t)idx.pml1 << 12) | (paddr & 0xFFFULL));
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
uintptr_t ret = 0;
|
||||
|
||||
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
|
||||
struct pg_index pg_index = amd64_mm_page_index (vaddr);
|
||||
|
||||
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
|
||||
if (pml3 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
|
||||
if (pml2 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
|
||||
if (pml1 == NULL)
|
||||
goto done;
|
||||
|
||||
uint64_t pte = pml1[pg_index.pml1];
|
||||
|
||||
if (!(pte & AMD64_PG_PRESENT))
|
||||
goto done;
|
||||
|
||||
ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Initialize essentials for the AMD64 memory management subsystem */
|
||||
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
|
||||
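A hedged usage sketch of the mapping API above: back one page-aligned kernel virtual address with fresh physical memory and check that the translation round-trips. It assumes these functions are declared in <sys/mm.h> and that callers may take mm_kernel_lock around kernel page-directory updates; the helper itself is hypothetical:

#include <libk/std.h>
#include <mm/pmm.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>

bool kmap_one_page (uintptr_t vaddr) { /* vaddr assumed page-aligned */
  spin_lock_ctx_t ctx;
  physaddr_t paddr = pmm_alloc (1);

  if (paddr == PMM_ALLOC_ERR)
    return false;

  mm_kernel_lock (&ctx);
  mm_map_kernel_page (paddr, vaddr, MM_PG_PRESENT | MM_PG_RW);
  mm_kernel_unlock (&ctx);

  /* mm_v2p should now resolve the mapping back to the physical page */
  return mm_v2p (mm_get_kernel_pd (), vaddr) == paddr;
}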
@@ -1,6 +1,15 @@
|
||||
#ifndef _KERNEL_AMD64_MM_H
|
||||
#define _KERNEL_AMD64_MM_H
|
||||
|
||||
#include <libk/std.h>
|
||||
#include <sync/spin_lock.h>
|
||||
|
||||
#define PAGE_SIZE 4096
|
||||
|
||||
struct pd {
|
||||
uintptr_t cr3_paddr;
|
||||
};
|
||||
|
||||
void amd64_load_kernel_cr3 (void);
|
||||
|
||||
#endif // _KERNEL_AMD64_MM_H
|
||||
|
||||
1093
kernel/amd64/msr-index.h
Normal file
File diff suppressed because it is too large
16
kernel/amd64/msr.c
Normal file
@@ -0,0 +1,16 @@
|
||||
#include <amd64/msr.h>
#include <libk/std.h>

/// Read a model-specific register
uint64_t amd64_rdmsr (uint32_t msr) {
  uint32_t low, high;
  __asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr));
  return ((uint64_t)high << 32 | (uint64_t)low);
}

/// Write a model-specific register
void amd64_wrmsr (uint32_t msr, uint64_t value) {
  uint32_t low = (uint32_t)(value & 0xFFFFFFFF);
  uint32_t high = (uint32_t)(value >> 32);
  __asm__ volatile ("wrmsr" ::"c"(msr), "a"(low), "d"(high));
}
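A small read-modify-write sketch on top of these helpers; it mirrors what syscall_init does further down and assumes MSR_EFER and EFER_SCE are defined in msr-index.h:

#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>

/* Enable the SYSCALL/SYSRET extension bit without clobbering other EFER bits. */
static void efer_enable_sce (void) {
  uint64_t efer = amd64_rdmsr (MSR_EFER);
  amd64_wrmsr (MSR_EFER, efer | EFER_SCE);
}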
9
kernel/amd64/msr.h
Normal file
@@ -0,0 +1,9 @@
|
||||
#ifndef _KERNEL_AMD64_MSR_H
|
||||
#define _KERNEL_AMD64_MSR_H
|
||||
|
||||
#include <libk/std.h>
|
||||
|
||||
uint64_t amd64_rdmsr (uint32_t msr);
|
||||
void amd64_wrmsr (uint32_t msr, uint64_t value);
|
||||
|
||||
#endif // _KERNEL_AMD64_MSR_H
|
||||
138
kernel/amd64/proc.c
Normal file
@@ -0,0 +1,138 @@
|
||||
#include <amd64/gdt.h>
|
||||
#include <amd64/proc.h>
|
||||
#include <aux/elf.h>
|
||||
#include <libk/align.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <limine/requests.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/proc.h>
|
||||
|
||||
static atomic_int pids = 0;
|
||||
|
||||
struct proc* proc_from_elf (uint8_t* elf_contents) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
struct proc* proc = malloc (sizeof (*proc));
|
||||
if (proc == NULL)
|
||||
return NULL;
|
||||
|
||||
memset (proc, 0, sizeof (*proc));
|
||||
|
||||
proc->lock = SPIN_LOCK_INIT;
|
||||
atomic_store (&proc->state, PROC_READY);
|
||||
proc->pid = atomic_fetch_add (&pids, 1);
|
||||
|
||||
proc->procgroup = procgroup_create ();
|
||||
if (proc->procgroup == NULL) {
|
||||
free (proc);
|
||||
return NULL;
|
||||
}
|
||||
procgroup_attach (proc->procgroup, proc);
|
||||
|
||||
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
|
||||
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
|
||||
|
||||
procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
|
||||
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
|
||||
|
||||
proc->flags |= PROC_USTK_PREALLOC;
|
||||
|
||||
struct elf_aux aux = proc_load_segments (proc, elf_contents);
|
||||
|
||||
proc->pdata.regs.ss = GDT_UDATA | 0x03;
|
||||
proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
|
||||
proc->pdata.regs.rflags = 0x202;
|
||||
proc->pdata.regs.cs = GDT_UCODE | 0x03;
|
||||
proc->pdata.regs.rip = aux.entry;
|
||||
|
||||
return proc;
|
||||
}
|
||||
|
||||
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
|
||||
uintptr_t argument_ptr) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
spin_lock_ctx_t ctxprt;
|
||||
|
||||
struct proc* proc = malloc (sizeof (*proc));
|
||||
if (proc == NULL)
|
||||
return NULL;
|
||||
|
||||
memset (proc, 0, sizeof (*proc));
|
||||
|
||||
proc->lock = SPIN_LOCK_INIT;
|
||||
atomic_store (&proc->state, PROC_READY);
|
||||
proc->pid = atomic_fetch_add (&pids, 1);
|
||||
|
||||
spin_lock (&proto->lock, &ctxprt);
|
||||
|
||||
proc->procgroup = proto->procgroup;
|
||||
procgroup_attach (proc->procgroup, proc);
|
||||
|
||||
spin_unlock (&proto->lock, &ctxprt);
|
||||
|
||||
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
|
||||
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
|
||||
|
||||
proc->pdata.regs.ss = GDT_UDATA | 0x03;
|
||||
proc->pdata.regs.rsp = (uint64_t)vstack_top;
|
||||
proc->pdata.regs.rflags = 0x202;
|
||||
proc->pdata.regs.cs = GDT_UCODE | 0x03;
|
||||
proc->pdata.regs.rip = (uint64_t)entry;
|
||||
|
||||
proc->uvaddr_argument = argument_ptr;
|
||||
|
||||
proc_init_tls (proc);
|
||||
|
||||
return proc;
|
||||
}
|
||||
|
||||
void proc_cleanup (struct proc* proc) {
|
||||
proc_sqs_cleanup (proc);
|
||||
proc_mutexes_cleanup (proc);
|
||||
|
||||
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
|
||||
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
|
||||
|
||||
procgroup_detach (proc->procgroup, proc);
|
||||
|
||||
/* clean the process */
|
||||
free (proc);
|
||||
}
|
||||
|
||||
void proc_init_tls (struct proc* proc) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
if (proc->procgroup->tls.tls_tmpl == NULL)
|
||||
return;
|
||||
|
||||
size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
|
||||
size_t pages = proc->procgroup->tls.tls_tmpl_pages;
|
||||
|
||||
uintptr_t tls_paddr;
|
||||
uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
|
||||
|
||||
uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
|
||||
|
||||
uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
|
||||
|
||||
memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
|
||||
memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
|
||||
|
||||
uintptr_t ktcb = k_tls_addr + tls_size;
|
||||
uintptr_t utcb = tls_vaddr + tls_size;
|
||||
|
||||
*(uintptr_t*)ktcb = utcb;
|
||||
|
||||
proc->pdata.fs_base = utcb;
|
||||
proc->pdata.tls_vaddr = tls_vaddr;
|
||||
}
|
||||
22
kernel/amd64/proc.h
Normal file
@@ -0,0 +1,22 @@
|
||||
#ifndef _KERNEL_AMD64_PROC_H
|
||||
#define _KERNEL_AMD64_PROC_H
|
||||
|
||||
#include <amd64/intr.h>
|
||||
#include <libk/std.h>
|
||||
|
||||
/* Top of userspace process' stack */
|
||||
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
|
||||
/* Size of userspace process' stack */
|
||||
#define USTACK_SIZE (256 * PAGE_SIZE)
|
||||
/* proc_map () base address */
|
||||
#define PROC_MAP_BASE 0x0000700000000000
|
||||
|
||||
/* Platform-dependent process data */
|
||||
struct proc_platformdata {
|
||||
struct saved_regs regs;
|
||||
uintptr_t kernel_stack;
|
||||
uint64_t fs_base;
|
||||
uintptr_t tls_vaddr;
|
||||
};
|
||||
|
||||
#endif // _KERNEL_AMD64_PROC_H
|
||||
13
kernel/amd64/procgroup.h
Normal file
@@ -0,0 +1,13 @@
|
||||
#ifndef _KERNEL_AMD64_PROCGROUP_H
#define _KERNEL_AMD64_PROCGROUP_H
|
||||
|
||||
#include <libk/std.h>
|
||||
|
||||
struct procgroup_tls {
|
||||
uint8_t* tls_tmpl;
|
||||
size_t tls_tmpl_size;
|
||||
size_t tls_tmpl_total_size;
|
||||
size_t tls_tmpl_pages;
|
||||
};
|
||||
|
||||
#endif // _KERNEL_AMD64_PROCGROUP_H
|
||||
55
kernel/amd64/regsasm.h
Normal file
@@ -0,0 +1,55 @@
|
||||
#ifndef _KERNEL_AMD64_REGSASM_H
|
||||
#define _KERNEL_AMD64_REGSASM_H
|
||||
|
||||
#define push_regs \
|
||||
pushq % rax; \
|
||||
pushq % rcx; \
|
||||
pushq % rdx; \
|
||||
pushq % rsi; \
|
||||
pushq % rdi; \
|
||||
pushq % rbp; \
|
||||
pushq % rbx; \
|
||||
pushq % r8; \
|
||||
pushq % r9; \
|
||||
pushq % r10; \
|
||||
pushq % r11; \
|
||||
pushq % r12; \
|
||||
pushq % r13; \
|
||||
pushq % r14; \
|
||||
pushq % r15;
|
||||
|
||||
#define pop_regs \
|
||||
popq % r15; \
|
||||
popq % r14; \
|
||||
popq % r13; \
|
||||
popq % r12; \
|
||||
popq % r11; \
|
||||
popq % r10; \
|
||||
popq % r9; \
|
||||
popq % r8; \
|
||||
popq % rbx; \
|
||||
popq % rbp; \
|
||||
popq % rdi; \
|
||||
popq % rsi; \
|
||||
popq % rdx; \
|
||||
popq % rcx; \
|
||||
popq % rax;
|
||||
|
||||
#define pop_regs_skip_rax \
|
||||
popq % r15; \
|
||||
popq % r14; \
|
||||
popq % r13; \
|
||||
popq % r12; \
|
||||
popq % r11; \
|
||||
popq % r10; \
|
||||
popq % r9; \
|
||||
popq % r8; \
|
||||
popq % rbx; \
|
||||
popq % rbp; \
|
||||
popq % rdi; \
|
||||
popq % rsi; \
|
||||
popq % rdx; \
|
||||
popq % rcx; \
|
||||
addq $8, % rsp
|
||||
|
||||
#endif // _KERNEL_AMD64_REGSASM_H
|
||||
9
kernel/amd64/sched.S
Normal file
@@ -0,0 +1,9 @@
|
||||
#include <amd64/regsasm.h>
|
||||
|
||||
.global amd64_do_sched
|
||||
amd64_do_sched:
|
||||
movq %rsi, %cr3
|
||||
movq %rdi, %rsp
|
||||
pop_regs
|
||||
addq $16, %rsp
|
||||
iretq
|
||||
7
kernel/amd64/sched.h
Normal file
@@ -0,0 +1,7 @@
|
||||
#ifndef _KERNEL_AMD64_SCHED_H
|
||||
#define _KERNEL_AMD64_SCHED_H
|
||||
|
||||
/// Perform process context switch
|
||||
void amd64_do_sched (void* regs, void* cr3);
|
||||
|
||||
#endif // _KERNEL_AMD64_SCHED_H
|
||||
23
kernel/amd64/sched1.c
Normal file
@@ -0,0 +1,23 @@
|
||||
#include <amd64/msr-index.h>
|
||||
#include <amd64/msr.h>
|
||||
#include <amd64/sched.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/mm.h>
|
||||
#include <sys/smp.h>
|
||||
|
||||
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
|
||||
spin_lock_ctx_t ctxpr;
|
||||
|
||||
spin_lock (&proc->lock, &ctxpr);
|
||||
|
||||
thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
|
||||
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
|
||||
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
|
||||
|
||||
spin_unlock (&proc->lock, &ctxpr);
|
||||
spin_unlock (cpu_lock, ctxcpu);
|
||||
|
||||
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
|
||||
}
|
||||
114
kernel/amd64/smp.c
Normal file
@@ -0,0 +1,114 @@
|
||||
#include <amd64/apic.h>
|
||||
#include <amd64/init.h>
|
||||
#include <amd64/intr_defs.h>
|
||||
#include <amd64/mm.h>
|
||||
#include <amd64/msr-index.h>
|
||||
#include <amd64/msr.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <limine/requests.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/sched.h>
|
||||
#include <sys/smp.h>
|
||||
#include <sys/syscall.h>
|
||||
|
||||
/// Cpu ID counter
|
||||
static atomic_uint cpu_counter = 0;
|
||||
/// The CPUs
|
||||
static struct cpu cpus[CPUS_MAX];
|
||||
|
||||
static atomic_int cpu_init_count;
|
||||
|
||||
/// Allocate a CPU structure
|
||||
struct cpu* cpu_make (uint64_t lapic_id) {
|
||||
int id = atomic_fetch_add (&cpu_counter, 1);
|
||||
|
||||
struct cpu* cpu = &cpus[id];
|
||||
|
||||
memset (cpu, 0, sizeof (*cpu));
|
||||
cpu->lock = SPIN_LOCK_INIT;
|
||||
cpu->id = id;
|
||||
cpu->lapic_id = lapic_id;
|
||||
|
||||
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
struct cpu* cpu_get (void) {
|
||||
struct cpu* ptr = (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void cpu_request_sched (struct cpu* cpu) {
|
||||
if (cpu == thiscpu) {
|
||||
proc_sched ();
|
||||
return;
|
||||
}
|
||||
|
||||
amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
|
||||
}
|
||||
|
||||
struct cpu* cpu_find_lightest (void) {
|
||||
struct cpu* cpu = &cpus[0];
|
||||
|
||||
int load = atomic_load (&cpu->proc_run_q_count);
|
||||
|
||||
for (unsigned int i = 1; i < cpu_counter; i++) {
|
||||
struct cpu* new_cpu = &cpus[i];
|
||||
int new_load = atomic_load (&new_cpu->proc_run_q_count);
|
||||
if (new_load < load) {
|
||||
load = new_load;
|
||||
cpu = new_cpu;
|
||||
}
|
||||
}
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
/// Bootstrap code for non-BSP CPUs
|
||||
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
|
||||
amd64_load_kernel_cr3 ();
|
||||
|
||||
struct cpu* cpu = cpu_make (mp_info->lapic_id);
|
||||
|
||||
amd64_init (cpu, true); /* gdt + idt */
|
||||
syscall_init ();
|
||||
|
||||
amd64_lapic_init (1000);
|
||||
|
||||
DEBUG ("CPU %u is online!\n", thiscpu->id);
|
||||
|
||||
atomic_fetch_sub (&cpu_init_count, 1);
|
||||
|
||||
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
|
||||
proc_register (spin_proc, thiscpu);
|
||||
|
||||
spin_lock_ctx_t ctxcpu;
|
||||
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
|
||||
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
|
||||
}
|
||||
|
||||
/// Initialize SMP subsystem for AMD64. Start AP CPUs
|
||||
void smp_init (void) {
|
||||
amd64_lapic_init (1000);
|
||||
|
||||
struct limine_mp_response* mp = limine_mp_request.response;
|
||||
|
||||
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
|
||||
|
||||
for (size_t i = 0; i < mp->cpu_count; i++) {
|
||||
if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
|
||||
DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
|
||||
mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
|
||||
}
|
||||
}
|
||||
|
||||
while (atomic_load (&cpu_init_count) > 0)
|
||||
;
|
||||
|
||||
DEBUG ("All CPUs are online\n");
|
||||
}
|
||||
44
kernel/amd64/smp.h
Normal file
@@ -0,0 +1,44 @@
|
||||
#ifndef _KERNEL_AMD64_SMP_H
|
||||
#define _KERNEL_AMD64_SMP_H
|
||||
|
||||
#include <amd64/gdt.h>
|
||||
#include <amd64/intr.h>
|
||||
#include <amd64/tss.h>
|
||||
#include <aux/compiler.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
|
||||
#define CPUS_MAX 32
|
||||
|
||||
struct cpu {
|
||||
/* for syscall instruction */
|
||||
uintptr_t syscall_user_stack;
|
||||
uintptr_t syscall_kernel_stack;
|
||||
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
|
||||
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
|
||||
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
|
||||
volatile struct gdt_extended gdt ALIGNED (16);
|
||||
volatile struct tss tss;
|
||||
|
||||
uintptr_t lapic_mmio_base;
|
||||
uint64_t lapic_ticks;
|
||||
uint64_t lapic_id;
|
||||
uint32_t id;
|
||||
|
||||
spin_lock_t lock;
|
||||
|
||||
struct list_node_link* proc_run_q;
|
||||
struct proc* proc_current;
|
||||
atomic_int proc_run_q_count;
|
||||
};
|
||||
|
||||
struct cpu* cpu_make (uint64_t lapic_id);
|
||||
struct cpu* cpu_get (void);
|
||||
void cpu_request_sched (struct cpu* cpu);
|
||||
struct cpu* cpu_find_lightest (void);
|
||||
|
||||
#define thiscpu (cpu_get ())
|
||||
|
||||
#endif // _KERNEL_AMD64_SMP_H
|
||||
@@ -1,3 +1,4 @@
|
||||
.global amd64_spin
|
||||
amd64_spin:
|
||||
hlt
|
||||
jmp amd64_spin
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
#include <sys/spin_lock.h>
|
||||
|
||||
void spin_lock_relax(void) {
|
||||
__asm__ volatile("pause");
|
||||
}
|
||||
/// Relax the spinlock using AMD64 pause instruction
|
||||
void spin_lock_relax (void) { __asm__ volatile ("pause"); }
|
||||
|
||||
@@ -1,20 +1,40 @@
|
||||
c += amd64/bootmain.c \
|
||||
amd64/init.c \
|
||||
amd64/tss.c \
|
||||
amd64/io.c \
|
||||
amd64/debug.c \
|
||||
amd64/spin_lock.c \
|
||||
amd64/intr.c
|
||||
amd64/intr.c \
|
||||
amd64/apic.c \
|
||||
amd64/msr.c \
|
||||
amd64/hpet.c \
|
||||
amd64/mm.c \
|
||||
amd64/time.c \
|
||||
amd64/smp.c \
|
||||
amd64/sched1.c \
|
||||
amd64/proc.c \
|
||||
amd64/syscall.c
|
||||
|
||||
S += amd64/intr_stub.S \
|
||||
amd64/spin.S
|
||||
amd64/spin.S \
|
||||
amd64/sched.S \
|
||||
amd64/syscallentry.S
|
||||
|
||||
o += amd64/bootmain.o \
|
||||
amd64/init.o \
|
||||
amd64/tss.o \
|
||||
amd64/io.o \
|
||||
amd64/debug.o \
|
||||
amd64/spin_lock.o \
|
||||
amd64/intr.o \
|
||||
amd64/intr_stub.o \
|
||||
amd64/spin.o
|
||||
amd64/spin.o \
|
||||
amd64/apic.o \
|
||||
amd64/msr.o \
|
||||
amd64/hpet.o \
|
||||
amd64/mm.o \
|
||||
amd64/time.o \
|
||||
amd64/smp.o \
|
||||
amd64/sched.o \
|
||||
amd64/sched1.o \
|
||||
amd64/proc.o \
|
||||
amd64/syscall.o \
|
||||
amd64/syscallentry.o
|
||||
|
||||
46
kernel/amd64/syscall.c
Normal file
@@ -0,0 +1,46 @@
|
||||
#include <amd64/gdt.h>
|
||||
#include <amd64/intr.h>
|
||||
#include <amd64/mm.h>
|
||||
#include <amd64/msr-index.h>
|
||||
#include <amd64/msr.h>
|
||||
#include <libk/string.h>
|
||||
#include <m/status.h>
|
||||
#include <m/syscall_defs.h>
|
||||
#include <proc/proc.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/smp.h>
|
||||
#include <syscall/syscall.h>
|
||||
|
||||
extern void amd64_syscall_entry (void);
|
||||
|
||||
uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
|
||||
spin_lock_ctx_t ctxcpu, ctxpr;
|
||||
|
||||
amd64_load_kernel_cr3 ();
|
||||
struct saved_regs* regs = stack_ptr;
|
||||
|
||||
spin_lock (&thiscpu->lock, &ctxcpu);
|
||||
struct proc* caller = thiscpu->proc_current;
|
||||
spin_lock (&caller->lock, &ctxpr);
|
||||
|
||||
memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
|
||||
|
||||
spin_unlock (&caller->lock, &ctxpr);
|
||||
spin_unlock (&thiscpu->lock, &ctxcpu);
|
||||
|
||||
int syscall_num = regs->rax;
|
||||
syscall_handler_func_t func = syscall_find_handler (syscall_num);
|
||||
|
||||
if (func == NULL) {
|
||||
return -ST_SYSCALL_NOT_FOUND;
|
||||
}
|
||||
|
||||
return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
|
||||
}
|
||||
|
||||
void syscall_init (void) {
|
||||
amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
|
||||
amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
|
||||
amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
|
||||
amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
|
||||
}
|
||||
49
kernel/amd64/syscallentry.S
Normal file
@@ -0,0 +1,49 @@
|
||||
#include <amd64/regsasm.h>
|
||||
|
||||
.extern amd64_syscall_dispatch
|
||||
|
||||
.global amd64_syscall_entry
|
||||
amd64_syscall_entry:
|
||||
cli
|
||||
|
||||
movq %rsp, %gs:0
|
||||
movq %gs:8, %rsp
|
||||
|
||||
pushq $0x1b
|
||||
pushq %gs:0
|
||||
pushq %r11
|
||||
pushq $0x23
|
||||
pushq %rcx
|
||||
pushq $0
|
||||
pushq $0
|
||||
|
||||
push_regs
|
||||
|
||||
movw $0x10, %ax
|
||||
movw %ax, %ds
|
||||
movw %ax, %es
|
||||
movw %ax, %ss
|
||||
|
||||
cld
|
||||
|
||||
movq %rsp, %rdi
|
||||
|
||||
movq %cr3, %rax; pushq %rax
|
||||
|
||||
movq %rsp, %rbp
|
||||
|
||||
subq $8, %rsp
|
||||
andq $-16, %rsp
|
||||
|
||||
callq amd64_syscall_dispatch
|
||||
|
||||
movq %rbp, %rsp
|
||||
|
||||
popq %rbx; movq %rbx, %cr3
|
||||
|
||||
pop_regs_skip_rax
|
||||
|
||||
addq $56, %rsp
|
||||
movq %gs:0, %rsp
|
||||
|
||||
sysretq
|
||||
6
kernel/amd64/time.c
Normal file
@@ -0,0 +1,6 @@
|
||||
#include <amd64/hpet.h>
|
||||
#include <libk/std.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
/// Sleep for given amount of microseconds
|
||||
void sleep_micro (size_t us) { amd64_hpet_sleep_micro (us); }
|
||||
@@ -1,8 +0,0 @@
|
||||
#include <libk/std.h>
|
||||
#include <amd64/tss.h>
|
||||
|
||||
__attribute__((aligned(16))) static volatile struct tss tss;
|
||||
|
||||
volatile struct tss *amd64_get_tss(void) {
|
||||
return &tss;
|
||||
}
|
||||
@@ -1,8 +1,10 @@
|
||||
#ifndef _KERNEL_AMD64_TSS_H
|
||||
#define _KERNEL_AMD64_TSS_H
|
||||
|
||||
#include <aux/compiler.h>
|
||||
#include <libk/std.h>
|
||||
|
||||
/// 64-bit TSS structure: https://wiki.osdev.org/Task_State_Segment
|
||||
struct tss {
|
||||
uint32_t resv0;
|
||||
uint64_t rsp0;
|
||||
@@ -13,8 +15,6 @@ struct tss {
|
||||
uint64_t resv2;
|
||||
uint16_t resv3;
|
||||
uint16_t iopb_off;
|
||||
} __attribute__((packed));
|
||||
|
||||
volatile struct tss *amd64_get_tss(void);
|
||||
} PACKED;
|
||||
|
||||
#endif // _KERNEL_AMD64_TSS_H
|
||||
|
||||
2
kernel/amd64/vars.mk
Normal file
@@ -0,0 +1,2 @@
|
||||
# make vars
|
||||
PLATFORM_ACPI=1
|
||||
9
kernel/aux/compiler.h
Normal file
@@ -0,0 +1,9 @@
|
||||
#ifndef _KERNEL_AUX_COMPILER_H
|
||||
#define _KERNEL_AUX_COMPILER_H
|
||||
|
||||
#define PACKED __attribute__ ((packed))
|
||||
#define ALIGNED(N) __attribute__ ((aligned ((N))))
|
||||
#define SECTION(name) __attribute__ ((section (name)))
|
||||
#define UNUSED __attribute__ ((unused))
|
||||
|
||||
#endif // _KERNEL_AUX_COMPILER_H
|
||||
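A tiny illustration of combining these attribute wrappers; the descriptor struct is hypothetical:

#include <aux/compiler.h>
#include <libk/std.h>

/* Tightly packed, 16-byte-aligned descriptor. */
struct sample_desc {
  uint16_t limit;
  uint64_t base;
} PACKED ALIGNED (16);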
4555
kernel/aux/elf.h
Normal file
File diff suppressed because it is too large
@@ -1,2 +1,6 @@
|
||||
include $(platform)/flags.mk
|
||||
include generic/flags.mk
|
||||
|
||||
ifeq ($(PLATFORM_ACPI),1)
|
||||
include uACPI/flags.mk
|
||||
endif
|
||||
|
||||
@@ -8,7 +8,7 @@ cflags += -nostdinc \
|
||||
-Wextra \
|
||||
-mcmodel=kernel
|
||||
|
||||
cflags += -isystem . -isystem c_headers/include
|
||||
cflags += -isystem . -isystem ../include
|
||||
|
||||
cflags += -DPRINTF_INCLUDE_CONFIG_H=1 \
|
||||
-D_ALLOC_SKIP_DEFINE
|
||||
|
||||
1
kernel/irq/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.o
|
||||
46
kernel/irq/irq.c
Normal file
@@ -0,0 +1,46 @@
|
||||
#include <irq/irq.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/std.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <sync/spin_lock.h>
|
||||
#include <sys/debug.h>
|
||||
|
||||
#if defined(__x86_64__)
|
||||
#include <amd64/apic.h>
|
||||
#include <amd64/intr.h>
|
||||
#endif
|
||||
|
||||
struct irq* irq_table[0x100];
|
||||
|
||||
static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
|
||||
|
||||
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
|
||||
spin_lock_ctx_t ctxiqa;
|
||||
|
||||
struct irq* irq = malloc (sizeof (*irq));
|
||||
if (irq == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
irq->func = func;
|
||||
irq->arg = arg;
|
||||
irq->irq_num = irq_num;
|
||||
|
||||
spin_lock (&irqs_lock, &ctxiqa);
|
||||
irq_table[irq_num] = irq;
|
||||
spin_unlock (&irqs_lock, &ctxiqa);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
struct irq* irq_find (uint32_t irq_num) {
|
||||
spin_lock_ctx_t ctxiqa;
|
||||
|
||||
spin_lock (&irqs_lock, &ctxiqa);
|
||||
|
||||
struct irq* irq = irq_table[irq_num];
|
||||
|
||||
spin_unlock (&irqs_lock, &ctxiqa);
|
||||
|
||||
return irq;
|
||||
}
|
||||
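An illustrative consumer of this table; the handler and the choice of legacy IRQ 1 (keyboard) are assumptions for the example, not code from the tree:

#include <irq/irq.h>
#include <libk/std.h>
#include <sys/debug.h>

static atomic_uint kbd_irqs;

/* Hypothetical handler: count interrupts seen on the attached line. */
static void kbd_handler (void* arg, void* regs) {
  (void)regs;
  atomic_fetch_add ((atomic_uint*)arg, 1);
}

void kbd_counter_install (void) {
  if (!irq_attach (kbd_handler, &kbd_irqs, 1))
    DEBUG ("irq_attach failed for IRQ 1\n");
}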
20
kernel/irq/irq.h
Normal file
@@ -0,0 +1,20 @@
|
||||
#ifndef _KERNEL_IRQ_IRQ_H
|
||||
#define _KERNEL_IRQ_IRQ_H
|
||||
|
||||
#include <libk/list.h>
|
||||
#include <libk/std.h>
|
||||
|
||||
typedef void (*irq_func_t) (void* arg, void* regs);
|
||||
|
||||
struct irq {
|
||||
struct list_node_link irqs_link;
|
||||
|
||||
irq_func_t func;
|
||||
void* arg;
|
||||
uint32_t irq_num;
|
||||
};
|
||||
|
||||
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
|
||||
struct irq* irq_find (uint32_t irq_num);
|
||||
|
||||
#endif // _KERNEL_IRQ_IRQ_H
|
||||
3
kernel/irq/src.mk
Normal file
@@ -0,0 +1,3 @@
|
||||
c += irq/irq.c
|
||||
|
||||
o += irq/irq.o
|
||||
15
kernel/libk/assert.h
Normal file
@@ -0,0 +1,15 @@
|
||||
#ifndef _KERNEL_LIBK_ASSERT_H
|
||||
#define _KERNEL_LIBK_ASSERT_H
|
||||
|
||||
#include <sys/spin.h>
|
||||
|
||||
#define assert(x) \
|
||||
do { \
|
||||
if (!(x)) { \
|
||||
DEBUG ("%s assertion failed\n", #x); \
|
||||
spin (); \
|
||||
__builtin_unreachable (); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif // _KERNEL_LIBK_ASSERT_H
|
||||
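A minimal usage sketch; the allocation check is hypothetical:

#include <libk/assert.h>
#include <mm/liballoc.h>

static void assert_demo (void) {
  void* buf = malloc (64);
  assert (buf != NULL); /* on failure: prints the expression, then spin ()s forever */
}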
@@ -1,5 +1,5 @@
|
||||
#include <libk/std.h>
|
||||
#include <libk/bm.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
|
||||
void bm_init (struct bm* bm, uint8_t* base, size_t nbits) {
|
||||
@@ -45,16 +45,16 @@ bool bm_test(struct bm *bm, size_t k) {
|
||||
* Set a range of bits in a bitmap. if starting bit is out of range, we fail.
|
||||
*/
|
||||
bool bm_set_region (struct bm* bm, size_t k, size_t m) {
|
||||
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
|
||||
if (((k + m) > bm->nbits) || (k + m) < k)
|
||||
return false;
|
||||
|
||||
for (size_t i = k; i < m; i++) {
|
||||
for (size_t i = k; i < (k + m); i++) {
|
||||
bool taken = bm_test (bm, i);
|
||||
if (taken)
|
||||
return false;
|
||||
}
|
||||
|
||||
for (size_t i = k; i < m; i++)
|
||||
for (size_t i = k; i < (k + m); i++)
|
||||
bm_set (bm, i);
|
||||
|
||||
return true;
|
||||
@@ -64,10 +64,10 @@ bool bm_set_region(struct bm *bm, size_t k, size_t m) {
|
||||
* Clear a range of bits in a bitmap. starting bit must be in range.
|
||||
*/
|
||||
void bm_clear_region (struct bm* bm, size_t k, size_t m) {
|
||||
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
|
||||
if (((k + m) > bm->nbits) || (k + m) < k)
|
||||
return;
|
||||
|
||||
for (size_t i = k; i < m; i++)
|
||||
for (size_t i = k; i < (k + m); i++)
|
||||
bm_clear (bm, i);
|
||||
}
|
||||
|
||||
@@ -78,10 +78,10 @@ void bm_clear_region(struct bm *bm, size_t k, size_t m) {
|
||||
* useful for implementing the physical memory manager algorithm.
|
||||
*/
|
||||
bool bm_test_region (struct bm* bm, size_t k, size_t m) {
|
||||
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
|
||||
if (((k + m) > bm->nbits) || (k + m) < k)
|
||||
return true;
|
||||
|
||||
for (size_t i = k; i < m; i++) {
|
||||
for (size_t i = k; i < (k + m); i++) {
|
||||
bool test = bm_test (bm, i);
|
||||
if (test)
|
||||
return true;
|
||||
|
||||
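With the corrected bounds check, k is the first bit and m is the bit count, so a call touches bits k through k+m-1. A small sketch, with an arbitrary backing buffer size:

#include <libk/bm.h>
#include <libk/std.h>

static uint8_t backing[32]; /* 256 bits */
static struct bm bm;

static void bm_demo (void) {
  bm_init (&bm, backing, 256);

  bm_set_region (&bm, 8, 16);   /* marks bits 8..23 */
  bm_clear_region (&bm, 8, 16); /* clears them again */
  bm_set_region (&bm, 250, 10); /* fails: 250 + 10 > 256 */
}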
170
kernel/libk/list.h
Normal file
@@ -0,0 +1,170 @@
|
||||
#ifndef _KERNEL_LIBK_LIST_H
|
||||
#define _KERNEL_LIBK_LIST_H
|
||||
|
||||
struct list_node_link {
|
||||
struct list_node_link* next;
|
||||
struct list_node_link* prev;
|
||||
};
|
||||
|
||||
#define list_entry(ptr, type, member) ((type*)((char*)(ptr) - offsetof (type, member)))
|
||||
|
||||
#define list_append(head, new) \
|
||||
do { \
|
||||
if ((new) != NULL) { \
|
||||
(new)->next = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp->next != NULL) { \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
__tmp->next = (new); \
|
||||
(new)->prev = __tmp; \
|
||||
} else { \
|
||||
(new)->prev = NULL; \
|
||||
(head) = (new); \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_prepend(head, new) \
|
||||
do { \
|
||||
if ((new) != NULL) { \
|
||||
(new)->prev = NULL; \
|
||||
(new)->next = (head); \
|
||||
if ((head) != NULL) { \
|
||||
(head)->prev = (new); \
|
||||
} \
|
||||
(head) = (new); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_remove(head, ele) \
|
||||
do { \
|
||||
if ((ele) != NULL) { \
|
||||
if ((ele)->prev != NULL) { \
|
||||
(ele)->prev->next = (ele)->next; \
|
||||
} else { \
|
||||
(head) = (ele)->next; \
|
||||
} \
|
||||
if ((ele)->next != NULL) { \
|
||||
(ele)->next->prev = (ele)->prev; \
|
||||
} \
|
||||
(ele)->next = NULL; \
|
||||
(ele)->prev = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_find(head, out, propname, propvalue) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp) { \
|
||||
if (__tmp->propname == (propvalue)) { \
|
||||
(out) = __tmp; \
|
||||
break; \
|
||||
} \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_foreach(head, var, tmp) \
|
||||
for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
|
||||
var = tmp, tmp = (var ? var->next : NULL))
|
||||
|
||||
#define list_foreach_index(head, var, tmp, idx) \
|
||||
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
|
||||
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
|
||||
|
||||
#define list_foreach_index_limit(head, var, tmp, idx, max) \
|
||||
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
|
||||
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
|
||||
|
||||
#define list_back(head, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp->next != NULL) { \
|
||||
__tmp = __tmp->next; \
|
||||
} \
|
||||
(out) = __tmp; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_front(head, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if ((head) != NULL) { \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp->prev != NULL) { \
|
||||
__tmp = __tmp->prev; \
|
||||
} \
|
||||
(out) = __tmp; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_insert_after(head, pos, new) \
|
||||
do { \
|
||||
if ((pos) != NULL && (new) != NULL) { \
|
||||
(new)->prev = (pos); \
|
||||
(new)->next = (pos)->next; \
|
||||
if ((pos)->next != NULL) { \
|
||||
(pos)->next->prev = (new); \
|
||||
} \
|
||||
(pos)->next = (new); \
|
||||
} else if ((pos) == NULL && (head) == NULL) { \
|
||||
(new)->prev = NULL; \
|
||||
(new)->next = NULL; \
|
||||
(head) = (new); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_insert_before(head, pos, new) \
|
||||
do { \
|
||||
if ((pos) != NULL && (new) != NULL) { \
|
||||
(new)->next = (pos); \
|
||||
(new)->prev = (pos)->prev; \
|
||||
if ((pos)->prev != NULL) { \
|
||||
(pos)->prev->next = (new); \
|
||||
} else { \
|
||||
(head) = (new); \
|
||||
} \
|
||||
(pos)->prev = (new); \
|
||||
} else if ((pos) == NULL && (head) == NULL) { \
|
||||
(new)->prev = NULL; \
|
||||
(new)->next = NULL; \
|
||||
(head) = (new); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_index_of(head, ele, out_idx) \
|
||||
do { \
|
||||
(out_idx) = -1; \
|
||||
int __idx = 0; \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp != NULL) { \
|
||||
if (__tmp == (ele)) { \
|
||||
(out_idx) = __idx; \
|
||||
break; \
|
||||
} \
|
||||
__tmp = __tmp->next; \
|
||||
__idx++; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define list_index_of_prop(head, propname, propvalue, out_idx) \
|
||||
do { \
|
||||
(out_idx) = -1; \
|
||||
int __idx = 0; \
|
||||
struct list_node_link* __tmp = (head); \
|
||||
while (__tmp != NULL) { \
|
||||
if (__tmp->propname == (propvalue)) { \
|
||||
(out_idx) = __idx; \
|
||||
break; \
|
||||
} \
|
||||
__tmp = __tmp->next; \
|
||||
__idx++; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif // _KERNEL_LIBK_LIST_H
|
||||
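An illustrative use of these macros with a hypothetical element type embedding the link node:

#include <libk/list.h>
#include <libk/std.h>

struct item {
  int value;
  struct list_node_link link;
};

static struct list_node_link* head;

static void list_demo (struct item* a, struct item* b) {
  struct list_node_link *it, *tmp;

  list_append (head, &a->link);   /* a goes to the tail (or becomes the head) */
  list_prepend (head, &b->link);  /* b becomes the new head */

  list_foreach (head, it, tmp) {
    struct item* e = list_entry (it, struct item, link);
    (void)e->value; /* visit each element; tmp makes removal during iteration safe */
  }

  list_remove (head, &a->link);
}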
@@ -1,3 +1,2 @@
|
||||
|
||||
void putchar_ (char x) { (void)x; }
|
||||
|
||||
|
||||
323
kernel/libk/rbtree.h
Normal file
@@ -0,0 +1,323 @@
|
||||
#ifndef _KERNEL_LIBK_RBTREE_H
|
||||
#define _KERNEL_LIBK_RBTREE_H
|
||||
|
||||
struct rb_node_link {
|
||||
struct rb_node_link* left;
|
||||
struct rb_node_link* right;
|
||||
struct rb_node_link* parent;
|
||||
int color;
|
||||
};
|
||||
|
||||
#define RBTREE_RED 0
|
||||
#define RBTREE_BLACK 1
|
||||
|
||||
#define rbtree_parent(x) ((x)->parent)
|
||||
#define rbtree_left(x) ((x)->left)
|
||||
#define rbtree_right(x) ((x)->right)
|
||||
#define rbtree_color(x) ((x)->color)
|
||||
|
||||
#define rbtree_entry(node, type, member) ((type*)((char*)(node) - offsetof (type, member)))
|
||||
|
||||
#define rbtree_node_color(x) ((x) ? (x)->color : RBTREE_BLACK)
|
||||
|
||||
#define rbtree_rotate_left(root_ptr, x_node) \
|
||||
do { \
|
||||
struct rb_node_link* __x = (x_node); \
|
||||
struct rb_node_link* __y = __x->right; \
|
||||
__x->right = __y->left; \
|
||||
if (__y->left) \
|
||||
__y->left->parent = __x; \
|
||||
__y->parent = __x->parent; \
|
||||
if (!__x->parent) \
|
||||
*(root_ptr) = __y; \
|
||||
else if (__x == __x->parent->left) \
|
||||
__x->parent->left = __y; \
|
||||
else \
|
||||
__x->parent->right = __y; \
|
||||
__y->left = __x; \
|
||||
__x->parent = __y; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_rotate_right(root_ptr, y_node) \
|
||||
do { \
|
||||
struct rb_node_link* __y = (y_node); \
|
||||
struct rb_node_link* __x = __y->left; \
|
||||
__y->left = __x->right; \
|
||||
if (__x->right) \
|
||||
__x->right->parent = __y; \
|
||||
__x->parent = __y->parent; \
|
||||
if (!__y->parent) \
|
||||
*(root_ptr) = __x; \
|
||||
else if (__y == __y->parent->right) \
|
||||
__y->parent->right = __x; \
|
||||
else \
|
||||
__y->parent->left = __x; \
|
||||
__x->right = __y; \
|
||||
__y->parent = __x; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_insert_fixup(root_ptr, z_node) \
|
||||
do { \
|
||||
struct rb_node_link* __z = (z_node); \
|
||||
while (__z->parent && __z->parent->color == RBTREE_RED) { \
|
||||
if (__z->parent == __z->parent->parent->left) { \
|
||||
struct rb_node_link* __y = __z->parent->parent->right; \
|
||||
if (rbtree_node_color (__y) == RBTREE_RED) { \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__y->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
__z = __z->parent->parent; \
|
||||
} else { \
|
||||
if (__z == __z->parent->right) { \
|
||||
__z = __z->parent; \
|
||||
rbtree_rotate_left (root_ptr, __z); \
|
||||
} \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
rbtree_rotate_right (root_ptr, __z->parent->parent); \
|
||||
} \
|
||||
} else { \
|
||||
struct rb_node_link* __y = __z->parent->parent->left; \
|
||||
if (rbtree_node_color (__y) == RBTREE_RED) { \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__y->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
__z = __z->parent->parent; \
|
||||
} else { \
|
||||
if (__z == __z->parent->left) { \
|
||||
__z = __z->parent; \
|
||||
rbtree_rotate_right (root_ptr, __z); \
|
||||
} \
|
||||
__z->parent->color = RBTREE_BLACK; \
|
||||
__z->parent->parent->color = RBTREE_RED; \
|
||||
rbtree_rotate_left (root_ptr, __z->parent->parent); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
(*(root_ptr))->color = RBTREE_BLACK; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_insert(type, root_ptr, node, member, keyfield) \
|
||||
do { \
|
||||
struct rb_node_link** __link = (root_ptr); \
|
||||
struct rb_node_link* __parent = NULL; \
|
||||
struct rb_node_link* __new = (node); \
|
||||
type* __nobj = rbtree_entry (__new, type, member); \
|
||||
while (*__link) { \
|
||||
__parent = *__link; \
|
||||
type* __xobj = rbtree_entry (*__link, type, member); \
|
||||
if (__nobj->keyfield < __xobj->keyfield) \
|
||||
__link = &((*__link)->left); \
|
||||
else \
|
||||
__link = &((*__link)->right); \
|
||||
} \
|
||||
__new->parent = __parent; \
|
||||
__new->left = __new->right = NULL; \
|
||||
__new->color = RBTREE_RED; \
|
||||
*__link = __new; \
|
||||
rbtree_insert_fixup (root_ptr, __new); \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_find(type, root_ptr, keyval, out, member, keyfield) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct rb_node_link* __cur = *(root_ptr); \
|
||||
while (__cur) { \
|
||||
type* __obj = rbtree_entry (__cur, type, member); \
|
||||
if ((keyval) == __obj->keyfield) { \
|
||||
(out) = rbtree_entry (__cur, type, member); \
|
||||
break; \
|
||||
} else if ((keyval) < __obj->keyfield) \
|
||||
__cur = __cur->left; \
|
||||
else \
|
||||
__cur = __cur->right; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_min(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct rb_node_link* __n = (node); \
|
||||
while (__n && __n->left) \
|
||||
__n = __n->left; \
|
||||
(out) = __n; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_max(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
struct rb_node_link* __n = (node); \
|
||||
while (__n && __n->right) \
|
||||
__n = __n->right; \
|
||||
(out) = __n; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
|
||||
#define rbtree_last(root_ptr, out) rbtree_max (*(root_ptr), out)
|
||||
|
||||
#define rbtree_transplant(root_ptr, u_node, v_node) \
|
||||
do { \
|
||||
struct rb_node_link* __u = (u_node); \
|
||||
struct rb_node_link* __v = (v_node); \
|
||||
if (!__u->parent) \
|
||||
*(root_ptr) = __v; \
|
||||
else if (__u == __u->parent->left) \
|
||||
__u->parent->left = __v; \
|
||||
else \
|
||||
__u->parent->right = __v; \
|
||||
if (__v) \
|
||||
__v->parent = __u->parent; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_delete_fixup(root_ptr, x_node, xparent_node) \
|
||||
do { \
|
||||
struct rb_node_link* __rdf_x = (x_node); \
|
||||
struct rb_node_link* __rdf_xp = (xparent_node); \
|
||||
while (__rdf_xp && (__rdf_x == NULL || __rdf_x->color == RBTREE_BLACK)) { \
|
||||
if (__rdf_x == __rdf_xp->left) { \
|
||||
struct rb_node_link* __w = __rdf_xp->right; \
|
||||
if (rbtree_node_color (__w) == RBTREE_RED) { \
|
||||
__w->color = RBTREE_BLACK; \
|
||||
__rdf_xp->color = RBTREE_RED; \
|
||||
rbtree_rotate_left (root_ptr, __rdf_xp); \
|
||||
__w = __rdf_xp->right; \
|
||||
} \
|
||||
if (rbtree_node_color (__w->left) == RBTREE_BLACK && \
|
||||
rbtree_node_color (__w->right) == RBTREE_BLACK) { \
|
||||
if (__w) \
|
||||
__w->color = RBTREE_RED; \
|
||||
__rdf_x = __rdf_xp; \
|
||||
__rdf_xp = __rdf_x->parent; \
|
||||
} else { \
|
||||
if (rbtree_node_color (__w->right) == RBTREE_BLACK) { \
|
||||
if (__w->left) \
|
||||
__w->left->color = RBTREE_BLACK; \
|
||||
__w->color = RBTREE_RED; \
|
||||
rbtree_rotate_right (root_ptr, __w); \
|
||||
__w = __rdf_xp->right; \
|
||||
} \
|
||||
__w->color = __rdf_xp->color; \
|
||||
__rdf_xp->color = RBTREE_BLACK; \
|
||||
if (__w->right) \
|
||||
__w->right->color = RBTREE_BLACK; \
|
||||
rbtree_rotate_left (root_ptr, __rdf_xp); \
|
||||
__rdf_x = *(root_ptr); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
struct rb_node_link* __w = __rdf_xp->left; \
|
||||
if (rbtree_node_color (__w) == RBTREE_RED) { \
|
||||
__w->color = RBTREE_BLACK; \
|
||||
__rdf_xp->color = RBTREE_RED; \
|
||||
rbtree_rotate_right (root_ptr, __rdf_xp); \
|
||||
__w = __rdf_xp->left; \
|
||||
} \
|
||||
if (rbtree_node_color (__w->right) == RBTREE_BLACK && \
|
||||
rbtree_node_color (__w->left) == RBTREE_BLACK) { \
|
||||
if (__w) \
|
||||
__w->color = RBTREE_RED; \
|
||||
__rdf_x = __rdf_xp; \
|
||||
__rdf_xp = __rdf_x->parent; \
|
||||
} else { \
|
||||
if (rbtree_node_color (__w->left) == RBTREE_BLACK) { \
|
||||
if (__w->right) \
|
||||
__w->right->color = RBTREE_BLACK; \
|
||||
__w->color = RBTREE_RED; \
|
||||
rbtree_rotate_left (root_ptr, __w); \
|
||||
__w = __rdf_xp->left; \
|
||||
} \
|
||||
__w->color = __rdf_xp->color; \
|
||||
__rdf_xp->color = RBTREE_BLACK; \
|
||||
if (__w->left) \
|
||||
__w->left->color = RBTREE_BLACK; \
|
||||
rbtree_rotate_right (root_ptr, __rdf_xp); \
|
||||
__rdf_x = *(root_ptr); \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
if (__rdf_x) \
|
||||
__rdf_x->color = RBTREE_BLACK; \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_delete(root_ptr, z_node) \
|
||||
do { \
|
||||
struct rb_node_link* __rd_z = (z_node); \
|
||||
struct rb_node_link* __rd_y = __rd_z; \
|
||||
struct rb_node_link* __rd_x = NULL; \
|
||||
struct rb_node_link* __rd_xp = NULL; \
|
||||
int __rd_y_orig_color = __rd_y->color; \
|
||||
if (!__rd_z->left) { \
|
||||
__rd_x = __rd_z->right; \
|
||||
__rd_xp = __rd_z->parent; \
|
||||
rbtree_transplant (root_ptr, __rd_z, __rd_z->right); \
|
||||
} else if (!__rd_z->right) { \
|
||||
__rd_x = __rd_z->left; \
|
||||
__rd_xp = __rd_z->parent; \
|
||||
rbtree_transplant (root_ptr, __rd_z, __rd_z->left); \
|
||||
} else { \
|
||||
rbtree_min (__rd_z->right, __rd_y); \
|
||||
__rd_y_orig_color = __rd_y->color; \
|
||||
__rd_x = __rd_y->right; \
|
||||
if (__rd_y->parent == __rd_z) { \
|
||||
__rd_xp = __rd_y; \
|
||||
if (__rd_x) \
|
||||
__rd_x->parent = __rd_y; \
|
||||
} else { \
|
||||
__rd_xp = __rd_y->parent; \
|
||||
rbtree_transplant (root_ptr, __rd_y, __rd_y->right); \
|
||||
__rd_y->right = __rd_z->right; \
|
||||
__rd_y->right->parent = __rd_y; \
|
||||
} \
|
||||
rbtree_transplant (root_ptr, __rd_z, __rd_y); \
|
||||
__rd_y->left = __rd_z->left; \
|
||||
__rd_y->left->parent = __rd_y; \
|
||||
__rd_y->color = __rd_z->color; \
|
||||
} \
|
||||
if (__rd_y_orig_color == RBTREE_BLACK) \
|
||||
rbtree_delete_fixup (root_ptr, __rd_x, __rd_xp); \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_next(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if (node) { \
|
||||
if ((node)->right) { \
|
||||
struct rb_node_link* __n = (node)->right; \
|
||||
while (__n->left) \
|
||||
__n = __n->left; \
|
||||
(out) = __n; \
|
||||
} else { \
|
||||
struct rb_node_link* __n = (node); \
|
||||
struct rb_node_link* __p = (node)->parent; \
|
||||
while (__p && __n == __p->right) { \
|
||||
__n = __p; \
|
||||
__p = __p->parent; \
|
||||
} \
|
||||
(out) = __p; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define rbtree_prev(node, out) \
|
||||
do { \
|
||||
(out) = NULL; \
|
||||
if (node) { \
|
||||
if ((node)->left) { \
|
||||
struct rb_node_link* __n = (node)->left; \
|
||||
while (__n->right) \
|
||||
__n = __n->right; \
|
||||
(out) = __n; \
|
||||
} else { \
|
||||
struct rb_node_link* __n = (node); \
|
||||
struct rb_node_link* __p = (node)->parent; \
|
||||
while (__p && __n == __p->left) { \
|
||||
__n = __p; \
|
||||
__p = __p->parent; \
|
||||
} \
|
||||
(out) = __p; \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif // _KERNEL_LIBK_RBTREE_H
|
||||
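An illustrative use of the macros above with a hypothetical element keyed by an integer id:

#include <libk/rbtree.h>
#include <libk/std.h>

struct task {
  int id;
  struct rb_node_link node;
};

static struct rb_node_link* root;

static void rbtree_demo (struct task* t) {
  struct task* found;

  rbtree_insert (struct task, &root, &t->node, node, id); /* keyed on ->id */
  rbtree_find (struct task, &root, t->id, found, node, id);

  if (found != NULL)
    rbtree_delete (&root, &found->node);
}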
@@ -4,10 +4,10 @@
|
||||
#include <limits.h>
|
||||
#include <stdalign.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdatomic.h>
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <stdnoreturn.h>
|
||||
#include <stdatomic.h>
|
||||
|
||||
#endif // _KERNEL_LIBK_STD_H
|
||||
|
||||
@@ -21,12 +21,14 @@ size_t memcpy(void *dst, const void *src, size_t n) {
|
||||
// SOURCE: https://stackoverflow.com/a/48967408
|
||||
void strncpy (char* dst, const char* src, size_t n) {
|
||||
size_t i = 0;
|
||||
while(i++ != n && (*dst++ = *src++));
|
||||
while (i++ != n && (*dst++ = *src++))
|
||||
;
|
||||
}
|
||||
|
||||
size_t strlen (const char* str) {
|
||||
const char* s;
|
||||
for (s = str; *s; ++s);
|
||||
for (s = str; *s; ++s)
|
||||
;
|
||||
return (s - str);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
#ifndef _KERNEL_LIBK_STRING_H
|
||||
#define _KERNEL_LIBK_STRING_H
|
||||
|
||||
#include <libk/std.h>
|
||||
|
||||
size_t memset (void* dst, uint8_t b, size_t n);
|
||||
size_t memcpy (void* dst, const void* src, size_t n);
|
||||
void strncpy (char* dst, const char* src, size_t n);
|
||||
|
||||
@@ -1,20 +1,23 @@
#include <aux/compiler.h>
#include <limine/limine.h>

#define DECL_REQ(small, big) \
  __attribute__((used, section(".limine_requests"))) \
  struct limine_ ## small ## _request limine_ ## small ## _request = { \
    .id = LIMINE_ ## big ## _REQUEST_ID, \
    .revision = 4 \
  }
  SECTION (".limine_requests") \
  struct limine_##small##_request limine_##small##_request = {.id = LIMINE_##big##_REQUEST_ID, \
                                                              .revision = 4}

__attribute__((used, section(".limine_requests")))
SECTION (".limine_requests")
volatile uint64_t limine_base_revision[] = LIMINE_BASE_REVISION (4);

__attribute__((used, section(".limine_requests_start")))
SECTION (".limine_requests_start")
volatile uint64_t limine_requests_start_marker[] = LIMINE_REQUESTS_START_MARKER;

__attribute__((used, section(".limine_requests_end")))
SECTION (".limine_requests_end")
volatile uint64_t limine_requests_end_marker[] = LIMINE_REQUESTS_END_MARKER;

DECL_REQ (hhdm, HHDM);
DECL_REQ (memmap, MEMMAP);
DECL_REQ (rsdp, RSDP);
DECL_REQ (mp, MP);
DECL_REQ (module, MODULE);
DECL_REQ (framebuffer, FRAMEBUFFER);
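For orientation only, not in the diff: DECL_REQ (hhdm, HHDM) now expands to a request object placed in the .limine_requests section, with SECTION presumably being the aux/compiler.h shorthand for the old __attribute__((used, section(...))) spelling. The bootloader fills in the .response pointer before handing control to the kernel, which is how the allocator code further down reads the HHDM offset. A sketch of the expansion and of a typical consumer (the helper function is illustrative):

/* Roughly what DECL_REQ (hhdm, HHDM) produces, under the stated assumption: */
SECTION (".limine_requests")
struct limine_hhdm_request limine_hhdm_request = {.id = LIMINE_HHDM_REQUEST_ID, .revision = 4};

/* Consumers check .response before using it: */
static uint64_t hhdm_offset (void) {
  struct limine_hhdm_response* r = limine_hhdm_request.response;
  return r ? r->offset : 0; /* 0 only as an illustrative fallback */
}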

@@ -3,10 +3,13 @@

#include <limine/limine.h>

#define EXTERN_REQ(small) \
  extern struct limine_ ## small ## _request limine_ ## small ## _request
#define EXTERN_REQ(small) extern struct limine_##small##_request limine_##small##_request

EXTERN_REQ (hhdm);
EXTERN_REQ (memmap);
EXTERN_REQ (rsdp);
EXTERN_REQ (mp);
EXTERN_REQ (module);
EXTERN_REQ (framebuffer);

#endif // _KERNEL_LIMINE_REQUESTS_H
@@ -1,22 +1,23 @@
/* liballoc breaks when optimized too aggressively, for eg. clang's -Oz */
#pragma clang optimize off

#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <mm/types.h>
#include <sync/spin_lock.h>
#include <limine/requests.h>
#include <sys/debug.h>
#include <sys/mm.h>

/* Porting */
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;

int liballoc_lock(void) {
  spin_lock(&_liballoc_lock);
int liballoc_lock (void* ctx) {
  spin_lock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
  return 0;
}

int liballoc_unlock(void) {
  spin_unlock(&_liballoc_lock);
int liballoc_unlock (void* ctx) {
  spin_unlock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
  return 0;
}
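A note on the new hook signatures, separate from the diff itself: the lock and unlock hooks now thread a caller-owned spin_lock_ctx_t through spin_lock/spin_unlock (in designs like this the context typically carries saved interrupt state, but that is an assumption here, not something the diff states). The allocator entry points below all follow the same pattern, roughly:

/* Sketch of the calling convention used by malloc/free/realloc below. */
void example_critical_section (void) {
  spin_lock_ctx_t ctx;    /* lives on the caller's stack */
  liballoc_lock (&ctx);   /* forwards to spin_lock (&_liballoc_lock, &ctx) */
  /* ... touch allocator state ... */
  liballoc_unlock (&ctx); /* must be handed the same context */
}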

@@ -28,6 +29,7 @@ void *liballoc_alloc(int pages) {

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t addr = (uintptr_t)(p_addr + hhdm->offset);

  return (void*)addr;
}
@@ -55,61 +57,36 @@ int liballoc_free(void *ptr, int pages) {

#define MODE MODE_BEST

#ifdef DEBUG
#include <stdio.h>
#endif


struct boundary_tag* l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP];              //< Allowing for 2^MAXEXP blocks


#ifdef DEBUG
unsigned int l_allocated = 0; //< The real amount of memory allocated.
unsigned int l_inuse = 0;     //< The amount of memory in use (malloc'ed).
#endif


static int l_initialized = 0; //< Flag to indicate initialization.
static int l_pageSize = 4096; //< Individual page size
static int l_pageCount = 16;  //< Minimum number of pages to allocate.


// *********** HELPER FUNCTIONS *******************************

/** Returns the exponent required to manage 'size' amount of memory.
 *
 * Returns n where 2^n <= size < 2^(n+1)
 */
static inline int getexp( unsigned int size )
{
  if ( size < (1<<MINEXP) )
  {
#ifdef DEBUG
    printf("getexp returns -1 for %i less than MINEXP\n", size );
#endif
static inline int getexp (unsigned int size) {
  if (size < (1 << MINEXP)) {
    return -1; // Smaller than the quantum.
  }

  int shift = MINEXP;

  while ( shift < MAXEXP )
  {
    if ( (1<<shift) > size ) break;
  while (shift < MAXEXP) {
    if ((1 << shift) > size)
      break;
    shift += 1;
  }

#ifdef DEBUG
  printf("getexp returns %i (%i bytes) for %i size\n", shift - 1, (1<<(shift -1)), size );
#endif

  return shift - 1;
}
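Worked example, not from the source: provided MINEXP <= 7 and MAXEXP > 8, getexp (200) returns 7, because 2^7 = 128 <= 200 < 256 = 2^8. malloc below adds MODE to this exponent before indexing l_freePages, then clamps the result to at least MINEXP.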

static void* liballoc_memset(void* s, int c, size_t n)
{
static void* liballoc_memset (void* s, int c, size_t n) {
  size_t i;
  for (i = 0; i < n; i++)
    ((char*)s)[i] = c;
@@ -117,15 +94,13 @@ static void* liballoc_memset(void* s, int c, size_t n)
  return s;
}

static void* liballoc_memcpy(void* s1, const void* s2, size_t n)
{
static void* liballoc_memcpy (void* s1, const void* s2, size_t n) {
  char* cdest;
  char* csrc;
  unsigned int* ldest = (unsigned int*)s1;
  unsigned int* lsrc = (unsigned int*)s2;

  while ( n >= sizeof(unsigned int) )
  {
  while (n >= sizeof (unsigned int)) {
    *ldest++ = *lsrc++;
    n -= sizeof (unsigned int);
  }
@@ -133,8 +108,7 @@ static void* liballoc_memcpy(void* s1, const void* s2, size_t n)
  cdest = (char*)ldest;
  csrc = (char*)lsrc;

  while ( n > 0 )
  {
  while (n > 0) {
    *cdest++ = *csrc++;
    n -= 1;
  }
@@ -142,58 +116,19 @@ static void* liballoc_memcpy(void* s1, const void* s2, size_t n)
  return s1;
}

#ifdef DEBUG
static void dump_array()
{
  int i = 0;
  struct boundary_tag *tag = NULL;

  printf("------ Free pages array ---------\n");
  printf("System memory allocated: %i\n", l_allocated );
  printf("Memory in used (malloc'ed): %i\n", l_inuse );

  for ( i = 0; i < MAXEXP; i++ )
  {
    printf("%.2i(%i): ",i, l_completePages[i] );

    tag = l_freePages[ i ];
    while ( tag != NULL )
    {
      if ( tag->split_left != NULL ) printf("*");
      printf("%i", tag->real_size );
      if ( tag->split_right != NULL ) printf("*");

      printf(" ");
      tag = tag->next;
    }
    printf("\n");
  }

  printf("'*' denotes a split to the left/right of a tag\n");
  fflush( stdout );
}
#endif


static inline void insert_tag( struct boundary_tag *tag, int index )
{
static inline void insert_tag (struct boundary_tag* tag, int index) {
  int realIndex;

  if ( index < 0 )
  {
  if (index < 0) {
    realIndex = getexp (tag->real_size - sizeof (struct boundary_tag));
    if ( realIndex < MINEXP ) realIndex = MINEXP;
  }
  else
    if (realIndex < MINEXP)
      realIndex = MINEXP;
  } else
    realIndex = index;

  tag->index = realIndex;

  if ( l_freePages[ realIndex ] != NULL )
  {
  if (l_freePages[realIndex] != NULL) {
    l_freePages[realIndex]->prev = tag;
    tag->next = l_freePages[realIndex];
  }
@@ -201,34 +136,33 @@ static inline void insert_tag( struct boundary_tag *tag, int index )
  l_freePages[realIndex] = tag;
}

static inline void remove_tag( struct boundary_tag *tag )
{
  if ( l_freePages[ tag->index ] == tag ) l_freePages[ tag->index ] = tag->next;
static inline void remove_tag (struct boundary_tag* tag) {
  if (l_freePages[tag->index] == tag)
    l_freePages[tag->index] = tag->next;

  if ( tag->prev != NULL ) tag->prev->next = tag->next;
  if ( tag->next != NULL ) tag->next->prev = tag->prev;
  if (tag->prev != NULL)
    tag->prev->next = tag->next;
  if (tag->next != NULL)
    tag->next->prev = tag->prev;

  tag->next = NULL;
  tag->prev = NULL;
  tag->index = -1;
}


static inline struct boundary_tag* melt_left( struct boundary_tag *tag )
{
static inline struct boundary_tag* melt_left (struct boundary_tag* tag) {
  struct boundary_tag* left = tag->split_left;

  left->real_size += tag->real_size;
  left->split_right = tag->split_right;

  if ( tag->split_right != NULL ) tag->split_right->split_left = left;
  if (tag->split_right != NULL)
    tag->split_right->split_left = left;

  return left;
}


static inline struct boundary_tag* absorb_right( struct boundary_tag *tag )
{
static inline struct boundary_tag* absorb_right (struct boundary_tag* tag) {
  struct boundary_tag* right = tag->split_right;

  remove_tag (right); // Remove right from free pages.
@@ -242,8 +176,7 @@ static inline struct boundary_tag* absorb_right( struct boundary_tag *tag )
  return tag;
}

static inline struct boundary_tag* split_tag( struct boundary_tag* tag )
{
static inline struct boundary_tag* split_tag (struct boundary_tag* tag) {
  unsigned int remainder = tag->real_size - sizeof (struct boundary_tag) - tag->size;

  struct boundary_tag* new_tag =
@@ -258,7 +191,8 @@ static inline struct boundary_tag* split_tag( struct boundary_tag* tag )
  new_tag->split_left = tag;
  new_tag->split_right = tag->split_right;

  if (new_tag->split_right != NULL) new_tag->split_right->split_left = new_tag;
  if (new_tag->split_right != NULL)
    new_tag->split_right->split_left = new_tag;
  tag->split_right = new_tag;

  tag->real_size -= new_tag->real_size;
@@ -268,14 +202,9 @@ static inline struct boundary_tag* split_tag( struct boundary_tag* tag )
  return new_tag;
}


// ***************************************************************


static struct boundary_tag* allocate_new_tag( unsigned int size )
{
static struct boundary_tag* allocate_new_tag (unsigned int size) {
  unsigned int pages;
  unsigned int usage;
  struct boundary_tag* tag;
@@ -285,14 +214,17 @@ static struct boundary_tag* allocate_new_tag( unsigned int size )

  // Perfect amount of space
  pages = usage / l_pageSize;
  if ( (usage % l_pageSize) != 0 ) pages += 1;
  if ((usage % l_pageSize) != 0)
    pages += 1;

  // Make sure it's >= the minimum size.
  if ( pages < (unsigned int)l_pageCount ) pages = l_pageCount;
  if (pages < (unsigned int)l_pageCount)
    pages = l_pageCount;

  tag = (struct boundary_tag*)liballoc_alloc (pages);

  if ( tag == NULL ) return NULL; // uh oh, we ran out of memory.
  if (tag == NULL)
    return NULL; // uh oh, we ran out of memory.

  tag->magic = LIBALLOC_MAGIC;
  tag->size = size;
@@ -304,35 +236,19 @@ static struct boundary_tag* allocate_new_tag( unsigned int size )
  tag->split_left = NULL;
  tag->split_right = NULL;

#ifdef DEBUG
  printf("Resource allocated %x of %i pages (%i bytes) for %i size.\n", tag, pages, pages * l_pageSize, size );

  l_allocated += pages * l_pageSize;

  printf("Total memory usage = %i KB\n", (int)((l_allocated / (1024))) );
#endif

  return tag;
}


void *malloc(size_t size)
{
void* malloc (size_t size) {
  int index;
  void* ptr;
  struct boundary_tag* tag = NULL;
  spin_lock_ctx_t ctxliba;

  liballoc_lock();
  liballoc_lock (&ctxliba);

  if ( l_initialized == 0 )
  {
#ifdef DEBUG
    printf("%s\n","liballoc initializing.");
#endif
    for ( index = 0; index < MAXEXP; index++ )
    {
  if (l_initialized == 0) {
    for (index = 0; index < MAXEXP; index++) {
      l_freePages[index] = NULL;
      l_completePages[index] = 0;
    }
@@ -340,40 +256,29 @@ void *malloc(size_t size)
  }

  index = getexp (size) + MODE;
  if ( index < MINEXP ) index = MINEXP;

  if (index < MINEXP)
    index = MINEXP;

  // Find one big enough.
  tag = l_freePages[index]; // Start at the front of the list.
  while ( tag != NULL )
  {
  while (tag != NULL) {
    // If there's enough space in this tag.
    if ( (tag->real_size - sizeof(struct boundary_tag))
         >= (size + sizeof(struct boundary_tag) ) )
    {
#ifdef DEBUG
      printf("Tag search found %i >= %i\n",(tag->real_size - sizeof(struct boundary_tag)), (size + sizeof(struct boundary_tag) ) );
#endif
    if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
      break;
    }

    tag = tag->next;
  }


  // No page found. Make one.
  if ( tag == NULL )
  {
    if ( (tag = allocate_new_tag( size )) == NULL )
    {
      liballoc_unlock();
  if (tag == NULL) {
    if ((tag = allocate_new_tag (size)) == NULL) {
      liballoc_unlock (&ctxliba);
      return NULL;
    }

    index = getexp (tag->real_size - sizeof (struct boundary_tag));
  }
  else
  {
  } else {
    remove_tag (tag);

    if ((tag->split_left == NULL) && (tag->split_right == NULL))
@@ -386,150 +291,86 @@ void *malloc(size_t size)

  // Removed... see if we can re-use the excess space.

#ifdef DEBUG
  printf("Found tag with %i bytes available (requested %i bytes, leaving %i), which has exponent: %i (%i bytes)\n", tag->real_size - sizeof(struct boundary_tag), size, tag->real_size - size - sizeof(struct boundary_tag), index, 1<<index );
#endif
  unsigned int remainder =
      tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder

  unsigned int remainder = tag->real_size - size - sizeof( struct boundary_tag ) * 2; // Support a new tag + remainder

  if ( ((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/ )
  {
  if (((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
    int childIndex = getexp (remainder);

    if ( childIndex >= 0 )
    {
#ifdef DEBUG
      printf("Seems to be splittable: %i >= 2^%i .. %i\n", remainder, childIndex, (1<<childIndex) );
#endif

    if (childIndex >= 0) {
      struct boundary_tag* new_tag = split_tag (tag);

      (void)new_tag;

#ifdef DEBUG
      printf("Old tag has become %i bytes, new tag is now %i bytes (%i exp)\n", tag->real_size, new_tag->real_size, new_tag->index );
#endif
    }
  }


  ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));


#ifdef DEBUG
  l_inuse += size;
  printf("malloc: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024 );
  dump_array();
#endif

  liballoc_unlock();
  liballoc_unlock (&ctxliba);
  return ptr;
}


void free(void *ptr)
{
void free (void* ptr) {
  int index;
  struct boundary_tag* tag;
  spin_lock_ctx_t ctxliba;

  if ( ptr == NULL ) return;

  liballoc_lock();
  if (ptr == NULL)
    return;

  liballoc_lock (&ctxliba);

  tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));

  if ( tag->magic != LIBALLOC_MAGIC )
  {
    liballoc_unlock(); // release the lock
  if (tag->magic != LIBALLOC_MAGIC) {
    liballoc_unlock (&ctxliba); // release the lock
    return;
  }


#ifdef DEBUG
  l_inuse -= tag->size;
  printf("free: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024 );
#endif

  // MELT LEFT...
  while ( (tag->split_left != NULL) && (tag->split_left->index >= 0) )
  {
#ifdef DEBUG
    printf("Melting tag left into available memory. Left was %i, becomes %i (%i)\n", tag->split_left->real_size, tag->split_left->real_size + tag->real_size, tag->split_left->real_size );
#endif
  while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
    tag = melt_left (tag);
    remove_tag (tag);
  }

  // MELT RIGHT...
  while ( (tag->split_right != NULL) && (tag->split_right->index >= 0) )
  {
#ifdef DEBUG
    printf("Melting tag right into available memory. This was was %i, becomes %i (%i)\n", tag->real_size, tag->split_right->real_size + tag->real_size, tag->split_right->real_size );
#endif
  while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
    tag = absorb_right (tag);
  }


  // Where is it going back to?
  index = getexp (tag->real_size - sizeof (struct boundary_tag));
  if ( index < MINEXP ) index = MINEXP;
  if (index < MINEXP)
    index = MINEXP;

  // A whole, empty block?
  if ( (tag->split_left == NULL) && (tag->split_right == NULL) )
  {

    if ( l_completePages[ index ] == MAXCOMPLETE )
    {
  if ((tag->split_left == NULL) && (tag->split_right == NULL)) {
    if (l_completePages[index] == MAXCOMPLETE) {
      // Too many standing by to keep. Free this one.
      unsigned int pages = tag->real_size / l_pageSize;

      if ( (tag->real_size % l_pageSize) != 0 ) pages += 1;
      if ( pages < (unsigned int)l_pageCount ) pages = l_pageCount;
      if ((tag->real_size % l_pageSize) != 0)
        pages += 1;
      if (pages < (unsigned int)l_pageCount)
        pages = l_pageCount;

      liballoc_free (tag, pages);

#ifdef DEBUG
      l_allocated -= pages * l_pageSize;
      printf("Resource freeing %x of %i pages\n", tag, pages );
      dump_array();
#endif

      liballoc_unlock();
      liballoc_unlock (&ctxliba);
      return;
    }

    l_completePages[index] += 1; // Increase the count of complete pages.
  }

  // ..........

  insert_tag (tag, index);

#ifdef DEBUG
  printf("Returning tag with %i bytes (requested %i bytes), which has exponent: %i\n", tag->real_size, tag->size, index );
  dump_array();
#endif

  liballoc_unlock();
  liballoc_unlock (&ctxliba);
}



void* calloc(size_t nobj, size_t size)
{
void* calloc (size_t nobj, size_t size) {
  int real_size;
  void* p;

@@ -542,27 +383,28 @@ void* calloc(size_t nobj, size_t size)
  return p;
}


void* realloc(void *p, size_t size)
{
void* realloc (void* p, size_t size) {
  void* ptr;
  struct boundary_tag* tag;
  int real_size;
  spin_lock_ctx_t ctxliba;

  if ( size == 0 )
  {
  if (size == 0) {
    free (p);
    return NULL;
  }
  if ( p == NULL ) return malloc( size );
  if (p == NULL)
    return malloc (size);

  if ( &liballoc_lock != NULL ) liballoc_lock(); // lockit
  if (&liballoc_lock != NULL)
    liballoc_lock (&ctxliba); // lockit
  tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
  real_size = tag->size;
  if ( &liballoc_unlock != NULL ) liballoc_unlock();
  if (&liballoc_unlock != NULL)
    liballoc_unlock (&ctxliba);

  if ( (size_t)real_size > size ) real_size = size;
  if ((size_t)real_size > size)
    real_size = size;

  ptr = malloc (size);
  liballoc_memcpy (ptr, p, real_size);
@@ -570,6 +412,3 @@ void* realloc(void *p, size_t size)

  return ptr;
}


@@ -12,7 +12,6 @@
typedef unsigned int size_t;
#endif

#ifndef NULL
#define NULL 0
#endif
@@ -23,14 +22,12 @@ typedef unsigned int size_t;
extern "C" {
#endif

/** This is a boundary tag which is prepended to the
 * page or section of a page which we have allocated. It is
 * used to identify valid memory blocks that the
 * application is trying to free.
 */
struct boundary_tag
{
struct boundary_tag {
  unsigned int magic;     //< It's a kind of ...
  unsigned int size;      //< Requested size.
  unsigned int real_size; //< Actual size.
@@ -43,9 +40,6 @@ struct boundary_tag
  struct boundary_tag* prev; //< Linked list info.
};


/** This function is supposed to lock the memory data structures. It
 * could be as simple as disabling interrupts or acquiring a spinlock.
 * It's up to you to decide.
@@ -53,7 +47,7 @@ struct boundary_tag
 * \return 0 if the lock was acquired successfully. Anything else is
 * failure.
 */
extern int liballoc_lock();
extern int liballoc_lock (void* ctx);

/** This function unlocks what was previously locked by the liballoc_lock
 * function. If it disabled interrupts, it enables interrupts. If it
@@ -61,7 +55,7 @@ extern int liballoc_lock();
 *
 * \return 0 if the lock was successfully released.
 */
extern int liballoc_unlock();
extern int liballoc_unlock (void* ctx);

/** This is the hook into the local system which allocates pages. It
 * accepts an integer parameter which is the number of pages
@@ -82,18 +76,13 @@ extern void* liballoc_alloc(int);
 */
extern int liballoc_free (void*, int);


void* malloc (size_t);         //< The standard function.
void* realloc (void*, size_t); //< The standard function.
void* calloc (size_t, size_t); //< The standard function.
void free (void*);             //< The standard function.


#ifdef __cplusplus
}
#endif

#endif
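One closing illustration, separate from the header itself: every pointer the allocator returns sits exactly sizeof (struct boundary_tag) bytes after its tag, which is the invariant malloc and free above rely on when they convert between the two. A hedged restatement (helper names are illustrative, not from the source):

/* Sketch only: the user pointer is the byte right after the tag, and the tag
 * is recovered by stepping back the same distance, as free () does above. */
static void* user_ptr_from_tag (struct boundary_tag* tag) {
  return (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
}

static struct boundary_tag* tag_from_user_ptr (void* ptr) {
  return (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
}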

Some files were not shown because too many files have changed in this diff.