Compare commits

..

98 Commits

Author SHA1 Message Date
38e26a9c12 Implement argument_ptr () syscall for handling process arguments
All checks were successful
Build documentation / build-and-deploy (push) Successful in 37s
2026-01-30 14:05:47 +01:00
124aa12f5b Redesign scheduling points
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-30 02:36:27 +01:00
d2f5c032d9 Fix TLS alignment issues, works on BOCHS now too!
All checks were successful
Build documentation / build-and-deploy (push) Successful in 40s
2026-01-29 18:18:24 +01:00
73e42588fb Fix BOCHS clock
All checks were successful
Build documentation / build-and-deploy (push) Successful in 41s
2026-01-29 15:04:06 +01:00
e78bfb9984 Move suspension q code into proc/suspension_q.c
All checks were successful
Build documentation / build-and-deploy (push) Successful in 24s
2026-01-29 01:52:18 +01:00
d2a88b3641 Move suspension q's cleanup to proc/suspension_q.c 2026-01-29 01:43:01 +01:00
fdda2e2df8 Unlock mutexes on process death 2026-01-29 01:38:44 +01:00
388418a718 Nice wrappers around process management
All checks were successful
Build documentation / build-and-deploy (push) Successful in 34s
2026-01-29 00:08:54 +01:00
1c64d608bd Rename make/libc.mk -> make/libmsl.mk
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
2026-01-28 23:57:28 +01:00
3d23187acf Implement userspace TLS, remove RW Locks 2026-01-28 23:52:48 +01:00
a3b62ebd3d Clean up AMD64 memory management code, remove dependency on pd.lock 2026-01-27 19:03:03 +01:00
8bda300f6a Fix sys_clone () wrong argument bug
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-27 18:05:02 +01:00
cf51600c6a Cleanup syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 34s
2026-01-27 17:34:43 +01:00
b388b30b24 Redesign userspace memory management
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
2026-01-27 17:04:08 +01:00
600886a7ee Organize resources into process groups 2026-01-27 14:18:05 +01:00
67b66f2b39 Implement proper mutex cleanup
All checks were successful
Build documentation / build-and-deploy (push) Successful in 23s
2026-01-25 23:10:12 +01:00
18f791222e Remove dead process from it's suspension queues 2026-01-25 22:39:29 +01:00
5e16bb647c Multiple process suspension queues 2026-01-25 22:10:04 +01:00
a68373e4ee Dynamically assign cpu upon mutex unlock
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-25 20:39:51 +01:00
8650010992 Fix user CPU context saving
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
2026-01-25 17:39:34 +01:00
95f590fb3b multi-cpu scheduling WIP 2026-01-25 15:54:00 +01:00
7bb3b77ede Disable kernel preemption, fix requesting rescheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
2026-01-22 19:32:15 +01:00
c26fd3cb2b Fix scheduler locking hierarchy 2026-01-22 15:59:29 +01:00
fea0999726 Fix scheduler starvation, use lists for scheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
2026-01-22 11:54:52 +01:00
7eceecf6e3 Add mutex syscalls 2026-01-20 22:18:43 +01:00
fff51321bc Redesign syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 40s
2026-01-20 20:46:34 +01:00
a29233f853 Rename proc_spawn_thread to proc_clone 2026-01-19 22:01:44 +01:00
38a43b59b0 Resolve strange IRQ issues which cause the scheduler to behave weirdly (IRQ mapping)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 52s
2026-01-19 01:51:34 +01:00
ddafc4eb19 Rewrite resource subsystem 2026-01-18 20:50:45 +01:00
4f7077d458 Move mutex and mem create/cleanup functions into mutex.c and mem.c respectively
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
2026-01-16 22:13:17 +01:00
9a7dbf0594 Properly implement liballoc_free () 2026-01-16 22:09:16 +01:00
ab8093cc6c CI install pymdown-extensions from pip
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-16 20:28:26 +01:00
ddbb66b5e4 Docs processes overview 2026-01-16 20:26:23 +01:00
11a1eb52aa Move status codes into a separate header
All checks were successful
Build documentation / build-and-deploy (push) Successful in 36s
2026-01-16 19:07:32 +01:00
a054257336 Port liballoc to userspace 2026-01-16 18:50:40 +01:00
9fc8521e63 sys_proc_mutex_unlock () automatically reschedule at the end
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
2026-01-16 00:28:46 +01:00
711da8aeab Implement proc_spawn_thread syscall, fix proc_resume and proc_suspend
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-16 00:26:37 +01:00
ebd9f0cac6 Let the user application decide upon the resource ID (RID)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 22s
2026-01-14 23:19:39 +01:00
7cd5623d36 Use reference counting to track filetime of process PD
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-14 23:11:06 +01:00
270ff507d4 Implement lock IRQ nesting via stack variables/contexts
All checks were successful
Build documentation / build-and-deploy (push) Successful in 21s
2026-01-14 22:11:56 +01:00
55166f9d5f syscall doesn't need RPL 3 bits on kernel code
All checks were successful
Build documentation / build-and-deploy (push) Successful in 24s
2026-01-14 21:21:20 +01:00
e5cc3a64d3 Fix syscall return value - preserve RAX register
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-14 20:58:00 +01:00
2ab308d678 Drop m_ prefix from libmsl 2026-01-14 20:56:09 +01:00
d1d772cb42 Fix user apps randomly crashing (APIC, GDT layout, syscall entry)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 23s
2026-01-14 19:51:18 +01:00
0d8f9e565f Fix missing CPU_REQUEST_SCHED IDT entry 2026-01-11 12:07:17 +01:00
f80a26e5eb Load kernel CR3 2026-01-11 03:45:32 +01:00
5bf10c1218 Extra compiler flags for AMD64
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-11 03:42:15 +01:00
41a458b925 Implement Mutexes and supporting syscalls, cleanup/optimize scheduler
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-10 00:12:42 +01:00
6a474c21a0 Use RW spin locks
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-09 19:53:08 +01:00
a5283283f6 Hold proc->lock while killing the process 2026-01-09 00:00:18 +01:00
79768d94e6 Preserve syscall return value in RAX
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-08 23:06:32 +01:00
0555ddd041 Clean up IOAPIC and LAPIC implementations
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
2026-01-08 22:05:11 +01:00
ebb026b807 proc_cleanup_resources () drop instead of immediate removal
All checks were successful
Build documentation / build-and-deploy (push) Successful in 30s
2026-01-07 23:09:13 +01:00
d7b734306f Introduce concept of Process Resources (PR_MEM), implement necessary syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 42s
2026-01-07 22:47:30 +01:00
28aef30f77 Implement proc_map () and proc_unmap () syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 21s
2026-01-06 23:32:11 +01:00
9f107a1a5e Implement proc_unmap () 2026-01-06 17:47:21 +01:00
e50f8940a9 Redesign linked list
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-06 16:38:42 +01:00
d09e4d97ad Fix missing headers, generate compile db with bear
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
2026-01-06 03:08:13 +01:00
7915986902 Remove Doxygen-style comments, change formatting to wrap comments
All checks were successful
Build documentation / build-and-deploy (push) Successful in 28s
2026-01-06 02:04:32 +01:00
902682ac11 Remove doxygen infra
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
2026-01-06 01:41:07 +01:00
7747e5e0aa Docs update theme
All checks were successful
Build documentation / build-and-deploy (push) Successful in 47s
2026-01-06 01:37:51 +01:00
a8423fe657 Better proc_kill () and process cleanup
All checks were successful
Build documentation / build-and-deploy (push) Successful in 27s
2026-01-06 01:19:11 +01:00
6538fd8023 Generate new PIDs for processes 2026-01-05 20:24:26 +01:00
fcd5658a80 Use red-black trees to store process run queue and process list
All checks were successful
Build documentation / build-and-deploy (push) Successful in 43s
2026-01-05 18:30:58 +01:00
b1579e4ac1 Implement automatic paging table deallocation 2026-01-04 21:26:11 +01:00
bba36ef057 Remove sign warning in comparison
All checks were successful
Build documentation / build-and-deploy (push) Successful in 22s
2026-01-04 01:45:56 +01:00
b5353cb600 Auxilary scripts for formatting all components
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
2026-01-04 01:44:02 +01:00
e077d322f4 Rewrite init app in C, introduce MSL (MOP3 System Library)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-04 01:11:31 +01:00
2c954a9ca9 Fix return syscall result
All checks were successful
Build documentation / build-and-deploy (push) Successful in 32s
2026-01-03 15:06:36 +01:00
cf04e3db18 proc_quit () and proc_test () syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 43s
2026-01-03 12:21:56 +01:00
124a7f7215 Docs add kernel build instructions
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-03 02:19:40 +01:00
e52268cd8e First Hello world syscall
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-03 02:04:09 +01:00
1341dc00d9 make -B format_kernel
All checks were successful
Build documentation / build-and-deploy (push) Successful in 32s
2026-01-01 20:17:29 +01:00
99bab4ceee Use generic spin () instead of amd64_spin () 2026-01-01 20:16:40 +01:00
121fb3b33c Move platform-specific code for process loading/init for AMD64 to amd64/
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-01 20:08:37 +01:00
5e6bdcc52d Handle swapgs in interrupts and scheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
2026-01-01 18:42:53 +01:00
3bcbdb5ec4 Fix proc_kill () race, improve scheduler locking
All checks were successful
Build documentation / build-and-deploy (push) Successful in 1m45s
2026-01-01 16:59:04 +01:00
7f53ede2ab CI docs use $REMOTE_IP
All checks were successful
Build documentation / build-and-deploy (push) Successful in 38s
2025-12-31 22:50:59 +01:00
f1e34b78cd CI docs chmod 777 on site build dir
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2025-12-31 22:40:28 +01:00
97ad0b338c Fix CI docs build, install rsync
All checks were successful
Build documentation / build-and-deploy (push) Successful in 43s
2025-12-31 21:25:33 +01:00
74c782d653 mkdir docs/kernel/doxygen
Some checks failed
Build documentation / build-and-deploy (push) Failing after 30s
2025-12-31 21:21:02 +01:00
949f9c5293 Add docs gitea workflow
Some checks failed
Build documentation / build-and-deploy (push) Failing after 1m11s
2025-12-31 20:57:09 +01:00
a6c3f4cf87 Move kernel doxygen stuff to kernel/ 2025-12-30 17:04:05 +01:00
34f1e0ba30 Document amd64 platform-specific code 2025-12-30 16:50:15 +01:00
4f4f5c3d2f Move doxygen-awesome-darkmode-toggle.js to doxytheme/ 2025-12-30 01:52:45 +01:00
d861ab56c4 Remove pre-SMP TSS code 2025-12-30 01:50:47 +01:00
b279774bd6 Generated docs using doxygen and mkdocs 2025-12-30 01:47:29 +01:00
fa7998c323 Run first app from ramdisk! 2025-12-29 23:54:21 +01:00
c16170e4c2 SMP and timer interrupts 2025-12-23 19:50:37 +01:00
259aa732c8 Use separate IST stack for IRQs and cpu exceptions 2025-12-22 22:19:01 +01:00
1fd6f4890d Generic sleep_micro() function 2025-12-22 21:14:58 +01:00
849df9c27d Fix HPET unaligned read/writes on bochs 2025-12-22 21:06:48 +01:00
69feceaaae clang-format set column width to 100 chars 2025-12-22 19:38:32 +01:00
7b33d0757a APIC, HPET, virtual memory 2025-12-22 19:36:43 +01:00
741d0fb9b0 clang-format alignment rules 2025-12-21 23:10:21 +01:00
c85cbd0c01 Use prettier #defines for attributes 2025-12-21 23:03:56 +01:00
b2d8294b12 Use clang-format 2025-12-21 22:53:25 +01:00
8794a61073 Integrate uACPI 2025-12-21 22:24:23 +01:00
340 changed files with 53801 additions and 943 deletions

58
.clang-format Normal file
View File

@@ -0,0 +1,58 @@
BasedOnStyle: LLVM
Language: C
# Indentation
IndentWidth: 2
TabWidth: 2
UseTab: Never
# Braces and blocks
BreakBeforeBraces: Attach
BraceWrapping:
AfterFunction: false
AfterControlStatement: false
AfterStruct: false
AfterEnum: false
AfterUnion: false
BeforeElse: false
# Control statements
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortBlocksOnASingleLine: Never
# Line breaking
ColumnLimit: 100
BreakBeforeBinaryOperators: None
BreakBeforeTernaryOperators: true
BreakStringLiterals: false
# Spacing
SpaceBeforeParens: Always
SpaceBeforeAssignmentOperators: true
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
# Pointer alignment
PointerAlignment: Left
DerivePointerAlignment: false
# Alignment
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignOperands: false
# Includes
SortIncludes: true
# Comments
ReflowComments: true
CommentPragmas: '^ IWYU pragma:'
# Misc
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1

37
.editorconfig Normal file
View File

@@ -0,0 +1,37 @@
root = true
# Default for all files
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
# C / header files
[*.{c,h}]
indent_style = space
indent_size = 2
tab_width = 2
max_line_length = 80
# Assembly (if present; usually tab-sensitive)
[*.S]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false
# Makefiles (MUST use tabs)
[Makefile]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false
[*.mk]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false
# Markdown (avoid wrapping conflicts)
[*.md]
trim_trailing_whitespace = false
max_line_length = off

2
.gdbinit Normal file
View File

@@ -0,0 +1,2 @@
file kernel/build/kernel.elf
target remote :1234

View File

@@ -0,0 +1,43 @@
name: Build documentation
on:
push:
branches:
- master
jobs:
build-and-deploy:
runs-on: ubuntu-latest
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Install software
run: |
sudo apt-get update
sudo apt-get install -y doxygen make rsync
- name: Set up python3
uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Install mkdocs
run: |
pip install --upgrade pip
pip install mkdocs mkdocs-material pymdown-extensions
- name: Build
run: make docs
- name: Deploy
env:
SSH_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
REMOTE_IP: ${{ vars.DEPLOY_REMOTE_IP }}
run: |
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan -H "$REMOTE_IP" >> ~/.ssh/known_hosts
chmod -R 777 site
rsync -az --delete site/ webuser@"$REMOTE_IP":/home/webuser/mop/

2
.gitignore vendored
View File

@@ -2,3 +2,5 @@ iso_root
mop3.iso
bochs-log.txt
bochs-com1.txt
mop3dist.tar
site/

View File

@@ -1,9 +1,7 @@
platform ?= amd64
all_kernel:
make -C kernel platform=$(platform) all
clean_kernel:
make -C kernel platform=$(platform) clean
.PHONY: all_kernel clean_kernel
include make/apps.mk
include make/kernel.mk
include make/dist.mk
include make/docs.mk
include make/libmsl.mk

11
amd64/flags.mk Normal file
View File

@@ -0,0 +1,11 @@
cflags += --target=x86_64-pc-none-elf \
-mno-sse \
-mno-sse2 \
-mno-avx \
-mno-mmx \
-mno-80387 \
-mno-red-zone \
-mcmodel=large
ldflags += --target=x86_64-pc-none-elf \
-Wl,-zmax-page-size=0x1000

69
amd64/link.ld Normal file
View File

@@ -0,0 +1,69 @@
OUTPUT_FORMAT(elf64-x86-64)
ENTRY(_start)
PHDRS {
text PT_LOAD;
rodata PT_LOAD;
data PT_LOAD;
bss PT_LOAD;
tls PT_TLS;
}
SECTIONS {
. = 0x0000500000000000;
.text : {
*(.text .text.*)
*(.ltext .ltext.*)
} :text
. = ALIGN(0x1000);
.rodata : {
*(.rodata .rodata.*)
} :rodata
. = ALIGN(0x1000);
.data : {
*(.data .data.*)
*(.ldata .ldata.*)
} :data
. = ALIGN(0x1000);
__bss_start = .;
.bss : {
*(.bss .bss.*)
*(.lbss .lbss.*)
} :bss
__bss_end = .;
. = ALIGN(0x1000);
__tdata_start = .;
.tdata : {
*(.tdata .tdata.*)
} :tls
__tdata_end = .;
__tbss_start = .;
.tbss : {
*(.tbss .tbss.*)
} :tls
__tbss_end = .;
__tls_size = __tbss_end - __tdata_start;
/DISCARD/ : {
*(.eh_frame*)
*(.note .note.*)
}
}

View File

@@ -1,13 +1,15 @@
cpu: model=p4_prescott_celeron_336
cpu: model=p4_prescott_celeron_336, ips=200000000
memory: guest=4096 host=2048
romimage: file=/usr/share/bochs/BIOS-bochs-latest, options=fastboot
romimage: file=/usr/share/bochs/BIOS-bochs-latest
vgaromimage: file=/usr/share/bochs/VGABIOS-lgpl-latest.bin
ata0: enabled=1
ata0-master: type=cdrom, path=mop3.iso, status=inserted
com1: enabled=1, mode=file, dev=bochs-com1.txt
pci: enabled=1, chipset=i440fx
clock: sync=realtime, time0=local
boot: cdrom

14
aux/devel.sh Executable file
View File

@@ -0,0 +1,14 @@
#!/bin/sh
# Developer convenience script: build everything (kernel, libmsl, apps, ramdisk)
# and produce a bootable ISO. Pass "debug" as the first argument for a debug kernel.
set -xe
if [ "$1" = "debug" ]; then
make -B all_kernel buildtype=debug
else
make -B all_kernel
fi
# Userspace depends on libmsl, so build the library before the apps.
make -B all_libmsl
make -B all_apps
# Pack the ramdisk, then wrap kernel + ramdisk into a Limine ISO.
make -B all_dist
./aux/limine_iso_amd64.sh

7
aux/format.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/sh
# Run clang-format over every component (kernel, libmsl, apps).
set -x
make -B format_kernel
make -B format_libmsl
make -B format_apps

View File

@@ -10,6 +10,8 @@ cp -v boot/limine/limine-bios.sys boot/limine/limine-bios-cd.bin \
cp -v boot/limine/BOOTX64.EFI boot/limine/BOOTIA32.EFI iso_root/EFI/BOOT
cp -v mop3dist.tar iso_root/boot
xorriso -as mkisofs -R -r -J -b boot/limine/limine-bios-cd.bin \
-no-emul-boot -boot-load-size 4 -boot-info-table -hfsplus \
-apm-block-size 2048 --efi-boot boot/limine/limine-uefi-cd.bin \

5
aux/qemu_amd64.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
# Boot the MOP3 ISO in KVM-accelerated QEMU; extra arguments are forwarded to QEMU.
set -x
# "$@" (quoted) preserves arguments containing spaces; bare $@ would word-split them.
qemu-system-x86_64 -M q35 -m 4G -serial stdio -enable-kvm -cdrom mop3.iso -smp 4 "$@"

5
aux/qemu_amd64_debug.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
# Boot the MOP3 ISO in QEMU halted at startup (-S) with a GDB stub on :1234 (-s).
set -x
# "$@" (quoted) preserves arguments containing spaces; bare $@ would word-split them.
qemu-system-x86_64 -M q35 -m 4G -serial stdio -cdrom mop3.iso -smp 4 -s -S "$@"

View File

@@ -3,3 +3,4 @@ timeout: 10
/mop3
protocol: limine
path: boot():/boot/kernel.elf
module_path: boot():/boot/mop3dist.tar

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

44
docs/building_kernel.md Normal file
View File

@@ -0,0 +1,44 @@
# Building the MOP3 kernel
This article describes how to build the kernel, how the build system works, and the prerequisites.
## Prerequisites
- POSIX host system (tested on Linux, may break on other systems)
- Git
- GNU make
- LLVM toolchain/Clang C compiler
- Xorriso
## Build steps
cd into root of MOP3 source tree.
Build the kernel:
```
make -B all_kernel buildtype=<debug|release>
```
Build essential system applications:
```
make -B all_apps
```
Prepare the ramdisk:
```
make -B all_dist
```
Build ISO image:
```
./aux/limine_iso_amd64.sh
```
Now you have an ISO image, which can be run by QEMU or burned onto a CD.
For the convenience of the developer, there's a magic "do all" script located in `aux`:
```
./aux/devel.sh # optionally "./aux/devel.sh debug" for debugging
```
It does all the previous steps, just packed into a single script.

4
docs/index.md Normal file
View File

@@ -0,0 +1,4 @@
# MOP3 operating system documentation
MOP3 is a hobby OS project of mine ;).

View File

@@ -0,0 +1,30 @@
# Overview of processes in MOP3
## What is a process?
A process is a structure defined to represent the internal state of a user application's environment. This includes
the necessary stacks, code, data and other resources. A process (usually) has its own address space, but in certain
circumstances may share it with another process.
## Only processes vs. processes-threads model
### Overview
MOP3 doesn't have a process-thread separation. Usually in operating systems you'd have a "process", which consists
of multiple worker threads. E.g. a single-threaded application is a process, which consists of one worker. In MOP3
we do things a little differently. We only have processes, but some processes may work within the same pool of (generally speaking)
"resources", such as a shared address space, shared memory allocations, mutexes and so on. An application then consists of
not threads, but processes, which are loosely tied together via shared data.
#### Processes-threads model diagram
![Processes-threads model](assets/images/processes-threads.png)
#### Only processes model diagram
![Only processes model](assets/images/only-processes.png)
## Scheduling
MOP3 uses a round-robin based scheduler. For now priorities are left unimplemented, i.e. every process has
equal priority, but this may change in the future.
A good explanation of round-robin scheduling can be found on the OSDev wiki: [the article](https://wiki.osdev.org/Scheduling_Algorithms#Round_Robin)

21
generic/flags.mk Normal file
View File

@@ -0,0 +1,21 @@
cflags += -nostdinc \
-nostdlib \
-ffreestanding \
-fno-builtin \
-std=c11 \
-pedantic \
-Wall \
-Wextra \
-ffunction-sections \
-fdata-sections
cflags += -isystem ../include
ldflags += -ffreestanding \
-nostdlib \
-fno-builtin \
-fuse-ld=lld \
-static \
-Wl,--gc-sections \
-Wl,--strip-all \
-flto

13
include/m/status.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef _M_STATUS_H
#define _M_STATUS_H
/* Status codes returned by MOP3 syscalls, shared between kernel and userspace. */
#define ST_OK 0
#define ST_SYSCALL_NOT_FOUND 1
#define ST_UNALIGNED 2
#define ST_OOM_ERROR 3
#define ST_NOT_FOUND 4
#define ST_BAD_ADDRESS_SPACE 5
#define ST_PERMISSION_ERROR 6
#define ST_BAD_RESOURCE 7
#endif // _M_STATUS_H

16
include/m/syscall_defs.h Normal file
View File

@@ -0,0 +1,16 @@
#ifndef _M_SYSCALL_DEFS_H
#define _M_SYSCALL_DEFS_H
/* Syscall numbers, shared between the kernel dispatcher and userspace wrappers. */
#define SYS_QUIT 1
#define SYS_TEST 2
#define SYS_MAP 3
#define SYS_UNMAP 4
#define SYS_CLONE 5
#define SYS_SCHED 6
#define SYS_MUTEX_CREATE 7
#define SYS_MUTEX_DELETE 8
#define SYS_MUTEX_LOCK 9
#define SYS_MUTEX_UNLOCK 10
#define SYS_ARGUMENT_PTR 11
#endif // _M_SYSCALL_DEFS_H

2
init/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.o
*.exe

1
init/Makefile Normal file
View File

@@ -0,0 +1 @@
include ../make/user.mk

1
init/app.mk Normal file
View File

@@ -0,0 +1 @@
app := init.exe

46
init/init.c Normal file
View File

@@ -0,0 +1,46 @@
#include <limits.h>
#include <proc/local.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
#include <string/string.h>
/* Resource ID (RID) for the shared demo mutex — RIDs are chosen by the application. */
#define MUTEX 2000
/* Per-process (LOCAL/TLS) letter this process passes to test (). */
LOCAL volatile char letter = 'c';
/*
 * Worker process body: fetch the letter passed by the spawner via argument_ptr (),
 * then loop forever emitting it via test () in groups of three, serialized on MUTEX.
 */
void app_proc (void) {
  /* The argument is a char smuggled through the argument pointer — unpack it. */
  char arg_letter = (char)(uintptr_t)argument_ptr ();
  letter = arg_letter;
  for (;;) {
    mutex_lock (MUTEX);
    for (int i = 0; i < 3; i++)
      test (letter);
    mutex_unlock (MUTEX);
  }
  process_quit (); /* unreachable: the loop above never exits */
}
/*
 * Entry point of the init app: create the shared mutex, spawn three worker
 * processes with distinct letters, then loop emitting its own letter ('a')
 * under the same mutex alongside the workers.
 */
void app_main (void) {
  mutex_create (MUTEX);
  letter = 'a';
  /* Each worker receives its letter as the (pointer-sized) spawn argument. */
  process_spawn (&app_proc, (void*)'a');
  process_spawn (&app_proc, (void*)'b');
  process_spawn (&app_proc, (void*)'c');
  for (;;) {
    mutex_lock (MUTEX);
    for (int i = 0; i < 3; i++)
      test (letter);
    mutex_unlock (MUTEX);
  }
}

3
init/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += init.c
o += init.o

2
kernel/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.json
.cache

View File

@@ -5,6 +5,7 @@ ldflags :=
cflags :=
buildtype ?= release
include vars.mk
include flags.mk
include src.mk
@@ -22,4 +23,13 @@ build/kernel.elf: $(o)
clean:
rm -f $(o) build/kernel.elf
.PHONY: all clean
format:
clang-format -i $$(git ls-files '*.c' '*.h' \
':!limine/limine.h' \
':!c_headers/include/**' \
':!uACPI/source/**' \
':!uACPI/include/**' \
':!uACPI/tests/**' \
':!libk/printf*')
.PHONY: all clean format

271
kernel/amd64/apic.c Normal file
View File

@@ -0,0 +1,271 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
#include <sys/time.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24
/* ID of Local APIC */
#define LAPIC_ID 0x20
/* End of interrupt register */
#define LAPIC_EOI 0xB0
/* Spurious interrupt vector register */
#define LAPIC_SIVR 0xF0
/* Interrupt command register */
#define LAPIC_ICR 0x300
/* LVT timer register */
#define LAPIC_LVTTR 0x320
/* Timer initial count register */
#define LAPIC_TIMICT 0x380
/* Timer current count register */
#define LAPIC_TIMCCT 0x390
/* Divide config register */
#define LAPIC_DCR 0x3E0
#define DIVIDER_VALUE 0x0B
struct ioapic {
struct acpi_madt_ioapic table_data;
spin_lock_t lock;
uintptr_t mmio_base;
};
/* Table of IOAPICS */
static struct ioapic ioapics[IOAPICS_MAX];
/* Table of interrupt source overrides */
/* clang-format off */
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
/* clang-format on */
/* Count of actual IOAPIC entries */
static size_t ioapic_entries = 0;
/* Count of actual interrupt source overrides */
static size_t intr_src_override_entries = 0;
static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
/* Read an IOAPIC register via the indirect index/data MMIO pair, under the IOAPIC's lock */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
  volatile uint32_t* index_reg = (volatile uint32_t*)ioapic->mmio_base;
  volatile uint32_t* data_reg = (volatile uint32_t*)(ioapic->mmio_base + 0x10);
  spin_lock_ctx_t ctx;
  spin_lock (&ioapic->lock, &ctx);
  *index_reg = reg;
  uint32_t value = *data_reg;
  spin_unlock (&ioapic->lock, &ctx);
  return value;
}
/* Write an IOAPIC register via the indirect index/data MMIO pair, under the IOAPIC's lock */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
  volatile uint32_t* index_reg = (volatile uint32_t*)ioapic->mmio_base;
  volatile uint32_t* data_reg = (volatile uint32_t*)(ioapic->mmio_base + 0x10);
  spin_lock_ctx_t ctx;
  spin_lock (&ioapic->lock, &ctx);
  *index_reg = reg;
  *data_reg = value;
  spin_unlock (&ioapic->lock, &ctx);
}
/* Find the IOAPIC whose GSI range covers the provided IRQ, or NULL if none does */
static struct ioapic* amd64_ioapic_find (uint32_t irq) {
  for (size_t i = 0; i < ioapic_entries; i++) {
    struct ioapic* candidate = &ioapics[i];
    /* Bits 16..23 of the version register hold the maximum redirection entry index */
    uint32_t max_redir = (amd64_ioapic_read (candidate, 1) >> 16) & 0xFF;
    uint32_t base = candidate->table_data.gsi_base;
    if (irq >= base && irq <= base + max_redir)
      return candidate;
  }
  return NULL;
}
/*
 * Route IRQ to an IDT entry of a given Local APIC.
 *
 * vec      - Interrupt vector number, which will be delivered to the CPU.
 * irq      - Legacy IRQ number to be routed. Can be changed by an interrupt source
 *            override into a different GSI.
 * flags    - IOAPIC redirection flags.
 * lapic_id - Local APIC that will receive the interrupt.
 */
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
  /* NULL-initialized so the pointer is never read uninitialized (was only guarded by
   * found_override before, which also triggered -Wmaybe-uninitialized). */
  struct acpi_madt_interrupt_source_override* override = NULL;
  bool found_override = false;
  for (size_t i = 0; i < intr_src_override_entries; i++) {
    if (intr_src_overrides[i].source == irq) {
      override = &intr_src_overrides[i];
      found_override = true;
      break;
    }
  }
  /* Destination LAPIC ID in bits 56..63, vector in bits 0..7 of the redirection entry */
  uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);
  if (found_override) {
    /* MADT override flags: bits 0..1 polarity (3 = active low), bits 2..3 trigger (3 = level) */
    uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
    uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
    calc_flags |= (uint64_t)mode << 15;     /* trigger mode bit */
    calc_flags |= (uint64_t)polarity << 13; /* pin polarity bit */
  }
  /* An override remaps the legacy IRQ onto a different Global System Interrupt */
  uint32_t gsi = found_override ? override->gsi : irq;
  struct ioapic* ioapic = amd64_ioapic_find (gsi);
  if (ioapic == NULL)
    return;
  /* Each redirection entry occupies two 32-bit registers starting at offset 0x10 */
  uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;
  /* Program the destination (high dword) before the low dword that carries the vector */
  amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
  amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
}
/* Find and initialize the IOAPIC */
void amd64_ioapic_init (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct uacpi_table apic_table;
uacpi_status status = uacpi_table_find_by_signature (ACPI_MADT_SIGNATURE, &apic_table);
if (status != UACPI_STATUS_OK) {
DEBUG ("Could not find MADT table!\n");
spin ();
}
struct acpi_madt* apic = (struct acpi_madt*)apic_table.virt_addr;
struct acpi_entry_hdr* current = (struct acpi_entry_hdr*)apic->entries;
for (;;) {
if ((uintptr_t)current >=
((uintptr_t)apic->entries + apic->hdr.length - sizeof (struct acpi_madt)))
break;
switch (current->type) {
case ACPI_MADT_ENTRY_TYPE_IOAPIC: {
struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
MM_PG_PRESENT | MM_PG_RW);
ioapics[ioapic_entries++] = (struct ioapic){
.lock = SPIN_LOCK_INIT,
.table_data = *ioapic_table_data,
.mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
};
} break;
case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
struct acpi_madt_interrupt_source_override* override =
(struct acpi_madt_interrupt_source_override*)current;
intr_src_overrides[intr_src_override_entries++] = *override;
} break;
}
current = (struct acpi_entry_hdr*)((uintptr_t)current + current->length);
}
}
/* Get MMIO base of this CPU's Local APIC (set up by amd64_lapic_init) */
static uintptr_t amd64_lapic_base (void) { return thiscpu->lapic_mmio_base; }
/* Write a 32-bit Local APIC register at the given offset */
static void amd64_lapic_write (uint32_t reg, uint32_t value) {
  *(volatile uint32_t*)(amd64_lapic_base () + reg) = value;
}
/* Read a 32-bit Local APIC register at the given offset */
static uint32_t amd64_lapic_read (uint32_t reg) {
  return *(volatile uint32_t*)(amd64_lapic_base () + reg);
}
/* Get ID of Local APIC (bits 24..31 of the ID register) */
uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }
/* Send End of interrupt command to Local APIC */
void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
/*
 * Calibrate Local APIC to send interrupts in a set interval: let the timer count down
 * from its maximum for `us` microseconds (timed by sleep_micro) and measure how many
 * ticks elapsed.
 *
 * us - Period length in microseconds
 *
 * Returns the number of LAPIC timer ticks that elapsed in `us` microseconds.
 */
static uint32_t amd64_lapic_calibrate (uint32_t us) {
  spin_lock_ctx_t ctxlacb;
  /* NOTE(review): lock presumably serializes calibration across CPUs — confirm */
  spin_lock (&lapic_calibration_lock, &ctxlacb);
  amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
  /* Bit 16 set: timer interrupt masked while we measure */
  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
  /* Start counting down from the maximum initial count */
  amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);
  sleep_micro (us);
  /* Clear the mask bit, then compute how far the counter descended */
  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
  uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
  DEBUG ("timer ticks = %u\n", ticks);
  spin_unlock (&lapic_calibration_lock, &ctxlacb);
  return ticks;
}
/*
 * Starts a Local APIC, configures LVT timer to send interrupts at SCHED_PREEMPT_TIMER.
 *
 * ticks - Initial tick count (from amd64_lapic_calibrate)
 */
static void amd64_lapic_start (uint32_t ticks) {
  amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
  amd64_lapic_write (LAPIC_TIMICT, ticks);
  /* NOTE(review): bit 17 looks like periodic timer mode — confirm against the Intel SDM */
  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
}
/*
 * Initialize Local APIC, configure to send timer interrupts at a given period. See
 * amd64_lapic_calibrate and amd64_lapic_start.
 *
 * us - Timer period in microseconds
 */
void amd64_lapic_init (uint32_t us) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  /* Bit 11 of the APIC base MSR globally enables the APIC */
  amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11));
  /* The same MSR carries the page-aligned physical MMIO base in its upper bits */
  uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
  thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
  mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);
  /* Spurious interrupt vector 0xFF; bit 8 software-enables the LAPIC */
  amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
  thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
  amd64_lapic_start (thiscpu->lapic_ticks);
}
/*
* Send an IPI to a given Local APIC. This till invoke an IDT stub located at vec.
*
* lapic_id - Target Local APIC
* vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
*/
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
    /* Spin until the delivery-status bit (12) clears: the previous IPI is done. */
    for (;;) {
        if (!(amd64_lapic_read (LAPIC_ICR) & (1 << 12)))
            break;
        __asm__ volatile ("pause");
    }
    /* Destination goes into ICR high; the write to ICR low triggers the send. */
    amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
    amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
}

14
kernel/amd64/apic.h Normal file
View File

@@ -0,0 +1,14 @@
#ifndef _KERNEL_AMD64_APIC_H
#define _KERNEL_AMD64_APIC_H
#include <libk/std.h>
/* Route legacy IRQ `irq` to interrupt vector `vec` on the CPU with the given Local
 * APIC ID; `flags` carries redirection-entry flags (see the implementation). */
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
/* Initialize the I/O APIC. */
void amd64_ioapic_init (void);
/* Return the Local APIC ID of the calling CPU. */
uint32_t amd64_lapic_id (void);
/* Signal end-of-interrupt to the Local APIC. */
void amd64_lapic_eoi (void);
/* Send an inter-processor interrupt with vector `vec` to the given Local APIC. */
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
/* Initialize the calling CPU's Local APIC with a timer period of `us` microseconds. */
void amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H

View File

@@ -1,20 +1,56 @@
#include <limine/limine.h>
#include <amd64/apic.h>
#include <amd64/debug.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <sys/debug.h>
#include <mm/pmm.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <uacpi/uacpi.h>
void bootmain(void) {
amd64_init();
DEBUG("Hello from amd64!\n");
#define UACPI_MEMORY_BUFFER_MAX 4096
pmm_init();
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
int *a = malloc(sizeof(int));
*a = 6969;
DEBUG("a=%p, *a=%d\n", a, *a);
/*
* The kernel starts booting here. This is the entry point after Limine hands control. We set up all
* the necessary platform-dependent subsystems/drivers and jump into the init app.
*/
void bootmain (void) {
struct limine_mp_response* mp = limine_mp_request.response;
*(volatile int *)0 = 123;
struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);
for (;;);
amd64_init (bsp_cpu, false);
syscall_init ();
amd64_debug_init ();
pmm_init ();
mm_init ();
rd_init ();
uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer));
amd64_ioapic_init ();
amd64_hpet_init ();
smp_init ();
proc_init ();
for (;;)
;
}

View File

@@ -1,46 +1,76 @@
#include <libk/std.h>
#include <libk/string.h>
#include <libk/printf.h>
#include <sys/debug.h>
#include <amd64/debug.h>
#include <amd64/io.h>
#include <libk/printf.h>
#include <libk/std.h>
#include <libk/string.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#define PORT_COM1 0x03F8
/* Port for printing to serial */
/* TODO: Make this configurable */
#define PORT_COM1 0x03F8
/* debugprintf buffer size */
#define BUFFER_SIZE 1024
/*
* Lock, which ensures that prints to the serial port are atomic (ie. one debugprintf is atomic in
* itself).
*/
static spin_lock_t serial_lock = SPIN_LOCK_INIT;
static bool amd64_debug_serial_tx_empty(void) {
return (bool)(amd64_io_inb(PORT_COM1 + 5) & 0x20);
static bool debug_init = false;
/* Block until TX buffer is empty */
static bool amd64_debug_serial_tx_empty (void) {
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
}
static void amd64_debug_serial_write(char x) {
while (!amd64_debug_serial_tx_empty());
amd64_io_outb(PORT_COM1, (uint8_t)x);
/* Write a single character to serial */
static void amd64_debug_serial_write (char x) {
while (!amd64_debug_serial_tx_empty ())
;
amd64_io_outb (PORT_COM1, (uint8_t)x);
}
void debugprintf(const char *fmt, ...) {
/*
* Formatted printing to serial. serial_lock ensures that all prints are atomic.
*/
void debugprintf (const char* fmt, ...) {
spin_lock_ctx_t ctxdbgp;
if (!debug_init)
return;
char buffer[BUFFER_SIZE];
memset(buffer, 0, sizeof(buffer));
memset (buffer, 0, sizeof (buffer));
va_list ap;
va_start(ap, fmt);
vsnprintf(buffer, sizeof(buffer), fmt, ap);
va_end(ap);
va_start (ap, fmt);
vsnprintf (buffer, sizeof (buffer), fmt, ap);
va_end (ap);
buffer[sizeof(buffer) - 1] = '\0';
buffer[sizeof (buffer) - 1] = '\0';
const char* p = buffer;
spin_lock (&serial_lock, &ctxdbgp);
const char *p = buffer;
while (*p) {
amd64_debug_serial_write(*p);
amd64_debug_serial_write (*p);
p++;
}
spin_unlock (&serial_lock, &ctxdbgp);
}
void amd64_debug_init(void) {
amd64_io_outb(PORT_COM1 + 1, 0x00);
amd64_io_outb(PORT_COM1 + 3, 0x80);
amd64_io_outb(PORT_COM1 + 0, 0x03);
amd64_io_outb(PORT_COM1 + 1, 0x00);
amd64_io_outb(PORT_COM1 + 3, 0x03);
amd64_io_outb(PORT_COM1 + 2, 0xC7);
amd64_io_outb(PORT_COM1 + 4, 0x0B);
/* Initialize serial */
void amd64_debug_init (void) {
amd64_io_outb (PORT_COM1 + 1, 0x00);
amd64_io_outb (PORT_COM1 + 3, 0x80);
amd64_io_outb (PORT_COM1 + 0, 0x03);
amd64_io_outb (PORT_COM1 + 1, 0x00);
amd64_io_outb (PORT_COM1 + 3, 0x03);
amd64_io_outb (PORT_COM1 + 2, 0xC7);
amd64_io_outb (PORT_COM1 + 4, 0x0B);
debug_init = true;
}

View File

@@ -1,6 +1,6 @@
#ifndef _KERNEL_AMD64_DEBUG_H
#define _KERNEL_AMD64_DEBUG_H
void amd64_debug_init(void);
void amd64_debug_init (void);
#endif // _KERNEL_AMD64_DEBUG_H

View File

@@ -1,6 +1,11 @@
cflags += --target=x86_64-pc-none-elf \
-mno-sse \
-mno-avx
-mno-sse2 \
-mno-avx \
-mno-mmx \
-mno-80387 \
-mno-red-zone \
-fno-omit-frame-pointer
ldflags += --target=x86_64-pc-none-elf \
-Wl,-zmax-page-size=0x1000

45
kernel/amd64/gdt.h Normal file
View File

@@ -0,0 +1,45 @@
#ifndef _KERNEL_AMD64_GDT_H
#define _KERNEL_AMD64_GDT_H
#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UDATA 0x18
#define GDT_UCODE 0x20
#define GDT_TSS 0x28
/* Size of kernel stack */
#define KSTACK_SIZE (32 * 1024)
/*
* 64-bit GDT structure. For more info see:
* - https://wiki.osdev.org/Global_Descriptor_Table
* - https://wiki.osdev.org/GDT_Tutorial
*/
struct gdt_entry {
uint16_t limitlow;
uint16_t baselow;
uint8_t basemid;
uint8_t access;
uint8_t gran;
uint8_t basehigh;
} PACKED;
/* Struct that gets loaded into GDTR */
struct gdt_ptr {
uint16_t limit;
uint64_t base;
} PACKED;
/* New, extended GDT (we need to extend Limine's GDT) */
struct gdt_extended {
struct gdt_entry old[5];
struct gdt_entry tsslow;
struct gdt_entry tsshigh;
} PACKED;
#endif // _KERNEL_AMD64_GDT_H

142
kernel/amd64/hpet.c Normal file
View File

@@ -0,0 +1,142 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>
/*
* HPET (High Precision Event Timer) driver code. See more at https://wiki.osdev.org/HPET
*/
/* HPET Main Counter Value Register */
#define HPET_MCVR 0xF0
/* HPET General Configuration Register */
#define HPET_GCR 0x10
/* HPET General Capabilities and ID Register */
#define HPET_GCIDR 0x00
/* Set whether we should use 32-bit or 64-bit reads/writes */
static bool hpet_32bits = 1;
/* Physical address for HPET MMIO */
static uintptr_t hpet_paddr;
/* HPET period in femtoseconds */
static uint64_t hpet_period_fs;
/* Lock, which protects concurrent access. See amd64/smp.c */
static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
/* Read a HPET register. Assumes caller holds hpet_lock */
static uint64_t amd64_hpet_read64 (uint32_t reg) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
return *(volatile uint64_t*)(hpet_vaddr + reg);
}
static uint32_t amd64_hpet_read32 (uint32_t reg) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
return *(volatile uint32_t*)(hpet_vaddr + reg);
}
/* Write a HPET register. Assumes caller holds hpet_lock */
static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
*(volatile uint64_t*)(hpet_vaddr + reg) = value;
}
static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
*(volatile uint32_t*)(hpet_vaddr + reg) = value;
}
/*
 * Read the current value of the HPET main counter (HPET_MCVR).
 *
 * When the counter must be accessed 32 bits at a time, a torn read is possible if the
 * low half wraps between the two half-reads. The hi/lo/hi sequence below retries
 * until both high-half reads agree, guaranteeing a consistent 64-bit value.
 */
static uint64_t amd64_hpet_read_counter (void) {
    uint64_t value;
    spin_lock_ctx_t ctxhrc;
    spin_lock (&hpet_lock, &ctxhrc);
    if (!hpet_32bits)
        value = amd64_hpet_read64 (HPET_MCVR);
    else {
        uint32_t hi1, lo, hi2;
        do {
            hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
            lo = amd64_hpet_read32 (HPET_MCVR + 0);
            hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
        } while (hi1 != hi2);
        value = ((uint64_t)hi1 << 32) | lo;
    }
    spin_unlock (&hpet_lock, &ctxhrc);
    return value;
}
/* Write the HPET main counter (HPET_MCVR), as one 64-bit store or two 32-bit halves
 * depending on hardware capability. */
static void amd64_hpet_write_counter (uint64_t value) {
    spin_lock_ctx_t lock_ctx;
    spin_lock (&hpet_lock, &lock_ctx);
    if (hpet_32bits) {
        amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
        amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
    } else {
        amd64_hpet_write64 (HPET_MCVR, value);
    }
    spin_unlock (&hpet_lock, &lock_ctx);
}
/*
 * Busy-wait for at least `us` microseconds using the HPET main counter. The wait can
 * last longer than requested, because \ref hpet_lock is taken on every counter poll.
 *
 * us - Minimum time to wait, in microseconds. No-op if the HPET has not been
 *      initialized yet (hpet_period_fs == 0).
 */
void amd64_hpet_sleep_micro (uint64_t us) {
    if (hpet_period_fs == 0)
        return;
    /*
     * ticks = (us in femtoseconds) / (femtoseconds per tick). The previous form
     * divided hpet_period_fs by 1000000 before using it, truncating the period and
     * skewing every sleep by up to ~1.5%. Splitting us into quotient and remainder
     * keeps the computation exact without overflowing 64 bits: the remainder is
     * < hpet_period_fs, and the HPET spec caps the period at 10^8 fs, so
     * remainder * 10^9 always fits in a uint64_t.
     */
    uint64_t ticks_to_wait = (us / hpet_period_fs) * 1000000000ULL
                             + ((us % hpet_period_fs) * 1000000000ULL) / hpet_period_fs;
    uint64_t start = amd64_hpet_read_counter ();
    for (;;) {
        uint64_t now = amd64_hpet_read_counter ();
        /* Unsigned subtraction stays correct across a counter wraparound. */
        if ((now - start) >= ticks_to_wait)
            break;
        __asm__ volatile ("pause" ::: "memory");
    }
}
/*
 * Initialize the HPET: locate it via the ACPI "HPET" table, map its MMIO registers
 * into the higher-half direct map, and restart the main counter from zero.
 *
 * Spins forever if the ACPI table is missing, since timekeeping depends on it.
 */
void amd64_hpet_init (void) {
    struct uacpi_table hpet_table;
    uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);
    if (status != UACPI_STATUS_OK) {
        DEBUG ("Could not find HPET table!\n");
        spin ();
    }
    struct acpi_hpet* hpet = (struct acpi_hpet*)hpet_table.virt_addr;
    hpet_paddr = (uintptr_t)hpet->address.address;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    /* Make the register page reachable through the HHDM. */
    mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
    uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
    /* COUNT_SIZE_CAP (bit 13) set means the main counter is 64-bit capable. */
    hpet_32bits = (caps & (1 << 13)) ? 0 : 1;
    /* Bits 63:32 of GCIDR hold the counter tick period in femtoseconds. */
    hpet_period_fs = (uint32_t)(caps >> 32);
    /* Halt the counter, zero it, then enable counting (GCR bit 0). */
    amd64_hpet_write64 (HPET_GCR, 0);
    amd64_hpet_write_counter (0);
    amd64_hpet_write64 (HPET_GCR, 1);
}

9
kernel/amd64/hpet.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef _KERNEL_AMD64_HPET_H
#define _KERNEL_AMD64_HPET_H
#include <libk/std.h>
/* Busy-wait for at least `us` microseconds using the HPET main counter. */
void amd64_hpet_sleep_micro (uint64_t us);
/* Locate the HPET via ACPI, map its registers and start the main counter. */
void amd64_hpet_init (void);
#endif // _KERNEL_AMD64_HPET_H

View File

@@ -1,46 +1,17 @@
#include <amd64/gdt.h>
#include <amd64/init.h>
#include <amd64/intr.h>
#include <amd64/smp.h>
#include <aux/compiler.h>
#include <libk/std.h>
#include <libk/string.h>
#include <amd64/init.h>
#include <amd64/tss.h>
#include <amd64/debug.h>
#include <amd64/intr.h>
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UCODE 0x18
#define GDT_UDATA 0x20
#define GDT_TSS 0x28
#define TSS 0x80
#define TSS_PRESENT 0x89
#define KSTACK_SIZE (8*1024)
struct gdt_entry {
uint16_t limitlow;
uint16_t baselow;
uint8_t basemid;
uint8_t access;
uint8_t gran;
uint8_t basehigh;
} __attribute__((packed));
struct gdt_ptr {
uint16_t limit;
uint64_t base;
} __attribute__((packed));
struct gdt_extended {
struct gdt_entry old[5];
struct gdt_entry tsslow;
struct gdt_entry tsshigh;
} __attribute__((packed));
__attribute__((aligned(16))) static volatile uint8_t kernel_stack[KSTACK_SIZE];
__attribute__((aligned(16))) static volatile struct gdt_extended gdt;
static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
uint32_t limit, uint8_t acc, uint8_t gran) {
/* Set a GDT entry */
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
uint8_t acc, uint8_t gran) {
ent->baselow = (base & 0xFFFF);
ent->basemid = (base >> 16) & 0xFF;
ent->basehigh = (base >> 24) & 0xFF;
@@ -49,59 +20,70 @@ static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
ent->access = acc;
}
static void amd64_gdt_init(void) {
volatile struct tss *tss = amd64_get_tss();
/* Initialize GDT and TSS structures for a given CPU */
static void amd64_gdt_init (struct cpu* cpu) {
volatile struct tss* tss = &cpu->tss;
volatile struct gdt_extended* gdt = &cpu->gdt;
memset((void *)&gdt, 0, sizeof(gdt));
memset((void *)kernel_stack, 0, sizeof(kernel_stack));
memset((void *)tss, 0, sizeof(*tss));
memset ((void*)gdt, 0, sizeof (*gdt));
memset ((void*)tss, 0, sizeof (*tss));
tss->iopb_off = sizeof(*tss);
tss->rsp0 = (uint64_t)((uintptr_t)kernel_stack + sizeof(kernel_stack));
tss->iopb_off = sizeof (*tss);
tss->rsp0 = (uint64_t)((uintptr_t)cpu->kernel_stack + sizeof (cpu->kernel_stack));
tss->ist[0] = (uint64_t)((uintptr_t)cpu->except_stack + sizeof (cpu->except_stack));
tss->ist[1] = (uint64_t)((uintptr_t)cpu->irq_stack + sizeof (cpu->irq_stack));
uint64_t tssbase = (uint64_t)&tss;
uint64_t tsslimit = sizeof(*tss) - 1;
uint64_t tssbase = (uint64_t)tss;
uint64_t tsslimit = sizeof (*tss) - 1;
amd64_gdt_set(&gdt.old[0], 0, 0, 0, 0);
amd64_gdt_set(&gdt.old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set(&gdt.old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set(&gdt.old[3], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set(&gdt.old[4], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set(&gdt.tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
uint32_t tssbasehigh = (tssbase >> 32);
gdt.tsshigh.limitlow = (tssbasehigh & 0xFFFF);
gdt.tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
gdt.tsshigh.basemid = 0;
gdt.tsshigh.basehigh = 0;
gdt.tsshigh.access = 0;
gdt.tsshigh.gran = 0;
gdt->tsshigh.limitlow = (tssbasehigh & 0xFFFF);
gdt->tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
gdt->tsshigh.basemid = 0;
gdt->tsshigh.basehigh = 0;
gdt->tsshigh.access = 0;
gdt->tsshigh.gran = 0;
/* Load GDTR */
struct gdt_ptr gdtr;
gdtr.limit = sizeof(gdt) - 1;
gdtr.base = (uint64_t)&gdt;
__asm__ volatile("lgdt %0" :: "m"(gdtr) : "memory");
gdtr.limit = sizeof (*gdt) - 1;
gdtr.base = (uint64_t)gdt;
__asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");
__asm__ volatile(
"pushq %[kcode]\n"
"lea 1f(%%rip), %%rax\n"
"pushq %%rax\n"
"lretq\n"
"1:\n"
"movw %[kdata], %%ax\n"
"movw %%ax, %%ds\n"
"movw %%ax, %%es\n"
"movw %%ax, %%ss\n"
:
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory"
);
/* Reload CS */
__asm__ volatile ("pushq %[kcode]\n"
"lea 1f(%%rip), %%rax\n"
"pushq %%rax\n"
"lretq\n"
"1:\n"
"movw %[kdata], %%ax\n"
"movw %%ax, %%ds\n"
"movw %%ax, %%es\n"
"movw %%ax, %%ss\n"
:
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory");
__asm__ volatile("ltr %0" :: "r"((uint16_t)GDT_TSS));
__asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
}
void amd64_init(void) {
amd64_gdt_init();
amd64_debug_init();
amd64_intr_init();
/*
* Initialize essentials (GDT, TSS, IDT) for a given CPU
*
* load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on
* the BSP
*/
void amd64_init (struct cpu* cpu, bool load_idt) {
amd64_gdt_init (cpu);
if (load_idt)
amd64_load_idt ();
else
amd64_intr_init ();
}

View File

@@ -1,6 +1,8 @@
#ifndef _KERNEL_AMD64_INIT_H
#define _KERNEL_AMD64_INIT_H
void amd64_init(void);
#include <amd64/smp.h>
void amd64_init (struct cpu* cpu, bool load_idt);
#endif // _KERNEL_AMD64_INIT_H

View File

@@ -1,23 +1,33 @@
#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <m/syscall_defs.h>
#include <sys/debug.h>
#include <amd64/intr.h>
#include <amd64/io.h>
#include <sys/irq.h>
#include <sys/smp.h>
#include <sys/spin.h>
#include <syscall/syscall.h>
/* 8259 PIC defs. */
#define PIC1 0x20
#define PIC2 0xA0
#define PIC1_CMD PIC1
#define PIC1_DATA (PIC1 + 1)
#define PIC2_CMD PIC2
#define PIC2_DATA (PIC2 + 1)
#define PIC_EOI 0x20
#define PIC1 0x20
#define PIC2 0xA0
#define PIC1_CMD PIC1
#define PIC1_DATA (PIC1 + 1)
#define PIC2_CMD PIC2
#define PIC2_DATA (PIC2 + 1)
#define PIC_EOI 0x20
#define ICW1_ICW4 0x01
#define ICW1_SINGLE 0x02
#define ICW1_INTVL4 0x04
#define ICW1_LEVEL 0x08
#define ICW1_INIT 0x10
#define ICW1_ICW4 0x01
#define ICW1_SINGLE 0x02
#define ICW1_INTVL4 0x04
#define ICW1_LEVEL 0x08
#define ICW1_INIT 0x10
#define ICW4_8086 0x01
#define ICW4_AUTO 0x02
@@ -25,12 +35,13 @@
#define ICW4_BUFMASER 0x0C
#define ICW4_SFNM 0x10
#define CASCADE_IRQ 2
#define CASCADE_IRQ 2
/* IDT defs. */
#define IDT_ENTRIES_MAX 256
/* 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table */
struct idt_entry {
uint16_t intrlow;
uint16_t kernel_cs;
@@ -39,119 +50,172 @@ struct idt_entry {
uint16_t intrmid;
uint32_t intrhigh;
uint32_t resv;
} __attribute__((packed));
} PACKED;
struct idt {
uint16_t limit;
uint64_t base;
} __attribute__((packed));
} PACKED;
__attribute__((aligned(16))) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
static volatile struct idt idt;
extern void amd64_spin(void);
/* Remaps and disables old 8259 PIC, since we'll be using APIC. */
static void amd64_init_pic(void) {
#define IO_OP(fn, ...) fn(__VA_ARGS__); amd64_io_wait()
static void amd64_init_pic (void) {
#define IO_OP(fn, ...) \
fn (__VA_ARGS__); \
amd64_io_wait ()
IO_OP(amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP(amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP(amd64_io_outb, PIC1_DATA, 0x20);
IO_OP(amd64_io_outb, PIC2_DATA, 0x28);
IO_OP (amd64_io_outb, PIC1_DATA, 0x20);
IO_OP (amd64_io_outb, PIC2_DATA, 0x28);
IO_OP(amd64_io_outb, PIC1_DATA, (1 << CASCADE_IRQ));
IO_OP(amd64_io_outb, PIC2_DATA, 2);
IO_OP (amd64_io_outb, PIC1_DATA, (1 << CASCADE_IRQ));
IO_OP (amd64_io_outb, PIC2_DATA, 2);
IO_OP(amd64_io_outb, PIC1_DATA, ICW4_8086);
IO_OP(amd64_io_outb, PIC2_DATA, ICW4_8086);
IO_OP (amd64_io_outb, PIC1_DATA, ICW4_8086);
IO_OP (amd64_io_outb, PIC2_DATA, ICW4_8086);
/* Disable */
IO_OP(amd64_io_outb, PIC1_DATA, 0xFF);
IO_OP(amd64_io_outb, PIC2_DATA, 0xFF);
IO_OP (amd64_io_outb, PIC1_DATA, 0xFF);
IO_OP (amd64_io_outb, PIC2_DATA, 0xFF);
#undef IO_OP
}
static void amd64_idt_set(volatile struct idt_entry *ent, uint64_t handler, uint8_t flags) {
/* Set IDT entry */
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
uint8_t ist) {
ent->intrlow = (handler & 0xFFFF);
ent->kernel_cs = 0x08; // GDT_KCODE (init.c)
ent->ist = 0;
ent->kernel_cs = GDT_KCODE;
ent->ist = ist;
ent->attrs = flags;
ent->intrmid = ((handler >> 16) & 0xFFFF);
ent->intrhigh = ((handler >> 32) & 0xFFFFFFFF);
ent->resv = 0;
}
static void amd64_idt_init(void) {
memset((void *)idt_entries, 0, sizeof(idt_entries));
/* Load the IDT */
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
#define IDT_ENTRY(n) \
extern void amd64_intr ## n(void); \
amd64_idt_set(&idt_entries[(n)], (uint64_t)&amd64_intr ## n, 0x8E)
IDT_ENTRY(0); IDT_ENTRY(1); IDT_ENTRY(2); IDT_ENTRY(3);
IDT_ENTRY(4); IDT_ENTRY(5); IDT_ENTRY(6); IDT_ENTRY(7);
IDT_ENTRY(8); IDT_ENTRY(9); IDT_ENTRY(10); IDT_ENTRY(11);
IDT_ENTRY(12); IDT_ENTRY(13); IDT_ENTRY(14); IDT_ENTRY(15);
IDT_ENTRY(16); IDT_ENTRY(17); IDT_ENTRY(18); IDT_ENTRY(19);
IDT_ENTRY(20); IDT_ENTRY(21); IDT_ENTRY(22); IDT_ENTRY(23);
IDT_ENTRY(24); IDT_ENTRY(25); IDT_ENTRY(26); IDT_ENTRY(27);
IDT_ENTRY(28); IDT_ENTRY(29); IDT_ENTRY(30); IDT_ENTRY(31);
IDT_ENTRY(32); IDT_ENTRY(33); IDT_ENTRY(34); IDT_ENTRY(35);
IDT_ENTRY(36); IDT_ENTRY(37); IDT_ENTRY(38); IDT_ENTRY(39);
IDT_ENTRY(40); IDT_ENTRY(41); IDT_ENTRY(42); IDT_ENTRY(43);
IDT_ENTRY(44); IDT_ENTRY(45); IDT_ENTRY(46); IDT_ENTRY(47);
/* Initialize IDT entries */
static void amd64_idt_init (void) {
memset ((void*)idt_entries, 0, sizeof (idt_entries));
#define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
/* clang-format off */
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
IDT_ENTRY (8, 0); IDT_ENTRY (9, 0); IDT_ENTRY (10, 0); IDT_ENTRY (11, 0);
IDT_ENTRY (12, 0); IDT_ENTRY (13, 0); IDT_ENTRY (14, 0); IDT_ENTRY (15, 0);
IDT_ENTRY (16, 0); IDT_ENTRY (17, 0); IDT_ENTRY (18, 0); IDT_ENTRY (19, 0);
IDT_ENTRY (20, 0); IDT_ENTRY (21, 0); IDT_ENTRY (22, 0); IDT_ENTRY (23, 0);
IDT_ENTRY (24, 0); IDT_ENTRY (25, 0); IDT_ENTRY (26, 0); IDT_ENTRY (27, 0);
IDT_ENTRY (28, 0); IDT_ENTRY (29, 0); IDT_ENTRY (30, 0); IDT_ENTRY (31, 0);
IDT_ENTRY (32, 1); IDT_ENTRY (33, 1); IDT_ENTRY (34, 1); IDT_ENTRY (35, 1);
IDT_ENTRY (36, 1); IDT_ENTRY (37, 1); IDT_ENTRY (38, 1); IDT_ENTRY (39, 1);
IDT_ENTRY (40, 1); IDT_ENTRY (41, 1); IDT_ENTRY (42, 1); IDT_ENTRY (43, 1);
IDT_ENTRY (44, 1); IDT_ENTRY (45, 1); IDT_ENTRY (46, 1); IDT_ENTRY (47, 1);
IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
IDT_ENTRY (TLB_SHOOTDOWN, 1);
IDT_ENTRY (CPU_REQUEST_SCHED, 1);
IDT_ENTRY (CPU_SPURIOUS, 1);
/* clang-format on */
#undef IDT_ENTRY
idt.limit = sizeof(idt_entries) - 1;
idt.limit = sizeof (idt_entries) - 1;
idt.base = (uint64_t)idt_entries;
__asm__ volatile("lidt %0" :: "m"(idt));
__asm__ volatile("sti");
amd64_load_idt ();
}
static void amd64_intr_exception(struct saved_regs *regs) {
DEBUG("cpu exception %lu (%lu)\n", regs->trap, regs->error);
/* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */
static void amd64_intr_exception (struct saved_regs* regs) {
DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);
uint64_t cr2;
__asm__ volatile("movq %%cr2, %0" : "=r"(cr2));
__asm__ volatile ("movq %%cr2, %0" : "=r"(cr2));
uint64_t cr3;
__asm__ volatile("movq %%cr3, %0" : "=r"(cr3));
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3));
debugprintf(
"r15=%016lx r14=%016lx r13=%016lx\n"
"r12=%016lx r11=%016lx r10=%016lx\n"
"r9 =%016lx r8 =%016lx rbp=%016lx\n"
"rdi=%016lx rsi=%016lx rdx=%016lx\n"
"rcx=%016lx rax=%016lx trp=%016lx\n"
"err=%016lx rip=%016lx cs =%016lx\n"
"rfl=%016lx rsp=%016lx ss =%016lx\n"
"cr2=%016lx cr3=%016lx rbx=%016lx\n",
regs->r15, regs->r14, regs->r13,
regs->r12, regs->r11, regs->r10,
regs->r9, regs->r8, regs->rbp,
regs->rdi, regs->rsi, regs->rdx,
regs->rcx, regs->rax, regs->trap,
regs->error, regs->rip, regs->cs,
regs->rflags, regs->rsp, regs->ss,
cr2, cr3, regs->rbx
);
debugprintf ("r15=%016lx r14=%016lx r13=%016lx\n"
"r12=%016lx r11=%016lx r10=%016lx\n"
"r9 =%016lx r8 =%016lx rbp=%016lx\n"
"rdi=%016lx rsi=%016lx rdx=%016lx\n"
"rcx=%016lx rax=%016lx trp=%016lx\n"
"err=%016lx rip=%016lx cs =%016lx\n"
"rfl=%016lx rsp=%016lx ss =%016lx\n"
"cr2=%016lx cr3=%016lx rbx=%016lx\n",
regs->r15, regs->r14, regs->r13, regs->r12, regs->r11, regs->r10, regs->r9, regs->r8,
regs->rbp, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->rax, regs->trap,
regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
regs->rbx);
amd64_spin();
}
void amd64_intr_handler(void *stack_ptr) {
struct saved_regs *regs = stack_ptr;
if (regs->trap <= 31) {
amd64_intr_exception(regs);
if (regs->cs == (GDT_UCODE | 0x03)) {
proc_kill (thiscpu->proc_current);
} else {
DEBUG("unknown trap %lu\n", regs->trap);
spin ();
}
}
void amd64_intr_init(void) {
amd64_init_pic();
amd64_idt_init();
/*
 * Common interrupt entry point, called from the assembly stubs with a pointer to the
 * saved register frame (struct saved_regs).
 *
 * Snapshots the register frame into the current process's pdata.regs (presumably for
 * later resume/inspection by the scheduler — the consumer is outside this file), then
 * either handles a CPU exception (vectors 0-31) or acknowledges the Local APIC and
 * dispatches a registered IRQ handler.
 */
void amd64_intr_handler (void* stack_ptr) {
    spin_lock_ctx_t ctxcpu, ctxpr;
    /* An interrupt may arrive while a non-kernel page directory is active. */
    amd64_load_kernel_cr3 ();
    struct saved_regs* regs = stack_ptr;
    /* Lock order: CPU lock first, then the process lock. */
    spin_lock (&thiscpu->lock, &ctxcpu);
    struct proc* proc_current = thiscpu->proc_current;
    spin_lock (&proc_current->lock, &ctxpr);
    memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));
    spin_unlock (&proc_current->lock, &ctxpr);
    spin_unlock (&thiscpu->lock, &ctxcpu);
    if (regs->trap <= 31) {
        amd64_intr_exception (regs);
    } else {
        /* Acknowledge the Local APIC, then dispatch any registered handler. */
        amd64_lapic_eoi ();
        struct irq* irq = irq_find (regs->trap);
        if (irq != NULL) {
            irq->func (irq->arg, stack_ptr);
        }
    }
}
/*
 * Initialize interrupt handling: remap and mask the legacy 8259 PIC first (the APIC
 * is used instead), then build and load the IDT.
 */
void amd64_intr_init (void) {
    amd64_init_pic ();
    amd64_idt_init ();
}
/* Aux. */
/*
 * Save RFLAGS of the current CPU and disable interrupts. The pushfq happens before
 * the cli, so the returned value reflects the interrupt state prior to disabling.
 */
static uint64_t amd64_irq_save_flags (void) {
    uint64_t rflags;
    __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
    return rflags;
}
/*
 * Restore the interrupt state from a saved RFLAGS value: re-enable interrupts only if
 * the IF bit (bit 9) was set. Never disables interrupts.
 */
static void amd64_irq_restore_flags (uint64_t rflags) {
    if (rflags & (1ULL << 9))
        __asm__ volatile ("sti");
}
/* Arch hook: save the current interrupt state into *ctx and disable interrupts. */
void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
/* Arch hook: restore the interrupt state previously saved by irq_save. */
void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }

View File

@@ -1,6 +1,7 @@
#ifndef _KERNEL_AMD64_INTR_H
#define _KERNEL_AMD64_INTR_H
#include <aux/compiler.h>
#include <libk/std.h>
struct saved_regs {
@@ -28,8 +29,9 @@ struct saved_regs {
uint64_t rflags;
uint64_t rsp;
uint64_t ss;
} __attribute__((packed));
} PACKED;
void amd64_intr_init(void);
void amd64_load_idt (void);
void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H

12
kernel/amd64/intr_defs.h Normal file
View File

@@ -0,0 +1,12 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H
/* Definitions for custom, nonstandard IDT entries. They have to be remapped by
 * amd64_resolve_irq into legacy IRQs. */
/* Vector for the Local APIC timer driving scheduler preemption. */
#define SCHED_PREEMPT_TIMER 80
/* IPI vector asking a remote CPU to flush its TLB. */
#define TLB_SHOOTDOWN 81
/* IPI vector asking a remote CPU to enter the scheduler. */
#define CPU_REQUEST_SCHED 82
/* Spurious-interrupt vector (0xFF, also programmed into the Local APIC SIVR). */
#define CPU_SPURIOUS 255
#endif // _KERNEL_AMD64_INTR_DEFS_H

View File

@@ -1,62 +1,47 @@
.extern amd64_intr_handler
#include <amd64/intr_defs.h>
#include <amd64/regsasm.h>
dupa:
jmp dupa
.extern amd64_intr_handler
#define err(z) \
pushq $z;
#define no_err(z) \
pushq $0; \
pushq $0; \
pushq $z;
#define push_regs \
pushq %rax; \
pushq %rcx; \
pushq %rdx; \
pushq %rsi; \
pushq %rdi; \
pushq %rbp; \
pushq %rbx; \
pushq %r8; \
pushq %r9; \
pushq %r10; \
pushq %r11; \
pushq %r12; \
pushq %r13; \
pushq %r14; \
pushq %r15;
#define pop_regs \
popq %r15; \
popq %r14; \
popq %r13; \
popq %r12; \
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
pushq %rbx; \
popq %rbp; \
popq %rdi; \
popq %rsi; \
popq %rdx; \
popq %rcx; \
popq %rax;
#define make_intr_stub(x, n) \
.global amd64_intr ## n; \
amd64_intr ## n:; \
x(n); \
cli; \
push_regs; \
cld; \
movq %rsp, %rdi; \
andq $~0xF, %rsp; \
callq amd64_intr_handler; \
movq %rdi, %rsp; \
pop_regs; \
addq $16, %rsp; \
#define make_intr_stub(x, n) \
.global amd64_intr ## n; \
amd64_intr ## n:; \
x(n); \
cli; \
; \
push_regs; \
; \
movw $0x10, %ax; \
movw %ax, %ds; \
movw %ax, %es; \
; \
cld; \
; \
movq %rsp, %rdi; \
; \
movq %cr3, %rax; pushq %rax; \
; \
movq %rsp, %rbp; \
; \
subq $8, %rsp; \
andq $-16, %rsp; \
; \
callq amd64_intr_handler; \
; \
movq %rbp, %rsp; \
; \
popq %rax; movq %rax, %cr3; \
; \
pop_regs; \
addq $16, %rsp; \
; \
iretq;
@@ -108,3 +93,8 @@ make_intr_stub(no_err, 44)
make_intr_stub(no_err, 45)
make_intr_stub(no_err, 46)
make_intr_stub(no_err, 47)
make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN)
make_intr_stub(no_err, CPU_REQUEST_SCHED)
make_intr_stub(no_err, CPU_SPURIOUS)

View File

@@ -1,54 +1,51 @@
#include <libk/std.h>
#include <amd64/io.h>
#include <libk/std.h>
void amd64_io_outb(uint16_t port, uint8_t v) {
__asm__ volatile("outb %1, %0" :: "dN"(port), "a"(v));
/// Perform outb instruction (send 8-bit int)
void amd64_io_outb (uint16_t port, uint8_t v) {
__asm__ volatile ("outb %1, %0" ::"dN"(port), "a"(v));
}
void amd64_io_outw(uint16_t port, uint16_t v) {
__asm__ volatile("outw %%ax, %%dx" :: "a"(v), "d"(port));
/// Perform outw instruction (send 16-bit int)
void amd64_io_outw (uint16_t port, uint16_t v) {
__asm__ volatile ("outw %%ax, %%dx" ::"a"(v), "d"(port));
}
void amd64_io_outl(uint16_t port, uint32_t v) {
__asm__ volatile("outl %%eax, %%dx" :: "d"(port), "a"(v));
/// Perform outl instruction (send 32-bit int)
void amd64_io_outl (uint16_t port, uint32_t v) {
__asm__ volatile ("outl %%eax, %%dx" ::"d"(port), "a"(v));
}
void amd64_io_outsw(uint16_t port, const void *addr, int cnt) {
__asm__ volatile(
"cld; rep outsw"
: "+S"(addr), "+c"(cnt)
: "d"(port)
: "memory", "cc"
);
/// Perform outsw instruction (send a string)
void amd64_io_outsw (uint16_t port, const void* addr, int cnt) {
__asm__ volatile ("cld; rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}
uint8_t amd64_io_inb(uint16_t port) {
/// Perform inb instruction (receive 8-bit int)
uint8_t amd64_io_inb (uint16_t port) {
uint8_t r;
__asm__ volatile("inb %1, %0" : "=a"(r) : "dN"(port));
__asm__ volatile ("inb %1, %0" : "=a"(r) : "dN"(port));
return r;
}
uint16_t amd64_io_inw(uint16_t port) {
/// Perform inw instruction (receive 16-bit int)
uint16_t amd64_io_inw (uint16_t port) {
uint16_t r;
__asm__ volatile("inw %%dx, %%ax" : "=a"(r) : "d"(port));
__asm__ volatile ("inw %%dx, %%ax" : "=a"(r) : "d"(port));
return r;
}
uint32_t amd64_io_inl(uint16_t port) {
/// Perform inl instruction (receive 32-bit int)
uint32_t amd64_io_inl (uint16_t port) {
uint32_t r;
__asm__ volatile("inl %%dx, %%eax" : "=a"(r) : "d"(port));
__asm__ volatile ("inl %%dx, %%eax" : "=a"(r) : "d"(port));
return r;
}
void amd64_io_insw(uint16_t port, void *addr, int cnt) {
__asm__ volatile(
"cld; rep insw"
: "+D"(addr), "+c"(cnt)
: "d"(port)
: "memory", "cc"
);
/// Perform insw instruction (receive a string)
void amd64_io_insw (uint16_t port, void* addr, int cnt) {
__asm__ volatile ("cld; rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}
void amd64_io_wait(void) {
amd64_io_outb(0x80, 0);
}
/// output a byte on port 0x80, which does a small IO delay
void amd64_io_wait (void) { amd64_io_outb (0x80, 0); }

View File

@@ -3,14 +3,14 @@
#include <libk/std.h>
void amd64_io_outb(uint16_t port, uint8_t v);
void amd64_io_outw(uint16_t port, uint16_t v);
void amd64_io_outl(uint16_t port, uint32_t v);
void amd64_io_outsw(uint16_t port, const void *addr, int cnt);
uint8_t amd64_io_inb(uint16_t port);
uint16_t amd64_io_inw(uint16_t port);
uint32_t amd64_io_inl(uint16_t port);
void amd64_io_insw(uint16_t port, void *addr, int cnt);
void amd64_io_wait(void);
void amd64_io_outb (uint16_t port, uint8_t v);
void amd64_io_outw (uint16_t port, uint16_t v);
void amd64_io_outl (uint16_t port, uint32_t v);
void amd64_io_outsw (uint16_t port, const void* addr, int cnt);
uint8_t amd64_io_inb (uint16_t port);
uint16_t amd64_io_inw (uint16_t port);
uint32_t amd64_io_inl (uint16_t port);
void amd64_io_insw (uint16_t port, void* addr, int cnt);
void amd64_io_wait (void);
#endif // _KERNEL_AMD64_IO_H

321
kernel/amd64/mm.c Normal file
View File

@@ -0,0 +1,321 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/pmm.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#define AMD64_PG_PRESENT (1 << 0)
#define AMD64_PG_RW (1 << 1)
#define AMD64_PG_USER (1 << 2)
#define AMD64_PG_HUGE (1 << 7)
/* Auxiliary struct for page directory walking: the four table indices (one per
 * paging level) that a virtual address decomposes into. See amd64_mm_page_index. */
struct pg_index {
    uint16_t pml4, pml3, pml2, pml1;
} PACKED;
/* Kernel page directory */
static struct pd kernel_pd;
static spin_lock_t kernel_pd_lock;
/* Acquire the lock protecting the kernel page directory. */
void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
/*
 * Release the lock protecting the kernel page directory.
 *
 * Fix: this previously called spin_lock () again instead of spin_unlock (), so the
 * first attempt to unlock would self-deadlock on the already-held spin lock.
 */
void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
/* Return the current value of the CR3 register (physical address of the active
 * top-level page table, plus low control bits). */
static uintptr_t amd64_current_cr3 (void) {
    uintptr_t cr3;
    __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
    return cr3;
}
/*
 * Switch to the kernel page directory unless it is already active. The compare
 * avoids a redundant CR3 write, which would needlessly flush TLB entries.
 */
void amd64_load_kernel_cr3 (void) {
    uintptr_t cr3 = amd64_current_cr3 ();
    if (cr3 != kernel_pd.cr3_paddr) {
        __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
    }
}
struct pd* mm_get_kernel_pd (void) { return &kernel_pd; }
/* Extract PML info from virtual address */
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
struct pg_index ret;
ret.pml4 = ((vaddr >> 39) & 0x1FF);
ret.pml3 = ((vaddr >> 30) & 0x1FF);
ret.pml2 = ((vaddr >> 21) & 0x1FF);
ret.pml1 = ((vaddr >> 12) & 0x1FF);
return ret;
}
/* Walk paging tables and allocate necessary structures along the way.
 * Returns the HHDM-mapped virtual pointer to the next-level table referenced
 * by table[entry_idx], or NULL when:
 *   - the entry is a huge-page mapping (cannot descend further),
 *   - the entry is not present and alloc is false,
 *   - a fresh table page could not be allocated.
 * Newly created intermediate entries are PRESENT|RW|USER; the final PTE flags
 * are what actually restrict access. */
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
    uint64_t entry = table[entry_idx];
    physaddr_t paddr;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    if (entry & AMD64_PG_PRESENT) {
        if (entry & AMD64_PG_HUGE)
            return NULL;
        /* mask off the low flag bits to recover the table's physical address */
        paddr = entry & ~0xFFFULL;
    } else {
        if (!alloc)
            return NULL;
        paddr = pmm_alloc (1);
        if (paddr == PMM_ALLOC_ERR)
            return NULL;
        /* zero the new table through the higher-half direct map */
        memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
        table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW | AMD64_PG_USER;
    }
    return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
}
/* A paging table is reclaimable when none of its 512 entries is present. */
static bool amd64_mm_is_table_empty (uint64_t* table) {
    size_t entry = 0;
    while (entry < 512) {
        if (table[entry] & AMD64_PG_PRESENT)
            return false;
        entry++;
    }
    return true;
}
/* Convert generic memory management subsystem flags into AMD64-specific flags.
 * Only PRESENT/RW/USER are translated; other MM_PG_* bits (if any) are dropped. */
static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
    uint64_t flags = 0;
    flags |= ((generic & MM_PG_PRESENT) ? AMD64_PG_PRESENT : 0);
    flags |= ((generic & MM_PG_RW) ? AMD64_PG_RW : 0);
    flags |= ((generic & MM_PG_USER) ? AMD64_PG_USER : 0);
    return flags;
}
/* Reload the current CR3 value ON A LOCAL CPU.
 * Rewriting CR3 with its own value flushes this CPU's non-global TLB entries;
 * other CPUs are unaffected (no shootdown here). */
static void amd64_reload_cr3 (void) {
    uint64_t cr3;
    __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
}
/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards.
 * Intermediate tables are allocated on demand; on allocation failure the
 * mapping is silently dropped (no error is reported to the caller --
 * NOTE(review): consider returning a status). Only the low 3 flag bits
 * (PRESENT|RW|USER) are written into the PTE. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);
    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
    if (pml3 == NULL)
        return;
    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
    if (pml2 == NULL)
        return;
    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
    if (pml1 == NULL)
        return;
    uint64_t* pte = &pml1[pg_index.pml1];
    *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
}
/* Map a page into kernel page directory and flush the local TLB.
 * NOTE(review): only the calling CPU's TLB is reloaded -- confirm other CPUs
 * pick up kernel mappings lazily or via amd64_load_kernel_cr3(). */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
    mm_map_page (&kernel_pd, paddr, vaddr, flags);
    amd64_reload_cr3 ();
}
/* Unmap a virtual address. TLB needs to be flushed afterwards.
 * After clearing the PTE, empty page tables are reclaimed bottom-up
 * (PML1 -> PML2 -> PML3) and their backing pages returned to the PMM.
 * NOTE(review): the freed table pages may still be cached in other CPUs'
 * TLB/paging-structure caches until they flush -- confirm callers perform a
 * shootdown before the pages are reused. */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);
    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        return;
    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        return;
    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        return;
    uint64_t* pte = &pml1[pg_index.pml1];
    if ((*pte) & AMD64_PG_PRESENT)
        *pte = 0;
    /* reclaim now-empty tables, walking back up the hierarchy */
    if (amd64_mm_is_table_empty (pml1)) {
        uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
        pmm_free (pml1_phys, 1);
        pml2[pg_index.pml2] = 0;
        if (amd64_mm_is_table_empty (pml2)) {
            uintptr_t pml2_phys = pml3[pg_index.pml3] & ~0xFFFULL;
            pmm_free (pml2_phys, 1);
            pml3[pg_index.pml3] = 0;
            if (amd64_mm_is_table_empty (pml3)) {
                uintptr_t pml3_phys = pml4[pg_index.pml4] & ~0xFFFULL;
                pmm_free (pml3_phys, 1);
                pml4[pg_index.pml4] = 0;
            }
        }
    }
}
/* Unmap a page from kernel page directory and flush the local TLB. */
void mm_unmap_kernel_page (uintptr_t vaddr) {
    mm_unmap_page (&kernel_pd, vaddr);
    amd64_reload_cr3 ();
}
/* Allocate a userspace-ready page directory.
 * Returns the physical address of a fresh PML4, or 0 on allocation failure.
 * The lower half (user VA space) is zeroed; the upper half is copied from the
 * kernel PML4 so every process shares the kernel's higher-half mappings.
 * NOTE(review): processes created before later kernel PML4-slot changes will
 * not see new top-level kernel entries -- confirm all kernel PML4 slots are
 * pre-populated at boot. */
uintptr_t mm_alloc_user_pd_phys (void) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    physaddr_t cr3 = pmm_alloc (1);
    if (cr3 == PMM_ALLOC_ERR)
        return 0;
    uint8_t* vu_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + cr3);
    memset ((void*)vu_cr3, 0, PAGE_SIZE / 2);
    uint8_t* vk_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + (uintptr_t)kernel_pd.cr3_paddr);
    memcpy (&vu_cr3[PAGE_SIZE / 2], &vk_cr3[PAGE_SIZE / 2], PAGE_SIZE / 2);
    return cr3;
}
/* Check whether vaddr is mapped (present) in pd.
 * Performs a read-only 4-level walk; huge-page entries make
 * amd64_mm_next_table() return NULL, so this reports false for addresses
 * covered by huge mappings -- NOTE(review): confirm no user mappings use
 * AMD64_PG_HUGE, otherwise they would be reported unmapped here. */
bool mm_validate (struct pd* pd, uintptr_t vaddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool ret = false;
    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);
    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        goto done;
    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        goto done;
    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        goto done;
    uint64_t pte = pml1[pg_index.pml1];
    ret = (pte & AMD64_PG_PRESENT) != 0;
done:
    return ret;
}
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
bool ok = true;
for (size_t i = 0; i < size; i++) {
ok = mm_validate (pd, vaddr + i);
if (!ok)
goto done;
}
done:
return ok;
}
/* Reverse-translate a physical address to a virtual address mapped in pd.
 * Exhaustively scans the whole 4-level hierarchy and returns the FIRST
 * virtual address whose PTE points at paddr's page (plus the page offset),
 * or 0 if no mapping exists. NOTE(review): 0 is ambiguous with a genuine
 * mapping at VA 0, and the scan is O(512^4) worst case -- keep off hot paths. */
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uintptr_t ret = 0;
    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    for (size_t i4 = 0; i4 < 512; i4++) {
        if (!(pml4[i4] & AMD64_PG_PRESENT))
            continue;
        uint64_t* pml3 = (uint64_t*)((uintptr_t)hhdm->offset + (pml4[i4] & ~0xFFFULL));
        for (size_t i3 = 0; i3 < 512; i3++) {
            if (!(pml3[i3] & AMD64_PG_PRESENT))
                continue;
            uint64_t* pml2 = (uint64_t*)((uintptr_t)hhdm->offset + (pml3[i3] & ~0xFFFULL));
            for (size_t i2 = 0; i2 < 512; i2++) {
                if (!(pml2[i2] & AMD64_PG_PRESENT))
                    continue;
                uint64_t* pml1 = (uint64_t*)((uintptr_t)hhdm->offset + (pml2[i2] & ~0xFFFULL));
                for (size_t i1 = 0; i1 < 512; i1++) {
                    if ((pml1[i1] & AMD64_PG_PRESENT) && ((pml1[i1] & ~0xFFFULL) == (paddr & ~0xFFFULL))) {
                        /* reassemble the canonical VA from the four indices */
                        struct pg_index idx = {i4, i3, i2, i1};
                        ret = (((uint64_t)idx.pml4 << 39) | ((uint64_t)idx.pml3 << 30) |
                               ((uint64_t)idx.pml2 << 21) | ((uint64_t)idx.pml1 << 12) | (paddr & 0xFFFULL));
                        goto done;
                    }
                }
            }
        }
    }
done:
    return ret;
}
/* Translate a virtual address to its physical address in pd.
 * Returns 0 when the address is unmapped (ambiguous with physical page 0).
 * Huge-page mappings translate as unmapped because amd64_mm_next_table()
 * refuses to descend through AMD64_PG_HUGE entries. */
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uintptr_t ret = 0;
    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);
    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        goto done;
    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        goto done;
    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        goto done;
    uint64_t pte = pml1[pg_index.pml1];
    if (!(pte & AMD64_PG_PRESENT))
        goto done;
    /* page frame from the PTE, page offset from the original address */
    ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
done:
    return ret;
}
/* Initialize essentials for the AMD64 memory management subsystem:
 * adopt the bootloader-provided page tables as the kernel page directory. */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -1,6 +1,15 @@
#ifndef _KERNEL_AMD64_MM_H
#define _KERNEL_AMD64_MM_H
#define PAGE_SIZE 4096
#include <libk/std.h>
#include <sync/spin_lock.h>
#define PAGE_SIZE 4096
struct pd {
uintptr_t cr3_paddr;
};
void amd64_load_kernel_cr3 (void);
#endif // _KERNEL_AMD64_MM_H

1093
kernel/amd64/msr-index.h Normal file

File diff suppressed because it is too large Load Diff

16
kernel/amd64/msr.c Normal file
View File

@@ -0,0 +1,16 @@
#include <amd64/msr.h>
#include <libk/std.h>
/// Read a model-specific register.
/// rdmsr returns the 64-bit value split across EDX:EAX; recombine it here.
/// Reading an unimplemented MSR raises #GP -- callers pass known-good indices.
uint64_t amd64_rdmsr (uint32_t msr) {
    uint32_t low, high;
    __asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr));
    return ((uint64_t)high << 32 | (uint64_t)low);
}
/// Write a model-specific register (value split into EDX:EAX as wrmsr expects).
void amd64_wrmsr (uint32_t msr, uint64_t value) {
    uint32_t low = (uint32_t)(value & 0xFFFFFFFF);
    uint32_t high = (uint32_t)(value >> 32);
    __asm__ volatile ("wrmsr" ::"c"(msr), "a"(low), "d"(high));
}

9
kernel/amd64/msr.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef _KERNEL_AMD64_MSR_H
#define _KERNEL_AMD64_MSR_H
#include <libk/std.h>
uint64_t amd64_rdmsr (uint32_t msr);
void amd64_wrmsr (uint32_t msr, uint64_t value);
#endif // _KERNEL_AMD64_MSR_H

138
kernel/amd64/proc.c Normal file
View File

@@ -0,0 +1,138 @@
#include <amd64/gdt.h>
#include <amd64/proc.h>
#include <aux/elf.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/proc.h>
/* Monotonic PID source shared by proc_from_elf() and proc_clone(). */
static atomic_int pids = 0;
/* Create a brand-new process (with its own fresh procgroup) from an in-memory
 * ELF image. Returns the initialized proc (state PROC_READY, user iret frame
 * pointing at the ELF entry) or NULL on allocation failure.
 * NOTE(review): pmm_alloc() for the kernel stack is not checked against
 * PMM_ALLOC_ERR, and the freshly created procgroup leaks if a later step
 * fails -- confirm/fix error handling. */
struct proc* proc_from_elf (uint8_t* elf_contents) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;
    memset (proc, 0, sizeof (*proc));
    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);
    proc->procgroup = procgroup_create ();
    if (proc->procgroup == NULL) {
        free (proc);
        return NULL;
    }
    procgroup_attach (proc->procgroup, proc);
    /* kernel stack: store the HHDM-mapped TOP of the allocation (stack grows down) */
    uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
    proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
    /* pre-allocate the whole user stack below PROC_USTACK_TOP */
    procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
                   MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
    proc->flags |= PROC_USTK_PREALLOC;
    struct elf_aux aux = proc_load_segments (proc, elf_contents);
    /* build the user-mode iret frame: ring-3 selectors, IF set (0x202) */
    proc->pdata.regs.ss = GDT_UDATA | 0x03;
    proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
    proc->pdata.regs.rflags = 0x202;
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = aux.entry;
    return proc;
}
/* Create a sibling process (thread-like) inside proto's procgroup.
 * The new proc shares proto's address space; it starts at `entry` on the
 * caller-provided stack `vstack_top`, with `argument_ptr` recorded for
 * retrieval via the argument_ptr() syscall. Returns NULL on OOM.
 * NOTE(review): pmm_alloc() for the kernel stack is not checked against
 * PMM_ALLOC_ERR -- TODO confirm/fix. */
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
                         uintptr_t argument_ptr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprt;
    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;
    memset (proc, 0, sizeof (*proc));
    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);
    /* attach to the prototype's group under its lock (shared address space) */
    spin_lock (&proto->lock, &ctxprt);
    proc->procgroup = proto->procgroup;
    procgroup_attach (proc->procgroup, proc);
    spin_unlock (&proto->lock, &ctxprt);
    uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
    proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
    /* ring-3 iret frame; IF set so the clone runs with interrupts enabled */
    proc->pdata.regs.ss = GDT_UDATA | 0x03;
    proc->pdata.regs.rsp = (uint64_t)vstack_top;
    proc->pdata.regs.rflags = 0x202;
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = (uint64_t)entry;
    proc->uvaddr_argument = argument_ptr;
    /* each clone gets its own TLS block copied from the group template */
    proc_init_tls (proc);
    return proc;
}
void proc_cleanup (struct proc* proc) {
proc_sqs_cleanup (proc);
proc_mutexes_cleanup (proc);
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
procgroup_detach (proc->procgroup, proc);
/* clean the process */
free (proc);
}
/* Set up this process's TLS block from the procgroup's template.
 * Maps a fresh user region, copies the initialization image into it, and
 * writes the TCB's user address at the first TCB slot (so %fs:0 yields the
 * TCB pointer, as userspace TLS code expects). fs_base is loaded into
 * MSR_FS_BASE by do_sched() on every switch to this process.
 * NOTE(review): procgroup_map() result is used unchecked -- confirm it
 * cannot fail here, and that tls_tmpl_size + sizeof(ptr) fits in
 * tls_tmpl_pages * PAGE_SIZE. */
void proc_init_tls (struct proc* proc) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    /* no template => the group's executable has no TLS segment */
    if (proc->procgroup->tls.tls_tmpl == NULL)
        return;
    size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
    size_t pages = proc->procgroup->tls.tls_tmpl_pages;
    uintptr_t tls_paddr;
    uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
    uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
    /* populate the block through the HHDM alias of its physical pages */
    uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
    memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
    memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
    uintptr_t ktcb = k_tls_addr + tls_size;   /* kernel view of the TCB */
    uintptr_t utcb = tls_vaddr + tls_size;    /* user view of the TCB */
    *(uintptr_t*)ktcb = utcb;                 /* TCB self-pointer */
    proc->pdata.fs_base = utcb;
    proc->pdata.tls_vaddr = tls_vaddr;
}

22
kernel/amd64/proc.h Normal file
View File

@@ -0,0 +1,22 @@
#ifndef _KERNEL_AMD64_PROC_H
#define _KERNEL_AMD64_PROC_H
#include <amd64/intr.h>
#include <libk/std.h>
/* Top of userspace process' stack */
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
/* Size of userspace process' stack */
#define USTACK_SIZE (256 * PAGE_SIZE)
/* proc_map () base address */
#define PROC_MAP_BASE 0x0000700000000000
/* Platform-dependent process data */
struct proc_platformdata {
struct saved_regs regs;
uintptr_t kernel_stack;
uint64_t fs_base;
uintptr_t tls_vaddr;
};
#endif // _KERNEL_AMD64_PROC_H

13
kernel/amd64/procgroup.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef _KERNEL_AMD64_PROCGROUP_H /* fixed guard typo: PROCGRPUP -> PROCGROUP */
#define _KERNEL_AMD64_PROCGROUP_H
#include <libk/std.h>
/* Per-procgroup TLS template: the master initialization image each member
 * process's TLS block is copied from (see proc_init_tls()). */
struct procgroup_tls {
    uint8_t* tls_tmpl;          /* TLS initialization image (NULL if no TLS) */
    size_t tls_tmpl_size;       /* bytes of initialized TLS data */
    size_t tls_tmpl_total_size; /* full TLS block size incl. zeroed tail -- TODO confirm */
    size_t tls_tmpl_pages;      /* pages mapped per process for the TLS block */
};
#endif // _KERNEL_AMD64_PROCGROUP_H

55
kernel/amd64/regsasm.h Normal file
View File

@@ -0,0 +1,55 @@
#ifndef _KERNEL_AMD64_REGSASM_H
#define _KERNEL_AMD64_REGSASM_H
#define push_regs \
pushq % rax; \
pushq % rcx; \
pushq % rdx; \
pushq % rsi; \
pushq % rdi; \
pushq % rbp; \
pushq % rbx; \
pushq % r8; \
pushq % r9; \
pushq % r10; \
pushq % r11; \
pushq % r12; \
pushq % r13; \
pushq % r14; \
pushq % r15;
#define pop_regs \
popq % r15; \
popq % r14; \
popq % r13; \
popq % r12; \
popq % r11; \
popq % r10; \
popq % r9; \
popq % r8; \
popq % rbx; \
popq % rbp; \
popq % rdi; \
popq % rsi; \
popq % rdx; \
popq % rcx; \
popq % rax;
#define pop_regs_skip_rax \
popq % r15; \
popq % r14; \
popq % r13; \
popq % r12; \
popq % r11; \
popq % r10; \
popq % r9; \
popq % r8; \
popq % rbx; \
popq % rbp; \
popq % rdi; \
popq % rsi; \
popq % rdx; \
popq % rcx; \
addq $8, % rsp
#endif // _KERNEL_AMD64_REGSASM_H

9
kernel/amd64/sched.S Normal file
View File

@@ -0,0 +1,9 @@
#include <amd64/regsasm.h>
/* amd64_do_sched(regs = %rdi, cr3 = %rsi)
 * Final leg of a context switch: install the target address space, point the
 * stack at the saved register frame, restore GPRs, and iretq into the process. */
.global amd64_do_sched
amd64_do_sched:
    movq %rsi, %cr3      /* switch address space (unconditional CR3 write flushes TLB) */
    movq %rdi, %rsp      /* rsp -> saved_regs frame */
    pop_regs             /* restore all general-purpose registers */
    addq $16, %rsp       /* skip the two leading frame slots -- presumably vector/error code; TODO confirm saved_regs layout */
    iretq                /* resume at saved cs:rip with saved rflags and ss:rsp */

7
kernel/amd64/sched.h Normal file
View File

@@ -0,0 +1,7 @@
#ifndef _KERNEL_AMD64_SCHED_H
#define _KERNEL_AMD64_SCHED_H
/// Perform process context switch
void amd64_do_sched (void* regs, void* cr3);
#endif // _KERNEL_AMD64_SCHED_H

23
kernel/amd64/sched1.c Normal file
View File

@@ -0,0 +1,23 @@
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <amd64/sched.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/smp.h>
/* Switch this CPU to `proc`: publish its kernel stack for interrupt/syscall
 * entry (TSS.rsp0 and the per-cpu syscall stack slot), load its TLS base,
 * then drop both locks and jump into the process via amd64_do_sched().
 * Does not return. `cpu_lock` (held by the caller) is released here because
 * amd64_do_sched() never comes back. */
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
    spin_lock_ctx_t ctxpr;
    spin_lock (&proc->lock, &ctxpr);
    thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
    thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
    amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
    spin_unlock (&proc->lock, &ctxpr);
    spin_unlock (cpu_lock, ctxcpu);
    amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
}

114
kernel/amd64/smp.c Normal file
View File

@@ -0,0 +1,114 @@
#include <amd64/apic.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscall.h>
/// Cpu ID counter
static atomic_uint cpu_counter = 0;
/// The CPUs
static struct cpu cpus[CPUS_MAX];
/// Number of APs still booting (counted down by amd64_smp_bootstrap)
static atomic_int cpu_init_count;
/// Allocate a CPU structure for the calling CPU and publish it via GS_BASE
/// so thiscpu (cpu_get()) works from then on.
/// NOTE(review): no bound check against CPUS_MAX -- more than 32 CPUs would
/// index past the cpus array.
struct cpu* cpu_make (uint64_t lapic_id) {
    int id = atomic_fetch_add (&cpu_counter, 1);
    struct cpu* cpu = &cpus[id];
    memset (cpu, 0, sizeof (*cpu));
    cpu->lock = SPIN_LOCK_INIT;
    cpu->id = id;
    cpu->lapic_id = lapic_id;
    /* per-cpu pointer lives in GS_BASE; syscall entry reads %gs:0 / %gs:8 */
    amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
    return cpu;
}
/// Fetch the calling CPU's structure (stored in GS_BASE by cpu_make).
struct cpu* cpu_get (void) {
    struct cpu* ptr = (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
    return ptr;
}
/* Ask `cpu` to reschedule: directly when it is the calling CPU, otherwise
 * via a CPU_REQUEST_SCHED IPI to its local APIC. */
void cpu_request_sched (struct cpu* cpu) {
    if (cpu == thiscpu) {
        proc_sched ();
        return;
    }
    amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
}
/* Pick the CPU with the shortest run queue (simple linear scan; counts are
 * read atomically but the result may be stale by the time it is used). */
struct cpu* cpu_find_lightest (void) {
    struct cpu* cpu = &cpus[0];
    int load = atomic_load (&cpu->proc_run_q_count);
    for (unsigned int i = 1; i < cpu_counter; i++) {
        struct cpu* new_cpu = &cpus[i];
        int new_load = atomic_load (&new_cpu->proc_run_q_count);
        if (new_load < load) {
            load = new_load;
            cpu = new_cpu;
        }
    }
    return cpu;
}
/// Bootstrap code for non-BSP CPUs. Runs on the AP itself (Limine jumps
/// here): adopts the kernel address space, sets up GDT/IDT/syscalls/LAPIC,
/// signals readiness, then enters its per-CPU idle process and never returns.
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
    amd64_load_kernel_cr3 ();
    struct cpu* cpu = cpu_make (mp_info->lapic_id);
    amd64_init (cpu, true); /* gdt + idt */
    syscall_init ();
    amd64_lapic_init (1000);
    DEBUG ("CPU %u is online!\n", thiscpu->id);
    atomic_fetch_sub (&cpu_init_count, 1);
    /* per-CPU idle loop: spawn spin.exe from the ramdisk and switch to it */
    struct proc* spin_proc = proc_spawn_rd ("spin.exe");
    proc_register (spin_proc, thiscpu);
    spin_lock_ctx_t ctxcpu;
    spin_lock (&spin_proc->cpu->lock, &ctxcpu);
    do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}
/// Initialize SMP subsystem for AMD64. Start AP CPUs and busy-wait until
/// every one has checked in through amd64_smp_bootstrap().
void smp_init (void) {
    amd64_lapic_init (1000);
    struct limine_mp_response* mp = limine_mp_request.response;
    cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
    for (size_t i = 0; i < mp->cpu_count; i++) {
        if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
            DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
            /* writing goto_address releases the AP into amd64_smp_bootstrap */
            mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
        }
    }
    while (atomic_load (&cpu_init_count) > 0)
        ;
    DEBUG ("All CPUs are online\n");
}

44
kernel/amd64/smp.h Normal file
View File

@@ -0,0 +1,44 @@
#ifndef _KERNEL_AMD64_SMP_H
#define _KERNEL_AMD64_SMP_H
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#define CPUS_MAX 32
/* Per-CPU state. The structure's address is stored in MSR_GS_BASE (see
 * cpu_make()), so the syscall entry stub addresses the first two fields as
 * %gs:0 and %gs:8 -- they MUST stay first and in this order. */
struct cpu {
    /* for syscall instruction */
    uintptr_t syscall_user_stack;   /* %gs:0 -- user rsp saved on syscall entry */
    uintptr_t syscall_kernel_stack; /* %gs:8 -- kernel rsp loaded on syscall entry */
    volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
    volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
    volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
    volatile struct gdt_extended gdt ALIGNED (16); /* per-CPU GDT (holds this CPU's TSS) */
    volatile struct tss tss;
    uintptr_t lapic_mmio_base;
    uint64_t lapic_ticks;
    uint64_t lapic_id;
    uint32_t id;                    /* dense index into the cpus[] array */
    spin_lock_t lock;               /* guards the run queue and proc_current */
    struct list_node_link* proc_run_q;
    struct proc* proc_current;
    atomic_int proc_run_q_count;    /* read locklessly by cpu_find_lightest() */
};
struct cpu* cpu_make (uint64_t lapic_id);
struct cpu* cpu_get (void);
void cpu_request_sched (struct cpu* cpu);
struct cpu* cpu_find_lightest (void);
/* Per-CPU accessor (reads MSR_GS_BASE on every use) */
#define thiscpu (cpu_get ())
#endif // _KERNEL_AMD64_SMP_H

View File

@@ -1,3 +1,4 @@
.global amd64_spin
amd64_spin:
hlt
jmp amd64_spin

View File

@@ -1,5 +1,4 @@
#include <sys/spin_lock.h>
void spin_lock_relax(void) {
__asm__ volatile("pause");
}
/// Relax the spinlock using AMD64 pause instruction
void spin_lock_relax (void) { __asm__ volatile ("pause"); }

View File

@@ -1,20 +1,40 @@
c += amd64/bootmain.c \
amd64/init.c \
amd64/tss.c \
amd64/io.c \
amd64/debug.c \
amd64/spin_lock.c \
amd64/intr.c
amd64/intr.c \
amd64/apic.c \
amd64/msr.c \
amd64/hpet.c \
amd64/mm.c \
amd64/time.c \
amd64/smp.c \
amd64/sched1.c \
amd64/proc.c \
amd64/syscall.c
S += amd64/intr_stub.S \
amd64/spin.S
amd64/spin.S \
amd64/sched.S \
amd64/syscallentry.S
o += amd64/bootmain.o \
amd64/init.o \
amd64/tss.o \
amd64/io.o \
amd64/debug.o \
amd64/spin_lock.o \
amd64/intr.o \
amd64/intr_stub.o \
amd64/spin.o
amd64/spin.o \
amd64/apic.o \
amd64/msr.o \
amd64/hpet.o \
amd64/mm.o \
amd64/time.o \
amd64/smp.o \
amd64/sched.o \
amd64/sched1.o \
amd64/proc.o \
amd64/syscall.o \
amd64/syscallentry.o

46
kernel/amd64/syscall.c Normal file
View File

@@ -0,0 +1,46 @@
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/string.h>
#include <m/status.h>
#include <m/syscall_defs.h>
#include <proc/proc.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <syscall/syscall.h>
/* Assembly entry stub (syscallentry.S); its address goes into MSR_LSTAR. */
extern void amd64_syscall_entry (void);
/* C-level syscall dispatcher, called from amd64_syscall_entry with a pointer
 * to the saved_regs frame built on the kernel stack. Runs on the kernel
 * address space. The register frame is snapshotted into the caller's proc so
 * the scheduler can resume it later. The return value becomes the user's rax
 * (the entry stub skips restoring rax -- see pop_regs_skip_rax). */
uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
    spin_lock_ctx_t ctxcpu, ctxpr;
    amd64_load_kernel_cr3 ();
    struct saved_regs* regs = stack_ptr;
    spin_lock (&thiscpu->lock, &ctxcpu);
    struct proc* caller = thiscpu->proc_current;
    spin_lock (&caller->lock, &ctxpr);
    memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
    spin_unlock (&caller->lock, &ctxpr);
    spin_unlock (&thiscpu->lock, &ctxcpu);
    /* System V syscall convention: number in rax, args rdi/rsi/rdx/r10/r8/r9 */
    int syscall_num = regs->rax;
    syscall_handler_func_t func = syscall_find_handler (syscall_num);
    if (func == NULL) {
        return -ST_SYSCALL_NOT_FOUND;
    }
    return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
}
/* Program the syscall/sysret MSRs on the calling CPU:
 * STAR selects kernel/user segment bases, LSTAR the entry point, and
 * SYSCALL_MASK clears RFLAGS.IF (bit 9) on entry so we start with
 * interrupts off; finally EFER.SCE enables the syscall instruction. */
void syscall_init (void) {
    amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
    amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
    amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
    amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
}

View File

@@ -0,0 +1,49 @@
#include <amd64/regsasm.h>
.extern amd64_syscall_dispatch
.global amd64_syscall_entry
/* syscall entry: on entry rcx = user rip, r11 = user rflags, rsp = USER stack.
 * Builds an iret-style saved_regs frame on the per-CPU kernel stack so the
 * same layout works for both syscalls and interrupts, then calls the C
 * dispatcher. NOTE(review): the user rsp is parked in the per-CPU %gs:0 slot,
 * so a nested syscall before it is consumed would clobber it -- interrupts
 * stay off (cli + SYSCALL_MASK IF) until sysretq, which avoids that. */
amd64_syscall_entry:
    cli
    movq %rsp, %gs:0        /* save user rsp in cpu->syscall_user_stack */
    movq %gs:8, %rsp        /* switch to cpu->syscall_kernel_stack */
    pushq $0x1b             /* user ss -- presumably GDT_UDATA|3; TODO confirm */
    pushq %gs:0             /* user rsp */
    pushq %r11              /* user rflags (placed in r11 by syscall) */
    pushq $0x23             /* user cs -- presumably GDT_UCODE|3; TODO confirm */
    pushq %rcx              /* user rip (placed in rcx by syscall) */
    pushq $0                /* error code slot (zero for syscalls) */
    pushq $0                /* vector slot (zero for syscalls) */
    push_regs               /* general-purpose registers -> saved_regs */
    movw $0x10, %ax         /* load kernel data selectors */
    movw %ax, %ds
    movw %ax, %es
    movw %ax, %ss
    cld                     /* SysV ABI requires DF clear before calling C */
    movq %rsp, %rdi         /* arg0 = saved_regs frame */
    movq %cr3, %rax; pushq %rax  /* remember caller's cr3 (dispatch loads kernel cr3) */
    movq %rsp, %rbp
    subq $8, %rsp
    andq $-16, %rsp         /* align stack to 16 bytes for the call */
    callq amd64_syscall_dispatch
    movq %rbp, %rsp
    popq %rbx; movq %rbx, %cr3   /* restore caller's address space */
    pop_regs_skip_rax       /* restore GPRs; rax keeps the dispatch return value */
    addq $56, %rsp          /* drop vector/error/rip/cs/rflags/rsp/ss slots */
    movq %gs:0, %rsp        /* back onto the user stack */
    sysretq                 /* return: rip from rcx, rflags from r11 */

6
kernel/amd64/time.c Normal file
View File

@@ -0,0 +1,6 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <sys/time.h>
/// Sleep for given amount of microseconds (busy-waits on the HPET; does not
/// yield the CPU, so keep waits short).
void sleep_micro (size_t us) { amd64_hpet_sleep_micro (us); }

View File

@@ -1,8 +0,0 @@
#include <libk/std.h>
#include <amd64/tss.h>
__attribute__((aligned(16))) static volatile struct tss tss;
volatile struct tss *amd64_get_tss(void) {
return &tss;
}

View File

@@ -1,8 +1,10 @@
#ifndef _KERNEL_AMD64_TSS_H
#define _KERNEL_AMD64_TSS_H
#include <aux/compiler.h>
#include <libk/std.h>
/// 64-bit TSS structure: https://wiki.osdev.org/Task_State_Segment
struct tss {
uint32_t resv0;
uint64_t rsp0;
@@ -13,8 +15,6 @@ struct tss {
uint64_t resv2;
uint16_t resv3;
uint16_t iopb_off;
} __attribute__((packed));
volatile struct tss *amd64_get_tss(void);
} PACKED;
#endif // _KERNEL_AMD64_TSS_H

2
kernel/amd64/vars.mk Normal file
View File

@@ -0,0 +1,2 @@
# make vars
PLATFORM_ACPI=1

9
kernel/aux/compiler.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef _KERNEL_AUX_COMPILER_H
#define _KERNEL_AUX_COMPILER_H
/* Shorthand wrappers for common GCC/Clang attributes. */
#define PACKED __attribute__ ((packed))            /* no inter-field padding */
#define ALIGNED(N) __attribute__ ((aligned ((N)))) /* minimum alignment N */
#define SECTION(name) __attribute__ ((section (name)))
#define UNUSED __attribute__ ((unused))
#endif // _KERNEL_AUX_COMPILER_H

4555
kernel/aux/elf.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +1,6 @@
include $(platform)/flags.mk
include generic/flags.mk
ifeq ($(PLATFORM_ACPI),1)
include uACPI/flags.mk
endif

View File

@@ -8,7 +8,7 @@ cflags += -nostdinc \
-Wextra \
-mcmodel=kernel
cflags += -isystem . -isystem c_headers/include
cflags += -isystem . -isystem ../include
cflags += -DPRINTF_INCLUDE_CONFIG_H=1 \
-D_ALLOC_SKIP_DEFINE

1
kernel/irq/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.o

46
kernel/irq/irq.c Normal file
View File

@@ -0,0 +1,46 @@
#include <irq/irq.h>
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#if defined(__x86_64__)
#include <amd64/apic.h>
#include <amd64/intr.h>
#endif
/* One slot per interrupt vector; NULL = no handler registered. */
struct irq* irq_table[0x100];
static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
/* Register `func(arg, regs)` as the handler for vector irq_num.
 * Returns false on allocation failure.
 * NOTE(review): re-attaching to an occupied vector overwrites the slot and
 * leaks the previous struct irq; there is also no irq_detach() -- confirm
 * this is intentional. */
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
    spin_lock_ctx_t ctxiqa;
    struct irq* irq = malloc (sizeof (*irq));
    if (irq == NULL) {
        return false;
    }
    irq->func = func;
    irq->arg = arg;
    irq->irq_num = irq_num;
    spin_lock (&irqs_lock, &ctxiqa);
    irq_table[irq_num] = irq;
    spin_unlock (&irqs_lock, &ctxiqa);
    return true;
}
/* Look up the handler for a vector (NULL when none is attached). */
struct irq* irq_find (uint32_t irq_num) {
    spin_lock_ctx_t ctxiqa;
    spin_lock (&irqs_lock, &ctxiqa);
    struct irq* irq = irq_table[irq_num];
    spin_unlock (&irqs_lock, &ctxiqa);
    return irq;
}

20
kernel/irq/irq.h Normal file
View File

@@ -0,0 +1,20 @@
#ifndef _KERNEL_IRQ_IRQ_H
#define _KERNEL_IRQ_IRQ_H
#include <libk/list.h>
#include <libk/std.h>
typedef void (*irq_func_t) (void* arg, void* regs);
struct irq {
struct list_node_link irqs_link;
irq_func_t func;
void* arg;
uint32_t irq_num;
};
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
struct irq* irq_find (uint32_t irq_num);
#endif // _KERNEL_IRQ_IRQ_H

3
kernel/irq/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += irq/irq.c
o += irq/irq.o

View File

@@ -2,7 +2,7 @@
#define _KERNEL_LIBK_ALIGN_H
#define div_align_up(x, div) (((x) + (div) - 1) / (div))
#define align_down(x, a) ((x) & ~((a) - 1))
#define align_up(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
#define align_down(x, a) ((x) & ~((a) - 1))
#define align_up(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
#endif // _KERNEL_LIBK_ALIGN_H

15
kernel/libk/assert.h Normal file
View File

@@ -0,0 +1,15 @@
#ifndef _KERNEL_LIBK_ASSERT_H
#define _KERNEL_LIBK_ASSERT_H
#include <sys/debug.h> /* DEBUG(); previously relied on the includer providing it */
#include <sys/spin.h>
/* Kernel assert: on failure, log the stringized expression and halt the CPU
 * forever via spin(). FIX: message read "%s ssertion failed" (typo). */
#define assert(x) \
    do { \
        if (!(x)) { \
            DEBUG ("%s assertion failed\n", #x); \
            spin (); \
            __builtin_unreachable (); \
        } \
    } while (0)
#endif // _KERNEL_LIBK_ASSERT_H

View File

@@ -1,74 +1,74 @@
#include <libk/std.h>
#include <libk/bm.h>
#include <libk/std.h>
#include <libk/string.h>
void bm_init(struct bm *bm, uint8_t *base, size_t nbits) {
void bm_init (struct bm* bm, uint8_t* base, size_t nbits) {
bm->base = base;
bm->nbits = nbits;
memset(bm->base, 0, (nbits + 7) / 8);
memset (bm->base, 0, (nbits + 7) / 8);
}
/*
* Set a bit in a bitmap.
*/
void bm_set(struct bm *bm, size_t k) {
void bm_set (struct bm* bm, size_t k) {
if (k >= bm->nbits)
return;
uint8_t *b = (uint8_t *)((uintptr_t)bm->base + (k / 8));
uint8_t* b = (uint8_t*)((uintptr_t)bm->base + (k / 8));
*b = ((*b) | (1 << (k % 8)));
}
/*
* Clear a bit in a bitmap.
*/
void bm_clear(struct bm *bm, size_t k) {
void bm_clear (struct bm* bm, size_t k) {
if (k >= bm->nbits)
return;
uint8_t *b = (uint8_t *)((uintptr_t)bm->base + (k / 8));
uint8_t* b = (uint8_t*)((uintptr_t)bm->base + (k / 8));
*b = ((*b) & ~(1 << (k % 8)));
}
/*
* Test (true/false) a bit in a bitmap.
*/
bool bm_test(struct bm *bm, size_t k) {
bool bm_test (struct bm* bm, size_t k) {
if (k >= bm->nbits)
return false;
uint8_t *b = (uint8_t *)((uintptr_t)bm->base + (k / 8));
uint8_t* b = (uint8_t*)((uintptr_t)bm->base + (k / 8));
return (*b) & (1 << (k % 8));
}
/*
* Set a range of bits in a bitmap. if starting bit is out of range, we fail.
*/
bool bm_set_region(struct bm *bm, size_t k, size_t m) {
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
bool bm_set_region (struct bm* bm, size_t k, size_t m) {
if (((k + m) > bm->nbits) || (k + m) < k)
return false;
for (size_t i = k; i < m; i++) {
bool taken = bm_test(bm, i);
for (size_t i = k; i < (k + m); i++) {
bool taken = bm_test (bm, i);
if (taken)
return false;
}
for (size_t i = k; i < m; i++)
bm_set(bm, i);
for (size_t i = k; i < (k + m); i++)
bm_set (bm, i);
return true;
}
/*
* Clear a range of bits in a bitmap. starting bit must be in range.
*/
void bm_clear_region(struct bm *bm, size_t k, size_t m) {
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
void bm_clear_region (struct bm* bm, size_t k, size_t m) {
if (((k + m) > bm->nbits) || (k + m) < k)
return;
for (size_t i = k; i < m; i++)
bm_clear(bm, i);
for (size_t i = k; i < (k + m); i++)
bm_clear (bm, i);
}
/*
@@ -77,12 +77,12 @@ void bm_clear_region(struct bm *bm, size_t k, size_t m) {
* are out of range, act as if the bits are set / bitmap is full - this is
* useful for implementing the physical memory manager algorithm.
*/
bool bm_test_region(struct bm *bm, size_t k, size_t m) {
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
bool bm_test_region (struct bm* bm, size_t k, size_t m) {
if (((k + m) > bm->nbits) || (k + m) < k)
return true;
for (size_t i = k; i < m; i++) {
bool test = bm_test(bm, i);
for (size_t i = k; i < (k + m); i++) {
bool test = bm_test (bm, i);
if (test)
return true;
}

View File

@@ -4,16 +4,16 @@
#include <libk/std.h>
struct bm {
uint8_t *base;
uint8_t* base;
size_t nbits;
};
void bm_init(struct bm *bm, uint8_t *base, size_t nbits);
void bm_set(struct bm *bm, size_t k);
bool bm_set_region(struct bm *bm, size_t k, size_t m);
void bm_clear(struct bm *bm, size_t k);
void bm_clear_region(struct bm *bm, size_t k, size_t m);
bool bm_test(struct bm *bm, size_t k);
bool bm_test_region(struct bm *bm, size_t k, size_t m);
void bm_init (struct bm* bm, uint8_t* base, size_t nbits);
void bm_set (struct bm* bm, size_t k);
bool bm_set_region (struct bm* bm, size_t k, size_t m);
void bm_clear (struct bm* bm, size_t k);
void bm_clear_region (struct bm* bm, size_t k, size_t m);
bool bm_test (struct bm* bm, size_t k);
bool bm_test_region (struct bm* bm, size_t k, size_t m);
#endif // _KERNEL_LIBK_BM_H

170
kernel/libk/list.h Normal file
View File

@@ -0,0 +1,170 @@
#ifndef _KERNEL_LIBK_LIST_H
#define _KERNEL_LIBK_LIST_H
struct list_node_link {
struct list_node_link* next;
struct list_node_link* prev;
};
#define list_entry(ptr, type, member) ((type*)((char*)(ptr) - offsetof (type, member)))
#define list_append(head, new) \
do { \
if ((new) != NULL) { \
(new)->next = NULL; \
if ((head) != NULL) { \
struct list_node_link* __tmp = (head); \
while (__tmp->next != NULL) { \
__tmp = __tmp->next; \
} \
__tmp->next = (new); \
(new)->prev = __tmp; \
} else { \
(new)->prev = NULL; \
(head) = (new); \
} \
} \
} while (0)
#define list_prepend(head, new) \
do { \
if ((new) != NULL) { \
(new)->prev = NULL; \
(new)->next = (head); \
if ((head) != NULL) { \
(head)->prev = (new); \
} \
(head) = (new); \
} \
} while (0)
#define list_remove(head, ele) \
do { \
if ((ele) != NULL) { \
if ((ele)->prev != NULL) { \
(ele)->prev->next = (ele)->next; \
} else { \
(head) = (ele)->next; \
} \
if ((ele)->next != NULL) { \
(ele)->next->prev = (ele)->prev; \
} \
(ele)->next = NULL; \
(ele)->prev = NULL; \
} \
} while (0)
#define list_find(head, out, propname, propvalue) \
do { \
(out) = NULL; \
struct list_node_link* __tmp = (head); \
while (__tmp) { \
if (__tmp->propname == (propvalue)) { \
(out) = __tmp; \
break; \
} \
__tmp = __tmp->next; \
} \
} while (0)
#define list_foreach(head, var, tmp) \
for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
var = tmp, tmp = (var ? var->next : NULL))
#define list_foreach_index(head, var, tmp, idx) \
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
#define list_foreach_index_limit(head, var, tmp, idx, max) \
for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
var = tmp, tmp = (var ? var->next : NULL), (idx)++)
#define list_back(head, out) \
do { \
(out) = NULL; \
if ((head) != NULL) { \
struct list_node_link* __tmp = (head); \
while (__tmp->next != NULL) { \
__tmp = __tmp->next; \
} \
(out) = __tmp; \
} \
} while (0)
#define list_front(head, out) \
do { \
(out) = NULL; \
if ((head) != NULL) { \
struct list_node_link* __tmp = (head); \
while (__tmp->prev != NULL) { \
__tmp = __tmp->prev; \
} \
(out) = __tmp; \
} \
} while (0)
#define list_insert_after(head, pos, new) \
do { \
if ((pos) != NULL && (new) != NULL) { \
(new)->prev = (pos); \
(new)->next = (pos)->next; \
if ((pos)->next != NULL) { \
(pos)->next->prev = (new); \
} \
(pos)->next = (new); \
} else if ((pos) == NULL && (head) == NULL) { \
(new)->prev = NULL; \
(new)->next = NULL; \
(head) = (new); \
} \
} while (0)
#define list_insert_before(head, pos, new) \
do { \
if ((pos) != NULL && (new) != NULL) { \
(new)->next = (pos); \
(new)->prev = (pos)->prev; \
if ((pos)->prev != NULL) { \
(pos)->prev->next = (new); \
} else { \
(head) = (new); \
} \
(pos)->prev = (new); \
} else if ((pos) == NULL && (head) == NULL) { \
(new)->prev = NULL; \
(new)->next = NULL; \
(head) = (new); \
} \
} while (0)
/* Store in `out_idx` the zero-based position of node `ele` within the list
 * starting at `head`, or -1 when `ele` is not found. */
#define list_index_of(head, ele, out_idx) \
    do { \
        (out_idx) = -1; \
        int __i = 0; \
        struct list_node_link* __n = (head); \
        for (; __n != NULL; __n = __n->next, __i++) { \
            if (__n == (ele)) { \
                (out_idx) = __i; \
                break; \
            } \
        } \
    } while (0)
/* Store in `out_idx` the zero-based position of the first node whose
 * `propname` field equals `propvalue`, or -1 when no node matches. */
#define list_index_of_prop(head, propname, propvalue, out_idx) \
    do { \
        (out_idx) = -1; \
        int __i = 0; \
        struct list_node_link* __n = (head); \
        for (; __n != NULL; __n = __n->next, __i++) { \
            if (__n->propname == (propvalue)) { \
                (out_idx) = __i; \
                break; \
            } \
        } \
    } while (0)
#endif // _KERNEL_LIBK_LIST_H

View File

@@ -1,3 +1,2 @@
void putchar_(char x) { (void)x; }
void putchar_ (char x) { (void)x; }

323
kernel/libk/rbtree.h Normal file
View File

@@ -0,0 +1,323 @@
#ifndef _KERNEL_LIBK_RBTREE_H
#define _KERNEL_LIBK_RBTREE_H
/* Intrusive red-black tree: embed a struct rb_node_link inside the payload
 * struct and recover the payload with rbtree_entry().  All operations are
 * macros so they work for any payload/key type without function pointers. */
struct rb_node_link {
    struct rb_node_link* left;
    struct rb_node_link* right;
    struct rb_node_link* parent;
    int color; /* RBTREE_RED or RBTREE_BLACK */
};
#define RBTREE_RED 0
#define RBTREE_BLACK 1
/* Raw field accessors; `x` must be non-NULL. */
#define rbtree_parent(x) ((x)->parent)
#define rbtree_left(x) ((x)->left)
#define rbtree_right(x) ((x)->right)
#define rbtree_color(x) ((x)->color)
/* Map an embedded rb_node_link back to its enclosing payload struct.
 * NOTE(review): relies on offsetof — presumably pulled in via libk/std.h
 * (<stddef.h>); confirm this header is never included without it. */
#define rbtree_entry(node, type, member) ((type*)((char*)(node) - offsetof (type, member)))
/* NULL children count as black leaves. */
#define rbtree_node_color(x) ((x) ? (x)->color : RBTREE_BLACK)
/* Left-rotate around X: X's right child Y takes X's place and X becomes Y's
 * left child.  Y must be non-NULL.  *root_ptr is updated when X was the
 * root.  Standard rotation used by the insert/delete fixups. */
#define rbtree_rotate_left(root_ptr, x_node) \
    do { \
        struct rb_node_link* __x = (x_node); \
        struct rb_node_link* __y = __x->right; \
        __x->right = __y->left; \
        if (__y->left) \
            __y->left->parent = __x; \
        __y->parent = __x->parent; \
        if (!__x->parent) \
            *(root_ptr) = __y; \
        else if (__x == __x->parent->left) \
            __x->parent->left = __y; \
        else \
            __x->parent->right = __y; \
        __y->left = __x; \
        __x->parent = __y; \
    } while (0)
/* Right-rotate around Y: Y's left child X takes Y's place and Y becomes X's
 * right child.  X must be non-NULL.  *root_ptr is updated when Y was the
 * root.  Mirror image of rbtree_rotate_left. */
#define rbtree_rotate_right(root_ptr, y_node) \
    do { \
        struct rb_node_link* __y = (y_node); \
        struct rb_node_link* __x = __y->left; \
        __y->left = __x->right; \
        if (__x->right) \
            __x->right->parent = __y; \
        __x->parent = __y->parent; \
        if (!__y->parent) \
            *(root_ptr) = __x; \
        else if (__y == __y->parent->right) \
            __y->parent->right = __x; \
        else \
            __y->parent->left = __x; \
        __x->right = __y; \
        __y->parent = __x; \
    } while (0)
/* Restore red-black invariants after inserting the red node `z_node`
 * (CLRS RB-INSERT-FIXUP).  Walks upward while two reds are adjacent:
 * a red uncle means recolor and continue from the grandparent; a black
 * uncle means one or two rotations finish the repair.  The root is
 * forced black at the end. */
#define rbtree_insert_fixup(root_ptr, z_node) \
    do { \
        struct rb_node_link* __z = (z_node); \
        while (__z->parent && __z->parent->color == RBTREE_RED) { \
            if (__z->parent == __z->parent->parent->left) { \
                struct rb_node_link* __y = __z->parent->parent->right; /* uncle */ \
                if (rbtree_node_color (__y) == RBTREE_RED) { \
                    __z->parent->color = RBTREE_BLACK; \
                    __y->color = RBTREE_BLACK; \
                    __z->parent->parent->color = RBTREE_RED; \
                    __z = __z->parent->parent; \
                } else { \
                    if (__z == __z->parent->right) { \
                        __z = __z->parent; \
                        rbtree_rotate_left (root_ptr, __z); \
                    } \
                    __z->parent->color = RBTREE_BLACK; \
                    __z->parent->parent->color = RBTREE_RED; \
                    rbtree_rotate_right (root_ptr, __z->parent->parent); \
                } \
            } else { /* mirror: parent is a right child */ \
                struct rb_node_link* __y = __z->parent->parent->left; \
                if (rbtree_node_color (__y) == RBTREE_RED) { \
                    __z->parent->color = RBTREE_BLACK; \
                    __y->color = RBTREE_BLACK; \
                    __z->parent->parent->color = RBTREE_RED; \
                    __z = __z->parent->parent; \
                } else { \
                    if (__z == __z->parent->left) { \
                        __z = __z->parent; \
                        rbtree_rotate_right (root_ptr, __z); \
                    } \
                    __z->parent->color = RBTREE_BLACK; \
                    __z->parent->parent->color = RBTREE_RED; \
                    rbtree_rotate_left (root_ptr, __z->parent->parent); \
                } \
            } \
        } \
        (*(root_ptr))->color = RBTREE_BLACK; \
    } while (0)
/* Insert `node` into the tree ordered by the payload's `keyfield`
 * (payload recovered via rbtree_entry with `type`/`member`).  Standard
 * BST descent; duplicate keys are allowed and go into the right subtree.
 * The new node starts red and rbtree_insert_fixup rebalances. */
#define rbtree_insert(type, root_ptr, node, member, keyfield) \
    do { \
        struct rb_node_link** __link = (root_ptr); \
        struct rb_node_link* __parent = NULL; \
        struct rb_node_link* __new = (node); \
        type* __nobj = rbtree_entry (__new, type, member); \
        while (*__link) { \
            __parent = *__link; \
            type* __xobj = rbtree_entry (*__link, type, member); \
            if (__nobj->keyfield < __xobj->keyfield) \
                __link = &((*__link)->left); \
            else \
                __link = &((*__link)->right); \
        } \
        __new->parent = __parent; \
        __new->left = __new->right = NULL; \
        __new->color = RBTREE_RED; \
        *__link = __new; \
        rbtree_insert_fixup (root_ptr, __new); \
    } while (0)
/* Binary-search the tree for a payload whose `keyfield` equals `keyval`.
 * `out` receives a pointer to the payload struct (`type*`), or NULL when
 * no node matches.  With duplicate keys the first one met on the descent
 * is returned. */
#define rbtree_find(type, root_ptr, keyval, out, member, keyfield) \
    do { \
        (out) = NULL; \
        struct rb_node_link* __n = *(root_ptr); \
        while (__n != NULL) { \
            type* __e = rbtree_entry (__n, type, member); \
            if ((keyval) == __e->keyfield) { \
                (out) = __e; \
                break; \
            } \
            __n = ((keyval) < __e->keyfield) ? __n->left : __n->right; \
        } \
    } while (0)
/* Store in `out` the leftmost (minimum) node of the subtree rooted at
 * `node`, or NULL when `node` is NULL. */
#define rbtree_min(node, out) \
    do { \
        (out) = NULL; \
        struct rb_node_link* __m = (node); \
        if (__m != NULL) \
            for (; __m->left != NULL; __m = __m->left) \
                ; \
        (out) = __m; \
    } while (0)
/* Store in `out` the rightmost (maximum) node of the subtree rooted at
 * `node`, or NULL when `node` is NULL. */
#define rbtree_max(node, out) \
    do { \
        (out) = NULL; \
        struct rb_node_link* __m = (node); \
        if (__m != NULL) \
            for (; __m->right != NULL; __m = __m->right) \
                ; \
        (out) = __m; \
    } while (0)
/* Smallest / largest node of the whole tree (NULL when the tree is empty). */
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
#define rbtree_last(root_ptr, out) rbtree_max (*(root_ptr), out)
/* Replace the subtree rooted at U with the subtree rooted at V in U's
 * parent (or at the root).  V may be NULL.  U's own child/parent links are
 * left untouched; the caller (rbtree_delete) repairs those itself. */
#define rbtree_transplant(root_ptr, u_node, v_node) \
    do { \
        struct rb_node_link* __u = (u_node); \
        struct rb_node_link* __v = (v_node); \
        if (!__u->parent) \
            *(root_ptr) = __v; \
        else if (__u == __u->parent->left) \
            __u->parent->left = __v; \
        else \
            __u->parent->right = __v; \
        if (__v) \
            __v->parent = __u->parent; \
    } while (0)
/* Restore red-black invariants after removing a black node.  `x_node` is
 * the node that took the removed node's place (may be NULL since no
 * sentinel leaves are used), and `xparent_node` its parent — passed
 * separately precisely because x can be NULL.  Follows the CLRS
 * RB-DELETE-FIXUP cases with left/right mirrored halves.
 * NOTE(review): __w is dereferenced (rbtree_node_color (__w->left)) before
 * the later `if (__w)` guard; in a valid tree the sibling of a
 * double-black node is non-NULL, so the guard looks redundant rather than
 * the dereference unsafe — worth confirming. */
#define rbtree_delete_fixup(root_ptr, x_node, xparent_node) \
    do { \
        struct rb_node_link* __rdf_x = (x_node); \
        struct rb_node_link* __rdf_xp = (xparent_node); \
        while (__rdf_xp && (__rdf_x == NULL || __rdf_x->color == RBTREE_BLACK)) { \
            if (__rdf_x == __rdf_xp->left) { \
                struct rb_node_link* __w = __rdf_xp->right; /* sibling */ \
                if (rbtree_node_color (__w) == RBTREE_RED) { \
                    __w->color = RBTREE_BLACK; \
                    __rdf_xp->color = RBTREE_RED; \
                    rbtree_rotate_left (root_ptr, __rdf_xp); \
                    __w = __rdf_xp->right; \
                } \
                if (rbtree_node_color (__w->left) == RBTREE_BLACK && \
                    rbtree_node_color (__w->right) == RBTREE_BLACK) { \
                    if (__w) \
                        __w->color = RBTREE_RED; \
                    __rdf_x = __rdf_xp; \
                    __rdf_xp = __rdf_x->parent; \
                } else { \
                    if (rbtree_node_color (__w->right) == RBTREE_BLACK) { \
                        if (__w->left) \
                            __w->left->color = RBTREE_BLACK; \
                        __w->color = RBTREE_RED; \
                        rbtree_rotate_right (root_ptr, __w); \
                        __w = __rdf_xp->right; \
                    } \
                    __w->color = __rdf_xp->color; \
                    __rdf_xp->color = RBTREE_BLACK; \
                    if (__w->right) \
                        __w->right->color = RBTREE_BLACK; \
                    rbtree_rotate_left (root_ptr, __rdf_xp); \
                    __rdf_x = *(root_ptr); \
                    break; \
                } \
            } else { /* mirror: x is the right child */ \
                struct rb_node_link* __w = __rdf_xp->left; \
                if (rbtree_node_color (__w) == RBTREE_RED) { \
                    __w->color = RBTREE_BLACK; \
                    __rdf_xp->color = RBTREE_RED; \
                    rbtree_rotate_right (root_ptr, __rdf_xp); \
                    __w = __rdf_xp->left; \
                } \
                if (rbtree_node_color (__w->right) == RBTREE_BLACK && \
                    rbtree_node_color (__w->left) == RBTREE_BLACK) { \
                    if (__w) \
                        __w->color = RBTREE_RED; \
                    __rdf_x = __rdf_xp; \
                    __rdf_xp = __rdf_x->parent; \
                } else { \
                    if (rbtree_node_color (__w->left) == RBTREE_BLACK) { \
                        if (__w->right) \
                            __w->right->color = RBTREE_BLACK; \
                        __w->color = RBTREE_RED; \
                        rbtree_rotate_left (root_ptr, __w); \
                        __w = __rdf_xp->left; \
                    } \
                    __w->color = __rdf_xp->color; \
                    __rdf_xp->color = RBTREE_BLACK; \
                    if (__w->left) \
                        __w->left->color = RBTREE_BLACK; \
                    rbtree_rotate_right (root_ptr, __rdf_xp); \
                    __rdf_x = *(root_ptr); \
                    break; \
                } \
            } \
        } \
        if (__rdf_x) \
            __rdf_x->color = RBTREE_BLACK; \
    } while (0)
/* Unlink `z_node` from the tree and rebalance (CLRS RB-DELETE).  Nodes
 * with at most one child are spliced out directly; a two-child node is
 * replaced by its in-order successor (minimum of the right subtree), which
 * inherits z's color.  __rd_x / __rd_xp track the replacement node and its
 * parent for the fixup, which runs only when a black node was removed or
 * moved.  The deleted node's own link fields are not cleared. */
#define rbtree_delete(root_ptr, z_node) \
    do { \
        struct rb_node_link* __rd_z = (z_node); \
        struct rb_node_link* __rd_y = __rd_z; \
        struct rb_node_link* __rd_x = NULL; \
        struct rb_node_link* __rd_xp = NULL; \
        int __rd_y_orig_color = __rd_y->color; \
        if (!__rd_z->left) { \
            __rd_x = __rd_z->right; \
            __rd_xp = __rd_z->parent; \
            rbtree_transplant (root_ptr, __rd_z, __rd_z->right); \
        } else if (!__rd_z->right) { \
            __rd_x = __rd_z->left; \
            __rd_xp = __rd_z->parent; \
            rbtree_transplant (root_ptr, __rd_z, __rd_z->left); \
        } else { \
            rbtree_min (__rd_z->right, __rd_y); /* in-order successor */ \
            __rd_y_orig_color = __rd_y->color; \
            __rd_x = __rd_y->right; \
            if (__rd_y->parent == __rd_z) { \
                __rd_xp = __rd_y; \
                if (__rd_x) \
                    __rd_x->parent = __rd_y; \
            } else { \
                __rd_xp = __rd_y->parent; \
                rbtree_transplant (root_ptr, __rd_y, __rd_y->right); \
                __rd_y->right = __rd_z->right; \
                __rd_y->right->parent = __rd_y; \
            } \
            rbtree_transplant (root_ptr, __rd_z, __rd_y); \
            __rd_y->left = __rd_z->left; \
            __rd_y->left->parent = __rd_y; \
            __rd_y->color = __rd_z->color; \
        } \
        if (__rd_y_orig_color == RBTREE_BLACK) \
            rbtree_delete_fixup (root_ptr, __rd_x, __rd_xp); \
    } while (0)
/* Store in `out` the in-order successor of `node`, or NULL when `node` is
 * NULL or is the maximum of the tree.  Either the leftmost node of the
 * right subtree, or the first ancestor reached from a left child. */
#define rbtree_next(node, out) \
    do { \
        (out) = NULL; \
        struct rb_node_link* __c = (node); \
        if (__c != NULL) { \
            if (__c->right != NULL) { \
                for (__c = __c->right; __c->left != NULL; __c = __c->left) \
                    ; \
            } else { \
                struct rb_node_link* __up = __c->parent; \
                while (__up != NULL && __c == __up->right) { \
                    __c = __up; \
                    __up = __up->parent; \
                } \
                __c = __up; \
            } \
            (out) = __c; \
        } \
    } while (0)
/* Store in `out` the in-order predecessor of `node`, or NULL when `node`
 * is NULL or is the minimum of the tree.  Either the rightmost node of the
 * left subtree, or the first ancestor reached from a right child. */
#define rbtree_prev(node, out) \
    do { \
        (out) = NULL; \
        struct rb_node_link* __c = (node); \
        if (__c != NULL) { \
            if (__c->left != NULL) { \
                for (__c = __c->left; __c->right != NULL; __c = __c->right) \
                    ; \
            } else { \
                struct rb_node_link* __up = __c->parent; \
                while (__up != NULL && __c == __up->left) { \
                    __c = __up; \
                    __up = __up->parent; \
                } \
                __c = __up; \
            } \
            (out) = __c; \
        } \
    } while (0)
#endif // _KERNEL_LIBK_RBTREE_H

View File

@@ -4,10 +4,10 @@
#include <limits.h>
#include <stdalign.h>
#include <stdarg.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdnoreturn.h>
#include <stdatomic.h>
#endif // _KERNEL_LIBK_STD_H

View File

@@ -1,17 +1,17 @@
#include <libk/std.h>
#include <libk/string.h>
size_t memset(void *dst, uint8_t b, size_t n) {
uint8_t *dst1 = dst;
size_t memset (void* dst, uint8_t b, size_t n) {
uint8_t* dst1 = dst;
size_t i;
for (i = 0; i < n; i++)
dst1[i] = b;
return i;
}
size_t memcpy(void *dst, const void *src, size_t n) {
uint8_t *dst1 = dst;
const uint8_t *src1 = src;
size_t memcpy (void* dst, const void* src, size_t n) {
uint8_t* dst1 = dst;
const uint8_t* src1 = src;
size_t i;
for (i = 0; i < n; i++)
dst1[i] = src1[i];
@@ -19,20 +19,22 @@ size_t memcpy(void *dst, const void *src, size_t n) {
}
// SOURCE: https://stackoverflow.com/a/48967408
void strncpy(char* dst, const char* src, size_t n) {
size_t i = 0;
while(i++ != n && (*dst++ = *src++));
void strncpy (char* dst, const char* src, size_t n) {
size_t i = 0;
while (i++ != n && (*dst++ = *src++))
;
}
size_t strlen(const char *str) {
const char *s;
for (s = str; *s; ++s);
size_t strlen (const char* str) {
const char* s;
for (s = str; *s; ++s)
;
return (s - str);
}
int memcmp(const void *s1, const void *s2, size_t n) {
unsigned char *p = (unsigned char *)s1;
unsigned char *q = (unsigned char *)s2;
int memcmp (const void* s1, const void* s2, size_t n) {
unsigned char* p = (unsigned char*)s1;
unsigned char* q = (unsigned char*)s2;
while (n--) {
if (*p != *q) {

View File

@@ -1,10 +1,12 @@
#ifndef _KERNEL_LIBK_STRING_H
#define _KERNEL_LIBK_STRING_H
size_t memset(void *dst, uint8_t b, size_t n);
size_t memcpy(void *dst, const void *src, size_t n);
void strncpy(char* dst, const char* src, size_t n);
size_t strlen(const char *str);
int memcmp(const void *s1, const void *s2, size_t n);
#include <libk/std.h>
size_t memset (void* dst, uint8_t b, size_t n);
size_t memcpy (void* dst, const void* src, size_t n);
void strncpy (char* dst, const char* src, size_t n);
size_t strlen (const char* str);
int memcmp (const void* s1, const void* s2, size_t n);
#endif // _KERNEL_LIBK_STRING_H

Some files were not shown because too many files have changed in this diff Show More