Compare commits

...

100 Commits

Author SHA1 Message Date
38e26a9c12 Implement argument_ptr () syscall for handling process arguments
All checks were successful
Build documentation / build-and-deploy (push) Successful in 37s
2026-01-30 14:05:47 +01:00
124aa12f5b Redesign scheduling points
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-30 02:36:27 +01:00
d2f5c032d9 Fix TLS alignment issues, works on BOCHS now too!
All checks were successful
Build documentation / build-and-deploy (push) Successful in 40s
2026-01-29 18:18:24 +01:00
73e42588fb Fix BOCHS clock
All checks were successful
Build documentation / build-and-deploy (push) Successful in 41s
2026-01-29 15:04:06 +01:00
e78bfb9984 Move suspension q code into proc/suspension_q.c
All checks were successful
Build documentation / build-and-deploy (push) Successful in 24s
2026-01-29 01:52:18 +01:00
d2a88b3641 Move suspension q's cleanup to proc/suspension_q.c 2026-01-29 01:43:01 +01:00
fdda2e2df8 Unlock mutexes on process death 2026-01-29 01:38:44 +01:00
388418a718 Nice wrappers around process management
All checks were successful
Build documentation / build-and-deploy (push) Successful in 34s
2026-01-29 00:08:54 +01:00
1c64d608bd Rename make/libc.mk -> make/libmsl.mk
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
2026-01-28 23:57:28 +01:00
3d23187acf Implement userspace TLS, remove RW Locks 2026-01-28 23:52:48 +01:00
a3b62ebd3d Clean up AMD64 memory management code, remove dependency on pd.lock 2026-01-27 19:03:03 +01:00
8bda300f6a Fix sys_clone () wrong argument bug
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-27 18:05:02 +01:00
cf51600c6a Cleanup syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 34s
2026-01-27 17:34:43 +01:00
b388b30b24 Redesign userspace memory management
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
2026-01-27 17:04:08 +01:00
600886a7ee Organize resources into process groups 2026-01-27 14:18:05 +01:00
67b66f2b39 Implement proper mutex cleanup
All checks were successful
Build documentation / build-and-deploy (push) Successful in 23s
2026-01-25 23:10:12 +01:00
18f791222e Remove dead process from it's suspension queues 2026-01-25 22:39:29 +01:00
5e16bb647c Multiple process suspension queues 2026-01-25 22:10:04 +01:00
a68373e4ee Dynamically assign cpu upon mutex unlock
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-25 20:39:51 +01:00
8650010992 Fix user CPU context saving
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
2026-01-25 17:39:34 +01:00
95f590fb3b multi-cpu scheduling WIP 2026-01-25 15:54:00 +01:00
7bb3b77ede Disable kernel preemption, fix requesting rescheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
2026-01-22 19:32:15 +01:00
c26fd3cb2b Fix scheduler locking hierarchy 2026-01-22 15:59:29 +01:00
fea0999726 Fix scheduler starvation, use lists for scheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
2026-01-22 11:54:52 +01:00
7eceecf6e3 Add mutex syscalls 2026-01-20 22:18:43 +01:00
fff51321bc Redesign syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 40s
2026-01-20 20:46:34 +01:00
a29233f853 Rename proc_spawn_thread to proc_clone 2026-01-19 22:01:44 +01:00
38a43b59b0 Resolve strange IRQ issues which cause the scheduler to behave weirdly (IRQ mapping)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 52s
2026-01-19 01:51:34 +01:00
ddafc4eb19 Rewrite resource subsystem 2026-01-18 20:50:45 +01:00
4f7077d458 Move mutex and mem create/cleanup functions into mutex.c and mem.c respectively
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
2026-01-16 22:13:17 +01:00
9a7dbf0594 Properly implement liballoc_free () 2026-01-16 22:09:16 +01:00
ab8093cc6c CI install pymdown-extensions from pip
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-16 20:28:26 +01:00
ddbb66b5e4 Docs processes overview 2026-01-16 20:26:23 +01:00
11a1eb52aa Move status codes into a separate header
All checks were successful
Build documentation / build-and-deploy (push) Successful in 36s
2026-01-16 19:07:32 +01:00
a054257336 Port liballoc to userspace 2026-01-16 18:50:40 +01:00
9fc8521e63 sys_proc_mutex_unlock () automatically reschedule at the end
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
2026-01-16 00:28:46 +01:00
711da8aeab Implement proc_spawn_thread syscall, fix proc_resume and proc_suspend
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-16 00:26:37 +01:00
ebd9f0cac6 Let the user application decide upon the resource ID (RID)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 22s
2026-01-14 23:19:39 +01:00
7cd5623d36 Use reference counting to track filetime of process PD
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-14 23:11:06 +01:00
270ff507d4 Implement lock IRQ nesting via stack variables/contexts
All checks were successful
Build documentation / build-and-deploy (push) Successful in 21s
2026-01-14 22:11:56 +01:00
55166f9d5f syscall doesn't need RPL 3 bits on kernel code
All checks were successful
Build documentation / build-and-deploy (push) Successful in 24s
2026-01-14 21:21:20 +01:00
e5cc3a64d3 Fix syscall return value - preserve RAX register
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-14 20:58:00 +01:00
2ab308d678 Drop m_ prefix from libmsl 2026-01-14 20:56:09 +01:00
d1d772cb42 Fix user apps randomly crashing (APIC, GDT layout, syscall entry)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 23s
2026-01-14 19:51:18 +01:00
0d8f9e565f Fix missing CPU_REQUEST_SCHED IDT entry 2026-01-11 12:07:17 +01:00
f80a26e5eb Load kernel CR3 2026-01-11 03:45:32 +01:00
5bf10c1218 Extra compiler flags for AMD64
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-11 03:42:15 +01:00
41a458b925 Implement Mutexes and supporting syscalls, cleanup/optimize scheduler
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-10 00:12:42 +01:00
6a474c21a0 Use RW spin locks
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-09 19:53:08 +01:00
a5283283f6 Hold proc->lock while killing the process 2026-01-09 00:00:18 +01:00
79768d94e6 Preserve syscall return value in RAX
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-08 23:06:32 +01:00
0555ddd041 Clean up IOAPIC and LAPIC implementations
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
2026-01-08 22:05:11 +01:00
ebb026b807 proc_cleanup_resources () drop instead of immediate removal
All checks were successful
Build documentation / build-and-deploy (push) Successful in 30s
2026-01-07 23:09:13 +01:00
d7b734306f Introduce concept of Process Resources (PR_MEM), implement necessary syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 42s
2026-01-07 22:47:30 +01:00
28aef30f77 Implement proc_map () and proc_unmap () syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 21s
2026-01-06 23:32:11 +01:00
9f107a1a5e Implement proc_unmap () 2026-01-06 17:47:21 +01:00
e50f8940a9 Redesign linked list
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-06 16:38:42 +01:00
d09e4d97ad Fix missing headers, generate compile db with bear
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
2026-01-06 03:08:13 +01:00
7915986902 Remove Doxygen-style comments, change formatting to wrap comments
All checks were successful
Build documentation / build-and-deploy (push) Successful in 28s
2026-01-06 02:04:32 +01:00
902682ac11 Remove doxygen infra
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
2026-01-06 01:41:07 +01:00
7747e5e0aa Docs update theme
All checks were successful
Build documentation / build-and-deploy (push) Successful in 47s
2026-01-06 01:37:51 +01:00
a8423fe657 Better proc_kill () and process cleanup
All checks were successful
Build documentation / build-and-deploy (push) Successful in 27s
2026-01-06 01:19:11 +01:00
6538fd8023 Generate new PIDs for processes 2026-01-05 20:24:26 +01:00
fcd5658a80 Use red-black trees to store process run queue and process list
All checks were successful
Build documentation / build-and-deploy (push) Successful in 43s
2026-01-05 18:30:58 +01:00
b1579e4ac1 Implement automatic paging table deallocation 2026-01-04 21:26:11 +01:00
bba36ef057 Remove sign warning in comparison
All checks were successful
Build documentation / build-and-deploy (push) Successful in 22s
2026-01-04 01:45:56 +01:00
b5353cb600 Auxilary scripts for formatting all components
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
2026-01-04 01:44:02 +01:00
e077d322f4 Rewrite init app in C, introduce MSL (MOP3 System Library)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 35s
2026-01-04 01:11:31 +01:00
2c954a9ca9 Fix return syscall result
All checks were successful
Build documentation / build-and-deploy (push) Successful in 32s
2026-01-03 15:06:36 +01:00
cf04e3db18 proc_quit () and proc_test () syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 43s
2026-01-03 12:21:56 +01:00
124a7f7215 Docs add kernel build instructions
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2026-01-03 02:19:40 +01:00
e52268cd8e First Hello world syscall
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s
2026-01-03 02:04:09 +01:00
1341dc00d9 make -B format_kernel
All checks were successful
Build documentation / build-and-deploy (push) Successful in 32s
2026-01-01 20:17:29 +01:00
99bab4ceee Use generic spin () instead of amd64_spin () 2026-01-01 20:16:40 +01:00
121fb3b33c Move platform-specific code for process loading/init for AMD64 to amd64/
All checks were successful
Build documentation / build-and-deploy (push) Successful in 49s
2026-01-01 20:08:37 +01:00
5e6bdcc52d Handle swapgs in interrupts and scheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
2026-01-01 18:42:53 +01:00
3bcbdb5ec4 Fix proc_kill () race, improve scheduler locking
All checks were successful
Build documentation / build-and-deploy (push) Successful in 1m45s
2026-01-01 16:59:04 +01:00
7f53ede2ab CI docs use $REMOTE_IP
All checks were successful
Build documentation / build-and-deploy (push) Successful in 38s
2025-12-31 22:50:59 +01:00
f1e34b78cd CI docs chmod 777 on site build dir
All checks were successful
Build documentation / build-and-deploy (push) Successful in 39s
2025-12-31 22:40:28 +01:00
97ad0b338c Fix CI docs build, install rsync
All checks were successful
Build documentation / build-and-deploy (push) Successful in 43s
2025-12-31 21:25:33 +01:00
74c782d653 mkdir docs/kernel/doxygen
Some checks failed
Build documentation / build-and-deploy (push) Failing after 30s
2025-12-31 21:21:02 +01:00
949f9c5293 Add docs gitea workflow
Some checks failed
Build documentation / build-and-deploy (push) Failing after 1m11s
2025-12-31 20:57:09 +01:00
a6c3f4cf87 Move kernel doxygen stuff to kernel/ 2025-12-30 17:04:05 +01:00
34f1e0ba30 Document amd64 platform-specific code 2025-12-30 16:50:15 +01:00
4f4f5c3d2f Move doxygen-awesome-darkmode-toggle.js to doxytheme/ 2025-12-30 01:52:45 +01:00
d861ab56c4 Remove pre-SMP TSS code 2025-12-30 01:50:47 +01:00
b279774bd6 Generated docs using doxygen and mkdocs 2025-12-30 01:47:29 +01:00
fa7998c323 Run first app from ramdisk! 2025-12-29 23:54:21 +01:00
c16170e4c2 SMP and timer interrupts 2025-12-23 19:50:37 +01:00
259aa732c8 Use separate IST stack for IRQs and cpu exceptions 2025-12-22 22:19:01 +01:00
1fd6f4890d Generic sleep_micro() function 2025-12-22 21:14:58 +01:00
849df9c27d Fix HPET unaligned read/writes on bochs 2025-12-22 21:06:48 +01:00
69feceaaae clang-format set column width to 100 chars 2025-12-22 19:38:32 +01:00
7b33d0757a APIC, HPET, virtual memory 2025-12-22 19:36:43 +01:00
741d0fb9b0 clang-format alignment rules 2025-12-21 23:10:21 +01:00
c85cbd0c01 Use prettier #defines for attributes 2025-12-21 23:03:56 +01:00
b2d8294b12 Use clang-format 2025-12-21 22:53:25 +01:00
8794a61073 Integrate uACPI 2025-12-21 22:24:23 +01:00
c3123192d8 Interrupt handling/cpu exceptions 2025-12-21 11:55:49 +01:00
84c600b903 Add limine as raw source 2025-12-17 22:45:17 +01:00
356 changed files with 56582 additions and 799 deletions

58
.clang-format Normal file
View File

@@ -0,0 +1,58 @@
BasedOnStyle: LLVM
Language: C
# Indentation
IndentWidth: 2
TabWidth: 2
UseTab: Never
# Braces and blocks
BreakBeforeBraces: Attach
BraceWrapping:
AfterFunction: false
AfterControlStatement: false
AfterStruct: false
AfterEnum: false
AfterUnion: false
BeforeElse: false
# Control statements
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortBlocksOnASingleLine: Never
# Line breaking
ColumnLimit: 100
BreakBeforeBinaryOperators: None
BreakBeforeTernaryOperators: true
BreakStringLiterals: false
# Spacing
SpaceBeforeParens: Always
SpaceBeforeAssignmentOperators: true
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
# Pointer alignment
PointerAlignment: Left
DerivePointerAlignment: false
# Alignment
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignOperands: false
# Includes
SortIncludes: true
# Comments
ReflowComments: true
CommentPragmas: '^ IWYU pragma:'
# Misc
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1

37
.editorconfig Normal file
View File

@@ -0,0 +1,37 @@
root = true
# Default for all files
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
# C / header files
[*.{c,h}]
indent_style = space
indent_size = 2
tab_width = 2
max_line_length = 80
# Assembly (if present; usually tab-sensitive)
[*.S]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false
# Makefiles (MUST use tabs)
[Makefile]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false
[*.mk]
indent_style = tab
tab_width = 8
trim_trailing_whitespace = false
# Markdown (avoid wrapping conflicts)
[*.md]
trim_trailing_whitespace = false
max_line_length = off

2
.gdbinit Normal file
View File

@@ -0,0 +1,2 @@
file kernel/build/kernel.elf
target remote :1234

View File

@@ -0,0 +1,43 @@
name: Build documentation
on:
push:
branches:
- master
jobs:
build-and-deploy:
runs-on: ubuntu-latest
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Install software
run: |
sudo apt-get update
sudo apt-get install -y doxygen make rsync
- name: Set up python3
uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Install mkdocs
run: |
pip install --upgrade pip
pip install mkdocs mkdocs-material pymdown-extensions
- name: Build
run: make docs
- name: Deploy
env:
SSH_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
REMOTE_IP: ${{ vars.DEPLOY_REMOTE_IP }}
run: |
mkdir -p ~/.ssh
echo "$SSH_KEY" > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
ssh-keyscan -H "$REMOTE_IP" >> ~/.ssh/known_hosts
chmod -R 777 site
rsync -az --delete site/ webuser@"$REMOTE_IP":/home/webuser/mop/

2
.gitignore vendored
View File

@@ -2,3 +2,5 @@ iso_root
mop3.iso
bochs-log.txt
bochs-com1.txt
mop3dist.tar
site/

View File

@@ -1,9 +1,7 @@
platform ?= amd64
all_kernel:
make -C kernel platform=$(platform) all
clean_kernel:
make -C kernel platform=$(platform) clean
.PHONY: all_kernel clean_kernel
include make/apps.mk
include make/kernel.mk
include make/dist.mk
include make/docs.mk
include make/libmsl.mk

11
amd64/flags.mk Normal file
View File

@@ -0,0 +1,11 @@
cflags += --target=x86_64-pc-none-elf \
-mno-sse \
-mno-sse2 \
-mno-avx \
-mno-mmx \
-mno-80387 \
-mno-red-zone \
-mcmodel=large
ldflags += --target=x86_64-pc-none-elf \
-Wl,-zmax-page-size=0x1000

69
amd64/link.ld Normal file
View File

@@ -0,0 +1,69 @@
OUTPUT_FORMAT(elf64-x86-64)
ENTRY(_start)
PHDRS {
text PT_LOAD;
rodata PT_LOAD;
data PT_LOAD;
bss PT_LOAD;
tls PT_TLS;
}
SECTIONS {
. = 0x0000500000000000;
.text : {
*(.text .text.*)
*(.ltext .ltext.*)
} :text
. = ALIGN(0x1000);
.rodata : {
*(.rodata .rodata.*)
} :rodata
. = ALIGN(0x1000);
.data : {
*(.data .data.*)
*(.ldata .ldata.*)
} :data
. = ALIGN(0x1000);
__bss_start = .;
.bss : {
*(.bss .bss.*)
*(.lbss .lbss.*)
} :bss
__bss_end = .;
. = ALIGN(0x1000);
__tdata_start = .;
.tdata : {
*(.tdata .tdata.*)
} :tls
__tdata_end = .;
__tbss_start = .;
.tbss : {
*(.tbss .tbss.*)
} :tls
__tbss_end = .;
__tls_size = __tbss_end - __tdata_start;
/DISCARD/ : {
*(.eh_frame*)
*(.note .note.*)
}
}

View File

@@ -1,13 +1,15 @@
cpu: model=p4_prescott_celeron_336
cpu: model=p4_prescott_celeron_336, ips=200000000
memory: guest=4096 host=2048
romimage: file=/usr/share/bochs/BIOS-bochs-latest, options=fastboot
romimage: file=/usr/share/bochs/BIOS-bochs-latest
vgaromimage: file=/usr/share/bochs/VGABIOS-lgpl-latest.bin
ata0: enabled=1
ata0-master: type=cdrom, path=mop3.iso, status=inserted
com1: enabled=1, mode=file, dev=bochs-com1.txt
pci: enabled=1, chipset=i440fx
clock: sync=realtime, time0=local
boot: cdrom

14
aux/devel.sh Executable file
View File

@@ -0,0 +1,14 @@
#!/bin/sh
set -xe
if [ "$1" = "debug" ]; then
make -B all_kernel buildtype=debug
else
make -B all_kernel
fi
make -B all_libmsl
make -B all_apps
make -B all_dist
./aux/limine_iso_amd64.sh

7
aux/format.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/sh
set -x
make -B format_kernel
make -B format_libmsl
make -B format_apps

View File

@@ -10,6 +10,8 @@ cp -v boot/limine/limine-bios.sys boot/limine/limine-bios-cd.bin \
cp -v boot/limine/BOOTX64.EFI boot/limine/BOOTIA32.EFI iso_root/EFI/BOOT
cp -v mop3dist.tar iso_root/boot
xorriso -as mkisofs -R -r -J -b boot/limine/limine-bios-cd.bin \
-no-emul-boot -boot-load-size 4 -boot-info-table -hfsplus \
-apm-block-size 2048 --efi-boot boot/limine/limine-uefi-cd.bin \

5
aux/qemu_amd64.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -x
qemu-system-x86_64 -M q35 -m 4G -serial stdio -enable-kvm -cdrom mop3.iso -smp 4 $@

5
aux/qemu_amd64_debug.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -x
qemu-system-x86_64 -M q35 -m 4G -serial stdio -cdrom mop3.iso -smp 4 -s -S $@

Submodule boot/limine deleted from f777d332c6

View File

@@ -3,3 +3,4 @@ timeout: 10
/mop3
protocol: limine
path: boot():/boot/kernel.elf
module_path: boot():/boot/mop3dist.tar

2
boot/limine/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
limine
limine.exe

BIN
boot/limine/BOOTAA64.EFI Normal file

Binary file not shown.

BIN
boot/limine/BOOTIA32.EFI Normal file

Binary file not shown.

Binary file not shown.

BIN
boot/limine/BOOTRISCV64.EFI Normal file

Binary file not shown.

BIN
boot/limine/BOOTX64.EFI Normal file

Binary file not shown.

22
boot/limine/LICENSE Normal file
View File

@@ -0,0 +1,22 @@
Copyright (C) 2019-2025 Mintsuki and contributors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

19
boot/limine/Makefile Normal file
View File

@@ -0,0 +1,19 @@
.POSIX:
SHELL=/bin/sh
CC=cc
CFLAGS=-g -O2 -pipe
CPPFLAGS=
LDFLAGS=
LIBS=
.PHONY: all
all: limine
.PHONY: clean
clean:
rm -f limine limine.exe
limine: limine.c
$(CC) $(CFLAGS) -std=c99 $(CPPFLAGS) $(LDFLAGS) $< $(LIBS) -o $@

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

BIN
boot/limine/limine-bios.sys Normal file

Binary file not shown.

Binary file not shown.

1354
boot/limine/limine.c Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

44
docs/building_kernel.md Normal file
View File

@@ -0,0 +1,44 @@
# Building the MOP3 kernel
This article describes how to build the kernel, how the build system works, and its prerequisites.
## Prerequisites
- POSIX host system (tested on Linux, may break on other systems)
- Git
- GNU make
- LLVM toolchain/Clang C compiler
- Xorriso
## Build steps
cd into root of MOP3 source tree.
Build the kernel:
```
make -B all_kernel buildtype=<debug|release>
```
Build essential system applications:
```
make -B all_apps
```
Prepare the ramdisk:
```
make -B all_dist
```
Build ISO image:
```
./aux/limine_iso_amd64.sh
```
Now you have an ISO image, which can be run by QEMU or you can burn it onto a CD.
For the convenience of the developer, there's a magic "do all" script located in `aux`:
```
./aux/devel.sh # optionally "./aux/devel.sh debug" for debugging
```
It does all the previous steps, just packed into a single script.

4
docs/index.md Normal file
View File

@@ -0,0 +1,4 @@
# MOP3 operating system documentation
MOP3 is a hobby OS project of mine ;).

View File

@@ -0,0 +1,30 @@
# Overview of processes in MOP3
## What is a process?
A process is a structure defined to represent an internal state of a user application's environment. This includes
the necessary stacks, code, data and other resources. A process (usually) has its own address space, but in certain
circumstances may share it with another process.
## Only processes vs. processes-threads model
### Overview
MOP3 doesn't have a process-thread separation. Usually in operating systems you'd have a "process", which consists
of multiple worker threads. For eg. a single-threaded application is a process, which consists of one worker. In MOP3
we do things a little differently. We only have processes, but some processes may work within the same pool of (generally speaking)
"resources", such as a shared address space, shared memory allocations, mutexes and so on. An application then consists of
not threads, but processes, which are loosely tied together via shared data.
#### Processes-threads model diagram
![Processes-threads model](assets/images/processes-threads.png)
#### Only processes model diagram
![Only processes model](assets/images/only-processes.png)
## Scheduling
MOP3 uses a round-robin based scheduler. For now priorities are left unimplemented, ie. every process has
equal priority, but this may change in the future.
A good explanation of round-robin scheduling can be found on the OSDev wiki: [the article](https://wiki.osdev.org/Scheduling_Algorithms#Round_Robin)

21
generic/flags.mk Normal file
View File

@@ -0,0 +1,21 @@
cflags += -nostdinc \
-nostdlib \
-ffreestanding \
-fno-builtin \
-std=c11 \
-pedantic \
-Wall \
-Wextra \
-ffunction-sections \
-fdata-sections
cflags += -isystem ../include
ldflags += -ffreestanding \
-nostdlib \
-fno-builtin \
-fuse-ld=lld \
-static \
-Wl,--gc-sections \
-Wl,--strip-all \
-flto

13
include/m/status.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef _M_STATUS_H
#define _M_STATUS_H
#define ST_OK 0
#define ST_SYSCALL_NOT_FOUND 1
#define ST_UNALIGNED 2
#define ST_OOM_ERROR 3
#define ST_NOT_FOUND 4
#define ST_BAD_ADDRESS_SPACE 5
#define ST_PERMISSION_ERROR 6
#define ST_BAD_RESOURCE 7
#endif // _M_STATUS_H

16
include/m/syscall_defs.h Normal file
View File

@@ -0,0 +1,16 @@
#ifndef _M_SYSCALL_DEFS_H
#define _M_SYSCALL_DEFS_H
#define SYS_QUIT 1
#define SYS_TEST 2
#define SYS_MAP 3
#define SYS_UNMAP 4
#define SYS_CLONE 5
#define SYS_SCHED 6
#define SYS_MUTEX_CREATE 7
#define SYS_MUTEX_DELETE 8
#define SYS_MUTEX_LOCK 9
#define SYS_MUTEX_UNLOCK 10
#define SYS_ARGUMENT_PTR 11
#endif // _M_SYSCALL_DEFS_H

2
init/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.o
*.exe

1
init/Makefile Normal file
View File

@@ -0,0 +1 @@
include ../make/user.mk

1
init/app.mk Normal file
View File

@@ -0,0 +1 @@
app := init.exe

46
init/init.c Normal file
View File

@@ -0,0 +1,46 @@
#include <limits.h>
#include <proc/local.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
#include <string/string.h>
#define MUTEX 2000
LOCAL volatile char letter = 'c';
void app_proc (void) {
char arg_letter = (char)(uintptr_t)argument_ptr ();
letter = arg_letter;
for (;;) {
mutex_lock (MUTEX);
for (int i = 0; i < 3; i++)
test (letter);
mutex_unlock (MUTEX);
}
process_quit ();
}
void app_main (void) {
mutex_create (MUTEX);
letter = 'a';
process_spawn (&app_proc, (void*)'a');
process_spawn (&app_proc, (void*)'b');
process_spawn (&app_proc, (void*)'c');
for (;;) {
mutex_lock (MUTEX);
for (int i = 0; i < 3; i++)
test (letter);
mutex_unlock (MUTEX);
}
}

3
init/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += init.c
o += init.o

2
kernel/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.json
.cache

View File

@@ -5,6 +5,7 @@ ldflags :=
cflags :=
buildtype ?= release
include vars.mk
include flags.mk
include src.mk
@@ -22,4 +23,13 @@ build/kernel.elf: $(o)
clean:
rm -f $(o) build/kernel.elf
.PHONY: all clean
format:
clang-format -i $$(git ls-files '*.c' '*.h' \
':!limine/limine.h' \
':!c_headers/include/**' \
':!uACPI/source/**' \
':!uACPI/include/**' \
':!uACPI/tests/**' \
':!libk/printf*')
.PHONY: all clean format

271
kernel/amd64/apic.c Normal file
View File

@@ -0,0 +1,271 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
#include <sys/time.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24
/* ID of Local APIC */
#define LAPIC_ID 0x20
/* End of interrupt register */
#define LAPIC_EOI 0xB0
/* Spurious interrupt vector register */
#define LAPIC_SIVR 0xF0
/* Interrupt command register */
#define LAPIC_ICR 0x300
/* LVT timer register */
#define LAPIC_LVTTR 0x320
/* Timer initial count register */
#define LAPIC_TIMICT 0x380
/* Timer current count register */
#define LAPIC_TIMCCT 0x390
/* Divide config register */
#define LAPIC_DCR 0x3E0
#define DIVIDER_VALUE 0x0B
struct ioapic {
struct acpi_madt_ioapic table_data;
spin_lock_t lock;
uintptr_t mmio_base;
};
/* Table of IOAPICS */
static struct ioapic ioapics[IOAPICS_MAX];
/* Table of interrupt source overrides */
/* clang-format off */
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
/* clang-format on */
/* Count of actual IOAPIC entries */
static size_t ioapic_entries = 0;
/* Count of actual interrupt source overrides */
static size_t intr_src_override_entries = 0;
static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
/* Read IOAPIC */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
spin_lock_ctx_t ctxioar;
spin_lock (&ioapic->lock, &ctxioar);
*(volatile uint32_t*)ioapic->mmio_base = reg;
uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
spin_unlock (&ioapic->lock, &ctxioar);
return ret;
}
/* Write IOAPIC */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
spin_lock_ctx_t ctxioaw;
spin_lock (&ioapic->lock, &ctxioaw);
*(volatile uint32_t*)ioapic->mmio_base = reg;
*(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
spin_unlock (&ioapic->lock, &ctxioaw);
}
/* Find an IOAPIC corresposting to provided IRQ */
static struct ioapic* amd64_ioapic_find (uint32_t irq) {
struct ioapic* ioapic = NULL;
for (size_t i = 0; i < ioapic_entries; i++) {
ioapic = &ioapics[i];
uint32_t version = amd64_ioapic_read (ioapic, 1);
uint32_t max = ((version >> 16) & 0xFF);
if ((irq >= ioapic->table_data.gsi_base) && (irq <= (ioapic->table_data.gsi_base + max)))
return ioapic;
}
return NULL;
}
/*
* Route IRQ to an IDT entry of a given Local APIC.
*
* vec - Interrupt vector number, which will be delivered to the CPU.
* irq -Legacy IRQ number to be routed. Can be changed by an interrupt source override
* into a different GSI.
* flags - IOAPIC redirection flags.
* lapic_id - Local APIC that will receive the interrupt.
*/
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
struct ioapic* ioapic = NULL;
struct acpi_madt_interrupt_source_override* override;
bool found_override = false;
for (size_t i = 0; i < intr_src_override_entries; i++) {
override = &intr_src_overrides[i];
if (override->source == irq) {
found_override = true;
break;
}
}
uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);
if (found_override) {
uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
calc_flags |= (uint64_t)mode << 15;
calc_flags |= (uint64_t)polarity << 13;
}
uint32_t gsi = found_override ? override->gsi : irq;
ioapic = amd64_ioapic_find (gsi);
if (ioapic == NULL)
return;
uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;
amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
}
/*
 * Find and initialize every IOAPIC listed in the ACPI MADT. Also records interrupt
 * source override entries so amd64_ioapic_route_irq can translate legacy IRQs to
 * GSIs later. Halts the machine if no MADT is present.
 */
void amd64_ioapic_init (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct uacpi_table apic_table;
uacpi_status status = uacpi_table_find_by_signature (ACPI_MADT_SIGNATURE, &apic_table);
if (status != UACPI_STATUS_OK) {
/* Without the MADT we cannot discover interrupt controllers - halt. */
DEBUG ("Could not find MADT table!\n");
spin ();
}
struct acpi_madt* apic = (struct acpi_madt*)apic_table.virt_addr;
struct acpi_entry_hdr* current = (struct acpi_entry_hdr*)apic->entries;
/* Walk the variable-length MADT entry list until we step past the table end. */
for (;;) {
if ((uintptr_t)current >=
((uintptr_t)apic->entries + apic->hdr.length - sizeof (struct acpi_madt)))
break;
switch (current->type) {
case ACPI_MADT_ENTRY_TYPE_IOAPIC: {
struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
/* Map the IOAPIC MMIO window into the HHDM region before touching it. */
mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
MM_PG_PRESENT | MM_PG_RW);
ioapics[ioapic_entries++] = (struct ioapic){
.lock = SPIN_LOCK_INIT,
.table_data = *ioapic_table_data,
.mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
};
} break;
case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
struct acpi_madt_interrupt_source_override* override =
(struct acpi_madt_interrupt_source_override*)current;
/* Remember overrides; they remap legacy ISA IRQ numbers to GSIs. */
intr_src_overrides[intr_src_override_entries++] = *override;
} break;
}
/* Entries are variable-sized; advance by the length recorded in the header. */
current = (struct acpi_entry_hdr*)((uintptr_t)current + current->length);
}
}
/* Get MMIO base of the current CPU's Local APIC (set up in amd64_lapic_init) */
static uintptr_t amd64_lapic_base (void) { return thiscpu->lapic_mmio_base; }
/* Store a 32-bit value into a memory-mapped Local APIC register */
static void amd64_lapic_write (uint32_t reg, uint32_t value) {
    volatile uint32_t* mmio = (volatile uint32_t*)(amd64_lapic_base () + reg);
    *mmio = value;
}
/* Load a 32-bit value from a memory-mapped Local APIC register */
static uint32_t amd64_lapic_read (uint32_t reg) {
    volatile uint32_t* mmio = (volatile uint32_t*)(amd64_lapic_base () + reg);
    return *mmio;
}
/* Get ID of Local APIC (the ID lives in the top byte of the LAPIC_ID register) */
uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }
/* Send End of interrupt command to Local APIC */
void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
/*
 * Calibrate Local APIC to send interrupts in a set interval.
 *
 * us - Period length in microseconds
 *
 * Returns the number of LAPIC timer ticks (at DIVIDER_VALUE) that elapse in `us`
 * microseconds, measured against sleep_micro's time source.
 */
static uint32_t amd64_lapic_calibrate (uint32_t us) {
spin_lock_ctx_t ctxlacb;
spin_lock (&lapic_calibration_lock, &ctxlacb);
amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
/* Bit 16 = mask: arm the timer masked so calibration does not fire interrupts. */
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
/* Start counting down from the maximum initial count. */
amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);
sleep_micro (us);
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
/* Ticks consumed = initial count minus current count. */
uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
DEBUG ("timer ticks = %u\n", ticks);
spin_unlock (&lapic_calibration_lock, &ctxlacb);
return ticks;
}
/*
 * Starts a Local APIC, configures LVT timer to send interrupts at SCHED_PREEMPT_TIMER.
 *
 * ticks - Initial tick count
 */
static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
amd64_lapic_write (LAPIC_TIMICT, ticks);
/* Bit 17 selects periodic timer mode; the low byte is the interrupt vector. */
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
}
/*
 * Initialize Local APIC, configure to send timer interrupts at a given period. See
 * amd64_lapic_calibrate and amd64_lapic_start.
 *
 * us - Timer period in microseconds
 */
void amd64_lapic_init (uint32_t us) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
/* Bit 11 of the APIC base MSR globally enables the Local APIC. */
amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11));
/* The MSR also carries the LAPIC's physical MMIO base (page-aligned). */
uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);
/* Spurious interrupt vector 0xFF; bit 8 software-enables the APIC. */
amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
amd64_lapic_start (thiscpu->lapic_ticks);
}
/*
 * Send an IPI to a given Local APIC. This will invoke an IDT stub located at vec.
 *
 * lapic_id - Target Local APIC
 * vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
 */
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
/* wait for previous IPI to finish (ICR delivery-status, bit 12) */
while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) {
__asm__ volatile ("pause");
}
/* Destination goes in the high ICR dword; writing the low dword triggers the send,
 * so the order of these two writes matters. */
amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
}

14
kernel/amd64/apic.h Normal file
View File

@@ -0,0 +1,14 @@
#ifndef _KERNEL_AMD64_APIC_H
#define _KERNEL_AMD64_APIC_H
#include <libk/std.h>
/* Route a legacy IRQ (applying any MADT override) through the IOAPIC to vec/lapic_id */
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
/* Discover IOAPICs and interrupt source overrides from the ACPI MADT */
void amd64_ioapic_init (void);
/* Local APIC: identify, acknowledge interrupts, send IPIs, start the periodic timer */
uint32_t amd64_lapic_id (void);
void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
void amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H

View File

@@ -1,18 +1,56 @@
#include <limine/limine.h>
#include <amd64/apic.h>
#include <amd64/debug.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <sys/debug.h>
#include <mm/pmm.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <uacpi/uacpi.h>
void bootmain(void) {
amd64_init();
DEBUG("Hello from amd64!\n");
#define UACPI_MEMORY_BUFFER_MAX 4096
pmm_init();
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
int *a = malloc(sizeof(int));
*a = 6969;
DEBUG("a=%p, *a=%d\n", a, *a);
/*
* The kernel starts booting here. This is the entry point after Limine hands control. We set up all
* the necessary platform-dependent subsystems/drivers and jump into the init app.
*/
void bootmain (void) {
struct limine_mp_response* mp = limine_mp_request.response;
for (;;);
struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);
amd64_init (bsp_cpu, false);
syscall_init ();
amd64_debug_init ();
pmm_init ();
mm_init ();
rd_init ();
uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer));
amd64_ioapic_init ();
amd64_hpet_init ();
smp_init ();
proc_init ();
for (;;)
;
}

View File

@@ -1,46 +1,76 @@
#include <libk/std.h>
#include <libk/string.h>
#include <libk/printf.h>
#include <sys/debug.h>
#include <amd64/debug.h>
#include <amd64/io.h>
#include <libk/printf.h>
#include <libk/std.h>
#include <libk/string.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#define PORT_COM1 0x03F8
/* Port for printing to serial */
/* TODO: Make this configurable */
#define PORT_COM1 0x03F8
/* debugprintf buffer size */
#define BUFFER_SIZE 1024
/*
* Lock, which ensures that prints to the serial port are atomic (ie. one debugprintf is atomic in
* itself).
*/
static spin_lock_t serial_lock = SPIN_LOCK_INIT;
static bool amd64_debug_serial_tx_empty(void) {
return (bool)(amd64_io_inb(PORT_COM1 + 5) & 0x20);
static bool debug_init = false;
/* Block until TX buffer is empty */
static bool amd64_debug_serial_tx_empty (void) {
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
}
static void amd64_debug_serial_write(char x) {
while (!amd64_debug_serial_tx_empty());
amd64_io_outb(PORT_COM1, (uint8_t)x);
/* Write a single character to serial */
static void amd64_debug_serial_write (char x) {
while (!amd64_debug_serial_tx_empty ())
;
amd64_io_outb (PORT_COM1, (uint8_t)x);
}
void debugprintf(const char *fmt, ...) {
/*
* Formatted printing to serial. serial_lock ensures that all prints are atomic.
*/
void debugprintf (const char* fmt, ...) {
spin_lock_ctx_t ctxdbgp;
if (!debug_init)
return;
char buffer[BUFFER_SIZE];
memset(buffer, 0, sizeof(buffer));
memset (buffer, 0, sizeof (buffer));
va_list ap;
va_start(ap, fmt);
vsnprintf(buffer, sizeof(buffer), fmt, ap);
va_end(ap);
va_start (ap, fmt);
vsnprintf (buffer, sizeof (buffer), fmt, ap);
va_end (ap);
buffer[sizeof(buffer) - 1] = '\0';
buffer[sizeof (buffer) - 1] = '\0';
const char* p = buffer;
spin_lock (&serial_lock, &ctxdbgp);
const char *p = buffer;
while (*p) {
amd64_debug_serial_write(*p);
amd64_debug_serial_write (*p);
p++;
}
spin_unlock (&serial_lock, &ctxdbgp);
}
void amd64_debug_init(void) {
amd64_io_outb(PORT_COM1 + 1, 0x00);
amd64_io_outb(PORT_COM1 + 3, 0x80);
amd64_io_outb(PORT_COM1 + 0, 0x03);
amd64_io_outb(PORT_COM1 + 1, 0x00);
amd64_io_outb(PORT_COM1 + 3, 0x03);
amd64_io_outb(PORT_COM1 + 2, 0xC7);
amd64_io_outb(PORT_COM1 + 4, 0x0B);
/* Initialize serial */
void amd64_debug_init (void) {
amd64_io_outb (PORT_COM1 + 1, 0x00);
amd64_io_outb (PORT_COM1 + 3, 0x80);
amd64_io_outb (PORT_COM1 + 0, 0x03);
amd64_io_outb (PORT_COM1 + 1, 0x00);
amd64_io_outb (PORT_COM1 + 3, 0x03);
amd64_io_outb (PORT_COM1 + 2, 0xC7);
amd64_io_outb (PORT_COM1 + 4, 0x0B);
debug_init = true;
}

View File

@@ -1,6 +1,6 @@
#ifndef _KERNEL_AMD64_DEBUG_H
#define _KERNEL_AMD64_DEBUG_H
void amd64_debug_init(void);
void amd64_debug_init (void);
#endif // _KERNEL_AMD64_DEBUG_H

View File

@@ -1,4 +1,11 @@
cflags += --target=x86_64-pc-none-elf
cflags += --target=x86_64-pc-none-elf \
-mno-sse \
-mno-sse2 \
-mno-avx \
-mno-mmx \
-mno-80387 \
-mno-red-zone \
-fno-omit-frame-pointer
ldflags += --target=x86_64-pc-none-elf \
-Wl,-zmax-page-size=0x1000

45
kernel/amd64/gdt.h Normal file
View File

@@ -0,0 +1,45 @@
#ifndef _KERNEL_AMD64_GDT_H
#define _KERNEL_AMD64_GDT_H
#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UDATA 0x18
#define GDT_UCODE 0x20
#define GDT_TSS 0x28
/* Size of kernel stack */
#define KSTACK_SIZE (32 * 1024)
/*
* 64-bit GDT structure. For more info see:
* - https://wiki.osdev.org/Global_Descriptor_Table
* - https://wiki.osdev.org/GDT_Tutorial
*/
struct gdt_entry {
uint16_t limitlow;
uint16_t baselow;
uint8_t basemid;
uint8_t access;
uint8_t gran;
uint8_t basehigh;
} PACKED;
/* Struct that gets loaded into GDTR */
struct gdt_ptr {
uint16_t limit;
uint64_t base;
} PACKED;
/* New, extended GDT (we need to extend Limine's GDT) */
struct gdt_extended {
struct gdt_entry old[5];
struct gdt_entry tsslow;
struct gdt_entry tsshigh;
} PACKED;
#endif // _KERNEL_AMD64_GDT_H

142
kernel/amd64/hpet.c Normal file
View File

@@ -0,0 +1,142 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>
/*
* HPET (High Precision Event Timer) driver code. See more at https://wiki.osdev.org/HPET
*/
/* HPET Main Counter Value Register */
#define HPET_MCVR 0xF0
/* HPET General Configuration Register */
#define HPET_GCR 0x10
/* HPET General Capabilities and ID Register */
#define HPET_GCIDR 0x00
/* Set whether we should use 32-bit or 64-bit reads/writes */
static bool hpet_32bits = 1;
/* Physical address for HPET MMIO */
static uintptr_t hpet_paddr;
/* HPET period in femtoseconds */
static uint64_t hpet_period_fs;
/* Lock, which protects concurrent access. See amd64/smp.c */
static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
/* Read a 64-bit HPET register through its HHDM mapping. Assumes caller holds hpet_lock */
static uint64_t amd64_hpet_read64 (uint32_t reg) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
return *(volatile uint64_t*)(hpet_vaddr + reg);
}
/* 32-bit variant of amd64_hpet_read64, for HPETs driven in 32-bit mode */
static uint32_t amd64_hpet_read32 (uint32_t reg) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
return *(volatile uint32_t*)(hpet_vaddr + reg);
}
/* Write a 64-bit HPET register through its HHDM mapping. Assumes caller holds hpet_lock */
static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
*(volatile uint64_t*)(hpet_vaddr + reg) = value;
}
/* 32-bit variant of amd64_hpet_write64, for HPETs driven in 32-bit mode */
static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
*(volatile uint32_t*)(hpet_vaddr + reg) = value;
}
/* Read current value of HPET_MCVR register (the free-running main counter). */
static uint64_t amd64_hpet_read_counter (void) {
uint64_t value;
spin_lock_ctx_t ctxhrc;
spin_lock (&hpet_lock, &ctxhrc);
if (!hpet_32bits)
value = amd64_hpet_read64 (HPET_MCVR);
else {
uint32_t hi1, lo, hi2;
/* hi/lo/hi read loop: retry until the high half is stable, so the combined
 * 64-bit value is consistent even if the low half wrapped mid-read. */
do {
hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
lo = amd64_hpet_read32 (HPET_MCVR + 0);
hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
} while (hi1 != hi2);
value = ((uint64_t)hi1 << 32) | lo;
}
spin_unlock (&hpet_lock, &ctxhrc);
return value;
}
/* Write the HPET main counter (HPET_MCVR). NOTE(review): the HPET spec expects the
 * counter to be halted during writes - amd64_hpet_init halts it via HPET_GCR first;
 * confirm any other caller does the same. */
static void amd64_hpet_write_counter (uint64_t value) {
spin_lock_ctx_t ctxhwc;
spin_lock (&hpet_lock, &ctxhwc);
if (!hpet_32bits)
amd64_hpet_write64 (HPET_MCVR, value);
else {
amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
}
spin_unlock (&hpet_lock, &ctxhwc);
}
/*
 * Busy-wait for a given number of microseconds on the HPET main counter. The wait can
 * last longer than requested because amd64_hpet_read_counter takes hpet_lock on every
 * poll. Returns immediately if the HPET has not been calibrated yet.
 *
 * us - number of microseconds to sleep
 */
void amd64_hpet_sleep_micro (uint64_t us) {
    if (hpet_period_fs == 0)
        return;
    /*
     * 1 us = 10^9 fs, so ticks = us * 10^9 / period_fs. Scaling the numerator instead
     * of dividing the period first (the old `hpet_period_fs / 1000000` form) keeps
     * precision and avoids a division by zero for periods below 10^6 fs.
     */
    uint64_t ticks_to_wait = (us * 1000000000ULL) / hpet_period_fs;
    uint64_t start = amd64_hpet_read_counter ();
    for (;;) {
        uint64_t now = amd64_hpet_read_counter ();
        /* Unsigned subtraction handles main-counter wraparound correctly. */
        if ((now - start) >= ticks_to_wait)
            break;
        __asm__ volatile ("pause" ::: "memory");
    }
}
/* Initialize HPET: locate it via the ACPI HPET table, map its MMIO window, read its
 * capabilities, then reset and start the main counter. Halts if no HPET exists. */
void amd64_hpet_init (void) {
struct uacpi_table hpet_table;
uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);
if (status != UACPI_STATUS_OK) {
DEBUG ("Could not find HPET table!\n");
spin ();
}
struct acpi_hpet* hpet = (struct acpi_hpet*)hpet_table.virt_addr;
hpet_paddr = (uintptr_t)hpet->address.address;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
/* GCIDR bit 13 = COUNT_SIZE_CAP: set when the main counter is 64-bit capable. */
hpet_32bits = (caps & (1 << 13)) ? 0 : 1;
/* Upper 32 bits of GCIDR hold the counter period in femtoseconds. */
hpet_period_fs = (uint32_t)(caps >> 32);
/* Halt, zero, then re-enable the main counter. */
amd64_hpet_write64 (HPET_GCR, 0);
amd64_hpet_write_counter (0);
amd64_hpet_write64 (HPET_GCR, 1);
}

9
kernel/amd64/hpet.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef _KERNEL_AMD64_HPET_H
#define _KERNEL_AMD64_HPET_H
#include <libk/std.h>
void amd64_hpet_sleep_micro (uint64_t us);
void amd64_hpet_init (void);
#endif // _KERNEL_AMD64_HPET_H

View File

@@ -1,45 +1,17 @@
#include <amd64/gdt.h>
#include <amd64/init.h>
#include <amd64/intr.h>
#include <amd64/smp.h>
#include <aux/compiler.h>
#include <libk/std.h>
#include <libk/string.h>
#include <amd64/init.h>
#include <amd64/tss.h>
#include <amd64/debug.h>
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UCODE 0x18
#define GDT_UDATA 0x20
#define GDT_TSS 0x28
#define TSS 0x80
#define TSS_PRESENT 0x89
#define KSTACK_SIZE (8*1024)
struct gdt_entry {
uint16_t limitlow;
uint16_t baselow;
uint8_t basemid;
uint8_t access;
uint8_t gran;
uint8_t basehigh;
} __attribute__((packed));
struct gdt_ptr {
uint16_t limit;
uint64_t base;
} __attribute__((packed));
struct gdt_extended {
struct gdt_entry old[5];
struct gdt_entry tsslow;
struct gdt_entry tsshigh;
} __attribute__((packed));
__attribute__((aligned(16))) static volatile uint8_t kernel_stack[KSTACK_SIZE];
__attribute__((aligned(16))) static volatile struct gdt_extended gdt;
static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
uint32_t limit, uint8_t acc, uint8_t gran) {
/* Set a GDT entry */
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
uint8_t acc, uint8_t gran) {
ent->baselow = (base & 0xFFFF);
ent->basemid = (base >> 16) & 0xFF;
ent->basehigh = (base >> 24) & 0xFF;
@@ -48,58 +20,70 @@ static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
ent->access = acc;
}
static void amd64_gdt_init(void) {
volatile struct tss *tss = amd64_get_tss();
/* Initialize GDT and TSS structures for a given CPU */
static void amd64_gdt_init (struct cpu* cpu) {
volatile struct tss* tss = &cpu->tss;
volatile struct gdt_extended* gdt = &cpu->gdt;
memset((void *)&gdt, 0, sizeof(gdt));
memset((void *)kernel_stack, 0, sizeof(kernel_stack));
memset((void *)tss, 0, sizeof(*tss));
memset ((void*)gdt, 0, sizeof (*gdt));
memset ((void*)tss, 0, sizeof (*tss));
tss->iopb_off = sizeof(*tss);
tss->rsp0 = (uint64_t)((uintptr_t)kernel_stack + sizeof(kernel_stack));
tss->iopb_off = sizeof (*tss);
tss->rsp0 = (uint64_t)((uintptr_t)cpu->kernel_stack + sizeof (cpu->kernel_stack));
tss->ist[0] = (uint64_t)((uintptr_t)cpu->except_stack + sizeof (cpu->except_stack));
tss->ist[1] = (uint64_t)((uintptr_t)cpu->irq_stack + sizeof (cpu->irq_stack));
uint64_t tssbase = (uint64_t)&tss;
uint64_t tsslimit = sizeof(*tss) - 1;
uint64_t tssbase = (uint64_t)tss;
uint64_t tsslimit = sizeof (*tss) - 1;
amd64_gdt_set(&gdt.old[0], 0, 0, 0, 0);
amd64_gdt_set(&gdt.old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set(&gdt.old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set(&gdt.old[3], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set(&gdt.old[4], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set(&gdt.tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
uint32_t tssbasehigh = (tssbase >> 32);
gdt.tsshigh.limitlow = (tssbasehigh & 0xFFFF);
gdt.tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
gdt.tsshigh.basemid = 0;
gdt.tsshigh.basehigh = 0;
gdt.tsshigh.access = 0;
gdt.tsshigh.gran = 0;
gdt->tsshigh.limitlow = (tssbasehigh & 0xFFFF);
gdt->tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
gdt->tsshigh.basemid = 0;
gdt->tsshigh.basehigh = 0;
gdt->tsshigh.access = 0;
gdt->tsshigh.gran = 0;
/* Load GDTR */
struct gdt_ptr gdtr;
gdtr.limit = sizeof(gdt) - 1;
gdtr.base = (uint64_t)&gdt;
__asm__ volatile("lgdt %0" :: "m"(gdtr) : "memory");
gdtr.limit = sizeof (*gdt) - 1;
gdtr.base = (uint64_t)gdt;
__asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");
__asm__ volatile(
"pushq %[kcode]\n"
"lea 1f(%%rip), %%rax\n"
"pushq %%rax\n"
"lretq\n"
"1:\n"
"movw %[kdata], %%ax\n"
"movw %%ax, %%ds\n"
"movw %%ax, %%es\n"
"movw %%ax, %%ss\n"
:
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory"
);
/* Reload CS */
__asm__ volatile ("pushq %[kcode]\n"
"lea 1f(%%rip), %%rax\n"
"pushq %%rax\n"
"lretq\n"
"1:\n"
"movw %[kdata], %%ax\n"
"movw %%ax, %%ds\n"
"movw %%ax, %%es\n"
"movw %%ax, %%ss\n"
:
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory");
__asm__ volatile("ltr %0" :: "r"((uint16_t)GDT_TSS));
__asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
}
void amd64_init(void) {
amd64_gdt_init();
amd64_debug_init();
/*
* Initialize essentials (GDT, TSS, IDT) for a given CPU
*
* load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on
* the BSP
*/
void amd64_init (struct cpu* cpu, bool load_idt) {
amd64_gdt_init (cpu);
if (load_idt)
amd64_load_idt ();
else
amd64_intr_init ();
}

View File

@@ -1,6 +1,8 @@
#ifndef _KERNEL_AMD64_INIT_H
#define _KERNEL_AMD64_INIT_H
void amd64_init(void);
#include <amd64/smp.h>
void amd64_init (struct cpu* cpu, bool load_idt);
#endif // _KERNEL_AMD64_INIT_H

221
kernel/amd64/intr.c Normal file
View File

@@ -0,0 +1,221 @@
#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <m/syscall_defs.h>
#include <sys/debug.h>
#include <sys/irq.h>
#include <sys/smp.h>
#include <sys/spin.h>
#include <syscall/syscall.h>
/* 8259 PIC defs. */
#define PIC1 0x20
#define PIC2 0xA0
#define PIC1_CMD PIC1
#define PIC1_DATA (PIC1 + 1)
#define PIC2_CMD PIC2
#define PIC2_DATA (PIC2 + 1)
#define PIC_EOI 0x20
#define ICW1_ICW4 0x01
#define ICW1_SINGLE 0x02
#define ICW1_INTVL4 0x04
#define ICW1_LEVEL 0x08
#define ICW1_INIT 0x10
#define ICW4_8086 0x01
#define ICW4_AUTO 0x02
#define ICW4_BUFSLAVE 0x08
#define ICW4_BUFMASER 0x0C
#define ICW4_SFNM 0x10
#define CASCADE_IRQ 2
/* IDT defs. */
#define IDT_ENTRIES_MAX 256
/* 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table */
struct idt_entry {
uint16_t intrlow;
uint16_t kernel_cs;
uint8_t ist;
uint8_t attrs;
uint16_t intrmid;
uint32_t intrhigh;
uint32_t resv;
} PACKED;
struct idt {
uint16_t limit;
uint64_t base;
} PACKED;
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
static volatile struct idt idt;
/* Remaps and disables old 8259 PIC, since we'll be using APIC. */
static void amd64_init_pic (void) {
/* Each port write is followed by an io_wait so the (slow) legacy PIC can settle. */
#define IO_OP(fn, ...) \
fn (__VA_ARGS__); \
amd64_io_wait ()
/* Start the init sequence, remap vectors to 0x20/0x28 (clear of CPU exceptions). */
IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP (amd64_io_outb, PIC1_DATA, 0x20);
IO_OP (amd64_io_outb, PIC2_DATA, 0x28);
IO_OP (amd64_io_outb, PIC1_DATA, (1 << CASCADE_IRQ));
IO_OP (amd64_io_outb, PIC2_DATA, 2);
IO_OP (amd64_io_outb, PIC1_DATA, ICW4_8086);
IO_OP (amd64_io_outb, PIC2_DATA, ICW4_8086);
/* Disable */
IO_OP (amd64_io_outb, PIC1_DATA, 0xFF);
IO_OP (amd64_io_outb, PIC2_DATA, 0xFF);
#undef IO_OP
}
/* Fill in one IDT gate: split the handler address across the three address fields
 * and set the selector, IST index, and attribute byte. */
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
uint8_t ist) {
    uint64_t addr = handler;
    ent->intrlow = (uint16_t)(addr & 0xFFFF);
    ent->intrmid = (uint16_t)((addr >> 16) & 0xFFFF);
    ent->intrhigh = (uint32_t)((addr >> 32) & 0xFFFFFFFF);
    ent->kernel_cs = GDT_KCODE;
    ent->ist = ist;
    ent->attrs = flags;
    ent->resv = 0;
}
/* Load the IDT (lidt from the file-scope idt descriptor) */
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
/* Initialize IDT entries and load the IDT on this CPU. */
static void amd64_idt_init (void) {
memset ((void*)idt_entries, 0, sizeof (idt_entries));
/* IDT_ENTRY declares the asm stub (from intr_stub.S) and installs it with gate type
 * 0x8E (present, DPL0, interrupt gate) on the given IST stack. */
#define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
/* clang-format off */
/* Exceptions (0-31) run on the regular kernel stack (IST 0). */
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
IDT_ENTRY (8, 0); IDT_ENTRY (9, 0); IDT_ENTRY (10, 0); IDT_ENTRY (11, 0);
IDT_ENTRY (12, 0); IDT_ENTRY (13, 0); IDT_ENTRY (14, 0); IDT_ENTRY (15, 0);
IDT_ENTRY (16, 0); IDT_ENTRY (17, 0); IDT_ENTRY (18, 0); IDT_ENTRY (19, 0);
IDT_ENTRY (20, 0); IDT_ENTRY (21, 0); IDT_ENTRY (22, 0); IDT_ENTRY (23, 0);
IDT_ENTRY (24, 0); IDT_ENTRY (25, 0); IDT_ENTRY (26, 0); IDT_ENTRY (27, 0);
IDT_ENTRY (28, 0); IDT_ENTRY (29, 0); IDT_ENTRY (30, 0); IDT_ENTRY (31, 0);
/* IRQs (32-47) and the custom vectors use IST 1 (per-CPU irq stack, see amd64_gdt_init). */
IDT_ENTRY (32, 1); IDT_ENTRY (33, 1); IDT_ENTRY (34, 1); IDT_ENTRY (35, 1);
IDT_ENTRY (36, 1); IDT_ENTRY (37, 1); IDT_ENTRY (38, 1); IDT_ENTRY (39, 1);
IDT_ENTRY (40, 1); IDT_ENTRY (41, 1); IDT_ENTRY (42, 1); IDT_ENTRY (43, 1);
IDT_ENTRY (44, 1); IDT_ENTRY (45, 1); IDT_ENTRY (46, 1); IDT_ENTRY (47, 1);
IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
IDT_ENTRY (TLB_SHOOTDOWN, 1);
IDT_ENTRY (CPU_REQUEST_SCHED, 1);
IDT_ENTRY (CPU_SPURIOUS, 1);
/* clang-format on */
#undef IDT_ENTRY
idt.limit = sizeof (idt_entries) - 1;
idt.base = (uint64_t)idt_entries;
amd64_load_idt ();
}
/* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */
static void amd64_intr_exception (struct saved_regs* regs) {
DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);
/* CR2 holds the faulting address for page faults; CR3 identifies the address space. */
uint64_t cr2;
__asm__ volatile ("movq %%cr2, %0" : "=r"(cr2));
uint64_t cr3;
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3));
debugprintf ("r15=%016lx r14=%016lx r13=%016lx\n"
"r12=%016lx r11=%016lx r10=%016lx\n"
"r9 =%016lx r8 =%016lx rbp=%016lx\n"
"rdi=%016lx rsi=%016lx rdx=%016lx\n"
"rcx=%016lx rax=%016lx trp=%016lx\n"
"err=%016lx rip=%016lx cs =%016lx\n"
"rfl=%016lx rsp=%016lx ss =%016lx\n"
"cr2=%016lx cr3=%016lx rbx=%016lx\n",
regs->r15, regs->r14, regs->r13, regs->r12, regs->r11, regs->r10, regs->r9, regs->r8,
regs->rbp, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->rax, regs->trap,
regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
regs->rbx);
/* CS with RPL 3 means the fault came from userspace: kill the offending process. */
if (regs->cs == (GDT_UCODE | 0x03)) {
proc_kill (thiscpu->proc_current);
} else {
/* Kernel-mode exception - unrecoverable, halt this CPU. */
spin ();
}
}
/* Handle incoming interrupt, dispatch IRQ handlers. Called from the asm stubs with a
 * pointer to the saved register frame. */
void amd64_intr_handler (void* stack_ptr) {
spin_lock_ctx_t ctxcpu, ctxpr;
/* Interrupts may arrive with a user CR3 active; switch to the kernel address space. */
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr;
/* Snapshot the interrupted register state into the current process. Lock order is
 * cpu lock before proc lock - keep it consistent with the rest of the kernel. */
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* proc_current = thiscpu->proc_current;
spin_lock (&proc_current->lock, &ctxpr);
memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&proc_current->lock, &ctxpr);
spin_unlock (&thiscpu->lock, &ctxcpu);
if (regs->trap <= 31) {
/* Vectors 0-31 are CPU exceptions. */
amd64_intr_exception (regs);
} else {
/* Acknowledge the LAPIC first, then dispatch any registered IRQ handler. */
amd64_lapic_eoi ();
struct irq* irq = irq_find (regs->trap);
if (irq != NULL) {
irq->func (irq->arg, stack_ptr);
}
}
}
/* Initialize interrupts: neutralize the legacy 8259 PIC, then build and load the IDT */
void amd64_intr_init (void) {
amd64_init_pic ();
amd64_idt_init ();
}
/* Aux. */
/* Save RFLAGS of the current CPU and disable interrupts (pushfq; cli) */
static uint64_t amd64_irq_save_flags (void) {
uint64_t rflags;
__asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
return rflags;
}
/* Restore interrupts (IF bit, bit 9) from RFLAGS; leaves them off if they were off */
static void amd64_irq_restore_flags (uint64_t rflags) {
if (rflags & (1ULL << 9))
__asm__ volatile ("sti");
}
/* Save current interrupt state into a spin lock context (used by spin_lock) */
void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
/* Restore interrupt state from a spin lock context (used by spin_unlock) */
void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }

37
kernel/amd64/intr.h Normal file
View File

@@ -0,0 +1,37 @@
#ifndef _KERNEL_AMD64_INTR_H
#define _KERNEL_AMD64_INTR_H
#include <aux/compiler.h>
#include <libk/std.h>
struct saved_regs {
/* regs */
uint64_t r15;
uint64_t r14;
uint64_t r13;
uint64_t r12;
uint64_t r11;
uint64_t r10;
uint64_t r9;
uint64_t r8;
uint64_t rbx;
uint64_t rbp;
uint64_t rdi;
uint64_t rsi;
uint64_t rdx;
uint64_t rcx;
uint64_t rax;
/* interrupt stuff */
uint64_t trap;
uint64_t error;
uint64_t rip;
uint64_t cs;
uint64_t rflags;
uint64_t rsp;
uint64_t ss;
} PACKED;
void amd64_load_idt (void);
void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H

12
kernel/amd64/intr_defs.h Normal file
View File

@@ -0,0 +1,12 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H
/* Definitions for custom, nonstandard IDT entries. They have to be remapped by amd64_resolve_irq
* into legacy IRQs. */
#define SCHED_PREEMPT_TIMER 80
#define TLB_SHOOTDOWN 81
#define CPU_REQUEST_SCHED 82
#define CPU_SPURIOUS 255
#endif // _KERNEL_AMD64_INTR_DEFS_H

100
kernel/amd64/intr_stub.S Normal file
View File

@@ -0,0 +1,100 @@
#include <amd64/intr_defs.h>
#include <amd64/regsasm.h>
.extern amd64_intr_handler
#define err(z) \
pushq $z;
#define no_err(z) \
pushq $0; \
pushq $z;
#define make_intr_stub(x, n) \
.global amd64_intr ## n; \
amd64_intr ## n:; \
x(n); \
cli; \
; \
push_regs; \
; \
movw $0x10, %ax; \
movw %ax, %ds; \
movw %ax, %es; \
; \
cld; \
; \
movq %rsp, %rdi; \
; \
movq %cr3, %rax; pushq %rax; \
; \
movq %rsp, %rbp; \
; \
subq $8, %rsp; \
andq $-16, %rsp; \
; \
callq amd64_intr_handler; \
; \
movq %rbp, %rsp; \
; \
popq %rax; movq %rax, %cr3; \
; \
pop_regs; \
addq $16, %rsp; \
; \
iretq;
make_intr_stub(no_err, 0)
make_intr_stub(no_err, 1)
make_intr_stub(no_err, 2)
make_intr_stub(no_err, 3)
make_intr_stub(no_err, 4)
make_intr_stub(no_err, 5)
make_intr_stub(no_err, 6)
make_intr_stub(no_err, 7)
make_intr_stub(err, 8)
make_intr_stub(no_err, 9)
make_intr_stub(err, 10)
make_intr_stub(err, 11)
make_intr_stub(err, 12)
make_intr_stub(err, 13)
make_intr_stub(err, 14)
make_intr_stub(no_err, 15)
make_intr_stub(no_err, 16)
make_intr_stub(err, 17)
make_intr_stub(no_err, 18)
make_intr_stub(no_err, 19)
make_intr_stub(no_err, 20)
make_intr_stub(err, 21)
make_intr_stub(no_err, 22)
make_intr_stub(no_err, 23)
make_intr_stub(no_err, 24)
make_intr_stub(no_err, 25)
make_intr_stub(no_err, 26)
make_intr_stub(no_err, 27)
make_intr_stub(no_err, 28)
make_intr_stub(err, 29)
make_intr_stub(err, 30)
make_intr_stub(no_err, 31)
make_intr_stub(no_err, 32)
make_intr_stub(no_err, 33)
make_intr_stub(no_err, 34)
make_intr_stub(no_err, 35)
make_intr_stub(no_err, 36)
make_intr_stub(no_err, 37)
make_intr_stub(no_err, 38)
make_intr_stub(no_err, 39)
make_intr_stub(no_err, 40)
make_intr_stub(no_err, 41)
make_intr_stub(no_err, 42)
make_intr_stub(no_err, 43)
make_intr_stub(no_err, 44)
make_intr_stub(no_err, 45)
make_intr_stub(no_err, 46)
make_intr_stub(no_err, 47)
make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN)
make_intr_stub(no_err, CPU_REQUEST_SCHED)
make_intr_stub(no_err, CPU_SPURIOUS)

View File

@@ -1,54 +1,51 @@
#include <libk/std.h>
#include <amd64/io.h>
#include <libk/std.h>
void amd64_io_outb(uint16_t port, uint8_t v) {
__asm__ volatile("outb %1, %0" :: "dN"(port), "a"(v));
/// Perform outb instruction (send 8-bit int)
void amd64_io_outb (uint16_t port, uint8_t v) {
__asm__ volatile ("outb %1, %0" ::"dN"(port), "a"(v));
}
void amd64_io_outw(uint16_t port, uint16_t v) {
__asm__ volatile("outw %%ax, %%dx" :: "a"(v), "d"(port));
/// Perform outw instruction (send 16-bit int)
void amd64_io_outw (uint16_t port, uint16_t v) {
__asm__ volatile ("outw %%ax, %%dx" ::"a"(v), "d"(port));
}
void amd64_io_outl(uint16_t port, uint32_t v) {
__asm__ volatile("outl %%eax, %%dx" :: "d"(port), "a"(v));
/// Perform outl instruction (send 32-bit int)
void amd64_io_outl (uint16_t port, uint32_t v) {
__asm__ volatile ("outl %%eax, %%dx" ::"d"(port), "a"(v));
}
void amd64_io_outsw(uint16_t port, const void *addr, int cnt) {
__asm__ volatile(
"cld; rep outsw"
: "+S"(addr), "+c"(cnt)
: "d"(port)
: "memory", "cc"
);
/// Perform outsw instruction (send a string)
void amd64_io_outsw (uint16_t port, const void* addr, int cnt) {
__asm__ volatile ("cld; rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}
uint8_t amd64_io_inb(uint16_t port) {
/// Perform inb instruction (receive 8-bit int)
uint8_t amd64_io_inb (uint16_t port) {
uint8_t r;
__asm__ volatile("inb %1, %0" : "=a"(r) : "dN"(port));
__asm__ volatile ("inb %1, %0" : "=a"(r) : "dN"(port));
return r;
}
uint16_t amd64_io_inw(uint16_t port) {
/// Perform inw instruction (receive 16-bit int)
uint16_t amd64_io_inw (uint16_t port) {
uint16_t r;
__asm__ volatile("inw %%dx, %%ax" : "=a"(r) : "d"(port));
__asm__ volatile ("inw %%dx, %%ax" : "=a"(r) : "d"(port));
return r;
}
uint32_t amd64_io_inl(uint16_t port) {
/// Perform inl instruction (receive 32-bit int)
uint32_t amd64_io_inl (uint16_t port) {
uint32_t r;
__asm__ volatile("inl %%dx, %%eax" : "=a"(r) : "d"(port));
__asm__ volatile ("inl %%dx, %%eax" : "=a"(r) : "d"(port));
return r;
}
void amd64_io_insw(uint16_t port, void *addr, int cnt) {
__asm__ volatile(
"cld; rep insw"
: "+D"(addr), "+c"(cnt)
: "d"(port)
: "memory", "cc"
);
/// Perform insw instruction (receive a string)
void amd64_io_insw (uint16_t port, void* addr, int cnt) {
__asm__ volatile ("cld; rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}
void amd64_io_wait(void) {
amd64_io_outb(0x80, 0);
}
/// output a byte on port 0x80, which does a small IO delay
void amd64_io_wait (void) { amd64_io_outb (0x80, 0); }

View File

@@ -3,14 +3,14 @@
#include <libk/std.h>
void amd64_io_outb(uint16_t port, uint8_t v);
void amd64_io_outw(uint16_t port, uint16_t v);
void amd64_io_outl(uint16_t port, uint32_t v);
void amd64_io_outsw(uint16_t port, const void *addr, int cnt);
uint8_t amd64_io_inb(uint16_t port);
uint16_t amd64_io_inw(uint16_t port);
uint32_t amd64_io_inl(uint16_t port);
void amd64_io_insw(uint16_t port, void *addr, int cnt);
void amd64_io_wait(void);
void amd64_io_outb (uint16_t port, uint8_t v);
void amd64_io_outw (uint16_t port, uint16_t v);
void amd64_io_outl (uint16_t port, uint32_t v);
void amd64_io_outsw (uint16_t port, const void* addr, int cnt);
uint8_t amd64_io_inb (uint16_t port);
uint16_t amd64_io_inw (uint16_t port);
uint32_t amd64_io_inl (uint16_t port);
void amd64_io_insw (uint16_t port, void* addr, int cnt);
void amd64_io_wait (void);
#endif // _KERNEL_AMD64_IO_H

321
kernel/amd64/mm.c Normal file
View File

@@ -0,0 +1,321 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/pmm.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#define AMD64_PG_PRESENT (1 << 0)
#define AMD64_PG_RW (1 << 1)
#define AMD64_PG_USER (1 << 2)
#define AMD64_PG_HUGE (1 << 7)
/* Auxiliary struct for page directory walking: one 9-bit index per paging level */
struct pg_index {
uint16_t pml4, pml3, pml2, pml1;
} PACKED;
/* Kernel page directory */
static struct pd kernel_pd;
static spin_lock_t kernel_pd_lock;
/* Acquire the global kernel page-directory lock. */
void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
/* Release the global kernel page-directory lock. BUG FIX: this previously called
 * spin_lock again, so the lock was never released and the next acquirer deadlocked. */
void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
/* Get current value of CR3 register (physical address of the active PML4) */
static uintptr_t amd64_current_cr3 (void) {
uintptr_t cr3;
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
return cr3;
}
/* Load kernel CR3 as current CR3. Skips the write (and the TLB flush it implies)
 * when the kernel page directory is already active. */
void amd64_load_kernel_cr3 (void) {
uintptr_t cr3 = amd64_current_cr3 ();
if (cr3 != kernel_pd.cr3_paddr) {
__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
}
}
/* Return the kernel page-directory descriptor */
struct pd* mm_get_kernel_pd (void) { return &kernel_pd; }
/* Extract PML info from virtual address */
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
struct pg_index ret;
ret.pml4 = ((vaddr >> 39) & 0x1FF);
ret.pml3 = ((vaddr >> 30) & 0x1FF);
ret.pml2 = ((vaddr >> 21) & 0x1FF);
ret.pml1 = ((vaddr >> 12) & 0x1FF);
return ret;
}
/* Descend one level of the paging hierarchy: return the table referenced by
 * table[entry_idx] as an HHDM-virtual pointer. When the entry is not present
 * and alloc is true, a fresh zeroed table frame is allocated and installed
 * with PRESENT|RW|USER (the final PTE's flags still restrict access).
 * Returns NULL when the entry is a huge-page mapping, when allocation fails,
 * or when alloc is false for a non-present entry. */
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
uint64_t entry = table[entry_idx];
physaddr_t paddr;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
if (entry & AMD64_PG_PRESENT) {
if (entry & AMD64_PG_HUGE)
return NULL;
paddr = entry & ~0xFFFULL;
} else {
if (!alloc)
return NULL;
paddr = pmm_alloc (1);
if (paddr == PMM_ALLOC_ERR)
return NULL;
memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW | AMD64_PG_USER;
}
return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
}
/* Return true when none of the 512 entries of a paging table is present */
static bool amd64_mm_is_table_empty (uint64_t* table) {
  size_t idx = 0;
  while (idx < 512) {
    if (table[idx] & AMD64_PG_PRESENT)
      return false;
    idx++;
  }
  return true;
}
/* Translate generic MM_PG_* flags into their AMD64 page-table equivalents */
static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
  uint64_t out = 0;
  if (generic & MM_PG_PRESENT)
    out |= AMD64_PG_PRESENT;
  if (generic & MM_PG_RW)
    out |= AMD64_PG_RW;
  if (generic & MM_PG_USER)
    out |= AMD64_PG_USER;
  return out;
}
/* Reload CR3 with its current value ON THE LOCAL CPU ONLY, flushing this
 * CPU's non-global TLB entries. Other CPUs keep any stale translations. */
static void amd64_reload_cr3 (void) {
uint64_t cr3;
__asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
}
/* Map paddr at vaddr in pd with the given generic MM_PG_* flags, allocating
 * intermediate tables as needed. Fails silently when a table allocation
 * fails or the walk hits a huge-page mapping. The caller is responsible for
 * flushing the TLB afterwards. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
if (pml3 == NULL)
return;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
if (pml2 == NULL)
return;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
if (pml1 == NULL)
return;
uint64_t* pte = &pml1[pg_index.pml1];
/* only PRESENT/RW/USER (bits 0-2) of the resolved flags reach the PTE */
*pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
}
/* Map a page into the kernel page directory and flush the local TLB.
 * NOTE(review): only the local CPU's TLB is reloaded; other CPUs may keep a
 * stale translation until their next CR3 load — confirm callers tolerate this. */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
mm_map_page (&kernel_pd, paddr, vaddr, flags);
amd64_reload_cr3 ();
}
/* Unmap vaddr from pd, then free any paging tables that became empty,
 * walking back up the hierarchy so their frames return to the PMM.
 * The caller is responsible for flushing the TLB afterwards. */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
return;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
return;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
return;
uint64_t* pte = &pml1[pg_index.pml1];
if ((*pte) & AMD64_PG_PRESENT)
*pte = 0;
/* prune now-empty tables bottom-up: PML1, then PML2, then PML3 */
if (amd64_mm_is_table_empty (pml1)) {
uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
pmm_free (pml1_phys, 1);
pml2[pg_index.pml2] = 0;
if (amd64_mm_is_table_empty (pml2)) {
uintptr_t pml2_phys = pml3[pg_index.pml3] & ~0xFFFULL;
pmm_free (pml2_phys, 1);
pml3[pg_index.pml3] = 0;
if (amd64_mm_is_table_empty (pml3)) {
uintptr_t pml3_phys = pml4[pg_index.pml4] & ~0xFFFULL;
pmm_free (pml3_phys, 1);
pml4[pg_index.pml4] = 0;
}
}
}
}
/* Unmap a page from the kernel page directory and flush the local TLB
 * (local CPU only, same caveat as mm_map_kernel_page ()) */
void mm_unmap_kernel_page (uintptr_t vaddr) {
mm_unmap_page (&kernel_pd, vaddr);
amd64_reload_cr3 ();
}
/* Allocate a fresh top-level page table for a userspace process: the lower
 * half (user mappings) is zeroed, and the upper half is shared with the
 * kernel by copying the kernel PML4's high 256 entries. Returns the new
 * PML4's physical address, or 0 on allocation failure. */
uintptr_t mm_alloc_user_pd_phys (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
physaddr_t cr3 = pmm_alloc (1);
if (cr3 == PMM_ALLOC_ERR)
return 0;
uint8_t* vu_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + cr3);
memset ((void*)vu_cr3, 0, PAGE_SIZE / 2);
uint8_t* vk_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + (uintptr_t)kernel_pd.cr3_paddr);
memcpy (&vu_cr3[PAGE_SIZE / 2], &vk_cr3[PAGE_SIZE / 2], PAGE_SIZE / 2);
return cr3;
}
/* Return true when vaddr has a present mapping in pd (walk without
 * allocating; huge-page entries abort the walk and report false) */
bool mm_validate (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool ret = false;
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
uint64_t pte = pml1[pg_index.pml1];
ret = (pte & AMD64_PG_PRESENT) != 0;
done:
return ret;
}
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
bool ok = true;
for (size_t i = 0; i < size; i++) {
ok = mm_validate (pd, vaddr + i);
if (!ok)
goto done;
}
done:
return ok;
}
/* Reverse-translate a physical address to a virtual address mapped in pd by
 * brute-force scanning the entire 4-level hierarchy. Returns the FIRST
 * matching virtual address (plus the page offset of paddr), or 0 when the
 * frame is not mapped. O(all present entries) — expensive; intended for
 * rare lookups only. */
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0;
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
for (size_t i4 = 0; i4 < 512; i4++) {
if (!(pml4[i4] & AMD64_PG_PRESENT))
continue;
uint64_t* pml3 = (uint64_t*)((uintptr_t)hhdm->offset + (pml4[i4] & ~0xFFFULL));
for (size_t i3 = 0; i3 < 512; i3++) {
if (!(pml3[i3] & AMD64_PG_PRESENT))
continue;
uint64_t* pml2 = (uint64_t*)((uintptr_t)hhdm->offset + (pml3[i3] & ~0xFFFULL));
for (size_t i2 = 0; i2 < 512; i2++) {
if (!(pml2[i2] & AMD64_PG_PRESENT))
continue;
uint64_t* pml1 = (uint64_t*)((uintptr_t)hhdm->offset + (pml2[i2] & ~0xFFFULL));
for (size_t i1 = 0; i1 < 512; i1++) {
if ((pml1[i1] & AMD64_PG_PRESENT) && ((pml1[i1] & ~0xFFFULL) == (paddr & ~0xFFFULL))) {
/* rebuild the virtual address from the four table indices */
struct pg_index idx = {i4, i3, i2, i1};
ret = (((uint64_t)idx.pml4 << 39) | ((uint64_t)idx.pml3 << 30) |
((uint64_t)idx.pml2 << 21) | ((uint64_t)idx.pml1 << 12) | (paddr & 0xFFFULL));
goto done;
}
}
}
}
}
done:
return ret;
}
/* Translate a virtual address to its physical address via a page walk.
 * Returns 0 when vaddr is unmapped (or mapped by a huge page, which
 * aborts the walk). The page offset is carried over from vaddr. */
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0;
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
uint64_t pte = pml1[pg_index.pml1];
if (!(pte & AMD64_PG_PRESENT))
goto done;
ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
done:
return ret;
}
/* Initialize the memory management subsystem by adopting the
 * bootloader-provided page tables as the kernel page directory */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -1,6 +1,15 @@
#ifndef _KERNEL_AMD64_MM_H
#define _KERNEL_AMD64_MM_H
#define PAGE_SIZE 4096
#include <libk/std.h>
#include <sync/spin_lock.h>
#define PAGE_SIZE 4096
struct pd {
uintptr_t cr3_paddr;
};
void amd64_load_kernel_cr3 (void);
#endif // _KERNEL_AMD64_MM_H

1093
kernel/amd64/msr-index.h Normal file

File diff suppressed because it is too large Load Diff

16
kernel/amd64/msr.c Normal file
View File

@@ -0,0 +1,16 @@
#include <amd64/msr.h>
#include <libk/std.h>
/// Read a model-specific register: rdmsr selects the MSR via ecx and
/// returns the 64-bit value split across edx:eax.
uint64_t amd64_rdmsr (uint32_t msr) {
uint32_t low, high;
__asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr));
return ((uint64_t)high << 32 | (uint64_t)low);
}
/// Write a model-specific register: wrmsr takes the MSR number in ecx and
/// the value split across edx:eax.
void amd64_wrmsr (uint32_t msr, uint64_t value) {
uint32_t low = (uint32_t)(value & 0xFFFFFFFF);
uint32_t high = (uint32_t)(value >> 32);
__asm__ volatile ("wrmsr" ::"c"(msr), "a"(low), "d"(high));
}

9
kernel/amd64/msr.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef _KERNEL_AMD64_MSR_H
#define _KERNEL_AMD64_MSR_H
#include <libk/std.h>
uint64_t amd64_rdmsr (uint32_t msr);
void amd64_wrmsr (uint32_t msr, uint64_t value);
#endif // _KERNEL_AMD64_MSR_H

138
kernel/amd64/proc.c Normal file
View File

@@ -0,0 +1,138 @@
#include <amd64/gdt.h>
#include <amd64/proc.h>
#include <aux/elf.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/proc.h>
static atomic_int pids = 0;
struct proc* proc_from_elf (uint8_t* elf_contents) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct proc* proc = malloc (sizeof (*proc));
if (proc == NULL)
return NULL;
memset (proc, 0, sizeof (*proc));
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);
proc->pid = atomic_fetch_add (&pids, 1);
proc->procgroup = procgroup_create ();
if (proc->procgroup == NULL) {
free (proc);
return NULL;
}
procgroup_attach (proc->procgroup, proc);
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
proc->flags |= PROC_USTK_PREALLOC;
struct elf_aux aux = proc_load_segments (proc, elf_contents);
proc->pdata.regs.ss = GDT_UDATA | 0x03;
proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
proc->pdata.regs.rflags = 0x202;
proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = aux.entry;
return proc;
}
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
uintptr_t argument_ptr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxprt;
struct proc* proc = malloc (sizeof (*proc));
if (proc == NULL)
return NULL;
memset (proc, 0, sizeof (*proc));
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);
proc->pid = atomic_fetch_add (&pids, 1);
spin_lock (&proto->lock, &ctxprt);
proc->procgroup = proto->procgroup;
procgroup_attach (proc->procgroup, proc);
spin_unlock (&proto->lock, &ctxprt);
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
proc->pdata.regs.ss = GDT_UDATA | 0x03;
proc->pdata.regs.rsp = (uint64_t)vstack_top;
proc->pdata.regs.rflags = 0x202;
proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = (uint64_t)entry;
proc->uvaddr_argument = argument_ptr;
proc_init_tls (proc);
return proc;
}
void proc_cleanup (struct proc* proc) {
proc_sqs_cleanup (proc);
proc_mutexes_cleanup (proc);
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
procgroup_detach (proc->procgroup, proc);
/* clean the process */
free (proc);
}
/* Set up this process's TLS block from the procgroup's template: map fresh
 * user pages, copy the template image in, and place the TCB self-pointer
 * immediately after it (x86-64 TLS: %fs:0 must hold the TCB's own user
 * address). No-op when the group has no TLS template.
 * NOTE(review): the procgroup_map () result is not checked — confirm it
 * cannot fail here, otherwise the memset below writes through a bad pointer. */
void proc_init_tls (struct proc* proc) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
if (proc->procgroup->tls.tls_tmpl == NULL)
return;
size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
size_t pages = proc->procgroup->tls.tls_tmpl_pages;
uintptr_t tls_paddr;
uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
/* write through the HHDM mapping; the process sees it at tls_vaddr */
uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
uintptr_t ktcb = k_tls_addr + tls_size;
uintptr_t utcb = tls_vaddr + tls_size;
*(uintptr_t*)ktcb = utcb;
proc->pdata.fs_base = utcb;
proc->pdata.tls_vaddr = tls_vaddr;
}

22
kernel/amd64/proc.h Normal file
View File

@@ -0,0 +1,22 @@
#ifndef _KERNEL_AMD64_PROC_H
#define _KERNEL_AMD64_PROC_H
#include <amd64/intr.h>
#include <libk/std.h>
/* Top of userspace process' stack */
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
/* Size of userspace process' stack */
#define USTACK_SIZE (256 * PAGE_SIZE)
/* proc_map () base address */
#define PROC_MAP_BASE 0x0000700000000000
/* Platform-dependent process data */
struct proc_platformdata {
/* user-mode register snapshot restored on context switch */
struct saved_regs regs;
/* HHDM-virtual address of the TOP of this process' kernel stack */
uintptr_t kernel_stack;
/* loaded into MSR_FS_BASE when scheduled; points at the user-space TCB */
uint64_t fs_base;
/* user virtual base of this process' TLS block (0 when none) */
uintptr_t tls_vaddr;
};
#endif // _KERNEL_AMD64_PROC_H

13
kernel/amd64/procgroup.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef _KERNEL_AMD64_PROCGROUP_H
#define _KERNEL_AMD64_PROCGROUP_H
#include <libk/std.h>
/* Per-procgroup TLS template, copied into each member process' TLS block
 * by proc_init_tls (). (Include guard fixed: was misspelled PROCGRPUP.) */
struct procgroup_tls {
uint8_t* tls_tmpl;          /* TLS initialization image, NULL when absent */
size_t tls_tmpl_size;       /* size of the initialization image in bytes */
size_t tls_tmpl_total_size; /* presumably image + zero-fill area — TODO confirm */
size_t tls_tmpl_pages;      /* pages backing each per-process TLS copy */
};
#endif // _KERNEL_AMD64_PROCGROUP_H

55
kernel/amd64/regsasm.h Normal file
View File

@@ -0,0 +1,55 @@
#ifndef _KERNEL_AMD64_REGSASM_H
#define _KERNEL_AMD64_REGSASM_H
/* Assembly helper macros (used from .S files via the C preprocessor).
 * push_regs / pop_regs save and restore all 15 general-purpose registers
 * in mirrored order, so a pop_regs undoes a push_regs exactly. */
#define push_regs \
pushq % rax; \
pushq % rcx; \
pushq % rdx; \
pushq % rsi; \
pushq % rdi; \
pushq % rbp; \
pushq % rbx; \
pushq % r8; \
pushq % r9; \
pushq % r10; \
pushq % r11; \
pushq % r12; \
pushq % r13; \
pushq % r14; \
pushq % r15;
#define pop_regs \
popq % r15; \
popq % r14; \
popq % r13; \
popq % r12; \
popq % r11; \
popq % r10; \
popq % r9; \
popq % r8; \
popq % rbx; \
popq % rbp; \
popq % rdi; \
popq % rsi; \
popq % rdx; \
popq % rcx; \
popq % rax;
/* Like pop_regs, but discards the saved rax slot (addq $8) so that a value
 * already in %rax — e.g. a syscall return — survives the restore. */
#define pop_regs_skip_rax \
popq % r15; \
popq % r14; \
popq % r13; \
popq % r12; \
popq % r11; \
popq % r10; \
popq % r9; \
popq % r8; \
popq % rbx; \
popq % rbp; \
popq % rdi; \
popq % rsi; \
popq % rdx; \
popq % rcx; \
addq $8, % rsp
#endif // _KERNEL_AMD64_REGSASM_H

9
kernel/amd64/sched.S Normal file
View File

@@ -0,0 +1,9 @@
#include <amd64/regsasm.h>
.global amd64_do_sched
/* amd64_do_sched (regs, cr3): enter a process.
 * rdi = pointer to its saved register frame, rsi = its CR3 physical
 * address. Switches address space, restores the GPRs, skips the two
 * 8-byte slots below the iret frame, and iretq's into the process. */
amd64_do_sched:
movq %rsi, %cr3
movq %rdi, %rsp
pop_regs
addq $16, %rsp
iretq

7
kernel/amd64/sched.h Normal file
View File

@@ -0,0 +1,7 @@
#ifndef _KERNEL_AMD64_SCHED_H
#define _KERNEL_AMD64_SCHED_H
/// Perform process context switch
void amd64_do_sched (void* regs, void* cr3);
#endif // _KERNEL_AMD64_SCHED_H

23
kernel/amd64/sched1.c Normal file
View File

@@ -0,0 +1,23 @@
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <amd64/sched.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/smp.h>
/* Switch the local CPU to proc: publish its kernel stack for interrupt
 * entry (TSS rsp0) and syscall entry, restore its TLS base (MSR_FS_BASE),
 * release both locks, then jump into the process via amd64_do_sched ()
 * — which iretq's into user mode and does not return here. */
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
spin_lock_ctx_t ctxpr;
spin_lock (&proc->lock, &ctxpr);
thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (cpu_lock, ctxcpu);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
}

114
kernel/amd64/smp.c Normal file
View File

@@ -0,0 +1,114 @@
#include <amd64/apic.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscall.h>
/// CPU ID counter: next free slot in cpus[]
static atomic_uint cpu_counter = 0;
/// The CPUs
static struct cpu cpus[CPUS_MAX];
/// APs still booting; each AP decrements it, smp_init () waits for zero
static atomic_int cpu_init_count;
/// Claim and zero-initialize the next CPU slot, then publish it via
/// MSR_GS_BASE so cpu_get () can find it on this CPU.
/// NOTE(review): no bound check against CPUS_MAX — more than CPUS_MAX CPUs
/// would index past cpus[]; confirm the platform caps the CPU count.
struct cpu* cpu_make (uint64_t lapic_id) {
int id = atomic_fetch_add (&cpu_counter, 1);
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
cpu->lapic_id = lapic_id;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
return cpu;
}
/// Return the calling CPU's struct cpu, stashed in MSR_GS_BASE by cpu_make ()
struct cpu* cpu_get (void) {
struct cpu* ptr = (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
return ptr;
}
/// Ask a CPU to reschedule: directly when it is the local CPU, otherwise
/// via an IPI carrying the CPU_REQUEST_SCHED vector.
void cpu_request_sched (struct cpu* cpu) {
if (cpu == thiscpu) {
proc_sched ();
return;
}
amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
}
/* Pick the CPU with the shortest run queue (snapshot-based; queues may
 * change concurrently, so this is a best-effort load-balancing choice). */
struct cpu* cpu_find_lightest (void) {
  struct cpu* best = &cpus[0];
  int best_load = atomic_load (&best->proc_run_q_count);
  for (unsigned int idx = 1; idx < cpu_counter; idx++) {
    int candidate_load = atomic_load (&cpus[idx].proc_run_q_count);
    if (candidate_load < best_load) {
      best_load = candidate_load;
      best = &cpus[idx];
    }
  }
  return best;
}
/// Bootstrap code for non-BSP CPUs: switch to the kernel page tables, set
/// up this CPU's GDT/IDT, syscall MSRs, and LAPIC timer, report readiness,
/// then enter a per-CPU idle ("spin") process.
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_load_kernel_cr3 ();
struct cpu* cpu = cpu_make (mp_info->lapic_id);
amd64_init (cpu, true); /* gdt + idt */
syscall_init ();
amd64_lapic_init (1000);
DEBUG ("CPU %u is online!\n", thiscpu->id);
/* signal smp_init () that this AP finished booting */
atomic_fetch_sub (&cpu_init_count, 1);
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}
/// Initialize SMP on AMD64: start every non-BSP CPU via the Limine MP
/// protocol and busy-wait until all of them have checked in.
void smp_init (void) {
amd64_lapic_init (1000);
struct limine_mp_response* mp = limine_mp_request.response;
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
for (size_t i = 0; i < mp->cpu_count; i++) {
if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
/* writing goto_address releases the AP into amd64_smp_bootstrap () */
mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
}
}
/* spin until every AP has decremented the counter */
while (atomic_load (&cpu_init_count) > 0)
;
DEBUG ("All CPUs are online\n");
}

44
kernel/amd64/smp.h Normal file
View File

@@ -0,0 +1,44 @@
#ifndef _KERNEL_AMD64_SMP_H
#define _KERNEL_AMD64_SMP_H
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#define CPUS_MAX 32
/* Per-CPU state, located via MSR_GS_BASE (see cpu_make ()/cpu_get ()) */
struct cpu {
/* for syscall instruction: amd64_syscall_entry addresses these two as
 * %gs:0 and %gs:8 — they MUST remain the first two fields */
uintptr_t syscall_user_stack;
uintptr_t syscall_kernel_stack;
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
volatile struct gdt_extended gdt ALIGNED (16);
volatile struct tss tss;
uintptr_t lapic_mmio_base;
uint64_t lapic_ticks;
uint64_t lapic_id;
uint32_t id;
spin_lock_t lock;
/* run queue of processes owned by this CPU */
struct list_node_link* proc_run_q;
struct proc* proc_current;
/* queue length, read lock-free by cpu_find_lightest () */
atomic_int proc_run_q_count;
};
struct cpu* cpu_make (uint64_t lapic_id);
struct cpu* cpu_get (void);
void cpu_request_sched (struct cpu* cpu);
struct cpu* cpu_find_lightest (void);
/* Convenience accessor for the local CPU's struct cpu */
#define thiscpu (cpu_get ())
#endif // _KERNEL_AMD64_SMP_H

4
kernel/amd64/spin.S Normal file
View File

@@ -0,0 +1,4 @@
/* Idle loop: halt until the next interrupt, forever */
.global amd64_spin
amd64_spin:
hlt
jmp amd64_spin

View File

@@ -1,5 +1,4 @@
#include <sys/spin_lock.h>
void spin_lock_relax(void) {
__asm__ volatile("pause");
}
/// Relax the spinlock using AMD64 pause instruction
void spin_lock_relax (void) { __asm__ volatile ("pause"); }

View File

@@ -1,13 +1,40 @@
c += amd64/bootmain.c \
amd64/init.c \
amd64/tss.c \
amd64/io.c \
amd64/debug.c \
amd64/spin_lock.c
amd64/spin_lock.c \
amd64/intr.c \
amd64/apic.c \
amd64/msr.c \
amd64/hpet.c \
amd64/mm.c \
amd64/time.c \
amd64/smp.c \
amd64/sched1.c \
amd64/proc.c \
amd64/syscall.c
S += amd64/intr_stub.S \
amd64/spin.S \
amd64/sched.S \
amd64/syscallentry.S
o += amd64/bootmain.o \
amd64/init.o \
amd64/tss.o \
amd64/io.o \
amd64/debug.o \
amd64/spin_lock.o
amd64/spin_lock.o \
amd64/intr.o \
amd64/intr_stub.o \
amd64/spin.o \
amd64/apic.o \
amd64/msr.o \
amd64/hpet.o \
amd64/mm.o \
amd64/time.o \
amd64/smp.o \
amd64/sched.o \
amd64/sched1.o \
amd64/proc.o \
amd64/syscall.o \
amd64/syscallentry.o

46
kernel/amd64/syscall.c Normal file
View File

@@ -0,0 +1,46 @@
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/string.h>
#include <m/status.h>
#include <m/syscall_defs.h>
#include <proc/proc.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <syscall/syscall.h>
extern void amd64_syscall_entry (void);
/* C-level syscall dispatcher, called from amd64_syscall_entry with a
 * pointer to the saved_regs frame built on the kernel stack. Switches to
 * the kernel address space, snapshots the caller's registers into its
 * struct proc, then dispatches on the syscall number in rax. The return
 * value ends up back in the caller's rax. Arguments are passed in
 * rdi, rsi, rdx, r10, r8, r9 (Linux-style syscall convention). */
uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
spin_lock_ctx_t ctxcpu, ctxpr;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* caller = thiscpu->proc_current;
spin_lock (&caller->lock, &ctxpr);
/* preserve the full register state so the process can be rescheduled */
memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&caller->lock, &ctxpr);
spin_unlock (&thiscpu->lock, &ctxcpu);
int syscall_num = regs->rax;
syscall_handler_func_t func = syscall_find_handler (syscall_num);
if (func == NULL) {
return -ST_SYSCALL_NOT_FOUND;
}
return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
}
/* Enable the syscall/sysret fast path on the calling CPU: segment selector
 * bases in STAR, the entry point in LSTAR, the RFLAGS clear-mask in
 * SYSCALL_MASK (bit 9 clears IF on entry), and the EFER.SCE enable bit.
 * Must run once per CPU (see amd64_smp_bootstrap ()). */
void syscall_init (void) {
amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
}

View File

@@ -0,0 +1,49 @@
#include <amd64/regsasm.h>
.extern amd64_syscall_dispatch
.global amd64_syscall_entry
/* syscall-instruction entry point: user rip arrives in rcx, rflags in r11.
 * Builds an interrupt-style frame plus a full GPR save on the per-CPU
 * kernel stack so amd64_syscall_dispatch () sees the same saved_regs
 * layout as an interrupt handler would. */
amd64_syscall_entry:
cli
/* %gs:0 = syscall_user_stack scratch, %gs:8 = syscall_kernel_stack */
movq %rsp, %gs:0
movq %gs:8, %rsp
/* hand-built frame: ss, rsp, rflags, cs, rip, plus dummy err/vec slots */
pushq $0x1b
pushq %gs:0
pushq %r11
pushq $0x23
pushq %rcx
pushq $0
pushq $0
push_regs
/* load kernel data selectors */
movw $0x10, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
cld
movq %rsp, %rdi
/* preserve the caller's cr3 across the C dispatcher */
movq %cr3, %rax; pushq %rax
movq %rsp, %rbp
subq $8, %rsp
/* keep the stack 16-byte aligned at the call, as the ABI requires */
andq $-16, %rsp
callq amd64_syscall_dispatch
movq %rbp, %rsp
popq %rbx; movq %rbx, %cr3
/* rax now holds the syscall return value; restore everything else */
pop_regs_skip_rax
/* drop the 7-slot hand-built frame (7 * 8 = 56 bytes) */
addq $56, %rsp
movq %gs:0, %rsp
sysretq

6
kernel/amd64/time.c Normal file
View File

@@ -0,0 +1,6 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <sys/time.h>
/// Sleep for the given number of microseconds (busy-waits on the HPET)
void sleep_micro (size_t us) { amd64_hpet_sleep_micro (us); }

View File

@@ -1,8 +0,0 @@
#include <libk/std.h>
#include <amd64/tss.h>
__attribute__((aligned(16))) static volatile struct tss tss;
volatile struct tss *amd64_get_tss(void) {
return &tss;
}

View File

@@ -1,8 +1,10 @@
#ifndef _KERNEL_AMD64_TSS_H
#define _KERNEL_AMD64_TSS_H
#include <aux/compiler.h>
#include <libk/std.h>
/// 64-bit TSS structure: https://wiki.osdev.org/Task_State_Segment
struct tss {
uint32_t resv0;
uint64_t rsp0;
@@ -13,8 +15,6 @@ struct tss {
uint64_t resv2;
uint16_t resv3;
uint16_t iopb_off;
} __attribute__((packed));
volatile struct tss *amd64_get_tss(void);
} PACKED;
#endif // _KERNEL_AMD64_TSS_H

2
kernel/amd64/vars.mk Normal file
View File

@@ -0,0 +1,2 @@
# make vars
PLATFORM_ACPI=1

9
kernel/aux/compiler.h Normal file
View File

@@ -0,0 +1,9 @@
#ifndef _KERNEL_AUX_COMPILER_H
#define _KERNEL_AUX_COMPILER_H
/* Shorthands for common GCC/Clang attributes */
#define PACKED __attribute__ ((packed))
#define ALIGNED(N) __attribute__ ((aligned ((N))))
#define SECTION(name) __attribute__ ((section (name)))
#define UNUSED __attribute__ ((unused))
#endif // _KERNEL_AUX_COMPILER_H

4555
kernel/aux/elf.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +1,6 @@
include $(platform)/flags.mk
include generic/flags.mk
ifeq ($(PLATFORM_ACPI),1)
include uACPI/flags.mk
endif

Some files were not shown because too many files have changed in this diff Show More