First Hello world syscall
All checks were successful
Build documentation / build-and-deploy (push) Successful in 26s

This commit is contained in:
2026-01-03 02:04:09 +01:00
parent 1341dc00d9
commit e52268cd8e
26 changed files with 228 additions and 140 deletions

2
.gdbinit Normal file
View File

@@ -0,0 +1,2 @@
# Load the kernel's symbols so breakpoints/backtraces resolve.
file kernel/build/kernel.elf
# Attach to QEMU's built-in GDB stub (listens on :1234 when QEMU runs with -s).
target remote :1234

View File

@@ -9,7 +9,7 @@ PHDRS {
}
SECTIONS {
. = 0x0000000050000000;
. = 0x0000500000000000;
.text : {
*(.text .text.*)

5
aux/qemu_amd64_debug.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
# Launch QEMU for kernel debugging: serial console on stdio, GDB stub on
# :1234 (-s), and the CPU halted at reset (-S) until a debugger attaches
# (see .gdbinit for the matching gdb setup).
set -x
# "$@" (quoted) forwards any extra arguments verbatim; unquoted $@ would
# re-split arguments containing whitespace.
qemu-system-x86_64 -M q35 -m 4G -serial stdio -cdrom mop3.iso -smp 4 -s -S "$@"

View File

@@ -2,4 +2,7 @@
_start:
pushq $123
addq $8, %rsp
syscall
jmp _start

View File

@@ -5,6 +5,8 @@
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
@@ -16,6 +18,7 @@
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <uacpi/uacpi.h>
@@ -32,9 +35,10 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
*/
void bootmain (void) {
struct cpu* bsp_cpu = cpu_make ();
cpu_assign (bsp_cpu->id);
amd64_thiscpu_set_init ();
amd64_init (bsp_cpu, false);
syscall_init ();
amd64_debug_init ();
pmm_init ();
mm_init ();
@@ -54,8 +58,6 @@ void bootmain (void) {
mm_init2 ();
__asm__ volatile ("sti");
proc_init ();
for (;;)

View File

@@ -13,6 +13,8 @@
/// Lock, which ensures that prints to the serial port are atomic
static spin_lock_t serial_lock = SPIN_LOCK_INIT;
static bool debug_init = false;
/// Block until TX buffer is empty
static bool amd64_debug_serial_tx_empty (void) {
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
@@ -30,6 +32,9 @@ static void amd64_debug_serial_write (char x) {
* all prints are atomic.
*/
void debugprintf (const char* fmt, ...) {
if (!debug_init)
return;
char buffer[BUFFER_SIZE];
memset (buffer, 0, sizeof (buffer));
@@ -61,4 +66,6 @@ void amd64_debug_init (void) {
amd64_io_outb (PORT_COM1 + 3, 0x03);
amd64_io_outb (PORT_COM1 + 2, 0xC7);
amd64_io_outb (PORT_COM1 + 4, 0x0B);
debug_init = true;
}

View File

@@ -4,7 +4,8 @@ cflags += --target=x86_64-pc-none-elf \
-mno-avx \
-mno-mmx \
-mno-80387 \
-mno-red-zone
-mno-red-zone \
-fno-omit-frame-pointer
ldflags += --target=x86_64-pc-none-elf \
-Wl,-zmax-page-size=0x1000

View File

@@ -5,6 +5,12 @@
#include <libk/std.h>
#include <proc/proc.h>
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UCODE 0x18
#define GDT_UDATA 0x20
#define GDT_TSS 0x28
/// Size of kernel stack
#define KSTACK_SIZE (32 * 1024)

View File

@@ -1,3 +1,4 @@
#include <amd64/gdt.h>
#include <amd64/init.h>
#include <amd64/intr.h>
#include <amd64/smp.h>
@@ -5,12 +6,6 @@
#include <libk/std.h>
#include <libk/string.h>
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UCODE 0x18
#define GDT_UDATA 0x20
#define GDT_TSS 0x28
#define TSS 0x80
#define TSS_PRESENT 0x89
@@ -70,6 +65,8 @@ static void amd64_gdt_init (struct cpu* cpu) {
"movw %%ax, %%ds\n"
"movw %%ax, %%es\n"
"movw %%ax, %%ss\n"
"movw %%ax, %%fs\n"
"movw %%ax, %%gs\n"
:
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory");

View File

@@ -1,4 +1,5 @@
#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
@@ -90,7 +91,7 @@ static void amd64_init_pic (void) {
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
uint8_t ist) {
ent->intrlow = (handler & 0xFFFF);
ent->kernel_cs = 0x08; // GDT_KCODE (init.c)
ent->kernel_cs = GDT_KCODE;
ent->ist = ist;
ent->attrs = flags;
ent->intrmid = ((handler >> 16) & 0xFFFF);
@@ -163,7 +164,7 @@ static void amd64_intr_exception (struct saved_regs* regs) {
regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
regs->rbx);
if (regs->cs == (0x18 | 0x03)) {
if (regs->cs == (GDT_UCODE | 0x03)) {
proc_kill (thiscpu->proc_current);
} else {
spin ();
@@ -182,12 +183,12 @@ void amd64_intr_handler (void* stack_ptr) {
struct irq* irq = irq_find (regs->trap);
if (irq != NULL) {
if (!(irq->flags & IRQ_INTERRUPT_SAFE))
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("sti");
irq->func (irq->arg, stack_ptr);
if (!(irq->flags & IRQ_INTERRUPT_SAFE))
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("cli");
}
}

View File

@@ -1,4 +1,5 @@
#include <amd64/intr_defs.h>
#include <amd64/regsasm.h>
.extern amd64_intr_handler
@@ -9,67 +10,20 @@
pushq $0; \
pushq $z;
#define push_regs \
pushq %rax; \
pushq %rcx; \
pushq %rdx; \
pushq %rsi; \
pushq %rdi; \
pushq %rbp; \
pushq %rbx; \
pushq %r8; \
pushq %r9; \
pushq %r10; \
pushq %r11; \
pushq %r12; \
pushq %r13; \
pushq %r14; \
pushq %r15;
#define pop_regs \
popq %r15; \
popq %r14; \
popq %r13; \
popq %r12; \
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rbx; \
popq %rbp; \
popq %rdi; \
popq %rsi; \
popq %rdx; \
popq %rcx; \
popq %rax;
#define make_intr_stub(x, n) \
.global amd64_intr ## n; \
amd64_intr ## n:; \
x(n); \
cli; \
movq %rsp, %rax; \
movq 144(%rax), %rax; \
testb $3, %al; \
jz 1f; \
swapgs; \
1:; \
push_regs; \
cld; \
movq %rsp, %rdi; \
movq %rsp, %rax; \
movq %rsp, %rbp; \
subq $8, %rsp; \
andq $~0xF, %rsp; \
movq %rax, (%rsp); \
callq amd64_intr_handler; \
movq (%rsp), %rsp; \
movq %rbp, %rsp; \
pop_regs; \
movq %rsp, %rax; \
movq 144(%rax), %rax; \
testb $3, %al; \
jz 2f; \
swapgs; \
2:; \
addq $16, %rsp; \
iretq;

View File

@@ -1,12 +1,17 @@
#include <amd64/gdt.h>
#include <aux/elf.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
struct proc* proc_from_elf (uint8_t* elf_contents) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct proc* proc = malloc (sizeof (*proc));
if (proc == NULL)
return NULL;
@@ -20,22 +25,21 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
return NULL;
}
proc->pdata.syscall_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
if (proc->pdata.syscall_stack == PMM_ALLOC_ERR) {
proc->pdata.kernel_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
if (proc->pdata.kernel_stack == PMM_ALLOC_ERR) {
free (proc);
return NULL;
}
uintptr_t kernel_stack = proc->pdata.kernel_stack;
proc->pdata.kernel_stack += (uintptr_t)hhdm->offset + KSTACK_SIZE;
proc->pdata.user_stack = pmm_alloc (USTACK_SIZE / PAGE_SIZE);
if (proc->pdata.user_stack == PMM_ALLOC_ERR) {
free (proc);
pmm_free (proc->pdata.syscall_stack, USTACK_SIZE / PAGE_SIZE);
pmm_free (kernel_stack, USTACK_SIZE / PAGE_SIZE);
return NULL;
}
uintptr_t user_stack = proc->pdata.user_stack;
proc->pdata.syscall_stack += KSTACK_SIZE;
proc->pdata.user_stack += USTACK_SIZE;
proc_map (proc, user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
@@ -43,10 +47,10 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
struct elf_aux aux = proc_load_segments (proc, elf_contents);
proc->pdata.regs.ss = 0x20 | 0x03;
proc->pdata.regs.ss = GDT_UDATA | 0x03;
proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
proc->pdata.regs.rflags = 0x202;
proc->pdata.regs.cs = 0x18 | 0x03;
proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = aux.entry;
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);

View File

@@ -12,9 +12,9 @@
/// Platform-dependent process data
struct proc_platformdata {
struct saved_regs regs;
uintptr_t syscall_stack;
uintptr_t user_stack;
uint64_t fsbase;
uintptr_t kernel_stack;
uint64_t gs_base;
};
#endif // _KERNEL_AMD64_PROC_H

38
kernel/amd64/regsasm.h Normal file
View File

@@ -0,0 +1,38 @@
#ifndef _KERNEL_AMD64_REGSASM_H
#define _KERNEL_AMD64_REGSASM_H
/*
 * Register save/restore macros shared by the interrupt and syscall entry
 * stubs (included from .S files). push_regs and pop_regs must remain exact
 * mirrors of each other, and the push order must match the register layout
 * the C handlers read from the saved frame.
 *
 * No whitespace between '%' and the register name: "% rax" is rejected by
 * some assemblers and is inconsistent with the project's .S files.
 */
#define push_regs \
	pushq %rax; \
	pushq %rcx; \
	pushq %rdx; \
	pushq %rsi; \
	pushq %rdi; \
	pushq %rbp; \
	pushq %rbx; \
	pushq %r8; \
	pushq %r9; \
	pushq %r10; \
	pushq %r11; \
	pushq %r12; \
	pushq %r13; \
	pushq %r14; \
	pushq %r15;
#define pop_regs \
	popq %r15; \
	popq %r14; \
	popq %r13; \
	popq %r12; \
	popq %r11; \
	popq %r10; \
	popq %r9; \
	popq %r8; \
	popq %rbx; \
	popq %rbp; \
	popq %rdi; \
	popq %rsi; \
	popq %rdx; \
	popq %rcx; \
	popq %rax;
#endif // _KERNEL_AMD64_REGSASM_H

View File

@@ -1,30 +1,10 @@
#define pop_regs \
popq %r15; \
popq %r14; \
popq %r13; \
popq %r12; \
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rbx; \
popq %rbp; \
popq %rdi; \
popq %rsi; \
popq %rdx; \
popq %rcx; \
popq %rax;
#include <amd64/regsasm.h>
.global amd64_do_sched
amd64_do_sched:
cli
movq %rsi, %cr3
movq %rdi, %rsp
pop_regs
movq 144(%rsp), %rax
testb $3, %al
jz 1f
swapgs
1:
add $16, %rsp
iretq

View File

@@ -1,4 +1,14 @@
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <amd64/sched.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sys/mm.h>
#include <sys/smp.h>
void do_sched (void* regs, struct pd* pd) { amd64_do_sched (regs, (void*)pd->cr3_paddr); }
void do_sched (struct proc* proc) {
thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_GS_BASE, proc->pdata.gs_base);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd.cr3_paddr);
}

View File

@@ -10,6 +10,7 @@
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/syscall.h>
/// Cpu ID counter
static uint32_t cpu_counter = 0;
@@ -18,6 +19,10 @@ static spin_lock_t cpu_counter_lock = SPIN_LOCK_INIT;
/// The CPUs
static struct cpu cpus[CPUS_MAX];
static bool thiscpu_init = false;
void amd64_thiscpu_set_init (void) { thiscpu_init = true; }
/// Allocate a CPU structure
struct cpu* cpu_make (void) {
spin_lock (&cpu_counter_lock);
@@ -30,30 +35,26 @@ struct cpu* cpu_make (void) {
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
return cpu_get (id);
amd64_wrmsr (MSR_SHADOW_GS_BASE, (uint64_t)cpu);
return cpu;
}
struct cpu* cpu_get (uint32_t id) {
if (id >= CPUS_MAX)
struct cpu* cpu_get (void) {
if (!thiscpu_init)
return NULL;
return &cpus[id];
return (struct cpu*)amd64_rdmsr (MSR_SHADOW_GS_BASE);
}
/// Get ID of current running CPU
uint32_t cpu_id (void) { return (uint32_t)amd64_rdmsr (MSR_SHADOW_GS_BASE); }
/// Assign an ID to the current running CPU
void cpu_assign (uint32_t id) { amd64_wrmsr (MSR_SHADOW_GS_BASE, (uint64_t)id); }
/// Bootstrap code for non-BSP CPUs
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_load_kernel_cr3 ();
struct cpu* cpu = cpu_make ();
cpu_assign (cpu->id);
amd64_init (cpu, true); /* gdt + idt */
syscall_init ();
thiscpu->lapic_ticks = amd64_lapic_init (2500);
amd64_lapic_tick (thiscpu->lapic_ticks);

View File

@@ -10,6 +10,15 @@
#define CPUS_MAX 32
struct cpu {
/* for syscall instruction */
uintptr_t syscall_user_stack;
uintptr_t syscall_kernel_stack;
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
volatile struct gdt_extended gdt ALIGNED (16);
volatile struct tss tss;
uint64_t lapic_ticks;
uint32_t id;
@@ -18,25 +27,16 @@ struct cpu {
atomic_int nesting;
} irq_ctx;
uint8_t user_stack[USTACK_SIZE] ALIGNED (16);
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
volatile struct gdt_extended gdt ALIGNED (16);
volatile struct tss tss;
spin_lock_t lock;
struct proc* proc_run_q;
struct proc* proc_current;
};
} PACKED;
struct cpu* cpu_make (void);
struct cpu* cpu_get (uint32_t id);
void cpu_assign (uint32_t id);
uint32_t cpu_id (void);
struct cpu* cpu_get (void);
void amd64_thiscpu_set_init (void);
#define thiscpu (cpu_get (cpu_id ()))
#define thiscpu (cpu_get ())
#endif // _KERNEL_AMD64_SMP_H

View File

@@ -11,11 +11,13 @@ c += amd64/bootmain.c \
amd64/time.c \
amd64/smp.c \
amd64/sched1.c \
amd64/proc.c
amd64/proc.c \
amd64/syscall.c
S += amd64/intr_stub.S \
amd64/spin.S \
amd64/sched.S
amd64/sched.S \
amd64/syscallentry.S
o += amd64/bootmain.o \
amd64/init.o \
@@ -33,4 +35,6 @@ o += amd64/bootmain.o \
amd64/smp.o \
amd64/sched.o \
amd64/sched1.o \
amd64/proc.o
amd64/proc.o \
amd64/syscall.o \
amd64/syscallentry.o

19
kernel/amd64/syscall.c Normal file
View File

@@ -0,0 +1,19 @@
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <sys/debug.h>
extern void amd64_syscall_entry (void);
/*
 * C-level system-call handler, called from amd64_syscall_entry with a
 * pointer to the saved register frame built on the per-CPU syscall kernel
 * stack. Currently a stub: it only logs; no syscall number is decoded yet.
 */
void amd64_syscall_dispatch (void* stack_ptr) {
	struct saved_regs* regs = stack_ptr;
	(void)regs; /* silences -Wunused-variable until dispatch reads the frame */
	DEBUG ("hello syscall\n");
}
/*
 * Enable the syscall/sysret fast system-call path on the calling CPU.
 * Runs once per CPU: from bootmain() on the BSP and from the AP bootstrap.
 */
void syscall_init (void) {
	/* STAR[47:32]: kernel CS base loaded on syscall; STAR[63:48]: base for
	 * sysret (the CPU loads CS = base+16 | 3 and SS = base+8 | 3 on a
	 * 64-bit sysret).
	 * NOTE(review): with base = GDT_UCODE - 16 = 0x08, sysret loads
	 * SS = 0x10 | 3 (GDT_KDATA), not GDT_UDATA (0x20) — confirm this is
	 * intended; the entry stub separately pushes $0x23 as user ss. */
	amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_UCODE - 16) << 48));
	/* LSTAR: 64-bit syscall entry point. */
	amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
	/* SFMASK: RFLAGS bits cleared on entry; bit 9 masks IF, so syscalls
	 * enter with interrupts disabled. */
	amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
	/* EFER.SCE: turn on the syscall/sysret instructions themselves. */
	amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
}

View File

@@ -0,0 +1,40 @@
#include <amd64/regsasm.h>

.extern amd64_syscall_dispatch

/*
 * System-call entry point (installed in MSR_LSTAR). On entry the CPU has
 * loaded kernel CS/SS and left: rcx = user rip, r11 = user rflags,
 * rsp = user stack (unchanged). We build an iretq-style frame plus the
 * push_regs layout so amd64_syscall_dispatch sees the same saved_regs
 * shape as the interrupt path, then return with sysretq.
 *
 * (Removed leftover dead debug code: an unreferenced self-loop label.)
 */
.global amd64_syscall_entry
amd64_syscall_entry:
	cli
	swapgs
	/* Stash the user stack at gs:0 and switch to the per-CPU syscall
	 * kernel stack kept at gs:8 (struct cpu: syscall_user_stack,
	 * syscall_kernel_stack are the first two fields). */
	movq %rsp, %gs:0
	movq %gs:8, %rsp
	/* Fake interrupt frame: ss, rsp, rflags, cs, rip. */
	pushq $0x23              /* user ss  (GDT_UDATA | 3) */
	pushq %gs:0              /* saved user rsp */
	pushq %r11               /* user rflags (saved by syscall) */
	pushq $0x1b              /* user cs  (GDT_UCODE | 3) */
	pushq %rcx               /* user rip (saved by syscall) */
	pushq $0                 /* error-code slot */
	pushq $0                 /* trap-number slot */
	push_regs
	cld
	movq %rsp, %rdi          /* arg 0: pointer to the saved frame */
	movq %rsp, %rbp          /* remember frame for restore below */
	subq $8, %rsp
	andq $~0xF, %rsp         /* 16-byte stack alignment for the C ABI */
	callq amd64_syscall_dispatch
	movq %rbp, %rsp
	pop_regs
	cli
	addq $16, %rsp           /* drop trap-number + error-code slots */
	popq %rcx                /* rip consumed by sysretq */
	addq $8, %rsp            /* drop cs */
	popq %r11                /* rflags consumed by sysretq */
	addq $16, %rsp           /* drop saved user rsp + ss */
	movq %gs:0, %rsp         /* back onto the user stack */
	swapgs
	sysretq

View File

@@ -3,7 +3,8 @@
#include <libk/std.h>
#define IRQ_INTERRUPT_SAFE (1 << 0)
#define IRQ_INTERRUPT_SAFE (1 << 0)
#define IRQ_INTERRUPT_UNSAFE (1 << 1)
typedef void (*irq_func_t) (void* arg, void* regs);

View File

@@ -108,25 +108,26 @@ static struct proc* proc_spawn_rd (char* name) {
return proc_from_elf (rd_file->content);
}
static void proc_register (struct proc* proc) {
static void proc_register_for_cpu (struct proc* proc, struct cpu* cpu) {
/* make available globally. */
struct procw* procw = malloc (sizeof (*procw));
if (procw == NULL)
return;
procw->proc = proc;
proc->procw = procw;
proc->cpu = cpu;
spin_lock (&procs_lock);
spin_lock (&thiscpu->lock);
spin_lock (&cpu->lock);
linklist_append (struct procw*, procs, procw);
linklist_append (struct proc*, thiscpu->proc_run_q, proc);
linklist_append (struct proc*, cpu->proc_run_q, proc);
if (thiscpu->proc_current == NULL)
thiscpu->proc_current = proc;
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
spin_unlock (&thiscpu->lock);
spin_unlock (&cpu->lock);
spin_unlock (&procs_lock);
}
@@ -170,9 +171,8 @@ void proc_sched (void) {
spin_unlock (&thiscpu->lock);
if (next != NULL && atomic_load (&next->state) == PROC_READY) {
do_sched (&next->pdata.regs, &next->pd);
}
if (next != NULL && atomic_load (&next->state) == PROC_READY)
do_sched (next);
idle:
spin ();
@@ -190,11 +190,11 @@ static void proc_irq_sched (void* arg, void* regs) {
void proc_init (void) {
struct proc* init = proc_spawn_rd ("init.exe");
proc_register (init);
proc_register_for_cpu (init, thiscpu);
#if defined(__x86_64__)
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, 0);
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
#endif
do_sched (&init->pdata.regs, &init->pd);
do_sched (init);
}

View File

@@ -1,11 +1,17 @@
#ifndef _KERNEL_SYS_DEBUG_H
#define _KERNEL_SYS_DEBUG_H
#include <sys/smp.h>
void debugprintf (const char* fmt, ...);
#define DEBUG(fmt, ...) \
do { \
debugprintf ("%s: " fmt, __func__, ##__VA_ARGS__); \
if (thiscpu != NULL) { \
debugprintf ("(CPU %u) %s:%d: " fmt, thiscpu->id, __func__, __LINE__, ##__VA_ARGS__); \
} else { \
debugprintf ("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} \
} while (0)
#endif // _KERNEL_SYS_DEBUG_H

View File

@@ -1,8 +1,9 @@
#ifndef _KERNEL_SYS_SCHED_H
#define _KERNEL_SYS_SCHED_H
#include <sys/mm.h>
#include <libk/std.h>
#include <proc/proc.h>
void do_sched (void* regs, struct pd* pd);
void do_sched (struct proc* proc);
#endif // _KERNEL_SYS_SCHED_H

6
kernel/sys/syscall.h Normal file
View File

@@ -0,0 +1,6 @@
#ifndef _KERNEL_SYS_SYSCALL_H
#define _KERNEL_SYS_SYSCALL_H
/// Enable the system-call mechanism on the calling CPU. Called once per
/// CPU (BSP bootmain and AP bootstrap); on amd64 this programs the
/// syscall/sysret MSRs.
void syscall_init (void);
#endif // _KERNEL_SYS_SYSCALL_H