Rewrite init app in C, introduce MSL (MOP3 System Library)

2026-01-04 01:11:31 +01:00
parent 2c954a9ca9
commit e077d322f4
57 changed files with 214 additions and 120 deletions

View File

@@ -235,7 +235,7 @@ void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick);
* @return number of ticks in a given period
*/
static uint32_t amd64_lapic_calibrate (uint32_t us) {
amd64_lapic_write (LAPIC_DCR, 0x03);
amd64_lapic_write (LAPIC_DCR, 0x0B);
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
@@ -256,7 +256,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
* Initial tick count
*/
static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_DCR, 0x03);
amd64_lapic_write (LAPIC_DCR, 0x0B);
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
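
The DCR change from 0x03 to 0x0B switches the LAPIC timer's input divider from divide-by-16 to divide-by-1, so the timer counts at the full bus clock and the calibrated tick values come out correspondingly larger. Bit 16 of the LVT Timer Register keeps the interrupt masked during calibration; bit 17 selects periodic mode when the timer is started for real. A hedged sketch of the encodings (constant names are illustrative, not from this tree):

#include <stdint.h>

/* Divide Configuration Register: the divider lives in bits {0,1,3},
 * bit 2 is reserved. Pattern 0b011 -> /16, 0b111 -> /1 (Intel SDM). */
#define LAPIC_DCR_DIV16 0x03u /* 0b0011: bits {0,1,3} = 011 */
#define LAPIC_DCR_DIV1  0x0Bu /* 0b1011: bits {0,1,3} = 111 */

/* LVT Timer Register flag bits used at the two call sites above. */
#define LVTT_MASKED   (1u << 16) /* interrupt masked while calibrating  */
#define LVTT_PERIODIC (1u << 17) /* periodic mode for the running timer */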

View File

@@ -35,7 +35,6 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
*/
void bootmain (void) {
struct cpu* bsp_cpu = cpu_make ();
amd64_thiscpu_set_init ();
amd64_init (bsp_cpu, false);
syscall_init ();
@@ -52,10 +51,6 @@ void bootmain (void) {
smp_init ();
/* busy wait for cpus to come online */
for (volatile int i = 0; i < INT_MAX; i++)
;
mm_init2 ();
proc_init ();

View File

@@ -28,8 +28,8 @@
static bool hpet_32bits = 1;
/// Physical address for HPET MMIO
static uintptr_t hpet_paddr;
/// HPET nanoseconds for conversion
static uint64_t hpet_clock_nano;
/// HPET period in femtoseconds
static uint64_t hpet_period_fs;
/// Lock that guards against concurrent access. See \ref amd64/smp.c
static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
@@ -54,32 +54,22 @@ static void amd64_hpet_write (uint32_t reg, uint64_t value) {
/// Read the current value of the \ref HPET_MCVR register.
static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }
/**
* @brief Get current HPET timestamp in nanoseconds
*
* @param lock
* if true, hold \ref hpet_lock
*/
uint64_t amd64_hpet_current_nano (bool lock) {
if (lock)
spin_lock (&hpet_lock);
uint64_t t = amd64_hpet_timestamp () * hpet_clock_nano;
if (lock)
spin_unlock (&hpet_lock);
return t;
}
/// Sleep for a given number of microseconds. The sleep may last longer than requested while \ref hpet_lock is contended.
void amd64_hpet_sleep_micro (uint64_t us) {
spin_lock (&hpet_lock);
uint64_t start = amd64_hpet_timestamp ();
uint64_t conv = us * 1000;
while (((amd64_hpet_timestamp () - start) * hpet_clock_nano) < conv)
uint64_t target_fs = us * 1000000000ULL;
for (;;) {
uint64_t current = amd64_hpet_timestamp ();
uint64_t dt = current - start;
if ((dt * hpet_period_fs) >= target_fs)
break;
__asm__ volatile ("pause" ::: "memory");
}
spin_unlock (&hpet_lock);
}
@@ -114,7 +104,5 @@ void amd64_hpet_init (void) {
gcidr = (((uint64_t)high << 32) | low);
}
uint64_t period_fs = (gcidr >> 32);
hpet_clock_nano = period_fs / 1000000;
hpet_period_fs = (gcidr >> 32);
}
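
Keeping the raw femtosecond period instead of a pre-divided nanosecond factor removes a sizeable truncation error: the upper 32 bits of HPET_GCIDR give the counter period in femtoseconds, and for a common 14.318180 MHz HPET that is 69841279 fs, which the old period_fs / 1000000 collapsed to 69 ns, roughly 1.2% off on every conversion. A small standalone sketch of the difference (the 14.318180 MHz figure is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

int main (void) {
    uint64_t period_fs = 69841279; /* fs per tick for a 14.318180 MHz HPET */
    uint64_t ticks = 1000000;      /* some elapsed main-counter delta */

    uint64_t old_ns = ticks * (period_fs / 1000000); /* truncates to 69 ns per tick */
    uint64_t new_ns = (ticks * period_fs) / 1000000; /* divides once, after the multiply */

    /* old=69000000 ns, new=69841279 ns: ~1.2% drift avoided */
    printf ("old=%llu ns new=%llu ns\n", (unsigned long long)old_ns,
            (unsigned long long)new_ns);
    return 0;
}

The trade-off is that ticks * period_fs can overflow uint64_t once the delta approaches 2^64 fs (about five hours), which is harmless for the microsecond sleeps above but worth remembering for a general clock source.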

View File

@@ -3,7 +3,6 @@
#include <libk/std.h>
uint64_t amd64_hpet_current_nano (bool lock);
void amd64_hpet_sleep_micro (uint64_t us);
void amd64_hpet_init (void);

View File

@@ -65,8 +65,6 @@ static void amd64_gdt_init (struct cpu* cpu) {
"movw %%ax, %%ds\n"
"movw %%ax, %%es\n"
"movw %%ax, %%ss\n"
"movw %%ax, %%fs\n"
"movw %%ax, %%gs\n"
:
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory");
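
Dropping the %fs and %gs reloads is more than cleanup: in long mode, a selector load into %gs replaces the hidden base with the (flat, base-0) GDT descriptor base, silently zeroing whatever cpu_make installed via MSR_GS_BASE. A minimal sketch of the ordering this imposes, reusing this tree's amd64_wrmsr and an illustrative helper name:

/* Sketch: any selector load into %gs clobbers GS.base, so the per-CPU
 * pointer must be (re)written via the MSR only after the GDT reloads. */
static void percpu_reinstall (struct cpu* cpu) { /* illustrative name */
    __asm__ volatile ("movw %%ax, %%ds\n"
                      "movw %%ax, %%es\n"
                      "movw %%ax, %%ss\n" /* note: no %fs/%gs here */
                      :
                      : "a"(GDT_KDATA)
                      : "memory");
    amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu); /* last, so it survives */
}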

View File

@@ -175,8 +175,6 @@ static void amd64_intr_exception (struct saved_regs* regs) {
void amd64_intr_handler (void* stack_ptr) {
struct saved_regs* regs = stack_ptr;
amd64_load_kernel_cr3 ();
if (regs->trap <= 31) {
amd64_intr_exception (regs);
} else {
@@ -219,10 +217,6 @@ static void amd64_irq_restore_flags (uint64_t rflags) {
/// Save current interrupt state
void irq_save (void) {
/* before smp init. */
if (thiscpu == NULL)
return;
int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 0)
thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
@@ -230,10 +224,6 @@ void irq_save (void) {
/// Restore interrupt state
void irq_restore (void) {
/* before smp init. */
if (thiscpu == NULL)
return;
int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
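
The counter turns irq_save/irq_restore into properly nesting pairs: only the outermost save records RFLAGS before disabling interrupts, and only the matching outermost restore puts them back, so inner critical sections cannot prematurely re-enable IRQs. Removing the thiscpu == NULL guards is sound because cpu_make now installs MSR_GS_BASE up front, so thiscpu is valid from the first call. A self-contained model of the pattern (names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct irq_ctx_model { atomic_int nesting; uint64_t rflags; };

/* Outermost save remembers RFLAGS and disables IRQs; inner saves only count. */
static void irq_save_model (struct irq_ctx_model* c,
                            uint64_t (*save_and_cli) (void)) {
    if (atomic_fetch_add_explicit (&c->nesting, 1, memory_order_acq_rel) == 0)
        c->rflags = save_and_cli ();
}

/* Outermost restore writes RFLAGS (and with it IF) back; inner ones only count. */
static void irq_restore_model (struct irq_ctx_model* c,
                               void (*restore_flags) (uint64_t)) {
    if (atomic_fetch_sub_explicit (&c->nesting, 1, memory_order_acq_rel) == 1)
        restore_flags (c->rflags);
}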

View File

@@ -22,9 +22,6 @@
;\
movq %rsp, %rdi; \
;\
movq %cr3, %rax; \
pushq %rax; \
;\
movq %rsp, %rbp; \
;\
subq $8, %rsp; \
@@ -34,9 +31,6 @@
;\
movq %rbp, %rsp; \
;\
popq %rax; \
movq %rax, %cr3; \
;\
pop_regs; \
addq $16, %rsp; \
;\

View File

@@ -9,6 +9,6 @@
void do_sched (struct proc* proc) {
thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)proc->pdata.gs_base);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd.cr3_paddr);
}

View File

@@ -13,38 +13,31 @@
#include <sys/syscall.h>
/// CPU ID counter
static uint32_t cpu_counter = 0;
/// Lock for \ref cpu_counter
static spin_lock_t cpu_counter_lock = SPIN_LOCK_INIT;
static atomic_uint cpu_counter = 0;
/// The CPUs
static struct cpu cpus[CPUS_MAX];
static bool thiscpu_init = false;
void amd64_thiscpu_set_init (void) { thiscpu_init = true; }
static atomic_int cpu_init_count;
/// Allocate a CPU structure
struct cpu* cpu_make (void) {
spin_lock (&cpu_counter_lock);
int id = cpu_counter++;
spin_unlock (&cpu_counter_lock);
int id = atomic_fetch_add (&cpu_counter, 1);
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
cpu->self = cpu;
amd64_wrmsr (MSR_SHADOW_GS_BASE, (uint64_t)cpu);
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
return cpu;
}
struct cpu* cpu_get (void) {
if (!thiscpu_init)
return NULL;
return (struct cpu*)amd64_rdmsr (MSR_SHADOW_GS_BASE);
struct cpu* ptr = (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
return ptr;
}
/// Bootstrap code for non-BSP CPUs
@@ -56,27 +49,36 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_init (cpu, true); /* gdt + idt */
syscall_init ();
thiscpu->lapic_ticks = amd64_lapic_init (2500);
thiscpu->lapic_ticks = amd64_lapic_init (10000);
amd64_lapic_tick (thiscpu->lapic_ticks);
DEBUG ("CPU %u is online!\n", thiscpu->id);
__asm__ volatile ("sti");
atomic_fetch_sub (&cpu_init_count, 1);
for (;;)
;
}
/// Initialize SMP subsystem for AMD64. Start AP CPUs
void smp_init (void) {
thiscpu->lapic_ticks = amd64_lapic_init (2500);
thiscpu->lapic_ticks = amd64_lapic_init (10000);
struct limine_mp_response* mp = limine_mp_request.response;
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
for (size_t i = 0; i < mp->cpu_count; i++) {
if (mp->cpus[i]->lapic_id != thiscpu->id) {
DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
}
}
while (atomic_load (&cpu_init_count) > 0)
;
DEBUG ("All CPUs are online\n");
}
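
The atomic countdown replaces the bounded busy-wait that bootmain drops in this same commit: the BSP seeds cpu_init_count with the number of APs, each AP decrements it exactly once when it is fully online, and the BSP spins until the count reaches zero, so boot proceeds as soon as, and only when, every AP is up. A compressed model of the rendezvous (illustrative, not this tree's API):

#include <stdatomic.h>

static atomic_int aps_pending;

void ap_entry_model (void) {           /* runs once per AP, after full init */
    atomic_fetch_sub (&aps_pending, 1);
}

void bsp_rendezvous_model (int ap_count) {
    atomic_store (&aps_pending, ap_count);
    /* ... point each AP's goto_address at ap_entry_model here ... */
    while (atomic_load (&aps_pending) > 0)
        ; /* plain spin, matching smp_init above */
}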

View File

@@ -13,6 +13,7 @@ struct cpu {
/* for syscall instruction */
uintptr_t syscall_user_stack;
uintptr_t syscall_kernel_stack;
struct cpu* self;
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
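
The new self field is the usual enabler for cheap per-CPU access: with GS.base pointing at the owning struct cpu, a single %gs-relative load yields the CPU's own pointer, avoiding the comparatively slow rdmsr that cpu_get still performs. A hedged sketch of that fast path (the accessor name is illustrative):

#include <stddef.h>

/* Illustrative fast accessor: read struct cpu::self through %gs. Assumes
 * GS.base holds the struct cpu address, as cpu_make arranges above. */
static inline struct cpu* cpu_self_fast (void) {
    struct cpu* cpu;
    __asm__ volatile ("movq %%gs:%c1, %0"
                      : "=r"(cpu)
                      : "i"(offsetof (struct cpu, self)));
    return cpu;
}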

View File

@@ -6,7 +6,7 @@
#include <proc/proc.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <syscall/defs.h>
#include <m/syscall_defs.h>
#include <syscall/syscall.h>
extern void amd64_syscall_entry (void);
@@ -14,8 +14,6 @@ extern void amd64_syscall_entry (void);
int amd64_syscall_dispatch (void* stack_ptr) {
struct saved_regs* regs = stack_ptr;
amd64_load_kernel_cr3 ();
int syscall_num = regs->rax;
syscall_handler_func_t func = syscall_find_handler (syscall_num);

View File

@@ -5,7 +5,6 @@
.global amd64_syscall_entry
amd64_syscall_entry:
cli
swapgs
movq %rsp, %gs:0
movq %gs:8, %rsp
@@ -20,15 +19,10 @@ amd64_syscall_entry:
push_regs
swapgs
cld
movq %rsp, %rdi
movq %cr3, %rax
pushq %rax
movq %rsp, %rbp
subq $8, %rsp
@@ -38,20 +32,9 @@ amd64_syscall_entry:
movq %rbp, %rsp
popq %rax
movq %rax, %cr3
pop_regs
swapgs
addq $16, %rsp
popq %rcx
addq $8, %rsp
popq %r11
addq $16, %rsp
addq $56, %rsp
movq %gs:0, %rsp
swapgs
sysretq
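
The single addq $56 is the sum of the five adjustments it replaces (16 + 8 + 8 + 8 + 16), but it also stops popping %rcx and %r11 here, so the exit is only correct if pop_regs now restores both before sysretq consumes them as the return RIP and RFLAGS. The arithmetic, as a compile-time check (illustrative, not part of this tree):

/* sysretq reloads RIP from %rcx and RFLAGS from %r11; both must be live
 * again by the time the remaining frame is discarded in one step. */
_Static_assert (16 + 8 + 8 + 8 + 16 == 56,
                "addq $56 folds the five removed stack adjustments");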