Compare commits


3 Commits

SHA1 Message Date
d1d772cb42 Fix user apps randomly crashing (APIC, GDT layout, syscall entry) 2026-01-14 19:51:18 +01:00
0d8f9e565f Fix missing CPU_REQUEST_SCHED IDT entry 2026-01-11 12:07:17 +01:00
f80a26e5eb Load kernel CR3 2026-01-11 03:45:32 +01:00
23 changed files with 284 additions and 194 deletions

View File

@@ -4,25 +4,72 @@
 #include <stdint.h>
 #include <string/string.h>
+char c = 'a';
 void app_main (void) {
-uintptr_t out_paddr;
-int mem_rid = m_proc_create_resource_mem (16, RV_PRIVATE, &out_paddr);
-m_proc_map (out_paddr, M_PROC_MAP_BASE, 16, PM_PRESENT | PM_RW | PM_USER);
-memset ((void*)M_PROC_MAP_BASE, 0, M_PAGE_SIZE * 16);
-m_proc_unmap (M_PROC_MAP_BASE, 16);
-m_proc_drop_resource (mem_rid);
+/* uintptr_t out_paddr; */
+/* int mem_rid = m_proc_create_resource_mem (16, RV_PRIVATE, &out_paddr); */
+/* m_proc_map (out_paddr, M_PROC_MAP_BASE, 16, PM_PRESENT | PM_RW | PM_USER); */
+/* memset ((void*)M_PROC_MAP_BASE, 0, M_PAGE_SIZE * 16); */
+/* m_proc_unmap (M_PROC_MAP_BASE, 16); */
+/* m_proc_drop_resource (mem_rid); */
 /* m_proc_test (); */
-int mutex_rid = m_proc_create_resource_mutex (RV_PRIVATE);
-m_proc_mutex_lock (mutex_rid);
+/* int mutex_rid = m_proc_create_resource_mutex (RV_PRIVATE); */
+/* m_proc_mutex_lock (mutex_rid); */
 /* m_proc_test (); */
-m_proc_mutex_unlock (mutex_rid);
-/* m_proc_test (); */
+/* m_proc_mutex_unlock (mutex_rid); */
+if (c > 'z')
+c = 'a';
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+m_proc_test (c++);
+__asm__ volatile ("dupa: nop; nop; nop; jmp dupa");
+/* for (volatile uint64_t i = 0; i < 1000*1000*100; i++); */
+/* if (c > 'z') */
+/* c = 'a'; */
+/* m_proc_test ('k'); */
+/* m_proc_test ('l'); */
+/* m_proc_test ('m'); */
+/* m_proc_test ('n'); */
+/* m_proc_test ('o'); */
+/* m_proc_test ('p'); */
+/* m_proc_test ('r'); */
+/* m_proc_test ('s'); */
+/* m_proc_test ('t'); */
+/* m_proc_test ('u'); */
 }

View File

@@ -34,6 +34,8 @@
 /* Divide config register */
 #define LAPIC_DCR 0x3E0
+#define DIVIDER_VALUE 0x0B
 struct ioapic {
 struct acpi_madt_ioapic table_data;
 rw_spin_lock_t lock;
@@ -51,7 +53,7 @@ static size_t ioapic_entries = 0;
 /* Count of actual interrupt source overrides */
 static size_t intr_src_override_entries = 0;
-static uint64_t lapic_ticks;
+static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
 /* Read IOAPIC */
 static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
@@ -117,18 +119,16 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
 uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
 calc_flags |= (uint64_t)mode << 15;
 calc_flags |= (uint64_t)polarity << 13;
-calc_flags |= flags;
-} else {
-calc_flags |= flags;
 }
-ioapic = amd64_ioapic_find (irq);
+uint8_t gsi = found_override ? override->gsi : irq;
+ioapic = amd64_ioapic_find (gsi);
 if (ioapic == NULL)
 return;
-uint32_t irq_reg = ((irq - ioapic->table_data.gsi_base) * 2) + 0x10;
+uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;
 amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
 amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
@@ -201,15 +201,20 @@ void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
 * us - Period length in microseconds
 */
 static uint32_t amd64_lapic_calibrate (uint32_t us) {
-amd64_lapic_write (LAPIC_DCR, 0x0B);
+spin_lock (&lapic_calibration_lock);
+amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
 amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
 amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);
 sleep_micro (us);
-amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
 uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
+DEBUG ("timer ticks = %u\n", ticks);
+spin_unlock (&lapic_calibration_lock);
 return ticks;
 }
@@ -220,11 +225,9 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
 * ticks - Initial tick count
 */
 static void amd64_lapic_start (uint32_t ticks) {
-amd64_lapic_write (LAPIC_DCR, 0x0B);
+amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
+amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
 amd64_lapic_write (LAPIC_TIMICT, ticks);
-amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17) | (1 << 16));
 }
@@ -244,11 +247,8 @@
 amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
-if (thiscpu->id == 0) {
-lapic_ticks = amd64_lapic_calibrate (us);
-}
-amd64_lapic_start (lapic_ticks);
+thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
+amd64_lapic_start (thiscpu->lapic_ticks);
 }
 /*
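
Note on the LAPIC changes above: amd64_lapic_calibrate (us) counts how many timer ticks elapse in us microseconds at the divider selected by DIVIDER_VALUE (0x0B, divide-by-1), and every CPU now stores its own result in thiscpu->lapic_ticks and feeds it straight into amd64_lapic_start () as the initial count of the periodic timer, so with smp.c passing 1000 later in this diff each core is preempted roughly every millisecond; the new lapic_calibration_lock only serializes the calibration runs. A minimal sketch of the tick/period relationship — lapic_ticks_for () is a hypothetical helper, not part of this changeset:

/* Sketch only: at a fixed divider, ticks scale linearly with time, so a
 * different preemption period can be derived from one calibration run.
 * lapic_ticks_for () is hypothetical and not part of the patch. */
static inline uint32_t lapic_ticks_for (uint64_t period_us, uint32_t calibrated_ticks,
                                        uint32_t calibration_us) {
    return (uint32_t)(((uint64_t)calibrated_ticks * period_us) / calibration_us);
}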

View File

@@ -7,8 +7,8 @@
 #define GDT_KCODE 0x08
 #define GDT_KDATA 0x10
-#define GDT_UCODE 0x18
-#define GDT_UDATA 0x20
+#define GDT_UDATA 0x18
+#define GDT_UCODE 0x20
 #define GDT_TSS 0x28
 /* Size of kernel stack */
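
The order of these two selectors is not cosmetic: on AMD64, SYSRET builds the user selectors from a single base in STAR[63:48], loading SS from base + 8 and CS from base + 16, so the user data descriptor has to sit directly below the user code descriptor. With GDT_UDATA at 0x18 and GDT_UCODE at 0x20, the STAR value written in syscall_init () later in this diff yields exactly the 0x1b/0x23 pair that the reworked syscall entry stub pushes. A small illustration — these defines are for explanation only and are not added by the patch:

/* SYSRET selector arithmetic, assuming the usual AMD64 behavior. */
#define STAR_USER_BASE (GDT_KDATA | 0x03)    /* 0x13, written into STAR[63:48] */
#define SYSRET_SS      (STAR_USER_BASE + 8)  /* 0x1b = GDT_UDATA | 3 */
#define SYSRET_CS      (STAR_USER_BASE + 16) /* 0x23 = GDT_UCODE | 3 */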

View File

@@ -30,46 +30,87 @@ static uint64_t hpet_period_fs;
 /* Lock, which protects concurrent access. See amd64/smp.c */
 static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
-/* Read a HPET register. Assumes caller holds \ref hpet_lock */
-static uint64_t amd64_hpet_read (uint32_t reg) {
+/* Read a HPET register. Assumes caller holds hpet_lock */
+static uint64_t amd64_hpet_read64 (uint32_t reg) {
 struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
 uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
-return (hpet_32bits ? *(volatile uint32_t*)(hpet_vaddr + reg)
-: *(volatile uint64_t*)(hpet_vaddr + reg));
+return *(volatile uint64_t*)(hpet_vaddr + reg);
 }
-/* Write a HPET register. Assumes caller holds \ref hpet_lock */
-static void amd64_hpet_write (uint32_t reg, uint64_t value) {
+static uint32_t amd64_hpet_read32 (uint32_t reg) {
 struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
 uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
-if (hpet_32bits)
-*(volatile uint32_t*)(hpet_vaddr + reg) = (value & 0xFFFFFFFF);
-else
-*(volatile uint64_t*)(hpet_vaddr + reg) = value;
+return *(volatile uint32_t*)(hpet_vaddr + reg);
 }
-/* Read current value of \ref HPET_MCVR register. */
-static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }
+/* Write a HPET register. Assumes caller holds hpet_lock */
+static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
+struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
+*(volatile uint64_t*)(hpet_vaddr + reg) = value;
+}
+static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
+struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
+*(volatile uint32_t*)(hpet_vaddr + reg) = value;
+}
+/* Read current value of HPET_MCVR register. */
+static uint64_t amd64_hpet_read_counter (void) {
+uint64_t value;
+spin_lock (&hpet_lock);
+if (!hpet_32bits)
+value = amd64_hpet_read64 (HPET_MCVR);
+else {
+uint32_t hi1, lo, hi2;
+do {
+hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
+lo = amd64_hpet_read32 (HPET_MCVR + 0);
+hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
+} while (hi1 != hi2);
+value = ((uint64_t)hi1 << 32) | lo;
+}
+spin_unlock (&hpet_lock);
+return value;
+}
+static void amd64_hpet_write_counter (uint64_t value) {
+spin_lock (&hpet_lock);
+if (!hpet_32bits)
+amd64_hpet_write64 (HPET_MCVR, value);
+else {
+amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
+amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
+}
+spin_unlock (&hpet_lock);
+}
 /* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being
 * held. */
 void amd64_hpet_sleep_micro (uint64_t us) {
-spin_lock (&hpet_lock);
-uint64_t start = amd64_hpet_timestamp ();
-uint64_t target_fs = us * 1000000000ULL;
+if (hpet_period_fs == 0)
+return;
+uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL);
+uint64_t start = amd64_hpet_read_counter ();
 for (;;) {
-uint64_t current = amd64_hpet_timestamp ();
-uint64_t dt = current - start;
-if ((dt * hpet_period_fs) >= target_fs)
+uint64_t now = amd64_hpet_read_counter ();
+if ((now - start) >= ticks_to_wait)
 break;
 __asm__ volatile ("pause" ::: "memory");
 }
-spin_unlock (&hpet_lock);
 }
 /* Initialize HPET */
@@ -88,19 +129,12 @@ void amd64_hpet_init (void) {
 mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
 MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
-hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1;
-/* reset */
-amd64_hpet_write (HPET_GCR, 0);
-amd64_hpet_write (HPET_MCVR, 0);
-amd64_hpet_write (HPET_GCR, 1);
-uint64_t gcidr = amd64_hpet_read (HPET_GCIDR);
-if (hpet_32bits) {
-uint32_t low = (uint32_t)gcidr;
-uint32_t high = (uint32_t)amd64_hpet_read (HPET_GCIDR + 4);
-gcidr = (((uint64_t)high << 32) | low);
-}
-hpet_period_fs = (gcidr >> 32);
+uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
+hpet_32bits = (caps & (1 << 13)) ? 0 : 1;
+hpet_period_fs = (uint32_t)(caps >> 32);
+amd64_hpet_write64 (HPET_GCR, 0);
+amd64_hpet_write_counter (0);
+amd64_hpet_write64 (HPET_GCR, 1);
 }
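
Two things worth spelling out about the HPET rework above. First, the single width-switching accessor is split into explicit 64-bit and 32-bit helpers, and on an HPET whose main counter is only 32 bits wide the counter is read with a high/low/high sequence that retries until the two high halves match, so a carry between the halves cannot produce a torn 64-bit value. Second, the sleep loop now works in counter ticks rather than femtoseconds: HPET_GCIDR[63:32] is the counter period in femtoseconds, so us microseconds corresponds to (us * 1000) nanoseconds divided by (hpet_period_fs / 1000000) nanoseconds per tick, and hpet_lock is now only held inside the counter accessors instead of for the whole sleep. A worked example with illustrative numbers, not taken from real hardware:

/* Assume a 10 MHz HPET: 100 ns per tick = 100,000,000 fs. */
static uint64_t example_ticks_to_wait (void) {
    uint64_t us        = 1000;                                  /* sleep 1 ms      */
    uint64_t period_fs = 100000000;                             /* 100 ns per tick */
    return (us * 1000ULL) / (period_fs / 1000000ULL);           /* = 10000 ticks   */
}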

View File

@@ -39,8 +39,8 @@ static void amd64_gdt_init (struct cpu* cpu) {
 amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
 amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
 amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
-amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xFA, 0xA0);
-amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xF2, 0xC0);
+amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
+amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
 amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
 uint32_t tssbasehigh = (tssbase >> 32);

View File

@@ -7,10 +7,12 @@
 #include <irq/irq.h>
 #include <libk/std.h>
 #include <libk/string.h>
+#include <m/syscall_defs.h>
 #include <sys/debug.h>
 #include <sys/irq.h>
 #include <sys/smp.h>
 #include <sys/spin.h>
+#include <syscall/syscall.h>
 /* 8259 PIC defs. */
 #define PIC1 0x20
@@ -121,6 +123,8 @@ static void amd64_idt_init (void) {
 IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
 IDT_ENTRY (TLB_SHOOTDOWN, 1);
+IDT_ENTRY (CPU_REQUEST_SCHED, 1);
+IDT_ENTRY (CPU_SPURIOUS, 1);
 /* clang-format on */
 #undef IDT_ENTRY
@@ -153,7 +157,7 @@ static void amd64_intr_exception (struct saved_regs* regs) {
 regs->rbx);
 if (regs->cs == (GDT_UCODE | 0x03)) {
-proc_kill (thiscpu->proc_current);
+proc_kill (thiscpu->proc_current, regs);
 } else {
 spin ();
 }
@@ -161,6 +165,8 @@ static void amd64_intr_exception (struct saved_regs* regs) {
 /* Handle incoming interrupt, dispatch IRQ handlers. */
 void amd64_intr_handler (void* stack_ptr) {
+amd64_load_kernel_cr3 ();
 struct saved_regs* regs = stack_ptr;
 if (regs->trap <= 31) {
@@ -222,6 +228,8 @@ uint8_t amd64_resolve_irq (uint8_t irq) {
 static const uint8_t mappings[] = {
 [SCHED_PREEMPT_TIMER] = 0,
 [TLB_SHOOTDOWN] = 1,
+[CPU_REQUEST_SCHED] = 2,
+[CPU_SPURIOUS] = 3,
 };
 return mappings[irq];
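
The two new vectors round out the table: CPU_REQUEST_SCHED (82) finally gets the IDT gate the second commit message refers to, and CPU_SPURIOUS is 255 because that is the vector programmed into the LAPIC spurious-interrupt register (the 0xFF | (1 << 8) write to LAPIC_SIVR in the APIC file above), so a spurious interrupt now lands on a real stub instead of an unconfigured gate. A hypothetical compile-time check, not part of the patch, that would keep the two values from drifting apart:

/* Hypothetical sanity check: CPU_SPURIOUS must match the vector in LAPIC_SIVR. */
_Static_assert (CPU_SPURIOUS == 0xFF, "LAPIC SIVR is programmed with vector 0xFF");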

View File

@@ -7,5 +7,6 @@
 #define SCHED_PREEMPT_TIMER 80
 #define TLB_SHOOTDOWN 81
 #define CPU_REQUEST_SCHED 82
+#define CPU_SPURIOUS 255
 #endif // _KERNEL_AMD64_INTR_DEFS_H

View File

@@ -7,33 +7,41 @@
 pushq $z;
 #define no_err(z) \
 pushq $0; \
 pushq $z;
 #define make_intr_stub(x, n) \
 .global amd64_intr ## n; \
 amd64_intr ## n:; \
 x(n); \
 cli; \
-;\
+; \
 push_regs; \
-;\
-cld; \
-;\
-movq %rsp, %rdi; \
-;\
-movq %rsp, %rbp; \
-;\
-subq $8, %rsp; \
-andq $~0xF, %rsp; \
-;\
-callq amd64_intr_handler; \
-;\
-movq %rbp, %rsp; \
-;\
-pop_regs; \
-addq $16, %rsp; \
-;\
+; \
+movw $0x10, %ax; \
+movw %ax, %ds; \
+movw %ax, %es; \
+; \
+cld; \
+; \
+movq %rsp, %rdi; \
+; \
+movq %cr3, %rax; pushq %rax; \
+; \
+movq %rsp, %rbp; \
+; \
+subq $8, %rsp; \
+andq $-16, %rsp; \
+; \
+callq amd64_intr_handler; \
+; \
+movq %rbp, %rsp; \
+; \
+popq %rax; movq %rax, %cr3; \
+; \
+pop_regs; \
+addq $16, %rsp; \
+; \
 iretq;
@@ -89,3 +97,4 @@ make_intr_stub(no_err, 47)
 make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
 make_intr_stub(no_err, TLB_SHOOTDOWN)
 make_intr_stub(no_err, CPU_REQUEST_SCHED)
+make_intr_stub(no_err, CPU_SPURIOUS)

View File

@@ -37,7 +37,11 @@ static uintptr_t amd64_current_cr3 (void) {
 /* Load kernel CR3 as current CR3 */
 void amd64_load_kernel_cr3 (void) {
-__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
+uintptr_t cr3 = amd64_current_cr3 ();
+if (cr3 != kernel_pd.cr3_paddr) {
+__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
+}
 }
 /* Extract PML info from virtual address */
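
amd64_load_kernel_cr3 () is now called on every kernel entry (the interrupt handler and the syscall dispatcher elsewhere in this diff), because the entry stubs arrive with the user CR3 still loaded, save it, and restore it on exit. Since a write to CR3 flushes the non-global TLB entries even when the value does not change, the function first compares the current CR3 with kernel_pd.cr3_paddr and skips the reload when the kernel page tables are already active. The body of amd64_current_cr3 () sits just above this hunk and is not shown; it presumably looks like the usual inline-asm read, sketched here as an assumption:

/* Sketch, not taken from the repository. */
static uintptr_t amd64_current_cr3 (void) {
    uintptr_t cr3;
    __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3));
    return cr3;
}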

View File

@@ -2,9 +2,8 @@
 .global amd64_do_sched
 amd64_do_sched:
-cli
 movq %rsi, %cr3
 movq %rdi, %rsp
 pop_regs
-add $16, %rsp
+addq $16, %rsp
 iretq

View File

@@ -7,6 +7,8 @@
 #include <sys/smp.h>
 void do_sched (struct proc* proc) {
+__asm__ volatile ("cli");
 thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
 thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;

View File

@@ -61,7 +61,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
 amd64_init (cpu, true); /* gdt + idt */
 syscall_init ();
-amd64_lapic_init (0);
+amd64_lapic_init (1000);
 DEBUG ("CPU %u is online!\n", thiscpu->id);
@@ -75,7 +75,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
 /// Initialize SMP subsystem for AMD64. Start AP CPUs
 void smp_init (void) {
-amd64_lapic_init (10000);
+amd64_lapic_init (1000);
 struct limine_mp_response* mp = limine_mp_request.response;

View File

@@ -23,6 +23,7 @@ struct cpu {
 volatile struct tss tss;
 uintptr_t lapic_mmio_base;
+uint64_t lapic_ticks;
 uint32_t id;
 struct {

View File

@@ -12,6 +12,8 @@
 extern void amd64_syscall_entry (void);
 int amd64_syscall_dispatch (void* stack_ptr) {
+amd64_load_kernel_cr3 ();
 struct saved_regs* regs = stack_ptr;
 int syscall_num = regs->rax;
@@ -24,7 +26,7 @@ int amd64_syscall_dispatch (void* stack_ptr) {
 __asm__ volatile ("sti");
-int result = func (caller, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
+int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
 __asm__ volatile ("cli");
@@ -32,7 +34,8 @@ int amd64_syscall_dispatch (void* stack_ptr) {
 }
 void syscall_init (void) {
-amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_UCODE - 16) << 48));
+amd64_wrmsr (MSR_STAR,
+((uint64_t)(GDT_KCODE | 0x03) << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
 amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
 amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
 amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
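
Besides the MSR_STAR value (see the note after the GDT selector block above), the dispatcher now switches to the kernel page tables first thing and passes the saved register frame to every handler, which is what lets sys_proc_quit hand the frame to proc_kill ()/proc_sched (). A sketch of what the widened handler signature allows, using a hypothetical syscall that is not in this changeset:

/* Hypothetical handler, shown only to illustrate the new signature. */
static int sys_proc_yield (struct proc* proc, void* regs, uintptr_t a1, uintptr_t a2,
                           uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6) {
    (void)proc; (void)a1; (void)a2; (void)a3; (void)a4; (void)a5; (void)a6;
    proc_sched (regs); /* saves the frame into the current proc and picks the next one */
    return SR_OK;
}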

View File

@@ -4,34 +4,41 @@
 .global amd64_syscall_entry
 amd64_syscall_entry:
+cli
 movq %rsp, %gs:0
 movq %gs:8, %rsp
-pushq $0x23
+pushq $0x1b
 pushq %gs:0
 pushq %r11
-pushq $0x1b
+pushq $0x23
 pushq %rcx
 pushq $0
 pushq $0
 push_regs
+movw $0x10, %ax
+movw %ax, %ds
+movw %ax, %es
+movw %ax, %ss
 cld
 movq %rsp, %rdi
+movq %cr3, %rax; pushq %rax
 movq %rsp, %rbp
 subq $8, %rsp
-andq $~0xF, %rsp
+andq $-16, %rsp
 callq amd64_syscall_dispatch
 movq %rbp, %rsp
+popq %rax; movq %rax, %cr3
 pop_regs_skip_rax
 addq $56, %rsp

View File

@@ -9,9 +9,8 @@
 #include <amd64/intr.h>
 #endif
-/* TODO: figure out a generic way to work with IRQs */
-static struct list_node_link* irqs = NULL;
+struct irq* irq_table[0x100];
 static rw_spin_lock_t irqs_lock;
 bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
@@ -26,7 +25,7 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
 irq->flags = flags;
 rw_spin_write_lock (&irqs_lock);
-list_append (irqs, &irq->irqs_link);
+irq_table[irq_num] = irq;
 rw_spin_write_unlock (&irqs_lock);
 #if defined(__x86_64__)
@@ -37,42 +36,12 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
 return true;
 }
-void irq_detach (void (*func) (void*, void*)) {
-struct list_node_link *irq_link, *irq_link_tmp;
-struct irq* irq = NULL;
-rw_spin_read_lock (&irqs_lock);
-list_foreach (irqs, irq_link, irq_link_tmp) {
-irq = list_entry (irq_link, struct irq, irqs_link);
-if ((uintptr_t)irq->func == (uintptr_t)func)
-break;
-}
-rw_spin_read_unlock (&irqs_lock);
-if (irq != NULL) {
-rw_spin_write_lock (&irqs_lock);
-list_remove (irqs, &irq->irqs_link);
-rw_spin_write_unlock (&irqs_lock);
-}
-}
 struct irq* irq_find (uint32_t irq_num) {
-struct list_node_link *irq_link, *irq_link_tmp;
 rw_spin_read_lock (&irqs_lock);
-list_foreach (irqs, irq_link, irq_link_tmp) {
-struct irq* irq = list_entry (irq_link, struct irq, irqs_link);
-if (irq->irq_num == irq_num) {
-rw_spin_read_unlock (&irqs_lock);
-return irq;
-}
-}
+struct irq* irq = irq_table[irq_num];
 rw_spin_read_unlock (&irqs_lock);
-return NULL;
+return irq;
 }
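
The IRQ registry above moves from a linked list to a flat array indexed by vector number, so irq_find () becomes a constant-time lookup under the read lock, and the list-walking irq_detach () is dropped along with it (its declaration is removed from the header below). Attaching a handler looks the same as what proc_init () does later in this diff; a short usage sketch with a hypothetical handler name:

/* my_timer_handler is hypothetical; the flag and vector names are from this repo. */
static void my_timer_handler (void* arg, void* regs) {
    (void)arg; (void)regs;
}

static void example (void) {
    irq_attach (&my_timer_handler, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_UNSAFE);
    struct irq* irq = irq_find (SCHED_PREEMPT_TIMER); /* now just irq_table[80] */
    (void)irq;
}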

View File

@@ -19,7 +19,6 @@ struct irq {
 };
 bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags);
-void irq_detach (irq_func_t func);
 struct irq* irq_find (uint32_t irq_num);
 #endif // _KERNEL_IRQ_IRQ_H
#endif // _KERNEL_IRQ_IRQ_H #endif // _KERNEL_IRQ_IRQ_H

View File

@@ -214,11 +214,11 @@ static void proc_register (struct proc* proc, struct cpu* cpu) {
 /* caller holds cpu->lock */
 static struct proc* proc_find_sched (struct cpu* cpu) {
 struct rb_node_link* node = NULL;
-struct proc* start = cpu->proc_current;
+struct proc* current = cpu->proc_current;
 struct proc* proc = NULL;
-if (start)
-node = &start->cpu_run_q_link;
+if (current)
+rbtree_next (&current->cpu_run_q_link, node);
 if (!node)
 rbtree_first (&cpu->proc_run_q, node);
@@ -235,15 +235,12 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
 rbtree_next (node, node);
-if (!node) {
+if (!node)
 rbtree_first (&cpu->proc_run_q, node);
-}
-if (node == first)
-break;
 } while (node != first);
-return NULL;
+return ((atomic_load (&current->state) == PROC_READY) ? current : NULL);
 }
 static void proc_reap (void) {
@@ -283,8 +280,10 @@ static void proc_reap (void) {
 }
 }
-void proc_sched (void) {
-if (atomic_fetch_add (&sched_cycles, 1) % SCHED_REAP_FREQ == 0)
+void proc_sched (void* regs) {
+int s_cycles = atomic_fetch_add (&sched_cycles, 1);
+if (s_cycles % SCHED_REAP_FREQ == 0)
 proc_reap ();
 struct proc* next = NULL;
@@ -292,25 +291,36 @@ void proc_sched (void) {
 spin_lock (&cpu->lock);
-if (cpu->proc_run_q != NULL) {
-next = proc_find_sched (cpu);
-if (next)
-cpu->proc_current = next;
+struct proc* prev = cpu->proc_current;
+if (prev != NULL) {
+spin_lock (&prev->lock);
+prev->pdata.regs = *(struct saved_regs*)regs;
+spin_unlock (&prev->lock);
 }
-spin_unlock (&thiscpu->lock);
-if ((next != NULL) && (atomic_load (&next->state) == PROC_READY))
+next = proc_find_sched (cpu);
+if (next) {
+cpu->proc_current = next;
+spin_unlock (&cpu->lock);
 do_sched (next);
+} else {
+cpu->proc_current = NULL;
+spin_unlock (&cpu->lock);
 spin ();
+}
 }
-void proc_kill (struct proc* proc) {
-atomic_store (&proc->state, PROC_DEAD);
+void proc_kill (struct proc* proc, void* regs) {
 struct cpu* cpu = proc->cpu;
+spin_lock (&proc->lock);
+atomic_store (&proc->state, PROC_DEAD);
+spin_unlock (&proc->lock);
 spin_lock (&cpu->lock);
 rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
@@ -322,39 +332,35 @@ void proc_kill (struct proc* proc) {
 DEBUG ("killed PID %d\n", proc->pid);
 if (cpu == thiscpu)
-proc_sched ();
+proc_sched (regs);
 else
 cpu_request_sched (cpu);
 }
 void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
+struct cpu* cpu = proc->cpu;
 spin_lock (&proc->lock);
 atomic_store (&proc->state, PROC_SUSPENDED);
-struct cpu* cpu = proc->cpu;
+proc->suspension_q = sq;
+spin_unlock (&proc->lock);
 /* remove from run q */
 spin_lock (&cpu->lock);
 rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
 if (cpu->proc_current == proc)
 cpu->proc_current = NULL;
 spin_unlock (&cpu->lock);
-proc->suspension_q = sq;
-spin_lock (&proc->suspension_q->lock);
-rbtree_insert (struct proc, &proc->suspension_q->proc_tree, &proc->suspension_link,
-suspension_link, pid);
-spin_unlock (&proc->suspension_q->lock);
-spin_unlock (&proc->lock);
+spin_lock (&sq->lock);
+rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
+spin_unlock (&sq->lock);
 cpu_request_sched (cpu);
 }
 void proc_resume (struct proc* proc) {
-spin_lock (&proc->lock);
 struct cpu* cpu = proc->cpu;
 struct proc_suspension_q* sq = proc->suspension_q;
@@ -362,26 +368,27 @@ void proc_resume (struct proc* proc) {
 rbtree_delete (&sq->proc_tree, &proc->suspension_link);
 spin_unlock (&sq->lock);
+spin_lock (&proc->lock);
 proc->suspension_q = NULL;
+atomic_store (&proc->state, PROC_READY);
+spin_unlock (&proc->lock);
 spin_lock (&cpu->lock);
 rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
 spin_unlock (&cpu->lock);
-atomic_store (&proc->state, PROC_READY);
-spin_unlock (&proc->lock);
 cpu_request_sched (cpu);
 }
 static void proc_irq_sched (void* arg, void* regs) {
-(void)arg, (void)regs;
-proc_sched ();
-}
-static void proc_irq_cpu_request_sched (void* arg, void* regs) {
-(void)arg, (void)regs;
-proc_sched ();
+(void)arg;
+#if defined(__x86_64__)
+struct saved_regs* s_regs = regs;
+/* Only schedule, when we came from usermode */
+if ((s_regs->cs & 0x03))
+proc_sched (regs);
+#endif
 }
 void proc_init (void) {
@@ -389,8 +396,8 @@ void proc_init (void) {
 proc_register (init, thiscpu);
 #if defined(__x86_64__)
-irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
-irq_attach (&proc_irq_cpu_request_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_SAFE);
+irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_UNSAFE);
+irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_UNSAFE);
 #endif
 do_sched (init);
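
The scheduler changes above fit together: proc_sched () now receives the interrupted frame and stores it in prev->pdata.regs before choosing the next process, proc_find_sched () resumes the round robin from the node after the current process and falls back to the current process if it is still PROC_READY, and both the preemption timer and the CPU_REQUEST_SCHED IPI now go through the same proc_irq_sched (), which only reschedules when the saved CS came from user mode. That check works because the low two bits of a selector are its RPL: the user selectors 0x1b/0x23 have RPL 3, the kernel ones 0x08/0x10 have RPL 0. Spelled out as a small helper equivalent to the truth test in the patch:

/* Equivalent to the `s_regs->cs & 0x03` test in proc_irq_sched (). */
static int came_from_user (void* regs) {
    struct saved_regs* s_regs = regs;
    return ((s_regs->cs & 0x03) == 0x03); /* RPL 3 => the CPU was in user mode */
}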

View File

@@ -55,8 +55,8 @@ struct proc {
 void proc_suspend (struct proc* proc, struct proc_suspension_q* sq);
 void proc_resume (struct proc* proc);
-void proc_sched (void);
-void proc_kill (struct proc* proc);
+void proc_sched (void* regs);
+void proc_kill (struct proc* proc, void* regs);
 bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
 uint32_t flags);
 bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages);

View File

@@ -12,19 +12,19 @@
 #include <syscall/syscall.h>
 #define DEFINE_SYSCALL(name) \
-int name (struct proc* proc, uintptr_t UNUSED a1, uintptr_t UNUSED a2, uintptr_t UNUSED a3, \
-uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
+int name (struct proc* proc, void* UNUSED regs, uintptr_t UNUSED a1, uintptr_t UNUSED a2, \
+uintptr_t UNUSED a3, uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
 /* int proc_quit (void) */
 DEFINE_SYSCALL (sys_proc_quit) {
-proc_kill (proc);
-proc_sched ();
+proc_kill (proc, regs);
 return SR_OK;
 }
 /* int proc_test (void) */
 DEFINE_SYSCALL (sys_proc_test) {
-DEBUG ("test syscall message!\n");
+char c = (char)a1;
+DEBUG ("test syscall! %c\n", c);
 return SR_OK;
 }

View File

@@ -4,8 +4,8 @@
 #include <libk/std.h>
 #include <proc/proc.h>
-typedef int (*syscall_handler_func_t) (struct proc* proc, uintptr_t a1, uintptr_t a2, uintptr_t a3,
-uintptr_t a4, uintptr_t a5, uintptr_t a6);
+typedef int (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1, uintptr_t a2,
+uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6);
 syscall_handler_func_t syscall_find_handler (int syscall_num);

View File

@@ -5,7 +5,7 @@
 int m_proc_quit (void) { return m_syscall (SYS_PROC_QUIT, 0, 0, 0, 0, 0, 0); }
-int m_proc_test (void) { return m_syscall (SYS_PROC_TEST, 0, 0, 0, 0, 0, 0); }
+int m_proc_test (char c) { return m_syscall (SYS_PROC_TEST, (uintptr_t)c, 0, 0, 0, 0, 0); }
 int m_proc_map (uintptr_t paddr, uintptr_t vaddr, size_t pages, uint32_t flags) {
 return m_syscall (SYS_PROC_MAP, paddr, vaddr, (uintptr_t)pages, (uintptr_t)flags, 0, 0);

View File

@@ -17,7 +17,7 @@
 #include <stdint.h>
 int m_proc_quit (void);
-int m_proc_test (void);
+int m_proc_test (char c);
 int m_proc_map (uintptr_t paddr, uintptr_t vaddr, size_t pages, uint32_t flags);
 int m_proc_unmap (uintptr_t vaddr, size_t pages);
 int m_proc_create_resource_mem (size_t pages, int vis, uintptr_t* out_paddr);