Fix user apps randomly crashing (APIC, GDT layout, syscall entry)
@@ -34,6 +34,8 @@
 /* Divide config register */
 #define LAPIC_DCR 0x3E0
+
+#define DIVIDER_VALUE 0x0B

 struct ioapic {
     struct acpi_madt_ioapic table_data;
     rw_spin_lock_t lock;
@@ -51,7 +53,7 @@ static size_t ioapic_entries = 0;
 /* Count of actual interrupt source overrides */
 static size_t intr_src_override_entries = 0;

-static uint64_t lapic_ticks;
+static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;

 /* Read IOAPIC */
 static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
@@ -117,18 +119,16 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
         uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
         calc_flags |= (uint64_t)mode << 15;
         calc_flags |= (uint64_t)polarity << 13;

         calc_flags |= flags;
     } else {
         calc_flags |= flags;
     }

-    ioapic = amd64_ioapic_find (irq);
+    uint8_t gsi = found_override ? override->gsi : irq;
+
+    ioapic = amd64_ioapic_find (gsi);

     if (ioapic == NULL)
         return;

-    uint32_t irq_reg = ((irq - ioapic->table_data.gsi_base) * 2) + 0x10;
+    uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;

     amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32));
     amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags);
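
Note on this fix: IOAPIC redirection entries are indexed by global system interrupt (GSI), not by legacy IRQ number, and ACPI interrupt source overrides can move a legacy IRQ to a different GSI (classically the PIT, IRQ 0 -> GSI 2). The old code looked up the IOAPIC and computed the register offset with the raw IRQ. A minimal sketch of the corrected lookup; find_override is a hypothetical stand-in for the MADT scan this file performs elsewhere:

    /* Illustrative only: route legacy IRQ 0 when ACPI reports IRQ 0 -> GSI 2. */
    uint8_t irq = 0;
    struct acpi_madt_intr_src_override* ov = find_override (irq); /* assumed helper */
    uint8_t gsi = (ov != NULL) ? ov->gsi : irq;

    /* Each redirection entry is a pair of 32-bit registers starting at 0x10,
     * so IRQ 0 must program offset 0x14 (GSI 2), not 0x10 (GSI 0). */
    uint32_t reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10;
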
@@ -201,15 +201,20 @@ void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
  * us - Period length in microseconds
  */
 static uint32_t amd64_lapic_calibrate (uint32_t us) {
-    amd64_lapic_write (LAPIC_DCR, 0x0B);
+    spin_lock (&lapic_calibration_lock);
+
+    amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);

     amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));

     amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);

     sleep_micro (us);

     amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16));
     uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
     DEBUG ("timer ticks = %u\n", ticks);
+
+    spin_unlock (&lapic_calibration_lock);

     return ticks;
 }
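
The calibration pattern: program the divider, start a masked count down from 0xFFFFFFFF, busy-wait a known interval on the HPET, then read how far the counter fell. The result is LAPIC timer ticks per us microseconds, and the new lock serializes the LVTTR/TIMICT programming when several CPUs calibrate at once. A sketch of how such a result scales to other periods (hypothetical helper, not part of this commit):

    #include <stdint.h>

    /* ticks was measured over calib_us microseconds; scale it to period_us.
     * 64-bit intermediate so a large tick count cannot overflow. */
    static uint32_t lapic_ticks_for_period (uint32_t ticks, uint32_t calib_us,
                                            uint32_t period_us) {
        return (uint32_t)(((uint64_t)ticks * period_us) / calib_us);
    }
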
@@ -220,11 +225,9 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
  * ticks - Initial tick count
  */
 static void amd64_lapic_start (uint32_t ticks) {
-    amd64_lapic_write (LAPIC_DCR, 0x0B);
-
-    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
-
+    amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
     amd64_lapic_write (LAPIC_TIMICT, ticks);
+    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17) | (1 << 16));
 }

 /*
@@ -244,11 +247,8 @@ void amd64_lapic_init (uint32_t us) {
     amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));

-    if (thiscpu->id == 0) {
-        lapic_ticks = amd64_lapic_calibrate (us);
-    }
-
-    amd64_lapic_start (lapic_ticks);
+    thiscpu->lapic_ticks = amd64_lapic_calibrate (us);
+    amd64_lapic_start (thiscpu->lapic_ticks);
 }

 /*
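
Why per-CPU ticks: previously only CPU 0 calibrated and wrote the shared lapic_ticks global, while every AP started its timer from whatever that global held; an AP booting before the write (or booting with us = 0, as smp.c used to pass) armed its timer with garbage. The shape of the old race, reduced to a sketch (illustrative only, not the kernel's code):

    static uint64_t shared_ticks; /* stays 0 until CPU 0 finishes calibrating */

    static void old_lapic_init (uint32_t cpu_id, uint32_t us) {
        if (cpu_id == 0)
            shared_ticks = calibrate (us); /* assumed calibration routine */
        start_timer (shared_ticks);        /* an AP may arm with 0 here */
    }

With the fix, every CPU calibrates against its own LAPIC and keeps the result in the new struct cpu field (see the smp.c and cpu-struct hunks below).
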
@@ -7,8 +7,8 @@
 #define GDT_KCODE 0x08
 #define GDT_KDATA 0x10
-#define GDT_UCODE 0x18
-#define GDT_UDATA 0x20
+#define GDT_UDATA 0x18
+#define GDT_UCODE 0x20
 #define GDT_TSS 0x28

 /* Size of kernel stack */
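
The swap is not cosmetic. A selector is (GDT index << 3) | RPL, and SYSRET derives both user selectors from one base in MSR_STAR: SS = base + 8, CS = base + 16, with RPL forced to 3. That hardwires the order user data first, then user code; the old 0x18/0x20 ordering handed SYSRET a kernel-data SS (worked through at the syscall_init hunk below). A compile-time restatement of the new layout, as a sketch:

    /* selector = (index << 3) | RPL, so with RPL 3:
     *   GDT_UDATA | 3 = 0x1b  (index 3, user data)
     *   GDT_UCODE | 3 = 0x23  (index 4, user code)
     * SYSRET requires user data to sit exactly 8 bytes below user code. */
    _Static_assert ((GDT_UDATA | 3) == 0x1b, "user data selector");
    _Static_assert ((GDT_UCODE | 3) == 0x23, "user code selector");
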
@@ -30,46 +30,87 @@ static uint64_t hpet_period_fs;
 /* Lock, which protects concurrent access. See amd64/smp.c */
 static spin_lock_t hpet_lock = SPIN_LOCK_INIT;

-/* Read a HPET register. Assumes caller holds \ref hpet_lock */
-static uint64_t amd64_hpet_read (uint32_t reg) {
+/* Read a HPET register. Assumes caller holds hpet_lock */
+static uint64_t amd64_hpet_read64 (uint32_t reg) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
-    return (hpet_32bits ? *(volatile uint32_t*)(hpet_vaddr + reg)
-                        : *(volatile uint64_t*)(hpet_vaddr + reg));
+    return *(volatile uint64_t*)(hpet_vaddr + reg);
 }

-/* Write a HPET register. Assumes caller holds \ref hpet_lock */
-static void amd64_hpet_write (uint32_t reg, uint64_t value) {
+static uint32_t amd64_hpet_read32 (uint32_t reg) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
-    if (hpet_32bits)
-        *(volatile uint32_t*)(hpet_vaddr + reg) = (value & 0xFFFFFFFF);
-    else
-        *(volatile uint64_t*)(hpet_vaddr + reg) = value;
+    return *(volatile uint32_t*)(hpet_vaddr + reg);
 }

-/* Read current value of \ref HPET_MCVR register. */
-static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }
+/* Write a HPET register. Assumes caller holds hpet_lock */
+static void amd64_hpet_write64 (uint32_t reg, uint64_t value) {
+    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+    uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
+    *(volatile uint64_t*)(hpet_vaddr + reg) = value;
+}
+
+static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
+    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+    uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
+    *(volatile uint32_t*)(hpet_vaddr + reg) = value;
+}
+
+/* Read current value of HPET_MCVR register. */
+static uint64_t amd64_hpet_read_counter (void) {
+    uint64_t value;
+
+    spin_lock (&hpet_lock);
+
+    if (!hpet_32bits)
+        value = amd64_hpet_read64 (HPET_MCVR);
+    else {
+        uint32_t hi1, lo, hi2;
+        do {
+            hi1 = amd64_hpet_read32 (HPET_MCVR + 4);
+            lo = amd64_hpet_read32 (HPET_MCVR + 0);
+            hi2 = amd64_hpet_read32 (HPET_MCVR + 4);
+        } while (hi1 != hi2);
+
+        value = ((uint64_t)hi1 << 32) | lo;
+    }
+
+    spin_unlock (&hpet_lock);
+
+    return value;
+}
+
+static void amd64_hpet_write_counter (uint64_t value) {
+    spin_lock (&hpet_lock);
+
+    if (!hpet_32bits)
+        amd64_hpet_write64 (HPET_MCVR, value);
+    else {
+        amd64_hpet_write32 (HPET_MCVR, (uint32_t)value);
+        amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
+    }
+
+    spin_unlock (&hpet_lock);
+}

 /* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being
  * held. */
 void amd64_hpet_sleep_micro (uint64_t us) {
-    spin_lock (&hpet_lock);
     if (hpet_period_fs == 0)
         return;

-    uint64_t start = amd64_hpet_timestamp ();
-    uint64_t target_fs = us * 1000000000ULL;
+    uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL);
+    uint64_t start = amd64_hpet_read_counter ();

     for (;;) {
-        uint64_t current = amd64_hpet_timestamp ();
-        uint64_t dt = current - start;
+        uint64_t now = amd64_hpet_read_counter ();

-        if ((dt * hpet_period_fs) >= target_fs)
+        if ((now - start) >= ticks_to_wait)
             break;

         __asm__ volatile ("pause" ::: "memory");
     }
-
-    spin_unlock (&hpet_lock);
 }
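
Two things changed in the sleep. First, hpet_lock is no longer held across the whole wait (the old code even returned early with the lock still held when hpet_period_fs was 0); the lock is now taken only inside each counter access, and amd64_hpet_read_counter makes a 32-bit HPET safe with the hi/lo/hi loop, re-reading the high half until it is stable so a carry between the two 32-bit reads cannot produce a torn value. Second, the wait is converted to ticks up front: hpet_period_fs is femtoseconds per tick and 1 us = 10^9 fs, so ticks = us * 10^9 / period_fs; the code computes the algebraically identical (us * 10^3) / (period_fs / 10^6), which keeps the multiplication far from 64-bit overflow. A sketch of the conversion with a worked example:

    #include <stdint.h>

    /* period_fs is the HPET counter period in femtoseconds. */
    static uint64_t us_to_hpet_ticks (uint64_t us, uint64_t period_fs) {
        return (us * 1000ULL) / (period_fs / 1000000ULL);
    }

    /* Example: a 100 MHz HPET has period_fs = 10,000,000 (10 ns), so
     * us_to_hpet_ticks (1000, 10000000) == 100000 ticks, exactly 1 ms. */
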
@@ -88,19 +129,12 @@ void amd64_hpet_init (void) {
     mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
                         MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);

-    hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1;
+    uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
+    hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

-    /* reset */
-    amd64_hpet_write (HPET_GCR, 0);
-    amd64_hpet_write (HPET_MCVR, 0);
-    amd64_hpet_write (HPET_GCR, 1);
+    hpet_period_fs = (uint32_t)(caps >> 32);

-    uint64_t gcidr = amd64_hpet_read (HPET_GCIDR);
-    if (hpet_32bits) {
-        uint32_t low = (uint32_t)gcidr;
-        uint32_t high = (uint32_t)amd64_hpet_read (HPET_GCIDR + 4);
-        gcidr = (((uint64_t)high << 32) | low);
-    }
-
-    hpet_period_fs = (gcidr >> 32);
+    /* reset */
+    amd64_hpet_write64 (HPET_GCR, 0);
+    amd64_hpet_write_counter (0);
+    amd64_hpet_write64 (HPET_GCR, 1);
 }
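
GCIDR[63:32] reports the main-counter period in femtoseconds, so the frequency falls out directly (sketch; 10^15 fs = 1 s):

    static uint64_t hpet_freq_hz (uint64_t period_fs) {
        return 1000000000000000ULL / period_fs;
    }

    /* e.g. the common 14.318 MHz HPET reports period_fs = 69,841,279,
     * and 10^15 / 69841279 is roughly 14,318,180 Hz. */

The rewrite also reads GCIDR once and reuses it for both the 32-bit flag and the period, instead of re-reading and re-assembling it after the reset sequence.
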
@@ -39,8 +39,8 @@ static void amd64_gdt_init (struct cpu* cpu) {
     amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
     amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
     amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
-    amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xFA, 0xA0);
-    amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xF2, 0xC0);
+    amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0);
+    amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0);
     amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);

     uint32_t tssbasehigh = (tssbase >> 32);
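
The descriptor contents move in lockstep with the selector constants: entry 3 (selector 0x18) now holds user data and entry 4 (selector 0x20) user code. Decoding the access bytes, which is standard x86 descriptor encoding rather than anything project-specific:

    /* 0x9A = present | DPL 0 | code, readable  -> kernel code (old[1])
     * 0x92 = present | DPL 0 | data, writable  -> kernel data (old[2])
     * 0xF2 = present | DPL 3 | data, writable  -> user data   (old[3], was old[4])
     * 0xFA = present | DPL 3 | code, readable  -> user code   (old[4], was old[3]) */
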
@@ -7,10 +7,12 @@
 #include <irq/irq.h>
 #include <libk/std.h>
 #include <libk/string.h>
 #include <m/syscall_defs.h>
 #include <sys/debug.h>
 #include <sys/irq.h>
 #include <sys/smp.h>
 #include <sys/spin.h>
 #include <syscall/syscall.h>

 /* 8259 PIC defs. */
 #define PIC1 0x20

@@ -122,6 +124,7 @@ static void amd64_idt_init (void) {
     IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
     IDT_ENTRY (TLB_SHOOTDOWN, 1);
     IDT_ENTRY (CPU_REQUEST_SCHED, 1);
+    IDT_ENTRY (CPU_SPURIOUS, 1);
     /* clang-format on */
 #undef IDT_ENTRY
@@ -154,7 +157,7 @@ static void amd64_intr_exception (struct saved_regs* regs) {
             regs->rbx);

     if (regs->cs == (GDT_UCODE | 0x03)) {
-        proc_kill (thiscpu->proc_current);
+        proc_kill (thiscpu->proc_current, regs);
     } else {
         spin ();
     }
@@ -226,6 +229,7 @@ uint8_t amd64_resolve_irq (uint8_t irq) {
         [SCHED_PREEMPT_TIMER] = 0,
         [TLB_SHOOTDOWN] = 1,
         [CPU_REQUEST_SCHED] = 2,
+        [CPU_SPURIOUS] = 3,
     };

     return mappings[irq];
@@ -7,5 +7,6 @@
 #define SCHED_PREEMPT_TIMER 80
 #define TLB_SHOOTDOWN 81
 #define CPU_REQUEST_SCHED 82
+#define CPU_SPURIOUS 255

 #endif // _KERNEL_AMD64_INTR_DEFS_H
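
CPU_SPURIOUS is 255 = 0xFF, deliberately the same vector that amd64_lapic_init programs into the spurious-interrupt vector register (LAPIC_SIVR, 0xFF | (1 << 8)). Giving it a real IDT gate and a mappings[] slot means a spurious LAPIC interrupt no longer lands on a missing descriptor. A compile-time restatement of that coupling (sketch):

    _Static_assert (CPU_SPURIOUS == 0xFF,
                    "must match the vector programmed into LAPIC_SIVR");
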
@@ -7,39 +7,41 @@
     pushq $z;

 #define no_err(z) \
     pushq $0; \
     pushq $z;

-#define make_intr_stub(x, n) \
-    .global amd64_intr ## n; \
-    amd64_intr ## n:; \
-    x(n); \
-    cli; \
-    ;\
-    push_regs; \
-    ;\
-    cld; \
-    ;\
-    movq %rsp, %rdi; \
-    ;\
-    movq %cr3, %rax; \
-    pushq %rax; \
-    ;\
-    movq %rsp, %rbp; \
-    ;\
-    subq $8, %rsp; \
-    andq $~0xF, %rsp; \
-    ;\
-    callq amd64_intr_handler; \
-    ;\
-    movq %rbp, %rsp; \
-    ;\
-    popq %rax; \
-    movq %rax, %cr3 \
-    ;\
-    pop_regs; \
-    addq $16, %rsp; \
-    ;\
+#define make_intr_stub(x, n) \
+    .global amd64_intr ## n; \
+    amd64_intr ## n:; \
+    x(n); \
+    cli; \
+    ; \
+    push_regs; \
+    ; \
+    movw $0x10, %ax; \
+    movw %ax, %ds; \
+    movw %ax, %es; \
+    ; \
+    cld; \
+    ; \
+    movq %rsp, %rdi; \
+    ; \
+    movq %cr3, %rax; pushq %rax; \
+    ; \
+    movq %rsp, %rbp; \
+    ; \
+    subq $8, %rsp; \
+    andq $-16, %rsp; \
+    ; \
+    callq amd64_intr_handler; \
+    ; \
+    movq %rbp, %rsp; \
+    ; \
+    popq %rax; movq %rax, %cr3; \
+    ; \
+    pop_regs; \
+    addq $16, %rsp; \
+    ; \
     iretq;
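
Two fixes hide in the rewritten stub. The movw block reloads %ds and %es with GDT_KDATA (0x10): an interrupt taken in ring 3 otherwise leaves the user's data selectors live while kernel code runs. The subq $8 / andq $-16 pair keeps the SysV ABI's 16-byte stack alignment at the callq ($~0xF and $-16 are the same mask; the new spelling is just clearer). Since movq %rsp, %rdi passes the frame to amd64_intr_handler, the pushes imply a layout roughly like this (a sketch; the kernel's real struct saved_regs may name and order fields differently):

    struct saved_regs_sketch {
        uint64_t gprs[15];   /* pushed by push_regs (assumed r15..rax order) */
        uint64_t vector;     /* pushq $z from err()/no_err() */
        uint64_t error_code; /* CPU-pushed, or the faked pushq $0 */
        uint64_t rip, cs, rflags, rsp, ss; /* CPU interrupt frame */
    };

addq $16, %rsp after pop_regs drops vector and error_code, so iretq lands on the saved rip.
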
@@ -95,3 +97,4 @@ make_intr_stub(no_err, 47)
 make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
 make_intr_stub(no_err, TLB_SHOOTDOWN)
 make_intr_stub(no_err, CPU_REQUEST_SCHED)
+make_intr_stub(no_err, CPU_SPURIOUS)
@@ -37,7 +37,11 @@ static uintptr_t amd64_current_cr3 (void) {

 /* Load kernel CR3 as current CR3 */
 void amd64_load_kernel_cr3 (void) {
-    __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
+    uintptr_t cr3 = amd64_current_cr3 ();
+
+    if (cr3 != kernel_pd.cr3_paddr) {
+        __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
+    }
 }

 /* Extract PML info from virtual address */
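
A mov to CR3 flushes all non-global TLB entries, so the reload is now skipped when the kernel page directory is already active. For reference, the helper named in the hunk header is presumably the obvious inline-asm read (a sketch; its real body sits outside this hunk):

    static uintptr_t amd64_current_cr3 (void) {
        uintptr_t cr3;
        __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3));
        return cr3;
    }
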
@@ -2,9 +2,8 @@
 .global amd64_do_sched
 amd64_do_sched:
     cli
     movq %rsi, %cr3
     movq %rdi, %rsp
     pop_regs
-    add $16, %rsp
+    addq $16, %rsp
     iretq
@@ -7,6 +7,8 @@
 #include <sys/smp.h>

 void do_sched (struct proc* proc) {
     __asm__ volatile ("cli");

     thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
+    thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
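
Unlike an interrupt, SYSCALL switches no stack: tss.rsp0 only covers the interrupt path, so the entry stub fetches a kernel stack by hand from the per-CPU area (it saves the user rsp at %gs:0 and loads %gs:8 as the new rsp; see the entry stub hunk below). do_sched therefore has to refresh both locations on every context switch; forgetting the second one lets the next syscall of the new process run on the previous process's kernel stack. The layout those offsets imply, as a sketch (the real struct cpu may order fields differently):

    struct cpu_gs_area_sketch {
        uint64_t scratch_user_rsp;     /* %gs:0, written by the syscall stub  */
        uint64_t syscall_kernel_stack; /* %gs:8, refreshed by do_sched above  */
    };
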
@@ -61,7 +61,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
     amd64_init (cpu, true); /* gdt + idt */
     syscall_init ();

-    amd64_lapic_init (0);
+    amd64_lapic_init (1000);

     DEBUG ("CPU %u is online!\n", thiscpu->id);
@@ -75,7 +75,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
 /// Initialize SMP subsystem for AMD64. Start AP CPUs
 void smp_init (void) {
-    amd64_lapic_init (10000);
+    amd64_lapic_init (1000);

     struct limine_mp_response* mp = limine_mp_request.response;
@@ -23,6 +23,7 @@ struct cpu {
     volatile struct tss tss;

     uintptr_t lapic_mmio_base;
+    uint64_t lapic_ticks;
     uint32_t id;

     struct {
@@ -26,7 +26,7 @@ int amd64_syscall_dispatch (void* stack_ptr) {
     __asm__ volatile ("sti");

-    int result = func (caller, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
+    int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);

     __asm__ volatile ("cli");
@@ -34,7 +34,8 @@ int amd64_syscall_dispatch (void* stack_ptr) {
 }

 void syscall_init (void) {
-    amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_UCODE - 16) << 48));
+    amd64_wrmsr (MSR_STAR,
+                 ((uint64_t)(GDT_KCODE | 0x03) << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
     amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
     amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
     amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);
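
The STAR rewrite is the heart of the commit. SYSRET builds its selectors from STAR[63:48]: SS = base + 8 and CS = base + 16, both with RPL forced to 3. Working through both values:

    /* New: STAR[63:48] = GDT_KDATA | 3 = 0x13
     *   SYSRET SS = 0x13 + 8  = 0x1b = GDT_UDATA | 3  (user data, DPL 3)
     *   SYSRET CS = 0x13 + 16 = 0x23 = GDT_UCODE | 3  (user code, DPL 3)
     *
     * Old: STAR[63:48] = GDT_UCODE - 16 = 0x08
     *   SYSRET SS = 0x08 + 8 = 0x13, the kernel data descriptor with RPL 3,
     *   a DPL/RPL mismatch that faults as soon as the stack is checked;
     *   hence the "randomly crashing" user apps of the commit title. */

MSR_SYSCALL_MASK bit 9 clears RFLAGS.IF on entry, so the stub below runs with interrupts off until it has a kernel stack.
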
@@ -4,39 +4,40 @@
 .global amd64_syscall_entry
 amd64_syscall_entry:
     cli

     movq %rsp, %gs:0
     movq %gs:8, %rsp

-    pushq $0x23
+    pushq $0x1b
     pushq %gs:0
     pushq %r11
-    pushq $0x1b
+    pushq $0x23
     pushq %rcx
     pushq $0
     pushq $0

     push_regs

     movw $0x10, %ax
     movw %ax, %ds
     movw %ax, %es
     movw %ax, %ss

     cld

     movq %rsp, %rdi

-    movq %cr3, %rax
-    pushq %rax
+    movq %cr3, %rax; pushq %rax

     movq %rsp, %rbp

     subq $8, %rsp
-    andq $~0xF, %rsp
+    andq $-16, %rsp

     callq amd64_syscall_dispatch

     movq %rbp, %rsp

-    popq %rax
-    movq %rax, %cr3
+    popq %rax; movq %rax, %cr3

     pop_regs_skip_rax
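
The stub hand-builds the frame an interrupt would have produced, so syscalls, interrupts, and the scheduler's amd64_do_sched all share one saved-regs shape: SYSCALL left the return rip in %rcx and rflags in %r11, and the two pushq $0 fill the error-code/vector slots. The pushed selectors must mirror the reordered GDT, which is exactly what the 0x1b/0x23 swap does. Read bottom-up, the pushes produce (sketch naming, matching the interrupt-stub layout assumed earlier):

    struct syscall_frame_sketch {
        /* push_regs saves the GPRs below this point */
        uint64_t vector;     /* pushq $0 */
        uint64_t error_code; /* pushq $0 */
        uint64_t rip;        /* pushq %rcx, the SYSCALL return address */
        uint64_t cs;         /* pushq $0x23 = GDT_UCODE | 3 */
        uint64_t rflags;     /* pushq %r11, saved by SYSCALL */
        uint64_t rsp;        /* pushq %gs:0, the saved user stack */
        uint64_t ss;         /* pushq $0x1b = GDT_UDATA | 3 */
    };
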