Disable kernel preemption, fix rescheduling requests
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s

This commit is contained in:
2026-01-22 19:32:15 +01:00
parent c26fd3cb2b
commit 7bb3b77ede
11 changed files with 32 additions and 27 deletions

View File

@@ -270,5 +270,5 @@ void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
}
amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
amd64_lapic_write (LAPIC_ICR, vec);
amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
}

View File

@@ -165,10 +165,15 @@ static void amd64_intr_exception (struct saved_regs* regs) {
/* Handle incoming interrupt, dispatch IRQ handlers. */
void amd64_intr_handler (void* stack_ptr) {
spin_lock_ctx_t ctxcpu;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs));
spin_unlock (&thiscpu->lock, &ctxcpu);
if (regs->trap <= 31) {
amd64_intr_exception (regs);
} else {
@@ -177,13 +182,7 @@ void amd64_intr_handler (void* stack_ptr) {
struct irq* irq = irq_find (regs->trap);
if (irq != NULL) {
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("sti");
irq->func (irq->arg, stack_ptr);
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("cli");
}
}
}

View File

@@ -418,9 +418,7 @@ static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
/* Continue initializing memory management subsystem for AMD64 after the essential parts were
* initialized */
/* Hook the TLB-shootdown IRQ (marked interrupt-safe) now that the core MM pieces exist. */
void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE); }
void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }
/* Initialize essentials for the AMD64 memory management subsystem */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -8,6 +8,7 @@
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
@@ -41,6 +42,11 @@ struct cpu* cpu_get (void) {
}
void cpu_request_sched (struct cpu* cpu) {
if (cpu == thiscpu) {
proc_sched ();
return;
}
struct limine_mp_response* mp = limine_mp_request.response;
for (size_t i = 0; i < mp->cpu_count; i++) {

View File

@@ -2,6 +2,7 @@
#define _KERNEL_AMD64_SMP_H
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/rbtree.h>
@@ -20,6 +21,7 @@ struct cpu {
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
volatile struct gdt_extended gdt ALIGNED (16);
volatile struct tss tss;
struct saved_regs regs;
uintptr_t lapic_mmio_base;
uint64_t lapic_ticks;

View File

@@ -3,6 +3,7 @@
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/string.h>
#include <m/status.h>
#include <m/syscall_defs.h>
#include <proc/proc.h>
@@ -13,10 +14,16 @@
extern void amd64_syscall_entry (void);
int amd64_syscall_dispatch (void* stack_ptr) {
spin_lock_ctx_t ctxcpu;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs));
spin_unlock (&thiscpu->lock, &ctxcpu);
int syscall_num = regs->rax;
syscall_handler_func_t func = syscall_find_handler (syscall_num);