diff --git a/kernel/amd64/apic.c b/kernel/amd64/apic.c index 8abd8df..16b1e5d 100644 --- a/kernel/amd64/apic.c +++ b/kernel/amd64/apic.c @@ -270,5 +270,5 @@ void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) { } amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24)); - amd64_lapic_write (LAPIC_ICR, vec); + amd64_lapic_write (LAPIC_ICR, vec | (1 << 14)); } diff --git a/kernel/amd64/intr.c b/kernel/amd64/intr.c index 7ada73a..d750874 100644 --- a/kernel/amd64/intr.c +++ b/kernel/amd64/intr.c @@ -165,10 +165,15 @@ static void amd64_intr_exception (struct saved_regs* regs) { /* Handle incoming interrupt, dispatch IRQ handlers. */ void amd64_intr_handler (void* stack_ptr) { + spin_lock_ctx_t ctxcpu; amd64_load_kernel_cr3 (); struct saved_regs* regs = stack_ptr; + spin_lock (&thiscpu->lock, &ctxcpu); + memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs)); + spin_unlock (&thiscpu->lock, &ctxcpu); + if (regs->trap <= 31) { amd64_intr_exception (regs); } else { @@ -177,13 +182,7 @@ void amd64_intr_handler (void* stack_ptr) { struct irq* irq = irq_find (regs->trap); if (irq != NULL) { - if ((irq->flags & IRQ_INTERRUPT_SAFE)) - __asm__ volatile ("sti"); - irq->func (irq->arg, stack_ptr); - - if ((irq->flags & IRQ_INTERRUPT_SAFE)) - __asm__ volatile ("cli"); } } } diff --git a/kernel/amd64/mm.c b/kernel/amd64/mm.c index cea9a6f..898cff1 100644 --- a/kernel/amd64/mm.c +++ b/kernel/amd64/mm.c @@ -418,9 +418,7 @@ static void amd64_tlb_shootdown_irq (void* arg, void* regs) { /* Continue initializing memory management subsystem for AMD64 after the essential parts were * initialized */ -void mm_init2 (void) { - irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE); -} +void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); } /* Initialize essentials for the AMD64 memory management subsystem */ void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); } diff --git a/kernel/amd64/smp.c b/kernel/amd64/smp.c index 
a722f96..7651cc4 100644 --- a/kernel/amd64/smp.c +++ b/kernel/amd64/smp.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,11 @@ struct cpu* cpu_get (void) { } void cpu_request_sched (struct cpu* cpu) { + if (cpu == thiscpu) { + proc_sched (); + return; + } + struct limine_mp_response* mp = limine_mp_request.response; for (size_t i = 0; i < mp->cpu_count; i++) { diff --git a/kernel/amd64/smp.h b/kernel/amd64/smp.h index 4fd5102..a8c7ac8 100644 --- a/kernel/amd64/smp.h +++ b/kernel/amd64/smp.h @@ -2,6 +2,7 @@ #define _KERNEL_AMD64_SMP_H #include +#include #include #include #include @@ -20,6 +21,7 @@ struct cpu { volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16); volatile struct gdt_extended gdt ALIGNED (16); volatile struct tss tss; + struct saved_regs regs; uintptr_t lapic_mmio_base; uint64_t lapic_ticks; diff --git a/kernel/amd64/syscall.c b/kernel/amd64/syscall.c index e604b72..dd7da56 100644 --- a/kernel/amd64/syscall.c +++ b/kernel/amd64/syscall.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -13,10 +14,16 @@ extern void amd64_syscall_entry (void); int amd64_syscall_dispatch (void* stack_ptr) { + spin_lock_ctx_t ctxcpu; + amd64_load_kernel_cr3 (); struct saved_regs* regs = stack_ptr; + spin_lock (&thiscpu->lock, &ctxcpu); + memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs)); + spin_unlock (&thiscpu->lock, &ctxcpu); + int syscall_num = regs->rax; syscall_handler_func_t func = syscall_find_handler (syscall_num); diff --git a/kernel/irq/irq.c b/kernel/irq/irq.c index e3edb18..4fb3bbd 100644 --- a/kernel/irq/irq.c +++ b/kernel/irq/irq.c @@ -14,7 +14,7 @@ struct irq* irq_table[0x100]; static rw_spin_lock_t irqs_lock; -bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) { +bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) { spin_lock_ctx_t ctxiqa; struct irq* irq = malloc (sizeof (*irq)); @@ -25,7 +25,6 @@ bool 
irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3 irq->func = func; irq->arg = arg; irq->irq_num = irq_num; - irq->flags = flags; rw_spin_write_lock (&irqs_lock, &ctxiqa); irq_table[irq_num] = irq; diff --git a/kernel/irq/irq.h b/kernel/irq/irq.h index 145b56d..418fba2 100644 --- a/kernel/irq/irq.h +++ b/kernel/irq/irq.h @@ -4,9 +4,6 @@ #include #include -#define IRQ_INTERRUPT_SAFE (1 << 0) -#define IRQ_INTERRUPT_UNSAFE (1 << 1) - typedef void (*irq_func_t) (void* arg, void* regs); struct irq { @@ -15,10 +12,9 @@ struct irq { irq_func_t func; void* arg; uint32_t irq_num; - uint32_t flags; }; -bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags); +bool irq_attach (irq_func_t, void* arg, uint32_t irq_num); struct irq* irq_find (uint32_t irq_num); #endif // _KERNEL_IRQ_IRQ_H diff --git a/kernel/proc/proc.c b/kernel/proc/proc.c index d3b8063..f58b480 100644 --- a/kernel/proc/proc.c +++ b/kernel/proc/proc.c @@ -289,7 +289,7 @@ static void proc_reap (void) { } } -void proc_sched (void* regs) { +void proc_sched (void) { spin_lock_ctx_t ctxcpu, ctxpr; int s_cycles = atomic_fetch_add (&sched_cycles, 1); @@ -303,15 +303,14 @@ void proc_sched (void) { spin_lock (&cpu->lock, &ctxcpu); struct proc* prev = cpu->proc_current; + next = proc_find_sched (cpu); if (prev != NULL) { spin_lock (&prev->lock, &ctxpr); - memcpy (&prev->pdata.regs, regs, sizeof (struct saved_regs)); + memcpy (&prev->pdata.regs, &cpu->regs, sizeof (struct saved_regs)); spin_unlock (&prev->lock, &ctxpr); } - next = proc_find_sched (cpu); - if (next) { cpu->proc_current = next; spin_unlock (&cpu->lock, &ctxcpu); @@ -348,7 +346,7 @@ void proc_kill (struct proc* proc) { static void proc_irq_sched (void* arg, void* regs) { (void)arg; - proc_sched (regs); + proc_sched (); } static void proc_kpproc_init (void) { @@ -405,8 +403,8 @@ void proc_init (void) { #if defined(__x86_64__) - irq_attach (&proc_irq_sched, NULL, 
SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE); - irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_SAFE); + irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER); + irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED); #endif proc_kpproc_init (); diff --git a/kernel/proc/proc.h b/kernel/proc/proc.h index ca0648f..c21fa82 100644 --- a/kernel/proc/proc.h +++ b/kernel/proc/proc.h @@ -60,7 +60,7 @@ struct proc { struct proc_resources* resources; }; -void proc_sched (void* regs); +void proc_sched (void); void proc_kill (struct proc* proc); bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages, uint32_t flags); diff --git a/kernel/syscall/syscall.c b/kernel/syscall/syscall.c index 4afc457..766a370 100644 --- a/kernel/syscall/syscall.c +++ b/kernel/syscall/syscall.c @@ -179,7 +179,7 @@ DEFINE_SYSCALL (sys_clone) { /* int sched (void) */ DEFINE_SYSCALL (sys_sched) { - proc_sched (regs); + proc_sched (); return ST_OK; }