Disable kernel preemption, fix rescheduling requests
All checks were successful
Build documentation / build-and-deploy (push) Successful in 29s
@@ -270,5 +270,5 @@ void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
 	}
 
 	amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
-	amd64_lapic_write (LAPIC_ICR, vec);
+	amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
 }

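The one-line change above is the delivery fix: bit 14 of the ICR low dword is the Level flag, and the Intel SDM requires Level = 1 (assert) for every delivery mode except INIT level de-assert, so an IPI written without it may never arrive. A minimal sketch of the fields involved; the macro names are illustrative, only LAPIC_ICR comes from the diff:

/* Illustrative ICR field helpers for the write sequence above. */
#define ICR_VECTOR(v)    ((uint32_t)(v) & 0xff) /* bits 0-7: vector        */
#define ICR_LEVEL_ASSERT (1u << 14)             /* bit 14: Level = Assert  */
#define ICR_DEST(id)     ((uint32_t)(id) << 24) /* ICR+0x10: dest APIC ID  */

/* Destination goes first; the write to the low dword sends the IPI:
 *   amd64_lapic_write (LAPIC_ICR + 0x10, ICR_DEST (lapic_id));
 *   amd64_lapic_write (LAPIC_ICR, ICR_VECTOR (vec) | ICR_LEVEL_ASSERT);
 */
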
@@ -165,10 +165,15 @@ static void amd64_intr_exception (struct saved_regs* regs) {
 
 /* Handle incoming interrupt, dispatch IRQ handlers. */
 void amd64_intr_handler (void* stack_ptr) {
+	spin_lock_ctx_t ctxcpu;
 	amd64_load_kernel_cr3 ();
 
 	struct saved_regs* regs = stack_ptr;
 
+	spin_lock (&thiscpu->lock, &ctxcpu);
+	memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs));
+	spin_unlock (&thiscpu->lock, &ctxcpu);
+
 	if (regs->trap <= 31) {
 		amd64_intr_exception (regs);
 	} else {

@@ -177,13 +182,7 @@ void amd64_intr_handler (void* stack_ptr) {
 		struct irq* irq = irq_find (regs->trap);
 
 		if (irq != NULL) {
-			if ((irq->flags & IRQ_INTERRUPT_SAFE))
-				__asm__ volatile ("sti");
-
 			irq->func (irq->arg, stack_ptr);
-
-			if ((irq->flags & IRQ_INTERRUPT_SAFE))
-				__asm__ volatile ("cli");
 		}
 	}
 }

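This hunk is the "disable kernel preemption" half of the commit: handlers registered with IRQ_INTERRUPT_SAFE used to run with interrupts re-enabled, so any other vector, including the preemption timer, could interrupt the kernel mid-handler. Dispatch now always runs masked. A sketch of the window the old code opened; the clobbering consequence is an inference from the new snapshot code above:

/* Old path (removed above):
 *   __asm__ volatile ("sti");        // interrupts back on
 *   irq->func (irq->arg, stack_ptr); // SCHED_PREEMPT_TIMER may fire here,
 *                                    // re-entering amd64_intr_handler and
 *                                    // (presumably the motivation for this
 *                                    // commit) overwriting the thiscpu->regs
 *                                    // snapshot taken on entry
 *   __asm__ volatile ("cli");
 */
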
@@ -418,9 +418,7 @@ static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
 
 /* Continue initializing memory management subsystem for AMD64 after the essential parts were
  * initialized */
-void mm_init2 (void) {
-	irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
-}
+void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }
 
 /* Initialize essentials for the AMD64 memory management subsystem */
 void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

@@ -8,6 +8,7 @@
 #include <libk/string.h>
 #include <limine/requests.h>
 #include <mm/liballoc.h>
+#include <proc/proc.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/smp.h>

@@ -41,6 +42,11 @@ struct cpu* cpu_get (void) {
 }
 
 void cpu_request_sched (struct cpu* cpu) {
+	if (cpu == thiscpu) {
+		proc_sched ();
+		return;
+	}
+
 	struct limine_mp_response* mp = limine_mp_request.response;
 
 	for (size_t i = 0; i < mp->cpu_count; i++) {

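cpu_request_sched now short-circuits when the target is the CPU it is running on: a local reschedule needs no IPI. For remote CPUs it falls through to the Limine MP list; the loop body is cut off in the diff, so the following continuation is purely hypothetical, with assumed field names (mp->cpus[i]->lapic_id, cpu->lapic_id):

	/* Hypothetical continuation: match the target CPU in the MP list,
	 * then send the reschedule vector through the fixed IPI path above. */
	for (size_t i = 0; i < mp->cpu_count; i++) {
		if (mp->cpus[i]->lapic_id == cpu->lapic_id) {
			amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
			break;
		}
	}
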
@@ -2,6 +2,7 @@
 #define _KERNEL_AMD64_SMP_H
 
 #include <amd64/gdt.h>
+#include <amd64/intr.h>
 #include <amd64/tss.h>
 #include <aux/compiler.h>
 #include <libk/rbtree.h>

@@ -20,6 +21,7 @@ struct cpu {
 	volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
 	volatile struct gdt_extended gdt ALIGNED (16);
 	volatile struct tss tss;
+	struct saved_regs regs;
 
 	uintptr_t lapic_mmio_base;
 	uint64_t lapic_ticks;

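The new regs member is the pivot of the refactor: both kernel entry paths snapshot the trap frame into their own struct cpu under cpu->lock, which is what lets proc_sched drop its regs parameter. The flow, sketched using only names that appear in this diff:

/* Entry paths (amd64_intr_handler, amd64_syscall_dispatch):
 *   spin_lock   (&thiscpu->lock, &ctxcpu);
 *   memcpy      (&thiscpu->regs, regs, sizeof (struct saved_regs));
 *   spin_unlock (&thiscpu->lock, &ctxcpu);
 *
 * Consumer (proc_sched, running later on that same CPU):
 *   memcpy (&prev->pdata.regs, &cpu->regs, sizeof (struct saved_regs));
 */
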
@@ -3,6 +3,7 @@
 #include <amd64/mm.h>
 #include <amd64/msr-index.h>
 #include <amd64/msr.h>
+#include <libk/string.h>
 #include <m/status.h>
 #include <m/syscall_defs.h>
 #include <proc/proc.h>

@@ -13,10 +14,16 @@
 extern void amd64_syscall_entry (void);
 
 int amd64_syscall_dispatch (void* stack_ptr) {
+	spin_lock_ctx_t ctxcpu;
+
 	amd64_load_kernel_cr3 ();
 
 	struct saved_regs* regs = stack_ptr;
 
+	spin_lock (&thiscpu->lock, &ctxcpu);
+	memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs));
+	spin_unlock (&thiscpu->lock, &ctxcpu);
+
 	int syscall_num = regs->rax;
 	syscall_handler_func_t func = syscall_find_handler (syscall_num);
 

@@ -14,7 +14,7 @@ struct irq* irq_table[0x100];
 
 static rw_spin_lock_t irqs_lock;
 
-bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
+bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
 	spin_lock_ctx_t ctxiqa;
 
 	struct irq* irq = malloc (sizeof (*irq));

@@ -25,7 +25,6 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
 	irq->func = func;
 	irq->arg = arg;
 	irq->irq_num = irq_num;
-	irq->flags = flags;
 
 	rw_spin_write_lock (&irqs_lock, &ctxiqa);
 	irq_table[irq_num] = irq;

@@ -4,9 +4,6 @@
 #include <libk/list.h>
 #include <libk/std.h>
 
-#define IRQ_INTERRUPT_SAFE (1 << 0)
-#define IRQ_INTERRUPT_UNSAFE (1 << 1)
-
 typedef void (*irq_func_t) (void* arg, void* regs);
 
 struct irq {

@@ -15,10 +12,9 @@ struct irq {
 	irq_func_t func;
 	void* arg;
 	uint32_t irq_num;
-	uint32_t flags;
 };
 
-bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags);
+bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
 struct irq* irq_find (uint32_t irq_num);
 
 #endif // _KERNEL_IRQ_IRQ_H

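With flags gone from both the struct and the prototype, registration is just handler, argument, and vector, and every handler implicitly runs with interrupts masked. Usage exactly as it appears elsewhere in this commit:

irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN);
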
@@ -289,7 +289,7 @@ static void proc_reap (void) {
 	}
 }
 
-void proc_sched (void* regs) {
+void proc_sched (void) {
 	spin_lock_ctx_t ctxcpu, ctxpr;
 
 	int s_cycles = atomic_fetch_add (&sched_cycles, 1);

@@ -303,15 +303,13 @@ void proc_sched (void* regs) {
 	spin_lock (&cpu->lock, &ctxcpu);
 
 	struct proc* prev = cpu->proc_current;
+	next = proc_find_sched (cpu);
-
 	if (prev != NULL) {
 		spin_lock (&prev->lock, &ctxpr);
-		memcpy (&prev->pdata.regs, regs, sizeof (struct saved_regs));
+		memcpy (&prev->pdata.regs, &cpu->regs, sizeof (struct saved_regs));
 		spin_unlock (&prev->lock, &ctxpr);
 	}
 
-	next = proc_find_sched (cpu);
-
 	if (next) {
 		cpu->proc_current = next;
 		spin_unlock (&cpu->lock, &ctxcpu);

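Two changes land here: the outgoing context is saved from the per-CPU snapshot rather than from a parameter, and the proc_find_sched call is hoisted above the save. The net effect on the scheduler's callers, sketched as comments:

/* Before: every path into the scheduler threaded the trap frame through.
 *   proc_irq_sched (arg, regs) -> proc_sched (regs)
 *   sys_sched                  -> proc_sched (regs)
 *
 * After: the frame is already in cpu->regs, so all callers look alike.
 *   proc_irq_sched (arg, regs) -> proc_sched ()
 *   sys_sched                  -> proc_sched ()
 */
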
@@ -348,7 +346,7 @@ void proc_kill (struct proc* proc) {
 
 static void proc_irq_sched (void* arg, void* regs) {
 	(void)arg;
-	proc_sched (regs);
+	proc_sched ();
 }
 
 static void proc_kpproc_init (void) {

@@ -405,8 +403,8 @@ static void proc_kpproc_init (void) {
 
 void proc_init (void) {
 #if defined(__x86_64__)
-	irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
-	irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_SAFE);
+	irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
+	irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
 #endif
 
 	proc_kpproc_init ();

@@ -60,7 +60,7 @@ struct proc {
 	struct proc_resources* resources;
 };
 
-void proc_sched (void* regs);
+void proc_sched (void);
 void proc_kill (struct proc* proc);
 bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
                uint32_t flags);

@@ -179,7 +179,7 @@ DEFINE_SYSCALL (sys_clone) {
 
 /* int sched (void) */
 DEFINE_SYSCALL (sys_sched) {
-	proc_sched (regs);
+	proc_sched ();
 	return ST_OK;
 }
 