Redesign reschedule points: allow one operation to reschedule many CPUs at once
All checks were successful
Build documentation / build-and-deploy (push) Successful in 2m12s

This commit is contained in:
2026-02-18 23:16:03 +01:00
parent ae0a6024da
commit f103bdd739
39 changed files with 376 additions and 223 deletions

View File

@@ -16,10 +16,12 @@
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#include <sys/spin.h>
#include <sys/spin_lock.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <uacpi/uacpi.h>
@@ -51,14 +53,9 @@ void bootmain (void) {
devices_init ();
vfs_init ();
bool reschedule = false;
struct device* ramdisk_device = device_find (RAMDISK_DEVICE);
struct device_op_ctx op_ctx = {
.proc = NULL,
.reschedule_cpu = NULL,
.reschedule = &reschedule,
};
int ret = vfs_create_mountpoint ("ramdisk", VFS_TARFS, ramdisk_device, &op_ctx);
struct reschedule_ctx rctx = {.entries = NULL, .lock = SPIN_LOCK_INIT};
int ret = vfs_create_mountpoint ("ramdisk", VFS_TARFS, ramdisk_device, NULL, &rctx);
if (ret < 0) {
DEBUG ("could not mount ramdisk! (%d)\n", ret);

View File

@@ -26,9 +26,13 @@ static bool amd64_debug_serial_tx_empty (void) {
/* Write a single character to serial */
static void amd64_debug_serial_write (char x) {
spin_lock (&serial_lock);
while (!amd64_debug_serial_tx_empty ())
;
amd64_io_outb (PORT_COM1, (uint8_t)x);
spin_unlock (&serial_lock);
}
/*
@@ -50,14 +54,10 @@ void debugprintf (const char* fmt, ...) {
const char* p = buffer;
spin_lock (&serial_lock);
while (*p) {
amd64_debug_serial_write (*p);
p++;
}
spin_unlock (&serial_lock);
}
/* Initialize serial */

View File

@@ -8,6 +8,8 @@
#include <libk/std.h>
#include <libk/string.h>
#include <m/syscall_defs.h>
#include <mm/liballoc.h>
#include <proc/reschedule.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/spin.h>
@@ -155,9 +157,34 @@ static void amd64_intr_exception (struct saved_regs* regs) {
regs->rbx);
if (regs->cs == (GDT_UCODE | 0x03)) {
struct cpu* reschedule_cpu;
if (proc_kill (thiscpu->proc_current, &reschedule_cpu) == PROC_NEED_RESCHEDULE)
cpu_request_sched (reschedule_cpu);
struct reschedule_ctx rctx = {.entries = NULL, .lock = SPIN_LOCK_INIT};
proc_kill (thiscpu->proc_current, &rctx);
bool reschedule_thiscpu = false;
spin_lock (&rctx.lock);
struct list_node_link *node, *tmp;
list_foreach (rctx.entries, node, tmp) {
struct reschedule_entry* entry = list_entry (node, struct reschedule_entry, link);
struct cpu* cpu = entry->cpu;
if (cpu != thiscpu) {
cpu_request_sched (cpu);
} else {
reschedule_thiscpu = true;
}
list_remove (rctx.entries, &entry->link);
free (entry);
}
spin_unlock (&rctx.lock);
if (reschedule_thiscpu) {
proc_sched ();
}
} else {
spin ();
}
@@ -185,12 +212,35 @@ void amd64_intr_handler (void* stack_ptr) {
struct irq* irq = irq_find (regs->trap);
if (irq != NULL) {
struct cpu* reschedule_cpu = NULL;
bool reschedule = irq->func (&reschedule_cpu, irq->arg, stack_ptr);
if (irq == NULL)
return;
if (reschedule)
cpu_request_sched (reschedule_cpu);
struct reschedule_ctx rctx = {.entries = NULL, .lock = SPIN_LOCK_INIT};
irq->func (irq->arg, stack_ptr, &rctx);
bool reschedule_thiscpu = false;
spin_lock (&rctx.lock);
struct list_node_link *node, *tmp;
list_foreach (rctx.entries, node, tmp) {
struct reschedule_entry* entry = list_entry (node, struct reschedule_entry, link);
struct cpu* cpu = entry->cpu;
if (cpu != thiscpu) {
cpu_request_sched (cpu);
} else {
reschedule_thiscpu = true;
}
list_remove (rctx.entries, &entry->link);
free (entry);
}
spin_unlock (&rctx.lock);
if (reschedule_thiscpu) {
proc_sched ();
}
}
}

View File

@@ -99,14 +99,14 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent
return proc;
}
void proc_cleanup (struct proc* proc) {
void proc_cleanup (struct proc* proc, struct reschedule_ctx* rctx) {
proc_sqs_cleanup (proc);
proc_mutexes_cleanup (proc);
proc_mutexes_cleanup (proc, rctx);
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
procgroup_detach (proc->procgroup, proc);
procgroup_detach (proc->procgroup, proc, rctx);
/* clean the process */
free (proc);

View File

@@ -2,6 +2,7 @@
#include <amd64/msr.h>
#include <amd64/sched.h>
#include <libk/std.h>
#include <libk/string.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
@@ -14,8 +15,12 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock) {
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
void* cr3 = (void*)proc->procgroup->pd.cr3_paddr;
struct saved_regs regs;
memcpy (&regs, &proc->pdata.regs, sizeof (regs));
spin_unlock (&proc->lock);
spin_unlock (cpu_lock);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
amd64_do_sched ((void*)&regs, cr3);
}

View File

@@ -9,6 +9,7 @@
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/sched.h>
@@ -88,8 +89,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
atomic_fetch_sub (&cpu_counter, 1);
struct proc* spin_proc = proc_from_file (NULL, "ramdisk", "/spin");
struct cpu* spin_cpu = thiscpu;
proc_register (spin_proc, &spin_cpu);
proc_register (spin_proc, thiscpu, NULL);
spin_lock (&spin_proc->cpu->lock);
do_sched (spin_proc, &spin_proc->cpu->lock);

View File

@@ -3,10 +3,13 @@
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/list.h>
#include <libk/string.h>
#include <m/status.h>
#include <m/syscall_defs.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <syscall/syscall.h>
@@ -34,11 +37,10 @@ uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
return -ST_SYSCALL_NOT_FOUND;
}
bool reschedule = false;
struct reschedule_ctx rctx = {.entries = NULL, .lock = SPIN_LOCK_INIT};
struct cpu* reschedule_cpu = NULL;
uintptr_t r = func (caller, regs, &reschedule, &reschedule_cpu, regs->rdi, regs->rsi, regs->rdx,
regs->r10, regs->r8, regs->r9);
uintptr_t r =
func (caller, regs, &rctx, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
caller = proc_find_pid (caller_pid);
@@ -48,8 +50,30 @@ uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
spin_unlock (&caller->lock);
}
if (reschedule)
cpu_request_sched (reschedule_cpu);
bool reschedule_thiscpu = false;
spin_lock (&rctx.lock);
struct list_node_link *node, *tmp;
list_foreach (rctx.entries, node, tmp) {
struct reschedule_entry* entry = list_entry (node, struct reschedule_entry, link);
struct cpu* cpu = entry->cpu;
if (cpu != thiscpu) {
cpu_request_sched (cpu);
} else {
reschedule_thiscpu = true;
}
list_remove (rctx.entries, &entry->link);
free (entry);
}
spin_unlock (&rctx.lock);
if (reschedule_thiscpu) {
proc_sched ();
}
return r;
}