Fix CPU load balancer bugs, scheduling points support for remote CPUs
All checks were successful
Build documentation / build-and-deploy (push) Successful in 28s

This commit is contained in:
2026-02-05 23:44:32 +01:00
parent 5283787a80
commit 5fe9d0a158
19 changed files with 129 additions and 79 deletions

View File

@@ -32,7 +32,7 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
void bootmain (void) {
struct limine_mp_response* mp = limine_mp_request.response;
struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);
struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id, 0);
amd64_init (bsp_cpu, false);
syscall_init ();

View File

@@ -157,7 +157,9 @@ static void amd64_intr_exception (struct saved_regs* regs) {
regs->rbx);
if (regs->cs == (GDT_UCODE | 0x03)) {
proc_kill (thiscpu->proc_current);
struct cpu* reschedule_cpu;
if (proc_kill (thiscpu->proc_current, &reschedule_cpu) == PROC_NEED_RESCHEDULE)
cpu_request_sched (reschedule_cpu);
} else {
spin ();
}

View File

@@ -15,22 +15,19 @@
#include <sys/smp.h>
#include <sys/syscall.h>
/// Cpu ID counter
static atomic_uint cpu_counter = 0;
/// The CPUs
static struct cpu cpus[CPUS_MAX];
static atomic_int cpu_init_count;
static atomic_int last_cpu_index = 0;
static atomic_int cpu_counter;
/// Allocate a CPU structure
struct cpu* cpu_make (uint64_t lapic_id) {
int id = atomic_fetch_add (&cpu_counter, 1);
struct cpu* cpu = &cpus[id];
struct cpu* cpu_make (uint64_t lapic_id, uint64_t cpu_id) {
struct cpu* cpu = &cpus[cpu_id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
cpu->id = cpu_id;
cpu->lapic_id = lapic_id;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
@@ -53,27 +50,30 @@ void cpu_request_sched (struct cpu* cpu) {
}
/// Find the CPU currently carrying the lightest run-queue load.
///
/// NOTE(review): this span is a rendered unified diff with the +/-
/// markers stripped, which interleaved the old and new bodies into
/// invalid C. This is the reconstructed post-change implementation.
///
/// The scan starts from a rotating index (last_cpu_index) rather than
/// always from CPU 0, so that when several CPUs have equal load the
/// winner rotates round-robin instead of piling work on CPU 0.
///
/// @return pointer to the least-loaded CPU structure (never NULL,
///         assuming mp->cpu_count >= 1 — guaranteed by Limine's MP
///         response containing at least the BSP; TODO confirm)
struct cpu* cpu_find_lightest (void) {
    struct limine_mp_response* mp = limine_mp_request.response;
    /* Rotate the starting point to break load ties fairly.
     * NOTE(review): atomic_fetch_add on an int will eventually wrap;
     * the modulo of a negative value is implementation-sign here —
     * consider an unsigned counter. */
    int start = atomic_fetch_add (&last_cpu_index, 1) % mp->cpu_count;
    struct cpu* best_cpu = &cpus[start];
    int best_load = atomic_load (&best_cpu->proc_run_q_count);
    /* Scan the remaining CPUs in circular order from `start`. */
    for (int i = 1; i < (int)mp->cpu_count; i++) {
        int idx = (start + i) % mp->cpu_count;
        struct cpu* candidate = &cpus[idx];
        int load = atomic_load (&candidate->proc_run_q_count);
        if (load < best_load) {
            best_load = load;
            best_cpu = candidate;
        }
    }
    return best_cpu;
}
/// Bootstrap code for non-BSP CPUs
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_load_kernel_cr3 ();
struct cpu* cpu = cpu_make (mp_info->lapic_id);
struct cpu* cpu = cpu_make (mp_info->lapic_id, mp_info->processor_id);
amd64_init (cpu, true); /* gdt + idt */
syscall_init ();
@@ -82,10 +82,11 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
DEBUG ("CPU %u is online!\n", thiscpu->id);
atomic_fetch_sub (&cpu_init_count, 1);
atomic_fetch_sub (&cpu_counter, 1);
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);
struct cpu* spin_cpu = thiscpu;
proc_register (spin_proc, &spin_cpu);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
@@ -98,7 +99,7 @@ void smp_init (void) {
struct limine_mp_response* mp = limine_mp_request.response;
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
cpu_counter = mp->cpu_count - 1;
for (size_t i = 0; i < mp->cpu_count; i++) {
if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
@@ -106,8 +107,6 @@ void smp_init (void) {
}
}
while (atomic_load (&cpu_init_count) > 0)
while (atomic_load (&cpu_counter) > 0)
;
DEBUG ("All CPUs are online\n");
}

View File

@@ -34,7 +34,7 @@ struct cpu {
atomic_int proc_run_q_count;
};
struct cpu* cpu_make (uint64_t lapic_id);
struct cpu* cpu_make (uint64_t lapic_id, uint64_t cpu_id);
struct cpu* cpu_get (void);
void cpu_request_sched (struct cpu* cpu);
struct cpu* cpu_find_lightest (void);

View File

@@ -21,6 +21,7 @@ uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* caller = thiscpu->proc_current;
int caller_pid = caller->pid;
spin_lock (&caller->lock, &ctxpr);
memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
@@ -35,7 +36,24 @@ uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
return -ST_SYSCALL_NOT_FOUND;
}
return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
bool reschedule = false;
struct cpu* reschedule_cpu = NULL;
uintptr_t r = func (caller, regs, &reschedule, &reschedule_cpu, regs->rdi, regs->rsi, regs->rdx,
regs->r10, regs->r8, regs->r9);
caller = proc_find_pid (caller_pid);
if (caller != NULL) {
spin_lock (&caller->lock, &ctxpr);
caller->pdata.regs.rax = r;
spin_unlock (&caller->lock, &ctxpr);
}
if (reschedule)
cpu_request_sched (reschedule_cpu);
return r;
}
void syscall_init (void) {