Files
mop3/kernel/amd64/smp.c
kamkow1 8650010992
All checks were successful
Build documentation / build-and-deploy (push) Successful in 31s
Fix user CPU context saving
2026-01-25 17:39:34 +01:00

115 lines
2.6 KiB
C

#include <amd64/apic.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscall.h>
/// Cpu ID counter — next free slot in `cpus`; bumped atomically by cpu_make.
static atomic_uint cpu_counter = 0;
/// The CPUs — static per-CPU table, indexed by the id handed out above.
static struct cpu cpus[CPUS_MAX];
/// Number of APs still booting: set by smp_init, decremented once by each
/// AP in amd64_smp_bootstrap; smp_init spins until it reaches zero.
static atomic_int cpu_init_count;
/// Allocate a CPU structure
struct cpu* cpu_make (uint64_t lapic_id) {
int id = atomic_fetch_add (&cpu_counter, 1);
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
cpu->lapic_id = lapic_id;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
return cpu;
}
/// Return the per-CPU structure of the calling CPU.
///
/// Reads back the pointer that cpu_make() stashed in MSR_GS_BASE.
struct cpu* cpu_get (void) {
    return (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
}
/// Ask `cpu` to run its scheduler.
///
/// For the local CPU the scheduler is entered directly; a remote CPU is
/// poked with a CPU_REQUEST_SCHED IPI instead.
void cpu_request_sched (struct cpu* cpu) {
    if (cpu != thiscpu)
        amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
    else
        proc_sched ();
}
/// Return the CPU with the fewest runnable processes.
///
/// Linear scan over every registered CPU; on a tie the lowest-indexed
/// CPU wins. Loads are atomic snapshots, so the result may be slightly
/// stale under concurrent scheduling.
struct cpu* cpu_find_lightest (void) {
    struct cpu* best = &cpus[0];
    int best_load = atomic_load (&best->proc_run_q_count);
    for (unsigned int i = 1; i < cpu_counter; i++) {
        int load = atomic_load (&cpus[i].proc_run_q_count);
        if (load < best_load) {
            best_load = load;
            best = &cpus[i];
        }
    }
    return best;
}
/// Bootstrap code for non-BSP CPUs
///
/// Limine jumps each AP here (installed as goto_address in smp_init).
/// Runs once per AP and never returns: it tail-calls into the scheduler.
/// The ordering below is load-bearing: kernel page tables first, then the
/// per-CPU slot, then GDT/IDT, then the LAPIC timer.
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
/* Switch to the kernel's address space before touching kernel data. */
amd64_load_kernel_cr3 ();
/* Claim a per-CPU slot; also sets MSR_GS_BASE so thiscpu works below. */
struct cpu* cpu = cpu_make (mp_info->lapic_id);
amd64_init (cpu, true); /* gdt + idt */
syscall_init ();
/* Start the local APIC timer (presumably 1000 Hz — confirm the unit). */
amd64_lapic_init (1000);
DEBUG ("CPU %u is online!\n", thiscpu->id);
/* Report completion to the BSP, which spins on this in smp_init. */
atomic_fetch_sub (&cpu_init_count, 1);
/* Give this CPU an idle-style process and enter the scheduler under the
   run-queue lock. NOTE(review): assumes proc_register sets
   spin_proc->cpu to thiscpu, and that do_sched releases the lock and
   does not return — confirm against proc/sched implementation. */
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}
/// Initialize SMP subsystem for AMD64. Start AP CPUs
///
/// Installs amd64_smp_bootstrap as the Limine goto_address of every CPU
/// except the BSP, then busy-waits until each AP has decremented
/// cpu_init_count. Must run on the BSP after its own cpu_make/amd64_init.
void smp_init (void) {
    amd64_lapic_init (1000);
    struct limine_mp_response* mp = limine_mp_request.response;
    /* The bootloader may not have answered the MP request (feature
       unsupported / revision mismatch); degrade to BSP-only instead of
       dereferencing NULL. */
    if (mp == NULL) {
        DEBUG ("No MP response from bootloader; running with BSP only\n");
        return;
    }
    cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
    for (size_t i = 0; i < mp->cpu_count; i++) {
        /* Skip the BSP: it is already running this code. */
        if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
            DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
            /* Writing goto_address releases the AP into the bootstrap. */
            mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
        }
    }
    /* Spin until every AP has checked in via amd64_smp_bootstrap. */
    while (atomic_load (&cpu_init_count) > 0)
        ;
    DEBUG ("All CPUs are online\n");
}