mop3/kernel/amd64/smp.c

#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <amd64/sse.h>
#include <fs/vfs.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/intr.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscall.h>
/// Per-CPU structures, indexed by CPU id
static struct cpu cpus[CPUS_MAX];
/// Number of APs that have not yet finished bootstrapping
static atomic_int cpu_counter;
/// Next free CPU id, handed out in cpu_make()
static atomic_int cpu_id_counter = 0;
/// Allocate and initialize the CPU structure for the calling CPU, and
/// publish its address through MSR_GS_BASE so cpu_get() can find it
struct cpu* cpu_make(uint64_t lapic_id, uint64_t acpi_id) {
    int id = atomic_fetch_add(&cpu_id_counter, 1);
    struct cpu* cpu = &cpus[id];
    memset(cpu, 0, sizeof(*cpu));
    cpu->lock = SPIN_LOCK_INIT;
    cpu->id = id;
    cpu->acpi_id = acpi_id;
    cpu->lapic_id = lapic_id;
    wrmsr(MSR_GS_BASE, (uint64_t)cpu);
    return cpu;
}
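/// Return the calling CPU's structure, read back from MSR_GS_BASE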
struct cpu* cpu_get(void) {
    struct cpu* ptr = (struct cpu*)rdmsr(MSR_GS_BASE);
    return ptr;
}
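/// Request a reschedule on the given CPU: run the scheduler directly on
/// the current CPU, or send a reschedule IPI to a remote one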
void cpu_request_sched(struct cpu* cpu, bool user) {
    if (cpu == thiscpu) {
        proc_sched(user);
        return;
    }
    lapic_ipi(cpu->lapic_id, INTR_CPU_REQUEST_SCHED);
}
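/// Find the least-loaded CPU by comparing run-queue lengths, scanning
/// round-robin starting from the current CPU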
struct cpu* cpu_find_lightest(void) {
    uint64_t fc;
    struct limine_mp_response* mp = limine_mp_request.response;
    int start = thiscpu->id;
    struct cpu* best_cpu = &cpus[start];
    spin_lock(&best_cpu->lock, &fc);
    int best_load = best_cpu->proc_run_q_count;
    spin_unlock(&best_cpu->lock, fc);
    for (int i = 1; i < (int)mp->cpu_count; i++) {
        int idx = (start + i) % mp->cpu_count;
        struct cpu* cpu = &cpus[idx];
        spin_lock(&cpu->lock, &fc);
        int l = cpu->proc_run_q_count;
        spin_unlock(&cpu->lock, fc);
        if (l < best_load) {
            best_load = l;
            best_cpu = cpu;
        }
        // An idle CPU cannot be beaten, so stop scanning early
        if (best_load == 0)
            break;
    }
    return best_cpu;
}
/// Bootstrap code for the APs (non-BSP CPUs)
static void smp_bootstrap(struct limine_mp_info* mp_info) {
    uint64_t fc;
    load_kernel_cr3();
    struct cpu* cpu = cpu_make(mp_info->lapic_id, mp_info->processor_id);
    gdt_init(cpu);
    idt_load();
    syscall_init();
    sse_enable();
    lapic_init(1000);
    DEBUG("CPU %u is online!\n", thiscpu->id);
    cpu->kproc = kproc_create();
    // Signal the BSP that this AP has finished bootstrapping
    atomic_fetch_sub(&cpu_counter, 1);
    intr_enable();
    // Create this CPU's spin process from /spin and switch to it
    struct reschedule_ctx rctx;
    memset(&rctx, 0, sizeof(rctx));
    struct proc* spin_proc = proc_from_file(thiscpu->kproc, "sys", "/spin", &rctx);
    proc_register(spin_proc, thiscpu, &rctx);
    spin_lock(&spin_proc->cpu->lock, &fc);
    do_sched(spin_proc, &spin_proc->cpu->lock);
    for (;;)
        ;
}
/// Initialize the SMP subsystem for AMD64 and start the AP CPUs
void smp_init(void) {
    struct limine_mp_response* mp = limine_mp_request.response;
    cpu_counter = mp->cpu_count - 1;
    for (size_t i = 0; i < mp->cpu_count; i++) {
        // Writing goto_address makes each parked AP jump to smp_bootstrap;
        // skip the calling BSP
        if (mp->cpus[i]->processor_id != thiscpu->acpi_id) {
            mp->cpus[i]->goto_address = &smp_bootstrap;
        }
    }
    DEBUG("Waiting for other CPUs:\n");
    while (atomic_load(&cpu_counter) > 0)
        ;
    DEBUG("All CPUs are up!\n");
}