/// SMP support for AMD64: per-CPU state, lightest-CPU selection, and AP bringup.
#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <fs/vfs.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscall.h>
/// The CPUs
|
|
static struct cpu cpus[CPUS_MAX];
|
|
|
|
static atomic_int last_cpu_index = 0;
|
|
static atomic_int cpu_counter;
|
|
static atomic_int cpu_id_counter = 0;
|
|
|
|
/// Allocate a CPU structure
|
|
struct cpu* cpu_make (uint64_t lapic_id, uint64_t acpi_id) {
|
|
int id = atomic_fetch_add (&cpu_id_counter, 1);
|
|
struct cpu* cpu = &cpus[id];
|
|
|
|
memset (cpu, 0, sizeof (*cpu));
|
|
cpu->lock = SPIN_LOCK_INIT;
|
|
cpu->id = id;
|
|
cpu->acpi_id = acpi_id;
|
|
cpu->lapic_id = lapic_id;
|
|
|
|
wrmsr (MSR_GS_BASE, (uint64_t)cpu);
|
|
|
|
return cpu;
|
|
}
|
|
|
|
struct cpu* cpu_get (void) {
|
|
struct cpu* ptr = (struct cpu*)rdmsr (MSR_GS_BASE);
|
|
return ptr;
|
|
}
|
|
|
|
void cpu_request_sched (struct cpu* cpu) {
|
|
if (cpu == thiscpu) {
|
|
proc_sched ();
|
|
return;
|
|
}
|
|
|
|
lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
|
|
}
|
|
|
|
struct cpu* cpu_find_lightest (void) {
|
|
struct limine_mp_response* mp = limine_mp_request.response;
|
|
|
|
int start = atomic_fetch_add (&last_cpu_index, 1) % mp->cpu_count;
|
|
|
|
struct cpu* best_cpu = &cpus[start];
|
|
|
|
spin_lock (&best_cpu->lock);
|
|
int best_load = best_cpu->proc_run_q_count;
|
|
spin_unlock (&best_cpu->lock);
|
|
|
|
for (int i = 1; i < (int)mp->cpu_count; i++) {
|
|
int idx = (start + i) % mp->cpu_count;
|
|
|
|
struct cpu* cpu = &cpus[idx];
|
|
|
|
spin_lock (&cpu->lock);
|
|
int l = cpu->proc_run_q_count;
|
|
spin_unlock (&cpu->lock);
|
|
|
|
if (l < best_load) {
|
|
best_load = l;
|
|
best_cpu = cpu;
|
|
}
|
|
}
|
|
|
|
return best_cpu;
|
|
}
|
|
|
|
/// Bootstrap code for non-BSP CPUs
|
|
static void smp_bootstrap (struct limine_mp_info* mp_info) {
|
|
load_kernel_cr3 ();
|
|
|
|
struct cpu* cpu = cpu_make (mp_info->lapic_id, mp_info->processor_id);
|
|
|
|
gdt_init (cpu);
|
|
idt_load ();
|
|
syscall_init ();
|
|
|
|
lapic_init (1000);
|
|
|
|
DEBUG ("CPU %u is online!\n", thiscpu->id);
|
|
|
|
atomic_fetch_sub (&cpu_counter, 1);
|
|
|
|
struct reschedule_ctx rctx = {.cpu = NULL, .reschedule = false};
|
|
|
|
struct proc* spin_proc = proc_from_file (VFS_KERNEL, "RD", "/spin", &rctx);
|
|
proc_register (spin_proc, thiscpu, NULL);
|
|
|
|
spin_lock (&spin_proc->cpu->lock);
|
|
do_sched (spin_proc, &spin_proc->cpu->lock);
|
|
}
|
|
|
|
/// Initialize SMP subsystem for AMD64. Start AP CPUs
|
|
void smp_init (void) {
|
|
lapic_init (1000);
|
|
|
|
struct limine_mp_response* mp = limine_mp_request.response;
|
|
|
|
cpu_counter = mp->cpu_count - 1;
|
|
|
|
for (size_t i = 0; i < mp->cpu_count; i++) {
|
|
if (mp->cpus[i]->processor_id != thiscpu->acpi_id) {
|
|
mp->cpus[i]->goto_address = &smp_bootstrap;
|
|
}
|
|
}
|
|
|
|
DEBUG ("Waiting for other CPUs:\n");
|
|
|
|
while (atomic_load (&cpu_counter) > 0)
|
|
debugprintf (".\n");
|
|
|
|
DEBUG ("All CPUs are up!\n");
|
|
}
|