multi-cpu scheduling WIP
@@ -82,8 +82,6 @@ static struct ioapic* amd64_ioapic_find (uint32_t irq) {

   for (size_t i = 0; i < ioapic_entries; i++) {
     ioapic = &ioapics[i];

-    /* uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset +
-     * (uintptr_t)ioapic->table_data.address, 1); */
     uint32_t version = amd64_ioapic_read (ioapic, 1);
     uint32_t max = ((version >> 16) & 0xFF);

@@ -233,7 +231,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {

 static void amd64_lapic_start (uint32_t ticks) {
   amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
   amd64_lapic_write (LAPIC_TIMICT, ticks);
-  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17) | (1 << 16));
+  amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
 }

 /*
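Note: in the LAPIC LVT timer register, bit 16 is the interrupt mask and bit 17 selects periodic mode, so dropping `(1 << 16)` is what actually unmasks the preemption timer — the old write armed a periodic timer that could never fire. A minimal sketch of the fields assumed here (names illustrative, per the Intel SDM, not from this codebase):

    #define LVTT_MASKED   (1u << 16) /* dropped: the timer interrupt was masked */
    #define LVTT_PERIODIC (1u << 17) /* kept: re-arm and fire every `ticks` */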
@@ -9,6 +9,7 @@
 #include <irq/irq.h>
 #include <libk/std.h>
 #include <limine/limine.h>
+#include <limine/requests.h>
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
 #include <proc/proc.h>

@@ -29,7 +30,9 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
 * the necessary platform-dependent subsystems/drivers and jump into the init app.
 */
 void bootmain (void) {
-  struct cpu* bsp_cpu = cpu_make ();
+  struct limine_mp_response* mp = limine_mp_request.response;
+
+  struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);

   amd64_init (bsp_cpu, false);
   syscall_init ();
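The BSP's LAPIC ID now comes from the bootloader instead of being assumed to be 0. For reference, the Limine MP response fields used here look roughly like this (paraphrased from the Limine protocol; check the vendored limine.h for the exact revision):

    struct limine_mp_response {
      uint64_t revision;
      uint32_t flags;
      uint32_t bsp_lapic_id; /* LAPIC ID of the boot CPU */
      uint64_t cpu_count;    /* number of entries in cpus */
      struct limine_mp_info** cpus;
    };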
@@ -44,9 +47,9 @@ void bootmain (void) {
   amd64_ioapic_init ();
   amd64_hpet_init ();

-  smp_init ();
-
   mm_init2 ();

+  smp_init ();
+
   proc_init ();

@@ -213,15 +213,3 @@ void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }

 /* Restore interrupt state */
 void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
-
-/* Map custom IRQ mappings to legacy IRQs */
-uint32_t amd64_resolve_irq (uint32_t irq) {
-  static const uint32_t mappings[] = {
-      [SCHED_PREEMPT_TIMER] = 0,
-      [TLB_SHOOTDOWN] = 6,
-      [CPU_REQUEST_SCHED] = 3,
-      [CPU_SPURIOUS] = 5,
-  };
-
-  return mappings[irq];
-}

@@ -32,7 +32,6 @@ struct saved_regs {
 } PACKED;

 void amd64_load_idt (void);
-uint32_t amd64_resolve_irq (uint32_t irq);
 void amd64_intr_init (void);

 #endif // _KERNEL_AMD64_INTR_H

@@ -5,12 +5,18 @@
 #include <proc/proc.h>
 #include <sys/mm.h>
+#include <sys/smp.h>
+#include <sync/spin_lock.h>

-void do_sched (struct proc* proc) {
-  __asm__ volatile ("cli");
+void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
+  spin_lock_ctx_t ctxpr;
+  spin_lock (&proc->lock, &ctxpr);

   thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
   thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;

+  spin_unlock (&proc->lock, &ctxpr);
+  spin_unlock (cpu_lock, ctxcpu);
+
   amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd->cr3_paddr);
 }
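The new contract: the caller enters do_sched with the target CPU's lock held, and do_sched drops both locks itself before the non-returning jump into amd64_do_sched — presumably why the explicit cli could go, since the interrupt-flag state now travels in the spin-lock contexts. The expected calling pattern, as used by proc_sched and proc_init later in this diff:

    spin_lock_ctx_t ctxcpu;
    spin_lock (&next->cpu->lock, &ctxcpu);
    do_sched (next, &next->cpu->lock, &ctxcpu); /* does not return */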
@@ -13,6 +13,7 @@
 #include <sys/debug.h>
 #include <sys/smp.h>
 #include <sys/syscall.h>
+#include <sys/sched.h>

 /// Cpu ID counter
 static atomic_uint cpu_counter = 0;

@@ -22,7 +23,7 @@ static struct cpu cpus[CPUS_MAX];
 static atomic_int cpu_init_count;

 /// Allocate a CPU structure
-struct cpu* cpu_make (void) {
+struct cpu* cpu_make (uint64_t lapic_id) {
   int id = atomic_fetch_add (&cpu_counter, 1);

   struct cpu* cpu = &cpus[id];
@@ -30,6 +31,7 @@ struct cpu* cpu_make (void) {
   memset (cpu, 0, sizeof (*cpu));
   cpu->lock = SPIN_LOCK_INIT;
   cpu->id = id;
+  cpu->lapic_id = lapic_id;

   amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
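Writing the struct cpu pointer into MSR_GS_BASE is what makes `thiscpu` (defined as `cpu_get ()` in smp.h below) and the `%gs:`-relative loads in the syscall entry work. One plausible cpu_get, assuming an amd64_rdmsr helper exists in this codebase (hypothetical sketch; the real implementation is not part of this diff):

    struct cpu* cpu_get (void) {
      /* assumption: GS base always holds this CPU's struct cpu */
      return (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
    }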
@@ -47,21 +49,31 @@ void cpu_request_sched (struct cpu* cpu) {
     return;
   }

-  struct limine_mp_response* mp = limine_mp_request.response;
-
-  for (size_t i = 0; i < mp->cpu_count; i++) {
-    if (cpu->id == i) {
-      amd64_lapic_ipi (mp->cpus[i]->lapic_id, CPU_REQUEST_SCHED);
-      break;
-    }
-  }
+  amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
 }

+struct cpu* cpu_find_lightest (void) {
+  struct cpu* cpu = &cpus[0];
+
+  int load = atomic_load (&cpu->proc_run_q_count);
+
+  for (unsigned int i = 1; i < cpu_counter; i++) {
+    struct cpu* new_cpu = &cpus[i];
+    int new_load = atomic_load (&new_cpu->proc_run_q_count);
+    if (new_load < load) {
+      load = new_load;
+      cpu = new_cpu;
+    }
+  }
+
+  return cpu;
+}
+
 /// Bootstrap code for non-BSP CPUs
 static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
   amd64_load_kernel_cr3 ();

-  struct cpu* cpu = cpu_make ();
+  struct cpu* cpu = cpu_make (mp_info->lapic_id);

   amd64_init (cpu, true); /* gdt + idt */
   syscall_init ();
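Caching lapic_id in struct cpu turns cpu_request_sched from a table walk into a single targeted IPI. For orientation, a fixed-delivery xAPIC IPI amounts to two ICR writes (MMIO offsets 0x300/0x310 per the Intel SDM); a sketch of what amd64_lapic_ipi plausibly does, not its actual source:

    /* Fixed-delivery IPI to one CPU, xAPIC mode (illustrative only): */
    static void lapic_ipi_sketch (uint32_t lapic_id, uint8_t vector) {
      amd64_lapic_write (0x310, lapic_id << 24); /* ICR high: destination LAPIC ID */
      amd64_lapic_write (0x300, vector);         /* ICR low: this write sends the IPI */
    }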
@@ -70,12 +82,14 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {

   DEBUG ("CPU %u is online!\n", thiscpu->id);

   __asm__ volatile ("sti");

   atomic_fetch_sub (&cpu_init_count, 1);

-  for (;;)
-    ;
+  struct proc* spin_proc = proc_spawn_rd ("spin.exe");
+  proc_register (spin_proc, thiscpu);
+
+  spin_lock_ctx_t ctxcpu;
+  spin_lock (&spin_proc->cpu->lock, &ctxcpu);
+  do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
 }

 /// Initialize SMP subsystem for AMD64. Start AP CPUs
@@ -87,7 +101,7 @@ void smp_init (void) {
   cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */

   for (size_t i = 0; i < mp->cpu_count; i++) {
-    if (mp->cpus[i]->lapic_id != thiscpu->id) {
+    if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
       DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
       mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
     }
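This closes a real hole: thiscpu->id is the kernel's sequential counter, while the Limine entries carry hardware LAPIC IDs, and the two only coincide by luck. An illustration with a hypothetical topology:

    /* Suppose the BSP has lapic_id 4 (kernel id 0) and one AP has lapic_id 0.
     * Old check (lapic_id != thiscpu->id, i.e. != 0):
     *   BSP entry: 4 != 0 -> sets goto_address on the BSP itself
     *   AP entry:  0 != 0 -> false, so the real AP is never started
     * The new check compares LAPIC IDs and gets both cases right. */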
@@ -25,17 +25,20 @@ struct cpu {

   uintptr_t lapic_mmio_base;
   uint64_t lapic_ticks;
+  uint64_t lapic_id;
   uint32_t id;

   spin_lock_t lock;

   struct list_node_link* proc_run_q;
   struct proc* proc_current;
+  atomic_int proc_run_q_count;
 };

-struct cpu* cpu_make (void);
+struct cpu* cpu_make (uint64_t lapic_id);
 struct cpu* cpu_get (void);
 void cpu_request_sched (struct cpu* cpu);
+struct cpu* cpu_find_lightest (void);

 #define thiscpu (cpu_get ())

@@ -15,25 +15,27 @@ extern void amd64_syscall_entry (void);

 int amd64_syscall_dispatch (void* stack_ptr) {
   spin_lock_ctx_t ctxcpu;

-  spin_lock (&thiscpu->lock, &ctxcpu);
-
   amd64_load_kernel_cr3 ();

   struct saved_regs* regs = stack_ptr;

+  spin_lock (&thiscpu->lock, &ctxcpu);
   memcpy (&thiscpu->regs, regs, sizeof (struct saved_regs));
-  spin_unlock (&thiscpu->lock, &ctxcpu);

   int syscall_num = regs->rax;
   syscall_handler_func_t func = syscall_find_handler (syscall_num);

-  if (func == NULL)
+  if (func == NULL) {
+    spin_unlock (&thiscpu->lock, &ctxcpu);
     return -ST_SYSCALL_NOT_FOUND;
+  }

   struct proc* caller = thiscpu->proc_current;

+  spin_unlock (&thiscpu->lock, &ctxcpu);
+
   int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);

   return result;
 }
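The argument order handed to the handler follows the SysV/Linux syscall convention — rdi, rsi, rdx, r10, r8, r9, with r10 standing in for rcx, which the syscall instruction clobbers with the return RIP. A handler signature compatible with that call would look like this (a sketch; DEFINE_SYSCALL's exact expansion is not in this diff):

    int sys_example (struct proc* proc, struct saved_regs* regs,
                     uint64_t a0, uint64_t a1, uint64_t a2,
                     uint64_t a3, uint64_t a4, uint64_t a5);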
@@ -4,6 +4,8 @@

 .global amd64_syscall_entry
 amd64_syscall_entry:
+  cli
+
   movq %rsp, %gs:0
   movq %gs:8, %rsp
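The cli guards the stack-switch window: the next two instructions still run on the user stack, and an interrupt taken between them would too (unless IA32_FMASK already clears IF on syscall entry). The %gs:0 and %gs:8 slots imply struct cpu begins with two stack words, roughly as follows (an assumption — the head of the struct is outside this diff):

    struct cpu {
      uint64_t syscall_user_stack;   /* %gs:0 - user RSP parked on entry */
      uint64_t syscall_kernel_stack; /* %gs:8 - kernel RSP switched to */
      /* ... the fields shown in the smp.h hunk above ... */
    };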
@@ -30,11 +30,6 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
   irq_table[irq_num] = irq;
   rw_spin_write_unlock (&irqs_lock, &ctxiqa);

-#if defined(__x86_64__)
-  uint8_t resolution = amd64_resolve_irq (irq_num);
-  amd64_ioapic_route_irq (irq_num, resolution, 0, amd64_lapic_id ());
-#endif
-
   return true;
 }

@@ -6,6 +6,8 @@
 #include <proc/proc.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/smp.h>
+#include <sys/spin_lock.h>

 bool proc_create_resource_mutex (struct proc_mutex* mutex) {
   memset (mutex, 0, sizeof (*mutex));
@@ -34,6 +36,8 @@ static void proc_mutex_suspend (struct proc* proc, struct proc_suspension_q* sq,
   proc->suspension_q = sq;

   list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_sub (&cpu->proc_run_q_count, 1);
+
   if (cpu->proc_current == proc)
     cpu->proc_current = NULL;

@@ -64,6 +68,7 @@ static void proc_mutex_resume (struct proc* proc) {
   atomic_store (&proc->state, PROC_READY);

   list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_add (&cpu->proc_run_q_count, 1);

   spin_unlock (&sq->lock, &ctxsq);
 }

@@ -181,7 +181,7 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
   return aux;
 }

-static struct proc* proc_spawn_rd (char* name) {
+struct proc* proc_spawn_rd (char* name) {
   struct rd_file* rd_file = rd_get_file (name);

   bool ok = proc_check_elf (rd_file->content);
@@ -204,10 +204,13 @@ struct proc* proc_find_pid (int pid) {
   return proc;
 }

-void proc_register (struct proc* proc, struct cpu* cpu) {
+void proc_register (struct proc* proc, struct cpu* cpu1) {
   spin_lock_ctx_t ctxcpu, ctxprtr;

-  proc->cpu = cpu;
+  proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
+  DEBUG ("Assigning CPU %d to PID %d\n", proc->cpu->id, proc->pid);
+
+  struct cpu* cpu = proc->cpu;

   rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
   rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
@@ -215,6 +218,7 @@ void proc_register (struct proc* proc, struct cpu* cpu) {

   spin_lock (&cpu->lock, &ctxcpu);
   list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_add (&cpu->proc_run_q_count, 1);

   if (cpu->proc_current == NULL)
     cpu->proc_current = proc;
@@ -306,15 +310,15 @@ void proc_sched (void) {
   next = proc_find_sched (cpu);

   if (prev != NULL) {
     spin_lock (&prev->lock, &ctxpr);
     memcpy (&prev->pdata.regs, &cpu->regs, sizeof (struct saved_regs));
     spin_unlock (&prev->lock, &ctxpr);
   }

   if (next) {
     cpu->proc_current = next;
-    spin_unlock (&cpu->lock, &ctxcpu);
-    do_sched (next);
+
+    do_sched (next, &cpu->lock, &ctxcpu);
   } else {
     cpu->proc_current = NULL;
     spin_unlock (&cpu->lock, &ctxcpu);
@@ -329,11 +333,13 @@ void proc_kill (struct proc* proc) {

   spin_lock (&proc->lock, &ctxpr);
   atomic_store (&proc->state, PROC_DEAD);
+  proc->cpu = NULL;
   spin_unlock (&proc->lock, &ctxpr);

   spin_lock (&cpu->lock, &ctxcpu);

   list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_sub (&cpu->proc_run_q_count, 1);
   if (cpu->proc_current == proc)
     cpu->proc_current = NULL;

@@ -409,8 +415,13 @@ void proc_init (void) {

   proc_kpproc_init ();

-  struct proc* init = proc_spawn_rd ("init.exe");
-  proc_register (init, thiscpu);
+  struct proc* spin_proc = proc_spawn_rd ("spin.exe");
+  proc_register (spin_proc, thiscpu);

-  do_sched (init);
+  struct proc* init = proc_spawn_rd ("init.exe");
+  proc_register (init, NULL);
+
+  spin_lock_ctx_t ctxcpu;
+  spin_lock (&init->cpu->lock, &ctxcpu);
+  do_sched (init, &init->cpu->lock, &ctxcpu);
 }

@@ -68,6 +68,7 @@ bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages);
 struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
 void proc_register (struct proc* proc, struct cpu* cpu);
 struct proc* proc_find_pid (int pid);
+struct proc* proc_spawn_rd (char* name);
 void proc_init (void);

 #endif // _KERNEL_PROC_PROC_H

@@ -4,6 +4,6 @@
 #include <libk/std.h>
 #include <proc/proc.h>

-void do_sched (struct proc* proc);
+void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu);

 #endif // _KERNEL_SYS_SCHED_H

@@ -160,8 +160,6 @@ DEFINE_SYSCALL (sys_clone) {
   size_t stack_size = (size_t)a2;
   uintptr_t entry = a3;

-  struct cpu* cpu = proc->cpu;
-
   struct proc* new = proc_clone (proc, vstack_top, stack_size, entry);

   DEBUG ("new=%p\n", new);
@@ -172,7 +170,7 @@ DEFINE_SYSCALL (sys_clone) {

   int pid = new->pid;

-  proc_register (new, cpu);
+  proc_register (new, NULL);

   return pid;
 }
@@ -234,7 +232,6 @@ DEFINE_SYSCALL (sys_lock_mutex) {
   if (mutex_resource == NULL)
     return -ST_NOT_FOUND;

-  DEBUG ("locking %d\n", proc->pid);
   proc_mutex_lock (proc, &mutex_resource->u.mutex);

   return ST_OK;
@@ -253,7 +250,6 @@ DEFINE_SYSCALL (sys_unlock_mutex) {
   if (mutex_resource == NULL)
     return -ST_NOT_FOUND;

-  DEBUG ("unlocking %d\n", proc->pid);
   return proc_mutex_unlock (proc, &mutex_resource->u.mutex) ? ST_OK : -ST_PERMISSION_ERROR;
 }