#include <aux/compiler.h>
#include <aux/elf.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/sched.h>
#include <sys/smp.h>

#if defined(__x86_64__)
#include <amd64/intr_defs.h>
#include <amd64/msr.h>
#include <amd64/msr-index.h>
#endif

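/*
 * Auxiliary values collected while loading an ELF image; these appear to
 * correspond to the standard AT_ENTRY/AT_PHDR/AT_PHENT/AT_PHNUM auxv entries.
 */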
struct elf_aux {
    uint64_t entry;
    uint64_t phdr;
    uint64_t phent;
    uint64_t phnum;
};

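/* global list of all spawned processes (wrapped in struct procw), guarded by procs_lock */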
static struct procw* procs;
static spin_lock_t procs_lock = SPIN_LOCK_INIT;

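/* Validate the ELF magic bytes (0x7F 'E' 'L' 'F') at the start of the image. */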
static bool proc_check_elf (uint8_t* elf) {
    return (elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F');
}

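/*
 * Record a physical-to-virtual mapping on the process and map each of its
 * `pages` pages into the process page directory.
 */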
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
               uint32_t flags) {
    struct proc_mapping* mapping = malloc (sizeof (*mapping));
    if (mapping == NULL)
        return;

    mapping->paddr = start_paddr;
    mapping->vaddr = start_vaddr;
    mapping->size = pages * PAGE_SIZE;

    flags &= ~MM_PD_LOCK; /* clear the LOCK flag if present, because we lock manually below */

    spin_lock (&proc->pd.lock);

    linklist_append (struct proc_mapping*, proc->mappings, mapping);

    for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
         vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
        mm_map_page (&proc->pd, ppage, vpage, flags);
    }

    spin_unlock (&proc->pd.lock);
}

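/*
 * Walk the program headers of a validated ELF image, allocate physical memory
 * for every PT_LOAD segment, copy its contents in through the HHDM and map it
 * into the process address space. Returns the collected aux values.
 */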
static struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
    struct elf_aux aux = {0}; /* zero-initialize so aux.phdr stays 0 if no PT_PHDR is present */

    Elf64_Ehdr* ehdr = (Elf64_Ehdr*)elf;
    aux.entry = ehdr->e_entry;
    aux.phnum = ehdr->e_phnum;
    aux.phent = ehdr->e_phentsize;

    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    for (uint64_t segment = 0; segment < ehdr->e_phnum; segment++) {
        Elf64_Phdr* phdr =
            (Elf64_Phdr*)((uintptr_t)elf + ehdr->e_phoff + (ehdr->e_phentsize * segment));

        switch (phdr->p_type) {
            case PT_PHDR: {
                aux.phdr = (uint64_t)phdr->p_vaddr;
            } break;
            case PT_LOAD: {
                /* p_vaddr need not be page aligned; map from the containing page boundary */
                uintptr_t v_addr = align_down (phdr->p_vaddr, PAGE_SIZE);
                uintptr_t off = phdr->p_vaddr - v_addr;

                size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);

                uintptr_t p_addr = pmm_alloc (blks);
                if (p_addr == PMM_ALLOC_ERR) {
                    DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
                    break; /* skip this segment rather than writing through an invalid address */
                }

                /* zero the whole allocation (covers .bss), then copy the file-backed bytes */
                memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
                memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
                        (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);

                uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
                if (phdr->p_flags & PF_W)
                    pg_flags |= MM_PG_RW;

                proc_map (proc, p_addr, v_addr, blks, pg_flags);
            } break;
        }
    }

    return aux;
}

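/*
 * Create a process from an ELF executable stored on the ramdisk: allocate a
 * user page directory plus syscall and user stacks, load the ELF segments and
 * set up the initial user-mode register state.
 */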
static struct proc* proc_spawn_rd (char* name) {
    struct rd_file* rd_file = rd_get_file (name);
    if (rd_file == NULL)
        return NULL;

    bool ok = proc_check_elf (rd_file->content);
    DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));

    if (!ok)
        return NULL;

    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;

    memset (proc, 0, sizeof (*proc));

#if defined(__x86_64__)
    proc->pd.lock = SPIN_LOCK_INIT;
    proc->pd.cr3_paddr = mm_alloc_user_pd_phys ();
    if (proc->pd.cr3_paddr == 0) {
        free (proc);
        return NULL;
    }

    proc->pdata.syscall_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
    if (proc->pdata.syscall_stack == PMM_ALLOC_ERR) {
        free (proc);
        return NULL;
    }

    proc->pdata.user_stack = pmm_alloc (USTACK_SIZE / PAGE_SIZE);
    if (proc->pdata.user_stack == PMM_ALLOC_ERR) {
        pmm_free (proc->pdata.syscall_stack, KSTACK_SIZE / PAGE_SIZE);
        free (proc);
        return NULL;
    }

    uintptr_t user_stack = proc->pdata.user_stack;

    /* stacks grow down, so keep the top-of-stack addresses */
    proc->pdata.syscall_stack += KSTACK_SIZE;
    proc->pdata.user_stack += USTACK_SIZE;

    proc_map (proc, user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
              MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);

    struct elf_aux aux = proc_load_segments (proc, rd_file->content);

    proc->pdata.regs.ss = 0x20 | 0x03;  /* user data selector, RPL 3 */
    proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
    proc->pdata.regs.rflags = 0x202;    /* reserved bit 1 plus IF (interrupts enabled) */
    proc->pdata.regs.cs = 0x18 | 0x03;  /* user code selector, RPL 3 */
    proc->pdata.regs.rip = aux.entry;
    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
#endif

    return proc;
}

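/*
 * Make a process available globally: wrap it in a struct procw on the global
 * process list and append it to this CPU's run queue.
 */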
static void proc_register (struct proc* proc) {
    struct procw* procw = malloc (sizeof (*procw));
    if (procw == NULL)
        return;

    procw->proc = proc;
    proc->procw = procw;

    spin_lock (&procs_lock);

    spin_lock (&thiscpu->lock);

    linklist_append (struct procw*, procs, procw);
    linklist_append (struct proc*, thiscpu->proc_run_q, proc);

    if (thiscpu->proc_current == NULL)
        thiscpu->proc_current = proc;

    spin_unlock (&thiscpu->lock);

    spin_unlock (&procs_lock);
}

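/*
 * Round-robin scheduling policy: starting after proc_current, return the next
 * PROC_READY process on this CPU's run queue, or NULL if none is runnable.
 * The caller must hold thiscpu->lock.
 */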
static struct proc* proc_find_sched (void) {
    struct proc* start = thiscpu->proc_current;
    struct proc* proc = start->next;

    for (;;) {
        if (proc == NULL) {
            /* reached the tail of the run queue; wrap around to its head */
            proc = thiscpu->proc_run_q;
        }

        if (atomic_load (&proc->state) == PROC_READY) {
            return proc;
        }

        /* walked the whole queue: no runnable processes found */
        if (proc == start) {
            return NULL;
        }

        proc = proc->next;
    }
}

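/*
 * Pick the next runnable process and context-switch to it via do_sched; if
 * there is nothing to run, park this CPU.
 */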
void proc_sched (void) {
    struct proc* next = NULL;

    spin_lock (&thiscpu->lock);

    if (thiscpu->proc_run_q == NULL || thiscpu->proc_current == NULL) {
        spin_unlock (&thiscpu->lock);
        goto idle;
    }

    next = proc_find_sched ();

    if (next != NULL)
        thiscpu->proc_current = next;

    spin_unlock (&thiscpu->lock);

    if (next != NULL && atomic_load (&next->state) == PROC_READY) {
        do_sched (&next->pdata.regs, &next->pd);
    }

idle:
#if defined(__x86_64__)
    extern void amd64_spin (void);

    amd64_spin ();
#endif
}

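/* Mark a process dead; the scheduler will no longer select it. */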
void proc_kill (struct proc* proc) {
    /* mark for garbage collection */
    atomic_store (&proc->state, PROC_DEAD);
}

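/* IRQ trampoline that invokes the scheduler (attached to the preemption timer in proc_init). */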
static void proc_irq_sched (void* arg, void* regs) {
    (void)arg, (void)regs;
    proc_sched ();
}

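/*
 * Spawn the initial user process from the ramdisk, hook the scheduler up to
 * the preemption timer and hand control over to init.
 */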
void proc_init (void) {
    struct proc* init = proc_spawn_rd ("init.exe");
    if (init == NULL) {
        DEBUG ("failed to spawn init.exe from the ramdisk!\n");
        return;
    }

    proc_register (init);

#if defined(__x86_64__)
    irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, 0);
#endif

    do_sched (&init->pdata.regs, &init->pd);
}