/* mop3/kernel/proc/proc.c */
#include <aux/compiler.h>
#include <aux/elf.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/spin.h>
#if defined(__x86_64__)
#include <amd64/intr_defs.h>
#endif
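
/* Global list of every live process (each wrapped in a struct procw) and the
 * lock guarding it; per-CPU run queues hang off thiscpu->proc_run_q. */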
static struct procw* procs;
static spin_lock_t procs_lock = SPIN_LOCK_INIT;

/* Check the four-byte ELF magic ("\x7FELF") at the start of the image. */
static bool proc_check_elf (uint8_t* elf) {
    return elf[0] == 0x7F && elf[1] == 'E' && elf[2] == 'L' && elf[3] == 'F';
}
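
/* Map a physically contiguous region into the process's address space one
 * page at a time, recording the mapping so it can be torn down later. */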
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
               uint32_t flags) {
    struct proc_mapping* mapping = malloc (sizeof (*mapping));
    if (mapping == NULL)
        return;
    mapping->paddr = start_paddr;
    mapping->vaddr = start_vaddr;
    mapping->size = pages * PAGE_SIZE;
    flags &= ~MM_PD_LOCK; /* clear the LOCK flag if present, because we lock manually */
    spin_lock (&proc->pd.lock);
    linklist_append (struct proc_mapping*, proc->mappings, mapping);
    for (uintptr_t vpage = start_vaddr, ppage = start_paddr;
         vpage < start_vaddr + pages * PAGE_SIZE; vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
        mm_map_page (&proc->pd, ppage, vpage, flags);
    }
    spin_unlock (&proc->pd.lock);
}
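
/* Walk the ELF program headers: remember PT_PHDR for the aux vector and copy
 * each PT_LOAD segment into freshly allocated physical pages. Whole pages are
 * zeroed first, so the p_filesz..p_memsz tail (.bss) ends up zero-filled. */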
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
    struct elf_aux aux = { 0 }; /* zero-init so aux.phdr is defined even without a PT_PHDR */
    Elf64_Ehdr* ehdr = (Elf64_Ehdr*)elf;
    aux.entry = ehdr->e_entry;
    aux.phnum = ehdr->e_phnum;
    aux.phent = ehdr->e_phentsize;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    for (uint64_t segment = 0; segment < ehdr->e_phnum; segment++) {
        Elf64_Phdr* phdr =
            (Elf64_Phdr*)((uintptr_t)elf + ehdr->e_phoff + (ehdr->e_phentsize * segment));
        switch (phdr->p_type) {
        case PT_PHDR: {
            aux.phdr = (uint64_t)phdr->p_vaddr;
        } break;
        case PT_LOAD: {
            uintptr_t v_addr = align_down (phdr->p_vaddr, PAGE_SIZE);
            uintptr_t off = phdr->p_vaddr - v_addr;
            size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
            uintptr_t p_addr = pmm_alloc (blks);
            if (p_addr == PMM_ALLOC_ERR) {
                DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n",
                       blks);
                continue; /* don't write through an invalid address; skip this segment */
            }
            memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
            memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
                    (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
            uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
            if (phdr->p_flags & PF_W)
                pg_flags |= MM_PG_RW;
            proc_map (proc, p_addr, v_addr, blks, pg_flags);
        } break;
        }
    }
    return aux;
}
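
/* Spawn a process from an ELF image on the initial ramdisk. */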
static struct proc* proc_spawn_rd (char* name) {
    struct rd_file* rd_file = rd_get_file (name);
    if (rd_file == NULL) {
        DEBUG ("no such file on ramdisk: %s\n", name);
        return NULL;
    }
    bool ok = proc_check_elf (rd_file->content);
    DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
    if (!ok)
        return NULL;
    return proc_from_elf (rd_file->content);
}
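
/* Publish a process: wrap it in a procw, add it to the global list, and queue
 * it on this CPU's run queue. */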
static void proc_register (struct proc* proc) {
    /* make available globally. */
    struct procw* procw = malloc (sizeof (*procw));
    if (procw == NULL)
        return;
    procw->proc = proc;
    proc->procw = procw;
    spin_lock (&procs_lock);
    spin_lock (&thiscpu->lock);
    linklist_append (struct procw*, procs, procw);
    linklist_append (struct proc*, thiscpu->proc_run_q, proc);
    if (thiscpu->proc_current == NULL)
        thiscpu->proc_current = proc;
    spin_unlock (&thiscpu->lock);
    spin_unlock (&procs_lock);
}
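
/* Round-robin pick: walk the run queue starting after proc_current, wrapping
 * to the head at the tail, and return the first READY process. Returns NULL
 * after a full lap with no runnable process. Caller holds thiscpu->lock. */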
static struct proc* proc_find_sched (void) {
    struct proc* start = thiscpu->proc_current;
    struct proc* proc = start->next;
    for (;;) {
        if (proc == NULL) {
            proc = thiscpu->proc_run_q; /* wrap around to the head of the queue */
        }
        if (atomic_load (&proc->state) == PROC_READY) {
            return proc;
        }
        /* back at the start: no runnable process found. */
        if (proc == start) {
            return NULL;
        }
        proc = proc->next;
    }
}
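
/* Pick the next READY process on this CPU and hand control to do_sched;
 * if there is nothing to run, idle in spin(). */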
void proc_sched (void) {
    struct proc* next = NULL;
    spin_lock (&thiscpu->lock);
    if (thiscpu->proc_run_q == NULL || thiscpu->proc_current == NULL) {
        spin_unlock (&thiscpu->lock);
        goto idle;
    }
    next = proc_find_sched ();
    if (next != NULL)
        thiscpu->proc_current = next;
    spin_unlock (&thiscpu->lock);
    if (next != NULL && atomic_load (&next->state) == PROC_READY) {
        do_sched (&next->pdata.regs, &next->pd);
    }
idle:
    spin ();
}

void proc_kill (struct proc* proc) {
    /* mark for garbage collection */
    atomic_store (&proc->state, PROC_DEAD);
}

/* preemption-timer IRQ handler: just reschedule. */
static void proc_irq_sched (void* arg, void* regs) {
    (void)arg, (void)regs;
    proc_sched ();
}
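
/* Bring up userspace: spawn init.exe from the ramdisk, hook the scheduler to
 * the preemption timer, and switch to the first process. */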
void proc_init (void) {
    struct proc* init = proc_spawn_rd ("init.exe");
    if (init == NULL) {
        DEBUG ("failed to spawn init.exe!\n");
        for (;;) /* no init to run; idle forever */
            spin ();
    }
    proc_register (init);
#if defined(__x86_64__)
    irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, 0);
#endif
    do_sched (&init->pdata.regs, &init->pd);
}