/*
 * mop3/kernel/proc/proc.c
 * Process creation, ELF loading, scheduling, and reaping.
 * Last change (d2f5c032d9): "Fix TLS alignment issues, works on BOCHS now too!"
 */

#include <aux/compiler.h>
#include <aux/elf.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <rd/rd.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/spin.h>
#if defined(__x86_64__)
#include <amd64/intr_defs.h>
#endif
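
/* Reap dead processes once every SCHED_REAP_FREQ scheduler cycles. */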
#define SCHED_REAP_FREQ 10
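
/* Global tree of all processes, keyed by PID; guarded by proc_tree_lock. */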
static struct rb_node_link* proc_tree = NULL;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
static atomic_int sched_cycles = 0;
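
/* Verify the ELF magic bytes (0x7F 'E' 'L' 'F') at the start of the image. */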
static bool proc_check_elf (uint8_t* elf) {
    return elf[0] == 0x7F && elf[1] == 'E' && elf[2] == 'L' && elf[3] == 'F';
}
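
/*
 * Walk the program headers of an ELF image and map each PT_LOAD segment
 * into the process group's address space, writing the physical pages
 * through the Limine HHDM (higher-half direct map). A PT_TLS segment is
 * copied into a heap-allocated template, which proc_init_tls presumably
 * instantiates for each process. Returns the auxiliary values (entry
 * point, program-header address/size/count) for the new process.
 */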
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
    struct elf_aux aux = { 0 }; /* zero-init so aux.phdr is defined even without a PT_PHDR */
    Elf64_Ehdr* ehdr = (Elf64_Ehdr*)elf;
    aux.entry = ehdr->e_entry;
    aux.phnum = ehdr->e_phnum;
    aux.phent = ehdr->e_phentsize;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    for (uint64_t segment = 0; segment < ehdr->e_phnum; segment++) {
        Elf64_Phdr* phdr =
            (Elf64_Phdr*)((uintptr_t)elf + ehdr->e_phoff + (ehdr->e_phentsize * segment));
        switch (phdr->p_type) {
            case PT_PHDR: {
                aux.phdr = (uint64_t)phdr->p_vaddr;
            } break;
            case PT_LOAD: {
                /* Map whole pages covering [p_vaddr, p_vaddr + p_memsz), zero
                 * them, then copy in the file-backed part of the segment. */
                uintptr_t v_addr = align_down (phdr->p_vaddr, PAGE_SIZE);
                uintptr_t off = phdr->p_vaddr - v_addr;
                size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
                uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
                if (phdr->p_flags & PF_W)
                    pg_flags |= MM_PG_RW;
                uintptr_t p_addr;
                procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
                memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
                memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
                        (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
            } break;
            case PT_TLS: {
#if defined(__x86_64__)
                if (phdr->p_memsz > 0) {
                    /* Round the TLS block up to its required alignment and
                     * reserve one extra pointer-sized slot (presumably the
                     * TLS self-pointer). */
                    size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
                    size_t tls_size = align_up (phdr->p_memsz, tls_align);
                    size_t tls_total_needed = tls_size + sizeof (uintptr_t);
                    size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
                    proc->procgroup->tls.tls_tmpl_pages = blks;
                    proc->procgroup->tls.tls_tmpl_size = tls_size;
                    proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
                    proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
                    memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
                    memcpy (proc->procgroup->tls.tls_tmpl,
                            (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
                    proc_init_tls (proc);
                }
#endif
            } break;
        }
    }
    return aux;
}
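
/* Spawn a process from an ELF executable stored on the initial ramdisk. */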
struct proc* proc_spawn_rd (char* name) {
    struct rd_file* rd_file = rd_get_file (name);
    if (!rd_file) /* no such file on the ramdisk */
        return NULL;
    if (!proc_check_elf (rd_file->content))
        return NULL;
    return proc_from_elf (rd_file->content);
}
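
/* Look up a process by PID in the global process tree. */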
struct proc* proc_find_pid (int pid) {
    spin_lock_ctx_t ctxprtr;
    struct proc* proc = NULL;
    spin_lock (&proc_tree_lock, &ctxprtr);
    rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
    spin_unlock (&proc_tree_lock, &ctxprtr);
    return proc;
}
void proc_register (struct proc* proc, struct cpu* cpu1) {
    spin_lock_ctx_t ctxcpu, ctxprtr;
    proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
    struct cpu* cpu = proc->cpu;
    spin_lock (&proc_tree_lock, &ctxprtr);
    spin_lock (&cpu->lock, &ctxcpu);
    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
    atomic_fetch_add (&cpu->proc_run_q_count, 1);
    list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
    if (cpu->proc_current == NULL)
        cpu->proc_current = proc;
    /* Release in reverse acquisition order so each ctx restores the
     * interrupt state that was saved when its lock was taken. */
    spin_unlock (&cpu->lock, &ctxcpu);
    spin_unlock (&proc_tree_lock, &ctxprtr);
}

/*
 * Pick the next runnable process on this CPU: a round-robin scan of the
 * run queue starting just after proc_current. Caller holds cpu->lock.
 */
static struct proc* proc_find_sched (struct cpu* cpu) {
    if (!cpu->proc_run_q)
        return NULL;
    struct list_node_link *current, *start;
    if (cpu->proc_current)
        current = cpu->proc_current->cpu_run_q_link.next;
    else
        current = cpu->proc_run_q;
    if (!current) /* fell off the tail; wrap to the head */
        current = cpu->proc_run_q;
    start = current;
    do {
        struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
        if (atomic_load (&proc->state) == PROC_READY)
            return proc;
        current = current->next ? current->next : cpu->proc_run_q;
    } while (current != start);
    return NULL; /* nothing runnable */
}
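
/*
 * Two-phase reaper: under proc_tree_lock, unlink every PROC_DEAD process
 * from the global tree onto a private list; then drop the lock and free
 * each collected process with proc_cleanup.
 */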
static void proc_reap (void) {
    struct proc* proc = NULL;
    struct list_node_link* reap_list = NULL;
    spin_lock_ctx_t ctxprtr;
    spin_lock_ctx_t ctxpr;
    spin_lock (&proc_tree_lock, &ctxprtr);
    struct rb_node_link* node;
    rbtree_first (&proc_tree, node);
    while (node) {
        struct rb_node_link* next;
        rbtree_next (node, next);
        proc = rbtree_entry (node, struct proc, proc_tree_link);
        if (atomic_load (&proc->state) == PROC_DEAD) {
            spin_lock (&proc->lock, &ctxpr);
            rbtree_delete (&proc_tree, &proc->proc_tree_link);
            list_append (reap_list, &proc->reap_link);
            spin_unlock (&proc->lock, &ctxpr);
        }
        node = next;
    }
    spin_unlock (&proc_tree_lock, &ctxprtr);
    struct list_node_link *reap_link, *reap_link_tmp;
    list_foreach (reap_list, reap_link, reap_link_tmp) {
        proc = list_entry (reap_link, struct proc, reap_link);
        list_remove (reap_list, &proc->reap_link);
        DEBUG ("cleanup PID %d\n", proc->pid);
        proc_cleanup (proc);
    }
}
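
/*
 * Scheduler entry point, run on the local CPU. Once every SCHED_REAP_FREQ
 * calls it reaps dead processes first; then it either switches to the next
 * runnable process or idles in spin () until the next interrupt.
 */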
void proc_sched (void) {
    spin_lock_ctx_t ctxcpu;
    int s_cycles = atomic_fetch_add (&sched_cycles, 1);
    if (s_cycles % SCHED_REAP_FREQ == 0)
        proc_reap ();
    struct proc* next = NULL;
    struct cpu* cpu = thiscpu;
    spin_lock (&cpu->lock, &ctxcpu);
    next = proc_find_sched (cpu);
    if (next) {
        cpu->proc_current = next;
        /* do_sched takes ownership of cpu->lock; the idle path below
         * has to unlock explicitly. */
        do_sched (next, &cpu->lock, &ctxcpu);
    } else {
        cpu->proc_current = NULL;
        spin_unlock (&cpu->lock, &ctxcpu);
        spin ();
    }
}
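
/*
 * Mark a process dead and pull it off its CPU's run queue, then ask that
 * CPU to reschedule. The process itself is freed later by proc_reap.
 */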
void proc_kill (struct proc* proc) {
    spin_lock_ctx_t ctxpr, ctxcpu;
    struct cpu* cpu = proc->cpu;
    spin_lock (&proc->lock, &ctxpr);
    atomic_store (&proc->state, PROC_DEAD);
    proc->cpu = NULL;
    spin_unlock (&proc->lock, &ctxpr);
    spin_lock (&cpu->lock, &ctxcpu);
    list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
    atomic_fetch_sub (&cpu->proc_run_q_count, 1);
    if (cpu->proc_current == proc)
        cpu->proc_current = NULL;
    spin_unlock (&cpu->lock, &ctxcpu);
    DEBUG ("killed PID %d\n", proc->pid);
    cpu_request_sched (cpu);
}
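
/* IRQ trampoline: the preemption timer and explicit resched requests both land here. */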
static void proc_irq_sched (void* arg, void* regs) {
    (void)arg;
    (void)regs; /* unused */
    proc_sched ();
}
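
/*
 * Bring up scheduling on the boot CPU: attach the scheduler to the
 * preemption-timer and resched-request IRQs, spawn spin.exe and init.exe
 * from the ramdisk, and switch into spin.exe.
 */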
void proc_init (void) {
#if defined(__x86_64__)
    irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
    irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
#endif
    struct proc* spin_proc = proc_spawn_rd ("spin.exe");
    proc_register (spin_proc, thiscpu);
    struct proc* init = proc_spawn_rd ("init.exe");
    proc_register (init, NULL);
    spin_lock_ctx_t ctxcpu;
    spin_lock (&spin_proc->cpu->lock, &ctxcpu);
    do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}