Files
mop3/kernel/proc/proc.c
kamkow1 c8fb575bdd
All checks were successful
Build ISO image / build-and-deploy (push) Successful in 2m7s
Build documentation / build-and-deploy (push) Successful in 39s
Change formatting rules
2026-04-24 01:54:48 +02:00

409 lines
9.8 KiB
C

#include <aux/compiler.h>
#include <aux/elf.h>
#include <desc.h>
#include <fs/vfs.h>
#include <id/id_alloc.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/printf.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/malloc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/reschedule.h>
#include <proc/resource.h>
#include <proc_info.h>
#include <status.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/spin.h>
#if defined(__x86_64__)
#include <amd64/intr_defs.h>
#endif
#define PIDS_MAX 1024
static struct rb_node_link* proc_tree = NULL;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
static struct id_alloc pid_alloc;
/* Hand out a fresh PID from the global PID allocator. */
int proc_alloc_pid(void) {
    return id_alloc(&pid_alloc);
}
/* Return a PID to the global PID allocator for reuse. */
void proc_free_pid(int pid) {
    id_free(&pid_alloc, pid);
}
/* Initialize the global PID allocator with room for PIDS_MAX ids. */
void proc_pid_alloc_init(void) {
    id_alloc_init(&pid_alloc, PIDS_MAX);
}
struct proc* kproc_create(void) {
struct proc* kproc = malloc(sizeof(*kproc));
memset(kproc, 0, sizeof(*kproc));
kproc->lock = SPIN_LOCK_INIT;
kproc->flags |= PROC_KPROC;
kproc->state = PROC_READY;
kproc->pid = proc_alloc_pid();
snprintf(kproc->name, sizeof(kproc->name), "KPROC CPU %u", thiscpu->id);
kproc->procgroup = procgroup_create();
procgroup_attach(kproc->procgroup, kproc);
kproc->exec_pid = -1;
struct reschedule_ctx rctx;
memset(&rctx, 0, sizeof(rctx));
proc_register(kproc, thiscpu, &rctx);
return kproc;
}
/* Fill proc_info[0..count) with a snapshot of every process in the
 * global tree.  Returns the number of entries written.
 *
 * Lock order: proc_tree_lock, then procgroup->lock, then proc->lock.
 * The procgroup pointer is sampled under proc->lock first, then both
 * locks are taken in that order for the actual copy.
 *
 * Fixes vs. original:
 *  - procs inserted via proc_register_partial() have proc->cpu == NULL;
 *    the original dereferenced it unconditionally.  Such entries now
 *    report cpu = -1 ("not yet scheduled").
 *  - pgid is read through the locked local `procgroup`, not through
 *    proc->procgroup again (which could in principle have changed). */
size_t proc_populate_proc_infos(struct proc_info* proc_info, size_t count) {
    uint64_t fpt, fp, fpg;
    if (count > PIDS_MAX) {
        count = PIDS_MAX;
    }
    spin_lock(&proc_tree_lock, &fpt);
    struct rb_node_link* node;
    rbtree_first(&proc_tree, node);
    size_t i = 0;
    while (node != NULL && i < count) {
        /* Grab the successor first so the walk is robust to this node. */
        struct rb_node_link* next;
        rbtree_next(node, next);
        struct proc* proc = rbtree_entry(node, struct proc, proc_tree_link);
        node = next;
        spin_lock(&proc->lock, &fp);
        struct procgroup* procgroup = proc->procgroup;
        spin_unlock(&proc->lock, fp);
        spin_lock(&procgroup->lock, &fpg);
        spin_lock(&proc->lock, &fp);
        struct cpu* cpu = proc->cpu;
        /* Partially registered procs have no CPU yet. */
        proc_info[i].cpu = cpu != NULL ? cpu->id : -1;
        proc_info[i].exec_pid = proc->exec_pid;
        proc_info[i].flags = proc->flags;
        proc_info[i].pid = proc->pid;
        proc_info[i].state = proc->state;
        proc_info[i].pgid = procgroup->pgid;
        /* NOTE(review): assumes proc_info[i].name is at least
         * sizeof(proc->name) bytes — confirm in proc_info.h. */
        memcpy(proc_info[i].name, proc->name, sizeof(proc->name));
        spin_unlock(&proc->lock, fp);
        spin_unlock(&procgroup->lock, fpg);
        i++;
    }
    spin_unlock(&proc_tree_lock, fpt);
    return i;
}
/* Check that the buffer starts with the ELF magic bytes 0x7F 'E' 'L' 'F'. */
static bool proc_check_elf(uint8_t* elf) {
    static const uint8_t elf_magic[4] = {0x7F, 'E', 'L', 'F'};
    return memcmp(elf, elf_magic, sizeof(elf_magic)) == 0;
}
/* Load the segments of an already-validated ELF64 image into proc's
 * procgroup address space and collect the auxiliary values (entry
 * point, phdr location/size/count) the caller needs.
 *
 * aux is zero-initialized so aux.phdr is 0 when the image carries no
 * PT_PHDR segment — the original returned it indeterminate in that
 * case. */
struct elf_aux proc_load_segments(struct proc* proc, uint8_t* elf) {
    struct elf_aux aux = {0};
    Elf64_Ehdr* ehdr = (Elf64_Ehdr*)elf;
    aux.entry = ehdr->e_entry;
    aux.phnum = ehdr->e_phnum;
    aux.phent = ehdr->e_phentsize;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    for (uint64_t segment = 0; segment < ehdr->e_phnum; segment++) {
        Elf64_Phdr* phdr =
            (Elf64_Phdr*)((uintptr_t)elf + ehdr->e_phoff + (ehdr->e_phentsize * segment));
        switch (phdr->p_type) {
        case PT_PHDR: {
            /* Virtual address of the program header table in the image. */
            aux.phdr = (uint64_t)phdr->p_vaddr;
        } break;
        case PT_LOAD: {
            /* Map page-aligned memory, zero it, then copy the file bytes;
             * p_memsz > p_filesz covers zero-filled regions (.bss). */
            uintptr_t v_addr = align_down(phdr->p_vaddr, PAGE_SIZE);
            uintptr_t off = phdr->p_vaddr - v_addr;
            size_t blks = div_align_up(phdr->p_memsz + off, PAGE_SIZE);
            uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
            if (phdr->p_flags & PF_W)
                pg_flags |= MM_PG_RW;
            uintptr_t p_addr;
            procgroup_map(proc->procgroup, v_addr, blks, pg_flags, &p_addr);
            /* Write through the HHDM direct map — the user mapping may
             * not be visible in the current address space. */
            memset((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
            memcpy((void*)((uintptr_t)hhdm->offset + p_addr + off),
                   (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
        } break;
        case PT_TLS: {
#if defined(__x86_64__)
            if (phdr->p_memsz > 0) {
                /* Build the per-procgroup TLS template.  The extra
                 * sizeof(uintptr_t) presumably reserves the TCB
                 * self-pointer slot — confirm against proc_init_tls. */
                size_t tls_align = phdr->p_align ? phdr->p_align : sizeof(uintptr_t);
                size_t tls_size = align_up(phdr->p_memsz, tls_align);
                size_t tls_total_needed = tls_size + sizeof(uintptr_t);
                size_t blks = div_align_up(tls_total_needed, PAGE_SIZE);
                proc->procgroup->tls.tls_tmpl_pages = blks;
                proc->procgroup->tls.tls_tmpl_size = tls_size;
                proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
                proc->procgroup->tls.tls_tmpl = malloc(blks * PAGE_SIZE);
                memset(proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
                memcpy(proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
                       phdr->p_filesz);
                proc_init_tls(proc);
            }
#endif
        } break;
        default:
            /* Other segment types (PT_NOTE, PT_DYNAMIC, ...) are ignored. */
            break;
        }
    }
    return aux;
}
/* Create a process from the executable at volume:path.
 *
 * VFS calls are issued on behalf of proc1 (normally the kernel proc).
 * The whole file is read into a temporary buffer, validated as ELF,
 * and handed to proc_from_elf(); the new proc's name becomes
 * "volume:path".  Returns NULL on any failure (open, describe, not a
 * regular file, allocation, read, bad ELF, or proc_from_elf failure —
 * the original dereferenced proc_from_elf's result without checking). */
struct proc* proc_from_file(struct proc* proc1, const char* volume, const char* path,
                            struct reschedule_ctx* rctx) {
    struct desc desc;
    int ret;
    if ((ret = vfs_volume_open(proc1, volume, rctx)) < 0)
        return NULL;
    if ((ret = vfs_describe(proc1, rctx, volume, path, &desc)) < 0) {
        vfs_volume_close(proc1, volume, rctx);
        return NULL;
    }
    if (desc.type != FS_FILE) {
        vfs_volume_close(proc1, volume, rctx);
        return NULL;
    }
    uint8_t* temp_buffer = malloc(desc.size);
    if (temp_buffer == NULL) {
        vfs_volume_close(proc1, volume, rctx);
        return NULL;
    }
    if ((ret = vfs_read_file(proc1, rctx, volume, path, temp_buffer, 0, desc.size)) < 0) {
        free(temp_buffer);
        vfs_volume_close(proc1, volume, rctx);
        return NULL;
    }
    /* The volume is no longer needed once the bytes are in memory. */
    vfs_volume_close(proc1, volume, rctx);
    if (!proc_check_elf(temp_buffer)) {
        free(temp_buffer);
        return NULL;
    }
    struct proc* proc = proc_from_elf(temp_buffer);
    free(temp_buffer);
    if (proc == NULL)
        return NULL;
    snprintf(proc->name, sizeof(proc->name), "%s:%s", volume, path);
    return proc;
}
/* Look up a process by PID in the global tree; NULL if not present.
 * The tree lock is held only for the lookup — the returned proc is not
 * pinned in any way. */
struct proc* proc_find_pid(int pid) {
    struct proc* found = NULL;
    uint64_t flags;
    spin_lock(&proc_tree_lock, &flags);
    rbtree_find(struct proc, &proc_tree, pid, found, proc_tree_link, pid);
    spin_unlock(&proc_tree_lock, flags);
    return found;
}
/* Bind proc to a CPU and make it schedulable: insert it into the global
 * PID tree and the CPU's run queue, and record the CPU in rctx so the
 * caller's reschedule pass will poke it.
 *
 * register_cpu == NULL picks the least-loaded CPU via cpu_find_lightest().
 * Lock order: proc_tree_lock -> cpu->lock -> proc->lock. */
void proc_register(struct proc* proc, struct cpu* register_cpu, struct reschedule_ctx* rctx) {
uint64_t fpt, fc, fp;
struct cpu* cpu = register_cpu != NULL ? register_cpu : cpu_find_lightest();
spin_lock(&proc_tree_lock, &fpt);
spin_lock(&cpu->lock, &fc);
spin_lock(&proc->lock, &fp);
proc->cpu = cpu;
rbtree_insert(struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
cpu->proc_run_q_count++;
list_append(cpu->proc_run_q, &proc->cpu_run_q_link);
/* First proc on an idle CPU becomes its current proc immediately. */
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
spin_unlock(&proc->lock, fp);
spin_unlock(&cpu->lock, fc);
spin_unlock(&proc_tree_lock, fpt);
rctx_insert_cpu(rctx, cpu);
}
/* Insert proc into the global PID tree without binding it to any CPU or
 * run queue; note proc->cpu stays NULL until a full proc_register(). */
void proc_register_partial(struct proc* proc) {
    uint64_t tree_flags;
    uint64_t proc_flags;
    spin_lock(&proc_tree_lock, &tree_flags);
    spin_lock(&proc->lock, &proc_flags);
    rbtree_insert(struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
    spin_unlock(&proc->lock, proc_flags);
    spin_unlock(&proc_tree_lock, tree_flags);
}
/* Pick the next runnable user process on cpu's run queue, round-robin,
 * starting just after the currently running proc.  Kernel procs
 * (PROC_KPROC) and non-READY procs are skipped.  Returns NULL when
 * nothing is runnable.  Caller holds cpu->lock. */
static struct proc* proc_find_sched(struct cpu* cpu) {
    uint64_t fp;
    struct list_node_link *current, *start;
    if (!cpu->proc_run_q)
        return NULL;
    /* Start after proc_current to rotate fairly; fall back to the queue
     * head when there is no current proc or it sits at the tail (the
     * original dereferenced proc_current unconditionally). */
    if (cpu->proc_current != NULL && cpu->proc_current->cpu_run_q_link.next)
        current = cpu->proc_current->cpu_run_q_link.next;
    else
        current = cpu->proc_run_q;
    start = current;
    do {
        struct proc* proc = list_entry(current, struct proc, cpu_run_q_link);
        spin_lock(&proc->lock, &fp);
        int state = proc->state;
        if (state == PROC_READY && !(proc->flags & PROC_KPROC)) {
            spin_unlock(&proc->lock, fp);
            return proc;
        }
        spin_unlock(&proc->lock, fp);
        /* Wrap around at the end of the queue. */
        current = current->next;
        if (!current)
            current = cpu->proc_run_q;
    } while (current != start);
    return NULL;
}
/* Spin on this CPU until a runnable user process is found, then make it
 * current.  When `user` is true, cpu->lock is handed to do_sched()
 * still held — presumably do_sched releases it as part of the context
 * switch (confirm in sys/sched.h); otherwise the lock is dropped here
 * and the caller resumes. */
void proc_sched(bool user) {
struct proc* next = NULL;
struct cpu* cpu = thiscpu;
uint64_t fc;
retry:
spin_lock(&cpu->lock, &fc);
next = proc_find_sched(cpu);
if (next) {
cpu->proc_current = next;
if (user)
do_sched(next, &cpu->lock);
else
spin_unlock(&cpu->lock, fc);
} else {
/* Nothing runnable: drop the lock, relax the core, and retry. */
spin_unlock(&cpu->lock, fc);
spin_lock_relax();
goto retry;
}
}
/* Remove a user process from scheduling and the global PID tree, then
 * release its resources via proc_cleanup().  Kernel procs (PROC_KPROC)
 * are refused.  Its CPU is recorded in rctx so the caller's reschedule
 * pass will poke it. */
void proc_kill(struct proc* proc, struct reschedule_ctx* rctx) {
uint64_t fp, fc, fpt;
spin_lock(&proc->lock, &fp);
if ((proc->flags & PROC_KPROC)) {
spin_unlock(&proc->lock, fp);
return;
}
/* Sample proc->cpu first so the locks can then be taken in the global
 * order proc_tree_lock -> cpu->lock -> proc->lock.
 * NOTE(review): proc->cpu could in principle change between the sample
 * and the re-lock — confirm procs cannot migrate concurrently. */
struct cpu* cpu = proc->cpu;
spin_unlock(&proc->lock, fp);
spin_lock(&proc_tree_lock, &fpt);
spin_lock(&cpu->lock, &fc);
spin_lock(&proc->lock, &fp);
list_remove(cpu->proc_run_q, &proc->cpu_run_q_link);
cpu->proc_run_q_count--;
rbtree_delete(&proc_tree, &proc->proc_tree_link);
spin_unlock(&proc->lock, fp);
spin_unlock(&cpu->lock, fc);
spin_unlock(&proc_tree_lock, fpt);
DEBUG("killed PID %d\n", proc->pid);
proc_cleanup(proc, rctx);
rctx_insert_cpu(rctx, cpu);
}
/* Suspend proc on wait_proc's done_sq sleep queue — presumably woken
 * when wait_proc finishes (confirm against the sq wake path). */
void proc_wait_for(struct proc* proc, struct reschedule_ctx* rctx, struct proc* wait_proc) {
    uint64_t flags;
    spin_lock(&wait_proc->lock, &flags);
    proc_sq_suspend(proc, &wait_proc->done_sq, NULL, 0, rctx, NULL, NULL);
    spin_unlock(&wait_proc->lock, flags);
}
/* Timer/IRQ scheduling hook: mark this CPU in rctx so a reschedule pass
 * runs on it.  arg, regs and user are part of the IRQ handler signature
 * but unused here; the original also cast rctx to void despite using it. */
void proc_irq_sched(void* arg, void* regs, bool user, struct reschedule_ctx* rctx) {
    (void)arg, (void)regs, (void)user;
    rctx_insert_cpu(rctx, thiscpu);
}
/* Bring up userspace: load "/spin" (idle proc) and "/init" from the
 * "sys" volume, register both on the boot CPU, then context-switch into
 * /spin with its CPU's lock held (handed off to do_sched).
 * NOTE(review): proc_from_file can return NULL (missing file, bad ELF,
 * OOM); both results are used unchecked here — a failure would fault.
 * NOTE(review): fc's saved flags are never restored — presumably
 * do_sched does not return; confirm. */
void proc_init(void) {
uint64_t fc;
struct reschedule_ctx rctx;
memset(&rctx, 0, sizeof(rctx));
struct proc* spin_proc = proc_from_file(thiscpu->kproc, "sys", "/spin", &rctx);
proc_register(spin_proc, thiscpu, &rctx);
struct proc* init = proc_from_file(thiscpu->kproc, "sys", "/init", &rctx);
proc_register(init, thiscpu, &rctx);
spin_lock(&spin_proc->cpu->lock, &fc);
do_sched(spin_proc, &spin_proc->cpu->lock);
}