mop3/kernel/amd64/proc.c
kamkow1 711da8aeab
Implement proc_spawn_thread syscall, fix proc_resume and proc_suspend
2026-01-16 00:26:37 +01:00


#include <amd64/gdt.h>
#include <aux/elf.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <stdatomic.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>

/* Next PID to hand out; starts at 1 so PID 0 is never allocated. */
static atomic_int pids = 1;

/* Build a fresh process from an in-memory ELF image: new address space,
 * kernel and user stacks, and an initial user-mode register frame. */
struct proc* proc_from_elf (uint8_t* elf_contents) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;
    memset (proc, 0, sizeof (*proc));
    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);

    proc->pd = malloc (sizeof (*proc->pd));
    if (proc->pd == NULL) {
        free (proc);
        return NULL;
    }
    proc->sys_rids = malloc (sizeof (*proc->sys_rids));
    if (proc->sys_rids == NULL) {
        free (proc->pd);
        free (proc);
        return NULL;
    }
    proc->sys_rids->counter = 0;
    proc->sys_rids->refs = 1;

    proc->pd->lock = SPIN_LOCK_INIT;
    proc->pd->refs = 1;
    proc->pd->cr3_paddr = mm_alloc_user_pd_phys ();
    if (proc->pd->cr3_paddr == 0) {
        free (proc->sys_rids);
        free (proc->pd);
        free (proc);
        return NULL;
    }

    /* Kernel stack, accessed through the HHDM; kernel_stack points at the top. */
    int kstk_rid = atomic_fetch_add (&proc->sys_rids->counter, 1);
    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE};
    struct proc_resource* kstk_r =
        proc_create_resource (proc, kstk_rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
    if (kstk_r == NULL) {
        pmm_free (proc->pd->cr3_paddr, 1);
        free (proc->sys_rids);
        free (proc->pd);
        free (proc);
        return NULL;
    }
    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;

    /* User stack, mapped just below PROC_USTACK_TOP in the new address space. */
    int ustk_rid = atomic_fetch_add (&proc->sys_rids->counter, 1);
    struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE};
    struct proc_resource* ustk_r =
        proc_create_resource (proc, ustk_rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
    if (ustk_r == NULL) {
        kstk_r->ops.cleanup (proc, kstk_r);
        free (kstk_r);
        pmm_free (proc->pd->cr3_paddr, 1);
        free (proc->sys_rids);
        free (proc->pd);
        free (proc);
        return NULL;
    }
    proc->pdata.user_stack = ustk_r->u.mem.paddr;
    proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
              MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
    proc->flags |= PROC_USTK_PREALLOC;

    /* Load the ELF segments and set up the initial user-mode register frame. */
    struct elf_aux aux = proc_load_segments (proc, elf_contents);
    proc->pdata.regs.ss = GDT_UDATA | 0x03;
    proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
    proc->pdata.regs.rflags = 0x202; /* IF set, reserved bit 1 set */
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = aux.entry;
    return proc;
}
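
/*
 * Usage sketch (not part of the build): spawning the initial user process
 * from an ELF image handed over by the bootloader. `module_base` and
 * `sched_enqueue` are assumed names, not defined in this file.
 */
#if 0
static void example_spawn_init (uint8_t* module_base) {
    struct proc* init = proc_from_elf (module_base);
    if (init == NULL) {
        DEBUG ("failed to create init process\n");
        return;
    }
    /* Hand the ready process to the scheduler (hypothetical helper). */
    sched_enqueue (init);
}
#endif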

/* Spawn a thread that shares the prototype's address space and resource
 * table. The caller provides an already-mapped user stack; only a fresh
 * kernel stack is allocated here. */
struct proc* proc_spawn_thread (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
                                uintptr_t entry) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprt;

    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;
    memset (proc, 0, sizeof (*proc));
    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);

    /* Share the page directory, mappings and resource tree with proto. */
    spin_lock (&proto->lock, &ctxprt);
    proc->pd = proto->pd;
    proc->mappings = proto->mappings;
    atomic_fetch_add (&proto->pd->refs, 1);
    proc->resource_tree = proto->resource_tree;
    proc->sys_rids = proto->sys_rids;
    atomic_fetch_add (&proc->sys_rids->refs, 1);
    spin_unlock (&proto->lock, &ctxprt);

    /* The user stack must already be mapped in the shared address space. */
    uintptr_t vstack_bottom = vstack_top - stack_size;
    uintptr_t pstack_bottom = mm_v2p (proc->pd, vstack_bottom, MM_PD_LOCK);
    if (pstack_bottom == 0) {
        atomic_fetch_sub (&proc->pd->refs, 1);
        atomic_fetch_sub (&proc->sys_rids->refs, 1);
        free (proc);
        return NULL;
    }

    int kstk_rid = atomic_fetch_add (&proc->sys_rids->counter, 1);
    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE};
    struct proc_resource* kstk_r =
        proc_create_resource (proc, kstk_rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
    if (kstk_r == NULL) {
        atomic_fetch_sub (&proc->pd->refs, 1);
        atomic_fetch_sub (&proc->sys_rids->refs, 1);
        free (proc);
        return NULL;
    }
    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
    proc->pdata.user_stack = pstack_bottom + stack_size;

    proc->pdata.regs.ss = GDT_UDATA | 0x03;
    proc->pdata.regs.rsp = (uint64_t)vstack_top;
    proc->pdata.regs.rflags = 0x202;
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = (uint64_t)entry;
    return proc;
}
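
/*
 * Usage sketch (not part of the build): how a proc_spawn_thread syscall
 * handler might call into this function. `curr_proc` and `sched_enqueue`
 * are assumed names, not defined in this file.
 */
#if 0
static long sys_spawn_thread (uintptr_t vstack_top, size_t stack_size, uintptr_t entry) {
    /* The caller must have mapped [vstack_top - stack_size, vstack_top)
     * in its own address space beforehand; proc_spawn_thread checks this
     * via mm_v2p and fails otherwise. */
    struct proc* thread = proc_spawn_thread (curr_proc (), vstack_top, stack_size, entry);
    if (thread == NULL)
        return -1;
    sched_enqueue (thread);
    return thread->pid;
}
#endif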

/* Tear down a process or thread. The shared address space is destroyed
 * only when the last reference to the page directory is dropped. */
void proc_cleanup (struct proc* proc) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprpd;

    proc_cleanup_resources (proc);

    /* Last proc sharing this address space frees the mappings and the PD. */
    if (atomic_fetch_sub (&proc->pd->refs, 1) == 1) {
        DEBUG ("PID %d Free virtual address space\n", proc->pid);
        struct list_node_link *mapping_link, *mapping_link_tmp;
        spin_lock (&proc->pd->lock, &ctxprpd);
        list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
            struct proc_mapping* mapping =
                list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
            list_remove (proc->mappings, mapping_link);
            free (mapping);
        }
        pmm_free (proc->pd->cr3_paddr, 1);
        spin_unlock (&proc->pd->lock, &ctxprpd);
        free (proc->pd);
    }

    /* kernel_stack points at the top of the HHDM mapping; convert back to
     * the physical base before freeing. */
    pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
              KSTACK_SIZE / PAGE_SIZE);
    /* The user stack is owned only by procs created via proc_from_elf. */
    if ((proc->flags & PROC_USTK_PREALLOC))
        pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);
    DEBUG ("PID %d Free stacks\n", proc->pid);
    free (proc);
}