#include <amd64/gdt.h>
#include <amd64/proc.h>
#include <aux/elf.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>

/* PID 0 is reserved for kpproc */
static atomic_int pids = 1;

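/*
 * Build a new user process from an in-memory ELF image: allocate the proc
 * structure, a private resource table and page directory, kernel and user
 * stacks, map the user stack just below PROC_USTACK_TOP, load the ELF
 * segments, and prime the saved register frame so the scheduler can enter
 * the image at its entry point in ring 3. Returns NULL on failure.
 */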
struct proc* proc_from_elf (uint8_t* elf_contents) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    int rid;

    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;

    memset (proc, 0, sizeof (*proc));

    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);

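    /* per-process resource table: an empty rbtree with a single reference
       held by this proc */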
    proc->resources = malloc (sizeof (*proc->resources));
    if (proc->resources == NULL) {
        free (proc);
        return NULL;
    }
    proc->resources->tree = NULL;
    proc->resources->lock = RW_SPIN_LOCK_INIT;
    proc->resources->refs = 1;
    proc->resources->sys_rids = 0;

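    /* private virtual address space: a fresh top-level page table for CR3 */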
    proc->pd = malloc (sizeof (*proc->pd));
    if (proc->pd == NULL) {
        free (proc->resources);
        free (proc);
        return NULL;
    }

    proc->pd->lock = SPIN_LOCK_INIT;
    proc->pd->refs = 1;
    proc->pd->cr3_paddr = mm_alloc_user_pd_phys ();
    if (proc->pd->cr3_paddr == 0) {
        free (proc->pd);
        free (proc->resources);
        free (proc);
        return NULL;
    }

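    /* kernel stack: unmanaged physical pages tracked as a PR_MEM resource;
       pdata.kernel_stack points at the stack top through the HHDM */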
    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
                                                   .managed = false};
    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
    struct proc_resource* kstk_r =
        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
    if (kstk_r == NULL) {
        pmm_free (proc->pd->cr3_paddr, 1);
        free (proc->pd);
        free (proc->resources);
        free (proc);
        return NULL;
    }

    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;

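    /* user stack: preallocated physical pages mapped just below
       PROC_USTACK_TOP in the new address space */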
    struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE,
                                                   .managed = false};
    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
    struct proc_resource* ustk_r =
        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
    if (ustk_r == NULL) {
        kstk_r->ops.cleanup (proc, kstk_r);
        free (kstk_r);
        pmm_free (proc->pd->cr3_paddr, 1);
        free (proc->pd);
        free (proc->resources);
        free (proc);
        return NULL;
    }

    proc->pdata.user_stack = ustk_r->u.mem.paddr;

    proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
              MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);

    proc->flags |= PROC_USTK_PREALLOC;

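    /* map the ELF segments into the new address space; aux carries the
       entry point */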
    struct elf_aux aux = proc_load_segments (proc, elf_contents);

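    /* initial register frame: ring-3 code/data selectors, IF set in RFLAGS
       (0x202), stack at PROC_USTACK_TOP, entry from the ELF header */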
    proc->pdata.regs.ss = GDT_UDATA | 0x03;
    proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
    proc->pdata.regs.rflags = 0x202;
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = aux.entry;

    return proc;
}

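/*
 * Clone a process as a new schedulable entity that shares the prototype's
 * address space, mappings and resource table (thread-style). The caller
 * supplies the top of a user stack that is already mapped in the shared
 * address space and the entry point to start at; only the kernel stack is
 * allocated privately. Returns NULL on failure.
 */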
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
                         uintptr_t entry) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprt, ctxrs;
    int rid;

    struct proc* proc = malloc (sizeof (*proc));
    if (proc == NULL)
        return NULL;

    memset (proc, 0, sizeof (*proc));

    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);

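    /* share the prototype's address space and resource table: take a
       reference on the page directory and on every resource in the tree
       while holding the prototype's lock */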
    spin_lock (&proto->lock, &ctxprt);

    proc->pd = proto->pd;
    proc->mappings = proto->mappings;
    atomic_fetch_add (&proto->pd->refs, 1);

    proc->resources = proto->resources;

    rw_spin_write_lock (&proc->resources->lock, &ctxrs);

    atomic_fetch_add (&proc->resources->refs, 1);

    struct rb_node_link* rnode;
    rbtree_first (&proc->resources->tree, rnode);
    while (rnode) {
        struct rb_node_link* next;
        rbtree_next (rnode, next);

        struct proc_resource* resource =
            rbtree_entry (rnode, struct proc_resource, local_resource_tree_link);
        atomic_fetch_add (&resource->refs, 1);

        rnode = next;
    }

    rw_spin_write_unlock (&proc->resources->lock, &ctxrs);

    spin_unlock (&proto->lock, &ctxprt);

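    /* the caller-provided user stack must already be mapped in the shared
       address space; resolve its physical address for pdata */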
    uintptr_t vstack_bottom = vstack_top - stack_size;

    uintptr_t pstack_bottom = mm_v2p (proc->pd, vstack_bottom, MM_PD_LOCK);
    if (pstack_bottom == 0) {
        free (proc);
        return NULL;
    }

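    /* private kernel stack for the clone; pdata.kernel_stack points at the
       stack top through the HHDM */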
    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
                                                   .managed = false};
    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
    struct proc_resource* kstk_r =
        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
    if (kstk_r == NULL) {
        free (proc);
        return NULL;
    }

    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;

    proc->pdata.user_stack = pstack_bottom + stack_size;

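    /* initial register frame: ring-3 selectors, IF set in RFLAGS (0x202),
       stack and entry point as supplied by the caller */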
    proc->pdata.regs.ss = GDT_UDATA | 0x03;
    proc->pdata.regs.rsp = (uint64_t)vstack_top;
    proc->pdata.regs.rflags = 0x202;
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = (uint64_t)entry;

    return proc;
}

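/*
 * Tear down a process: detach it from any suspension queues it is parked
 * on, release its resources, drop the shared page directory (freeing the
 * mappings and the top-level table once the last reference is gone), free
 * the kernel stack and, if it was preallocated here, the user stack, and
 * finally free the proc structure itself.
 */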
void proc_cleanup (struct proc* proc) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprpd, ctxsq, ctxpr;

    spin_lock (&proc->lock, &ctxpr);

    /* clean suspension queue entries */
    struct list_node_link *sq_link, *sq_link_tmp;
    list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
        struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
        struct proc_suspension_q* sq = sq_entry->sq;

        spin_lock (&sq->lock, &ctxsq);

        /* remove from sq's list */
        list_remove (sq->proc_list, &sq_entry->sq_link);

        /* remove from proc's list */
        list_remove (proc->sq_entries, &sq_entry->proc_link);

        spin_unlock (&sq->lock, &ctxsq);

        free (sq_entry);
    }

    spin_unlock (&proc->lock, &ctxpr);

    /* clean resources */
    proc_cleanup_resources (proc);

    /* clean virtual address space */
    if (atomic_fetch_sub (&proc->pd->refs, 1) == 1) {
        DEBUG ("PID %d Free virtual address space\n", proc->pid);
        struct list_node_link *mapping_link, *mapping_link_tmp;
        spin_lock (&proc->pd->lock, &ctxprpd);

        list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
            struct proc_mapping* mapping =
                list_entry (mapping_link, struct proc_mapping, proc_mappings_link);

            list_remove (proc->mappings, mapping_link);
            free (mapping);
        }

        pmm_free (proc->pd->cr3_paddr, 1);
        spin_unlock (&proc->pd->lock, &ctxprpd);
        free (proc->pd);
    }

    /* clean kstack */
    pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
              KSTACK_SIZE / PAGE_SIZE);

    /* clean ustack (only when it was preallocated by proc_from_elf; clones
       use memory owned by the shared address space) */
    if (proc->flags & PROC_USTK_PREALLOC)
        pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);

    DEBUG ("PID %d Free stacks\n", proc->pid);

    /* clean the process */
    free (proc);
}