Organize resources into process groups
@@ -9,7 +9,6 @@
 struct pd {
     spin_lock_t lock;
     uintptr_t cr3_paddr;
-    atomic_int refs;
 };

 void amd64_load_kernel_cr3 (void);
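The procgroup type itself is not among the captured hunks. Judging from how it is used below (procgroup_create/attach/detach, procgroup_get_sys_rid, and proc->procgroup->pd.cr3_paddr), the group appears to own the shared page directory plus the resource bookkeeping that previously hung off each struct proc. A minimal sketch of such a layout, with every field name an assumption rather than the commit's actual definition:

    /* Sketch only: inferred from call sites in this diff, not taken from the commit. */
    struct procgroup {
        spin_lock_t lock;                    /* assumed: protects group membership */
        atomic_int refs;                     /* assumed: one reference per attached proc */
        struct pd pd;                        /* shared address space; pd.cr3_paddr is loaded in do_sched() */
        struct rb_node_link* resource_tree;  /* assumed: shared resources keyed by rid */
        rw_spin_lock_t resource_lock;        /* assumed lock type from <sync/rw_spin_lock.h> */
        atomic_int sys_rids;                 /* assumed counter behind procgroup_get_sys_rid() */
    };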
@@ -9,6 +9,7 @@
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
@@ -31,59 +32,31 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
     atomic_store (&proc->state, PROC_READY);
     proc->pid = atomic_fetch_add (&pids, 1);

-    proc->resources = malloc (sizeof (*proc->resources));
-    if (proc->resources == NULL) {
+    proc->procgroup = procgroup_create ();
+    if (proc->procgroup == NULL) {
         free (proc);
         return NULL;
     }
-    proc->resources->tree = NULL;
-    proc->resources->lock = RW_SPIN_LOCK_INIT;
-    proc->resources->refs = 1;
-    proc->resources->sys_rids = 0;
+    procgroup_attach (proc->procgroup, proc);

-    proc->pd = malloc (sizeof (*proc->pd));
-    if (proc->pd == NULL) {
-        free (proc->resources);
-        free (proc);
-        return NULL;
-    }
-
-    proc->pd->lock = SPIN_LOCK_INIT;
-    proc->pd->refs = 1;
-    proc->pd->cr3_paddr = mm_alloc_user_pd_phys ();
-    if (proc->pd->cr3_paddr == 0) {
-        free (proc->pd);
-        free (proc->resources);
-        free (proc);
-        return NULL;
-    }

-    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
-                                                   .managed = false};
-    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
+    rid = procgroup_get_sys_rid (proc->procgroup);
     struct proc_resource* kstk_r =
-        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
+        proc_create_resource_mem (proc->procgroup, rid, KSTACK_SIZE / PAGE_SIZE, 0, false);
     if (kstk_r == NULL) {
-        pmm_free (proc->pd->cr3_paddr, 1);
-        free (proc->pd);
-        free (proc->resources);
+        procgroup_detach (proc->procgroup, proc);
         free (proc);
         return NULL;
     }

     proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;

-    struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE,
-                                                   .managed = false};
-    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
+    rid = procgroup_get_sys_rid (proc->procgroup);
     struct proc_resource* ustk_r =
-        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
+        proc_create_resource_mem (proc->procgroup, rid, USTACK_SIZE / PAGE_SIZE, 0, false);
     if (ustk_r == NULL) {
-        kstk_r->ops.cleanup (proc, kstk_r);
+        kstk_r->ops.cleanup (kstk_r);
         free (kstk_r);
-        pmm_free (proc->pd->cr3_paddr, 1);
-        free (proc->pd);
-        free (proc->resources);
+        procgroup_detach (proc->procgroup, proc);
         free (proc);
         return NULL;
     }
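In proc_from_elf the per-process sys_rids counter and the generic proc_create_resource call are gone; resource ids and the stack memory resources now come from the group. Neither helper body is visible in this diff. Assuming the group simply carries the atomic counter that used to live in proc->resources, procgroup_get_sys_rid could be as small as:

    /* Sketch under the assumed procgroup layout above; not the commit's implementation. */
    int procgroup_get_sys_rid (struct procgroup* pg) {
        /* hand out the next group-wide system resource id */
        return atomic_fetch_add (&pg->sys_rids, 1);
    }

The proc_create_resource_mem signature is inferred purely from its call sites here; its fourth argument is passed as 0 in both places, and whether that is a physical-address hint or a flags word is not visible in this diff.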
@@ -109,7 +82,7 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
 struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
                          uintptr_t entry) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    spin_lock_ctx_t ctxprt, ctxrs;
+    spin_lock_ctx_t ctxprt;
     int rid;

     struct proc* proc = malloc (sizeof (*proc));
@@ -124,47 +97,25 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_

     spin_lock (&proto->lock, &ctxprt);

-    proc->pd = proto->pd;
-    proc->mappings = proto->mappings;
-    atomic_fetch_add (&proto->pd->refs, 1);
-
-    proc->resources = proto->resources;
-
-    rw_spin_write_lock (&proc->resources->lock, &ctxrs);
-
-    atomic_fetch_add (&proc->resources->refs, 1);
-
-    struct rb_node_link* rnode;
-    rbtree_first (&proc->resources->tree, rnode);
-    while (rnode) {
-        struct rb_node_link* next;
-        rbtree_next (rnode, next);
-
-        struct proc_resource* resource =
-            rbtree_entry (rnode, struct proc_resource, local_resource_tree_link);
-        atomic_fetch_add (&resource->refs, 1);
-
-        rnode = next;
-    }
-
-    rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
+    proc->procgroup = proto->procgroup;
+    procgroup_attach (proc->procgroup, proc);

     spin_unlock (&proto->lock, &ctxprt);

     uintptr_t vstack_bottom = vstack_top - stack_size;

-    uintptr_t pstack_bottom = mm_v2p (proc->pd, vstack_bottom, MM_PD_LOCK);
+    uintptr_t pstack_bottom = mm_v2p (&proc->procgroup->pd, vstack_bottom, MM_PD_LOCK);
     if (pstack_bottom == 0) {
+        procgroup_detach (proc->procgroup, proc);
         free (proc);
         return NULL;
     }

-    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
-                                                   .managed = false};
-    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
+    rid = procgroup_get_sys_rid (proc->procgroup);
     struct proc_resource* kstk_r =
-        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
+        proc_create_resource_mem (proc->procgroup, rid, KSTACK_SIZE / PAGE_SIZE, 0, false);
     if (kstk_r == NULL) {
+        procgroup_detach (proc->procgroup, proc);
         free (proc);
         return NULL;
     }
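proc_clone no longer duplicates bookkeeping: the resource-tree walk that bumped every resource's refcount is dropped, and the clone simply joins the prototype's group, whose own lifetime tracking is expected to cover the shared state. The attach helper is not shown in this diff; a hypothetical sketch of what it might amount to, using the assumed fields from the earlier sketch:

    /* Hypothetical sketch; the real procgroup_attach is not part of this diff. */
    void procgroup_attach (struct procgroup* pg, struct proc* proc) {
        spin_lock_ctx_t ctx;

        spin_lock (&pg->lock, &ctx);
        /* presumably also links proc into a member list here (API not visible in this diff) */
        atomic_fetch_add (&pg->refs, 1);   /* keep the group alive while proc is attached */
        spin_unlock (&pg->lock, &ctx);
    }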
@@ -183,8 +134,7 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_
 }

 void proc_cleanup (struct proc* proc) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    spin_lock_ctx_t ctxprpd, ctxsq, ctxpr;
+    spin_lock_ctx_t ctxsq, ctxpr;

     spin_lock (&proc->lock, &ctxpr);
@@ -209,37 +159,26 @@ void proc_cleanup (struct proc* proc) {

     spin_unlock (&proc->lock, &ctxpr);

     /* clean resources */
-    proc_cleanup_resources (proc);
+    procgroup_detach (proc->procgroup, proc);

     /* clean virtual address space */
-    if (atomic_fetch_sub (&proc->pd->refs, 1) == 1) {
-        DEBUG ("PID %d Free virtual address space\n", proc->pid);
-        struct list_node_link *mapping_link, *mapping_link_tmp;
-        spin_lock (&proc->pd->lock, &ctxprpd);
+    /* if (atomic_fetch_sub (&proc->pd->refs, 1) == 1) { */
+    /* DEBUG ("PID %d Free virtual address space\n", proc->pid); */
+    /* struct list_node_link *mapping_link, *mapping_link_tmp; */
+    /* spin_lock (&proc->pd->lock, &ctxprpd); */

-        list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
-            struct proc_mapping* mapping =
-                list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
+    /* list_foreach (proc->mappings, mapping_link, mapping_link_tmp) { */
+    /* struct proc_mapping* mapping = */
+    /* list_entry (mapping_link, struct proc_mapping, proc_mappings_link); */

-            list_remove (proc->mappings, mapping_link);
-            free (mapping);
-        }
+    /* list_remove (proc->mappings, mapping_link); */
+    /* free (mapping); */
+    /* } */

-        pmm_free (proc->pd->cr3_paddr, 1);
-        spin_unlock (&proc->pd->lock, &ctxprpd);
-        free (proc->pd);
-    }

     /* clean kstack */
     pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
               KSTACK_SIZE / PAGE_SIZE);

     /* clean ustack */
     if ((proc->flags & PROC_USTK_PREALLOC))
         pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);

     DEBUG ("PID %d Free stacks\n", proc->pid);
+    /* pmm_free (proc->pd->cr3_paddr, 1); */
+    /* spin_unlock (&proc->pd->lock, &ctxprpd); */
+    /* free (proc->pd); */
+    /* } */

     /* clean the process */
     free (proc);
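With the per-process page-directory teardown commented out, the shared address space is presumably released when the last member leaves the group. procgroup_detach is not shown in this diff either; one way it could look under the same assumed layout, with the mapping-list teardown omitted:

    /* Hypothetical sketch; not the commit's implementation. */
    void procgroup_detach (struct procgroup* pg, struct proc* proc) {
        spin_lock_ctx_t ctx;

        spin_lock (&pg->lock, &ctx);
        /* presumably unlinks proc from the member list here (API not visible in this diff) */
        spin_unlock (&pg->lock, &ctx);

        if (atomic_fetch_sub (&pg->refs, 1) == 1) {
            /* last member gone: free the shared page directory and the group itself */
            pmm_free (pg->pd.cr3_paddr, 1);
            free (pg);
        }
    }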
@@ -18,5 +18,5 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu
     spin_unlock (&proc->lock, &ctxpr);
     spin_unlock (cpu_lock, ctxcpu);

-    amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd->cr3_paddr);
+    amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
 }