Use a big-lock for kernel synchronization instead of fine-grained locking
All checks were successful
Build ISO image / build-and-deploy (push) Successful in 2m21s
Build documentation / build-and-deploy (push) Successful in 54s

This commit is contained in:
2026-04-27 18:06:02 +02:00
parent 68cdd8d6d2
commit e5ebd7f3ba
56 changed files with 212 additions and 1206 deletions

View File

@@ -16,36 +16,26 @@
#define PGIDS_MAX 1024
static struct rb_node_link* procgroup_tree = NULL;
static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
static struct id_alloc pgid_alloc;
/* One-time setup of the process-group id allocator; ids range up to PGIDS_MAX. */
void procgroup_pgid_alloc_init(void) {
    id_alloc_init(&pgid_alloc, PGIDS_MAX);
}
/* Look up a process group by its pgid in the global rb-tree.
 * Returns the matching group, or NULL when no group has that pgid.
 * NOTE(review): the pointer is returned after the tree lock is dropped —
 * presumably the caller guarantees the group stays alive; verify at call sites. */
struct procgroup* procgroup_find(int pgid) {
    uint64_t irq_flags;
    struct procgroup* found = NULL;
    /* The tree lock serializes lookups against concurrent insert/delete. */
    spin_lock(&procgroup_tree_lock, &irq_flags);
    rbtree_find(struct procgroup, &procgroup_tree, pgid, found, procgroup_tree_link, pgid);
    spin_unlock(&procgroup_tree_lock, irq_flags);
    return found;
}
uintptr_t procgroup_map(struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr) {
uint64_t fpg;
spin_lock(&procgroup->lock, &fpg);
vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
struct proc_mapping* mapping = malloc(sizeof(*mapping));
if (mapping == NULL) {
spin_unlock(&procgroup->lock, fpg);
return 0;
}
@@ -53,7 +43,6 @@ uintptr_t procgroup_map(struct procgroup* procgroup, uintptr_t vaddr, size_t pag
if (paddr == PMM_ALLOC_ERR) {
free(mapping);
spin_unlock(&procgroup->lock, fpg);
return 0;
}
@@ -73,13 +62,10 @@ uintptr_t procgroup_map(struct procgroup* procgroup, uintptr_t vaddr, size_t pag
mm_map_page(&procgroup->pd, ppage, vpage, flags);
}
spin_unlock(&procgroup->lock, fpg);
return vaddr;
}
bool procgroup_unmap(struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
uint64_t fpg;
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;
@@ -91,8 +77,6 @@ bool procgroup_unmap(struct procgroup* procgroup, uintptr_t start_vaddr, size_t
if (tail_mapping == NULL)
return false;
spin_lock(&procgroup->lock, &fpg);
list_foreach(procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry(mapping_link, struct proc_mapping, proc_mappings_link);
@@ -144,14 +128,10 @@ bool procgroup_unmap(struct procgroup* procgroup, uintptr_t start_vaddr, size_t
mm_unmap_page(&procgroup->pd, vpage);
}
spin_unlock(&procgroup->lock, fpg);
return true;
}
struct procgroup* procgroup_create(void) {
uint64_t fpgt;
struct procgroup* procgroup = malloc(sizeof(*procgroup));
if (procgroup == NULL) {
return NULL;
@@ -173,7 +153,6 @@ struct procgroup* procgroup_create(void) {
}
procgroup->memb_proc_tree = NULL;
procgroup->lock = SPIN_LOCK_INIT;
procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys();
procgroup->map_base = PROC_MAP_BASE;
@@ -195,38 +174,20 @@ struct procgroup* procgroup_create(void) {
return NULL;
}
spin_lock(&procgroup_tree_lock, &fpgt);
rbtree_insert(struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
procgroup_tree_link, pgid);
spin_unlock(&procgroup_tree_lock, fpgt);
return procgroup;
}
/* Add PROC to PROCGROUP's member tree, keyed by pid.
 * Lock order is group lock, then process lock — the same order used by
 * procgroup_detach, so attach/detach cannot deadlock against each other. */
void procgroup_attach(struct procgroup* procgroup, struct proc* proc) {
    uint64_t group_flags;
    uint64_t proc_flags;
    spin_lock(&procgroup->lock, &group_flags);
    spin_lock(&proc->lock, &proc_flags);
    rbtree_insert(struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
                  procgroup_memb_tree_link, pid);
    /* Release in reverse acquisition order. */
    spin_unlock(&proc->lock, proc_flags);
    spin_unlock(&procgroup->lock, group_flags);
}
static void procgroup_delete(struct procgroup* procgroup, struct reschedule_ctx* rctx) {
uint64_t fpg, fpgt;
spin_lock(&procgroup_tree_lock, &fpgt);
spin_lock(&procgroup->lock, &fpg);
rbtree_delete(&procgroup_tree, &procgroup->procgroup_tree_link);
spin_unlock(&procgroup->lock, fpg);
spin_unlock(&procgroup_tree_lock, fpgt);
/* delete resources */
struct rb_node_link* rnode;
rbtree_first(&procgroup->resource_tree, rnode);
@@ -264,17 +225,9 @@ static void procgroup_delete(struct procgroup* procgroup, struct reschedule_ctx*
}
void procgroup_detach(struct procgroup* procgroup, struct proc* proc, struct reschedule_ctx* rctx) {
uint64_t fpg, fp;
spin_lock(&procgroup->lock, &fpg);
spin_lock(&proc->lock, &fp);
rbtree_delete(&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
struct rb_node_link* memb_tree = procgroup->memb_proc_tree;
spin_unlock(&proc->lock, fp);
spin_unlock(&procgroup->lock, fpg);
if (memb_tree == NULL) {
procgroup_delete(procgroup, rctx);
}