Clean up AMD64 memory management code, remove dependency on pd.lock
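
procgroup_map() and procgroup_unmap() no longer take the page directory's own pd.lock: the group-wide procgroup->lock now covers the whole map/unmap operation, so struct procgroup's pd.lock field and its initialization in procgroup_create() are gone. Along the way, mm_unmap_page() drops its flags argument, the mapping base moves from the global PROC_MAP_BASE constant to a per-group map_base field advanced under the lock, and the error paths in procgroup_map() now release the lock before returning.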

Commit a3b62ebd3d (parent 8bda300f6a), 2026-01-27 19:03:03 +01:00
14 changed files with 104 additions and 178 deletions


@@ -15,19 +15,24 @@ static atomic_int pgids = 0;
 uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
                          uintptr_t* out_paddr) {
-    spin_lock_ctx_t ctxprpd;
     spin_lock_ctx_t ctxpg;
-    vaddr = (vaddr == 0) ? PROC_MAP_BASE : vaddr;
     spin_lock (&procgroup->lock, &ctxpg);
+    vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
     struct proc_mapping* mapping = malloc (sizeof (*mapping));
-    if (mapping == NULL)
+    if (mapping == NULL) {
+        spin_unlock (&procgroup->lock, &ctxpg);
         return 0;
+    }
     uintptr_t paddr = pmm_alloc (pages);
     if (paddr == PMM_ALLOC_ERR) {
         free (mapping);
+        spin_unlock (&procgroup->lock, &ctxpg);
         return 0;
     }
@@ -38,9 +43,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
     mapping->vaddr = vaddr;
     mapping->size = pages * PAGE_SIZE;
-    flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manually */
-    spin_lock (&procgroup->pd.lock, &ctxprpd);
+    procgroup->map_base += pages * PAGE_SIZE;
     list_append (procgroup->mappings, &mapping->proc_mappings_link);
@@ -49,7 +52,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
         mm_map_page (&procgroup->pd, ppage, vpage, flags);
     }
-    spin_unlock (&procgroup->pd.lock, &ctxprpd);
     spin_unlock (&procgroup->lock, &ctxpg);
     return vaddr;
 }
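
The map path after this hunk follows a plain single-lock discipline: procgroup->lock covers the map_base bump, the mapping list, and the mm_map_page() loop, and every return path drops it. A minimal user-space sketch of the same pattern, with a pthread mutex and a stub allocator standing in for the kernel's spin lock and pmm_alloc() (toy names throughout, not the real API):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u

    /* Toy stand-in for struct procgroup: a single lock guards both the
       mapping bookkeeping (map_base) and the page-table updates. */
    struct group {
        pthread_mutex_t lock;
        uintptr_t map_base;
    };

    /* Stub physical allocator standing in for pmm_alloc(); returns 0 on
       failure instead of the kernel's PMM_ALLOC_ERR sentinel. */
    static uintptr_t stub_pmm_alloc (size_t pages) {
        return (uintptr_t) calloc (pages, PAGE_SIZE);
    }

    /* Mirrors the post-commit shape of procgroup_map(): take the one lock
       up front and release it on every exit path, errors included. */
    static uintptr_t group_map (struct group* g, size_t pages) {
        pthread_mutex_lock (&g->lock);
        uintptr_t paddr = stub_pmm_alloc (pages);
        if (paddr == 0) {
            pthread_mutex_unlock (&g->lock); /* error path unlocks too */
            return 0;
        }
        uintptr_t vaddr = g->map_base;
        g->map_base += pages * PAGE_SIZE;    /* bump the base under the same lock */
        /* the real code would mm_map_page() each page here, still locked */
        pthread_mutex_unlock (&g->lock);
        return vaddr;
    }

    int main (void) {
        struct group g = { PTHREAD_MUTEX_INITIALIZER, 0x400000 };
        printf ("first  mapping at %#lx\n", (unsigned long) group_map (&g, 4));
        printf ("second mapping at %#lx\n", (unsigned long) group_map (&g, 2));
        return 0;
    }

Folding the page-table updates under the one group lock trades a little concurrency for one fewer lock order to get wrong, which is presumably the point of dropping pd.lock.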
@@ -57,15 +60,17 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
 bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
     size_t unmap_size = pages * PAGE_SIZE;
     uintptr_t end_vaddr = start_vaddr + unmap_size;
     struct list_node_link *mapping_link, *mapping_link_tmp;
     bool used_tail_mapping = false;
-    spin_lock_ctx_t ctxprpd;
+    spin_lock_ctx_t ctxpg;
     struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
     if (tail_mapping == NULL)
         return false;
-    spin_lock (&procgroup->pd.lock, &ctxprpd);
+    spin_lock (&procgroup->lock, &ctxpg);
     list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
         struct proc_mapping* mapping =
@@ -115,10 +120,10 @@ bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
         free (tail_mapping);
     for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
-        mm_unmap_page (&procgroup->pd, vpage, 0);
+        mm_unmap_page (&procgroup->pd, vpage);
     }
-    spin_unlock (&procgroup->pd.lock, &ctxprpd);
+    spin_unlock (&procgroup->lock, &ctxpg);
     return true;
 }
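
Worth noting in procgroup_unmap(): tail_mapping (presumably the record for the tail half when an unmap splits an existing mapping, per the used_tail_mapping flag) is still malloc'd before procgroup->lock is taken, so the allocator is never entered while the spin lock is held, and an allocation failure returns without ever locking. The per-page loop then uses the new two-argument mm_unmap_page().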
@@ -135,8 +140,8 @@ struct procgroup* procgroup_create (void) {
     procgroup->memb_proc_tree = NULL;
     procgroup->lock = SPIN_LOCK_INIT;
     procgroup->pgid = atomic_fetch_add (&pgids, 1);
-    procgroup->pd.lock = SPIN_LOCK_INIT;
     procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
+    procgroup->map_base = PROC_MAP_BASE;
     rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
     rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
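
With pd.lock gone, procgroup_create() initializes only the group-level lock; map_base starts at PROC_MAP_BASE, which is the fallback procgroup_map() uses when called with vaddr == 0.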