Implement lock IRQ nesting via stack variables/contexts

2026-01-14 22:11:56 +01:00
parent 55166f9d5f
commit 270ff507d4
22 changed files with 197 additions and 145 deletions
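What the new API presumably looks like: every spin_lock()/spin_unlock() call (and the rw_spin_* write-side calls) now takes a spin_lock_ctx_t that lives on the caller's stack, so each acquisition saves the IRQ state it found and the matching unlock restores exactly that state, which is what makes nested lock/unlock pairs safe. Below is a minimal sketch under those assumptions; only the names spin_lock_ctx_t, spin_lock and spin_unlock are taken from the diff, while the struct layouts, the flags field and the irq_save_disable/irq_restore helpers are purely illustrative.

#include <stdint.h>

/* Hypothetical per-acquisition context: one per lock/unlock pair,
 * allocated on the caller's stack. */
typedef struct {
    uintptr_t flags; /* IRQ-enable state saved at spin_lock() time */
} spin_lock_ctx_t;

typedef struct {
    volatile int locked;
} spinlock_t;

/* Placeholder arch hooks; a real kernel would save the interrupt flag
 * and disable IRQs here (e.g. pushf + cli / popf on x86). */
static inline uintptr_t irq_save_disable (void) { return 0; }
static inline void irq_restore (uintptr_t flags) { (void) flags; }

void spin_lock (spinlock_t* lock, spin_lock_ctx_t* ctx) {
    /* Each acquisition remembers the IRQ state it found. */
    ctx->flags = irq_save_disable ();
    while (__atomic_exchange_n (&lock->locked, 1, __ATOMIC_ACQUIRE))
        ; /* spin until the lock is free */
}

void spin_unlock (spinlock_t* lock, spin_lock_ctx_t* ctx) {
    __atomic_store_n (&lock->locked, 0, __ATOMIC_RELEASE);
    /* Restore exactly the state saved by the matching spin_lock(),
     * rather than unconditionally re-enabling IRQs. */
    irq_restore (ctx->flags);
}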


@@ -48,6 +48,7 @@ static bool proc_check_elf (uint8_t* elf) {
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags) {
+spin_lock_ctx_t ctxprpd;
struct proc_mapping* mapping = malloc (sizeof (*mapping));
if (mapping == NULL)
@@ -59,7 +60,7 @@ bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr,
flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manually */
-spin_lock (&proc->pd.lock);
+spin_lock (&proc->pd.lock, &ctxprpd);
list_append (proc->mappings, &mapping->proc_mappings_link);
@@ -68,7 +69,7 @@ bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr,
mm_map_page (&proc->pd, ppage, vpage, flags);
}
-spin_unlock (&proc->pd.lock);
+spin_unlock (&proc->pd.lock, &ctxprpd);
return true;
}
@@ -78,12 +79,13 @@ bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false;
+spin_lock_ctx_t ctxprpd;
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;
-spin_lock (&proc->pd.lock);
+spin_lock (&proc->pd.lock, &ctxprpd);
list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
@@ -128,7 +130,7 @@ bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
mm_unmap_page (&proc->pd, vpage, 0);
}
-spin_unlock (&proc->pd.lock);
+spin_unlock (&proc->pd.lock, &ctxprpd);
return true;
}
@@ -196,19 +198,21 @@ static struct proc* proc_spawn_rd (char* name) {
}
static void proc_register (struct proc* proc, struct cpu* cpu) {
+spin_lock_ctx_t ctxcpu, ctxprtr;
proc->cpu = cpu;
-spin_lock (&cpu->lock);
+spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
-spin_unlock (&cpu->lock);
+spin_unlock (&cpu->lock, &ctxcpu);
-rw_spin_write_lock (&proc_tree_lock);
+rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-rw_spin_write_unlock (&proc_tree_lock);
+rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
}
/* caller holds cpu->lock */
@@ -246,8 +250,10 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
static void proc_reap (void) {
struct proc* proc = NULL;
struct list_node_link* reap_list = NULL;
+spin_lock_ctx_t ctxprtr;
+spin_lock_ctx_t ctxpr;
-rw_spin_write_lock (&proc_tree_lock);
+rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
struct rb_node_link* node;
rbtree_first (&proc_tree, node);
@@ -258,9 +264,9 @@ static void proc_reap (void) {
proc = rbtree_entry (node, struct proc, proc_tree_link);
if (atomic_load (&proc->state) == PROC_DEAD) {
-spin_lock (&proc->lock);
+spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
-spin_unlock (&proc->lock);
+spin_unlock (&proc->lock, &ctxpr);
list_append (reap_list, &proc->reap_link);
}
@@ -268,7 +274,7 @@ static void proc_reap (void) {
node = next;
}
-rw_spin_write_unlock (&proc_tree_lock);
+rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
struct list_node_link *reap_link, *reap_link_tmp;
list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -281,6 +287,8 @@ static void proc_reap (void) {
}
void proc_sched (void* regs) {
+spin_lock_ctx_t ctxcpu, ctxpr;
int s_cycles = atomic_fetch_add (&sched_cycles, 1);
if (s_cycles % SCHED_REAP_FREQ == 0)
@@ -289,45 +297,46 @@ void proc_sched (void* regs) {
struct proc* next = NULL;
struct cpu* cpu = thiscpu;
-spin_lock (&cpu->lock);
+spin_lock (&cpu->lock, &ctxcpu);
struct proc* prev = cpu->proc_current;
if (prev != NULL) {
-spin_lock (&prev->lock);
+spin_lock (&prev->lock, &ctxpr);
prev->pdata.regs = *(struct saved_regs*)regs;
-spin_unlock (&prev->lock);
+spin_unlock (&prev->lock, &ctxpr);
}
next = proc_find_sched (cpu);
if (next) {
cpu->proc_current = next;
-spin_unlock (&cpu->lock);
+spin_unlock (&cpu->lock, &ctxcpu);
do_sched (next);
} else {
cpu->proc_current = NULL;
-spin_unlock (&cpu->lock);
+spin_unlock (&cpu->lock, &ctxcpu);
spin ();
}
}
void proc_kill (struct proc* proc, void* regs) {
+spin_lock_ctx_t ctxpr, ctxcpu;
struct cpu* cpu = proc->cpu;
-spin_lock (&proc->lock);
+spin_lock (&proc->lock, &ctxpr);
atomic_store (&proc->state, PROC_DEAD);
-spin_unlock (&proc->lock);
+spin_unlock (&proc->lock, &ctxpr);
-spin_lock (&cpu->lock);
+spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
-spin_unlock (&cpu->lock);
+spin_unlock (&cpu->lock, &ctxcpu);
DEBUG ("killed PID %d\n", proc->pid);
@@ -338,44 +347,46 @@ void proc_kill (struct proc* proc, void* regs) {
}
void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
+spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
-spin_lock (&proc->lock);
+spin_lock (&proc->lock, &ctxpr);
atomic_store (&proc->state, PROC_SUSPENDED);
proc->suspension_q = sq;
-spin_unlock (&proc->lock);
+spin_unlock (&proc->lock, &ctxpr);
/* remove from run q */
-spin_lock (&cpu->lock);
+spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
-spin_unlock (&cpu->lock);
+spin_unlock (&cpu->lock, &ctxcpu);
-spin_lock (&sq->lock);
+spin_lock (&sq->lock, &ctxsq);
rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
-spin_unlock (&sq->lock);
+spin_unlock (&sq->lock, &ctxsq);
cpu_request_sched (cpu);
}
void proc_resume (struct proc* proc) {
+spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = proc->cpu;
struct proc_suspension_q* sq = proc->suspension_q;
-spin_lock (&sq->lock);
+spin_lock (&sq->lock, &ctxsq);
rbtree_delete (&sq->proc_tree, &proc->suspension_link);
-spin_unlock (&sq->lock);
+spin_unlock (&sq->lock, &ctxsq);
-spin_lock (&proc->lock);
+spin_lock (&proc->lock, &ctxpr);
proc->suspension_q = NULL;
atomic_store (&proc->state, PROC_READY);
-spin_unlock (&proc->lock);
+spin_unlock (&proc->lock, &ctxpr);
-spin_lock (&cpu->lock);
+spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
-spin_unlock (&cpu->lock);
+spin_unlock (&cpu->lock, &ctxcpu);
cpu_request_sched (cpu);
}
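For illustration, here is how nesting plays out with per-call contexts, mirroring the order used in proc_reap() above (proc_tree_lock held around proc->lock). This fragment reuses the hypothetical spinlock_t/spin_lock_ctx_t from the sketch at the top of the page and is not code from the commit.

/* Illustrative only: two locks taken in a fixed nesting order,
 * each acquisition with its own stack context. */
void nested_example (spinlock_t* outer, spinlock_t* inner) {
    spin_lock_ctx_t ctxout, ctxin;

    spin_lock (outer, &ctxout);   /* saves the caller's IRQ state, IRQs now off */
    spin_lock (inner, &ctxin);    /* ctxin records "already disabled" */
    /* ... touch data guarded by both locks ... */
    spin_unlock (inner, &ctxin);  /* IRQs stay off: ctxin restores "off" */
    spin_unlock (outer, &ctxout); /* restores the IRQ state from before outer */
}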


@@ -28,12 +28,14 @@ void proc_cleanup_resources (struct proc* proc) {
}
void proc_drop_resource (struct proc* proc, struct proc_resource* resource) {
+spin_lock_ctx_t ctxpr;
DEBUG ("resource=%p, type=%d, rid=%d\n", resource, resource->type, resource->rid);
if (atomic_fetch_sub (&resource->refs, 1) == 1) {
-spin_lock (&proc->lock);
+spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc->resource_tree, &resource->proc_resource_tree_link);
-spin_unlock (&proc->lock);
+spin_unlock (&proc->lock, &ctxpr);
resource->ops.cleanup (proc, resource);
free (resource);
@@ -74,6 +76,7 @@ static void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource
struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
void* data) {
+spin_lock_ctx_t ctxpr;
/* Check if resource RID already exists */
struct proc_resource* resource_check;
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource_check,
@@ -112,10 +115,10 @@ struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type
} break;
}
-spin_lock (&proc->lock);
+spin_lock (&proc->lock, &ctxpr);
rbtree_insert (struct proc_resource, &proc->resource_tree, &resource->proc_resource_tree_link,
proc_resource_tree_link, rid);
-spin_unlock (&proc->lock);
+spin_unlock (&proc->lock, &ctxpr);
return resource;
}
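The read/write lock guarding proc_tree follows the same pattern. Below is a hedged sketch of the write side, again reusing the hypothetical spin_lock_ctx_t and arch helpers from the first sketch; the rw_spinlock_t layout and the reader-drain loop are assumptions, only the rw_spin_write_lock/rw_spin_write_unlock names appear in the diff.

/* Hypothetical read/write spinlock layout. */
typedef struct {
    volatile int writer;
    volatile int readers;
} rw_spinlock_t;

void rw_spin_write_lock (rw_spinlock_t* lock, spin_lock_ctx_t* ctx) {
    ctx->flags = irq_save_disable ();   /* same per-acquisition context */
    while (__atomic_exchange_n (&lock->writer, 1, __ATOMIC_ACQUIRE))
        ;                               /* wait out competing writers */
    while (__atomic_load_n (&lock->readers, __ATOMIC_ACQUIRE) != 0)
        ;                               /* wait for readers to drain */
}

void rw_spin_write_unlock (rw_spinlock_t* lock, spin_lock_ctx_t* ctx) {
    __atomic_store_n (&lock->writer, 0, __ATOMIC_RELEASE);
    irq_restore (ctx->flags);           /* restore the state saved at lock time */
}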