Remove spinlock contexts
All checks were successful
Build documentation / build-and-deploy (push) Successful in 28s

This commit is contained in:
2026-02-08 18:58:53 +01:00
parent 1ca3d11bac
commit 9e6035bd68
26 changed files with 161 additions and 262 deletions

View File

@@ -105,6 +105,8 @@ struct proc* proc_spawn_rd (char* name) {
bool ok = proc_check_elf (rd_file->content);
DEBUG ("Spawning %s, elf header %s\n", name, ok ? "ok" : "bad");
if (!ok)
return NULL;
@@ -112,24 +114,21 @@ struct proc* proc_spawn_rd (char* name) {
}
/*
 * Look up a process by its PID in the global process tree.
 *
 * Takes proc_tree_lock around the rbtree_find so the tree cannot be
 * mutated (e.g. by proc_register/proc_reap) during the search.
 *
 * pid:    process identifier to search for.
 * return: pointer to the matching struct proc, or NULL if no process
 *         with that PID is registered.
 *
 * NOTE(review): the returned proc is not reference-counted or locked
 * here; the pointer may be reaped after the lock is dropped — confirm
 * callers handle that window.
 */
struct proc* proc_find_pid (int pid) {
	struct proc* proc = NULL;
	spin_lock (&proc_tree_lock);
	rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
	spin_unlock (&proc_tree_lock);
	return proc;
}
bool proc_register (struct proc* proc, struct cpu** reschedule_cpu) {
spin_lock_ctx_t ctxcpu, ctxprtr, ctxpr;
struct cpu* cpu = *reschedule_cpu != NULL ? *reschedule_cpu : cpu_find_lightest ();
spin_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&proc_tree_lock);
spin_lock (&cpu->lock);
spin_lock (&proc->lock);
proc->cpu = cpu;
@@ -140,9 +139,9 @@ bool proc_register (struct proc* proc, struct cpu** reschedule_cpu) {
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
spin_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&proc->lock);
spin_unlock (&cpu->lock);
spin_unlock (&proc_tree_lock);
*reschedule_cpu = cpu;
@@ -181,10 +180,8 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
static void proc_reap (void) {
struct proc* proc = NULL;
struct list_node_link* reap_list = NULL;
spin_lock_ctx_t ctxprtr;
spin_lock_ctx_t ctxpr;
spin_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&proc_tree_lock);
struct rb_node_link* node;
rbtree_first (&proc_tree, node);
@@ -195,16 +192,16 @@ static void proc_reap (void) {
proc = rbtree_entry (node, struct proc, proc_tree_link);
if (atomic_load (&proc->state) == PROC_DEAD) {
spin_lock (&proc->lock, &ctxpr);
spin_lock (&proc->lock);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
list_append (reap_list, &proc->reap_link);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&proc->lock);
}
node = next;
}
spin_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&proc_tree_lock);
struct list_node_link *reap_link, *reap_link_tmp;
list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -217,8 +214,6 @@ static void proc_reap (void) {
}
void proc_sched (void) {
spin_lock_ctx_t ctxcpu;
int s_cycles = atomic_fetch_add (&sched_cycles, 1);
if (s_cycles % SCHED_REAP_FREQ == 0)
@@ -227,31 +222,29 @@ void proc_sched (void) {
struct proc* next = NULL;
struct cpu* cpu = thiscpu;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&cpu->lock);
next = proc_find_sched (cpu);
if (next) {
cpu->proc_current = next;
do_sched (next, &cpu->lock, &ctxcpu);
do_sched (next, &cpu->lock);
} else {
cpu->proc_current = NULL;
spin_unlock (&cpu->lock, &ctxcpu);
spin_unlock (&cpu->lock);
spin ();
}
}
bool proc_kill (struct proc* proc, struct cpu** reschedule_cpu) {
spin_lock_ctx_t ctxpr, ctxcpu;
spin_lock (&proc->lock, &ctxpr);
spin_lock (&proc->lock);
struct cpu* cpu = proc->cpu;
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&proc->lock);
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&cpu->lock);
spin_lock (&proc->lock);
atomic_store (&proc->state, PROC_DEAD);
proc->cpu = NULL;
@@ -261,8 +254,8 @@ bool proc_kill (struct proc* proc, struct cpu** reschedule_cpu) {
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
spin_unlock (&proc->lock);
spin_unlock (&cpu->lock);
DEBUG ("killed PID %d\n", proc->pid);
@@ -290,7 +283,6 @@ void proc_init (void) {
struct cpu* init_cpu = thiscpu;
proc_register (init, &init_cpu);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
spin_lock (&spin_proc->cpu->lock);
do_sched (spin_proc, &spin_proc->cpu->lock);
}