Implement Mutexes and supporting syscalls, cleanup/optimize scheduler

commit 41a458b925, parent 6a474c21a0
2026-01-10 00:12:42 +01:00
17 changed files with 276 additions and 73 deletions


@@ -25,6 +25,8 @@
 #include <amd64/intr_defs.h>
 #endif
+
+#define SCHED_REAP_FREQ 200
 /*
  * Lock hierarchy:
  * - proc_tree_lock
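
The hierarchy comment is truncated by the hunk above. Judging from the nesting visible later in this diff (proc_reap takes proc_tree_lock before proc->lock; proc_suspend takes proc->lock before cpu->lock and the suspension-queue lock), the intended order appears to be proc_tree_lock, then proc->lock, then cpu->lock / suspension_q->lock. A minimal sketch of an acquisition that respects that assumed order, using this file's own lock APIs:

/* assumed hierarchy: proc_tree_lock -> proc->lock -> cpu->lock */
rw_spin_write_lock (&proc_tree_lock);   /* outermost: global process tree */
spin_lock (&proc->lock);                /* per-process state */
spin_lock (&cpu->lock);                 /* innermost: per-CPU run queue */
/* ... critical section ... */
spin_unlock (&cpu->lock);
spin_unlock (&proc->lock);
rw_spin_write_unlock (&proc_tree_lock);

Releasing in reverse order is not required for deadlock freedom; what matters is never acquiring an earlier lock while holding a later one.
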
@@ -36,6 +38,8 @@
 static struct rb_node_link* proc_tree = NULL;
 static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
+
+static atomic_int sched_cycles = 0;
 static bool proc_check_elf (uint8_t* elf) {
     if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
         return false;
@@ -194,29 +198,30 @@ static struct proc* proc_spawn_rd (char* name) {
 static void proc_register (struct proc* proc, struct cpu* cpu) {
     proc->cpu = cpu;
-    rw_spin_write_lock (&proc_tree_lock);
-    rw_spin_write_lock (&cpu->lock);
+    spin_lock (&cpu->lock);
     rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
-    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
     if (cpu->proc_current == NULL)
         cpu->proc_current = proc;
-    rw_spin_write_unlock (&cpu->lock);
+    spin_unlock (&cpu->lock);
+    rw_spin_write_lock (&proc_tree_lock);
+    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
     rw_spin_write_unlock (&proc_tree_lock);
 }
-static struct proc* proc_find_sched (void) {
+/* caller holds cpu->lock */
+static struct proc* proc_find_sched (struct cpu* cpu) {
     struct rb_node_link* node = NULL;
-    struct proc* start = thiscpu->proc_current;
+    struct proc* start = cpu->proc_current;
     struct proc* proc = NULL;
     if (start)
         node = &start->cpu_run_q_link;
     if (!node)
-        rbtree_first (&thiscpu->proc_run_q, node);
+        rbtree_first (&cpu->proc_run_q, node);
     if (!node)
         return NULL;
@@ -231,7 +236,7 @@ static struct proc* proc_find_sched (void) {
         rbtree_next (node, node);
         if (!node) {
-            rbtree_first (&thiscpu->proc_run_q, node);
+            rbtree_first (&cpu->proc_run_q, node);
         }
         if (node == first)
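
proc_find_sched is spread across the hunks above and the top of the hunk below. For readability, here is a hedged reconstruction of the whole function after this commit: the fragments shown in the diff are kept verbatim, while the loop framing and the PROC_READY filter are assumptions inferred from the visible node == first wrap-around check and from proc_sched's own PROC_READY test.

/* caller holds cpu->lock */
static struct proc* proc_find_sched (struct cpu* cpu) {
    struct rb_node_link* node = NULL;
    struct proc* start = cpu->proc_current;
    struct proc* proc = NULL;
    if (start)
        node = &start->cpu_run_q_link;
    if (!node)
        rbtree_first (&cpu->proc_run_q, node);
    if (!node)
        return NULL;                            /* empty run queue */
    struct rb_node_link* first = node;
    for (;;) {
        rbtree_next (node, node);               /* advance, ... */
        if (!node) {
            rbtree_first (&cpu->proc_run_q, node);   /* ...wrapping to the lowest PID */
        }
        if (node == first)
            return NULL;                        /* full lap, nothing runnable */
        proc = rbtree_entry (node, struct proc, cpu_run_q_link);
        if (atomic_load (&proc->state) == PROC_READY)   /* assumed filter */
            return proc;
    }
}
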
@@ -241,51 +246,81 @@ static struct proc* proc_find_sched (void) {
     return NULL;
 }
+static void proc_reap (void) {
+    struct proc* proc = NULL;
+    struct list_node_link* reap_list = NULL;
+    rw_spin_write_lock (&proc_tree_lock);
+    struct rb_node_link* node;
+    rbtree_first (&proc_tree, node);
+    while (node) {
+        struct rb_node_link* next;
+        rbtree_next (node, next);
+        proc = rbtree_entry (node, struct proc, proc_tree_link);
+        if (atomic_load (&proc->state) == PROC_DEAD) {
+            spin_lock (&proc->lock);
+            rbtree_delete (&proc_tree, &proc->proc_tree_link);
+            spin_unlock (&proc->lock);
+            list_append (reap_list, &proc->reap_link);
+        }
+        node = next;
+    }
+    rw_spin_write_unlock (&proc_tree_lock);
+    struct list_node_link *reap_link, *reap_link_tmp;
+    list_foreach (reap_list, reap_link, reap_link_tmp) {
+        proc = list_entry (reap_link, struct proc, reap_link);
+        list_remove (reap_list, &proc->reap_link);
+        DEBUG ("cleanup PID %d\n", proc->pid);
+        proc_cleanup (proc);
+    }
+}
 void proc_sched (void) {
+    if (atomic_fetch_add (&sched_cycles, 1) % SCHED_REAP_FREQ == 0)
+        proc_reap ();
     struct proc* next = NULL;
+    struct cpu* cpu = thiscpu;
-    rw_spin_read_lock (&thiscpu->lock);
+    spin_lock (&cpu->lock);
-    if (thiscpu->proc_run_q == NULL) {
-        rw_spin_read_unlock (&thiscpu->lock);
-        goto idle;
+    if (cpu->proc_run_q != NULL) {
+        next = proc_find_sched (cpu);
+        if (next)
+            cpu->proc_current = next;
     }
-    next = proc_find_sched ();
+    spin_unlock (&thiscpu->lock);
-    rw_spin_read_unlock (&thiscpu->lock);
-    if (next != NULL) {
-        rw_spin_write_lock (&thiscpu->lock);
-        thiscpu->proc_current = next;
-        rw_spin_write_unlock (&thiscpu->lock);
-    }
-    if (next != NULL && atomic_load (&next->state) == PROC_READY)
+    if ((next != NULL) && (atomic_load (&next->state) == PROC_READY))
         do_sched (next);
-idle:
     spin ();
 }
 void proc_kill (struct proc* proc) {
     atomic_store (&proc->state, PROC_DEAD);
-    rw_spin_write_lock (&proc_tree_lock);
-    spin_lock (&proc->lock);
-    rbtree_delete (&proc_tree, &proc->proc_tree_link);
-    spin_unlock (&proc->lock);
-    rw_spin_write_unlock (&proc_tree_lock);
     struct cpu* cpu = proc->cpu;
-    rw_spin_write_lock (&cpu->lock);
+    spin_lock (&cpu->lock);
     rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
-    rw_spin_write_unlock (&cpu->lock);
+    if (cpu->proc_current == proc)
+        cpu->proc_current = NULL;
+    spin_unlock (&cpu->lock);
     DEBUG ("killed PID %d\n", proc->pid);
-    proc_cleanup (proc);
     if (cpu == thiscpu)
         proc_sched ();
     else
@@ -293,22 +328,20 @@ void proc_kill (struct proc* proc) {
 }
 void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
-    struct cpu* cpu;
     spin_lock (&proc->lock);
     atomic_store (&proc->state, PROC_SUSPENDED);
-    cpu = proc->cpu;
+    struct cpu* cpu = proc->cpu;
     /* remove from run q */
-    rw_spin_write_lock (&cpu->lock);
+    spin_lock (&cpu->lock);
     rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
     if (cpu->proc_current == proc)
         cpu->proc_current = NULL;
-    rw_spin_write_unlock (&cpu->lock);
+    spin_unlock (&cpu->lock);
     proc->suspension_q = sq;
     spin_lock (&proc->suspension_q->lock);
     rbtree_insert (struct proc, &proc->suspension_q->proc_tree, &proc->suspension_link,
                    suspension_link, pid);
@@ -319,25 +352,23 @@ void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
     cpu_request_sched (cpu);
 }
-void proc_wakeup (struct proc* proc) {
-    struct cpu* cpu;
+void proc_resume (struct proc* proc) {
     spin_lock (&proc->lock);
-    cpu = proc->cpu;
+    struct cpu* cpu = proc->cpu;
+    struct proc_suspension_q* sq = proc->suspension_q;
-    spin_lock (&proc->suspension_q->lock);
-    rbtree_delete (&proc->suspension_q->proc_tree, &proc->suspension_link);
-    spin_unlock (&proc->suspension_q->lock);
+    spin_lock (&sq->lock);
+    rbtree_delete (&sq->proc_tree, &proc->suspension_link);
+    spin_unlock (&sq->lock);
     proc->suspension_q = NULL;
-    rw_spin_write_lock (&cpu->lock);
+    spin_lock (&cpu->lock);
     rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
-    rw_spin_write_unlock (&cpu->lock);
+    spin_unlock (&cpu->lock);
     atomic_store (&proc->state, PROC_READY);
     spin_unlock (&proc->lock);
     cpu_request_sched (cpu);
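
The commit title mentions mutexes and supporting syscalls, but the mutex code itself is in one of the other changed files. As a rough sketch of how a sleeping mutex could be layered on the primitives shown here (proc_suspend, proc_resume, and a proc_suspension_q), the following is a hypothetical illustration, not the commit's actual implementation; the struct layout, the lost-wakeup handling, and the lowest-PID wakeup policy are all assumptions:

#include <stdatomic.h>
#include <stdbool.h>

/* hypothetical type; the commit's real mutex lives in another file */
struct mutex {
    atomic_bool locked;
    struct proc_suspension_q sq;    /* procs sleeping on this mutex */
};

void mutex_lock (struct mutex* m) {
    bool expected = false;
    while (!atomic_compare_exchange_strong (&m->locked, &expected, true)) {
        /* contended: sleep until mutex_unlock resumes us, then retry */
        proc_suspend (thiscpu->proc_current, &m->sq);
        expected = false;
    }
}

void mutex_unlock (struct mutex* m) {
    struct rb_node_link* node;
    atomic_store (&m->locked, false);
    spin_lock (&m->sq.lock);
    rbtree_first (&m->sq.proc_tree, node);   /* lowest-PID waiter, if any */
    spin_unlock (&m->sq.lock);
    if (node)   /* racy window between store and resume; sketch only */
        proc_resume (rbtree_entry (node, struct proc, suspension_link));
}

A real implementation would also close the window between the failed compare-exchange and proc_suspend (a waiter could otherwise miss its wakeup), typically by re-checking the flag under the suspension queue's lock before going to sleep.
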