Fix scheduler starvation, use lists for scheduling

2026-01-22 11:54:52 +01:00
parent 7eceecf6e3
commit fea0999726
9 changed files with 83 additions and 54 deletions


@@ -34,6 +34,7 @@ try:
spin_unlock (&mutex->resource->lock, &ctxmt);
DEBUG ("proc->pid=%d\n", proc->pid);
proc_suspend (proc, &mutex->suspension_q);
goto try;
@@ -52,11 +53,10 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
spin_lock (&mutex->suspension_q.lock, &ctxsq);
struct proc* resumed_proc = NULL;
struct rb_node_link* node;
rbtree_first (&mutex->suspension_q.proc_tree, node);
struct list_node_link* node = mutex->suspension_q.proc_list;
if (node) {
resumed_proc = rbtree_entry (node, struct proc, suspension_link);
resumed_proc = list_entry (node, struct proc, suspension_link);
mutex->owner = resumed_proc;
mutex->locked = true;
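
Pieced together from the lines this hunk adds, the wake-up path in proc_mutex_unlock now hands the mutex to whichever waiter sits at the head of the suspension list instead of pulling the minimum node out of an rbtree. A rough sketch of how that path reads after the change; everything past the shown lines lies outside this hunk:

spin_lock (&mutex->suspension_q.lock, &ctxsq);
struct proc* resumed_proc = NULL;
struct list_node_link* node = mutex->suspension_q.proc_list; /* head = next waiter */
if (node) {
    resumed_proc = list_entry (node, struct proc, suspension_link);
    /* ownership passes straight to the woken process */
    mutex->owner = resumed_proc;
    mutex->locked = true;
    /* ... rest of the wake-up, and the unlock of suspension_q.lock, is not part of this hunk ... */
}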


@@ -28,14 +28,6 @@
#define SCHED_REAP_FREQ 10
/*
* Lock hierarchy:
* - proc_tree_lock
* - cpu->lock
* - proc->lock
* - suspension_q->lock
*/
static struct rb_node_link* proc_tree = NULL;
static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
@@ -217,49 +209,46 @@ void proc_register (struct proc* proc, struct cpu* cpu) {
proc->cpu = cpu;
rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
spin_unlock (&cpu->lock, &ctxcpu);
rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
}
/* caller holds cpu->lock */
static struct proc* proc_find_sched (struct cpu* cpu) {
struct rb_node_link* node = NULL;
struct proc* current = cpu->proc_current;
struct proc* proc = NULL;
if (current)
rbtree_next (&current->cpu_run_q_link, node);
if (!node)
rbtree_first (&cpu->proc_run_q, node);
if (!node)
if (!cpu->proc_run_q)
return NULL;
struct rb_node_link* first = node;
struct list_node_link *current, *start;
if (cpu->proc_current)
current = cpu->proc_current->cpu_run_q_link.next;
else
current = cpu->proc_run_q;
if (!current)
current = cpu->proc_run_q;
start = current;
do {
proc = rbtree_entry (node, struct proc, cpu_run_q_link);
struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
if (atomic_load (&proc->state) == PROC_READY)
return proc;
rbtree_next (node, node);
current = current->next ? current->next : cpu->proc_run_q;
} while (current != start);
if (!node)
rbtree_first (&cpu->proc_run_q, node);
} while (node != first);
return ((atomic_load (&current->state) == PROC_READY) ? current : NULL);
return NULL;
}
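
Read as a whole, the new list-based lookup is a plain round robin: start at the node after the currently running process (or at the list head), walk the NULL-terminated list, wrap back to the head, and return the first process found in PROC_READY. Reconstructed from this hunk's new lines; types and constants such as struct cpu, struct proc and PROC_READY come from headers outside the hunk:

/* caller holds cpu->lock */
static struct proc* proc_find_sched (struct cpu* cpu) {
    if (!cpu->proc_run_q)
        return NULL;
    struct list_node_link *current, *start;
    /* begin the search just after the process that ran last */
    if (cpu->proc_current)
        current = cpu->proc_current->cpu_run_q_link.next;
    else
        current = cpu->proc_run_q;
    if (!current)
        current = cpu->proc_run_q;
    start = current;
    do {
        struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
        if (atomic_load (&proc->state) == PROC_READY)
            return proc;
        /* wrap around to the head once the end of the list is reached */
        current = current->next ? current->next : cpu->proc_run_q;
    } while (current != start);
    return NULL;
}

Starting the scan right after proc_current and sweeping once over the whole list means every queued process is examined at most once per call, rather than the search order depending on the pid-keyed rbtree.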
static void proc_reap (void) {
@@ -281,9 +270,8 @@ static void proc_reap (void) {
if (atomic_load (&proc->state) == PROC_DEAD) {
spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
spin_unlock (&proc->lock, &ctxpr);
list_append (reap_list, &proc->reap_link);
spin_unlock (&proc->lock, &ctxpr);
}
node = next;
@@ -318,7 +306,7 @@ void proc_sched (void* regs) {
if (prev != NULL) {
spin_lock (&prev->lock, &ctxpr);
prev->pdata.regs = *(struct saved_regs*)regs;
memcpy (&prev->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&prev->lock, &ctxpr);
}
@@ -347,7 +335,7 @@ void proc_kill (struct proc* proc) {
spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
@@ -370,13 +358,13 @@ void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
/* remove from run q */
spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
spin_unlock (&cpu->lock, &ctxcpu);
spin_lock (&sq->lock, &ctxsq);
rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
list_append (sq->proc_list, &proc->suspension_link);
spin_unlock (&sq->lock, &ctxsq);
cpu_request_sched (cpu);
@@ -388,7 +376,7 @@ void proc_resume (struct proc* proc) {
struct proc_suspension_q* sq = proc->suspension_q;
spin_lock (&sq->lock, &ctxsq);
rbtree_delete (&sq->proc_tree, &proc->suspension_link);
list_remove (sq->proc_list, &proc->suspension_link);
spin_unlock (&sq->lock, &ctxsq);
spin_lock (&proc->lock, &ctxpr);
@@ -397,7 +385,7 @@ void proc_resume (struct proc* proc) {
spin_unlock (&proc->lock, &ctxpr);
spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
spin_unlock (&cpu->lock, &ctxcpu);
cpu_request_sched (cpu);


@@ -45,8 +45,8 @@ struct proc_resources {
struct proc {
int pid;
struct rb_node_link proc_tree_link;
struct rb_node_link cpu_run_q_link;
struct rb_node_link suspension_link;
struct list_node_link cpu_run_q_link;
struct list_node_link suspension_link;
struct list_node_link reap_link;
struct list_node_link* mappings; /* pd.lock implicitly protects this field */
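
struct proc now carries plain list links for the run queue, the suspension queue and the reap list; only the global proc_tree link stays on an rbtree. The libk/list.h primitives themselves are not part of this commit, so the sketch below is only a guess at their shape, inferred from how they are called here: a bare head pointer handed to list_append/list_remove as an lvalue, nodes walked through ->next, and list_entry recovering the container the same way rbtree_entry does.

/* Hypothetical reconstruction of the list primitives assumed by this commit;
 * the real <libk/list.h> may differ. Intrusive, singly linked, NULL-terminated. */
#include <stddef.h>

struct list_node_link {
    struct list_node_link* next;
};

/* recover the enclosing object from its embedded link */
#define list_entry(node, type, member) \
    ((type*) ((char*) (node) - offsetof (type, member)))

/* append/remove are written as macros (rather than functions taking &head)
 * so that an empty or one-element list can update the caller's head pointer */
#define list_append(head, node)                              \
    do {                                                     \
        (node)->next = NULL;                                 \
        if (!(head)) {                                       \
            (head) = (node);                                 \
        } else {                                             \
            struct list_node_link* it_ = (head);             \
            while (it_->next)                                \
                it_ = it_->next;                             \
            it_->next = (node);                              \
        }                                                    \
    } while (0)

#define list_remove(head, node)                              \
    do {                                                     \
        if ((head) == (node)) {                              \
            (head) = (node)->next;                           \
        } else {                                             \
            struct list_node_link* it_ = (head);             \
            while (it_ && it_->next != (node))               \
                it_ = it_->next;                             \
            if (it_)                                         \
                it_->next = (node)->next;                    \
        }                                                    \
        (node)->next = NULL;                                 \
    } while (0)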


@@ -1,11 +1,11 @@
#ifndef _KERNEL_PROC_SUSPENTION_Q_H
#define _KERNEL_PROC_SUSPENTION_Q_H
#include <libk/rbtree.h>
#include <libk/list.h>
#include <sync/spin_lock.h>
struct proc_suspension_q {
struct rb_node_link* proc_tree;
struct list_node_link* proc_list;
spin_lock_t lock;
};
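
With this header, a suspension queue is nothing more than a list head and its lock. For reference, the proc_suspend / proc_resume hunks above now move a process between the per-cpu run list and a suspension list roughly as follows; lines marked /* ... */ lie outside those hunks:

/* proc_suspend: take proc off its cpu's run list and park it on sq */
spin_lock (&cpu->lock, &ctxcpu);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
    cpu->proc_current = NULL;
spin_unlock (&cpu->lock, &ctxcpu);

spin_lock (&sq->lock, &ctxsq);
list_append (sq->proc_list, &proc->suspension_link);
spin_unlock (&sq->lock, &ctxsq);

cpu_request_sched (cpu);

/* proc_resume: pull proc off its suspension queue and back onto the run list */
struct proc_suspension_q* sq = proc->suspension_q;
spin_lock (&sq->lock, &ctxsq);
list_remove (sq->proc_list, &proc->suspension_link);
spin_unlock (&sq->lock, &ctxsq);

spin_lock (&proc->lock, &ctxpr);
/* ... proc state update (not shown in the hunk) ... */
spin_unlock (&proc->lock, &ctxpr);

spin_lock (&cpu->lock, &ctxcpu);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
spin_unlock (&cpu->lock, &ctxcpu);

cpu_request_sched (cpu);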