Use red-black trees to store process run queue and process list

2026-01-05 18:30:58 +01:00
parent b1579e4ac1
commit fcd5658a80
4 changed files with 320 additions and 34 deletions
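
The commit replaces the intrusive singly linked lists (struct proc* next plus the separately allocated struct procw wrapper) with intrusive red-black tree nodes: each struct proc embeds one rb_node_link per tree it belongs to, and tree users recover the owning proc from a node pointer with a container-of style macro (rbtree_entry in the diff below). A minimal, standalone sketch of that embedding pattern follows; it is not the actual libk/rbtree.h implementation, and node_link, proc_example and entry_of are invented names for illustration only:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for an intrusive tree link; the real rb_node_link
 * presumably also carries parent/child pointers and a colour bit. */
struct node_link {
    struct node_link* next;
};

struct proc_example {
    int pid;
    struct node_link run_q_link; /* embedded link: no separate wrapper allocation */
};

/* container-of: recover the enclosing struct from a pointer to its member. */
#define entry_of(ptr, type, member) \
    ((type*) ((char*) (ptr) - offsetof (type, member)))

int main (void) {
    struct proc_example p = { .pid = 7 };
    struct node_link* node = &p.run_q_link; /* what a tree iterator hands out */
    struct proc_example* owner = entry_of (node, struct proc_example, run_q_link);
    printf ("pid = %d\n", owner->pid); /* prints: pid = 7 */
    return 0;
}

Embedding the links removes the malloc in the registration path and makes membership in the per-CPU run queue and the global process tree part of struct proc itself.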


@@ -3,6 +3,7 @@
 #include <irq/irq.h>
 #include <libk/align.h>
 #include <libk/list.h>
+#include <libk/rbtree.h>
 #include <libk/std.h>
 #include <libk/string.h>
 #include <limine/requests.h>
@@ -22,8 +23,8 @@
 #include <amd64/intr_defs.h>
 #endif
 
-static struct procw* procs;
-static spin_lock_t procs_lock = SPIN_LOCK_INIT;
+static struct rb_node_link* proc_tree = NULL;
+static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
 
 static bool proc_check_elf (uint8_t* elf) {
     if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
@@ -108,50 +109,49 @@ static struct proc* proc_spawn_rd (char* name) {
     return proc_from_elf (rd_file->content);
 }
 
-static void proc_register_for_cpu (struct proc* proc, struct cpu* cpu) {
-    /* make available globally. */
-    struct procw* procw = malloc (sizeof (*procw));
-    if (procw == NULL)
-        return;
-    procw->proc = proc;
-    proc->procw = procw;
+static void proc_register (struct proc* proc, struct cpu* cpu) {
     proc->cpu = cpu;
-    spin_lock (&procs_lock);
+    spin_lock (&proc_tree_lock);
     spin_lock (&cpu->lock);
-    linklist_append (struct procw*, procs, procw);
-    linklist_append (struct proc*, cpu->proc_run_q, proc);
+    rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
+    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
     if (cpu->proc_current == NULL)
         cpu->proc_current = proc;
     spin_unlock (&cpu->lock);
-    spin_unlock (&procs_lock);
+    spin_unlock (&proc_tree_lock);
 }
 
 static struct proc* proc_find_sched (void) {
+    struct rb_node_link* node = NULL;
     struct proc* start = thiscpu->proc_current;
-    struct proc* proc = start->next;
-    for (;;) {
-        if (proc == NULL) {
-            proc = thiscpu->proc_run_q;
-        }
-        if (atomic_load (&proc->state) == PROC_READY) {
+    struct proc* proc = NULL;
+    if (start)
+        node = &start->cpu_run_q_link;
+    if (!node)
+        rbtree_first (&thiscpu->proc_run_q, node);
+    struct rb_node_link* first = node;
+    while (node) {
+        proc = rbtree_entry (node, struct proc, cpu_run_q_link);
+        if (atomic_load (&proc->state) == PROC_READY)
             return proc;
-        }
-        /* No runnable processes found. */
-        if (proc == start) {
-            return NULL;
-        }
-        proc = proc->next;
     }
+    return NULL;
 }
 
 void proc_sched (void) {
@@ -190,7 +190,7 @@ static void proc_irq_sched (void* arg, void* regs) {
 void proc_init (void) {
     struct proc* init = proc_spawn_rd ("init.exe");
-    proc_register_for_cpu (init, thiscpu);
+    proc_register (init, thiscpu);
 #if defined(__x86_64__)
     irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
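
Both trees are keyed by pid, so the global proc_tree gives an ordered view of every process. A possible lookup helper built only from the iteration macros as they are used above; the exact rbtree_first/rbtree_next/rbtree_entry signatures are inferred from this diff, and proc_find_by_pid itself is a hypothetical sketch, not part of this commit:

/* Hypothetical, not in this commit: scan the global process tree in pid
 * order and return the process with a matching pid, or NULL. */
static struct proc* proc_find_by_pid (int pid) {
    struct rb_node_link* node = NULL;
    struct proc* found = NULL;

    spin_lock (&proc_tree_lock);
    rbtree_first (&proc_tree, node); /* smallest pid first */
    while (node) {
        struct proc* proc = rbtree_entry (node, struct proc, proc_tree_link);
        if (proc->pid == pid) {
            found = proc;
            break;
        }
        if (proc->pid > pid) /* pid is the sort key, so we can stop early */
            break;
        rbtree_next (node, node);
    }
    spin_unlock (&proc_tree_lock);

    return found;
}

A real lookup would descend from the root in O(log n); that part of the libk interface is not visible in this diff, so the sketch falls back to an in-order scan.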


@@ -3,6 +3,7 @@
 #include <aux/compiler.h>
 #include <aux/elf.h>
+#include <libk/rbtree.h>
 #include <libk/std.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>
@@ -29,13 +30,16 @@ struct proc_mapping {
 struct procw;
 struct proc {
-    struct proc* next;
     int pid;
+    struct rb_node_link proc_tree_link;
+    struct rb_node_link cpu_run_q_link;
     struct proc_mapping* mappings; /* pd.lock implicitly protects this field */
     struct proc_platformdata pdata;
     struct pd pd;
     spin_lock_t lock;
     struct cpu* cpu;
-    struct procw* procw; /* link to it's global struct */
+    // struct procw* procw; /* link to it's global struct */
     atomic_int state;
 };
@@ -44,10 +48,10 @@ struct proc {
  * struct procw is a process wrapper that is a member of
  * a global process list.
  */
-struct procw {
-    struct procw* next;
-    struct proc* proc;
-};
+/* struct procw { */
+/*     struct procw* next; */
+/*     struct proc* proc; */
+/* }; */
 
 void proc_sched (void);
 void proc_kill (struct proc* proc);
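
For reference, the five-argument rbtree_insert calls above read as (type, root, node, member, key). A plain, non-balancing sketch with the same parameter shape may make that mapping clearer; it is purely illustrative, the bst_* names and the link field layout are invented, and the real libk red-black tree also rebalances and recolours on insert:

#include <stddef.h>

/* Invented stand-in for rb_node_link, just enough for a plain BST. */
struct bst_link {
    struct bst_link* left;
    struct bst_link* right;
};

#define bst_entry(ptr, type, member) \
    ((type*) ((char*) (ptr) - offsetof (type, member)))

/* type   - containing struct (struct proc)
 * root   - address of the root pointer (&proc_tree, &cpu->proc_run_q)
 * node   - address of the embedded link (&proc->proc_tree_link)
 * member - name of that link field, used to recover the struct
 * key    - field of `type` the tree is ordered by (pid) */
#define bst_insert(type, root, node, member, key)              \
    do {                                                       \
        struct bst_link** where = (root);                      \
        type* ins = bst_entry ((node), type, member);          \
        while (*where != NULL) {                               \
            type* cur = bst_entry (*where, type, member);      \
            where = (ins->key < cur->key) ? &(*where)->left    \
                                          : &(*where)->right;  \
        }                                                      \
        (node)->left = NULL;                                   \
        (node)->right = NULL;                                  \
        *where = (node);                                       \
    } while (0)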