Fix scheduler starvation, use lists for scheduling
All checks were successful
Build documentation / build-and-deploy (push) Successful in 33s
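Switch the per-CPU run queue and the mutex suspension queues from pid-keyed red-black trees to plain linked lists, and rewrite proc_find_sched as a circular round-robin scan: it starts one entry past cpu->proc_current, returns the first PROC_READY process it finds, and gives up with NULL only after a full lap, so a runnable process can no longer be skipped indefinitely the way the old rb-tree walk allowed. Along the way, proc_register now inserts into the global proc_tree after the run-queue update, proc_reap appends to the reap list before dropping proc->lock, proc_sched saves the interrupted registers with memcpy, a sched() syscall stub is added, and init/init.c gains an EXAMPLE 2 demo that spawns a second thread and loops on test() calls (the mutex lock/unlock calls are left commented out for now).

For reference, here is a minimal, self-contained model of the round-robin pick. It compiles as an ordinary user-space C program; struct proc, the state enum, and list_entry are simplified stand-ins for the kernel's own definitions, not the real ones:

/* Round-robin pick over a NULL-terminated singly linked run queue.
 * Types and names mirror the kernel's but are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

struct list_node_link { struct list_node_link* next; };

enum proc_state { PROC_READY, PROC_SUSPENDED, PROC_DEAD };

struct proc {
	int pid;
	enum proc_state state;
	struct list_node_link cpu_run_q_link;
};

/* container_of-style accessor, as the kernel's list_entry presumably expands to */
#define list_entry(node, type, member) \
	((type*)((char*)(node) - offsetof (type, member)))

static struct proc* find_sched (struct list_node_link* run_q, struct proc* current_proc) {
	if (!run_q)
		return NULL;

	/* start one past the current process so it is considered last */
	struct list_node_link* current = current_proc ? current_proc->cpu_run_q_link.next : run_q;
	if (!current)
		current = run_q; /* fell off the tail: wrap to the head */

	struct list_node_link* start = current;
	do {
		struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
		if (proc->state == PROC_READY)
			return proc;
		current = current->next ? current->next : run_q;
	} while (current != start);

	return NULL; /* full lap, nothing runnable */
}

int main (void) {
	struct proc a = { 1, PROC_READY, { NULL } };
	struct proc b = { 2, PROC_SUSPENDED, { NULL } };
	struct proc c = { 3, PROC_READY, { NULL } };
	a.cpu_run_q_link.next = &b.cpu_run_q_link;
	b.cpu_run_q_link.next = &c.cpu_run_q_link;

	/* with pid 1 current, the scan starts at pid 2, skips it (suspended)
	 * and picks pid 3 instead of re-picking pid 1 */
	struct proc* next = find_sched (&a.cpu_run_q_link, &a);
	printf ("next pid: %d\n", next ? next->pid : -1);
	return 0;
}

Starting the scan at cpu_run_q_link.next rather than at the list head is the heart of the starvation fix: the currently running process is considered last, so it is only re-picked when nothing else on the queue is READY.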
init/init.c | 44
@@ -6,7 +6,7 @@
 #include <stdint.h>
 #include <string/string.h>
 
-#define EXAMPLE 1
+#define EXAMPLE 2
 
 #if EXAMPLE == 1
@@ -26,4 +26,46 @@ int spawn (void (*fn) (void)) {
 }
 
 void app_main (void) { spawn (&app_thread1); }
+#elif EXAMPLE == 2
+
+#define MUTEX 2000
+
+void app_thread1 (void);
+
+int spawn (void (*fn) (void)) {
+	size_t stack_size = 256 * PAGE_SIZE;
+	void* stack = malloc (stack_size);
+	if (stack == NULL)
+		return -ST_OOM_ERROR;
+
+	uintptr_t stack_top = (uintptr_t)stack + stack_size;
+	return clone (stack_top, stack_size, fn);
+}
+
+void app_main (void) {
+	create_mutex (MUTEX, RV_PRIVATE);
+
+	spawn (&app_thread1);
+
+	for (;;) {
+		/* lock_mutex (MUTEX, RV_PRIVATE); */
+
+		for (int i = 0; i < 3; i++)
+			test ('a');
+
+		/* unlock_mutex (MUTEX, RV_PRIVATE); */
+	}
+}
+
+void app_thread1 (void) {
+	for (int i = 0; i < 3; i++)
+		test ('b');
+	for (;;) {
+		/* lock_mutex (MUTEX, RV_PRIVATE); */
+
+		/* unlock_mutex (MUTEX, RV_PRIVATE); */
+	}
+
+	quit ();
+}
 #endif

@@ -27,7 +27,7 @@ struct cpu {
 
 	spin_lock_t lock;
 
-	struct rb_node_link* proc_run_q;
+	struct list_node_link* proc_run_q;
 	struct proc* proc_current;
 };

@@ -25,12 +25,8 @@ int amd64_syscall_dispatch (void* stack_ptr) {
 
 	struct proc* caller = thiscpu->proc_current;
 
 	__asm__ volatile ("sti");
 
 	int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
 
 	__asm__ volatile ("cli");
 
 	return result;
 }

@@ -34,6 +34,7 @@ try:
 
 	spin_unlock (&mutex->resource->lock, &ctxmt);
 
+	DEBUG ("proc->pid=%d\n", proc->pid);
 	proc_suspend (proc, &mutex->suspension_q);
 
 	goto try;

@@ -52,11 +53,10 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
 	spin_lock (&mutex->suspension_q.lock, &ctxsq);
 
 	struct proc* resumed_proc = NULL;
-	struct rb_node_link* node;
-	rbtree_first (&mutex->suspension_q.proc_tree, node);
+	struct list_node_link* node = mutex->suspension_q.proc_list;
 
 	if (node) {
-		resumed_proc = rbtree_entry (node, struct proc, suspension_link);
+		resumed_proc = list_entry (node, struct proc, suspension_link);
 		mutex->owner = resumed_proc;
 		mutex->locked = true;

@@ -28,14 +28,6 @@
 
 #define SCHED_REAP_FREQ 10
 
 /*
  * Lock hierarchy:
  * - proc_tree_lock
  * - cpu->lock
  * - proc->lock
  * - suspension_q->lock
  */
 
 static struct rb_node_link* proc_tree = NULL;
 static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;

@@ -217,49 +209,46 @@ void proc_register (struct proc* proc, struct cpu* cpu) {
 
 	proc->cpu = cpu;
 
-	rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-	rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-	rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
-
 	spin_lock (&cpu->lock, &ctxcpu);
-	rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
+	list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
 
 	if (cpu->proc_current == NULL)
 		cpu->proc_current = proc;
 
 	spin_unlock (&cpu->lock, &ctxcpu);
+
+	rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+	rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
+	rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
 }
 
 /* caller holds cpu->lock */
 static struct proc* proc_find_sched (struct cpu* cpu) {
-	struct rb_node_link* node = NULL;
-	struct proc* current = cpu->proc_current;
-	struct proc* proc = NULL;
-
-	if (current)
-		rbtree_next (&current->cpu_run_q_link, node);
-
-	if (!node)
-		rbtree_first (&cpu->proc_run_q, node);
-
-	if (!node)
+	if (!cpu->proc_run_q)
 		return NULL;
 
-	struct rb_node_link* first = node;
+	struct list_node_link *current, *start;
+
+	if (cpu->proc_current)
+		current = cpu->proc_current->cpu_run_q_link.next;
+	else
+		current = cpu->proc_run_q;
+
+	if (!current)
+		current = cpu->proc_run_q;
+
+	start = current;
 
 	do {
-		proc = rbtree_entry (node, struct proc, cpu_run_q_link);
+		struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
 
 		if (atomic_load (&proc->state) == PROC_READY)
 			return proc;
 
-		rbtree_next (node, node);
-
-		if (!node)
-			rbtree_first (&cpu->proc_run_q, node);
-
-	} while (node != first);
+		current = current->next ? current->next : cpu->proc_run_q;
+	} while (current != start);
 
-	return ((atomic_load (&current->state) == PROC_READY) ? current : NULL);
+	return NULL;
 }
 
 static void proc_reap (void) {

@@ -281,9 +270,8 @@ static void proc_reap (void) {
 		if (atomic_load (&proc->state) == PROC_DEAD) {
 			spin_lock (&proc->lock, &ctxpr);
 			rbtree_delete (&proc_tree, &proc->proc_tree_link);
-			spin_unlock (&proc->lock, &ctxpr);
-
 			list_append (reap_list, &proc->reap_link);
+			spin_unlock (&proc->lock, &ctxpr);
 		}
 
 		node = next;

@@ -318,7 +306,7 @@ void proc_sched (void* regs) {
 
 	if (prev != NULL) {
 		spin_lock (&prev->lock, &ctxpr);
-		prev->pdata.regs = *(struct saved_regs*)regs;
+		memcpy (&prev->pdata.regs, regs, sizeof (struct saved_regs));
 		spin_unlock (&prev->lock, &ctxpr);
 	}

@@ -347,7 +335,7 @@ void proc_kill (struct proc* proc) {
 
 	spin_lock (&cpu->lock, &ctxcpu);
 
-	rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
+	list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
 	if (cpu->proc_current == proc)
 		cpu->proc_current = NULL;

@@ -370,13 +358,13 @@ void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
 	/* remove from run q */
 	spin_lock (&cpu->lock, &ctxcpu);
 
-	rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
+	list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
 	if (cpu->proc_current == proc)
 		cpu->proc_current = NULL;
 	spin_unlock (&cpu->lock, &ctxcpu);
 
 	spin_lock (&sq->lock, &ctxsq);
-	rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
+	list_append (sq->proc_list, &proc->suspension_link);
 	spin_unlock (&sq->lock, &ctxsq);
 
 	cpu_request_sched (cpu);

@@ -388,7 +376,7 @@ void proc_resume (struct proc* proc) {
 	struct proc_suspension_q* sq = proc->suspension_q;
 
 	spin_lock (&sq->lock, &ctxsq);
-	rbtree_delete (&sq->proc_tree, &proc->suspension_link);
+	list_remove (sq->proc_list, &proc->suspension_link);
 	spin_unlock (&sq->lock, &ctxsq);
 
 	spin_lock (&proc->lock, &ctxpr);
@@ -397,7 +385,7 @@ void proc_resume (struct proc* proc) {
 	spin_unlock (&proc->lock, &ctxpr);
 
 	spin_lock (&cpu->lock, &ctxcpu);
-	rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
+	list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
 	spin_unlock (&cpu->lock, &ctxcpu);
 
 	cpu_request_sched (cpu);

@@ -45,8 +45,8 @@ struct proc_resources {
 struct proc {
 	int pid;
 	struct rb_node_link proc_tree_link;
-	struct rb_node_link cpu_run_q_link;
-	struct rb_node_link suspension_link;
+	struct list_node_link cpu_run_q_link;
+	struct list_node_link suspension_link;
 	struct list_node_link reap_link;
 
 	struct list_node_link* mappings; /* pd.lock implicitly protects this field */

@@ -1,11 +1,11 @@
 #ifndef _KERNEL_PROC_SUSPENTION_Q_H
 #define _KERNEL_PROC_SUSPENTION_Q_H
 
-#include <libk/rbtree.h>
+#include <libk/list.h>
 #include <sync/spin_lock.h>
 
 struct proc_suspension_q {
-	struct rb_node_link* proc_tree;
+	struct list_node_link* proc_list;
 	spin_lock_t lock;
 };

@@ -13,6 +13,8 @@ int quit (void) { return do_syscall (SYS_QUIT, 0); }
 
 int test (char c) { return do_syscall (SYS_TEST, c); }
 
+int sched (void) { return do_syscall (SYS_SCHED, 0); }
+
 int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags) {
 	return do_syscall (SYS_MAP, mem_rid, vis, vaddr, flags);
 }

@@ -19,6 +19,7 @@
 
 int quit (void);
 int test (char c);
+int sched (void);
 int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags);
 int unmap (uintptr_t vaddr, size_t pages);
 int create_mem (int mem_rid, int vis, size_t pages);