Compare commits

...

3 Commits

SHA1 Message Date
e78bfb9984 Move suspension q code into proc/suspension_q.c 2026-01-29 01:52:18 +01:00
d2a88b3641 Move suspension q's cleanup to proc/suspension_q.c 2026-01-29 01:43:01 +01:00
fdda2e2df8 Unlock mutexes on process death 2026-01-29 01:38:44 +01:00
8 changed files with 167 additions and 106 deletions

@@ -37,6 +37,8 @@ void app_main (void) {
for (int i = 0; i < 3; i++)
test (letter);
process_quit ();
mutex_unlock (MUTEX);
}
}

@@ -9,6 +9,7 @@
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
@@ -93,30 +94,8 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent
}
void proc_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;
spin_lock (&proc->lock, &ctxpr);
/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
spin_unlock (&sq->lock, &ctxsq);
free (sq_entry);
}
spin_unlock (&proc->lock, &ctxpr);
proc_sqs_cleanup (proc);
proc_mutexes_cleanup (proc);
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);

@@ -5,84 +5,43 @@
#include <mm/liballoc.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
static void proc_mutex_suspend (struct proc* proc, struct proc_suspension_q* sq,
spin_lock_t* resource_lock, spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
void proc_mutexes_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxrs;
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return;
spin_lock (&proc->procgroup->lock, &ctxpg);
struct rb_node_link* rnode;
rbtree_first (&proc->procgroup->resource_tree, rnode);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
rnode = next;
spin_lock (&resource->lock, &ctxrs);
if (resource->type != PR_MUTEX) {
spin_unlock (&resource->lock, &ctxrs);
continue;
}
sq_entry->proc = proc;
sq_entry->sq = sq;
if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
spin_unlock (&resource->lock, &ctxrs);
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
spin_unlock (resource_lock, ctxrl);
atomic_store (&proc->state, PROC_SUSPENDED);
/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);
/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
proc->cpu = NULL;
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
cpu_request_sched (cpu);
proc_mutex_unlock (proc, &resource->u.mutex);
}
}
static void proc_mutex_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
proc->cpu = cpu;
if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
free (sq_entry);
cpu_request_sched (cpu);
spin_unlock (&proc->procgroup->lock, &ctxpg);
}
void proc_cleanup_resource_mutex (struct proc_resource* resource) {
@@ -101,7 +60,7 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
proc_mutex_resume (suspended_proc, sq_entry);
proc_sq_resume (suspended_proc, sq_entry);
/* reacquire */
spin_lock (&mutex->resource->lock, &ctxmt);
@@ -128,7 +87,7 @@ void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
return;
}
proc_mutex_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
}
}
@@ -156,7 +115,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
proc_mutex_resume (resumed_proc, sq_entry);
proc_sq_resume (resumed_proc, sq_entry);
return true;
}

@@ -18,5 +18,6 @@ struct proc_mutex {
void proc_cleanup_resource_mutex (struct proc_resource* resource);
void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
void proc_mutexes_cleanup (struct proc* proc);
#endif // _KERNEL_PROC_MUTEX_H

@@ -27,13 +27,6 @@
struct cpu;
struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};
struct proc {
int pid;
struct rb_node_link proc_tree_link;

@@ -1,9 +1,11 @@
c += proc/proc.c \
proc/resource.c \
proc/mutex.c \
proc/procgroup.c
proc/procgroup.c \
proc/suspension_q.c
o += proc/proc.o \
proc/resource.o \
proc/mutex.o \
proc/procgroup.o
proc/procgroup.o \
proc/suspension_q.o

kernel/proc/suspension_q.c (new file, 111 lines)

@@ -0,0 +1,111 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return;
}
sq_entry->proc = proc;
sq_entry->sq = sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
spin_unlock (resource_lock, ctxrl);
atomic_store (&proc->state, PROC_SUSPENDED);
/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);
/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
proc->cpu = NULL;
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
cpu_request_sched (cpu);
}
void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
proc->cpu = cpu;
if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
free (sq_entry);
cpu_request_sched (cpu);
}
void proc_sqs_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;
spin_lock (&proc->lock, &ctxpr);
/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
spin_unlock (&sq->lock, &ctxsq);
free (sq_entry);
}
spin_unlock (&proc->lock, &ctxpr);
}

@@ -4,9 +4,23 @@
#include <libk/list.h>
#include <sync/spin_lock.h>
struct proc;
struct proc_suspension_q {
struct list_node_link* proc_list;
spin_lock_t lock;
};
struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};
void proc_sqs_cleanup (struct proc* proc);
void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl);
void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
#endif // _KERNEL_PROC_SUSPENTION_Q_H
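
For context, a minimal sketch of how another blocking primitive could use the suspension-queue API declared above, mirroring the proc_mutex_lock()/proc_mutex_unlock() call pattern in this change. The proc_event type, its signaled flag, its resource back-pointer, and the head-of-list access on waiters.proc_list are illustrative assumptions only; they are not part of these commits, and initialization is omitted.

/* Hypothetical example, not part of this change: a simple "event" built on
 * the suspension-queue API, following the same pattern as proc_mutex. */
#include <libk/list.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/spin_lock.h>

struct proc_event {
	bool signaled;                     /* assumed state flag */
	struct proc_suspension_q waiters;  /* queue type from suspension_q.h */
	struct proc_resource* resource;    /* owning resource, as in proc_mutex */
};

void proc_event_wait (struct proc* proc, struct proc_event* ev) {
	spin_lock_ctx_t ctxrs;
	spin_lock (&ev->resource->lock, &ctxrs);
	if (ev->signaled) {
		spin_unlock (&ev->resource->lock, &ctxrs);
		return;
	}
	/* proc_sq_suspend() enqueues the proc on the waiters queue, pulls it off
	 * its CPU's run queue and releases the resource lock for us. */
	proc_sq_suspend (proc, &ev->waiters, &ev->resource->lock, &ctxrs);
}

void proc_event_signal (struct proc_event* ev) {
	spin_lock_ctx_t ctxrs, ctxsq;
	spin_lock (&ev->resource->lock, &ctxrs);
	ev->signaled = true;
	spin_lock (&ev->waiters.lock, &ctxsq);
	/* Assumption: proc_list points at the first waiter's sq_link (NULL when
	 * the queue is empty), as suggested by the checks in proc_sq_resume(). */
	struct list_node_link* link = ev->waiters.proc_list;
	if (!link) {
		spin_unlock (&ev->waiters.lock, &ctxsq);
		spin_unlock (&ev->resource->lock, &ctxrs);
		return;
	}
	struct proc_sq_entry* sq_entry = list_entry (link, struct proc_sq_entry, sq_link);
	struct proc* waiter = sq_entry->proc;
	/* Drop both locks before resuming, as proc_mutex_unlock() does. */
	spin_unlock (&ev->waiters.lock, &ctxsq);
	spin_unlock (&ev->resource->lock, &ctxrs);
	/* proc_sq_resume() dequeues and frees the entry and puts the proc back
	 * on the lightest CPU's run queue. */
	proc_sq_resume (waiter, sq_entry);
}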