#include <libk/assert.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
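
/*
 * Lock ordering used in this file: the CPU lock is taken before the proc
 * lock, which is taken before the suspension-queue lock. The resource lock
 * guarding the mutex state is dropped only after the queue lock is held, so
 * a waiter cannot be missed by a concurrent unlock.
 */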

bool proc_create_resource_mutex (struct proc_mutex* mutex) {
    memset (mutex, 0, sizeof (*mutex));

    return true;
}
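
/*
 * Cleanup hook for a proc mutex resource. Releasing the mutex on cleanup is
 * currently disabled (see the commented-out call below).
 */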
void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resource) {
    struct proc_mutex* mutex = &resource->u.mutex;

    /* proc_mutex_unlock (proc, mutex); */
}
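
/*
 * Move the calling proc off its CPU's run queue and onto the suspension
 * queue sq, then request a reschedule. The resource lock protecting the
 * mutex is released only after the CPU, proc and queue locks are held.
 */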
static void proc_mutex_suspend (struct proc* proc, struct proc_suspension_q* sq,
                                spin_lock_t* resource_lock, spin_lock_ctx_t* ctxrl) {
    spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
    struct cpu* cpu = proc->cpu;

    spin_lock (&cpu->lock, &ctxcpu);
    spin_lock (&proc->lock, &ctxpr);
    spin_lock (&sq->lock, &ctxsq);

    /* Safe to drop the resource lock now that the queue lock is held. */
    spin_unlock (resource_lock, ctxrl);

    atomic_store (&proc->state, PROC_SUSPENDED);
    proc->suspension_q = sq;

    /* Take the proc off its CPU's run queue... */
    list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
    atomic_fetch_sub (&cpu->proc_run_q_count, 1);

    if (cpu->proc_current == proc)
        cpu->proc_current = NULL;

    /* ...and park it on the suspension queue. */
    list_append (sq->proc_list, &proc->suspension_link);

    spin_unlock (&sq->lock, &ctxsq);
    spin_unlock (&proc->lock, &ctxpr);
    spin_unlock (&cpu->lock, &ctxcpu);

    cpu_request_sched (cpu);
}
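
/*
 * Take a suspended proc off its suspension queue, mark it PROC_READY, put it
 * back on its CPU's run queue and request a reschedule.
 */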
static void proc_mutex_resume (struct proc* proc) {
    spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
    struct cpu* cpu = proc->cpu;

    spin_lock (&cpu->lock, &ctxcpu);
    spin_lock (&proc->lock, &ctxpr);

    struct proc_suspension_q* sq = proc->suspension_q;

    if (sq) {
        spin_lock (&sq->lock, &ctxsq);

        list_remove (sq->proc_list, &proc->suspension_link);

        proc->suspension_q = NULL;
        atomic_store (&proc->state, PROC_READY);

        /* Back onto the run queue of the CPU it was suspended on. */
        list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
        atomic_fetch_add (&cpu->proc_run_q_count, 1);

        spin_unlock (&sq->lock, &ctxsq);
    }

    spin_unlock (&proc->lock, &ctxpr);
    spin_unlock (&cpu->lock, &ctxcpu);

    cpu_request_sched (cpu);
}
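
/*
 * Acquire the mutex for proc. Re-acquisition by the current owner succeeds
 * immediately; otherwise the proc is suspended on the mutex's queue and the
 * check is retried once it is resumed.
 */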
void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
    spin_lock_ctx_t ctxmt;

    for (;;) {
        spin_lock (&mutex->resource->lock, &ctxmt);

        if (!mutex->locked || mutex->owner == proc) {
            mutex->locked = true;
            mutex->owner = proc;
            spin_unlock (&mutex->resource->lock, &ctxmt);
            return;
        }

        /* Held by another proc: sleep on the queue, then try again. */
        proc_mutex_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
    }
}
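
/*
 * Release the mutex. Returns false if proc is not the owner. If another proc
 * is waiting, ownership is handed to it directly and it is resumed; otherwise
 * the mutex is marked free.
 */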
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
    spin_lock_ctx_t ctxmt, ctxsq;

    spin_lock (&mutex->resource->lock, &ctxmt);

    if (mutex->owner != proc) {
        spin_unlock (&mutex->resource->lock, &ctxmt);
        return false;
    }

    spin_lock (&mutex->suspension_q.lock, &ctxsq);

    struct proc* resumed_proc = NULL;
    struct list_node_link* node = mutex->suspension_q.proc_list;

    if (node) {
        /* Hand the mutex to the first waiter and wake it. */
        resumed_proc = list_entry (node, struct proc, suspension_link);
        mutex->owner = resumed_proc;
        mutex->locked = true;

        spin_unlock (&mutex->suspension_q.lock, &ctxsq);
        spin_unlock (&mutex->resource->lock, &ctxmt);

        proc_mutex_resume (resumed_proc);

        return true;
    }

    /* No waiters: the mutex is now free. */
    mutex->locked = false;
    mutex->owner = NULL;

    spin_unlock (&mutex->suspension_q.lock, &ctxsq);
    spin_unlock (&mutex->resource->lock, &ctxmt);

    return true;
}