Redesign scheduling points
@@ -44,13 +44,15 @@ void proc_mutexes_cleanup (struct proc* proc) {
   spin_unlock (&proc->procgroup->lock, &ctxpg);
 }
 
-void proc_cleanup_resource_mutex (struct proc_resource* resource) {
+bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
   struct proc_mutex* mutex = &resource->u.mutex;
   spin_lock_ctx_t ctxmt, ctxsq;
 
   spin_lock (&mutex->resource->lock, &ctxmt);
   spin_lock (&mutex->suspension_q.lock, &ctxsq);
 
+  bool reschedule = PROC_NO_RESCHEDULE;
+
   while (mutex->suspension_q.proc_list != NULL) {
     struct list_node_link* node = mutex->suspension_q.proc_list;
     struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
@@ -60,7 +62,7 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);
 
-    proc_sq_resume (suspended_proc, sq_entry);
+    reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);
 
     /* reacquire */
     spin_lock (&mutex->resource->lock, &ctxmt);
@@ -72,23 +74,23 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {
 
   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);
+
+  return reschedule;
 }
 
-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
   spin_lock_ctx_t ctxmt;
 
-  for (;;) {
-    spin_lock (&mutex->resource->lock, &ctxmt);
+  spin_lock (&mutex->resource->lock, &ctxmt);
 
-    if (!mutex->locked || mutex->owner == proc) {
-      mutex->locked = true;
-      mutex->owner = proc;
-      spin_unlock (&mutex->resource->lock, &ctxmt);
-      return;
-    }
+  if (!mutex->locked || mutex->owner == proc) {
+    mutex->locked = true;
+    mutex->owner = proc;
+    spin_unlock (&mutex->resource->lock, &ctxmt);
+    return PROC_NO_RESCHEDULE;
+  }
 
-    proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
-  }
+  return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
 }
 
 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
@@ -98,7 +100,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
 
   if (mutex->owner != proc) {
     spin_unlock (&mutex->resource->lock, &ctxmt);
-    return false;
+    return PROC_NO_RESCHEDULE;
   }
 
   spin_lock (&mutex->suspension_q.lock, &ctxsq);
@@ -115,9 +117,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);
 
-    proc_sq_resume (resumed_proc, sq_entry);
-
-    return true;
+    return proc_sq_resume (resumed_proc, sq_entry);
   }
 
   mutex->locked = false;
@@ -126,5 +126,5 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);
 
-  return true;
+  return PROC_NEED_RESCHEDULE;
 }

@@ -15,8 +15,8 @@ struct proc_mutex {
   struct proc* owner;
 };
 
-void proc_cleanup_resource_mutex (struct proc_resource* resource);
-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
+bool proc_cleanup_resource_mutex (struct proc_resource* resource);
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
 void proc_mutexes_cleanup (struct proc* proc);

@@ -17,6 +17,9 @@
 #include <amd64/proc.h> /* USTACK_SIZE */
 #endif
 
+#define PROC_NEED_RESCHEDULE true
+#define PROC_NO_RESCHEDULE false
+
 /* process states */
 #define PROC_READY 0
 #define PROC_DEAD 1

@@ -51,7 +51,9 @@ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, i
   return resource;
 }
 
-void proc_delete_resource (struct proc_resource* resource) {
-  resource->ops.cleanup (resource);
+bool proc_delete_resource (struct proc_resource* resource) {
+  bool reschedule = resource->ops.cleanup (resource);
   free (resource);
+
+  return reschedule;
 }

@@ -21,12 +21,12 @@ struct proc_resource {
     struct proc_mutex mutex;
   } u;
   struct {
-    void (*cleanup) (struct proc_resource* resource);
+    bool (*cleanup) (struct proc_resource* resource);
   } ops;
 };
 
 struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
 struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
-void proc_delete_resource (struct proc_resource* resource);
+bool proc_delete_resource (struct proc_resource* resource);
 
 #endif // _KERNEL_PROC_RESOURCE_H

@@ -8,7 +8,7 @@
 #include <sys/smp.h>
 #include <sys/spin_lock.h>
 
-void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
                       spin_lock_ctx_t* ctxrl) {
   spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
   struct cpu* cpu = proc->cpu;
@@ -16,7 +16,7 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
   struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
   if (!sq_entry) {
     spin_unlock (resource_lock, ctxrl);
-    return;
+    return PROC_NO_RESCHEDULE;
   }
 
   sq_entry->proc = proc;
@@ -48,10 +48,10 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
   spin_unlock (&proc->lock, &ctxpr);
   spin_unlock (&cpu->lock, &ctxcpu);
 
-  cpu_request_sched (cpu);
+  return PROC_NEED_RESCHEDULE;
 }
 
-void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
+bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
  spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
   struct cpu* cpu = cpu_find_lightest ();
   struct proc_suspension_q* sq = sq_entry->sq;
@@ -80,7 +80,7 @@ void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
 
   free (sq_entry);
 
-  cpu_request_sched (cpu);
+  return PROC_NEED_RESCHEDULE;
 }
 
 void proc_sqs_cleanup (struct proc* proc) {

@@ -19,8 +19,8 @@ struct proc_sq_entry {
 };
 
 void proc_sqs_cleanup (struct proc* proc);
-void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
                       spin_lock_ctx_t* ctxrl);
-void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
+bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
 
 #endif // _KERNEL_PROC_SUSPENTION_Q_H
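
Taken together, the suspension-queue, mutex, and resource helpers now report whether they created a scheduling point (PROC_NEED_RESCHEDULE / PROC_NO_RESCHEDULE, defined in proc.h) instead of calling cpu_request_sched themselves, leaving the actual reschedule request to the caller. A minimal caller-side sketch follows; the syscall handler name, include paths, and error value are illustrative assumptions and not part of this commit — only the proc_* functions, the PROC_* flags, and cpu_request_sched come from the code above.

/* Sketch only: a hypothetical caller that consumes the reschedule hint.
 * sys_mutex_lock, the include paths and the error value are assumptions;
 * proc_find_resource, proc_mutex_lock, cpu_request_sched and the PROC_*
 * flags are taken from the diff above. */
#include <kernel/proc.h>           /* PROC_NEED_RESCHEDULE, PROC_NO_RESCHEDULE (assumed path) */
#include <kernel/proc_mutex.h>     /* proc_mutex_lock (assumed path) */
#include <kernel/proc_resource.h>  /* proc_find_resource (assumed path) */
#include <sys/smp.h>               /* assumed to declare cpu_request_sched */

long sys_mutex_lock (struct proc* proc, int rid) {
  struct proc_resource* resource = proc_find_resource (proc->procgroup, rid);
  if (!resource)
    return -1;                     /* illustrative error value */

  /* the mutex code no longer requests a reschedule itself; it only
   * reports whether the calling proc was suspended */
  bool reschedule = proc_mutex_lock (proc, &resource->u.mutex);

  if (reschedule == PROC_NEED_RESCHEDULE)
    cpu_request_sched (proc->cpu); /* single scheduling point, chosen by the caller */

  return 0;
}

One visible effect in the diff itself: proc_cleanup_resource_mutex ORs the results of several proc_sq_resume calls into a single flag, so a caller issues at most one reschedule request for the whole cleanup instead of one per resumed proc.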