#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Release every mutex in the process group's resource tree that is
 * currently held by `proc`. */
void proc_mutexes_cleanup (struct proc* proc) {
  spin_lock_ctx_t ctxpg, ctxrs;

  spin_lock (&proc->procgroup->lock, &ctxpg);

  struct rb_node_link* rnode;
  rbtree_first (&proc->procgroup->resource_tree, rnode);

  while (rnode) {
    struct rb_node_link* next;
    rbtree_next (rnode, next);

    struct proc_resource* resource =
        rbtree_entry (rnode, struct proc_resource, resource_tree_link);
    rnode = next;

    spin_lock (&resource->lock, &ctxrs);

    if (resource->type != PR_MUTEX) {
      spin_unlock (&resource->lock, &ctxrs);
      continue;
    }

    if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
      /* Drop the resource lock first: proc_mutex_unlock re-acquires it. */
      spin_unlock (&resource->lock, &ctxrs);
      proc_mutex_unlock (proc, &resource->u.mutex);
    } else {
      /* Mutex not held by this process: release the resource lock and
       * move on (otherwise the lock would leak). */
      spin_unlock (&resource->lock, &ctxrs);
    }
  }

  spin_unlock (&proc->procgroup->lock, &ctxpg);
}

/* Tear down a mutex resource: resume every waiter on its suspension queue,
 * then mark the mutex unlocked and ownerless. */
bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
  struct proc_mutex* mutex = &resource->u.mutex;
  spin_lock_ctx_t ctxmt, ctxsq;

  spin_lock (&mutex->resource->lock, &ctxmt);
  spin_lock (&mutex->suspension_q.lock, &ctxsq);

  bool reschedule = PROC_NO_RESCHEDULE;

  while (mutex->suspension_q.proc_list != NULL) {
    struct list_node_link* node = mutex->suspension_q.proc_list;
    struct proc_sq_entry* sq_entry =
        list_entry (node, struct proc_sq_entry, sq_link);
    struct proc* suspended_proc = sq_entry->proc;

    /* Drop both locks around the resume; we re-lock below. */
    spin_unlock (&mutex->suspension_q.lock, &ctxsq);
    spin_unlock (&mutex->resource->lock, &ctxmt);

    /* Call proc_sq_resume unconditionally; folding it into a short-circuit
     * OR would skip the resume (and spin forever) once `reschedule` is set. */
    bool resumed = proc_sq_resume (suspended_proc, sq_entry);
    reschedule = reschedule || resumed;

    /* Reacquire before examining the queue again. */
    spin_lock (&mutex->resource->lock, &ctxmt);
    spin_lock (&mutex->suspension_q.lock, &ctxsq);
  }

  mutex->locked = false;
  mutex->owner = NULL;

  spin_unlock (&mutex->suspension_q.lock, &ctxsq);
  spin_unlock (&mutex->resource->lock, &ctxmt);

  return reschedule;
}

/* Acquire the mutex for `proc`.  If it is free (or already owned by `proc`),
 * take it immediately; otherwise suspend the caller on the mutex's queue. */
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
  spin_lock_ctx_t ctxmt;

  spin_lock (&mutex->resource->lock, &ctxmt);

  if (!mutex->locked || mutex->owner == proc) {
    mutex->locked = true;
    mutex->owner = proc;
    spin_unlock (&mutex->resource->lock, &ctxmt);
    return PROC_NO_RESCHEDULE;
  }

  /* Contended: queue the caller; the resource lock and its context are
   * handed to proc_sq_suspend. */
  return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
}

/* Release the mutex.  If another process is waiting, ownership is handed
 * directly to the first waiter and that process is resumed. */
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
  spin_lock_ctx_t ctxmt, ctxsq;

  spin_lock (&mutex->resource->lock, &ctxmt);

  if (mutex->owner != proc) {
    /* Only the owner may unlock. */
    spin_unlock (&mutex->resource->lock, &ctxmt);
    return PROC_NO_RESCHEDULE;
  }

  spin_lock (&mutex->suspension_q.lock, &ctxsq);

  struct list_node_link* node = mutex->suspension_q.proc_list;
  if (node) {
    struct proc_sq_entry* sq_entry =
        list_entry (node, struct proc_sq_entry, sq_link);
    struct proc* resumed_proc = sq_entry->proc;

    /* Hand the mutex to the first waiter, then wake it. */
    mutex->owner = resumed_proc;
    mutex->locked = true;

    spin_unlock (&mutex->suspension_q.lock, &ctxsq);
    spin_unlock (&mutex->resource->lock, &ctxmt);

    return proc_sq_resume (resumed_proc, sq_entry);
  }

  /* No waiters: mark the mutex free. */
  mutex->locked = false;
  mutex->owner = NULL;

  spin_unlock (&mutex->suspension_q.lock, &ctxsq);
  spin_unlock (&mutex->resource->lock, &ctxmt);

  return PROC_NEED_RESCHEDULE;
}
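
/*
 * Usage sketch, illustrative only: it shows the calling convention, namely
 * that proc_mutex_lock() and proc_mutex_unlock() return a reschedule hint
 * the caller is expected to act on.  proc_current() and proc_schedule()
 * below are assumed helpers (a "current process" accessor and a scheduler
 * yield), not functions defined by this file.
 */
extern struct proc* proc_current (void);   /* assumed helper */
extern void proc_schedule (void);          /* assumed helper */

void proc_mutex_usage_sketch (struct proc_mutex* mutex) {
  struct proc* self = proc_current ();

  /* May be PROC_NEED_RESCHEDULE if the mutex was contended. */
  bool reschedule = proc_mutex_lock (self, mutex);

  /* ... critical section protected by the mutex ... */

  /* Unlocking may hand the mutex to a waiter and resume it. */
  bool handed_off = proc_mutex_unlock (self, mutex);

  if (reschedule || handed_off)
    proc_schedule ();
}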