Make proc->state not atomic
This commit is contained in:
@@ -29,7 +29,7 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
   memset (proc, 0, sizeof (*proc));
 
   proc->lock = SPIN_LOCK_INIT;
-  atomic_store (&proc->state, PROC_READY);
+  proc->state = PROC_READY;
   proc->pid = atomic_fetch_add (&pids, 1);
 
   proc->procgroup = procgroup_create ();
@@ -71,7 +71,7 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent
   memset (proc, 0, sizeof (*proc));
 
   proc->lock = SPIN_LOCK_INIT;
-  atomic_store (&proc->state, PROC_READY);
+  proc->state = PROC_READY;
   proc->pid = atomic_fetch_add (&pids, 1);
 
   spin_lock (&proto->lock);
@@ -233,7 +233,11 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
   do {
     struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
 
-    if (atomic_load (&proc->state) == PROC_READY)
+    spin_lock (&proc->lock);
+    int state = proc->state;
+    spin_unlock (&proc->lock);
+
+    if (state == PROC_READY)
       return proc;
 
     current = current->next ? current->next : cpu->proc_run_q;
@@ -256,13 +260,16 @@ static void proc_reap (struct reschedule_ctx* rctx) {
     rbtree_next (node, next);
     proc = rbtree_entry (node, struct proc, proc_tree_link);
 
-    if (atomic_load (&proc->state) == PROC_DEAD) {
-      node = next;
-
+    spin_lock (&proc->lock);
+
+    if (proc->state == PROC_DEAD) {
       list_append (reap_list, &proc->reap_link);
       spin_unlock (&proc->lock);
       rbtree_delete (&proc_tree, &proc->proc_tree_link);
     }
+
+    node = next;
+    spin_unlock (&proc->lock);
   }
 
   spin_unlock (&proc_tree_lock);
@@ -270,8 +277,9 @@ static void proc_reap (struct reschedule_ctx* rctx) {
   struct list_node_link *reap_link, *reap_link_tmp;
   list_foreach (reap_list, reap_link, reap_link_tmp) {
     proc = list_entry (reap_link, struct proc, reap_link);
+    rbtree_delete (&proc_tree, &proc->proc_tree_link);
+
     list_remove (reap_list, &proc->reap_link);
-
     DEBUG ("cleanup PID %d\n", proc->pid);
     proc_cleanup (proc, rctx);
   }
@@ -311,7 +319,7 @@ void proc_kill (struct proc* proc, struct reschedule_ctx* rctx) {
   spin_lock (&cpu->lock);
   spin_lock (&proc->lock);
 
-  atomic_store (&proc->state, PROC_DEAD);
+  proc->state = PROC_DEAD;
   proc->cpu = NULL;
 
   list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
@@ -41,7 +41,7 @@ struct proc {
   uint32_t flags;
   spin_lock_t lock;
   struct cpu* cpu;
-  atomic_int state;
+  int state;
   uintptr_t uvaddr_argument;
   void* mail_recv_buffer;
   size_t mail_recv_size;
@@ -28,7 +28,7 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
 
   spin_unlock (resource_lock);
 
-  atomic_store (&proc->state, PROC_SUSPENDED);
+  proc->state = PROC_SUSPENDED;
 
   /* append to sq's list */
   list_append (sq->proc_list, &sq_entry->sq_link);
@@ -70,7 +70,7 @@ void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry,
   proc->cpu = cpu;
 
   if (proc->sq_entries == NULL)
-    atomic_store (&proc->state, PROC_READY);
+    proc->state = PROC_READY;
 
   list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
   cpu->proc_run_q_count++;
Reference in New Issue
Block a user