Add nonblocking mail_receive; fix proc_kill scheduling issues
All checks were successful
Build documentation / build-and-deploy (push) Successful in 3m15s

This commit is contained in:
2026-03-17 21:36:09 +01:00
parent 57abf96daf
commit 0b85d3a0da
14 changed files with 121 additions and 60 deletions

View File

@@ -147,3 +147,51 @@ void proc_mail_receive (struct proc* proc, struct proc_mail* mail, struct resche
/* nothing to receive */
proc_sq_suspend (proc, &mail->recv_sq, &mail->resource->lock, fr, rctx);
}
/* Non-blocking receive: try to consume one pending mail packet for `proc`.
 * Unlike proc_mail_receive, the caller is never suspended.
 *
 * Returns true if a packet was consumed into recv_buffer (it may have been
 * truncated to recv_size), false if the mailbox was empty.
 *
 * Lock order used here: proc->lock (alone), then mail->resource->lock,
 * then mail->send_sq.lock nested inside it. */
bool proc_mail_receive_nonblock (struct proc* proc, struct proc_mail* mail,
struct reschedule_ctx* rctx, void* recv_buffer, size_t recv_size) {
uint64_t fp, fr, fssq;
/* Publish the receive buffer on the proc under its own lock.
 * NOTE(review): on the empty-mailbox path below these fields are left set
 * even though we return false — confirm senders cannot deliver into a
 * buffer the caller no longer expects to be used. */
spin_lock (&proc->lock, &fp);
proc->mail_recv_buffer = recv_buffer;
proc->mail_recv_size = recv_size;
spin_unlock (&proc->lock, fp);
spin_lock (&mail->resource->lock, &fr);
/* consume mesg if available */
if (mail->packets_count > 0) {
/* Take the packet at the head of the queue (FIFO order presumed —
 * list_entry on mail->packets yields the first linked packet). */
struct mail_packet* packet = list_entry (mail->packets, struct mail_packet, packets_link);
list_remove (mail->packets, &packet->packets_link);
mail->packets_count--;
/* Copy out at most recv_size bytes; excess payload is silently dropped. */
memcpy (recv_buffer, packet->packet_buffer, min (recv_size, packet->packet_size));
free (packet->packet_buffer);
free (packet);
/* check for suspended sender */
spin_lock (&mail->send_sq.lock, &fssq);
struct list_node_link* node = mail->send_sq.proc_list;
if (node != NULL) {
/* A sender is blocked waiting for queue space; wake exactly one.
 * Both locks are dropped before proc_sq_resume — presumably
 * proc_sq_resume takes its own locks and removes sq_entry from the
 * suspend queue itself (it is not unlinked here); verify against
 * proc_sq_resume. */
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
struct proc* resumed_proc = sq_entry->proc;
spin_unlock (&mail->send_sq.lock, fssq);
spin_unlock (&mail->resource->lock, fr);
proc_sq_resume (resumed_proc, sq_entry, rctx);
return true;
}
spin_unlock (&mail->send_sq.lock, fssq);
spin_unlock (&mail->resource->lock, fr);
return true;
}
/* nothing queued — report failure instead of suspending */
spin_unlock (&mail->resource->lock, fr);
return false;
}

View File

@@ -37,4 +37,7 @@ void proc_mail_send (struct proc* proc, struct proc_mail* mail, struct reschedul
void proc_mail_receive (struct proc* proc, struct proc_mail* mail, struct reschedule_ctx* rctx,
void* recv_buffer, size_t recv_size);
bool proc_mail_receive_nonblock (struct proc* proc, struct proc_mail* mail,
struct reschedule_ctx* rctx, void* recv_buffer, size_t recv_size);
#endif // _KERNEL_PROC_MAIL_H

View File

@@ -273,40 +273,7 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
return NULL;
}
/* Reap dead processes belonging to the current CPU.
 *
 * Phase 1 (under proc_tree_lock + thiscpu->lock): move every proc marked
 * `dead` off this CPU's run queue and out of the global proc tree onto a
 * private local list. Phase 2 (no global locks held): run proc_cleanup on
 * each collected proc. Splitting the phases keeps proc_cleanup — which may
 * do arbitrary teardown — out of the lock-held region.
 *
 * Lock order: proc_tree_lock, then thiscpu->lock, then each proc->lock. */
static void proc_reaper (struct reschedule_ctx* rctx) {
uint64_t fpt, fp, fc;
struct list_node_link* reaper_list = NULL;
spin_lock (&proc_tree_lock, &fpt);
spin_lock (&thiscpu->lock, &fc);
struct list_node_link *run_link, *tmp_run_link;
/* tmp_run_link makes the foreach safe against list_remove on the
 * current node (presumably a safe-iteration macro — verify). */
list_foreach (thiscpu->proc_run_q, run_link, tmp_run_link) {
struct proc* proc = list_entry (run_link, struct proc, cpu_run_q_link);
if (!proc->dead)
continue;
spin_lock (&proc->lock, &fp);
list_remove (thiscpu->proc_run_q, &proc->cpu_run_q_link);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
/* NOTE(review): proc_run_q_count is not decremented here, unlike the
 * removal path in proc_kill — confirm whether the reaper should also
 * maintain that counter. */
list_append (reaper_list, &proc->reaper_list_link);
spin_unlock (&proc->lock, fp);
}
spin_unlock (&thiscpu->lock, fc);
spin_unlock (&proc_tree_lock, fpt);
struct list_node_link *rlink, *tmp_rlink;
/* Now tear down each reaped proc outside the global locks. */
list_foreach (reaper_list, rlink, tmp_rlink) {
struct proc* proc = list_entry (rlink, struct proc, reaper_list_link);
list_remove (reaper_list, &proc->reaper_list_link);
proc_cleanup (proc, rctx);
}
}
void proc_sched (void) {
void proc_sched (bool user) {
struct proc* next = NULL;
struct cpu* cpu = thiscpu;
uint64_t fc;
@@ -319,7 +286,10 @@ retry:
if (next) {
cpu->proc_current = next;
do_sched (next, &cpu->lock, fc);
if (user)
do_sched (next, &cpu->lock, fc);
else
spin_unlock (&cpu->lock, fc);
} else {
cpu->proc_current = NULL;
spin_unlock (&cpu->lock, fc);
@@ -343,22 +313,25 @@ void proc_kill (struct proc* proc, struct reschedule_ctx* rctx) {
struct cpu* cpu = proc->cpu;
spin_unlock (&proc->lock, fp);
spin_lock (&proc_tree_lock, &fpt);
spin_lock (&cpu->lock, &fc);
spin_lock (&proc->lock, &fp);
proc->cpu = NULL;
proc->dead = true;
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
cpu->proc_run_q_count--;
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
proc->dead = true;
rbtree_delete (&proc_tree, &proc->proc_tree_link);
spin_unlock (&proc->lock, fp);
spin_unlock (&cpu->lock, fc);
rctx_insert_cpu (rctx, cpu);
spin_unlock (&proc_tree_lock, fpt);
DEBUG ("killed PID %d\n", proc->pid);
proc_cleanup (proc, rctx);
rctx_insert_cpu (rctx, cpu);
}
void proc_wait_for (struct proc* proc, struct reschedule_ctx* rctx, struct proc* wait_proc) {
@@ -368,12 +341,6 @@ void proc_wait_for (struct proc* proc, struct reschedule_ctx* rctx, struct proc*
/* Timer/IRQ scheduling hook: reap dead procs, then (only when the interrupt
 * arrived from user mode) mark this CPU for rescheduling via rctx.
 *
 * NOTE(review): `(void)rctx` suppresses an unused-parameter warning for a
 * parameter that IS used on the lines below — likely a leftover from an
 * earlier revision; confirm and drop the cast if so. (This view is a diff
 * rendering, so some of these lines may be the removed side of the hunk.) */
static void proc_irq_sched (void* arg, void* regs, bool user, struct reschedule_ctx* rctx) {
(void)arg, (void)regs, (void)rctx;
proc_reaper (rctx);
/* Kernel-mode interrupts do not trigger a reschedule. */
if (!user) {
return;
}
rctx_insert_cpu (rctx, thiscpu);
}

View File

@@ -54,7 +54,7 @@ struct proc {
struct proc_env env;
};
void proc_sched (void);
void proc_sched (bool user);
void proc_kill (struct proc* proc, struct reschedule_ctx* rctx);

View File

@@ -81,6 +81,8 @@ int proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry,
proc->state = PROC_READY;
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
cpu->proc_run_q_count++;
int state = proc->state;