Implement waiting for a process; add command cancellation (CE); support many CPUs in reschedule_ctx
All checks were successful
Build documentation / build-and-deploy (push) Successful in 2m27s
All checks were successful
Build documentation / build-and-deploy (push) Successful in 2m27s
This commit is contained in:
@@ -56,8 +56,6 @@ void bootmain (void) {
|
||||
devices_init ();
|
||||
vfs_init ();
|
||||
|
||||
struct reschedule_ctx rctx = {.cpu = NULL, .reschedule = false};
|
||||
|
||||
struct device* ramdisk = device_find ("RD");
|
||||
vfs_create_volume ("RD", VFS_TARFS, ramdisk, false);
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include <amd64/io.h>
|
||||
#include <aux/compiler.h>
|
||||
#include <irq/irq.h>
|
||||
#include <libk/lengthof.h>
|
||||
#include <libk/std.h>
|
||||
#include <libk/string.h>
|
||||
#include <mm/liballoc.h>
|
||||
@@ -160,11 +161,19 @@ static void intr_exception (struct saved_regs* regs) {
|
||||
regs->rbx);
|
||||
|
||||
if (regs->cs == (GDT_UCODE | 0x03)) {
|
||||
struct reschedule_ctx rctx = {.reschedule = false, .cpu = NULL};
|
||||
struct reschedule_ctx rctx = {0};
|
||||
proc_kill (thiscpu->proc_current, &rctx);
|
||||
|
||||
if (rctx.reschedule)
|
||||
cpu_request_sched (rctx.cpu);
|
||||
bool do_thiscpu = false;
|
||||
for (size_t i = 0; i < lengthof (rctx.cpus); i++) {
|
||||
if (rctx.cpus[i] != NULL && rctx.cpus[i] != thiscpu)
|
||||
cpu_request_sched (rctx.cpus[i]);
|
||||
else
|
||||
do_thiscpu = true;
|
||||
}
|
||||
|
||||
if (do_thiscpu)
|
||||
cpu_request_sched (thiscpu);
|
||||
} else {
|
||||
__asm__ volatile ("cli");
|
||||
spin ();
|
||||
@@ -196,11 +205,19 @@ void intr_handler (void* stack_ptr) {
|
||||
if (irq == NULL)
|
||||
return;
|
||||
|
||||
struct reschedule_ctx rctx = {.reschedule = false, .cpu = NULL};
|
||||
struct reschedule_ctx rctx = {0};
|
||||
irq->func (irq->arg, stack_ptr, &rctx);
|
||||
|
||||
if (rctx.reschedule)
|
||||
cpu_request_sched (rctx.cpu);
|
||||
bool do_thiscpu = false;
|
||||
for (size_t i = 0; i < lengthof (rctx.cpus); i++) {
|
||||
if (rctx.cpus[i] != NULL && rctx.cpus[i] != thiscpu)
|
||||
cpu_request_sched (rctx.cpus[i]);
|
||||
else
|
||||
do_thiscpu = true;
|
||||
}
|
||||
|
||||
if (do_thiscpu)
|
||||
cpu_request_sched (thiscpu);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -109,6 +109,26 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent
|
||||
}
|
||||
|
||||
void proc_cleanup (struct proc* proc, struct reschedule_ctx* rctx) {
|
||||
spin_lock (&proc->lock);
|
||||
spin_lock (&proc->done_sq.lock);
|
||||
|
||||
while (proc->done_sq.proc_list != NULL) {
|
||||
struct list_node_link* node = proc->done_sq.proc_list;
|
||||
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
|
||||
struct proc* suspended_proc = sq_entry->proc;
|
||||
|
||||
spin_unlock (&proc->done_sq.lock);
|
||||
spin_unlock (&proc->lock);
|
||||
|
||||
proc_sq_resume (suspended_proc, sq_entry, rctx);
|
||||
|
||||
spin_lock (&proc->lock);
|
||||
spin_lock (&proc->done_sq.lock);
|
||||
}
|
||||
|
||||
spin_unlock (&proc->done_sq.lock);
|
||||
spin_unlock (&proc->lock);
|
||||
|
||||
proc_sqs_cleanup (proc);
|
||||
proc_mutexes_cleanup (proc, rctx);
|
||||
|
||||
|
||||
@@ -100,10 +100,10 @@ static void smp_bootstrap (struct limine_mp_info* mp_info) {
|
||||
|
||||
atomic_fetch_sub (&cpu_counter, 1);
|
||||
|
||||
struct reschedule_ctx rctx = {.cpu = NULL, .reschedule = false};
|
||||
struct reschedule_ctx rctx = {0};
|
||||
|
||||
struct proc* spin_proc = proc_from_file (VFS_KERNEL, "RD", "/spin", &rctx);
|
||||
proc_register (spin_proc, thiscpu, NULL);
|
||||
proc_register (spin_proc, thiscpu, &rctx);
|
||||
|
||||
spin_lock (&spin_proc->cpu->lock);
|
||||
do_sched (spin_proc, &spin_proc->cpu->lock);
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
#include <amd64/mm.h>
|
||||
#include <amd64/msr-index.h>
|
||||
#include <amd64/msr.h>
|
||||
#include <libk/lengthof.h>
|
||||
#include <libk/list.h>
|
||||
#include <libk/string.h>
|
||||
#include <mm/liballoc.h>
|
||||
@@ -37,7 +38,7 @@ uintptr_t syscall_dispatch (void* stack_ptr) {
|
||||
return -ST_SYSCALL_NOT_FOUND;
|
||||
}
|
||||
|
||||
struct reschedule_ctx rctx = {.reschedule = false, .cpu = NULL};
|
||||
struct reschedule_ctx rctx = {0};
|
||||
|
||||
uintptr_t r =
|
||||
func (caller, regs, &rctx, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
|
||||
@@ -50,8 +51,16 @@ uintptr_t syscall_dispatch (void* stack_ptr) {
|
||||
spin_unlock (&caller->lock);
|
||||
}
|
||||
|
||||
if (rctx.reschedule)
|
||||
cpu_request_sched (rctx.cpu);
|
||||
bool do_thiscpu = false;
|
||||
for (size_t i = 0; i < lengthof (rctx.cpus); i++) {
|
||||
if (rctx.cpus[i] != NULL && rctx.cpus[i] != thiscpu)
|
||||
cpu_request_sched (rctx.cpus[i]);
|
||||
else
|
||||
do_thiscpu = true;
|
||||
}
|
||||
|
||||
if (do_thiscpu)
|
||||
cpu_request_sched (thiscpu);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@@ -37,6 +37,8 @@ void proc_mutexes_cleanup (struct proc* proc, struct reschedule_ctx* rctx) {
|
||||
|
||||
proc_mutex_unlock (proc, &resource->u.mutex, rctx);
|
||||
}
|
||||
|
||||
spin_unlock (&resource->lock);
|
||||
}
|
||||
|
||||
spin_unlock (&proc->procgroup->lock);
|
||||
|
||||
@@ -198,10 +198,7 @@ void proc_register (struct proc* proc, struct cpu* register_cpu, struct reschedu
|
||||
spin_unlock (&cpu->lock);
|
||||
spin_unlock (&proc_tree_lock);
|
||||
|
||||
if (rctx != NULL) {
|
||||
rctx->reschedule = true;
|
||||
rctx->cpu = cpu;
|
||||
}
|
||||
rctx_insert_cpu (rctx, cpu);
|
||||
}
|
||||
|
||||
/* caller holds cpu->lock */
|
||||
@@ -225,11 +222,15 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
|
||||
struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
|
||||
|
||||
spin_lock (&proc->lock);
|
||||
int state = proc->state;
|
||||
spin_unlock (&proc->lock);
|
||||
|
||||
if (state == PROC_READY)
|
||||
int state = proc->state;
|
||||
|
||||
if (state == PROC_READY) {
|
||||
spin_unlock (&proc->lock);
|
||||
return proc;
|
||||
}
|
||||
|
||||
spin_unlock (&proc->lock);
|
||||
|
||||
current = current->next ? current->next : cpu->proc_run_q;
|
||||
} while (current != start);
|
||||
@@ -278,7 +279,7 @@ static void proc_reap (struct reschedule_ctx* rctx) {
|
||||
|
||||
void proc_sched (void) {
|
||||
int s_cycles = atomic_fetch_add (&sched_cycles, 1);
|
||||
struct reschedule_ctx rctx = {.reschedule = false, .cpu = NULL};
|
||||
struct reschedule_ctx rctx = {0};
|
||||
|
||||
if (s_cycles % SCHED_REAP_FREQ == 0)
|
||||
proc_reap (&rctx);
|
||||
@@ -321,14 +322,24 @@ void proc_kill (struct proc* proc, struct reschedule_ctx* rctx) {
|
||||
spin_unlock (&proc->lock);
|
||||
spin_unlock (&cpu->lock);
|
||||
|
||||
rctx->reschedule = true;
|
||||
rctx->cpu = cpu;
|
||||
rctx_insert_cpu (rctx, cpu);
|
||||
|
||||
DEBUG ("killed PID %d\n", proc->pid);
|
||||
}
|
||||
|
||||
/* Suspend `proc` until `wait_proc` terminates.
 *
 * Parks `proc` on wait_proc's done_sq suspension queue; proc_cleanup
 * drains that queue (via proc_sq_resume) when wait_proc dies, which is
 * what eventually wakes the waiter.  No external resource lock protects
 * the queue handoff here, hence the NULL lock argument to
 * proc_sq_suspend.  CPUs needing a reschedule are collected in `rctx`
 * by the callee. */
void proc_wait_for (struct proc* proc, struct reschedule_ctx* rctx, struct proc* wait_proc) {
  proc_sq_suspend (proc, &wait_proc->done_sq, NULL, rctx);
}
|
||||
|
||||
static void proc_irq_sched (void* arg, void* regs, struct reschedule_ctx* rctx) {
|
||||
(void)arg, (void)regs;
|
||||
|
||||
#if defined(__x86_64__)
|
||||
struct saved_regs* sr = regs;
|
||||
if (sr->cs != (GDT_UCODE | 0x3))
|
||||
return;
|
||||
#endif
|
||||
|
||||
proc_sched ();
|
||||
}
|
||||
|
||||
@@ -338,14 +349,14 @@ void proc_init (void) {
|
||||
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
|
||||
#endif
|
||||
|
||||
struct reschedule_ctx rctx = {.cpu = NULL, .reschedule = false};
|
||||
struct reschedule_ctx rctx = {0};
|
||||
|
||||
struct proc* spin_proc = proc_from_file (VFS_KERNEL, "RD", "/spin", &rctx);
|
||||
proc_register (spin_proc, thiscpu, NULL);
|
||||
proc_register (spin_proc, thiscpu, &rctx);
|
||||
|
||||
struct proc* init = proc_from_file (VFS_KERNEL, "RD", "/init", &rctx);
|
||||
init->procgroup->capabilities |= (PROC_CAP_TERMINAL | PROC_CAP_KB);
|
||||
proc_register (init, thiscpu, NULL);
|
||||
proc_register (init, thiscpu, &rctx);
|
||||
|
||||
spin_lock (&spin_proc->cpu->lock);
|
||||
do_sched (spin_proc, &spin_proc->cpu->lock);
|
||||
|
||||
@@ -47,6 +47,7 @@ struct proc {
|
||||
void* mail_recv_buffer;
|
||||
size_t mail_recv_size;
|
||||
char cwv[VOLUME_MAX];
|
||||
struct proc_suspension_q done_sq;
|
||||
};
|
||||
|
||||
void proc_sched (void);
|
||||
@@ -68,6 +69,8 @@ int proc_alloc_pid (void);
|
||||
|
||||
void proc_pid_alloc_init (void);
|
||||
|
||||
void proc_wait_for (struct proc* proc, struct reschedule_ctx* rctx, struct proc* wait_proc);
|
||||
|
||||
void proc_init (void);
|
||||
|
||||
#endif // _KERNEL_PROC_PROC_H
|
||||
|
||||
@@ -6,8 +6,24 @@
|
||||
#include <sys/smp.h>
|
||||
|
||||
struct reschedule_ctx {
|
||||
bool reschedule;
|
||||
struct cpu* cpu;
|
||||
struct cpu* cpus[CPUS_MAX];
|
||||
};
|
||||
|
||||
#define rctx_insert_cpu(rctx, cpu) \
|
||||
do { \
|
||||
bool __found = false; \
|
||||
for (size_t __i = 0; __i < CPUS_MAX; __i++) { \
|
||||
if ((rctx)->cpus[__i] == (cpu)) \
|
||||
__found = true; \
|
||||
} \
|
||||
if (!__found) { \
|
||||
for (size_t __i = 0; __i < CPUS_MAX; __i++) { \
|
||||
if ((rctx)->cpus[__i] == NULL) { \
|
||||
(rctx)->cpus[__i] = (cpu); \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#endif // _KERNEL_PROC_RESCHEDULE_H
|
||||
|
||||
@@ -15,7 +15,8 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
|
||||
|
||||
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
|
||||
if (!sq_entry) {
|
||||
spin_unlock (resource_lock);
|
||||
if (resource_lock != NULL)
|
||||
spin_unlock (resource_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -26,7 +27,8 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
|
||||
spin_lock (&proc->lock);
|
||||
spin_lock (&sq->lock);
|
||||
|
||||
spin_unlock (resource_lock);
|
||||
if (resource_lock != NULL)
|
||||
spin_unlock (resource_lock);
|
||||
|
||||
proc->state = PROC_SUSPENDED;
|
||||
|
||||
@@ -48,8 +50,7 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
|
||||
spin_unlock (&proc->lock);
|
||||
spin_unlock (&cpu->lock);
|
||||
|
||||
rctx->reschedule = true;
|
||||
rctx->cpu = cpu;
|
||||
rctx_insert_cpu (rctx, cpu);
|
||||
}
|
||||
|
||||
void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry,
|
||||
@@ -81,8 +82,7 @@ void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry,
|
||||
|
||||
free (sq_entry);
|
||||
|
||||
rctx->reschedule = true;
|
||||
rctx->cpu = cpu;
|
||||
rctx_insert_cpu (rctx, cpu);
|
||||
}
|
||||
|
||||
void proc_sqs_cleanup (struct proc* proc) {
|
||||
|
||||
@@ -105,12 +105,17 @@ DEFINE_SYSCALL (sys_clone) {
|
||||
}
|
||||
|
||||
/* void* argument_ptr (void) */
|
||||
DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
|
||||
/* void* argument_ptr (void)
 * Return the user-space address of the caller's argument block.
 * proc->lock guards the read so it cannot race a concurrent update.
 * NOTE(review): returns the raw address rather than SYSRESULT(...) —
 * presumably intentional since the value is a pointer, not a status;
 * confirm against the SYSRESULT convention. */
DEFINE_SYSCALL (sys_argument_ptr) {
  spin_lock (&proc->lock);
  uintptr_t p = proc->uvaddr_argument;
  spin_unlock (&proc->lock);

  return p;
}
|
||||
|
||||
/* int sched (void) */
|
||||
DEFINE_SYSCALL (sys_sched) {
|
||||
rctx->reschedule = true;
|
||||
rctx->cpu = thiscpu;
|
||||
rctx_insert_cpu (rctx, thiscpu);
|
||||
return SYSRESULT (ST_OK);
|
||||
}
|
||||
|
||||
@@ -121,7 +126,11 @@ DEFINE_SYSCALL (sys_mutex_create) {
|
||||
if (mutex_resource == NULL)
|
||||
return SYSRESULT (-ST_OOM_ERROR);
|
||||
|
||||
return SYSRESULT (mutex_resource->rid);
|
||||
spin_lock (&mutex_resource->lock);
|
||||
int rid = mutex_resource->rid;
|
||||
spin_unlock (&mutex_resource->lock);
|
||||
|
||||
return SYSRESULT (rid);
|
||||
}
|
||||
|
||||
/* int mutex_delete (int mutex_rid) */
|
||||
@@ -425,7 +434,18 @@ DEFINE_SYSCALL (sys_get_procgroup) {
|
||||
if (target_proc == NULL)
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
|
||||
return SYSRESULT (target_proc->procgroup->pgid);
|
||||
spin_lock (&target_proc->lock);
|
||||
|
||||
if (target_proc->state == PROC_DEAD) {
|
||||
spin_unlock (&target_proc->lock);
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
}
|
||||
|
||||
int pgid = target_proc->procgroup->pgid;
|
||||
|
||||
spin_unlock (&target_proc->lock);
|
||||
|
||||
return SYSRESULT (pgid);
|
||||
}
|
||||
|
||||
/* int read_dir_entry (char* path, struct dir_entry* entry, size_t entry_num) */
|
||||
@@ -516,7 +536,60 @@ DEFINE_SYSCALL (sys_write_file) {
|
||||
}
|
||||
|
||||
/* int get_exec_pid (void) */
|
||||
DEFINE_SYSCALL (sys_get_exec_pid) { return SYSRESULT (proc->exec_pid); }
|
||||
/* int get_exec_pid (void)
 * Return the caller's exec_pid field — presumably the PID of the
 * process that exec'd/spawned the caller; confirm against the struct
 * proc definition.  proc->lock guards the read so a concurrent writer
 * cannot tear it. */
DEFINE_SYSCALL (sys_get_exec_pid) {
  spin_lock (&proc->lock);
  int exec_pid = proc->exec_pid;
  spin_unlock (&proc->lock);

  return SYSRESULT (exec_pid);
}
|
||||
|
||||
/* wait_for_pid (int pid)
 * Suspend the calling process until the process identified by `pid`
 * exits.  Returns -ST_NOT_FOUND when no process has that PID or the
 * target is already PROC_DEAD; otherwise parks the caller on the
 * target's done_sq (via proc_wait_for) and returns ST_OK.
 *
 * NOTE(review): wait_proc->lock is dropped between the PROC_DEAD check
 * and proc_wait_for — if the target dies in that window the caller may
 * be queued on a done_sq that proc_cleanup has already drained; confirm
 * that path still resumes the waiter.
 * NOTE(review): pid equal to the caller's own PID would suspend the
 * process on its own done_sq — consider rejecting self-wait. */
DEFINE_SYSCALL (sys_wait_for_pid) {
  int pid = (int)a1;

  struct proc* wait_proc = proc_find_pid (pid);

  if (wait_proc == NULL)
    return SYSRESULT (-ST_NOT_FOUND);

  spin_lock (&wait_proc->lock);

  if (wait_proc->state == PROC_DEAD) {
    spin_unlock (&wait_proc->lock);
    return SYSRESULT (-ST_NOT_FOUND);
  }

  spin_unlock (&wait_proc->lock);

  proc_wait_for (proc, rctx, wait_proc);

  return SYSRESULT (ST_OK);
}
|
||||
|
||||
/* int kill (int pid) */
|
||||
|
||||
DEFINE_SYSCALL (sys_kill) {
|
||||
int pid = (int)a1;
|
||||
|
||||
struct proc* target_proc = proc_find_pid (pid);
|
||||
|
||||
if (target_proc == NULL)
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
|
||||
spin_lock (&target_proc->lock);
|
||||
|
||||
if (target_proc->state == PROC_DEAD) {
|
||||
spin_unlock (&target_proc->lock);
|
||||
return SYSRESULT (-ST_NOT_FOUND);
|
||||
}
|
||||
|
||||
spin_unlock (&target_proc->lock);
|
||||
|
||||
proc_kill (target_proc, rctx);
|
||||
|
||||
return ST_OK;
|
||||
}
|
||||
|
||||
static syscall_handler_func_t handler_table[] = {
|
||||
[SYS_QUIT] = &sys_quit,
|
||||
@@ -543,6 +616,8 @@ static syscall_handler_func_t handler_table[] = {
|
||||
[SYS_READ_DIR_ENTRY] = &sys_read_dir_entry,
|
||||
[SYS_CREATE_FILE] = &sys_create_file,
|
||||
[SYS_WRITE_FILE] = &sys_write_file,
|
||||
[SYS_WAIT_FOR_PID] = &sys_wait_for_pid,
|
||||
[SYS_KILL] = &sys_kill,
|
||||
};
|
||||
|
||||
syscall_handler_func_t syscall_find_handler (int syscall_num) {
|
||||
|
||||
Reference in New Issue
Block a user