Compare commits

...

2 Commits

SHA1        Message                                   Date
a8423fe657  Better proc_kill () and process cleanup   2026-01-06 01:19:11 +01:00
            (all checks successful: Build documentation / build-and-deploy (push), 27s)
6538fd8023  Generate new PIDs for processes           2026-01-05 20:24:26 +01:00
10 changed files with 219 additions and 116 deletions

View File

@@ -9,5 +9,6 @@
#define SCHED_PREEMPT_TIMER 80
#define TLB_SHOOTDOWN 81
#define CPU_REQUEST_SCHED 82
#endif // _KERNEL_AMD64_INTR_DEFS_H

View File

@@ -88,3 +88,4 @@ make_intr_stub(no_err, 47)
make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN)
make_intr_stub(no_err, CPU_REQUEST_SCHED)

View File

@@ -1,5 +1,6 @@
#include <amd64/gdt.h>
#include <aux/elf.h>
#include <libk/list.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
@@ -9,6 +10,8 @@
#include <sync/spin_lock.h>
#include <sys/debug.h>
static atomic_int pids = 1;
struct proc* proc_from_elf (uint8_t* elf_contents) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -39,10 +42,8 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
pmm_free (kernel_stack, USTACK_SIZE / PAGE_SIZE);
return NULL;
}
uintptr_t user_stack = proc->pdata.user_stack;
proc->pdata.user_stack += USTACK_SIZE;
proc_map (proc, user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
struct elf_aux aux = proc_load_segments (proc, elf_contents);
@@ -54,6 +55,30 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
proc->pdata.regs.rip = aux.entry;
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);
proc->pid = atomic_fetch_add (&pids, 1);
return proc;
}
void proc_cleanup (struct proc* proc) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct proc_mapping *mapping, *mapping_tmp;
spin_lock (&proc->pd.lock);
linklist_foreach (proc->mappings, mapping, mapping_tmp) {
pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
linklist_remove (struct proc_mapping*, proc->mappings, mapping);
free (mapping);
}
spin_unlock (&proc->pd.lock);
pmm_free (proc->pd.cr3_paddr, 1);
pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
KSTACK_SIZE / PAGE_SIZE);
pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);
free (proc);
}
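
The PID side of this change (commit 6538fd8023) boils down to a monotonically increasing atomic counter: atomic_fetch_add returns the previous value, so the first process gets PID 1 and IDs are never reused within a boot. A minimal, self-contained sketch of the same pattern; the names next_pid and alloc_pid are illustrative, not from the kernel:

#include <stdatomic.h>

/* Illustrative sketch of the kernel's static atomic_int pids = 1 scheme:
 * each caller gets a distinct, monotonically increasing ID. */
static atomic_int next_pid = 1;

static int alloc_pid (void) {
    /* atomic_fetch_add returns the value before the increment, so
     * concurrent callers never observe the same PID. */
    return atomic_fetch_add (&next_pid, 1);
}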

View File

@@ -1,5 +1,6 @@
#include <amd64/apic.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
@@ -40,6 +41,17 @@ struct cpu* cpu_get (void) {
return ptr;
}
void cpu_request_sched (struct cpu* cpu) {
struct limine_mp_response* mp = limine_mp_request.response;
for (size_t i = 0; i < mp->cpu_count; i++) {
if (cpu->id == i) {
amd64_lapic_ipi (mp->cpus[i]->lapic_id, CPU_REQUEST_SCHED);
break;
}
}
}
/// Bootstrap code for non-BSP CPUs
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_load_kernel_cr3 ();
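
cpu_request_sched is the new cross-CPU kick: it looks up the target CPU's LAPIC ID in the Limine MP response and sends it the CPU_REQUEST_SCHED IPI, whose handler (attached later in the process code) simply calls proc_sched on that CPU. A hedged sketch of the intended calling pattern; request_resched is a hypothetical helper, not in the tree:

/* Hypothetical helper (not in the tree): ask target to run its
 * scheduler, locally if possible, otherwise via the IPI. */
static void request_resched (struct cpu* target) {
    if (target == thiscpu)
        proc_sched ();               /* already on the target CPU */
    else
        cpu_request_sched (target);  /* raises vector CPU_REQUEST_SCHED */
}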

View File

@@ -37,7 +37,7 @@ struct cpu {
struct cpu* cpu_make (void);
struct cpu* cpu_get (void);
void amd64_thiscpu_set_init (void);
void cpu_request_sched (struct cpu* cpu);
#define thiscpu (cpu_get ())

View File

@@ -1,3 +1,4 @@
.global amd64_spin
amd64_spin:
hlt
jmp amd64_spin

View File

@@ -16,42 +16,44 @@ struct rb_node_link {
#define rbtree_right(x) ((x)->right)
#define rbtree_color(x) ((x)->color)
#define rbtree_container_of(ptr, type, member) ((type*)((char*)(ptr) - offsetof (type, member)))
#define rbtree_entry(node, type, member) ((type*)((char*)(node) - offsetof (type, member)))
#define rbtree_entry(node, type, member) rbtree_container_of (node, type, member)
#define rbtree_node_color(x) ((x) ? (x)->color : RBTREE_BLACK)
#define rbtree_rotate_left(root_ptr, x) \
#define rbtree_rotate_left(root_ptr, x_node) \
do { \
struct rb_node_link* __y = (x)->right; \
(x)->right = __y->left; \
struct rb_node_link* __x = (x_node); \
struct rb_node_link* __y = __x->right; \
__x->right = __y->left; \
if (__y->left) \
__y->left->parent = (x); \
__y->parent = (x)->parent; \
if (!(x)->parent) \
__y->left->parent = __x; \
__y->parent = __x->parent; \
if (!__x->parent) \
*(root_ptr) = __y; \
else if ((x) == (x)->parent->left) \
(x)->parent->left = __y; \
else if (__x == __x->parent->left) \
__x->parent->left = __y; \
else \
(x)->parent->right = __y; \
__y->left = (x); \
(x)->parent = __y; \
__x->parent->right = __y; \
__y->left = __x; \
__x->parent = __y; \
} while (0)
#define rbtree_rotate_right(root_ptr, y) \
#define rbtree_rotate_right(root_ptr, y_node) \
do { \
struct rb_node_link* __x = (y)->left; \
(y)->left = __x->right; \
struct rb_node_link* __y = (y_node); \
struct rb_node_link* __x = __y->left; \
__y->left = __x->right; \
if (__x->right) \
__x->right->parent = (y); \
__x->parent = (y)->parent; \
if (!(y)->parent) \
__x->right->parent = __y; \
__x->parent = __y->parent; \
if (!__y->parent) \
*(root_ptr) = __x; \
else if ((y) == (y)->parent->right) \
(y)->parent->right = __x; \
else if (__y == __y->parent->right) \
__y->parent->right = __x; \
else \
(y)->parent->left = __x; \
__x->right = (y); \
(y)->parent = __x; \
__y->parent->left = __x; \
__x->right = __y; \
__y->parent = __x; \
} while (0)
#define rbtree_insert_fixup(root_ptr, z_node) \
@@ -60,7 +62,7 @@ struct rb_node_link {
while (__z->parent && __z->parent->color == RBTREE_RED) { \
if (__z->parent == __z->parent->parent->left) { \
struct rb_node_link* __y = __z->parent->parent->right; \
if (__y && __y->color == RBTREE_RED) { \
if (rbtree_node_color (__y) == RBTREE_RED) { \
__z->parent->color = RBTREE_BLACK; \
__y->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \
@@ -68,15 +70,15 @@ struct rb_node_link {
} else { \
if (__z == __z->parent->right) { \
__z = __z->parent; \
rbtree_rotate_left ((root_ptr), __z); \
rbtree_rotate_left (root_ptr, __z); \
} \
__z->parent->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \
rbtree_rotate_right ((root_ptr), __z->parent->parent); \
rbtree_rotate_right (root_ptr, __z->parent->parent); \
} \
} else { \
struct rb_node_link* __y = __z->parent->parent->left; \
if (__y && __y->color == RBTREE_RED) { \
if (rbtree_node_color (__y) == RBTREE_RED) { \
__z->parent->color = RBTREE_BLACK; \
__y->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \
@@ -84,11 +86,11 @@ struct rb_node_link {
} else { \
if (__z == __z->parent->left) { \
__z = __z->parent; \
rbtree_rotate_right ((root_ptr), __z); \
rbtree_rotate_right (root_ptr, __z); \
} \
__z->parent->color = RBTREE_BLACK; \
__z->parent->parent->color = RBTREE_RED; \
rbtree_rotate_left ((root_ptr), __z->parent->parent); \
rbtree_rotate_left (root_ptr, __z->parent->parent); \
} \
} \
} \
@@ -110,8 +112,7 @@ struct rb_node_link {
__link = &((*__link)->right); \
} \
__new->parent = __parent; \
__new->left = NULL; \
__new->right = NULL; \
__new->left = __new->right = NULL; \
__new->color = RBTREE_RED; \
*__link = __new; \
rbtree_insert_fixup (root_ptr, __new); \
@@ -136,91 +137,145 @@ struct rb_node_link {
#define rbtree_min(node, out) \
do { \
(out) = NULL; \
if ((node)) { \
struct rb_node_link* __n = (node); \
while (__n->left) \
while (__n && __n->left) \
__n = __n->left; \
(out) = __n; \
} \
} while (0)
#define rbtree_transplant(root_ptr, u, v) \
#define rbtree_max(node, out) \
do { \
if (!(u)->parent) \
*(root_ptr) = (v); \
else if ((u) == (u)->parent->left) \
(u)->parent->left = (v); \
(out) = NULL; \
struct rb_node_link* __n = (node); \
while (__n && __n->right) \
__n = __n->right; \
(out) = __n; \
} while (0)
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
#define rbtree_last(root_ptr, out) rbtree_max (*(root_ptr), out)
#define rbtree_transplant(root_ptr, u_node, v_node) \
do { \
struct rb_node_link* __u = (u_node); \
struct rb_node_link* __v = (v_node); \
if (!__u->parent) \
*(root_ptr) = __v; \
else if (__u == __u->parent->left) \
__u->parent->left = __v; \
else \
(u)->parent->right = (v); \
if (v) \
(v)->parent = (u)->parent; \
__u->parent->right = __v; \
if (__v) \
__v->parent = __u->parent; \
} while (0)
#define rbtree_delete_fixup(root_ptr, x_node, xparent_node) \
do { \
struct rb_node_link* __x = (x_node); \
struct rb_node_link* __xparent = (xparent_node); \
while (__x != *(root_ptr) && (__x == NULL || __x->color == RBTREE_BLACK)) { \
if (__x == __xparent->left) { \
struct rb_node_link* __w = __xparent->right; \
if (__w && __w->color == RBTREE_RED) { \
struct rb_node_link* __rdf_x = (x_node); \
struct rb_node_link* __rdf_xp = (xparent_node); \
while (__rdf_xp && (__rdf_x == NULL || __rdf_x->color == RBTREE_BLACK)) { \
if (__rdf_x == __rdf_xp->left) { \
struct rb_node_link* __w = __rdf_xp->right; \
if (rbtree_node_color (__w) == RBTREE_RED) { \
__w->color = RBTREE_BLACK; \
__xparent->color = RBTREE_RED; \
rbtree_rotate_left (root_ptr, __xparent); \
__w = __xparent->right; \
__rdf_xp->color = RBTREE_RED; \
rbtree_rotate_left (root_ptr, __rdf_xp); \
__w = __rdf_xp->right; \
} \
if ((!__w->left || __w->left->color == RBTREE_BLACK) && \
(!__w->right || __w->right->color == RBTREE_BLACK)) { \
if (rbtree_node_color (__w->left) == RBTREE_BLACK && \
rbtree_node_color (__w->right) == RBTREE_BLACK) { \
if (__w) \
__w->color = RBTREE_RED; \
__x = __xparent; \
__xparent = __x->parent; \
__rdf_x = __rdf_xp; \
__rdf_xp = __rdf_x->parent; \
} else { \
if (!__w->right || __w->right->color == RBTREE_BLACK) { \
if (rbtree_node_color (__w->right) == RBTREE_BLACK) { \
if (__w->left) \
__w->left->color = RBTREE_BLACK; \
__w->color = RBTREE_RED; \
rbtree_rotate_right (root_ptr, __w); \
__w = __xparent->right; \
__w = __rdf_xp->right; \
} \
__w->color = __xparent->color; \
__xparent->color = RBTREE_BLACK; \
__w->color = __rdf_xp->color; \
__rdf_xp->color = RBTREE_BLACK; \
if (__w->right) \
__w->right->color = RBTREE_BLACK; \
rbtree_rotate_left (root_ptr, __xparent); \
__x = *(root_ptr); \
rbtree_rotate_left (root_ptr, __rdf_xp); \
__rdf_x = *(root_ptr); \
break; \
} \
} else { \
struct rb_node_link* __w = __xparent->left; \
if (__w && __w->color == RBTREE_RED) { \
struct rb_node_link* __w = __rdf_xp->left; \
if (rbtree_node_color (__w) == RBTREE_RED) { \
__w->color = RBTREE_BLACK; \
__xparent->color = RBTREE_RED; \
rbtree_rotate_right (root_ptr, __xparent); \
__w = __xparent->left; \
__rdf_xp->color = RBTREE_RED; \
rbtree_rotate_right (root_ptr, __rdf_xp); \
__w = __rdf_xp->left; \
} \
if ((!__w->right || __w->right->color == RBTREE_BLACK) && \
(!__w->left || __w->left->color == RBTREE_BLACK)) { \
if (rbtree_node_color (__w->right) == RBTREE_BLACK && \
rbtree_node_color (__w->left) == RBTREE_BLACK) { \
if (__w) \
__w->color = RBTREE_RED; \
__x = __xparent; \
__xparent = __x->parent; \
__rdf_x = __rdf_xp; \
__rdf_xp = __rdf_x->parent; \
} else { \
if (!__w->left || __w->left->color == RBTREE_BLACK) { \
if (rbtree_node_color (__w->left) == RBTREE_BLACK) { \
if (__w->right) \
__w->right->color = RBTREE_BLACK; \
__w->color = RBTREE_RED; \
rbtree_rotate_left (root_ptr, __w); \
__w = __xparent->left; \
__w = __rdf_xp->left; \
} \
__w->color = __xparent->color; \
__xparent->color = RBTREE_BLACK; \
__w->color = __rdf_xp->color; \
__rdf_xp->color = RBTREE_BLACK; \
if (__w->left) \
__w->left->color = RBTREE_BLACK; \
rbtree_rotate_right (root_ptr, __xparent); \
__x = *(root_ptr); \
rbtree_rotate_right (root_ptr, __rdf_xp); \
__rdf_x = *(root_ptr); \
break; \
} \
} \
} \
if (__x) \
__x->color = RBTREE_BLACK; \
if (__rdf_x) \
__rdf_x->color = RBTREE_BLACK; \
} while (0)
#define rbtree_delete(root_ptr, z_node) \
do { \
struct rb_node_link* __rd_z = (z_node); \
struct rb_node_link* __rd_y = __rd_z; \
struct rb_node_link* __rd_x = NULL; \
struct rb_node_link* __rd_xp = NULL; \
int __rd_y_orig_color = __rd_y->color; \
if (!__rd_z->left) { \
__rd_x = __rd_z->right; \
__rd_xp = __rd_z->parent; \
rbtree_transplant (root_ptr, __rd_z, __rd_z->right); \
} else if (!__rd_z->right) { \
__rd_x = __rd_z->left; \
__rd_xp = __rd_z->parent; \
rbtree_transplant (root_ptr, __rd_z, __rd_z->left); \
} else { \
rbtree_min (__rd_z->right, __rd_y); \
__rd_y_orig_color = __rd_y->color; \
__rd_x = __rd_y->right; \
if (__rd_y->parent == __rd_z) { \
__rd_xp = __rd_y; \
if (__rd_x) \
__rd_x->parent = __rd_y; \
} else { \
__rd_xp = __rd_y->parent; \
rbtree_transplant (root_ptr, __rd_y, __rd_y->right); \
__rd_y->right = __rd_z->right; \
__rd_y->right->parent = __rd_y; \
} \
rbtree_transplant (root_ptr, __rd_z, __rd_y); \
__rd_y->left = __rd_z->left; \
__rd_y->left->parent = __rd_y; \
__rd_y->color = __rd_z->color; \
} \
if (__rd_y_orig_color == RBTREE_BLACK) \
rbtree_delete_fixup (root_ptr, __rd_x, __rd_xp); \
} while (0)
#define rbtree_next(node, out) \
@@ -265,17 +320,4 @@ struct rb_node_link {
} \
} while (0)
#define rbtree_first(root_ptr, out) rbtree_min (*(root_ptr), out)
#define rbtree_last(root_ptr, out) \
do { \
(out) = NULL; \
struct rb_node_link* __n = *(root_ptr); \
if (__n) { \
while (__n->right) \
__n = __n->right; \
(out) = __n; \
} \
} while (0)
#endif // _KERNEL_LIBK_RBTREE_H
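
The rbtree rework does three things: the rotate and transplant macros now evaluate their node arguments exactly once, color checks go through the NULL-safe rbtree_node_color helper, and a full deletion path (rbtree_transplant, rbtree_delete_fixup, rbtree_delete) is added alongside rbtree_max and the relocated rbtree_first/rbtree_last. A hedged usage sketch; the rbtree_insert argument order (type, root pointer, node link, link member, key field) is inferred from the call sites in the process code, and struct task is purely illustrative:

/* Illustrative element type: a node embedded in a larger struct,
 * keyed by an integer, in the same style as struct proc. */
struct task {
    struct rb_node_link link;
    int pid;
};

static struct rb_node_link* run_q = NULL;

static void task_example (struct task* t) {
    /* Insert keyed on pid (argument order inferred from the proc code). */
    rbtree_insert (struct task, &run_q, &t->link, link, pid);

    /* Iterate in ascending pid order. */
    struct rb_node_link* n;
    rbtree_first (&run_q, n);
    while (n) {
        struct task* cur = rbtree_entry (n, struct task, link);
        (void)cur;
        rbtree_next (n, n);
    }

    /* Unlink again; rebalancing is handled by rbtree_delete_fixup. */
    rbtree_delete (&run_q, &t->link);
}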

View File

@@ -23,6 +23,12 @@
#include <amd64/intr_defs.h>
#endif
/*
* Lock ordering:
* 1. proc_tree_lock
* 2. [cpu]->lock
*/
static struct rb_node_link* proc_tree = NULL;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
@@ -113,7 +119,6 @@ static void proc_register (struct proc* proc, struct cpu* cpu) {
proc->cpu = cpu;
spin_lock (&proc_tree_lock);
spin_lock (&cpu->lock);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
@@ -123,7 +128,6 @@ static void proc_register (struct proc* proc, struct cpu* cpu) {
cpu->proc_current = proc;
spin_unlock (&cpu->lock);
spin_unlock (&proc_tree_lock);
}
@@ -147,6 +151,10 @@ static struct proc* proc_find_sched (void) {
rbtree_next (node, node);
if (!node) {
rbtree_first (&thiscpu->proc_run_q, node);
}
if (node == first)
break;
}
@@ -159,7 +167,7 @@ void proc_sched (void) {
spin_lock (&thiscpu->lock);
if (thiscpu->proc_run_q == NULL || thiscpu->proc_current == NULL) {
if (thiscpu->proc_run_q == NULL) {
spin_unlock (&thiscpu->lock);
goto idle;
}
@@ -179,8 +187,25 @@ idle:
}
void proc_kill (struct proc* proc) {
/* mark for garbage collection */
atomic_store (&proc->state, PROC_DEAD);
spin_lock (&proc_tree_lock);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
spin_unlock (&proc_tree_lock);
struct cpu* cpu = proc->cpu;
spin_lock (&cpu->lock);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
spin_unlock (&cpu->lock);
DEBUG ("killed PID %d\n", proc->pid);
proc_cleanup (proc);
if (cpu == thiscpu)
proc_sched ();
else
cpu_request_sched (cpu);
}
static void proc_irq_sched (void* arg, void* regs) {
@@ -188,12 +213,18 @@ static void proc_irq_sched (void* arg, void* regs) {
proc_sched ();
}
static void proc_irq_cpu_request_sched (void* arg, void* regs) {
(void)arg, (void)regs;
proc_sched ();
}
void proc_init (void) {
struct proc* init = proc_spawn_rd ("init.exe");
proc_register (init, thiscpu);
#if defined(__x86_64__)
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
irq_attach (&proc_irq_cpu_request_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_SAFE);
#endif
do_sched (init);
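
The new proc_kill path marks the process PROC_DEAD, unlinks it from the global proc_tree and from its owning CPU's run queue, frees everything through proc_cleanup, and then forces a reschedule, locally via proc_sched or remotely via cpu_request_sched. All of that follows the lock-ordering comment added at the top of this file: proc_tree_lock is always taken before a per-CPU lock, as proc_register also does. A minimal sketch of that convention; with_proc_locks is an illustrative helper, not in the tree:

/* Illustrative only: the documented order is proc_tree_lock first,
 * then the per-CPU lock, with unlocks in the reverse order. */
static void with_proc_locks (struct cpu* cpu) {
    spin_lock (&proc_tree_lock);  /* 1. global process tree  */
    spin_lock (&cpu->lock);       /* 2. this CPU's run queue */
    /* ... manipulate proc_tree and cpu->proc_run_q here ... */
    spin_unlock (&cpu->lock);
    spin_unlock (&proc_tree_lock);
}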

View File

@@ -39,20 +39,9 @@ struct proc {
struct pd pd;
spin_lock_t lock;
struct cpu* cpu;
// struct procw* procw; /* link to its global struct */
atomic_int state;
};
/*
* struct proc is a member of a CPU's proc_run_q.
* struct procw is a process wrapper that is a member of
* a global process list.
*/
/* struct procw { */
/* struct procw* next; */
/* struct proc* proc; */
/* }; */
void proc_sched (void);
void proc_kill (struct proc* proc);
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,

View File

@@ -4,5 +4,6 @@
struct proc;
struct proc* proc_from_elf (uint8_t* elf_contents);
void proc_cleanup (struct proc* proc);
#endif // _KERNEL_SYS_PROC_H