Compare commits

...

2 Commits

Author SHA1 Message Date
6a474c21a0 Use RW spin locks 2026-01-09 19:53:08 +01:00
a5283283f6 Hold proc->lock while killing the process 2026-01-09 00:00:18 +01:00
9 changed files with 197 additions and 42 deletions

View File

@@ -4,7 +4,7 @@
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/spin_lock.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
@@ -36,7 +36,7 @@
struct ioapic {
struct acpi_madt_ioapic table_data;
spin_lock_t lock;
rw_spin_lock_t lock;
uintptr_t mmio_base;
};
@@ -55,19 +55,19 @@ static uint64_t lapic_ticks;
/* Read IOAPIC */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
spin_lock (&ioapic->lock);
rw_spin_read_lock (&ioapic->lock);
*(volatile uint32_t*)ioapic->mmio_base = reg;
uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
spin_unlock (&ioapic->lock);
rw_spin_read_unlock (&ioapic->lock);
return ret;
}
/* Write IOAPIC */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
spin_lock (&ioapic->lock);
rw_spin_write_lock (&ioapic->lock);
*(volatile uint32_t*)ioapic->mmio_base = reg;
*(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
spin_unlock (&ioapic->lock);
rw_spin_write_unlock (&ioapic->lock);
}
/* Find an IOAPIC corresponding to the provided IRQ */
@@ -160,7 +160,7 @@ void amd64_ioapic_init (void) {
(uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
ioapics[ioapic_entries++] = (struct ioapic){
.lock = SPIN_LOCK_INIT,
.lock = RW_SPIN_LOCK_INIT,
.table_data = *ioapic_table_data,
.mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
};
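
The two accessors above follow the IOAPIC's indexed-register protocol: the register index is written to the select register at the MMIO base and the value moves through the data window at offset 0x10. A minimal sketch of a caller inside the same file, assuming the standard IOAPICVER register (index 0x01, whose bits 16-23 hold the highest redirection entry); the helper name is hypothetical:

#define IOAPIC_REG_VER 0x01 /* standard IOAPIC version register index */

/* Hypothetical helper: number of redirection entries this IOAPIC exposes.
 * Goes through the read-locked accessor above. */
static uint32_t amd64_ioapic_redir_count (struct ioapic* ioapic) {
    uint32_t ver = amd64_ioapic_read (ioapic, IOAPIC_REG_VER);
    return ((ver >> 16) & 0xFF) + 1; /* maximum redirection entry + 1 */
}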

View File

@@ -8,7 +8,7 @@
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <sync/spin_lock.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/syscall.h>
@@ -27,7 +27,7 @@ struct cpu* cpu_make (void) {
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->lock = RW_SPIN_LOCK_INIT;
cpu->id = id;
cpu->self = cpu;

View File

@@ -7,6 +7,7 @@
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sync/rw_spin_lock.h>
#define CPUS_MAX 32
@@ -29,7 +30,7 @@ struct cpu {
atomic_int nesting;
} irq_ctx;
spin_lock_t lock;
rw_spin_lock_t lock;
struct rb_node_link* proc_run_q;
struct proc* proc_current;

View File

@@ -2,7 +2,7 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <sync/spin_lock.h>
#include <sync/rw_spin_lock.h>
#if defined(__x86_64__)
#include <amd64/apic.h>
@@ -12,7 +12,7 @@
/* TODO: figure out a generic way to work with IRQs */
static struct list_node_link* irqs = NULL;
static spin_lock_t irqs_lock;
static rw_spin_lock_t irqs_lock;
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
struct irq* irq = malloc (sizeof (*irq));
@@ -25,9 +25,9 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
irq->irq_num = irq_num;
irq->flags = flags;
spin_lock (&irqs_lock);
rw_spin_write_lock (&irqs_lock);
list_append (irqs, &irq->irqs_link);
spin_unlock (&irqs_lock);
rw_spin_write_unlock (&irqs_lock);
#if defined(__x86_64__)
uint8_t resolution = amd64_resolve_irq (irq_num);
@@ -39,32 +39,40 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
void irq_detach (void (*func) (void*, void*)) {
struct list_node_link *irq_link, *irq_link_tmp;
spin_lock (&irqs_lock);
struct irq* irq = NULL;
rw_spin_read_lock (&irqs_lock);
list_foreach (irqs, irq_link, irq_link_tmp) {
struct irq* irq = list_entry (irq_link, struct irq, irqs_link);
irq = list_entry (irq_link, struct irq, irqs_link);
if ((uintptr_t)irq->func == (uintptr_t)func)
list_remove (irqs, irq_link);
break;
}
spin_unlock (&irqs_lock);
rw_spin_read_unlock (&irqs_lock);
if (irq != NULL) {
rw_spin_write_lock (&irqs_lock);
list_remove (irqs, &irq->irqs_link);
rw_spin_write_unlock (&irqs_lock);
}
}
struct irq* irq_find (uint32_t irq_num) {
struct list_node_link *irq_link, *irq_link_tmp;
spin_lock (&irqs_lock);
rw_spin_read_lock (&irqs_lock);
list_foreach (irqs, irq_link, irq_link_tmp) {
struct irq* irq = list_entry (irq_link, struct irq, irqs_link);
if (irq->irq_num == irq_num) {
spin_unlock (&irqs_lock);
rw_spin_read_unlock (&irqs_lock);
return irq;
}
}
spin_unlock (&irqs_lock);
rw_spin_read_unlock (&irqs_lock);
return NULL;
}
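
The attach/detach pair above now takes the write lock only while mutating the IRQ list and the read lock for lookups. A minimal usage sketch, assuming the declarations above are visible through the IRQ header (not shown in this diff); the handler and the IRQ number 1 with flags 0 are hypothetical:

/* Hypothetical handler matching the void (*)(void*, void*) signature
 * expected by irq_attach; regs is the interrupt frame. */
static void kbd_irq_handler (void* arg, void* regs) {
    (void)arg, (void)regs;
    /* service the device here */
}

static bool kbd_init (void) {
    /* Appends the handler to irqs under the write lock. */
    return irq_attach (kbd_irq_handler, NULL, 1, 0);
}

static void kbd_fini (void) {
    /* Finds the handler under the read lock, then unlinks it under the write lock. */
    irq_detach (kbd_irq_handler);
}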

View File

@@ -12,6 +12,7 @@
#include <proc/proc.h>
#include <proc/resource.h>
#include <rd/rd.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
@@ -25,13 +26,15 @@
#endif
/*
* Lock ordering:
* 1. proc_tree_lock
* 2. [cpu]->lock
* Lock hierarchy:
* - proc_tree_lock
* - cpu->lock
* - proc->lock
* - suspension_q->lock
*/
static struct rb_node_link* proc_tree = NULL;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
static bool proc_check_elf (uint8_t* elf) {
if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
@@ -191,8 +194,8 @@ static struct proc* proc_spawn_rd (char* name) {
static void proc_register (struct proc* proc, struct cpu* cpu) {
proc->cpu = cpu;
spin_lock (&proc_tree_lock);
spin_lock (&cpu->lock);
rw_spin_write_lock (&proc_tree_lock);
rw_spin_write_lock (&cpu->lock);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
@@ -200,8 +203,8 @@ static void proc_register (struct proc* proc, struct cpu* cpu) {
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
spin_unlock (&cpu->lock);
spin_unlock (&proc_tree_lock);
rw_spin_write_unlock (&cpu->lock);
rw_spin_write_unlock (&proc_tree_lock);
}
static struct proc* proc_find_sched (void) {
@@ -241,19 +244,22 @@ static struct proc* proc_find_sched (void) {
void proc_sched (void) {
struct proc* next = NULL;
spin_lock (&thiscpu->lock);
rw_spin_read_lock (&thiscpu->lock);
if (thiscpu->proc_run_q == NULL) {
spin_unlock (&thiscpu->lock);
rw_spin_read_unlock (&thiscpu->lock);
goto idle;
}
next = proc_find_sched ();
if (next != NULL)
thiscpu->proc_current = next;
rw_spin_read_unlock (&thiscpu->lock);
spin_unlock (&thiscpu->lock);
if (next != NULL) {
rw_spin_write_lock (&thiscpu->lock);
thiscpu->proc_current = next;
rw_spin_write_unlock (&thiscpu->lock);
}
if (next != NULL && atomic_load (&next->state) == PROC_READY)
do_sched (next);
@@ -265,14 +271,16 @@ idle:
void proc_kill (struct proc* proc) {
atomic_store (&proc->state, PROC_DEAD);
spin_lock (&proc_tree_lock);
rw_spin_write_lock (&proc_tree_lock);
spin_lock (&proc->lock);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
spin_unlock (&proc_tree_lock);
spin_unlock (&proc->lock);
rw_spin_write_unlock (&proc_tree_lock);
struct cpu* cpu = proc->cpu;
spin_lock (&cpu->lock);
rw_spin_write_lock (&cpu->lock);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
spin_unlock (&cpu->lock);
rw_spin_write_unlock (&cpu->lock);
DEBUG ("killed PID %d\n", proc->pid);
@@ -284,6 +292,57 @@ void proc_kill (struct proc* proc) {
cpu_request_sched (cpu);
}
void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
struct cpu* cpu;
spin_lock (&proc->lock);
atomic_store (&proc->state, PROC_SUSPENDED);
cpu = proc->cpu;
/* remove from run q */
rw_spin_write_lock (&cpu->lock);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
rw_spin_write_unlock (&cpu->lock);
proc->suspension_q = sq;
spin_lock (&proc->suspension_q->lock);
rbtree_insert (struct proc, &proc->suspension_q->proc_tree, &proc->suspension_link,
suspension_link, pid);
spin_unlock (&proc->suspension_q->lock);
spin_unlock (&proc->lock);
cpu_request_sched (cpu);
}
void proc_wakeup (struct proc* proc) {
struct cpu* cpu;
spin_lock (&proc->lock);
cpu = proc->cpu;
spin_lock (&proc->suspension_q->lock);
rbtree_delete (&proc->suspension_q->proc_tree, &proc->suspension_link);
spin_unlock (&proc->suspension_q->lock);
proc->suspension_q = NULL;
rw_spin_write_lock (&cpu->lock);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
rw_spin_write_unlock (&cpu->lock);
atomic_store (&proc->state, PROC_READY);
spin_unlock (&proc->lock);
cpu_request_sched (cpu);
}
static void proc_irq_sched (void* arg, void* regs) {
(void)arg, (void)regs;
proc_sched ();
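
The lock hierarchy documented at the top of this file (proc_tree_lock, then cpu->lock, then proc->lock, then suspension_q->lock) is the invariant the functions above are meant to respect. A purely illustrative sketch of an operation needing the first three levels, acquiring in hierarchy order and releasing in reverse; the function is hypothetical and not part of this change:

static void proc_example_locked_op (struct proc* proc) {
    rw_spin_write_lock (&proc_tree_lock);  /* 1. proc_tree_lock */
    rw_spin_write_lock (&proc->cpu->lock); /* 2. cpu->lock */
    spin_lock (&proc->lock);               /* 3. proc->lock */

    /* ... touch proc_tree, the CPU run queue and the process here ... */

    spin_unlock (&proc->lock);
    rw_spin_write_unlock (&proc->cpu->lock);
    rw_spin_write_unlock (&proc_tree_lock);
}

When a suspension queue is involved, its lock nests innermost under proc->lock, as proc_suspend and proc_wakeup do above.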

View File

@@ -15,10 +15,12 @@
#include <amd64/proc.h> /* USTACK_SIZE */
#endif
/// Process is ready to run
/* Process is ready to run */
#define PROC_READY 0
/// Process marked garbage collection
/* Process marked for garbage collection */
#define PROC_DEAD 1
/* Process is suspended */
#define PROC_SUSPENDED 2
#define PROC_RESOURCES_MAX 1024
@@ -36,6 +38,7 @@ struct proc {
int pid;
struct rb_node_link proc_tree_link;
struct rb_node_link cpu_run_q_link;
struct rb_node_link suspension_link;
struct list_node_link* mappings; /* pd.lock implicitly protects this field */
struct proc_platformdata pdata;
@@ -45,8 +48,15 @@ struct proc {
atomic_int state;
struct rb_node_link* resource_tree;
atomic_int rids;
struct proc_suspension_q* suspension_q;
};
struct proc_suspension_q {
struct rb_node_link* proc_tree;
spin_lock_t lock;
};
void proc_suspend (struct proc* proc, struct proc_suspension_q* sq);
void proc_sched (void);
void proc_kill (struct proc* proc);
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,

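Putting the new pieces together, a wait point built on struct proc_suspension_q could look like the sketch below; the queue, the event functions and the proc_wakeup declaration (the function is defined in proc.c above but not visible in this header hunk) are assumptions:

/* Hypothetical wait point for some event. */
static struct proc_suspension_q event_q = {
    .proc_tree = NULL,
    .lock = SPIN_LOCK_INIT,
};

/* Waiter side: proc_suspend pulls proc off its CPU's run queue and files it
 * under event_q until the event fires. */
static void event_wait (struct proc* proc) {
    proc_suspend (proc, &event_q);
}

/* Event side: make a previously suspended process runnable again. */
static void event_signal (struct proc* proc) {
    proc_wakeup (proc); /* re-inserts proc into its CPU's run queue */
}
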
View File

@@ -0,0 +1,59 @@
#include <libk/assert.h>
#include <libk/std.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/spin_lock.h>
#define WRITER_WAIT (1U << 31)
#define READER_MASK (~WRITER_WAIT)
void rw_spin_read_lock (rw_spin_lock_t* rw) {
uint32_t value;
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);
if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
memory_order_relaxed)) {
return;
}
}
spin_lock_relax ();
}
}
void rw_spin_read_unlock (rw_spin_lock_t* rw) {
uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
assert ((old & READER_MASK) > 0);
}
void rw_spin_write_lock (rw_spin_lock_t* rw) {
uint32_t value;
/* announce writer */
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);
if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
memory_order_acquire, memory_order_relaxed))
break;
} else
spin_lock_relax ();
}
/* wait for readers */
for (;;) {
value = atomic_load_explicit (rw, memory_order_acquire);
if ((value & READER_MASK) == 0)
return;
spin_lock_relax ();
}
}
void rw_spin_write_unlock (rw_spin_lock_t* rw) {
atomic_store_explicit (rw, 0, memory_order_release);
}
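
The implementation packs the whole lock into one 32-bit word: bit 31 (WRITER_WAIT) announces a writer and the low 31 bits (READER_MASK) count active readers, so only one writer can announce itself at a time. A few example states, derived directly from those masks:

0x00000000  unlocked
0x00000003  three readers, no writer
0x80000000  writer announced, no readers left, so the write lock is held
0x80000002  writer announced, two readers still draining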

View File

@@ -0,0 +1,16 @@
#ifndef _KERNEL_SYNC_RW_SPIN_LOCK_H
#define _KERNEL_SYNC_RW_SPIN_LOCK_H
#include <libk/std.h>
#include <sync/spin_lock.h>
#define RW_SPIN_LOCK_INIT 0
typedef _Atomic (uint32_t) rw_spin_lock_t;
void rw_spin_read_lock (rw_spin_lock_t* rw);
void rw_spin_read_unlock (rw_spin_lock_t* rw);
void rw_spin_write_lock (rw_spin_lock_t* rw);
void rw_spin_write_unlock (rw_spin_lock_t* rw);
#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H
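
A minimal usage sketch against this API, assuming <libk/std.h> provides the fixed-width types as elsewhere in this tree; the read-mostly table being guarded is hypothetical:

#include <libk/std.h>
#include <sync/rw_spin_lock.h>

/* Hypothetical read-mostly table guarded by the new lock. */
static rw_spin_lock_t table_lock = RW_SPIN_LOCK_INIT;
static uint32_t table[16];

static uint32_t table_get (size_t i) {
    rw_spin_read_lock (&table_lock); /* many readers may hold this at once */
    uint32_t v = table[i];
    rw_spin_read_unlock (&table_lock);
    return v;
}

static void table_set (size_t i, uint32_t v) {
    rw_spin_write_lock (&table_lock); /* exclusive: waits for readers to drain */
    table[i] = v;
    rw_spin_write_unlock (&table_lock);
}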

View File

@@ -1,3 +1,5 @@
c += sync/spin_lock.c
c += sync/spin_lock.c \
sync/rw_spin_lock.c
o += sync/spin_lock.o
o += sync/spin_lock.o \
sync/rw_spin_lock.o