Implement lock IRQ nesting via stack variables/contexts
All checks were successful
Build documentation / build-and-deploy (push) Successful in 21s

This commit is contained in:
2026-01-14 22:11:56 +01:00
parent 55166f9d5f
commit 270ff507d4
22 changed files with 197 additions and 145 deletions

View File

@@ -25,7 +25,4 @@ void app_main (void) {
proc_mutex_lock (mutex_rid); proc_mutex_lock (mutex_rid);
proc_test ('b'); proc_test ('b');
proc_mutex_unlock (mutex_rid); proc_mutex_unlock (mutex_rid);
for (;;)
;
} }

View File

@@ -57,19 +57,23 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
/* Read IOAPIC */ /* Read IOAPIC */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) { static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
rw_spin_read_lock (&ioapic->lock); spin_lock_ctx_t ctxioar;
rw_spin_read_lock (&ioapic->lock, &ctxioar);
*(volatile uint32_t*)ioapic->mmio_base = reg; *(volatile uint32_t*)ioapic->mmio_base = reg;
uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10); uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
rw_spin_read_unlock (&ioapic->lock); rw_spin_read_unlock (&ioapic->lock, &ctxioar);
return ret; return ret;
} }
/* Write IOAPIC */ /* Write IOAPIC */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) { static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
rw_spin_write_lock (&ioapic->lock); spin_lock_ctx_t ctxioaw;
rw_spin_write_lock (&ioapic->lock, &ctxioaw);
*(volatile uint32_t*)ioapic->mmio_base = reg; *(volatile uint32_t*)ioapic->mmio_base = reg;
*(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value; *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
rw_spin_write_unlock (&ioapic->lock); rw_spin_write_unlock (&ioapic->lock, &ctxioaw);
} }
/* Find an IOAPIC corresposting to provided IRQ */ /* Find an IOAPIC corresposting to provided IRQ */
@@ -201,7 +205,9 @@ void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
* us - Period length in microseconds * us - Period length in microseconds
*/ */
static uint32_t amd64_lapic_calibrate (uint32_t us) { static uint32_t amd64_lapic_calibrate (uint32_t us) {
spin_lock (&lapic_calibration_lock); spin_lock_ctx_t ctxlacb;
spin_lock (&lapic_calibration_lock, &ctxlacb);
amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE); amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
@@ -214,7 +220,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT); uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
DEBUG ("timer ticks = %u\n", ticks); DEBUG ("timer ticks = %u\n", ticks);
spin_unlock (&lapic_calibration_lock); spin_unlock (&lapic_calibration_lock, &ctxlacb);
return ticks; return ticks;
} }

View File

@@ -35,6 +35,8 @@ static void amd64_debug_serial_write (char x) {
* Formatted printing to serial. serial_lock ensures that all prints are atomic. * Formatted printing to serial. serial_lock ensures that all prints are atomic.
*/ */
void debugprintf (const char* fmt, ...) { void debugprintf (const char* fmt, ...) {
spin_lock_ctx_t ctxdbgp;
if (!debug_init) if (!debug_init)
return; return;
@@ -50,14 +52,14 @@ void debugprintf (const char* fmt, ...) {
const char* p = buffer; const char* p = buffer;
spin_lock (&serial_lock); spin_lock (&serial_lock, &ctxdbgp);
while (*p) { while (*p) {
amd64_debug_serial_write (*p); amd64_debug_serial_write (*p);
p++; p++;
} }
spin_unlock (&serial_lock); spin_unlock (&serial_lock, &ctxdbgp);
} }
/* Initialize serial */ /* Initialize serial */

View File

@@ -60,8 +60,9 @@ static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
static uint64_t amd64_hpet_read_counter (void) { static uint64_t amd64_hpet_read_counter (void) {
uint64_t value; uint64_t value;
spin_lock_ctx_t ctxhrc;
spin_lock (&hpet_lock); spin_lock (&hpet_lock, &ctxhrc);
if (!hpet_32bits) if (!hpet_32bits)
value = amd64_hpet_read64 (HPET_MCVR); value = amd64_hpet_read64 (HPET_MCVR);
@@ -76,13 +77,15 @@ static uint64_t amd64_hpet_read_counter (void) {
value = ((uint64_t)hi1 << 32) | lo; value = ((uint64_t)hi1 << 32) | lo;
} }
spin_unlock (&hpet_lock); spin_unlock (&hpet_lock, &ctxhrc);
return value; return value;
} }
static void amd64_hpet_write_counter (uint64_t value) { static void amd64_hpet_write_counter (uint64_t value) {
spin_lock (&hpet_lock); spin_lock_ctx_t ctxhwc;
spin_lock (&hpet_lock, &ctxhwc);
if (!hpet_32bits) if (!hpet_32bits)
amd64_hpet_write64 (HPET_MCVR, value); amd64_hpet_write64 (HPET_MCVR, value);
@@ -91,7 +94,7 @@ static void amd64_hpet_write_counter (uint64_t value) {
amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32)); amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
} }
spin_unlock (&hpet_lock); spin_unlock (&hpet_lock, &ctxhwc);
} }
/* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being /* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being

View File

@@ -210,18 +210,10 @@ static void amd64_irq_restore_flags (uint64_t rflags) {
} }
/* Save current interrupt state */ /* Save current interrupt state */
void irq_save (void) { void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 0)
thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
}
/* Restore interrupt state */ /* Restore interrupt state */
void irq_restore (void) { void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
/* Map custom IRQ mappings to legacy IRQs */ /* Map custom IRQ mappings to legacy IRQs */
uint8_t amd64_resolve_irq (uint8_t irq) { uint8_t amd64_resolve_irq (uint8_t irq) {

View File

@@ -25,6 +25,7 @@ struct pg_index {
/* Kernel page directory */ /* Kernel page directory */
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT}; static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
static spin_lock_ctx_t ctxkpd;
/* Lock needed to sync between map/unmap operations and TLB shootdown */ /* Lock needed to sync between map/unmap operations and TLB shootdown */
static spin_lock_t mm_lock = SPIN_LOCK_INIT; static spin_lock_t mm_lock = SPIN_LOCK_INIT;
@@ -108,13 +109,15 @@ static void amd64_reload_cr3 (void) {
/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */ /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock); spin_lock_ctx_t ctxmm, ctxpd;
spin_lock (&mm_lock, &ctxmm);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false; bool do_reload = false;
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_lock (&pd->lock); spin_lock (&pd->lock, &ctxpd);
uint64_t amd64_flags = amd64_mm_resolve_flags (flags); uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
@@ -143,9 +146,9 @@ done:
amd64_reload_cr3 (); amd64_reload_cr3 ();
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock); spin_unlock (&pd->lock, &ctxpd);
spin_unlock (&mm_lock); spin_unlock (&mm_lock, &ctxmm);
} }
/* Map a page into kernel page directory */ /* Map a page into kernel page directory */
@@ -155,13 +158,15 @@ void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
/* Unmap a virtual address. TLB needs to be flushed afterwards */ /* Unmap a virtual address. TLB needs to be flushed afterwards */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) { void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock); spin_lock_ctx_t ctxmm, ctxpd;
spin_lock (&mm_lock, &ctxmm);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false; bool do_reload = false;
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_lock (&pd->lock); spin_lock (&pd->lock, &ctxpd);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr); struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -208,9 +213,9 @@ done:
amd64_reload_cr3 (); amd64_reload_cr3 ();
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock); spin_unlock (&pd->lock, &ctxpd);
spin_unlock (&mm_lock); spin_unlock (&mm_lock, &ctxmm);
} }
/* Unmap a page from kernel page directory */ /* Unmap a page from kernel page directory */
@@ -219,10 +224,10 @@ void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
} }
/* Lock kernel page directory */ /* Lock kernel page directory */
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); } void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }
/* Unlock kernel page directory */ /* Unlock kernel page directory */
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); } void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }
/* Allocate a userspace-ready page directory */ /* Allocate a userspace-ready page directory */
uintptr_t mm_alloc_user_pd_phys (void) { uintptr_t mm_alloc_user_pd_phys (void) {
@@ -252,13 +257,15 @@ void mm_reload (void) {
} }
bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) { bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock); spin_lock_ctx_t ctxmm, ctxpd;
spin_lock (&mm_lock, &ctxmm);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool ret = false; bool ret = false;
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_lock (&pd->lock); spin_lock (&pd->lock, &ctxpd);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr); struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -280,18 +287,19 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
done: done:
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock); spin_unlock (&pd->lock, &ctxpd);
spin_unlock (&mm_lock); spin_unlock (&mm_lock, &ctxmm);
return ret; return ret;
} }
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) { bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
bool ok = true; bool ok = true;
spin_lock_ctx_t ctxpd;
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_lock (&pd->lock); spin_lock (&pd->lock, &ctxpd);
for (size_t i = 0; i < size; i++) { for (size_t i = 0; i < size; i++) {
ok = mm_validate (pd, vaddr + i, 0); ok = mm_validate (pd, vaddr + i, 0);
@@ -301,19 +309,21 @@ bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t f
done: done:
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock); spin_unlock (&pd->lock, &ctxpd);
return ok; return ok;
} }
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) { uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
spin_lock (&mm_lock); spin_lock_ctx_t ctxmm, ctxpd;
spin_lock (&mm_lock, &ctxmm);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0; uintptr_t ret = 0;
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_lock (&pd->lock); spin_lock (&pd->lock, &ctxpd);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
@@ -346,21 +356,23 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
done: done:
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock); spin_unlock (&pd->lock, &ctxpd);
spin_unlock (&mm_lock); spin_unlock (&mm_lock, &ctxmm);
return ret; return ret;
} }
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) { uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock); spin_lock_ctx_t ctxmm, ctxpd;
spin_lock (&mm_lock, &ctxmm);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0; uintptr_t ret = 0;
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_lock (&pd->lock); spin_lock (&pd->lock, &ctxpd);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr); struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -386,9 +398,9 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
done: done:
if (flags & MM_PD_LOCK) if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock); spin_unlock (&pd->lock, &ctxpd);
spin_unlock (&mm_lock); spin_unlock (&mm_lock, &ctxmm);
return ret; return ret;
} }

View File

@@ -74,11 +74,12 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
void proc_cleanup (struct proc* proc) { void proc_cleanup (struct proc* proc) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxprpd;
proc_cleanup_resources (proc); proc_cleanup_resources (proc);
struct list_node_link *mapping_link, *mapping_link_tmp; struct list_node_link *mapping_link, *mapping_link_tmp;
spin_lock (&proc->pd.lock); spin_lock (&proc->pd.lock, &ctxprpd);
list_foreach (proc->mappings, mapping_link, mapping_link_tmp) { list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping = struct proc_mapping* mapping =
@@ -88,7 +89,7 @@ void proc_cleanup (struct proc* proc) {
free (mapping); free (mapping);
} }
spin_unlock (&proc->pd.lock); spin_unlock (&proc->pd.lock, &ctxprpd);
pmm_free (proc->pd.cr3_paddr, 1); pmm_free (proc->pd.cr3_paddr, 1);

View File

@@ -29,7 +29,6 @@ struct cpu* cpu_make (void) {
memset (cpu, 0, sizeof (*cpu)); memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT; cpu->lock = SPIN_LOCK_INIT;
cpu->id = id; cpu->id = id;
cpu->self = cpu;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu); amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);

View File

@@ -15,7 +15,6 @@ struct cpu {
/* for syscall instruction */ /* for syscall instruction */
uintptr_t syscall_user_stack; uintptr_t syscall_user_stack;
uintptr_t syscall_kernel_stack; uintptr_t syscall_kernel_stack;
struct cpu* self;
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16); volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16); volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16); volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
@@ -26,11 +25,6 @@ struct cpu {
uint64_t lapic_ticks; uint64_t lapic_ticks;
uint32_t id; uint32_t id;
struct {
uint64_t rflags;
atomic_int nesting;
} irq_ctx;
spin_lock_t lock; spin_lock_t lock;
struct rb_node_link* proc_run_q; struct rb_node_link* proc_run_q;

View File

@@ -14,6 +14,8 @@ struct irq* irq_table[0x100];
static rw_spin_lock_t irqs_lock; static rw_spin_lock_t irqs_lock;
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) { bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
spin_lock_ctx_t ctxiqa;
struct irq* irq = malloc (sizeof (*irq)); struct irq* irq = malloc (sizeof (*irq));
if (irq == NULL) { if (irq == NULL) {
return false; return false;
@@ -24,9 +26,9 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
irq->irq_num = irq_num; irq->irq_num = irq_num;
irq->flags = flags; irq->flags = flags;
rw_spin_write_lock (&irqs_lock); rw_spin_write_lock (&irqs_lock, &ctxiqa);
irq_table[irq_num] = irq; irq_table[irq_num] = irq;
rw_spin_write_unlock (&irqs_lock); rw_spin_write_unlock (&irqs_lock, &ctxiqa);
#if defined(__x86_64__) #if defined(__x86_64__)
uint8_t resolution = amd64_resolve_irq (irq_num); uint8_t resolution = amd64_resolve_irq (irq_num);
@@ -37,11 +39,13 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
} }
struct irq* irq_find (uint32_t irq_num) { struct irq* irq_find (uint32_t irq_num) {
rw_spin_read_lock (&irqs_lock); spin_lock_ctx_t ctxiqa;
rw_spin_read_lock (&irqs_lock, &ctxiqa);
struct irq* irq = irq_table[irq_num]; struct irq* irq = irq_table[irq_num];
rw_spin_read_unlock (&irqs_lock); rw_spin_read_unlock (&irqs_lock, &ctxiqa);
return irq; return irq;
} }

View File

@@ -11,13 +11,13 @@
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT; spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;
int liballoc_lock (void) { int liballoc_lock (void* ctx) {
spin_lock (&_liballoc_lock); spin_lock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
return 0; return 0;
} }
int liballoc_unlock (void) { int liballoc_unlock (void* ctx) {
spin_unlock (&_liballoc_lock); spin_unlock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
return 0; return 0;
} }
@@ -243,8 +243,9 @@ void* malloc (size_t size) {
int index; int index;
void* ptr; void* ptr;
struct boundary_tag* tag = NULL; struct boundary_tag* tag = NULL;
spin_lock_ctx_t ctxliba;
liballoc_lock (); liballoc_lock (&ctxliba);
if (l_initialized == 0) { if (l_initialized == 0) {
for (index = 0; index < MAXEXP; index++) { for (index = 0; index < MAXEXP; index++) {
@@ -272,7 +273,7 @@ void* malloc (size_t size) {
// No page found. Make one. // No page found. Make one.
if (tag == NULL) { if (tag == NULL) {
if ((tag = allocate_new_tag (size)) == NULL) { if ((tag = allocate_new_tag (size)) == NULL) {
liballoc_unlock (); liballoc_unlock (&ctxliba);
return NULL; return NULL;
} }
@@ -305,23 +306,24 @@ void* malloc (size_t size) {
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag)); ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
liballoc_unlock (); liballoc_unlock (&ctxliba);
return ptr; return ptr;
} }
void free (void* ptr) { void free (void* ptr) {
int index; int index;
struct boundary_tag* tag; struct boundary_tag* tag;
spin_lock_ctx_t ctxliba;
if (ptr == NULL) if (ptr == NULL)
return; return;
liballoc_lock (); liballoc_lock (&ctxliba);
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag)); tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
if (tag->magic != LIBALLOC_MAGIC) { if (tag->magic != LIBALLOC_MAGIC) {
liballoc_unlock (); // release the lock liballoc_unlock (&ctxliba); // release the lock
return; return;
} }
@@ -354,7 +356,7 @@ void free (void* ptr) {
liballoc_free (tag, pages); liballoc_free (tag, pages);
liballoc_unlock (); liballoc_unlock (&ctxliba);
return; return;
} }
@@ -365,7 +367,7 @@ void free (void* ptr) {
insert_tag (tag, index); insert_tag (tag, index);
liballoc_unlock (); liballoc_unlock (&ctxliba);
} }
void* calloc (size_t nobj, size_t size) { void* calloc (size_t nobj, size_t size) {
@@ -385,6 +387,7 @@ void* realloc (void* p, size_t size) {
void* ptr; void* ptr;
struct boundary_tag* tag; struct boundary_tag* tag;
int real_size; int real_size;
spin_lock_ctx_t ctxliba;
if (size == 0) { if (size == 0) {
free (p); free (p);
@@ -394,11 +397,11 @@ void* realloc (void* p, size_t size) {
return malloc (size); return malloc (size);
if (&liballoc_lock != NULL) if (&liballoc_lock != NULL)
liballoc_lock (); // lockit liballoc_lock (&ctxliba); // lockit
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag)); tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
real_size = tag->size; real_size = tag->size;
if (&liballoc_unlock != NULL) if (&liballoc_unlock != NULL)
liballoc_unlock (); liballoc_unlock (&ctxliba);
if ((size_t)real_size > size) if ((size_t)real_size > size)
real_size = size; real_size = size;

View File

@@ -47,7 +47,7 @@ struct boundary_tag {
* \return 0 if the lock was acquired successfully. Anything else is * \return 0 if the lock was acquired successfully. Anything else is
* failure. * failure.
*/ */
extern int liballoc_lock (); extern int liballoc_lock (void* ctx);
/** This function unlocks what was previously locked by the liballoc_lock /** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it * function. If it disabled interrupts, it enables interrupts. If it
@@ -55,7 +55,7 @@ extern int liballoc_lock ();
* *
* \return 0 if the lock was successfully released. * \return 0 if the lock was successfully released.
*/ */
extern int liballoc_unlock (); extern int liballoc_unlock (void* ctx);
/** This is the hook into the local system which allocates pages. It /** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages * accepts an integer parameter which is the number of pages

View File

@@ -100,6 +100,8 @@ static size_t pmm_find_free_space (struct pmm_region* pmm_region, size_t nblks)
} }
physaddr_t pmm_alloc (size_t nblks) { physaddr_t pmm_alloc (size_t nblks) {
spin_lock_ctx_t ctxpmmr;
for (size_t region = 0; region < PMM_REGIONS_MAX; region++) { for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
struct pmm_region* pmm_region = &pmm.regions[region]; struct pmm_region* pmm_region = &pmm.regions[region];
@@ -107,7 +109,7 @@ physaddr_t pmm_alloc (size_t nblks) {
if (!(pmm_region->flags & PMM_REGION_ACTIVE)) if (!(pmm_region->flags & PMM_REGION_ACTIVE))
continue; continue;
spin_lock (&pmm_region->lock); spin_lock (&pmm_region->lock, &ctxpmmr);
/* Find starting bit of the free bit range */ /* Find starting bit of the free bit range */
size_t bit = pmm_find_free_space (pmm_region, nblks); size_t bit = pmm_find_free_space (pmm_region, nblks);
@@ -116,18 +118,19 @@ physaddr_t pmm_alloc (size_t nblks) {
if (bit != (size_t)-1) { if (bit != (size_t)-1) {
/* Mark it */ /* Mark it */
bm_set_region (&pmm_region->bm, bit, nblks); bm_set_region (&pmm_region->bm, bit, nblks);
spin_unlock (&pmm_region->lock); spin_unlock (&pmm_region->lock, &ctxpmmr);
return pmm_region->membase + bit * PAGE_SIZE; return pmm_region->membase + bit * PAGE_SIZE;
} }
spin_unlock (&pmm_region->lock); spin_unlock (&pmm_region->lock, &ctxpmmr);
} }
return PMM_ALLOC_ERR; return PMM_ALLOC_ERR;
} }
void pmm_free (physaddr_t p_addr, size_t nblks) { void pmm_free (physaddr_t p_addr, size_t nblks) {
spin_lock_ctx_t ctxpmmr;
/* Round down to nearest page boundary */ /* Round down to nearest page boundary */
physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE); physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE);
@@ -145,11 +148,11 @@ void pmm_free (physaddr_t p_addr, size_t nblks) {
size_t bit = div_align_up (addr, PAGE_SIZE); size_t bit = div_align_up (addr, PAGE_SIZE);
spin_lock (&pmm_region->lock); spin_lock (&pmm_region->lock, &ctxpmmr);
bm_clear_region (&pmm_region->bm, bit, nblks); bm_clear_region (&pmm_region->bm, bit, nblks);
spin_unlock (&pmm_region->lock); spin_unlock (&pmm_region->lock, &ctxpmmr);
break; break;
} }

View File

@@ -48,6 +48,7 @@ static bool proc_check_elf (uint8_t* elf) {
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages, bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags) { uint32_t flags) {
spin_lock_ctx_t ctxprpd;
struct proc_mapping* mapping = malloc (sizeof (*mapping)); struct proc_mapping* mapping = malloc (sizeof (*mapping));
if (mapping == NULL) if (mapping == NULL)
@@ -59,7 +60,7 @@ bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr,
flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */ flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */
spin_lock (&proc->pd.lock); spin_lock (&proc->pd.lock, &ctxprpd);
list_append (proc->mappings, &mapping->proc_mappings_link); list_append (proc->mappings, &mapping->proc_mappings_link);
@@ -68,7 +69,7 @@ bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr,
mm_map_page (&proc->pd, ppage, vpage, flags); mm_map_page (&proc->pd, ppage, vpage, flags);
} }
spin_unlock (&proc->pd.lock); spin_unlock (&proc->pd.lock, &ctxprpd);
return true; return true;
} }
@@ -78,12 +79,13 @@ bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
uintptr_t end_vaddr = start_vaddr + unmap_size; uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp; struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false; bool used_tail_mapping = false;
spin_lock_ctx_t ctxprpd;
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping)); struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL) if (tail_mapping == NULL)
return false; return false;
spin_lock (&proc->pd.lock); spin_lock (&proc->pd.lock, &ctxprpd);
list_foreach (proc->mappings, mapping_link, mapping_link_tmp) { list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping = struct proc_mapping* mapping =
@@ -128,7 +130,7 @@ bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
mm_unmap_page (&proc->pd, vpage, 0); mm_unmap_page (&proc->pd, vpage, 0);
} }
spin_unlock (&proc->pd.lock); spin_unlock (&proc->pd.lock, &ctxprpd);
return true; return true;
} }
@@ -196,19 +198,21 @@ static struct proc* proc_spawn_rd (char* name) {
} }
static void proc_register (struct proc* proc, struct cpu* cpu) { static void proc_register (struct proc* proc, struct cpu* cpu) {
spin_lock_ctx_t ctxcpu, ctxprtr;
proc->cpu = cpu; proc->cpu = cpu;
spin_lock (&cpu->lock); spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid); rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
if (cpu->proc_current == NULL) if (cpu->proc_current == NULL)
cpu->proc_current = proc; cpu->proc_current = proc;
spin_unlock (&cpu->lock); spin_unlock (&cpu->lock, &ctxcpu);
rw_spin_write_lock (&proc_tree_lock); rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid); rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
rw_spin_write_unlock (&proc_tree_lock); rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
} }
/* caller holds cpu->lock */ /* caller holds cpu->lock */
@@ -246,8 +250,10 @@ static struct proc* proc_find_sched (struct cpu* cpu) {
static void proc_reap (void) { static void proc_reap (void) {
struct proc* proc = NULL; struct proc* proc = NULL;
struct list_node_link* reap_list = NULL; struct list_node_link* reap_list = NULL;
spin_lock_ctx_t ctxprtr;
spin_lock_ctx_t ctxpr;
rw_spin_write_lock (&proc_tree_lock); rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
struct rb_node_link* node; struct rb_node_link* node;
rbtree_first (&proc_tree, node); rbtree_first (&proc_tree, node);
@@ -258,9 +264,9 @@ static void proc_reap (void) {
proc = rbtree_entry (node, struct proc, proc_tree_link); proc = rbtree_entry (node, struct proc, proc_tree_link);
if (atomic_load (&proc->state) == PROC_DEAD) { if (atomic_load (&proc->state) == PROC_DEAD) {
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc_tree, &proc->proc_tree_link); rbtree_delete (&proc_tree, &proc->proc_tree_link);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
list_append (reap_list, &proc->reap_link); list_append (reap_list, &proc->reap_link);
} }
@@ -268,7 +274,7 @@ static void proc_reap (void) {
node = next; node = next;
} }
rw_spin_write_unlock (&proc_tree_lock); rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
struct list_node_link *reap_link, *reap_link_tmp; struct list_node_link *reap_link, *reap_link_tmp;
list_foreach (reap_list, reap_link, reap_link_tmp) { list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -281,6 +287,8 @@ static void proc_reap (void) {
} }
void proc_sched (void* regs) { void proc_sched (void* regs) {
spin_lock_ctx_t ctxcpu, ctxpr;
int s_cycles = atomic_fetch_add (&sched_cycles, 1); int s_cycles = atomic_fetch_add (&sched_cycles, 1);
if (s_cycles % SCHED_REAP_FREQ == 0) if (s_cycles % SCHED_REAP_FREQ == 0)
@@ -289,45 +297,46 @@ void proc_sched (void* regs) {
struct proc* next = NULL; struct proc* next = NULL;
struct cpu* cpu = thiscpu; struct cpu* cpu = thiscpu;
spin_lock (&cpu->lock); spin_lock (&cpu->lock, &ctxcpu);
struct proc* prev = cpu->proc_current; struct proc* prev = cpu->proc_current;
if (prev != NULL) { if (prev != NULL) {
spin_lock (&prev->lock); spin_lock (&prev->lock, &ctxpr);
prev->pdata.regs = *(struct saved_regs*)regs; prev->pdata.regs = *(struct saved_regs*)regs;
spin_unlock (&prev->lock); spin_unlock (&prev->lock, &ctxpr);
} }
next = proc_find_sched (cpu); next = proc_find_sched (cpu);
if (next) { if (next) {
cpu->proc_current = next; cpu->proc_current = next;
spin_unlock (&cpu->lock); spin_unlock (&cpu->lock, &ctxcpu);
do_sched (next); do_sched (next);
} else { } else {
cpu->proc_current = NULL; cpu->proc_current = NULL;
spin_unlock (&cpu->lock); spin_unlock (&cpu->lock, &ctxcpu);
spin (); spin ();
} }
} }
void proc_kill (struct proc* proc, void* regs) { void proc_kill (struct proc* proc, void* regs) {
spin_lock_ctx_t ctxpr, ctxcpu;
struct cpu* cpu = proc->cpu; struct cpu* cpu = proc->cpu;
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
atomic_store (&proc->state, PROC_DEAD); atomic_store (&proc->state, PROC_DEAD);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
spin_lock (&cpu->lock); spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link); rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc) if (cpu->proc_current == proc)
cpu->proc_current = NULL; cpu->proc_current = NULL;
spin_unlock (&cpu->lock); spin_unlock (&cpu->lock, &ctxcpu);
DEBUG ("killed PID %d\n", proc->pid); DEBUG ("killed PID %d\n", proc->pid);
@@ -338,44 +347,46 @@ void proc_kill (struct proc* proc, void* regs) {
} }
void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) { void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu; struct cpu* cpu = proc->cpu;
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
atomic_store (&proc->state, PROC_SUSPENDED); atomic_store (&proc->state, PROC_SUSPENDED);
proc->suspension_q = sq; proc->suspension_q = sq;
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
/* remove from run q */ /* remove from run q */
spin_lock (&cpu->lock); spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link); rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc) if (cpu->proc_current == proc)
cpu->proc_current = NULL; cpu->proc_current = NULL;
spin_unlock (&cpu->lock); spin_unlock (&cpu->lock, &ctxcpu);
spin_lock (&sq->lock); spin_lock (&sq->lock, &ctxsq);
rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid); rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
spin_unlock (&sq->lock); spin_unlock (&sq->lock, &ctxsq);
cpu_request_sched (cpu); cpu_request_sched (cpu);
} }
void proc_resume (struct proc* proc) { void proc_resume (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = proc->cpu; struct cpu* cpu = proc->cpu;
struct proc_suspension_q* sq = proc->suspension_q; struct proc_suspension_q* sq = proc->suspension_q;
spin_lock (&sq->lock); spin_lock (&sq->lock, &ctxsq);
rbtree_delete (&sq->proc_tree, &proc->suspension_link); rbtree_delete (&sq->proc_tree, &proc->suspension_link);
spin_unlock (&sq->lock); spin_unlock (&sq->lock, &ctxsq);
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
proc->suspension_q = NULL; proc->suspension_q = NULL;
atomic_store (&proc->state, PROC_READY); atomic_store (&proc->state, PROC_READY);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
spin_lock (&cpu->lock); spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid); rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
spin_unlock (&cpu->lock); spin_unlock (&cpu->lock, &ctxcpu);
cpu_request_sched (cpu); cpu_request_sched (cpu);
} }

View File

@@ -28,12 +28,14 @@ void proc_cleanup_resources (struct proc* proc) {
} }
void proc_drop_resource (struct proc* proc, struct proc_resource* resource) { void proc_drop_resource (struct proc* proc, struct proc_resource* resource) {
spin_lock_ctx_t ctxpr;
DEBUG ("resource=%p, type=%d, rid=%d\n", resource, resource->type, resource->rid); DEBUG ("resource=%p, type=%d, rid=%d\n", resource, resource->type, resource->rid);
if (atomic_fetch_sub (&resource->refs, 1) == 1) { if (atomic_fetch_sub (&resource->refs, 1) == 1) {
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc->resource_tree, &resource->proc_resource_tree_link); rbtree_delete (&proc->resource_tree, &resource->proc_resource_tree_link);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
resource->ops.cleanup (proc, resource); resource->ops.cleanup (proc, resource);
free (resource); free (resource);
@@ -74,6 +76,7 @@ static void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource
struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis, struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
void* data) { void* data) {
spin_lock_ctx_t ctxpr;
/* Check if resource RID already exists */ /* Check if resource RID already exists */
struct proc_resource* resource_check; struct proc_resource* resource_check;
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource_check, rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource_check,
@@ -112,10 +115,10 @@ struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type
} break; } break;
} }
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
rbtree_insert (struct proc_resource, &proc->resource_tree, &resource->proc_resource_tree_link, rbtree_insert (struct proc_resource, &proc->resource_tree, &resource->proc_resource_tree_link,
proc_resource_tree_link, rid); proc_resource_tree_link, rid);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
return resource; return resource;
} }

View File

@@ -2,14 +2,17 @@
#include <libk/std.h> #include <libk/std.h>
#include <sync/rw_spin_lock.h> #include <sync/rw_spin_lock.h>
#include <sys/debug.h> #include <sys/debug.h>
#include <sys/irq.h>
#include <sys/spin_lock.h> #include <sys/spin_lock.h>
#define WRITER_WAIT (1U << 31) #define WRITER_WAIT (1U << 31)
#define READER_MASK (~WRITER_WAIT) #define READER_MASK (~WRITER_WAIT)
void rw_spin_read_lock (rw_spin_lock_t* rw) { void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t value; uint32_t value;
irq_save (ctx);
for (;;) { for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed); value = atomic_load_explicit (rw, memory_order_relaxed);
@@ -24,14 +27,17 @@ void rw_spin_read_lock (rw_spin_lock_t* rw) {
} }
} }
void rw_spin_read_unlock (rw_spin_lock_t* rw) { void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release); uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
assert ((old & READER_MASK) > 0); assert ((old & READER_MASK) > 0);
irq_restore (ctx);
} }
void rw_spin_write_lock (rw_spin_lock_t* rw) { void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t value; uint32_t value;
irq_save (ctx);
/* announce writer */ /* announce writer */
for (;;) { for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed); value = atomic_load_explicit (rw, memory_order_relaxed);
@@ -40,9 +46,10 @@ void rw_spin_write_lock (rw_spin_lock_t* rw) {
if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT), if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
memory_order_acquire, memory_order_relaxed)) memory_order_acquire, memory_order_relaxed))
break; break;
} else } else {
spin_lock_relax (); spin_lock_relax ();
} }
}
/* wait for readers */ /* wait for readers */
for (;;) { for (;;) {
@@ -54,6 +61,7 @@ void rw_spin_write_lock (rw_spin_lock_t* rw) {
} }
} }
void rw_spin_write_unlock (rw_spin_lock_t* rw) { void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
atomic_store_explicit (rw, 0, memory_order_release); atomic_store_explicit (rw, 0, memory_order_release);
irq_restore (ctx);
} }

View File

@@ -3,14 +3,15 @@
#include <libk/std.h> #include <libk/std.h>
#include <sync/spin_lock.h> #include <sync/spin_lock.h>
#include <sys/spin_lock.h>
#define RW_SPIN_LOCK_INIT 0 #define RW_SPIN_LOCK_INIT 0
typedef _Atomic (uint32_t) rw_spin_lock_t; typedef _Atomic (uint32_t) rw_spin_lock_t;
void rw_spin_read_lock (rw_spin_lock_t* rw); void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_read_unlock (rw_spin_lock_t* rw); void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_write_lock (rw_spin_lock_t* rw); void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_write_unlock (rw_spin_lock_t* rw); void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H #endif // _KERNEL_SYNC_RW_SPIN_LOCK_H

View File

@@ -3,15 +3,15 @@
#include <sys/irq.h> #include <sys/irq.h>
#include <sys/spin_lock.h> #include <sys/spin_lock.h>
void spin_lock (spin_lock_t* sl) { void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
irq_save (); irq_save (ctx);
while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire)) while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire))
spin_lock_relax (); spin_lock_relax ();
} }
void spin_unlock (spin_lock_t* sl) { void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
atomic_flag_clear_explicit (sl, memory_order_release); atomic_flag_clear_explicit (sl, memory_order_release);
irq_restore (); irq_restore (ctx);
} }

View File

@@ -2,12 +2,13 @@
#define _KERNEL_SYNC_SPIN_LOCK_H #define _KERNEL_SYNC_SPIN_LOCK_H
#include <libk/std.h> #include <libk/std.h>
#include <sys/spin_lock.h>
#define SPIN_LOCK_INIT ATOMIC_FLAG_INIT #define SPIN_LOCK_INIT ATOMIC_FLAG_INIT
typedef atomic_flag spin_lock_t; typedef atomic_flag spin_lock_t;
void spin_lock (spin_lock_t* sl); void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
void spin_unlock (spin_lock_t* sl); void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
#endif // _KERNEL_SYNC_SPIN_LOCK_H #endif // _KERNEL_SYNC_SPIN_LOCK_H

View File

@@ -1,7 +1,9 @@
#ifndef _KERNEL_SYS_IRQ_H #ifndef _KERNEL_SYS_IRQ_H
#define _KERNEL_SYS_IRQ_H #define _KERNEL_SYS_IRQ_H
void irq_save (void); #include <sys/spin_lock.h>
void irq_restore (void);
void irq_save (spin_lock_ctx_t* ctx);
void irq_restore (spin_lock_ctx_t* ctx);
#endif // _KERNEL_SYS_IRQ_H #endif // _KERNEL_SYS_IRQ_H

View File

@@ -1,6 +1,12 @@
#ifndef _KERNEL_SYS_SPIN_LOCK_H
#define _KERNEL_SYS_SPIN_LOCK_H

#include <libk/std.h>

/* Per-acquisition IRQ-state context; a stack variable passed to every
 * lock/unlock pair so locks nest correctly inside IRQ-disabled regions.
 * On x86-64 this holds the saved RFLAGS value. */
#if defined(__x86_64__)
typedef uint64_t spin_lock_ctx_t;
#endif

/* Architecture-specific CPU relax hint used while spinning (e.g. PAUSE). */
void spin_lock_relax (void);

#endif // _KERNEL_SYS_SPIN_LOCK_H

View File

@@ -60,21 +60,22 @@ DEFINE_SYSCALL (sys_proc_unmap) {
/* int proc_create_resource_mem (size_t pages, int vis, uintptr_t* out_paddr) */ /* int proc_create_resource_mem (size_t pages, int vis, uintptr_t* out_paddr) */
DEFINE_SYSCALL (sys_proc_create_resource_mem) { DEFINE_SYSCALL (sys_proc_create_resource_mem) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxprpd;
size_t pages = (size_t)a1; size_t pages = (size_t)a1;
int vis = (int)a2; int vis = (int)a2;
uintptr_t* out_paddr_buf = (uintptr_t*)a3; uintptr_t* out_paddr_buf = (uintptr_t*)a3;
spin_lock (&proc->pd.lock); spin_lock (&proc->pd.lock, &ctxprpd);
uintptr_t out_paddr_buf_paddr = mm_v2p (&proc->pd, (uintptr_t)out_paddr_buf, 0); uintptr_t out_paddr_buf_paddr = mm_v2p (&proc->pd, (uintptr_t)out_paddr_buf, 0);
if (!mm_validate_buffer (&proc->pd, (uintptr_t)out_paddr_buf, sizeof (uintptr_t), 0)) { if (!mm_validate_buffer (&proc->pd, (uintptr_t)out_paddr_buf, sizeof (uintptr_t), 0)) {
spin_unlock (&proc->pd.lock); spin_unlock (&proc->pd.lock, &ctxprpd);
return -SR_BAD_ADDRESS_SPACE; return -SR_BAD_ADDRESS_SPACE;
} }
spin_unlock (&proc->pd.lock); spin_unlock (&proc->pd.lock, &ctxprpd);
uintptr_t* out_paddr_buf_vaddr = (uintptr_t*)((uintptr_t)hhdm->offset + out_paddr_buf_paddr); uintptr_t* out_paddr_buf_vaddr = (uintptr_t*)((uintptr_t)hhdm->offset + out_paddr_buf_paddr);
@@ -104,13 +105,14 @@ DEFINE_SYSCALL (sys_proc_create_resource_mutex) {
/* int proc_mutex_lock (int mutex_rid) */ /* int proc_mutex_lock (int mutex_rid) */
DEFINE_SYSCALL (sys_proc_mutex_lock) { DEFINE_SYSCALL (sys_proc_mutex_lock) {
spin_lock_ctx_t ctxpr;
int rid = (int)a1; int rid = (int)a1;
struct proc_resource* resource; struct proc_resource* resource;
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link, rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link,
rid); rid);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
if (resource == NULL) if (resource == NULL)
return -SR_NOT_FOUND; return -SR_NOT_FOUND;
@@ -121,13 +123,14 @@ DEFINE_SYSCALL (sys_proc_mutex_lock) {
} }
DEFINE_SYSCALL (sys_proc_mutex_unlock) { DEFINE_SYSCALL (sys_proc_mutex_unlock) {
spin_lock_ctx_t ctxpr;
int rid = (int)a1; int rid = (int)a1;
struct proc_resource* resource; struct proc_resource* resource;
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link, rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link,
rid); rid);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
if (resource == NULL) if (resource == NULL)
return -SR_NOT_FOUND; return -SR_NOT_FOUND;
@@ -137,13 +140,14 @@ DEFINE_SYSCALL (sys_proc_mutex_unlock) {
/* int proc_drop_resource (int rid) */ /* int proc_drop_resource (int rid) */
DEFINE_SYSCALL (sys_proc_drop_resource) { DEFINE_SYSCALL (sys_proc_drop_resource) {
spin_lock_ctx_t ctxpr;
int rid = (int)a1; int rid = (int)a1;
struct proc_resource* resource; struct proc_resource* resource;
spin_lock (&proc->lock); spin_lock (&proc->lock, &ctxpr);
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link, rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link,
rid); rid);
spin_unlock (&proc->lock); spin_unlock (&proc->lock, &ctxpr);
if (resource == NULL) if (resource == NULL)
return -SR_NOT_FOUND; return -SR_NOT_FOUND;