mop3/kernel/sync/rw_spin_lock.c
Commit 270ff507d4 by kamkow1, 2026-01-14 22:11:56 +01:00: Implement lock IRQ nesting via stack variables/contexts

#include <libk/assert.h>
#include <libk/std.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/irq.h>
#include <sys/spin_lock.h>

/* Bit 31 marks a pending or active writer; bits 0-30 count active readers. */
#define WRITER_WAIT (1U << 31)
#define READER_MASK (~WRITER_WAIT)

void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
    uint32_t value;
    irq_save (ctx);
    for (;;) {
        value = atomic_load_explicit (rw, memory_order_relaxed);
        /* only enter while no writer is pending, then bump the reader count */
        if ((value & WRITER_WAIT) == 0) {
            if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
                                                       memory_order_relaxed)) {
                return;
            }
        }
        spin_lock_relax ();
    }
}

void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
    /* drop one reader; the reader count must not already be zero */
    uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
    assert ((old & READER_MASK) > 0);
    irq_restore (ctx);
}

void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
    uint32_t value;
    irq_save (ctx);
    /* announce writer */
    for (;;) {
        value = atomic_load_explicit (rw, memory_order_relaxed);
        if ((value & WRITER_WAIT) == 0) {
            if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
                                                       memory_order_acquire, memory_order_relaxed))
                break;
        } else {
            spin_lock_relax ();
        }
    }
    /* wait for readers */
    for (;;) {
        value = atomic_load_explicit (rw, memory_order_acquire);
        if ((value & READER_MASK) == 0)
            return;
        spin_lock_relax ();
    }
}

void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
    /* clear both the writer bit and the reader count; the writer held exclusive access */
    atomic_store_explicit (rw, 0, memory_order_release);
    irq_restore (ctx);
}