Fix very subtle spinlock race condition due to atomics misuse
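The old irqsave_nested()/irqrestore_nested() updated IRQ_CTX.irq_nest with a separate load, compare, and atomic_inc()/atomic_dec(). That check-then-act sequence is not atomic as a whole: between the load and the increment, a second context can also observe irq_nest == 0, so two callers can both conclude they are the outermost one. The loser then calls irqsave() with interrupts already disabled and overwrites IRQ_CTX.irq_flags with a value whose IF bit is already clear, and the final irqrestore_nested() never re-enables interrupts. Collapse the sequence into a single atomic_fetch_add_explicit()/atomic_fetch_sub_explicit(): the returned previous value identifies exactly one outermost caller, and since only that caller touches irq_flags (ordered by the acq_rel operations on the counter), the field no longer needs to be atomic itself. While here: widen the IF test in irqrestore() to 1ULL << 9 to match the 64-bit flags word, zero-initialize IRQ_CTX, and initialize the lock flag with the matching atomic_flag API instead of atomic_store().

To make the race window concrete, here is a minimal userspace sketch (hypothetical: pthreads stand in for kernel contexts, and nest/winners are illustrative names, not kernel symbols). With the old pattern, more than one thread can occasionally claim to be outermost; with a single fetch_add, exactly one can:

    /* Sketch of the check-then-act race vs. a single read-modify-write. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTHREADS 8

    static atomic_int nest;
    static atomic_int winners;
    static int use_fixed;

    static void *enter(void *arg) {
        (void)arg;
        if (use_fixed) {
            /* fixed pattern: one indivisible read-modify-write; only one
             * thread can ever observe the counter's previous value as 0 */
            if (atomic_fetch_add_explicit(&nest, 1, memory_order_acq_rel) == 0)
                atomic_fetch_add(&winners, 1);
        } else {
            /* racy pattern from the old code: another thread can run
             * between the load and the increment and also see nest == 0 */
            if (atomic_load(&nest) == 0)
                atomic_fetch_add(&winners, 1);
            atomic_fetch_add(&nest, 1);
        }
        return NULL;
    }

    int main(void) {
        for (use_fixed = 0; use_fixed <= 1; use_fixed++) {
            pthread_t t[NTHREADS];
            atomic_store(&nest, 0);
            atomic_store(&winners, 0);
            for (int i = 0; i < NTHREADS; i++)
                pthread_create(&t[i], NULL, enter, NULL);
            for (int i = 0; i < NTHREADS; i++)
                pthread_join(t[i], NULL);
            printf("%s: %d thread(s) saw themselves as outermost\n",
                   use_fixed ? "fixed" : "racy", atomic_load(&winners));
        }
        return 0;
    }

On most runs the racy variant still prints 1, which is what made this bug so subtle; under enough contention it will eventually print more.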
@@ -5,17 +5,15 @@
 #include "hal/hal.h"
 #include "kprintf.h"

 #define SPINLOCK_IRQ_DBG 1

 #define SPINLOCK_HINT() asm volatile("pause")

 struct {
-    atomic_uint64_t irq_flags;
+    uint64_t irq_flags;
     atomic_int irq_nest;
-} IRQ_CTX;
+} IRQ_CTX = {0};

 void spinlock_init(SpinLock *sl) {
-    atomic_store(&sl->lock, false);
+    atomic_flag_clear_explicit(&sl->lock, memory_order_relaxed);
 }

 uint64_t irqsave(void) {
@@ -25,37 +23,22 @@ uint64_t irqsave(void) {
 }

 void irqrestore(uint64_t flags) {
-    if (flags & (1<<9)) {
+    if (flags & (1ULL << 9)) {
         asm volatile("sti" ::: "memory", "cc");
     }
 }

 void irqsave_nested(void) {
-    if (atomic_load(&IRQ_CTX.irq_nest) == 0) {
-        atomic_store(&IRQ_CTX.irq_flags, irqsave());
+    int prev = atomic_fetch_add_explicit(&IRQ_CTX.irq_nest, 1, memory_order_acq_rel);
+    if (prev == 0) {
+        IRQ_CTX.irq_flags = irqsave();
     }
-    atomic_inc(&IRQ_CTX.irq_nest);

 #if SPINLOCK_IRQ_DBG
     if (atomic_load(&IRQ_CTX.irq_nest) < 0) {
         kprintf("IRQ_CTX.irq_nest underflow\n");
         hal_hang();
     }
 #endif
 }

 void irqrestore_nested(void) {
 #if SPINLOCK_IRQ_DBG
     int irq_nest = atomic_load(&IRQ_CTX.irq_nest);
     if (irq_nest <= 0) {
         kprintf("spinlock: irqs restored too many times: %d\n", irq_nest);
         hal_hang();
     }
 #endif

-    atomic_dec(&IRQ_CTX.irq_nest);
-    if (atomic_load(&IRQ_CTX.irq_nest) == 0) {
-        irqrestore(atomic_load(&IRQ_CTX.irq_flags));
+    int prev = atomic_fetch_sub_explicit(&IRQ_CTX.irq_nest, 1, memory_order_acq_rel);
+    if (prev == 1) {
+        irqrestore(IRQ_CTX.irq_flags);
     }
 }
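For context on the spinlock_init() change: the acquire/release side of the lock is not in this diff. Assuming sl->lock is a C11 atomic_flag, the relaxed clear at init time would plausibly pair with something like the sketch below (hypothetical spinlock_acquire/spinlock_release, not the repository's actual code):

    /* Hypothetical lock/unlock pair, assuming sl->lock is an atomic_flag.
     * Not part of this commit; shown only to illustrate why a relaxed
     * clear is sufficient in spinlock_init(). */
    void spinlock_acquire(SpinLock *sl) {
        /* test_and_set returns the previous value; true means the lock
         * is already held, so spin with the CPU relaxation hint */
        while (atomic_flag_test_and_set_explicit(&sl->lock, memory_order_acquire))
            SPINLOCK_HINT();
    }

    void spinlock_release(SpinLock *sl) {
        /* release ordering publishes the critical section's writes to
         * the next acquirer */
        atomic_flag_clear_explicit(&sl->lock, memory_order_release);
    }

A relaxed clear is enough at init time because nothing can contend for a lock before it is published; the acquire/release pair on the hot path is what orders the critical section.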