#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#include "spinlock.h"
#include "hal/hal.h"
#include "kprintf.h"

#define SPINLOCK_IRQ_DBG 1
#define SPINLOCK_HINT() asm volatile("pause")

/* Per-CPU-less interrupt save context: flags captured on the first
 * (outermost) irqsave_nested() call, plus the nesting depth. */
struct {
    atomic_uint64_t irq_flags;
    atomic_int irq_nest;
} IRQ_CTX;

void spinlock_init(SpinLock *sl) {
    atomic_store(&sl->lock, false);
}

/* Save RFLAGS and disable interrupts; returns the saved flags. */
uint64_t irqsave(void) {
    uint64_t flags;
    asm volatile("pushfq; cli; popq %0" : "=r"(flags) :: "memory", "cc");
    return flags;
}

/* Re-enable interrupts only if they were enabled (RFLAGS.IF, bit 9)
 * when the flags were saved. */
void irqrestore(uint64_t flags) {
    if (flags & (1 << 9)) {
        asm volatile("sti" ::: "memory", "cc");
    }
}

void irqsave_nested(void) {
    /* Only the outermost caller records the original flags. */
    if (atomic_load(&IRQ_CTX.irq_nest) == 0) {
        atomic_store(&IRQ_CTX.irq_flags, irqsave());
    }
    atomic_inc(&IRQ_CTX.irq_nest);
#if SPINLOCK_IRQ_DBG
    if (atomic_load(&IRQ_CTX.irq_nest) < 0) {
        kprintf("IRQ_CTX.irq_nest underflow\n");
        hal_hang();
    }
#endif
}

void irqrestore_nested(void) {
#if SPINLOCK_IRQ_DBG
    int irq_nest = atomic_load(&IRQ_CTX.irq_nest);
    if (irq_nest <= 0) {
        kprintf("spinlock: irqs restored too many times: %d\n", irq_nest);
        hal_hang();
    }
#endif
    atomic_dec(&IRQ_CTX.irq_nest);
    /* Only the outermost caller restores the saved flags. */
    if (atomic_load(&IRQ_CTX.irq_nest) == 0) {
        irqrestore(atomic_load(&IRQ_CTX.irq_flags));
    }
}

void spinlock_acquire(SpinLock *sl) {
    irqsave_nested();
    /* Spin until the previous value was clear; pause to be polite to the core. */
    while (atomic_test_and_set_explicit(&sl->lock, memory_order_acquire)) {
        SPINLOCK_HINT();
    }
}

void spinlock_release(SpinLock *sl) {
    atomic_clear_flag_explicit(&sl->lock, memory_order_release);
    irqrestore_nested();
}
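
/*
 * Usage sketch (illustrative only, not part of the file above): a hypothetical
 * shared counter guarded by one SpinLock. The names counter_lock, shared_counter,
 * counter_init and counter_increment are assumptions made for this example; it
 * only assumes the SpinLock type and the API defined above in spinlock.h.
 */
static SpinLock counter_lock;
static uint64_t shared_counter;

void counter_init(void) {
    spinlock_init(&counter_lock);
    shared_counter = 0;
}

void counter_increment(void) {
    spinlock_acquire(&counter_lock);  /* disables IRQs (nesting-aware), then spins for the lock */
    shared_counter++;                 /* critical section */
    spinlock_release(&counter_lock);  /* releases the lock, restores IRQs when nesting reaches 0 */
}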