Rework x86_64 paging and vmm
@@ -13,11 +13,12 @@ void spinlock_acquire(SpinLock *sl) {
             unlocked = false;
             SPINLOCK_HINT();
         }
     hal_intr_disable();
 }

 void spinlock_release(SpinLock *sl) {
-    atomic_store(&sl->lock, false);
     hal_intr_enable();
+    bool locked = true;
+    if (atomic_compare_exchange_strong(&sl->lock, &locked, false)) {
+        atomic_store(&sl->lock, false);
+    }
 }
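Taken together, the two sides of this hunk suggest the following shape for the lock pair. This is a hedged reconstruction, not the repository's exact source: the body of the spin loop in spinlock_acquire is cut off above the hunk, so the loop here is an assumption, and the extra atomic_store inside the if is omitted because a successful compare-exchange has already written false. The stand-in declarations for "hal/hal.h" and SPINLOCK_HINT exist only to make the sketch self-contained.

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-ins for "hal/hal.h"; only the names appear in the diff. */
void hal_intr_disable(void);
void hal_intr_enable(void);
#define SPINLOCK_HINT() ((void)0) /* placeholder; see the header hunk below */

typedef struct {
    atomic_bool lock;
} SpinLock;

void spinlock_acquire(SpinLock *sl) {
    bool unlocked = false;
    /* Spin until the flag flips from false (free) to true (held). */
    while (!atomic_compare_exchange_weak(&sl->lock, &unlocked, true)) {
        unlocked = false;   /* a failed CAS overwrote it with the observed value */
        SPINLOCK_HINT();    /* tell the CPU we are busy-waiting */
    }
    hal_intr_disable();     /* the diff masks interrupts once the lock is held */
}

void spinlock_release(SpinLock *sl) {
    hal_intr_enable();
    bool locked = true;
    /* Clear the flag only if it is actually set, so releasing an
       already-free lock is a no-op rather than a silent store. */
    atomic_compare_exchange_strong(&sl->lock, &locked, false);
}

The design point of the new release path is that an unconditional atomic_store would happily "release" a lock that was never taken; the compare-exchange makes a double release observable instead of silent.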
@@ -3,9 +3,10 @@
 #include <stdatomic.h>
 #include <stdint.h>
 #include "hal/hal.h"

-typedef struct { atomic_bool lock; } SpinLock;
+typedef struct {
+    atomic_bool lock;
+} SpinLock;

 // Spin more efficiently - CPU dependent
 #if defined(__x86_64__)
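The hunk is truncated at the #if, so the body of SPINLOCK_HINT is not shown. On x86_64 the conventional choice for a spin-loop hint is the pause instruction; the macro below is a sketch of that pattern under that assumption, not the repository's actual definition.

// Spin more efficiently - CPU dependent (sketch; the diff cuts off here)
#if defined(__x86_64__)
/* "pause" reduces power draw while spinning and avoids the memory-order
   mis-speculation penalty when the loop finally exits. */
#define SPINLOCK_HINT() __asm__ volatile("pause")
#else
#define SPINLOCK_HINT() ((void)0) /* no-op fallback on other CPUs */
#endif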