Implement lock IRQ nesting via stack variables/contexts
All checks were successful
Build documentation / build-and-deploy (push) Successful in 21s
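The diff below converts every spin-lock call site from the old per-CPU IRQ nesting counter to a caller-owned spin_lock_ctx_t that lives on the stack: irq_save writes the pre-lock RFLAGS into the context and irq_restore reads it back, so nested acquisitions restore interrupt state in LIFO order without any shared per-CPU bookkeeping. The commit itself only shows the call sites and the new irq_save/irq_restore; the sketch below of how spin_lock and spin_unlock plausibly tie into them is a reconstruction, and the spin_lock_ctx_t typedef, the flag-helper bodies, and the atomic-flag lock representation are assumptions, not code from this commit.

#include <stdatomic.h>
#include <stdint.h>

typedef atomic_flag spin_lock_t;  /* assumed representation */
typedef uint64_t spin_lock_ctx_t; /* assumed: holds the caller's saved RFLAGS */

/* Plausible bodies for the helpers named in the diff. */
static inline uint64_t amd64_irq_save_flags (void) {
    uint64_t rflags;
    __asm__ volatile ("pushfq; popq %0; cli" : "=r"(rflags) : : "memory");
    return rflags; /* IRQs are now off; the old IF state is in the return value */
}

static inline void amd64_irq_restore_flags (uint64_t rflags) {
    if (rflags & (1ULL << 9)) /* RFLAGS.IF: re-enable only if previously enabled */
        __asm__ volatile ("sti" : : : "memory");
}

/* These two are verbatim from the diff. */
void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }

/* Sketch of the lock itself (its body is not shown in the commit). */
static inline void spin_lock (spin_lock_t* lock, spin_lock_ctx_t* ctx) {
    irq_save (ctx); /* disable IRQs; prior state sits in the caller's frame */
    while (atomic_flag_test_and_set_explicit (lock, memory_order_acquire))
        ; /* spin */
}

static inline void spin_unlock (spin_lock_t* lock, spin_lock_ctx_t* ctx) {
    atomic_flag_clear_explicit (lock, memory_order_release);
    irq_restore (ctx); /* IRQs come back on only at the outermost unlock */
}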
@@ -57,19 +57,23 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;

 /* Read IOAPIC */
 static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
-    rw_spin_read_lock (&ioapic->lock);
+    spin_lock_ctx_t ctxioar;
+
+    rw_spin_read_lock (&ioapic->lock, &ctxioar);
     *(volatile uint32_t*)ioapic->mmio_base = reg;
     uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
-    rw_spin_read_unlock (&ioapic->lock);
+    rw_spin_read_unlock (&ioapic->lock, &ctxioar);
     return ret;
 }

 /* Write IOAPIC */
 static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
-    rw_spin_write_lock (&ioapic->lock);
+    spin_lock_ctx_t ctxioaw;
+
+    rw_spin_write_lock (&ioapic->lock, &ctxioaw);
     *(volatile uint32_t*)ioapic->mmio_base = reg;
     *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
-    rw_spin_write_unlock (&ioapic->lock);
+    rw_spin_write_unlock (&ioapic->lock, &ctxioaw);
 }

 /* Find an IOAPIC corresponding to provided IRQ */
@@ -201,7 +205,9 @@ void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
  * us - Period length in microseconds
  */
 static uint32_t amd64_lapic_calibrate (uint32_t us) {
-    spin_lock (&lapic_calibration_lock);
+    spin_lock_ctx_t ctxlacb;
+
+    spin_lock (&lapic_calibration_lock, &ctxlacb);

     amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);

@@ -214,7 +220,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
     uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
     DEBUG ("timer ticks = %u\n", ticks);

-    spin_unlock (&lapic_calibration_lock);
+    spin_unlock (&lapic_calibration_lock, &ctxlacb);

     return ticks;
 }

@@ -35,6 +35,8 @@ static void amd64_debug_serial_write (char x) {
  * Formatted printing to serial. serial_lock ensures that all prints are atomic.
  */
 void debugprintf (const char* fmt, ...) {
+    spin_lock_ctx_t ctxdbgp;
+
     if (!debug_init)
         return;

@@ -50,14 +52,14 @@ void debugprintf (const char* fmt, ...) {
     const char* p = buffer;

-    spin_lock (&serial_lock);
+    spin_lock (&serial_lock, &ctxdbgp);

     while (*p) {
         amd64_debug_serial_write (*p);
         p++;
     }

-    spin_unlock (&serial_lock);
+    spin_unlock (&serial_lock, &ctxdbgp);
 }

 /* Initialize serial */

@@ -60,8 +60,9 @@ static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {

 static uint64_t amd64_hpet_read_counter (void) {
     uint64_t value;
+    spin_lock_ctx_t ctxhrc;

-    spin_lock (&hpet_lock);
+    spin_lock (&hpet_lock, &ctxhrc);

     if (!hpet_32bits)
         value = amd64_hpet_read64 (HPET_MCVR);
@@ -76,13 +77,15 @@ static uint64_t amd64_hpet_read_counter (void) {
         value = ((uint64_t)hi1 << 32) | lo;
     }

-    spin_unlock (&hpet_lock);
+    spin_unlock (&hpet_lock, &ctxhrc);

     return value;
 }

 static void amd64_hpet_write_counter (uint64_t value) {
-    spin_lock (&hpet_lock);
+    spin_lock_ctx_t ctxhwc;
+
+    spin_lock (&hpet_lock, &ctxhwc);

     if (!hpet_32bits)
         amd64_hpet_write64 (HPET_MCVR, value);
@@ -91,7 +94,7 @@ static void amd64_hpet_write_counter (uint64_t value) {
         amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
     }

-    spin_unlock (&hpet_lock);
+    spin_unlock (&hpet_lock, &ctxhwc);
 }

 /* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being

@@ -210,18 +210,10 @@ static void amd64_irq_restore_flags (uint64_t rflags) {
 }

 /* Save current interrupt state */
-void irq_save (void) {
-    int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
-    if (prev == 0)
-        thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
-}
+void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }

 /* Restore interrupt state */
-void irq_restore (void) {
-    int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
-    if (prev == 1)
-        amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
-}
+void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }

 /* Map custom IRQ mappings to legacy IRQs */
 uint8_t amd64_resolve_irq (uint8_t irq) {

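The replaced variant made irq_save/irq_restore safe under nesting by counting depth in thiscpu->irq_ctx and touching the hardware state only at depth zero; the new variants are stateless and push that snapshot onto each caller's stack instead. A sketch of how nesting composes under the new scheme (the lock names are illustrative, not from the diff):

static spin_lock_t lock_a = SPIN_LOCK_INIT;
static spin_lock_t lock_b = SPIN_LOCK_INIT;

void nested_example (void) {
    spin_lock_ctx_t ctxa, ctxb;

    spin_lock (&lock_a, &ctxa);   /* ctxa records "IRQs were on"; IRQs now off */
    spin_lock (&lock_b, &ctxb);   /* ctxb records "IRQs were off"; IRQs stay off */

    /* ... critical section ... */

    spin_unlock (&lock_b, &ctxb); /* restores "off": IRQs remain disabled */
    spin_unlock (&lock_a, &ctxa); /* restores "on": IRQs re-enabled */
}

Note the trade-off this implies: the old counter tolerated out-of-order release, while the stack contexts require strict LIFO release, since unlocking lock_a before lock_b would restore the outer "IRQs on" snapshot while lock_b is still held.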
@@ -25,6 +25,7 @@ struct pg_index {

 /* Kernel page directory */
 static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
+static spin_lock_ctx_t ctxkpd;
 /* Lock needed to sync between map/unmap operations and TLB shootdown */
 static spin_lock_t mm_lock = SPIN_LOCK_INIT;

@@ -108,13 +109,15 @@ static void amd64_reload_cr3 (void) {

 /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+
+    spin_lock (&mm_lock, &ctxmm);

     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool do_reload = false;

     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);

     uint64_t amd64_flags = amd64_mm_resolve_flags (flags);

@@ -143,9 +146,9 @@ done:
     amd64_reload_cr3 ();

     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);

-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
 }

 /* Map a page into kernel page directory */
@@ -155,13 +158,15 @@ void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {

 /* Unmap a virtual address. TLB needs to be flushed afterwards */
 void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+
+    spin_lock (&mm_lock, &ctxmm);

     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool do_reload = false;

     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);

     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -208,9 +213,9 @@ done:
     amd64_reload_cr3 ();

     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);

-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
 }

 /* Unmap a page from kernel page directory */
@@ -219,10 +224,10 @@ void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
 }

 /* Lock kernel page directory */
-void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
+void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }

 /* Unlock kernel page directory */
-void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
+void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }

 /* Allocate a userspace-ready page directory */
 uintptr_t mm_alloc_user_pd_phys (void) {

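Unlike every other call site in this commit, mm_lock_kernel/mm_unlock_kernel stash their snapshot in the file-scope ctxkpd, since their signatures take no context argument, so one saved RFLAGS value is shared by all callers of the pair. A caller-supplied variant would keep the snapshot on each caller's stack; the names below are hypothetical, not part of this commit:

/* Hypothetical alternative: context provided by the caller. */
void mm_lock_kernel_ctx (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd.lock, ctx); }
void mm_unlock_kernel_ctx (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd.lock, ctx); }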
@@ -252,13 +257,15 @@ void mm_reload (void) {
 }

 bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+
+    spin_lock (&mm_lock, &ctxmm);

     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool ret = false;

     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);

     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -280,18 +287,19 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {

 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);

-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);

     return ret;
 }

 bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
     bool ok = true;
+    spin_lock_ctx_t ctxpd;

     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);

     for (size_t i = 0; i < size; i++) {
         ok = mm_validate (pd, vaddr + i, 0);
@@ -301,19 +309,21 @@ bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t f

 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);

     return ok;
 }

 uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+
+    spin_lock (&mm_lock, &ctxmm);

     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;

     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);

     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);

@@ -346,21 +356,23 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {

 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);

-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);

     return ret;
 }

 uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+
+    spin_lock (&mm_lock, &ctxmm);

     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;

     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);

     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -386,9 +398,9 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {

 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);

-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);

     return ret;
 }

@@ -74,11 +74,12 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {

 void proc_cleanup (struct proc* proc) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+    spin_lock_ctx_t ctxprpd;

     proc_cleanup_resources (proc);

     struct list_node_link *mapping_link, *mapping_link_tmp;
-    spin_lock (&proc->pd.lock);
+    spin_lock (&proc->pd.lock, &ctxprpd);

     list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
         struct proc_mapping* mapping =
@@ -88,7 +89,7 @@ void proc_cleanup (struct proc* proc) {
         free (mapping);
     }

-    spin_unlock (&proc->pd.lock);
+    spin_unlock (&proc->pd.lock, &ctxprpd);

     pmm_free (proc->pd.cr3_paddr, 1);

@@ -29,7 +29,6 @@ struct cpu* cpu_make (void) {
     memset (cpu, 0, sizeof (*cpu));
     cpu->lock = SPIN_LOCK_INIT;
     cpu->id = id;
-    cpu->self = cpu;

     amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);

@@ -15,7 +15,6 @@ struct cpu {
     /* for syscall instruction */
     uintptr_t syscall_user_stack;
     uintptr_t syscall_kernel_stack;
-    struct cpu* self;
     volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
     volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
     volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
@@ -26,11 +25,6 @@ struct cpu {
     uint64_t lapic_ticks;
     uint32_t id;

-    struct {
-        uint64_t rflags;
-        atomic_int nesting;
-    } irq_ctx;
-
     spin_lock_t lock;

     struct rb_node_link* proc_run_q;