Implement lock IRQ nesting via stack variables/contexts

2026-01-14 22:11:56 +01:00
parent 55166f9d5f
commit 270ff507d4
22 changed files with 197 additions and 145 deletions
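
Every spin_lock/spin_unlock call in the diff below gains a second argument: a caller-provided spin_lock_ctx_t, living on the stack of the function taking the lock, which records the IRQ state saved by that particular acquisition. The actual type and lock implementation are defined elsewhere in the tree; what follows is a minimal sketch, under assumed details, of what such a context-based API looks like on amd64. Only the names spin_lock_ctx_t, spin_lock_t, SPIN_LOCK_INIT, spin_lock, and spin_unlock come from the diff; everything else is illustrative:

/* Hypothetical sketch -- the real spin_lock_ctx_t and lock routines are
 * defined elsewhere in the tree and are not part of this diff. */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint64_t rflags; /* IRQ state saved by this particular acquisition */
} spin_lock_ctx_t;

typedef struct {
    volatile bool locked;
} spin_lock_t;

#define SPIN_LOCK_INIT {false}

static inline uint64_t irq_save_disable (void) {
    uint64_t rflags;
    __asm__ volatile ("pushfq; popq %0; cli" : "=r"(rflags) : : "memory");
    return rflags;
}

static inline void irq_restore (uint64_t rflags) {
    if (rflags & (1ULL << 9)) /* RFLAGS.IF was set: re-enable interrupts */
        __asm__ volatile ("sti" : : : "memory");
}

void spin_lock (spin_lock_t* lock, spin_lock_ctx_t* ctx) {
    ctx->rflags = irq_save_disable (); /* state saved per call, not per lock */
    while (__atomic_test_and_set (&lock->locked, __ATOMIC_ACQUIRE))
        __asm__ volatile ("pause");
}

void spin_unlock (spin_lock_t* lock, spin_lock_ctx_t* ctx) {
    __atomic_clear (&lock->locked, __ATOMIC_RELEASE);
    irq_restore (ctx->rflags); /* restore exactly what this caller saved */
}

With the state saved per acquisition rather than per lock, nested acquisitions become safe: releasing an inner lock restores the inner context's flags (interrupts still disabled) instead of unconditionally re-enabling IRQs.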


@@ -25,6 +25,7 @@ struct pg_index {
 /* Kernel page directory */
 static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
+static spin_lock_ctx_t ctxkpd;
 
 /* Lock needed to sync between map/unmap operations and TLB shootdown */
 static spin_lock_t mm_lock = SPIN_LOCK_INIT;
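
(Note: the kernel page directory's context, ctxkpd, is static rather than stack-allocated, since mm_lock_kernel () and mm_unlock_kernel (), further down, acquire and release the lock in separate calls, so the saved IRQ state must outlive either function's stack frame.)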
@@ -108,13 +109,15 @@ static void amd64_reload_cr3 (void) {
 /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+    spin_lock (&mm_lock, &ctxmm);
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool do_reload = false;
     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);
     uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
@@ -143,9 +146,9 @@ done:
     amd64_reload_cr3 ();
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);
-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
 }
 
 /* Map a page into kernel page directory */
@@ -155,13 +158,15 @@ void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
 /* Unmap a virtual address. TLB needs to be flushed afterwards */
 void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+    spin_lock (&mm_lock, &ctxmm);
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool do_reload = false;
     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -208,9 +213,9 @@ done:
     amd64_reload_cr3 ();
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);
-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
 }
 
 /* Unmap a page from kernel page directory */
@@ -219,10 +224,10 @@ void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
 }
 
 /* Lock kernel page directory */
-void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
+void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }
 
 /* Unlock kernel page directory */
-void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
+void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }
 
 /* Allocate a userspace-ready page directory */
 uintptr_t mm_alloc_user_pd_phys (void) {
@@ -252,13 +257,15 @@ void mm_reload (void) {
 }
 
 bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+    spin_lock (&mm_lock, &ctxmm);
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool ret = false;
     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -280,18 +287,19 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);
-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
     return ret;
 }
 
 bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
     bool ok = true;
+    spin_lock_ctx_t ctxpd;
     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);
     for (size_t i = 0; i < size; i++) {
         ok = mm_validate (pd, vaddr + i, 0);
@@ -301,19 +309,21 @@ bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t f
 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);
     return ok;
 }
 
 uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+    spin_lock (&mm_lock, &ctxmm);
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;
     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
@@ -346,21 +356,23 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);
-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
     return ret;
 }
 
 uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock (&mm_lock);
+    spin_lock_ctx_t ctxmm, ctxpd;
+    spin_lock (&mm_lock, &ctxmm);
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;
     if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock);
+        spin_lock (&pd->lock, &ctxpd);
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -386,9 +398,9 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
 done:
     if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock);
+        spin_unlock (&pd->lock, &ctxpd);
-    spin_unlock (&mm_lock);
+    spin_unlock (&mm_lock, &ctxmm);
     return ret;
 }
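
The recurring pattern above (mm_lock taken first, pd->lock nested inside it, each with its own context) is exactly what the per-acquisition state enables. A hypothetical caller under the sketched API, assuming interrupts were enabled on entry:

/* Each unlock restores only the state its own acquisition saved, so the
 * inner unlock cannot re-enable IRQs while the outer lock is still held. */
void example_nested (struct pd* pd) {
    spin_lock_ctx_t ctxmm, ctxpd;
    spin_lock (&mm_lock, &ctxmm);    /* saves IF=1, disables IRQs    */
    spin_lock (&pd->lock, &ctxpd);   /* IF already 0; saves IF=0     */
    /* ... work on the page tables ... */
    spin_unlock (&pd->lock, &ctxpd); /* restores IF=0: IRQs stay off */
    spin_unlock (&mm_lock, &ctxmm);  /* restores IF=1: IRQs back on  */
}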