Clean up AMD64 memory management code, remove dependency on pd.lock
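Note on usage (not part of the diff): with pd.lock, mm_lock, and the MM_PD_LOCK/MM_PD_RELOAD flag plumbing gone, callers are presumably expected to serialize kernel page-directory updates themselves through the new mm_kernel_lock/mm_kernel_unlock helpers, while the mm_*_kernel_page wrappers now reload CR3 on their own. A minimal caller sketch under that assumption follows; example_kernel_mapping and its parameters are hypothetical, not names from this commit.

    /* Hypothetical caller sketch: map one kernel page, then unmap it, holding the
     * new kernel page-directory lock around both operations. */
    static void example_kernel_mapping (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
        spin_lock_ctx_t ctx;

        mm_kernel_lock (&ctx);                    /* replaces the old mm_lock_kernel () */
        mm_map_kernel_page (paddr, vaddr, flags); /* now reloads CR3 itself */
        mm_unmap_kernel_page (vaddr);             /* no MM_PD_LOCK/MM_PD_RELOAD flags anymore */
        mm_kernel_unlock (&ctx);                  /* replaces the old mm_unlock_kernel () */
    }

This mirrors the removed mm_lock_kernel/mm_unlock_kernel pair, but passes the spin_lock_ctx_t explicitly instead of keeping a file-static context.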
@@ -22,10 +22,12 @@ struct pg_index {
 } PACKED;
 
 /* Kernel page directory */
-static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
-static spin_lock_ctx_t ctxkpd;
-/* Lock needed to sync between map/unmap operations and TLB shootdown */
-static spin_lock_t mm_lock = SPIN_LOCK_INIT;
+static struct pd kernel_pd;
+static spin_lock_t kernel_pd_lock;
+
+void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
+
+void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
 
 /* Get current value of CR3 register */
 static uintptr_t amd64_current_cr3 (void) {
@@ -112,15 +114,7 @@ static void amd64_reload_cr3 (void) {
 
 /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    bool do_reload = false;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
 
@@ -129,69 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
 
     uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
     if (pml3 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
     if (pml2 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
     if (pml1 == NULL)
-        goto done;
+        return;
 
     uint64_t* pte = &pml1[pg_index.pml1];
 
     *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
-    do_reload = true;
-
-done:
-    if (do_reload && (flags & MM_PD_RELOAD))
-        amd64_reload_cr3 ();
-
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
 }
 
 /* Map a page into kernel page directory */
 void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
     mm_map_page (&kernel_pd, paddr, vaddr, flags);
+    amd64_reload_cr3 ();
 }
 
 /* Unmap a virtual address. TLB needs to be flushed afterwards */
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    bool do_reload = false;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
 
     uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
     if (pml3 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
     if (pml2 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
     if (pml1 == NULL)
-        goto done;
+        return;
 
     uint64_t* pte = &pml1[pg_index.pml1];
 
-    if ((*pte) & AMD64_PG_PRESENT) {
+    if ((*pte) & AMD64_PG_PRESENT)
         *pte = 0;
-        do_reload = true;
-    }
 
     if (amd64_mm_is_table_empty (pml1)) {
         uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -210,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
             }
         }
     }
-
-done:
-    if (do_reload && (flags & MM_PD_RELOAD))
-        amd64_reload_cr3 ();
-
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
 }
 
 /* Unmap a page from kernel page directory */
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
-    mm_unmap_page (&kernel_pd, vaddr, flags);
+void mm_unmap_kernel_page (uintptr_t vaddr) {
+    mm_unmap_page (&kernel_pd, vaddr);
+    amd64_reload_cr3 ();
 }
 
-/* Lock kernel page directory */
-void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }
-
-/* Unlock kernel page directory */
-void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }
-
 /* Allocate a userspace-ready page directory */
 uintptr_t mm_alloc_user_pd_phys (void) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -250,26 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
     return cr3;
 }
 
-/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
-void mm_reload (void) {
-    struct limine_mp_response* mp = limine_mp_request.response;
-
-    for (size_t i = 0; i < mp->cpu_count; i++) {
-        amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
-    }
-}
-
-bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+bool mm_validate (struct pd* pd, uintptr_t vaddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool ret = false;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
 
@@ -289,45 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
     ret = (pte & AMD64_PG_PRESENT) != 0;
-
-done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
 
     return ret;
 }
 
-bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
     bool ok = true;
-    spin_lock_ctx_t ctxpd;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     for (size_t i = 0; i < size; i++) {
-        ok = mm_validate (pd, vaddr + i, 0);
+        ok = mm_validate (pd, vaddr + i);
         if (!ok)
             goto done;
     }
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
 
     return ok;
 }
 
-uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
 
     for (size_t i4 = 0; i4 < 512; i4++) {
@@ -358,25 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
     }
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
-
     return ret;
 }
 
-uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
 
@@ -400,25 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
     ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
-
     return ret;
 }
 
-/* TLB shootdown IRQ handler */
-static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
-    (void)arg, (void)regs;
-
-    amd64_reload_cr3 ();
-    DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
-}
-
-/* Continue initializing memory management subsystem for AMD64 after the essential parts were
- * initialized */
-void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }
-
 /* Initialize essentials for the AMD64 memory management subsystem */
 void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }