#include <limine.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Project-local headers: the original header names were lost when the file was
   flattened; these are assumed placeholders for the declarations of struct pd,
   spin_lock/spin_unlock, pmm_alloc, limine_hhdm_request, PAGE_SIZE, PACKED and
   the generic MM_* flags.  */
#include "mm.h"
#include "pmm.h"

#define AMD64_PG_PRESENT (1 << 0)
#define AMD64_PG_RW (1 << 1)
#define AMD64_PG_USER (1 << 2)
#define AMD64_PG_TABLE_ENTRIES_MAX 512

/* Per-level indices into the four-level AMD64 page-table hierarchy for a
   given virtual address.  */
struct pg_index
{
    uint16_t pml4, pml3, pml2, pml1;
} PACKED;

struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};

/* Read the physical address of the currently loaded top-level page table.  */
static uintptr_t
amd64_current_cr3 (void)
{
    uintptr_t cr3;
    __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
    return cr3;
}

void
amd64_load_kernel_cr3 (void)
{
    __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
}

/* Split a virtual address into its PML4/PML3/PML2/PML1 indices
   (9 bits each: bits 39-47, 30-38, 21-29 and 12-20 respectively).  */
static struct pg_index
amd64_mm_page_index (uint64_t vaddr)
{
    struct pg_index ret;
    ret.pml4 = ((vaddr >> 39) & 0x1FF);
    ret.pml3 = ((vaddr >> 30) & 0x1FF);
    ret.pml2 = ((vaddr >> 21) & 0x1FF);
    ret.pml1 = ((vaddr >> 12) & 0x1FF);
    return ret;
}

/* Return a pointer (through the HHDM) to the table referenced by
   table[entry_idx].  If the entry is not present and `alloc` is true, a new
   zeroed table is allocated from the PMM and installed; otherwise NULL is
   returned.  */
static uint64_t*
amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc)
{
    uint64_t entry = table[entry_idx];
    uint64_t paddr;
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    if (entry & AMD64_PG_PRESENT)
        paddr = entry & ~0xFFFULL;
    else
    {
        if (!alloc)
            return NULL;

        paddr = pmm_alloc (1);
        if (paddr == 0)
            return NULL;

        memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
        table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW;
    }

    return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
}

/* Translate the architecture-independent MM_PG_* flags into AMD64
   page-table entry bits.  */
static uint64_t
amd64_mm_resolve_flags (uint32_t generic)
{
    uint64_t flags = 0;
    flags |= ((generic & MM_PG_PRESENT) ? AMD64_PG_PRESENT : 0);
    flags |= ((generic & MM_PG_RW) ? AMD64_PG_RW : 0);
    flags |= ((generic & MM_PG_USER) ? AMD64_PG_USER : 0);
    return flags;
}

/* Flush the TLB by rewriting CR3 with its current value.  */
static void
amd64_reload_cr3 (void)
{
    uint64_t cr3;
    __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
}

void
mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags)
{
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool do_reload = false;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock);

    uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

    /* Walk the hierarchy, allocating intermediate tables as needed.  */
    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
    if (pml3 == NULL)
        goto done;
    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
    if (pml2 == NULL)
        goto done;
    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
    if (pml1 == NULL)
        goto done;

    uint64_t* pte = &pml1[pg_index.pml1];
    *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
    do_reload = true;

done:
    if (do_reload)
        amd64_reload_cr3 ();
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock);
}

void
mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags)
{
    mm_map_page (&kernel_pd, paddr, vaddr, flags);
}

void
mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags)
{
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool do_reload = false;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

    /* Walk without allocating: if an intermediate table is missing, the page
       was never mapped and there is nothing to unmap.  */
    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        goto done;
    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        goto done;
    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        goto done;

    uint64_t* pte = &pml1[pg_index.pml1];
    *pte &= ~AMD64_PG_PRESENT;
    do_reload = true;

done:
    if (do_reload)
        amd64_reload_cr3 ();
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock);
}

void
mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags)
{
    mm_unmap_page (&kernel_pd, vaddr, flags);
}

void
mm_lock_kernel (void)
{
    spin_lock (&kernel_pd.lock);
}

void
mm_unlock_kernel (void)
{
    spin_unlock (&kernel_pd.lock);
}

/* Adopt the bootloader-provided page tables as the kernel page directory.  */
void
mm_init (void)
{
    kernel_pd.cr3_paddr = amd64_current_cr3 ();
}
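
/* Usage sketch, not part of the original file: mm_example_map_frame is a
   hypothetical helper showing how the API above fits together.  It maps one
   freshly allocated physical frame at a caller-chosen kernel virtual address,
   present and writable, taking the kernel page-directory lock for the update.
   Guarded out since it is illustrative only and uses only names already
   referenced in this file.  */
#if 0
static bool
mm_example_map_frame (uintptr_t vaddr)
{
    uintptr_t frame = pmm_alloc (1); /* one physical frame */
    if (frame == 0)
        return false;

    mm_map_kernel_page (frame, vaddr,
                        MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK);
    return true;
}
#endif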