#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "vmm/vmm.h"
#include "bootinfo/bootinfo.h"
#include "pmm/pmm.h"
#include "proc/proc.h"
#include "spinlock/spinlock.h"
#include "std/string.h"
#include "kprintf.h"

uint64_t KERNEL_CR3 = 0;
SpinLock spinlock;

/* Read the physical address of the currently loaded PML4 from CR3. */
uint64_t vmm_current_cr3(void) {
    uint64_t cr3;
    asm volatile("mov %%cr3, %0" : "=r"(cr3));
    return cr3;
}

/* Split a canonical virtual address into its four page-table indices. */
PgIndex vmm_pageindex(uint64_t vaddr) {
    PgIndex ret;
    ret.pml4 = (vaddr >> 39) & 0x1ff;
    ret.pml3 = (vaddr >> 30) & 0x1ff;
    ret.pml2 = (vaddr >> 21) & 0x1ff;
    ret.pml1 = (vaddr >> 12) & 0x1ff;
    return ret;
}

/*
 * Walk one level down from `table` through entry `ent`. If the entry is
 * not present and `alloc` is true, allocate and zero a fresh table and
 * install it; if the entry is not present and `alloc` is false, return
 * NULL. Returns a higher-half virtual pointer to the next-level table.
 */
uint64_t *vmm_nexttable(uint64_t *table, uint64_t ent, bool alloc) {
    uint64_t entry = table[ent];
    uint64_t phys;

    if (entry & VMM_PG_PRESENT) {
        phys = entry & ~0xFFFULL;
    } else {
        if (!alloc) {
            return NULL;
        }
        uint8_t *newphys = pmm_alloc(1);
        phys = (uint64_t)newphys;
        memset(VIRT(phys), 0, VMM_PAGE_SIZE);
        table[ent] = phys | VMM_PG_USER | VMM_PG_RW | VMM_PG_PRESENT;
    }

    return (uint64_t *)VIRT(phys);
}

/*
 * Map a single 4 KiB page in the address space rooted at `cr3phys`.
 * Note: only the low three flag bits (present/rw/user on x86-64)
 * survive the mask below.
 */
void vmm_map_page(uint64_t cr3phys, uint64_t virtaddr, uint64_t physaddr,
                  uint32_t flags) {
    uint64_t *pml4 = (uint64_t *)VIRT(cr3phys);
    PgIndex pi = vmm_pageindex(virtaddr);

    uint64_t *pml3 = vmm_nexttable(pml4, pi.pml4, true);
    uint64_t *pml2 = vmm_nexttable(pml3, pi.pml3, true);
    uint64_t *pml1 = vmm_nexttable(pml2, pi.pml2, true);

    uint64_t *pte = &pml1[pi.pml1];
    *pte = (physaddr & ~0xFFFULL) | ((uint64_t)flags & 0x7ULL);
}

/*
 * Unmap a single page. If any intermediate table is missing, the page
 * was never mapped and there is nothing to do.
 */
void vmm_unmap_page(uint64_t cr3phys, uint64_t virtaddr) {
    uint64_t *pml4 = (uint64_t *)VIRT(cr3phys);
    PgIndex pi = vmm_pageindex(virtaddr);

    uint64_t *pml3 = vmm_nexttable(pml4, pi.pml4, false);
    if (pml3 == NULL) {
        return;
    }
    uint64_t *pml2 = vmm_nexttable(pml3, pi.pml3, false);
    if (pml2 == NULL) {
        return;
    }
    uint64_t *pml1 = vmm_nexttable(pml2, pi.pml2, false);
    if (pml1 == NULL) {
        return;
    }

    uint64_t *pte = &pml1[pi.pml1];
    *pte &= ~VMM_PG_PRESENT;
}

/*
 * Map a page-aligned range. `size` and both start addresses must be
 * multiples of VMM_PAGE_SIZE; otherwise the request is silently dropped.
 */
void vmm_map_range(uint64_t cr3phys, void *virtstart, void *physstart,
                   size_t size, uint32_t flags) {
    if (size % VMM_PAGE_SIZE != 0 ||
        (uint64_t)virtstart % VMM_PAGE_SIZE != 0 ||
        (uint64_t)physstart % VMM_PAGE_SIZE != 0) {
        return;
    }

    spinlock_acquire(&spinlock);

    uint8_t *vaddr = (uint8_t *)virtstart;
    uint8_t *paddr = (uint8_t *)physstart;
    uint8_t *end = (uint8_t *)virtstart + size;

    for (; vaddr < end; vaddr += VMM_PAGE_SIZE, paddr += VMM_PAGE_SIZE) {
        vmm_map_page(cr3phys, (uint64_t)vaddr, (uint64_t)paddr, flags);
    }

    spinlock_release(&spinlock);
}

/*
 * Unmap a page-aligned range. `physstart` is only used for the alignment
 * check; the physical frames themselves are not freed here.
 */
void vmm_unmap_range(uint64_t cr3phys, void *virtstart, void *physstart,
                     size_t size) {
    if (size % VMM_PAGE_SIZE != 0 ||
        (uint64_t)virtstart % VMM_PAGE_SIZE != 0 ||
        (uint64_t)physstart % VMM_PAGE_SIZE != 0) {
        return;
    }

    spinlock_acquire(&spinlock);

    uint8_t *vaddr = (uint8_t *)virtstart;
    uint8_t *end = vaddr + size;

    for (; vaddr < end; vaddr += VMM_PAGE_SIZE) {
        vmm_unmap_page(cr3phys, (uint64_t)vaddr);
    }

    spinlock_release(&spinlock);
}

/* Copy every PML4 entry of the kernel address space into `targetcr3`. */
void vmm_map_kern(uint64_t targetcr3) {
    uint64_t *kcr3 = (uint64_t *)VIRT(KERNEL_CR3);
    uint64_t *cr3 = (uint64_t *)VIRT(targetcr3);
    for (size_t i = 0; i < 512; i++) {
        cr3[i] = kcr3[i];
    }
}

/*
 * Allocate a fresh PML4 for a user process. The higher half (entries
 * 256-511) is shared with the kernel; the lower half starts out empty.
 */
uint64_t vmm_userproc_pml4_phys(void) {
    uint8_t *cr3phys = pmm_alloc(1);
    uint64_t phys = (uint64_t)cr3phys;
    memset(VIRT(phys), 0, VMM_PAGE_SIZE);

    uint64_t *kcr3 = (uint64_t *)VIRT(KERNEL_CR3);
    uint64_t *pml4 = (uint64_t *)VIRT(phys);
    for (size_t i = 256; i < 512; i++) {
        pml4[i] = kcr3[i];
    }

    return phys;
}

/* Record the bootloader-provided page tables as the kernel address space. */
void vmm_init(void) {
    spinlock_init(&spinlock);
    KERNEL_CR3 = vmm_current_cr3();
}
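
/*
 * Usage sketch (illustrative only, compiled out): how a caller might map
 * and later unmap a device window with this API. The virtual and physical
 * addresses below are made-up placeholders, and the example assumes
 * VMM_PG_PRESENT and VMM_PG_RW are the usual x86-64 bit-0/bit-1 flags
 * used elsewhere in this file.
 */
#if 0
static void example_map_mmio(void) {
    /* Hypothetical 16 KiB MMIO window at physical 0xFE000000. */
    void *virt = (void *)0xFFFFFFFF90000000ULL;
    void *phys = (void *)0xFE000000ULL;
    size_t len = 4 * VMM_PAGE_SIZE;

    /* Both addresses and the size are page-aligned, so the range
     * helpers accept them; misaligned requests are silently dropped. */
    vmm_map_range(KERNEL_CR3, virt, phys, len, VMM_PG_PRESENT | VMM_PG_RW);

    /* ... access the device through `virt` ... */

    vmm_unmap_range(KERNEL_CR3, virt, phys, len);
}
#endif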