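// x86_64 virtual memory manager for the HAL layer: 4-level page-table
// walking, page/range mapping, and per-process PML4 setup. Physical frames
// are accessed through VIRT(), which is assumed here to translate a physical
// address into the kernel's direct mapping of physical memory.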
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#include "vmm.h"
#include "hal/hal.h"
#include "bootinfo/bootinfo.h"
#include "pmm/pmm.h"
#include "paging.h"
#include "proc/proc.h"
#include "kprintf.h"
#include "spinlock/spinlock.h"

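// KERNEL_CR3 caches the physical address of the kernel's PML4, captured in
// hal_vmm_init(). The spinlock serializes the range map/unmap paths.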
uint64_t KERNEL_CR3 = 0;
SpinLock spinlock;

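// Read the current CR3 value: the physical address of the active PML4,
// plus whatever low control bits the CPU keeps in that register.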
uint64_t hal_vmm_current_cr3(void) {
    uint64_t cr3;
    asm volatile("mov %%cr3, %0" : "=r"(cr3));
    return cr3;
}

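// Split a virtual address into its four 9-bit page-table indices
// (PML4, PDPT, PD, PT), named pml4..pml1 here.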
PgIndex hal_vmm_pageindex(uint64_t vaddr) {
    PgIndex ret;

    ret.pml4 = (vaddr >> 39) & 0x1ff;
    ret.pml3 = (vaddr >> 30) & 0x1ff;
    ret.pml2 = (vaddr >> 21) & 0x1ff;
    ret.pml1 = (vaddr >> 12) & 0x1ff;

    return ret;
}

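// Return the virtual address of the table referenced by table[ent]. If the
// entry is not present and alloc is true, a fresh page is taken from the PMM,
// zeroed, and installed as USER|RW|PRESENT so the leaf entry alone decides
// access; the code assumes pmm_alloc() does not fail here. Without alloc,
// a missing entry yields NULL.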
uint64_t *hal_vmm_nexttable(uint64_t *table, uint64_t ent, bool alloc) {
    uint64_t entry = table[ent];
    uint64_t phys;

    if (entry & HAL_PG_PRESENT) {
        phys = entry & ~0xFFFULL;
    } else {
        if (!alloc) {
            return NULL;
        }

        uint8_t *newphys = pmm_alloc(1);
        phys = (uint64_t)newphys;
        hal_memset(VIRT(phys), 0, HAL_PAGE_SIZE);
        table[ent] = phys | HAL_PG_USER | HAL_PG_RW | HAL_PG_PRESENT;
    }

    return (uint64_t *)((uint8_t *)VIRT(phys));
}

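// Map one 4 KiB page in the address space rooted at cr3phys, allocating
// intermediate tables as needed. Only the low three flag bits
// (PRESENT/RW/USER) are kept. No TLB invalidation happens here, so callers
// that remap a live address need their own invlpg or CR3 reload.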
void hal_vmm_map_page(uint64_t cr3phys, uint64_t virtaddr, uint64_t physaddr, uint32_t flags) {
    uint64_t *pml4 = (uint64_t *)VIRT(cr3phys);
    PgIndex pi = hal_vmm_pageindex(virtaddr);

    uint64_t *pml3 = hal_vmm_nexttable(pml4, pi.pml4, true);
    uint64_t *pml2 = hal_vmm_nexttable(pml3, pi.pml3, true);
    uint64_t *pml1 = hal_vmm_nexttable(pml2, pi.pml2, true);
    uint64_t *pte = &pml1[pi.pml1];

    *pte = (physaddr & ~0xFFFULL) | ((uint64_t)flags & 0x7ULL);
}

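// Clear the PRESENT bit of the PTE for virtaddr in the address space rooted
// at cr3phys. Missing intermediate tables are treated as "already unmapped"
// rather than dereferenced. physaddr is unused and kept only for symmetry
// with hal_vmm_map_page(). As with mapping, TLB flushing is left to the caller.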
void hal_vmm_unmap_page(uint64_t cr3phys, uint64_t virtaddr, uint64_t physaddr) {
    (void)physaddr;

    uint64_t *pml4 = (uint64_t *)VIRT(cr3phys);
    PgIndex pi = hal_vmm_pageindex(virtaddr);

    uint64_t *pml3 = hal_vmm_nexttable(pml4, pi.pml4, false);
    if (pml3 == NULL) {
        return;
    }
    uint64_t *pml2 = hal_vmm_nexttable(pml3, pi.pml3, false);
    if (pml2 == NULL) {
        return;
    }
    uint64_t *pml1 = hal_vmm_nexttable(pml2, pi.pml2, false);
    if (pml1 == NULL) {
        return;
    }

    uint64_t *pte = &pml1[pi.pml1];
    *pte &= ~HAL_PG_PRESENT;
}

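// Map the page-aligned range [virtstart, virtstart + size) onto the physical
// range starting at physstart. Misaligned arguments are rejected silently.
// The whole walk runs under the module spinlock.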
void hal_vmm_map_range(uint64_t cr3phys, void *virtstart, void *physstart, size_t size, uint32_t flags) {
    if (size % HAL_PAGE_SIZE != 0 || (uint64_t)virtstart % HAL_PAGE_SIZE != 0 || (uint64_t)physstart % HAL_PAGE_SIZE != 0) {
        return;
    }

    spinlock_acquire(&spinlock);

    uint8_t *vaddr = (uint8_t *)virtstart;
    uint8_t *paddr = (uint8_t *)physstart;
    uint8_t *end = (uint8_t *)virtstart + size;

    for (; vaddr < end; vaddr += HAL_PAGE_SIZE, paddr += HAL_PAGE_SIZE) {
        hal_vmm_map_page(cr3phys, (uint64_t)vaddr, (uint64_t)paddr, flags);
    }

    spinlock_release(&spinlock);
}

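// Unmap a page-aligned range, mirroring hal_vmm_map_range(). The physical
// start address is only carried along to keep the two walks symmetric;
// hal_vmm_unmap_page() ignores it.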
void hal_vmm_unmap_range(uint64_t cr3phys, void *virtstart, void *physstart, size_t size) {
    if (size % HAL_PAGE_SIZE != 0 || (uint64_t)virtstart % HAL_PAGE_SIZE != 0 || (uint64_t)physstart % HAL_PAGE_SIZE != 0) {
        return;
    }

    spinlock_acquire(&spinlock);

    uint8_t *vaddr = (uint8_t *)virtstart;
    uint8_t *paddr = (uint8_t *)physstart;
    uint8_t *end = vaddr + size;

    for (; vaddr < end; vaddr += HAL_PAGE_SIZE, paddr += HAL_PAGE_SIZE) {
        hal_vmm_unmap_page(cr3phys, (uint64_t)vaddr, (uint64_t)paddr);
    }

    spinlock_release(&spinlock);
}

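// Copy every PML4 entry of the kernel address space into the PML4 at
// targetcr3, so the target shares the kernel's page tables.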
void hal_vmm_map_kern(uint64_t targetcr3) {
    uint64_t *kcr3 = (uint64_t *)VIRT(KERNEL_CR3);
    uint64_t *cr3 = (uint64_t *)VIRT(targetcr3);

    for (size_t i = 0; i < 512; i++) {
        cr3[i] = kcr3[i];
    }
}

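// Allocate and zero a fresh PML4 for a user process, copying in the
// higher-half kernel entries (indices 256-511). Returns its physical
// address, suitable for loading into CR3. As above, pmm_alloc() is
// assumed not to fail.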
uint64_t hal_vmm_userproc_pml4_phys(void) {
    uint8_t *cr3phys = pmm_alloc(1);
    uint64_t phys = (uint64_t)cr3phys;
    hal_memset(VIRT(phys), 0, HAL_PAGE_SIZE);

    uint64_t *kcr3 = (uint64_t *)VIRT(KERNEL_CR3);
    uint64_t *pml4 = (uint64_t *)VIRT(phys);
    for (size_t i = 256; i < 512; i++) {
        pml4[i] = kcr3[i];
    }

    return phys;
}

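// Initialize the module spinlock and capture the currently loaded CR3
// as KERNEL_CR3.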
void hal_vmm_init(void) {
    spinlock_init(&spinlock);
    KERNEL_CR3 = hal_vmm_current_cr3();
}