Document amd64 platform-specific code
This commit is contained in:
@@ -11,28 +11,36 @@
|
||||
#include <sys/mm.h>
|
||||
#include <sys/smp.h>
|
||||
|
||||
/* Page-table entry flag bits (low bits of a PML entry on AMD64).
 * The diffed source carried duplicate, undocumented definitions of
 * AMD64_PG_RW and AMD64_PG_USER; a single documented copy of each is kept. */

/// Present flag
#define AMD64_PG_PRESENT (1 << 0)

/// Writable flag
#define AMD64_PG_RW (1 << 1)

/// User-accessible flag
#define AMD64_PG_USER (1 << 2)
|
||||
|
||||
/// Auxiliary struct for page directory walking: the per-level table indices
/// (PML4 down to PML1) extracted from a virtual address.
struct pg_index {
    uint16_t pml4, pml3, pml2, pml1;
} PACKED;
|
||||
|
||||
/// Kernel page directory; cr3_paddr is filled in by mm_init() from the
/// CR3 value that is live at boot.
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};

/// Lock needed to sync between map/unmap operations and TLB shootdown
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
|
||||
|
||||
/// Get current value of CR3 register
///
/// @return the raw CR3 contents (physical address of the active top-level
///         page table, plus any control bits held in CR3's low bits)
static uintptr_t amd64_current_cr3 (void) {
    uintptr_t cr3;
    /* "memory" clobber keeps the compiler from reordering memory accesses
     * around the read. */
    __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
    return cr3;
}
|
||||
|
||||
/// Load kernel CR3 as current CR3
///
/// Switches the calling CPU onto the kernel page directory. Writing CR3
/// architecturally flushes this CPU's non-global TLB entries as a side effect.
void amd64_load_kernel_cr3 (void) {
    __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
}
|
||||
|
||||
/// Extract PML info from virtual address
|
||||
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
|
||||
struct pg_index ret;
|
||||
|
||||
@@ -44,6 +52,7 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/// Walk paging tables and allocate necessary structures along the way
|
||||
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
|
||||
uint64_t entry = table[entry_idx];
|
||||
physaddr_t paddr;
|
||||
@@ -68,6 +77,7 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool
|
||||
return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
|
||||
}
|
||||
|
||||
/// Convert generic memory management subsystem flags into AMD64-specific flags
|
||||
static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
|
||||
uint64_t flags = 0;
|
||||
|
||||
@@ -78,11 +88,16 @@ static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
|
||||
return flags;
|
||||
}
|
||||
|
||||
/// Reload the current CR3 value ON A LOCAL CPU
///
/// Re-writing CR3 with its own value flushes the executing CPU's non-global
/// TLB entries. This only affects the local CPU; remote CPUs are invalidated
/// via the TLB shootdown IRQ path.
static void amd64_reload_cr3 (void) {
    uint64_t cr3; /* scratch: holds the read-back CR3 between the two moves */
    __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
}
|
||||
|
||||
/**
|
||||
* @brief Map physical address to virtual address with flags. TLB needs to be flushed
|
||||
* afterwards.
|
||||
*/
|
||||
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
|
||||
spin_lock (&mm_lock);
|
||||
|
||||
@@ -124,10 +139,12 @@ done:
|
||||
spin_unlock (&mm_lock);
|
||||
}
|
||||
|
||||
/// Map a page into kernel page directory
|
||||
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
|
||||
mm_map_page (&kernel_pd, paddr, vaddr, flags);
|
||||
}
|
||||
|
||||
/// Unmap a virtual address. TLB needs to be flushed afterwards
|
||||
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
|
||||
spin_lock (&mm_lock);
|
||||
|
||||
@@ -167,14 +184,18 @@ done:
|
||||
spin_unlock (&mm_lock);
|
||||
}
|
||||
|
||||
/// Unmap a page from kernel page directory
|
||||
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
|
||||
mm_unmap_page (&kernel_pd, vaddr, flags);
|
||||
}
|
||||
|
||||
/// Lock kernel page directory
|
||||
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
|
||||
|
||||
/// Unlock kernel page directory
|
||||
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
|
||||
|
||||
/// Allocate a userspace-ready page directory
|
||||
uintptr_t mm_alloc_user_pd_phys (void) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
@@ -192,6 +213,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
|
||||
return cr3;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reload after map/unmap operation was performed. This function does the TLB
|
||||
* shootdown.
|
||||
*/
|
||||
void mm_reload (void) {
|
||||
spin_lock (&mm_lock);
|
||||
|
||||
@@ -204,6 +229,7 @@ void mm_reload (void) {
|
||||
spin_unlock (&mm_lock);
|
||||
}
|
||||
|
||||
/// TLB shootdown IRQ handler
|
||||
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
|
||||
(void)arg, (void)regs;
|
||||
|
||||
@@ -211,8 +237,13 @@ static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
|
||||
DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
|
||||
}
|
||||
|
||||
/**
 * @brief Continue initializing memory management subsystem for AMD64 after the
 * essential parts were initialized
 *
 * Registers the TLB shootdown handler on the TLB_SHOOTDOWN IRQ vector, marked
 * IRQ_INTERRUPT_SAFE. Presumably split from mm_init() so it can run once the
 * IRQ subsystem is up — TODO confirm the required init ordering.
 */
void mm_init2 (void) {
    irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
}
|
||||
|
||||
/// Initialize essentials for the AMD64 memory management subsystem
|
||||
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
|
||||
|
||||
Reference in New Issue
Block a user