/*
Copyright 2025 Kamil Kowalczyk

Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <libk/util.h>
#include <libk/types.h>
#include <libk/string.h>
#include <sys/mm.h>
#include <sync/spinlock.h>
#include <mm/bba.h>
#include <mm/pmm.h>
#include <config.h>

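/*
 * i386 two-level paging: a 32-bit virtual address splits into a 10-bit
 * page directory index (bits 31-22), a 10-bit page table index (bits 21-12)
 * and a 12-bit page offset. For example, 0xC0401234 decomposes into
 * PD_INDEX = 0x301, PT_INDEX = 0x001 and offset = 0x234.
 */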
#define PD_INDEX(va) (((va) >> 22) & 0x3FF)
#define PT_INDEX(va) (((va) >> 12) & 0x3FF)
#define PD_SHIFT 22
#define PT_SHIFT 12

/* Initial page directory the kernel boots with; the symbol is provided
   outside this file (presumably by the early boot code or linker script),
   and mm_init() below adopts it as the kernel page directory. */
extern char __init_pd;

static struct page_dir *init_pd;

struct page_dir *mm_get_kernel_pd(void) { return init_pd; }

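/*
 * Allocate an empty page directory: both the struct page_dir and the
 * backing directory page come from bba_alloc().
 */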
static struct page_dir *mm_alloc_pd(void) {
    struct page_dir *pd = (struct page_dir *)bba_alloc();
    if (pd == 0)
        return 0;
    memset(pd, 0, sizeof(*pd));
    sl_init(&pd->sl, "pd");
    pd->pd = (volatile uint32_t *)bba_alloc();
    if (pd->pd == 0)
        return 0;
    /* bba_alloc() pages are not cleared (mm_map_page() zeroes fresh page
       tables by hand), so clear the new directory to avoid stale PDEs */
    memset((void *)pd->pd, 0, PAGE_SIZE);
    memset(pd->pt_refcount, 0, sizeof(pd->pt_refcount));
    return pd;
}

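/*
 * Wrap an already-existing page directory (e.g. the one set up at boot)
 * in a freshly allocated struct page_dir without touching its entries.
 */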
static struct page_dir *mm_alloc_existing_pd(uptr_t ptr) {
    struct page_dir *pd = (struct page_dir *)bba_alloc();
    if (pd == 0)
        return 0;
    memset(pd, 0, sizeof(*pd));
    sl_init(&pd->sl, "pd");
    pd->pd = (volatile uint32_t *)ptr;
    memset(pd->pt_refcount, 0, sizeof(pd->pt_refcount));
    return pd;
}

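/*
 * Map the page containing vaddr to the physical frame containing paddr.
 * The page table is allocated on demand and pt_refcount tracks how many
 * live mappings each table holds. Pass PF_LOCK to take pd->sl around the
 * update; that bit is stripped before the flags reach the PTE.
 *
 * Usage sketch (hypothetical, page-aligned addresses assumed):
 *
 *     struct page_dir *kpd = mm_get_kernel_pd();
 *     mm_map_page(kpd, 0xC0400000, 0x00400000,
 *                 PF_PRESENT | PF_WRITABLE | PF_LOCK);
 */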
void mm_map_page(struct page_dir *pd, uptr_t vaddr, uptr_t paddr, uint32_t flags) {
    if (!(flags & PF_PRESENT))
        return;

    if (flags & PF_LOCK)
        sl_lock(&pd->sl);

    uint32_t pd_idx = PD_INDEX(vaddr);
    uint32_t pt_idx = PT_INDEX(vaddr);
    volatile uint32_t *pt;

    if (!(pd->pd[pd_idx] & PF_PRESENT)) {
        /* no page table for this 4 MiB region yet - allocate and zero one */
        pt = (volatile uint32_t *)bba_alloc();
        if (pt == 0) {
            dbgf("mm_map_page(): ran out of BBA memory to alloc a PT\n");
            goto done;
        }

        for (usize_t i = 0; i < PAGE_SIZE/sizeof(uint32_t); i++)
            pt[i] = 0;
        uptr_t phys_pt = (uptr_t)pt - VIRT_BASE;
        pd->pd[pd_idx] = phys_pt | PF_PRESENT | PF_WRITABLE;
        pd->pt_refcount[pd_idx] = 1;
    } else {
        uptr_t pt_phys = pd->pd[pd_idx] & ~(PAGE_SIZE - 1);
        pt = (volatile uint32_t *)(pt_phys + VIRT_BASE);

        /* only count the mapping if this slot was previously empty */
        if (!(pt[pt_idx] & PF_PRESENT))
            pd->pt_refcount[pd_idx]++;
    }

    pt[pt_idx] = (paddr & ~(PAGE_SIZE - 1)) | (flags & ~PF_LOCK);

done:
    if (flags & PF_LOCK)
        sl_unlock(&pd->sl);
}

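/*
 * Remove the mapping for vaddr, if any. The page table's reference count
 * is dropped and the table itself is returned to bba once it holds no
 * present entries; invlpg flushes the stale TLB entry for vaddr.
 */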
void mm_unmap_page(struct page_dir *pd, uptr_t vaddr, uint32_t flags) {
    if (flags & PF_LOCK)
        sl_lock(&pd->sl);

    uint32_t pd_idx = PD_INDEX(vaddr);
    uint32_t pt_idx = PT_INDEX(vaddr);
    volatile uint32_t *pt;

    if (!(pd->pd[pd_idx] & PF_PRESENT)) {
        /* nothing mapped in this 4 MiB region */
        goto done;
    } else {
        uptr_t pt_phys = pd->pd[pd_idx] & ~(PAGE_SIZE - 1);
        pt = (volatile uint32_t *)(pt_phys + VIRT_BASE);
    }

    if (pt[pt_idx] & PF_PRESENT) {
        pt[pt_idx] &= ~PF_PRESENT;

        if (pd->pt_refcount[pd_idx] > 0)
            pd->pt_refcount[pd_idx]--;

        /* last mapping gone - release the page table and clear the PDE */
        if (pd->pt_refcount[pd_idx] == 0) {
            bba_free((uptr_t)pt);
            pd->pd[pd_idx] = 0;
        }
    }

    __asm__ volatile ("invlpg (%0)" :: "r"(vaddr) : "memory");

done:
    if (flags & PF_LOCK)
        sl_unlock(&pd->sl);
}

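/*
 * Walk the page tables and return the physical address backing vaddr
 * (frame base plus page offset), or 0 if the address is not mapped.
 */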
uptr_t mm_translate_v2p(struct page_dir *pd, uptr_t vaddr, uint32_t flags) {
    if (flags & PF_LOCK)
        sl_lock(&pd->sl);

    uint32_t pd_idx = PD_INDEX(vaddr);
    uint32_t pt_idx = PT_INDEX(vaddr);
    volatile uint32_t *pt;
    uptr_t paddr = 0;

    if (!(pd->pd[pd_idx] & PF_PRESENT))
        goto done;

    uptr_t pt_phys = pd->pd[pd_idx] & ~(PAGE_SIZE - 1);
    pt = (volatile uint32_t *)(pt_phys + VIRT_BASE);
    if (!(pt[pt_idx] & PF_PRESENT))
        goto done;

    paddr = (pt[pt_idx] & ~(PAGE_SIZE - 1)) | (vaddr & (PAGE_SIZE - 1));

done:
    if (flags & PF_LOCK)
        sl_unlock(&pd->sl);
    return paddr;
}

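/*
 * Reverse lookup: scan every present PDE/PTE for a frame matching paddr and
 * rebuild the virtual address from the indices. Worst case this touches all
 * 1024 * 1024 entries, so it is best kept off hot paths.
 */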
uptr_t mm_translate_p2v(struct page_dir *pd, uptr_t paddr, uint32_t flags) {
    uptr_t vaddr = 0;
    bool_t found = 0;

    if (flags & PF_LOCK)
        sl_lock(&pd->sl);

    for (uint32_t pd_idx = 0; pd_idx < 1024; pd_idx++) {
        if (!(pd->pd[pd_idx] & PF_PRESENT))
            continue;

        uptr_t pt_phys = pd->pd[pd_idx] & ~(PAGE_SIZE - 1);
        volatile uint32_t *pt = (volatile uint32_t *)(pt_phys + VIRT_BASE);

        for (uint32_t pt_idx = 0; pt_idx < 1024; pt_idx++) {
            if (!(pt[pt_idx] & PF_PRESENT))
                continue;

            uptr_t pt_addr = pt[pt_idx] & ~(PAGE_SIZE - 1);
            if (pt_addr == (paddr & ~(PAGE_SIZE - 1))) {
                /* rebuild the virtual address from the two indices and the page offset */
                vaddr = (pd_idx << PD_SHIFT) | (pt_idx << PT_SHIFT) | (paddr & (PAGE_SIZE - 1));
                found = 1;
                break;
            }
        }

        if (found)
            break;
    }

    if (flags & PF_LOCK)
        sl_unlock(&pd->sl);

    return found ? vaddr : 0;
}

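/*
 * Activate a page directory by loading its physical address into CR3.
 * Assumes pd->pd points into the higher-half mapping at VIRT_BASE.
 */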
void mm_load_pd(struct page_dir *pd) {
    uptr_t phys = (uptr_t)pd->pd - VIRT_BASE;
    __asm__ volatile ("movl %0, %%cr3" :: "r"(phys));
}

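/*
 * Adopt the page directory the kernel booted with (__init_pd) as the
 * kernel page directory returned by mm_get_kernel_pd().
 */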
void mm_init(void) {
    init_pd = mm_alloc_existing_pd((uptr_t)&__init_pd);
}