Introduce concept of Process Resources (PR_MEM), implement necessary syscalls
All checks were successful
Build documentation / build-and-deploy (push) Successful in 42s
@@ -251,6 +251,148 @@ void mm_reload (void) {
    spin_unlock (&mm_lock);
}

/* Return true if vaddr is backed by a present mapping in pd's page tables. */
bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    spin_lock (&mm_lock);

    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool ret = false;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        goto done;

    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        goto done;

    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        goto done;

    uint64_t pte = pml1[pg_index.pml1];
    ret = (pte & AMD64_PG_PRESENT) != 0;

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock);

    spin_unlock (&mm_lock);

    return ret;
}
/* Validate every byte of [vaddr, vaddr + size); mm_validate is called with
 * flags = 0 so pd->lock is not re-taken inside the loop. */
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
    bool ok = true;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock);

    for (size_t i = 0; i < size; i++) {
        ok = mm_validate (pd, vaddr + i, 0);
        if (!ok)
            goto done;
    }

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock);

    return ok;
}
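For illustration, a minimal sketch of how a syscall path might use mm_validate_buffer before touching user memory; the helper name and calling convention are assumptions, not part of this commit:

    /* Hypothetical helper, not part of this commit: reject a user buffer unless
     * every byte is backed by a present mapping. MM_PD_LOCK is passed because the
     * caller is assumed not to hold pd->lock already. */
    static bool syscall_check_user_buffer (struct proc* proc, uintptr_t user_buf, size_t len) {
        if (user_buf == 0 || len == 0)
            return false;

        return mm_validate_buffer (&proc->pd, user_buf, len, MM_PD_LOCK);
    }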
/* Walk every table level looking for a PTE whose frame matches paddr and
 * rebuild the corresponding virtual address from the table indices. */
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
    spin_lock (&mm_lock);

    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uintptr_t ret = 0;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);

    for (size_t i4 = 0; i4 < 512; i4++) {
        if (!(pml4[i4] & AMD64_PG_PRESENT))
            continue;

        uint64_t* pml3 = (uint64_t*)((uintptr_t)hhdm->offset + (pml4[i4] & ~0xFFFULL));
        for (size_t i3 = 0; i3 < 512; i3++) {
            if (!(pml3[i3] & AMD64_PG_PRESENT))
                continue;

            uint64_t* pml2 = (uint64_t*)((uintptr_t)hhdm->offset + (pml3[i3] & ~0xFFFULL));
            for (size_t i2 = 0; i2 < 512; i2++) {
                if (!(pml2[i2] & AMD64_PG_PRESENT))
                    continue;

                uint64_t* pml1 = (uint64_t*)((uintptr_t)hhdm->offset + (pml2[i2] & ~0xFFFULL));
                for (size_t i1 = 0; i1 < 512; i1++) {
                    if ((pml1[i1] & AMD64_PG_PRESENT) && ((pml1[i1] & ~0xFFFULL) == (paddr & ~0xFFFULL))) {
                        struct pg_index idx = {i4, i3, i2, i1};
                        ret = (((uint64_t)idx.pml4 << 39) | ((uint64_t)idx.pml3 << 30) |
                               ((uint64_t)idx.pml2 << 21) | ((uint64_t)idx.pml1 << 12) | (paddr & 0xFFFULL));
                        goto done;
                    }
                }
            }
        }
    }

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock);

    spin_unlock (&mm_lock);

    return ret;
}

/* Walk the page tables for vaddr and return the backing physical address,
 * or 0 if the address is not mapped. */
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    spin_lock (&mm_lock);

    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uintptr_t ret = 0;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        goto done;

    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        goto done;

    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        goto done;

    uint64_t pte = pml1[pg_index.pml1];

    if (!(pte & AMD64_PG_PRESENT))
        goto done;

    ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock);

    spin_unlock (&mm_lock);

    return ret;
}
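As a usage sketch (the helper below is assumed, not from this commit), mm_v2p composes with the Limine HHDM offset to give the kernel a directly usable pointer for a page mapped in another address space:

    /* Hypothetical helper, not part of this commit: translate a user virtual
     * address into a kernel-accessible pointer via mm_v2p and the HHDM offset. */
    static void* user_ptr_to_kernel (struct pd* pd, uintptr_t uvaddr) {
        struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

        uintptr_t paddr = mm_v2p (pd, uvaddr, MM_PD_LOCK);
        if (paddr == 0)
            return NULL; /* not mapped or not present, following mm_v2p's 0-on-failure convention */

        return (void*)(paddr + (uintptr_t)hhdm->offset);
    }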
/* TLB shootdown IRQ handler */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
    (void)arg, (void)regs;
@@ -1,12 +1,14 @@
#include <amd64/gdt.h>
#include <aux/elf.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
@@ -21,6 +23,10 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {

    memset (proc, 0, sizeof (*proc));

    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);

    proc->pd.lock = SPIN_LOCK_INIT;
    proc->pd.cr3_paddr = mm_alloc_user_pd_phys ();
    if (proc->pd.cr3_paddr == 0) {
@@ -28,21 +34,30 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
        return NULL;
    }

    proc->pdata.kernel_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
    if (proc->pdata.kernel_stack == PMM_ALLOC_ERR) {
    int kstk_rid = atomic_fetch_add (&proc->rids, 1);
    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE};
    struct proc_resource* kstk_r =
        proc_create_resource (proc, kstk_rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
    if (kstk_r == NULL) {
        free (proc);
        return NULL;
    }
    uintptr_t kernel_stack = proc->pdata.kernel_stack;
    proc->pdata.kernel_stack += (uintptr_t)hhdm->offset + KSTACK_SIZE;

    proc->pdata.user_stack = pmm_alloc (USTACK_SIZE / PAGE_SIZE);
    if (proc->pdata.user_stack == PMM_ALLOC_ERR) {
    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;

    int ustk_rid = atomic_fetch_add (&proc->rids, 1);
    struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE};
    struct proc_resource* ustk_r =
        proc_create_resource (proc, ustk_rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
    if (ustk_r == NULL) {
        kstk_r->ops.cleanup (kstk_r);
        free (kstk_r);
        free (proc);
        pmm_free (kernel_stack, USTACK_SIZE / PAGE_SIZE);
        return NULL;
    }

    proc->pdata.user_stack = ustk_r->u.mem.paddr;

    proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
              MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);

@@ -53,9 +68,6 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
    proc->pdata.regs.rflags = 0x202;
    proc->pdata.regs.cs = GDT_UCODE | 0x03;
    proc->pdata.regs.rip = aux.entry;
    proc->lock = SPIN_LOCK_INIT;
    atomic_store (&proc->state, PROC_READY);
    proc->pid = atomic_fetch_add (&pids, 1);

    return proc;
}
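A minimal sketch of what the PR_MEM init/cleanup pair used above could look like; proc_create_resource's internals are not in this diff, so the u.mem.pages field and the function names here are assumptions:

    /* Hypothetical PR_MEM backing, not part of this commit: allocate the requested
     * number of physical pages and remember them for cleanup. */
    static bool pr_mem_init_sketch (struct proc_resource* r, struct proc_resource_mem_init* init) {
        r->u.mem.paddr = pmm_alloc (init->pages);
        if (r->u.mem.paddr == PMM_ALLOC_ERR)
            return false;

        r->u.mem.pages = init->pages; /* assumed field, kept for symmetric freeing */
        return true;
    }

    static void pr_mem_cleanup_sketch (struct proc_resource* r) {
        pmm_free (r->u.mem.paddr, r->u.mem.pages);
    }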
@@ -63,6 +75,8 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
void proc_cleanup (struct proc* proc) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

    proc_cleanup_resources (proc);

    struct list_node_link *mapping_link, *mapping_link_tmp;
    spin_lock (&proc->pd.lock);

@@ -70,9 +84,6 @@ void proc_cleanup (struct proc* proc) {
        struct proc_mapping* mapping =
            list_entry (mapping_link, struct proc_mapping, proc_mappings_link);

        DEBUG ("mapping vaddr=%p, paddr=%p, size=%zu\n", mapping->vaddr, mapping->paddr, mapping->size);

        pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
        list_remove (proc->mappings, mapping_link);
        free (mapping);
    }
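For context, a sketch of how proc_cleanup_resources might release every resource before the mappings are torn down; the resource list name, link field, and iterator macro are assumptions, since the implementation is not shown in this diff:

    /* Hypothetical cleanup pass, not part of this commit: invoke each resource's
     * cleanup op and free it, mirroring the mapping loop above. */
    static void proc_cleanup_resources_sketch (struct proc* proc) {
        struct list_node_link *r_link, *r_link_tmp;

        list_for_each_safe (r_link, r_link_tmp, proc->resources) { /* assumed list and iterator */
            struct proc_resource* r = list_entry (r_link, struct proc_resource, link); /* assumed link field */

            if (r->ops.cleanup != NULL)
                r->ops.cleanup (r);

            list_remove (proc->resources, r_link);
            free (r);
        }
    }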