Redesign userspace memory management
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s

This commit is contained in:
2026-01-27 17:04:08 +01:00
parent 600886a7ee
commit b388b30b24
23 changed files with 195 additions and 484 deletions

View File

@@ -9,7 +9,6 @@
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/kpproc_fb.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
@@ -34,104 +33,12 @@ static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
static atomic_int sched_cycles = 0;
/* kernel pseudo process */
static struct proc kpproc;
/* Return true iff the buffer starts with the ELF magic number
 * (0x7F 'E' 'L' 'F'); used to reject non-ELF images before loading. */
static bool proc_check_elf (uint8_t* elf) {
    return (elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F');
}
/*
 * Record and install a contiguous physical-to-virtual mapping for the
 * process' group.
 *
 * A proc_mapping bookkeeping node is appended to the group's mapping list and
 * one page-table entry per page is created via mm_map_page, all under the
 * group's page-directory lock.
 *
 * Returns false only when the bookkeeping node cannot be allocated;
 * true otherwise.
 */
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
               uint32_t flags) {
    struct proc_mapping* node = malloc (sizeof (*node));
    if (node == NULL)
        return false;

    node->paddr = start_paddr;
    node->vaddr = start_vaddr;
    node->size = pages * PAGE_SIZE;

    /* We hold the page-directory lock for the whole operation ourselves, so
     * strip the flags that would make mm_map_page lock or reload on its own. */
    flags &= ~(MM_PD_LOCK | MM_PD_RELOAD);

    spin_lock_ctx_t ctxprpd;
    spin_lock (&proc->procgroup->pd.lock, &ctxprpd);
    list_append (proc->procgroup->mappings, &node->proc_mappings_link);
    for (size_t i = 0; i < pages; i++) {
        mm_map_page (&proc->procgroup->pd, start_paddr + i * PAGE_SIZE,
                     start_vaddr + i * PAGE_SIZE, flags);
    }
    spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
    return true;
}
/*
 * Remove the virtual range [start_vaddr, start_vaddr + pages * PAGE_SIZE)
 * from the process group's address space.
 *
 * Every recorded proc_mapping that overlaps the range is adjusted so the
 * bookkeeping list stays consistent: trimmed on the left or right, deleted
 * entirely, or split into two when the range lands in its middle.  The
 * page-table entries themselves are then torn down page by page.
 *
 * Returns false only when the spare split node cannot be allocated;
 * true otherwise.
 *
 * NOTE(review): the backing physical memory is not freed here — presumably
 * it is owned by the resource layer; confirm against callers.
 */
bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false;
spin_lock_ctx_t ctxprpd;
/* Pre-allocate the node a middle split would need, so malloc (and its
 * failure path) never happens while the pd lock is held.  At most one split
 * can occur per call, since a single range can only bisect one mapping. */
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;
spin_lock (&proc->procgroup->pd.lock, &ctxprpd);
list_foreach (proc->procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
uintptr_t m_end = mapping->vaddr + mapping->size;
/* check overlap */
if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
/* split in the middle: keep the head in `mapping`, put the part after the
 * unmapped hole into the pre-allocated tail node */
if ((start_vaddr > mapping->vaddr) && (end_vaddr < m_end)) {
tail_mapping->vaddr = end_vaddr;
tail_mapping->paddr = mapping->paddr + (end_vaddr - mapping->vaddr);
tail_mapping->size = m_end - end_vaddr;
mapping->size = start_vaddr - mapping->vaddr;
list_insert_after (proc->procgroup->mappings, &mapping->proc_mappings_link,
&tail_mapping->proc_mappings_link);
used_tail_mapping = true;
/* a middle split consumes the whole range — no other mapping can overlap */
break;
} else if ((start_vaddr <= mapping->vaddr) && (end_vaddr < m_end)) { /* shrink left */
/* advance the mapping's start past the unmapped prefix */
size_t diff = end_vaddr - mapping->vaddr;
mapping->vaddr += diff;
mapping->paddr += diff;
mapping->size -= diff;
} else if ((start_vaddr > mapping->vaddr) && (end_vaddr >= m_end)) { /* shrink right */
mapping->size = start_vaddr - mapping->vaddr;
} else { /* full overlap */
/* safe during iteration: list_foreach caches the next link in
 * mapping_link_tmp before the body runs */
list_remove (proc->procgroup->mappings, &mapping->proc_mappings_link);
free (mapping);
}
}
}
/* no middle split happened — release the unused spare node */
if (!used_tail_mapping)
free (tail_mapping);
/* tear down the page-table entries for the whole range, still under the
 * pd lock so the list and the tables change atomically w.r.t. readers */
for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
mm_unmap_page (&proc->procgroup->pd, vpage, 0);
}
spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
return true;
}
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
struct elf_aux aux;
@@ -156,23 +63,16 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
int rid = procgroup_get_sys_rid (proc->procgroup);
struct proc_resource* r = proc_create_resource_mem (proc->procgroup, rid, blks, 0, false);
if (r == NULL) {
DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
}
uintptr_t p_addr = r->u.mem.paddr;
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
if (phdr->p_flags & PF_W)
pg_flags |= MM_PG_RW;
proc_map (proc, p_addr, v_addr, blks, pg_flags);
uintptr_t p_addr;
procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
} break;
}
}
@@ -347,59 +247,12 @@ static void proc_irq_sched (void* arg, void* regs) {
proc_sched ();
}
/*
 * Initialize the kernel pseudo-process (PID 0): create its process group,
 * register it in the global process tree, and publish boot-time kernel
 * resources (currently the Limine framebuffer description) as a memory
 * resource of the group.
 */
static void proc_kpproc_init (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
memset (&kpproc, 0, sizeof (kpproc));
kpproc.lock = SPIN_LOCK_INIT;
kpproc.state = PROC_PSEUDO;  /* not schedulable like a normal process */
kpproc.pid = 0;
kpproc.cpu = thiscpu;
kpproc.procgroup = procgroup_create ();
procgroup_attach (kpproc.procgroup, &kpproc);
/* keyed by pid, so PID 0 is reserved for the kernel pseudo-process */
rbtree_insert (struct proc, &proc_tree, &kpproc.proc_tree_link, proc_tree_link, pid);
/* prepare kernel resources */
{
/* frame buffer: snapshot the first Limine framebuffer's geometry and pixel
 * format into a kpproc_fb record.
 * NOTE(review): response and framebuffer count are not checked — presumably
 * guaranteed earlier in boot; confirm. */
struct limine_framebuffer_response* fb = limine_framebuffer_request.response;
struct kpproc_fb fb_info = {
/* Limine hands back an HHDM virtual address; convert to physical */
.paddr = (uintptr_t)fb->framebuffers[0]->address - (uintptr_t)hhdm->offset,
.w = fb->framebuffers[0]->width,
.h = fb->framebuffers[0]->height,
.pitch = fb->framebuffers[0]->pitch,
.bpp = fb->framebuffers[0]->bpp,
.red_mask_size = fb->framebuffers[0]->red_mask_size,
.red_mask_shift = fb->framebuffers[0]->red_mask_shift,
.green_mask_size = fb->framebuffers[0]->green_mask_size,
.green_mask_shift = fb->framebuffers[0]->green_mask_shift,
.blue_mask_size = fb->framebuffers[0]->blue_mask_size,
.blue_mask_shift = fb->framebuffers[0]->blue_mask_shift,
};
DEBUG ("Framebuffer address %p\n", fb_info.paddr);
/* copy the record into freshly allocated physical pages and expose them
 * to the group as resource id 0.
 * NOTE(review): pmm_alloc result is unchecked — OOM this early is likely
 * considered fatal, but confirm. */
size_t pages = align_up (sizeof (fb_info), PAGE_SIZE) / PAGE_SIZE;
uintptr_t fb_info_memblk_paddr = pmm_alloc (pages);
memcpy ((struct kpproc_fb*)((uintptr_t)hhdm->offset + fb_info_memblk_paddr), &fb_info,
sizeof (fb_info));
proc_create_resource_mem (kpproc.procgroup, 0, pages, fb_info_memblk_paddr, true);
}
}
void proc_init (void) {
#if defined(__x86_64__)
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
#endif
proc_kpproc_init ();
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);