Clean up AMD64 memory management code, remove dependency on pd.lock
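
The mapping helpers no longer take MM_PD_LOCK / MM_PD_RELOAD: callers serialize page-directory updates themselves and trigger the TLB shootdown explicitly via mm_reload(). A minimal sketch of the intended call pattern for the kernel page directory, based only on the declarations added in the sys/mm.h hunk below; map_device_example and its addresses are made up for illustration, and the boot-time callers changed below run single-threaded and therefore skip the lock:

    /* Hypothetical caller -- illustration only, not part of this commit. */
    #include <sys/mm.h>   /* include path inferred from the _KERNEL_SYS_MM_H guard */

    static void map_device_example (uintptr_t mmio_paddr, uintptr_t mmio_vaddr) {
        spin_lock_ctx_t ctx;

        /* Explicit serialization replaces the old MM_PD_LOCK flag. */
        mm_kernel_lock (&ctx);
        mm_map_kernel_page (mmio_paddr, mmio_vaddr, MM_PG_PRESENT | MM_PG_RW);
        mm_kernel_unlock (&ctx);

        /* mm_map_kernel_page reloads CR3 on the local CPU; mm_reload IPIs
         * every CPU in case the old translation is still cached there. */
        mm_reload ();
    }
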
@@ -6,6 +6,8 @@ PHDRS {
    text PT_LOAD;
    rodata PT_LOAD;
    data PT_LOAD;
    tls PT_TLS;
    bss PT_LOAD;
}

SECTIONS {
@@ -21,16 +23,36 @@ SECTIONS {
        *(.rodata .rodata.*)
    } :rodata

    .note.gnu.build-id : {
        *(.note.gnu.build-id)
    } :rodata

    . = ALIGN(CONSTANT(MAXPAGESIZE));

    .data : {
        *(.data .data.*)
    } :data

    . = ALIGN(CONSTANT(MAXPAGESIZE));

    __tdata_start = .;

    .tdata : {
        *(.tdata .tdata.*)
    } :data :tls

    __tdata_end = .;

    . = ALIGN(CONSTANT(MAXPAGESIZE));

    __tbss_start = .;

    .tbss : {
        *(.tbss .tbss.*)
    } :bss :tls

    __tbss_end = .;

    . = ALIGN(CONSTANT(MAXPAGESIZE));

    __tls_size = __tbss_end - __tdata_start;

    __bss_start = .;

    .bss : {

init/init.c
@@ -8,6 +8,8 @@

#define MUTEX 2000

/* __thread char letter; */

void app_thread1 (void);

int spawn (void (*fn) (void)) {
@@ -23,26 +25,30 @@ int spawn (void (*fn) (void)) {
void app_main (void) {
    mutex_create (MUTEX);

    /* letter = 'd'; */

    spawn (&app_thread1);

    for (;;) {
        mutex_lock (MUTEX);
        /* mutex_lock (MUTEX); */

        for (int i = 0; i < 3; i++)
            test ('a');
        /* for (int i = 0; i < 3; i++) */
        /*     test (letter); */

        mutex_unlock (MUTEX);
        /* mutex_unlock (MUTEX); */
    }
}

void app_thread1 (void) {
    /* letter = 'c'; */

    for (;;) {
        mutex_lock (MUTEX);
        /* mutex_lock (MUTEX); */

        for (int i = 0; i < 3; i++)
            test ('b');
        /* for (int i = 0; i < 3; i++) */
        /*     test (letter); */

        mutex_unlock (MUTEX);
        /* mutex_unlock (MUTEX); */
    }

    quit ();

@@ -160,7 +160,7 @@ void amd64_ioapic_init (void) {
    struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
    mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
                        (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
                        MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
                        MM_PG_PRESENT | MM_PG_RW);
    ioapics[ioapic_entries++] = (struct ioapic){
        .lock = RW_SPIN_LOCK_INIT,
        .table_data = *ioapic_table_data,
@@ -246,8 +246,7 @@ void amd64_lapic_init (uint32_t us) {
    uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
    thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;

    mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base,
                        MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
    mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);

    amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));

@@ -47,8 +47,6 @@ void bootmain (void) {
    amd64_ioapic_init ();
    amd64_hpet_init ();

    mm_init2 ();

    smp_init ();

    proc_init ();

@@ -129,8 +129,7 @@ void amd64_hpet_init (void) {
    hpet_paddr = (uintptr_t)hpet->address.address;

    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
                        MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
    mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);

    uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
    hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

@@ -22,10 +22,12 @@ struct pg_index {
} PACKED;

/* Kernel page directory */
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
static spin_lock_ctx_t ctxkpd;
/* Lock needed to sync between map/unmap operations and TLB shootdown */
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
static struct pd kernel_pd;
static spin_lock_t kernel_pd_lock;

void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }

void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }

/* Get current value of CR3 register */
static uintptr_t amd64_current_cr3 (void) {
@@ -112,15 +114,7 @@ static void amd64_reload_cr3 (void) {

/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
    spin_lock_ctx_t ctxmm, ctxpd;

    spin_lock (&mm_lock, &ctxmm);

    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool do_reload = false;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock, &ctxpd);

    uint64_t amd64_flags = amd64_mm_resolve_flags (flags);

@@ -129,69 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag

    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
    if (pml3 == NULL)
        goto done;
        return;

    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
    if (pml2 == NULL)
        goto done;
        return;

    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
    if (pml1 == NULL)
        goto done;
        return;

    uint64_t* pte = &pml1[pg_index.pml1];

    *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
    do_reload = true;

done:
    if (do_reload && (flags & MM_PD_RELOAD))
        amd64_reload_cr3 ();

    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock, &ctxpd);

    spin_unlock (&mm_lock, &ctxmm);
}

/* Map a page into kernel page directory */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
    mm_map_page (&kernel_pd, paddr, vaddr, flags);
    amd64_reload_cr3 ();
}

/* Unmap a virtual address. TLB needs to be flushed afterwards */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    spin_lock_ctx_t ctxmm, ctxpd;

    spin_lock (&mm_lock, &ctxmm);

void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool do_reload = false;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock, &ctxpd);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

    uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
    if (pml3 == NULL)
        goto done;
        return;

    uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
    if (pml2 == NULL)
        goto done;
        return;

    uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
    if (pml1 == NULL)
        goto done;
        return;

    uint64_t* pte = &pml1[pg_index.pml1];

    if ((*pte) & AMD64_PG_PRESENT) {
    if ((*pte) & AMD64_PG_PRESENT)
        *pte = 0;
        do_reload = true;
    }

    if (amd64_mm_is_table_empty (pml1)) {
        uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -210,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
            }
        }
    }

done:
    if (do_reload && (flags & MM_PD_RELOAD))
        amd64_reload_cr3 ();

    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock, &ctxpd);

    spin_unlock (&mm_lock, &ctxmm);
}

/* Unmap a page from kernel page directory */
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
    mm_unmap_page (&kernel_pd, vaddr, flags);
void mm_unmap_kernel_page (uintptr_t vaddr) {
    mm_unmap_page (&kernel_pd, vaddr);
    amd64_reload_cr3 ();
}

/* Lock kernel page directory */
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }

/* Unlock kernel page directory */
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }

/* Allocate a userspace-ready page directory */
uintptr_t mm_alloc_user_pd_phys (void) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -250,26 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
    return cr3;
}

/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
void mm_reload (void) {
    struct limine_mp_response* mp = limine_mp_request.response;

    for (size_t i = 0; i < mp->cpu_count; i++) {
        amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
    }
}

bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    spin_lock_ctx_t ctxmm, ctxpd;

    spin_lock (&mm_lock, &ctxmm);

bool mm_validate (struct pd* pd, uintptr_t vaddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    bool ret = false;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock, &ctxpd);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

@@ -289,45 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    ret = (pte & AMD64_PG_PRESENT) != 0;

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock, &ctxpd);

    spin_unlock (&mm_lock, &ctxmm);

    return ret;
}

bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
    bool ok = true;
    spin_lock_ctx_t ctxpd;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock, &ctxpd);

    for (size_t i = 0; i < size; i++) {
        ok = mm_validate (pd, vaddr + i, 0);
        ok = mm_validate (pd, vaddr + i);
        if (!ok)
            goto done;
    }

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock, &ctxpd);

    return ok;
}

uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
    spin_lock_ctx_t ctxmm, ctxpd;

    spin_lock (&mm_lock, &ctxmm);

uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uintptr_t ret = 0;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock, &ctxpd);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);

    for (size_t i4 = 0; i4 < 512; i4++) {
@@ -358,25 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
    }

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock, &ctxpd);

    spin_unlock (&mm_lock, &ctxmm);

    return ret;
}

uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    spin_lock_ctx_t ctxmm, ctxpd;

    spin_lock (&mm_lock, &ctxmm);

uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    uintptr_t ret = 0;

    if (flags & MM_PD_LOCK)
        spin_lock (&pd->lock, &ctxpd);

    uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
    struct pg_index pg_index = amd64_mm_page_index (vaddr);

@@ -400,25 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
    ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));

done:
    if (flags & MM_PD_LOCK)
        spin_unlock (&pd->lock, &ctxpd);

    spin_unlock (&mm_lock, &ctxmm);

    return ret;
}

/* TLB shootdown IRQ handler */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
    (void)arg, (void)regs;

    amd64_reload_cr3 ();
    DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
}

/* Continue initializing memory management subsystem for AMD64 after the essential parts were
 * initialized */
void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }

/* Initialize essentials for the AMD64 memory management subsystem */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

@@ -7,11 +7,9 @@
#define PAGE_SIZE 4096

struct pd {
    spin_lock_t lock;
    uintptr_t cr3_paddr;
};

void amd64_load_kernel_cr3 (void);
void mm_init2 (void);

#endif // _KERNEL_AMD64_MM_H

@@ -15,7 +15,7 @@
struct proc_platformdata {
    struct saved_regs regs;
    uintptr_t kernel_stack;
    uint64_t gs_base;
    uint64_t fs_base;
};

#endif // _KERNEL_AMD64_PROC_H

@@ -14,6 +14,7 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu

    thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
    thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
    amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);

    spin_unlock (&proc->lock, &ctxpr);
    spin_unlock (cpu_lock, ctxcpu);

@@ -74,6 +74,8 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
                memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
                        (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
            } break;
            case PT_TLS: {
            } break;
        }
    }

@@ -15,19 +15,24 @@ static atomic_int pgids = 0;

uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
                         uintptr_t* out_paddr) {
    spin_lock_ctx_t ctxprpd;
    spin_lock_ctx_t ctxpg;

    vaddr = (vaddr == 0) ? PROC_MAP_BASE : vaddr;
    spin_lock (&procgroup->lock, &ctxpg);

    vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;

    struct proc_mapping* mapping = malloc (sizeof (*mapping));

    if (mapping == NULL)
    if (mapping == NULL) {
        spin_unlock (&procgroup->lock, &ctxpg);
        return 0;
    }

    uintptr_t paddr = pmm_alloc (pages);

    if (paddr == PMM_ALLOC_ERR) {
        free (mapping);
        spin_unlock (&procgroup->lock, &ctxpg);
        return 0;
    }

@@ -38,9 +43,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
    mapping->vaddr = vaddr;
    mapping->size = pages * PAGE_SIZE;

    flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manually */

    spin_lock (&procgroup->pd.lock, &ctxprpd);
    procgroup->map_base += pages * PAGE_SIZE;

    list_append (procgroup->mappings, &mapping->proc_mappings_link);

@@ -49,7 +52,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
        mm_map_page (&procgroup->pd, ppage, vpage, flags);
    }

    spin_unlock (&procgroup->pd.lock, &ctxprpd);
    spin_unlock (&procgroup->lock, &ctxpg);

    return vaddr;
}
@@ -57,15 +60,17 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
    size_t unmap_size = pages * PAGE_SIZE;
    uintptr_t end_vaddr = start_vaddr + unmap_size;

    struct list_node_link *mapping_link, *mapping_link_tmp;

    bool used_tail_mapping = false;
    spin_lock_ctx_t ctxprpd;
    spin_lock_ctx_t ctxpg;

    struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
    if (tail_mapping == NULL)
        return false;

    spin_lock (&procgroup->pd.lock, &ctxprpd);
    spin_lock (&procgroup->lock, &ctxpg);

    list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
        struct proc_mapping* mapping =

@@ -115,10 +120,10 @@ bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t
    free (tail_mapping);

    for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
        mm_unmap_page (&procgroup->pd, vpage, 0);
        mm_unmap_page (&procgroup->pd, vpage);
    }

    spin_unlock (&procgroup->pd.lock, &ctxprpd);
    spin_unlock (&procgroup->lock, &ctxpg);

    return true;
}
@@ -135,8 +140,8 @@ struct procgroup* procgroup_create (void) {
    procgroup->memb_proc_tree = NULL;
    procgroup->lock = SPIN_LOCK_INIT;
    procgroup->pgid = atomic_fetch_add (&pgids, 1);
    procgroup->pd.lock = SPIN_LOCK_INIT;
    procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
    procgroup->map_base = PROC_MAP_BASE;

    rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
    rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,

@@ -27,7 +27,8 @@ struct procgroup {
    struct rb_node_link* resource_tree;
    atomic_int sys_rids;
    struct pd pd;
    struct list_node_link* mappings; /* protected by pd.lock */
    struct list_node_link* mappings;
    uintptr_t map_base;
};

struct procgroup* procgroup_create (void);

@@ -2,6 +2,7 @@
#define _KERNEL_SYS_MM_H

#include <libk/std.h>
#include <sync/spin_lock.h>

#if defined(__x86_64__)
#include <amd64/mm.h>
@@ -10,21 +11,18 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
#define MM_PD_RELOAD (1 << 30)
#define MM_PD_LOCK (1 << 31)

uintptr_t mm_alloc_user_pd_phys (void);
void mm_reload (void);
void mm_kernel_lock (spin_lock_ctx_t* ctx);
void mm_kernel_unlock (spin_lock_ctx_t* ctx);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
void mm_lock_kernel (void);
void mm_unlock_kernel (void);
bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
void mm_unmap_kernel_page (uintptr_t vaddr);
bool mm_validate (struct pd* pd, uintptr_t vaddr);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
struct pd* mm_get_kernel_pd (void);
void mm_init (void);

@@ -24,18 +24,18 @@

static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprpd;
    spin_lock_ctx_t ctxpg;

    spin_lock (&proc->procgroup->pd.lock, &ctxprpd);
    spin_lock (&proc->procgroup->lock, &ctxpg);

    if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size, 0)) {
        spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
    if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
        spin_unlock (&proc->procgroup->lock, &ctxpg);
        return NULL;
    }

    uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr, 0);
    uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);

    spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
    spin_unlock (&proc->procgroup->lock, &ctxpg);

    uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;