Compare commits


3 Commits

SHA1        Message                                                               Date
1c64d608bd  Rename make/libc.mk -> make/libmsl.mk                                 2026-01-28 23:57:28 +01:00
3d23187acf  Implement userspace TLS, remove RW Locks                              2026-01-28 23:52:48 +01:00
a3b62ebd3d  Clean up AMD64 memory management code, remove dependency on pd.lock   2026-01-27 19:03:03 +01:00
25 changed files with 210 additions and 306 deletions


@@ -4,4 +4,4 @@ include make/apps.mk
 include make/kernel.mk
 include make/dist.mk
 include make/docs.mk
-include make/libc.mk
+include make/libmsl.mk


@@ -6,6 +6,8 @@ PHDRS {
   text PT_LOAD;
   rodata PT_LOAD;
   data PT_LOAD;
+  bss PT_LOAD;
+  tls PT_TLS;
 }
 SECTIONS {
@@ -13,6 +15,7 @@ SECTIONS {
   .text : {
     *(.text .text.*)
+    *(.ltext .ltext.*)
   } :text
   . = ALIGN(CONSTANT(MAXPAGESIZE));
@@ -21,23 +24,43 @@ SECTIONS {
     *(.rodata .rodata.*)
   } :rodata
+  .note.gnu.build-id : {
+    *(.note.gnu.build-id)
+  } :rodata
   . = ALIGN(CONSTANT(MAXPAGESIZE));
   .data : {
     *(.data .data.*)
+    *(.ldata .ldata.*)
   } :data
+  . = ALIGN(CONSTANT(MAXPAGESIZE));
   __bss_start = .;
   .bss : {
     *(.bss .bss.*)
+    *(.lbss .lbss.*)
-  } :data
+  } :bss
   __bss_end = .;
+  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  __tdata_start = .;
+  .tdata : {
+    *(.tdata .tdata.*)
+  } :tls
+  __tdata_end = .;
+  __tbss_start = .;
+  .tbss : {
+    *(.tbss .tbss.*)
+  } :tls
+  __tbss_end = .;
+  __tls_size = __tbss_end - __tdata_start;
   /DISCARD/ : {
     *(.eh_frame*)
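
The new tls PT_TLS program header and the __tdata_start/__tbss_end/__tls_size symbols describe the per-thread template that the kernel copies for each new thread (see proc_init_tls further down). A minimal sketch of how such symbols are consumed — only the symbol names come from the script above, the helper itself is illustrative:

    /* Sketch: build one thread's TLS block from the linker-provided template. */
    #include <stddef.h>
    #include <string.h>

    extern char __tdata_start[], __tdata_end[], __tbss_end[];

    static void tls_copy_template (void* dst) {
      size_t init_size  = (size_t)(__tdata_end - __tdata_start);  /* initialized .tdata bytes */
      size_t total_size = (size_t)(__tbss_end - __tdata_start);   /* == __tls_size */
      memcpy (dst, __tdata_start, init_size);
      memset ((char*)dst + init_size, 0, total_size - init_size); /* .tbss is zero-filled */
    }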


@@ -8,6 +8,8 @@
 #define MUTEX 2000
+__thread char letter = 'c';
 void app_thread1 (void);
 int spawn (void (*fn) (void)) {
@@ -23,24 +25,28 @@ int spawn (void (*fn) (void)) {
 void app_main (void) {
   mutex_create (MUTEX);
+  letter = 'a';
   spawn (&app_thread1);
   for (;;) {
     mutex_lock (MUTEX);
     for (int i = 0; i < 3; i++)
-      test ('a');
+      test (letter);
     mutex_unlock (MUTEX);
   }
 }
 void app_thread1 (void) {
+  letter = 'b';
   for (;;) {
     mutex_lock (MUTEX);
     for (int i = 0; i < 3; i++)
-      test ('b');
+      test (letter);
     mutex_unlock (MUTEX);
   }
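
Because letter is __thread, each thread starts from the template value 'c' and then owns a private copy, so app_main's 'a' and app_thread1's 'b' never clobber each other even though both now call test (letter). On x86-64 such an access compiles to an FS-relative load; a sketch (whether this app ends up in the local-exec or initial-exec TLS model is an assumption here):

    /* Equivalent access pattern. For local-exec TLS the compiler emits an
       FS-relative instruction along the lines of
           movb %fs:letter@tpoff, %al
       which is why the scheduler must keep MSR_FS_BASE pointing at this
       process's TCB (see the do_sched hunk below). */
    extern __thread char letter;
    char read_letter (void) { return letter; }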


@@ -4,7 +4,7 @@
 #include <amd64/msr.h>
 #include <libk/std.h>
 #include <limine/requests.h>
-#include <sync/rw_spin_lock.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
 #include <sys/spin.h>
@@ -38,7 +38,7 @@
 struct ioapic {
   struct acpi_madt_ioapic table_data;
-  rw_spin_lock_t lock;
+  spin_lock_t lock;
   uintptr_t mmio_base;
 };
@@ -59,10 +59,10 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
 static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
   spin_lock_ctx_t ctxioar;
-  rw_spin_read_lock (&ioapic->lock, &ctxioar);
+  spin_lock (&ioapic->lock, &ctxioar);
   *(volatile uint32_t*)ioapic->mmio_base = reg;
   uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
-  rw_spin_read_unlock (&ioapic->lock, &ctxioar);
+  spin_unlock (&ioapic->lock, &ctxioar);
   return ret;
 }
@@ -70,10 +70,10 @@ static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
 static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
   spin_lock_ctx_t ctxioaw;
-  rw_spin_write_lock (&ioapic->lock, &ctxioaw);
+  spin_lock (&ioapic->lock, &ctxioaw);
   *(volatile uint32_t*)ioapic->mmio_base = reg;
   *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
-  rw_spin_write_unlock (&ioapic->lock, &ctxioaw);
+  spin_unlock (&ioapic->lock, &ctxioaw);
 }
 /* Find an IOAPIC corresponding to the provided IRQ */
@@ -160,9 +160,9 @@ void amd64_ioapic_init (void) {
     struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
     mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
                         (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
-                        MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
+                        MM_PG_PRESENT | MM_PG_RW);
     ioapics[ioapic_entries++] = (struct ioapic){
-        .lock = RW_SPIN_LOCK_INIT,
+        .lock = SPIN_LOCK_INIT,
         .table_data = *ioapic_table_data,
         .mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
     };
@@ -246,8 +246,7 @@ void amd64_lapic_init (uint32_t us) {
   uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
   thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
-  mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base,
-                      MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
+  mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);
   amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));


@@ -47,8 +47,6 @@ void bootmain (void) {
   amd64_ioapic_init ();
   amd64_hpet_init ();
-  mm_init2 ();
   smp_init ();
   proc_init ();


@@ -129,8 +129,7 @@ void amd64_hpet_init (void) {
   hpet_paddr = (uintptr_t)hpet->address.address;
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
-                      MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
+  mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
   uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
   hpet_32bits = (caps & (1 << 13)) ? 0 : 1;


@@ -22,10 +22,12 @@ struct pg_index {
 } PACKED;
 /* Kernel page directory */
-static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
-static spin_lock_ctx_t ctxkpd;
-/* Lock needed to sync between map/unmap operations and TLB shootdown */
-static spin_lock_t mm_lock = SPIN_LOCK_INIT;
+static struct pd kernel_pd;
+static spin_lock_t kernel_pd_lock;
+
+void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
+void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }
 /* Get current value of CR3 register */
 static uintptr_t amd64_current_cr3 (void) {
@@ -112,15 +114,7 @@ static void amd64_reload_cr3 (void) {
 /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  bool do_reload = false;
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
   uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
@@ -129,69 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
   uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
   if (pml3 == NULL)
-    goto done;
+    return;
   uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
   if (pml2 == NULL)
-    goto done;
+    return;
   uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
   if (pml1 == NULL)
-    goto done;
+    return;
   uint64_t* pte = &pml1[pg_index.pml1];
   *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
-  do_reload = true;
-done:
-  if (do_reload && (flags & MM_PD_RELOAD))
-    amd64_reload_cr3 ();
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
 }
 /* Map a page into kernel page directory */
 void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
   mm_map_page (&kernel_pd, paddr, vaddr, flags);
+  amd64_reload_cr3 ();
 }
 /* Unmap a virtual address. TLB needs to be flushed afterwards */
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  bool do_reload = false;
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   struct pg_index pg_index = amd64_mm_page_index (vaddr);
   uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
   if (pml3 == NULL)
-    goto done;
+    return;
   uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
   if (pml2 == NULL)
-    goto done;
+    return;
   uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
   if (pml1 == NULL)
-    goto done;
+    return;
   uint64_t* pte = &pml1[pg_index.pml1];
-  if ((*pte) & AMD64_PG_PRESENT) {
+  if ((*pte) & AMD64_PG_PRESENT)
     *pte = 0;
-    do_reload = true;
-  }
   if (amd64_mm_is_table_empty (pml1)) {
     uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -210,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
       }
     }
   }
-done:
-  if (do_reload && (flags & MM_PD_RELOAD))
-    amd64_reload_cr3 ();
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
 }
 /* Unmap a page from kernel page directory */
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
-  mm_unmap_page (&kernel_pd, vaddr, flags);
+void mm_unmap_kernel_page (uintptr_t vaddr) {
+  mm_unmap_page (&kernel_pd, vaddr);
+  amd64_reload_cr3 ();
 }
-/* Lock kernel page directory */
-void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }
-/* Unlock kernel page directory */
-void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }
 /* Allocate a userspace-ready page directory */
 uintptr_t mm_alloc_user_pd_phys (void) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -250,26 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
   return cr3;
 }
-/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
-void mm_reload (void) {
-  struct limine_mp_response* mp = limine_mp_request.response;
-  for (size_t i = 0; i < mp->cpu_count; i++) {
-    amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
-  }
-}
-bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+bool mm_validate (struct pd* pd, uintptr_t vaddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   bool ret = false;
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -289,45 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
   ret = (pte & AMD64_PG_PRESENT) != 0;
 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
   return ret;
 }
-bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
   bool ok = true;
-  spin_lock_ctx_t ctxpd;
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
   for (size_t i = 0; i < size; i++) {
-    ok = mm_validate (pd, vaddr + i, 0);
+    ok = mm_validate (pd, vaddr + i);
     if (!ok)
       goto done;
   }
 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
   return ok;
 }
-uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   uintptr_t ret = 0;
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   for (size_t i4 = 0; i4 < 512; i4++) {
@@ -358,25 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
   }
 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
   return ret;
 }
-uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   uintptr_t ret = 0;
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -400,25 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
   ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
   return ret;
 }
-/* TLB shootdown IRQ handler */
-static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
-  (void)arg, (void)regs;
-  amd64_reload_cr3 ();
-  DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
-}
-/* Continue initializing memory management subsystem for AMD64 after the essential parts were
- * initialized */
-void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }
 /* Initialize essentials for the AMD64 memory management subsystem */
 void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
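
The net effect of this file: mm_map_page/mm_unmap_page now only edit the page tables, the *_kernel_page wrappers reload CR3 themselves, and all serialization moves to the caller (mm_kernel_lock for the kernel pd; procgroup->lock for user pds, as in procgroup.c later in this diff). A sketch of the resulting caller-side pattern, using only names that appear in this diff:

    /* Sketch: mapping into a user page directory after this change. The pd
       owner's lock replaces the old MM_PD_LOCK flag, and no TLB shootdown is
       issued; the mapping takes effect on the next CR3 load for that pd. */
    void map_user_page_sketch (struct procgroup* pg, uintptr_t paddr, uintptr_t vaddr) {
      spin_lock_ctx_t ctx;
      spin_lock (&pg->lock, &ctx);
      mm_map_page (&pg->pd, paddr, vaddr, MM_PG_PRESENT | MM_PG_RW | MM_PG_USER);
      spin_unlock (&pg->lock, &ctx);
    }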


@@ -7,11 +7,9 @@
 #define PAGE_SIZE 4096
 struct pd {
-  spin_lock_t lock;
   uintptr_t cr3_paddr;
 };
 void amd64_load_kernel_cr3 (void);
-void mm_init2 (void);
 #endif // _KERNEL_AMD64_MM_H


@@ -1,6 +1,7 @@
 #include <amd64/gdt.h>
 #include <amd64/proc.h>
 #include <aux/elf.h>
+#include <libk/align.h>
 #include <libk/list.h>
 #include <libk/rbtree.h>
 #include <libk/std.h>
@@ -11,12 +12,11 @@
 #include <proc/proc.h>
 #include <proc/procgroup.h>
 #include <proc/resource.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/proc.h>
-/* 0 is kpproc */
-static atomic_int pids = 1;
+static atomic_int pids = 0;
 struct proc* proc_from_elf (uint8_t* elf_contents) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -87,6 +87,8 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent
   proc->pdata.regs.cs = GDT_UCODE | 0x03;
   proc->pdata.regs.rip = (uint64_t)entry;
+  proc_init_tls (proc);
   return proc;
 }
@@ -116,10 +118,36 @@ void proc_cleanup (struct proc* proc) {
   spin_unlock (&proc->lock, &ctxpr);
-  procgroup_detach (proc->procgroup, proc);
   pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
+  procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
+  procgroup_detach (proc->procgroup, proc);
   /* clean the process */
   free (proc);
 }
+void proc_init_tls (struct proc* proc) {
+  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+  size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
+  uintptr_t tls_paddr;
+  uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
+  uintptr_t tls_vaddr =
+      procgroup_map (proc->procgroup, 0, proc->procgroup->tls.tls_tmpl_pages, flags, &tls_paddr);
+  uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
+  uintptr_t ktcb = k_tls_addr + tls_size;
+  uintptr_t utcb = tls_vaddr + tls_size;
+  memset ((void*)k_tls_addr, 0, tls_size);
+  memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
+  *(uintptr_t*)ktcb = utcb;
+  proc->pdata.fs_base = utcb;
+  proc->pdata.tls_vaddr = tls_vaddr;
+}
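
The arithmetic in proc_init_tls matches TLS variant II as used on x86-64: the TCB is placed immediately after the TLS image, the FS base (fs_base = utcb) points at the TCB, and the TCB's first word is a self-pointer — exactly what *(uintptr_t*)ktcb = utcb stores. The resulting user-visible layout:

    tls_vaddr                     utcb = tls_vaddr + tls_size   (FS base)
    v                             v
    +-----------------------------+---------------------------+
    | TLS image (.tdata + .tbss)  | TCB: first word = utcb     |
    +-----------------------------+---------------------------+
      thread-locals sit at negative offsets from the FS base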


@@ -15,7 +15,8 @@
 struct proc_platformdata {
   struct saved_regs regs;
   uintptr_t kernel_stack;
-  uint64_t gs_base;
+  uint64_t fs_base;
+  uintptr_t tls_vaddr;
 };
 #endif // _KERNEL_AMD64_PROC_H

kernel/amd64/procgroup.h (new file)

@@ -0,0 +1,13 @@
+#ifndef _KERNEL_AMD64_PROCGROUP_H
+#define _KERNEL_AMD64_PROCGROUP_H
+#include <libk/std.h>
+struct procgroup_tls {
+  uint8_t* tls_tmpl;
+  size_t tls_tmpl_size;
+  size_t tls_tmpl_total_size;
+  size_t tls_tmpl_pages;
+};
+#endif // _KERNEL_AMD64_PROCGROUP_H


@@ -14,6 +14,7 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu
   thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
   thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
+  amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
   spin_unlock (&proc->lock, &ctxpr);
   spin_unlock (cpu_lock, ctxcpu);
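
Reloading MSR_FS_BASE (0xC0000100) on every switch is what publishes the per-process fs_base set in proc_init_tls; user code then reaches its TLS through FS with no syscall. The repo's amd64_wrmsr comes from <amd64/msr.h>; a typical shape for such a helper — signature assumed, shown only for context — is:

    #include <stdint.h>

    /* Illustrative wrmsr wrapper, not the repo's implementation: WRMSR takes
       the MSR index in ECX and the 64-bit value split across EDX:EAX. */
    static inline void wrmsr_sketch (uint32_t msr, uint64_t value) {
      __asm__ volatile ("wrmsr" : : "c"(msr), "a"((uint32_t)value), "d"((uint32_t)(value >> 32)));
    }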


@@ -2,7 +2,7 @@
 #include <libk/list.h>
 #include <libk/std.h>
 #include <mm/liballoc.h>
-#include <sync/rw_spin_lock.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
 #if defined(__x86_64__)
@@ -12,7 +12,7 @@
 struct irq* irq_table[0x100];
-static rw_spin_lock_t irqs_lock;
+static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
 bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
   spin_lock_ctx_t ctxiqa;
@@ -26,9 +26,9 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
   irq->arg = arg;
   irq->irq_num = irq_num;
-  rw_spin_write_lock (&irqs_lock, &ctxiqa);
+  spin_lock (&irqs_lock, &ctxiqa);
   irq_table[irq_num] = irq;
-  rw_spin_write_unlock (&irqs_lock, &ctxiqa);
+  spin_unlock (&irqs_lock, &ctxiqa);
   return true;
 }
@@ -36,11 +36,11 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
 struct irq* irq_find (uint32_t irq_num) {
   spin_lock_ctx_t ctxiqa;
-  rw_spin_read_lock (&irqs_lock, &ctxiqa);
+  spin_lock (&irqs_lock, &ctxiqa);
   struct irq* irq = irq_table[irq_num];
-  rw_spin_read_unlock (&irqs_lock, &ctxiqa);
+  spin_unlock (&irqs_lock, &ctxiqa);
   return irq;
 }


@@ -13,7 +13,6 @@
 #include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <rd/rd.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
@@ -29,7 +28,7 @@
 #define SCHED_REAP_FREQ 10
 static struct rb_node_link* proc_tree = NULL;
-static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
 static atomic_int sched_cycles = 0;
@@ -74,6 +73,25 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
       memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
               (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
     } break;
+    case PT_TLS: {
+#if defined(__x86_64__)
+      size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
+      size_t tls_size = phdr->p_memsz;
+      size_t tls_total_needed = tls_size + sizeof (uintptr_t);
+      size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
+      proc->procgroup->tls.tls_tmpl_pages = blks;
+      proc->procgroup->tls.tls_tmpl_size = tls_size;
+      proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
+      proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
+      memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
+      memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
+              phdr->p_filesz);
+      proc_init_tls (proc);
+#endif
+    } break;
   }
 }
@@ -84,7 +102,6 @@ struct proc* proc_spawn_rd (char* name) {
   struct rd_file* rd_file = rd_get_file (name);
   bool ok = proc_check_elf (rd_file->content);
-  DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
   if (!ok)
     return NULL;
@@ -96,9 +113,9 @@ struct proc* proc_find_pid (int pid) {
   spin_lock_ctx_t ctxprtr;
   struct proc* proc = NULL;
-  rw_spin_read_lock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);
   rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
-  rw_spin_read_unlock (&proc_tree_lock, &ctxprtr);
+  spin_unlock (&proc_tree_lock, &ctxprtr);
   return proc;
 }
@@ -107,21 +124,20 @@ void proc_register (struct proc* proc, struct cpu* cpu1) {
   spin_lock_ctx_t ctxcpu, ctxprtr;
   proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
-  DEBUG ("Assigning CPU %d to PID %d\n", proc->cpu->id, proc->pid);
   struct cpu* cpu = proc->cpu;
-  rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-  rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-  rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);
   spin_lock (&cpu->lock, &ctxcpu);
-  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
-  atomic_fetch_add (&cpu->proc_run_q_count, 1);
+  rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
+  atomic_fetch_add (&cpu->proc_run_q_count, 1);
+  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
   if (cpu->proc_current == NULL)
     cpu->proc_current = proc;
+  spin_unlock (&proc_tree_lock, &ctxprtr);
   spin_unlock (&cpu->lock, &ctxcpu);
 }
@@ -160,7 +176,7 @@ static void proc_reap (void) {
   spin_lock_ctx_t ctxprtr;
   spin_lock_ctx_t ctxpr;
-  rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);
   struct rb_node_link* node;
   rbtree_first (&proc_tree, node);
@@ -180,7 +196,7 @@ static void proc_reap (void) {
     node = next;
   }
-  rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+  spin_unlock (&proc_tree_lock, &ctxprtr);
   struct list_node_link *reap_link, *reap_link_tmp;
   list_foreach (reap_list, reap_link, reap_link_tmp) {
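
The sizing in the PT_TLS case: p_memsz covers .tdata plus .tbss, one extra pointer is reserved for the TCB self-pointer, and div_align_up rounds the total to whole template pages. For a hypothetical segment with p_memsz = 0x30:

    tls_total_needed = 0x30 + sizeof(uintptr_t) = 0x38 bytes
    blks             = div_align_up (0x38, PAGE_SIZE) = 1 page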


@@ -9,7 +9,6 @@
 #include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <proc/suspension_q.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>


@@ -4,30 +4,34 @@
 #include <mm/pmm.h>
 #include <proc/proc.h>
 #include <proc/procgroup.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
 static struct rb_node_link* procgroup_tree = NULL;
-static rw_spin_lock_t procgroup_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
 static atomic_int pgids = 0;
 uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
                          uintptr_t* out_paddr) {
-  spin_lock_ctx_t ctxprpd;
-  vaddr = (vaddr == 0) ? PROC_MAP_BASE : vaddr;
+  spin_lock_ctx_t ctxpg;
+  spin_lock (&procgroup->lock, &ctxpg);
+  vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
   struct proc_mapping* mapping = malloc (sizeof (*mapping));
-  if (mapping == NULL)
+  if (mapping == NULL) {
+    spin_unlock (&procgroup->lock, &ctxpg);
     return 0;
+  }
   uintptr_t paddr = pmm_alloc (pages);
   if (paddr == PMM_ALLOC_ERR) {
     free (mapping);
+    spin_unlock (&procgroup->lock, &ctxpg);
     return 0;
   }
@@ -38,9 +42,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
   mapping->vaddr = vaddr;
   mapping->size = pages * PAGE_SIZE;
-  flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */
-  spin_lock (&procgroup->pd.lock, &ctxprpd);
+  procgroup->map_base += pages * PAGE_SIZE;
   list_append (procgroup->mappings, &mapping->proc_mappings_link);
@@ -49,7 +51,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
     mm_map_page (&procgroup->pd, ppage, vpage, flags);
   }
-  spin_unlock (&procgroup->pd.lock, &ctxprpd);
+  spin_unlock (&procgroup->lock, &ctxpg);
   return vaddr;
 }
@@ -57,15 +59,17 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
 bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
   size_t unmap_size = pages * PAGE_SIZE;
   uintptr_t end_vaddr = start_vaddr + unmap_size;
   struct list_node_link *mapping_link, *mapping_link_tmp;
   bool used_tail_mapping = false;
-  spin_lock_ctx_t ctxprpd;
+  spin_lock_ctx_t ctxpg;
   struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
   if (tail_mapping == NULL)
     return false;
-  spin_lock (&procgroup->pd.lock, &ctxprpd);
+  spin_lock (&procgroup->lock, &ctxpg);
   list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
     struct proc_mapping* mapping =
@@ -115,10 +119,10 @@ bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t
   free (tail_mapping);
   for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
-    mm_unmap_page (&procgroup->pd, vpage, 0);
+    mm_unmap_page (&procgroup->pd, vpage);
   }
-  spin_unlock (&procgroup->pd.lock, &ctxprpd);
+  spin_unlock (&procgroup->lock, &ctxpg);
   return true;
 }
@@ -135,13 +139,13 @@ struct procgroup* procgroup_create (void) {
   procgroup->memb_proc_tree = NULL;
   procgroup->lock = SPIN_LOCK_INIT;
   procgroup->pgid = atomic_fetch_add (&pgids, 1);
-  procgroup->pd.lock = SPIN_LOCK_INIT;
   procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
+  procgroup->map_base = PROC_MAP_BASE;
-  rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
+  spin_lock (&procgroup_tree_lock, &ctxpgtr);
   rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
                  procgroup_tree_link, pgid);
-  rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);
+  spin_unlock (&procgroup_tree_lock, &ctxpgtr);
   return procgroup;
 }
@@ -155,7 +159,6 @@ void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
   rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
                  procgroup_memb_tree_link, pid);
   atomic_fetch_add (&procgroup->refs, 1);
-  DEBUG ("procgrpup attach PID %d to PGID %d\n", proc->pid, procgroup->pgid);
   spin_unlock (&proc->lock, &ctxpr);
   spin_unlock (&procgroup->lock, &ctxpg);
@@ -169,19 +172,18 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
   rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
   int refs = atomic_fetch_sub (&procgroup->refs, 1);
-  DEBUG ("procgrpup detach PID %d to PGID %d\n", proc->pid, procgroup->pgid);
   spin_unlock (&proc->lock, &ctxpr);
   spin_unlock (&procgroup->lock, &ctxpg);
   if (refs == 1) {
-    rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
+    spin_lock (&procgroup_tree_lock, &ctxpgtr);
     spin_lock (&procgroup->lock, &ctxpg);
     rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
     spin_unlock (&procgroup->lock, &ctxpg);
-    rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);
+    spin_unlock (&procgroup_tree_lock, &ctxpgtr);
     /* delete resources */
     struct rb_node_link* rnode;
@@ -209,6 +211,8 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
     pmm_free (procgroup->pd.cr3_paddr, 1);
+    free (procgroup->tls.tls_tmpl);
     free (procgroup);
   }
 }
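
procgroup_map now doubles as a simple bump allocator: with vaddr == 0 it places the mapping at procgroup->map_base (seeded with PROC_MAP_BASE in procgroup_create) and advances the base by the mapping's size, all under procgroup->lock. Caller-side sketch, matching how proc_init_tls uses it:

    /* Sketch: anonymous per-group allocation; vaddr = 0 lets the bump
       allocator pick the address, paddr receives the backing frame. */
    uintptr_t paddr;
    uintptr_t vaddr = procgroup_map (procgroup, 0, /* pages */ 1,
                                     MM_PG_PRESENT | MM_PG_RW | MM_PG_USER, &paddr);
    if (vaddr == 0) {
      /* allocation failed: both error paths unlock and free before returning */
    }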


@@ -7,6 +7,7 @@
 #include <proc/resource.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>
+#include <sys/procgroup.h>
 struct proc;
@@ -27,7 +28,9 @@ struct procgroup {
   struct rb_node_link* resource_tree;
   atomic_int sys_rids;
   struct pd pd;
-  struct list_node_link* mappings; /* protected by pd.lock */
+  struct list_node_link* mappings;
+  uintptr_t map_base;
+  struct procgroup_tls tls;
 };
 struct procgroup* procgroup_create (void);


@@ -1,67 +0,0 @@
-#include <libk/assert.h>
-#include <libk/std.h>
-#include <sync/rw_spin_lock.h>
-#include <sys/debug.h>
-#include <sys/irq.h>
-#include <sys/spin_lock.h>
-#define WRITER_WAIT (1U << 31)
-#define READER_MASK (~WRITER_WAIT)
-void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  uint32_t value;
-  irq_save (ctx);
-  for (;;) {
-    value = atomic_load_explicit (rw, memory_order_relaxed);
-    if ((value & WRITER_WAIT) == 0) {
-      if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
-                                                 memory_order_relaxed)) {
-        return;
-      }
-    }
-    spin_lock_relax ();
-  }
-}
-void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
-  assert ((old & READER_MASK) > 0);
-  irq_restore (ctx);
-}
-void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  uint32_t value;
-  irq_save (ctx);
-  /* announce writer */
-  for (;;) {
-    value = atomic_load_explicit (rw, memory_order_relaxed);
-    if ((value & WRITER_WAIT) == 0) {
-      if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
-                                                 memory_order_acquire, memory_order_relaxed))
-        break;
-    } else {
-      spin_lock_relax ();
-    }
-  }
-  /* wait for readers */
-  for (;;) {
-    value = atomic_load_explicit (rw, memory_order_acquire);
-    if ((value & READER_MASK) == 0)
-      return;
-    spin_lock_relax ();
-  }
-}
-void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  atomic_store_explicit (rw, 0, memory_order_release);
-  irq_restore (ctx);
-}


@@ -1,17 +0,0 @@
-#ifndef _KERNEL_SYNC_RW_SPIN_LOCK_H
-#define _KERNEL_SYNC_RW_SPIN_LOCK_H
-#include <libk/std.h>
-#include <sync/spin_lock.h>
-#include <sys/spin_lock.h>
-#define RW_SPIN_LOCK_INIT 0
-typedef _Atomic (uint32_t) rw_spin_lock_t;
-void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H


@@ -1,5 +1,3 @@
-c += sync/spin_lock.c \
-     sync/rw_spin_lock.c
+c += sync/spin_lock.c
 
-o += sync/spin_lock.o \
-     sync/rw_spin_lock.o
+o += sync/spin_lock.o


@@ -2,6 +2,7 @@
 #define _KERNEL_SYS_MM_H
 #include <libk/std.h>
+#include <sync/spin_lock.h>
 #if defined(__x86_64__)
 #include <amd64/mm.h>
@@ -10,21 +11,18 @@
 #define MM_PG_PRESENT (1 << 0)
 #define MM_PG_RW (1 << 1)
 #define MM_PG_USER (1 << 2)
-#define MM_PD_RELOAD (1 << 30)
-#define MM_PD_LOCK (1 << 31)
 uintptr_t mm_alloc_user_pd_phys (void);
-void mm_reload (void);
-void mm_lock_kernel (void);
-void mm_unlock_kernel (void);
+void mm_kernel_lock (spin_lock_ctx_t* ctx);
+void mm_kernel_unlock (spin_lock_ctx_t* ctx);
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
 void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
-bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags);
-bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags);
-uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags);
-uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags);
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
+void mm_unmap_kernel_page (uintptr_t vaddr);
+bool mm_validate (struct pd* pd, uintptr_t vaddr);
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
 struct pd* mm_get_kernel_pd (void);
 void mm_init (void);


@@ -8,5 +8,6 @@ struct proc;
 struct proc* proc_from_elf (uint8_t* elf_contents);
 struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry);
 void proc_cleanup (struct proc* proc);
+void proc_init_tls (struct proc* proc);
 #endif // _KERNEL_SYS_PROC_H

kernel/sys/procgroup.h (new file)

@@ -0,0 +1,8 @@
+#ifndef _KERNEL_SYS_PROCGROUP_H
+#define _KERNEL_SYS_PROCGROUP_H
+#if defined(__x86_64__)
+#include <amd64/procgroup.h>
+#endif
+#endif // _KERNEL_SYS_PROCGROUP_H


@@ -24,18 +24,18 @@
 static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  spin_lock_ctx_t ctxprpd;
-  spin_lock (&proc->procgroup->pd.lock, &ctxprpd);
-  if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size, 0)) {
-    spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
+  spin_lock_ctx_t ctxpg;
+  spin_lock (&proc->procgroup->lock, &ctxpg);
+  if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
+    spin_unlock (&proc->procgroup->lock, &ctxpg);
     return NULL;
   }
-  uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr, 0);
-  spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
+  uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);
+  spin_unlock (&proc->procgroup->lock, &ctxpg);
   uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;
@@ -85,8 +85,6 @@ DEFINE_SYSCALL (sys_clone) {
   struct proc* new = proc_clone (proc, vstack_top, entry);
-  DEBUG ("new=%p\n", new);
   if (new == NULL) {
     return SYSRESULT (-ST_OOM_ERROR);
   }


@@ -7,4 +7,4 @@ clean_libmsl:
 format_libmsl:
 	make -C libmsl platform=$(platform) format
 
-.PHONY: all_libmsl clean_libmsl
+.PHONY: all_libmsl clean_libmsl format_libmsl