Compare commits


15 Commits

SHA1 Message Date
38e26a9c12 Implement argument_ptr () syscall for handling process arguments 2026-01-30 14:05:47 +01:00
124aa12f5b Redesign scheduling points 2026-01-30 02:36:27 +01:00
d2f5c032d9 Fix TLS alignment issues, works on BOCHS now too! 2026-01-29 18:18:24 +01:00
73e42588fb Fix BOCHS clock 2026-01-29 15:04:06 +01:00
e78bfb9984 Move suspension q code into proc/suspension_q.c 2026-01-29 01:52:18 +01:00
d2a88b3641 Move suspension q's cleanup to proc/suspension_q.c 2026-01-29 01:43:01 +01:00
fdda2e2df8 Unlock mutexes on process death 2026-01-29 01:38:44 +01:00
388418a718 Nice wrappers around process management 2026-01-29 00:08:54 +01:00
1c64d608bd Rename make/libc.mk -> make/libmsl.mk 2026-01-28 23:57:28 +01:00
3d23187acf Implement userspace TLS, remove RW Locks 2026-01-28 23:52:48 +01:00
a3b62ebd3d Clean up AMD64 memory management code, remove dependency on pd.lock 2026-01-27 19:03:03 +01:00
8bda300f6a Fix sys_clone () wrong argument bug 2026-01-27 18:05:02 +01:00
cf51600c6a Cleanup syscalls 2026-01-27 17:34:43 +01:00
b388b30b24 Redesign userspace memory management 2026-01-27 17:04:08 +01:00
600886a7ee Organize resources into process groups 2026-01-27 14:18:05 +01:00
52 changed files with 878 additions and 1313 deletions

@@ -4,4 +4,4 @@ include make/apps.mk
 include make/kernel.mk
 include make/dist.mk
 include make/docs.mk
-include make/libc.mk
+include make/libmsl.mk

@@ -6,6 +6,8 @@ PHDRS {
   text PT_LOAD;
   rodata PT_LOAD;
   data PT_LOAD;
+  bss PT_LOAD;
+  tls PT_TLS;
 }
 SECTIONS {
@@ -13,32 +15,53 @@ SECTIONS {
   .text : {
     *(.text .text.*)
+    *(.ltext .ltext.*)
   } :text
-  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  . = ALIGN(0x1000);
   .rodata : {
     *(.rodata .rodata.*)
   } :rodata
-  .note.gnu.build-id : {
-    *(.note.gnu.build-id)
-  } :rodata
-  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  . = ALIGN(0x1000);
   .data : {
     *(.data .data.*)
+    *(.ldata .ldata.*)
   } :data
+  . = ALIGN(0x1000);
   __bss_start = .;
   .bss : {
     *(.bss .bss.*)
-  } :data
+    *(.lbss .lbss.*)
+  } :bss
   __bss_end = .;
+  . = ALIGN(0x1000);
+  __tdata_start = .;
+  .tdata : {
+    *(.tdata .tdata.*)
+  } :tls
+  __tdata_end = .;
+  __tbss_start = .;
+  .tbss : {
+    *(.tbss .tbss.*)
+  } :tls
+  __tbss_end = .;
+  __tls_size = __tbss_end - __tdata_start;
   /DISCARD/ : {
     *(.eh_frame*)
     *(.note .note.*)
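The new __tdata_start/__tdata_end/__tbss_start/__tbss_end/__tls_size symbols make the TLS initialization image addressable from code. A minimal sketch of consuming them, assuming only the symbol names defined in the script above (the helper itself is illustrative, not code from this repo):

/* Locate the TLS template via the linker-script symbols above. */
#include <stddef.h>
#include <stdint.h>

extern char __tdata_start[]; /* start of the .tdata initialization image */
extern char __tdata_end[];   /* end of initialized TLS data */
extern char __tls_size[];    /* absolute symbol: __tbss_end - __tdata_start */

static inline void tls_image_bounds (const void** image, size_t* init_size, size_t* total_size) {
  *image = __tdata_start;
  *init_size = (size_t)(__tdata_end - __tdata_start); /* bytes to copy per thread */
  *total_size = (size_t)(uintptr_t)__tls_size;        /* bytes to reserve; the .tbss tail is zeroed */
}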

@@ -1,4 +1,4 @@
-cpu: model=p4_prescott_celeron_336
+cpu: model=p4_prescott_celeron_336, ips=200000000
 memory: guest=4096 host=2048
@@ -9,6 +9,7 @@ ata0: enabled=1
 ata0-master: type=cdrom, path=mop3.iso, status=inserted
 com1: enabled=1, mode=file, dev=bochs-com1.txt
 pci: enabled=1, chipset=i440fx
+clock: sync=realtime, time0=local
 boot: cdrom

@@ -7,11 +7,10 @@
 #define SYS_UNMAP 4
 #define SYS_CLONE 5
 #define SYS_SCHED 6
-#define SYS_CREATE_MEM 7
-#define SYS_UNLINK_MEM 8
-#define SYS_CREATE_MUTEX 9
-#define SYS_UNLINK_MUTEX 10
-#define SYS_LOCK_MUTEX 11
-#define SYS_UNLOCK_MUTEX 12
+#define SYS_MUTEX_CREATE 7
+#define SYS_MUTEX_DELETE 8
+#define SYS_MUTEX_LOCK 9
+#define SYS_MUTEX_UNLOCK 10
+#define SYS_ARGUMENT_PTR 11

 #endif // _M_SYSCALL_DEFS_H
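For reference, a hypothetical userspace shim over the renumbered table, assuming the register convention visible in amd64_syscall_dispatch () later in this compare (number in rax; arguments in rdi/rsi/rdx/r10/r8/r9). The wrapper names are illustrative, not the repo's actual libmsl code:

#include <stdint.h>

static inline long msl_syscall1 (long n, long a1) {
  long ret;
  __asm__ volatile ("syscall" : "=a"(ret) : "a"(n), "D"(a1) : "rcx", "r11", "memory");
  return ret;
}

static inline long mutex_create (long id) { return msl_syscall1 (7 /* SYS_MUTEX_CREATE */, id); }
static inline long mutex_lock (long id) { return msl_syscall1 (9 /* SYS_MUTEX_LOCK */, id); }
static inline long mutex_unlock (long id) { return msl_syscall1 (10 /* SYS_MUTEX_UNLOCK */, id); }
static inline void* argument_ptr (void) { return (void*)msl_syscall1 (11 /* SYS_ARGUMENT_PTR */, 0); }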

@@ -1,115 +1,46 @@
-#include <alloc/liballoc.h>
 #include <limits.h>
-#include <m/status.h>
-#include <m/system.h>
+#include <proc/local.h>
+#include <proc/proc.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <string/string.h>

-#define EXAMPLE 3
-
-#if EXAMPLE == 1
-void app_thread1 (void) {
-  test ('b');
-  quit ();
-}
-
-int spawn (void (*fn) (void)) {
-  size_t stack_size = 256 * PAGE_SIZE;
-  void* stack = malloc (stack_size);
-  if (stack == NULL)
-    return -ST_OOM_ERROR;
-  uintptr_t stack_top = (uintptr_t)stack + stack_size;
-  return clone (stack_top, stack_size, fn);
-}
-
-void app_main (void) { spawn (&app_thread1); }
-#elif EXAMPLE == 2
 #define MUTEX 2000

-void app_thread1 (void);
-
-int spawn (void (*fn) (void)) {
-  size_t stack_size = 256 * PAGE_SIZE;
-  void* stack = malloc (stack_size);
-  if (stack == NULL)
-    return -ST_OOM_ERROR;
-  uintptr_t stack_top = (uintptr_t)stack + stack_size;
-  return clone (stack_top, stack_size, fn);
-}
+LOCAL volatile char letter = 'c';
+
+void app_proc (void) {
+  char arg_letter = (char)(uintptr_t)argument_ptr ();
+  letter = arg_letter;
+
+  for (;;) {
+    mutex_lock (MUTEX);
+    for (int i = 0; i < 3; i++)
+      test (letter);
+    mutex_unlock (MUTEX);
+  }
+
+  process_quit ();
+}

 void app_main (void) {
-  create_mutex (MUTEX, RV_PRIVATE);
-  spawn (&app_thread1);
+  mutex_create (MUTEX);
+  letter = 'a';
+
+  process_spawn (&app_proc, (void*)'a');
+  process_spawn (&app_proc, (void*)'b');
+  process_spawn (&app_proc, (void*)'c');
+
   for (;;) {
-    lock_mutex (MUTEX, RV_PRIVATE);
+    mutex_lock (MUTEX);
     for (int i = 0; i < 3; i++)
-      test ('a');
-    unlock_mutex (MUTEX, RV_PRIVATE);
+      test (letter);
+    mutex_unlock (MUTEX);
   }
 }
-
-void app_thread1 (void) {
-  for (;;) {
-    lock_mutex (MUTEX, RV_PRIVATE);
-    for (int i = 0; i < 3; i++)
-      test ('b');
-    unlock_mutex (MUTEX, RV_PRIVATE);
-  }
-  quit ();
-}
-#elif EXAMPLE == 3
-#define MUTEX 2000
-
-void app_thread1 (void);
-
-int spawn (void (*fn) (void)) {
-  size_t stack_size = 256 * PAGE_SIZE;
-  void* stack = malloc (stack_size);
-  if (stack == NULL)
-    return -ST_OOM_ERROR;
-  uintptr_t stack_top = (uintptr_t)stack + stack_size;
-  return clone (stack_top, stack_size, fn);
-}
-
-void app_main (void) {
-  create_mutex (MUTEX, RV_PRIVATE);
-  spawn (&app_thread1);
-  for (;;) {
-    lock_mutex (MUTEX, RV_PRIVATE);
-    for (int i = 0; i < 3; i++)
-      test ('a');
-    quit ();
-  }
-}
-
-void app_thread1 (void) {
-  for (;;) {
-    lock_mutex (MUTEX, RV_PRIVATE);
-    for (int i = 0; i < 3; i++)
-      test ('b');
-    unlock_mutex (MUTEX, RV_PRIVATE);
-  }
-  quit ();
-}
-#endif

@@ -4,7 +4,7 @@
 #include <amd64/msr.h>
 #include <libk/std.h>
 #include <limine/requests.h>
-#include <sync/rw_spin_lock.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
 #include <sys/spin.h>
@@ -38,7 +38,7 @@
 struct ioapic {
   struct acpi_madt_ioapic table_data;
-  rw_spin_lock_t lock;
+  spin_lock_t lock;
   uintptr_t mmio_base;
 };
@@ -59,10 +59,10 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
 static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
   spin_lock_ctx_t ctxioar;
-  rw_spin_read_lock (&ioapic->lock, &ctxioar);
+  spin_lock (&ioapic->lock, &ctxioar);
   *(volatile uint32_t*)ioapic->mmio_base = reg;
   uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
-  rw_spin_read_unlock (&ioapic->lock, &ctxioar);
+  spin_unlock (&ioapic->lock, &ctxioar);
   return ret;
 }
@@ -70,10 +70,10 @@ static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
 static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
   spin_lock_ctx_t ctxioaw;
-  rw_spin_write_lock (&ioapic->lock, &ctxioaw);
+  spin_lock (&ioapic->lock, &ctxioaw);
   *(volatile uint32_t*)ioapic->mmio_base = reg;
   *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
-  rw_spin_write_unlock (&ioapic->lock, &ctxioaw);
+  spin_unlock (&ioapic->lock, &ctxioaw);
 }

 /* Find an IOAPIC corresponding to provided IRQ */
@@ -160,9 +160,9 @@ void amd64_ioapic_init (void) {
       struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
       mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
                           (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
-                          MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
+                          MM_PG_PRESENT | MM_PG_RW);
       ioapics[ioapic_entries++] = (struct ioapic){
-          .lock = RW_SPIN_LOCK_INIT,
+          .lock = SPIN_LOCK_INIT,
           .table_data = *ioapic_table_data,
           .mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
       };
@@ -246,8 +246,7 @@ void amd64_lapic_init (uint32_t us) {
   uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
   thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
-  mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base,
-                      MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
+  mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);

   amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));

@@ -47,8 +47,6 @@ void bootmain (void) {
   amd64_ioapic_init ();
   amd64_hpet_init ();

-  mm_init2 ();
-
   smp_init ();
   proc_init ();

@@ -129,8 +129,7 @@ void amd64_hpet_init (void) {
   hpet_paddr = (uintptr_t)hpet->address.address;
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
-                      MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
+  mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);

   uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
   hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

@@ -22,10 +22,12 @@ struct pg_index {
 } PACKED;

 /* Kernel page directory */
-static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
-static spin_lock_ctx_t ctxkpd;
+static struct pd kernel_pd;
+static spin_lock_t kernel_pd_lock;

-/* Lock needed to sync between map/unmap operations and TLB shootdown */
-static spin_lock_t mm_lock = SPIN_LOCK_INIT;
+void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
+void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_unlock (&kernel_pd_lock, ctx); }

 /* Get current value of CR3 register */
 static uintptr_t amd64_current_cr3 (void) {
@@ -112,15 +114,7 @@ static void amd64_reload_cr3 (void) {
 /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  bool do_reload = false;
-
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);

   uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
@@ -129,69 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
   uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
   if (pml3 == NULL)
-    goto done;
+    return;
   uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
   if (pml2 == NULL)
-    goto done;
+    return;
   uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
   if (pml1 == NULL)
-    goto done;
+    return;

   uint64_t* pte = &pml1[pg_index.pml1];
   *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
-  do_reload = true;
-
-done:
-  if (do_reload && (flags & MM_PD_RELOAD))
-    amd64_reload_cr3 ();
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
 }

 /* Map a page into kernel page directory */
 void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
   mm_map_page (&kernel_pd, paddr, vaddr, flags);
+  amd64_reload_cr3 ();
 }

 /* Unmap a virtual address. TLB needs to be flushed afterwards */
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  bool do_reload = false;
-
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);

   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   struct pg_index pg_index = amd64_mm_page_index (vaddr);

   uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
   if (pml3 == NULL)
-    goto done;
+    return;
   uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
   if (pml2 == NULL)
-    goto done;
+    return;
   uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
   if (pml1 == NULL)
-    goto done;
+    return;

   uint64_t* pte = &pml1[pg_index.pml1];
-  if ((*pte) & AMD64_PG_PRESENT) {
+  if ((*pte) & AMD64_PG_PRESENT)
     *pte = 0;
-    do_reload = true;
-  }

   if (amd64_mm_is_table_empty (pml1)) {
     uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -210,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
     }
   }
-
-done:
-  if (do_reload && (flags & MM_PD_RELOAD))
-    amd64_reload_cr3 ();
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
 }

 /* Unmap a page from kernel page directory */
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
-  mm_unmap_page (&kernel_pd, vaddr, flags);
+void mm_unmap_kernel_page (uintptr_t vaddr) {
+  mm_unmap_page (&kernel_pd, vaddr);
+  amd64_reload_cr3 ();
 }

-/* Lock kernel page directory */
-void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }
-
-/* Unlock kernel page directory */
-void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }
-
 /* Allocate a userspace-ready page directory */
 uintptr_t mm_alloc_user_pd_phys (void) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -250,26 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
   return cr3;
 }

-/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
-void mm_reload (void) {
-  struct limine_mp_response* mp = limine_mp_request.response;
-  for (size_t i = 0; i < mp->cpu_count; i++) {
-    amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
-  }
-}
-
-bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+bool mm_validate (struct pd* pd, uintptr_t vaddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   bool ret = false;

-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
-
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -289,45 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
   ret = (pte & AMD64_PG_PRESENT) != 0;

 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
   return ret;
 }

-bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
   bool ok = true;
-  spin_lock_ctx_t ctxpd;
-
-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);

   for (size_t i = 0; i < size; i++) {
-    ok = mm_validate (pd, vaddr + i, 0);
+    ok = mm_validate (pd, vaddr + i);
     if (!ok)
       goto done;
   }

 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
   return ok;
 }

-uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   uintptr_t ret = 0;

-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
-
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);

   for (size_t i4 = 0; i4 < 512; i4++) {
@@ -358,25 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
   }

 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
   return ret;
 }

-uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-  spin_lock_ctx_t ctxmm, ctxpd;
-  spin_lock (&mm_lock, &ctxmm);
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   uintptr_t ret = 0;

-  if (flags & MM_PD_LOCK)
-    spin_lock (&pd->lock, &ctxpd);
-
   uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
   struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -400,25 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
   ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));

 done:
-  if (flags & MM_PD_LOCK)
-    spin_unlock (&pd->lock, &ctxpd);
-  spin_unlock (&mm_lock, &ctxmm);
   return ret;
 }

-/* TLB shootdown IRQ handler */
-static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
-  (void)arg, (void)regs;
-  amd64_reload_cr3 ();
-  DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
-}
-
-/* Continue initializing memory management subsystem for AMD64 after the essential parts were
- * initialized */
-void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }
-
 /* Initialize essentials for the AMD64 memory management subsystem */
 void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
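With MM_PD_LOCK/MM_PD_RELOAD gone, callers serialize explicitly (mm_kernel_lock ()/mm_kernel_unlock () or the procgroup lock) and the kernel-page wrappers reload CR3 themselves. A sketch of a caller-side check under the slimmed-down signatures; the handler and error code are illustrative, only mm_validate_buffer ()'s new shape comes from the diff above:

static int sys_copy_in_sketch (struct proc* caller, const void* ubuf, size_t len) {
  /* every byte of the user buffer must be mapped in the caller's page directory */
  if (!mm_validate_buffer (&caller->procgroup->pd, (uintptr_t)ubuf, len))
    return -1; /* illustrative error code */
  /* safe to translate and copy afterwards, e.g. via mm_v2p () */
  return 0;
}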

@@ -7,12 +7,9 @@
 #define PAGE_SIZE 4096

 struct pd {
-  spin_lock_t lock;
   uintptr_t cr3_paddr;
-  atomic_int refs;
 };

 void amd64_load_kernel_cr3 (void);
-void mm_init2 (void);

 #endif // _KERNEL_AMD64_MM_H

@@ -1,6 +1,7 @@
 #include <amd64/gdt.h>
 #include <amd64/proc.h>
 #include <aux/elf.h>
+#include <libk/align.h>
 #include <libk/list.h>
 #include <libk/rbtree.h>
 #include <libk/std.h>
@@ -8,18 +9,18 @@
 #include <limine/requests.h>
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
+#include <proc/mutex.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/proc.h>

-/* 0 is kpproc */
-static atomic_int pids = 1;
+static atomic_int pids = 0;

 struct proc* proc_from_elf (uint8_t* elf_contents) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  int rid;

   struct proc* proc = malloc (sizeof (*proc));
   if (proc == NULL)
@@ -31,67 +32,18 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
   atomic_store (&proc->state, PROC_READY);
   proc->pid = atomic_fetch_add (&pids, 1);

-  proc->resources = malloc (sizeof (*proc->resources));
-  if (proc->resources == NULL) {
+  proc->procgroup = procgroup_create ();
+  if (proc->procgroup == NULL) {
     free (proc);
     return NULL;
   }
-  proc->resources->tree = NULL;
-  proc->resources->lock = RW_SPIN_LOCK_INIT;
-  proc->resources->refs = 1;
-  proc->resources->sys_rids = 0;
-
-  proc->pd = malloc (sizeof (*proc->pd));
-  if (proc->pd == NULL) {
-    free (proc->resources);
-    free (proc);
-    return NULL;
-  }
-  proc->pd->lock = SPIN_LOCK_INIT;
-  proc->pd->refs = 1;
-  proc->pd->cr3_paddr = mm_alloc_user_pd_phys ();
-  if (proc->pd->cr3_paddr == 0) {
-    free (proc->pd);
-    free (proc->resources);
-    free (proc);
-    return NULL;
-  }
-
-  struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
-                                                 .managed = false};
-  rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-  struct proc_resource* kstk_r =
-      proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
-  if (kstk_r == NULL) {
-    pmm_free (proc->pd->cr3_paddr, 1);
-    free (proc->pd);
-    free (proc->resources);
-    free (proc);
-    return NULL;
-  }
-  proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
-
-  struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE,
-                                                 .managed = false};
-  rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-  struct proc_resource* ustk_r =
-      proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
-  if (ustk_r == NULL) {
-    kstk_r->ops.cleanup (proc, kstk_r);
-    free (kstk_r);
-    pmm_free (proc->pd->cr3_paddr, 1);
-    free (proc->pd);
-    free (proc->resources);
-    free (proc);
-    return NULL;
-  }
-  proc->pdata.user_stack = ustk_r->u.mem.paddr;
+  procgroup_attach (proc->procgroup, proc);

-  proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
-            MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
+  uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
+  proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
+
+  procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
+                 MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);

   proc->flags |= PROC_USTK_PREALLOC;
@@ -106,11 +58,10 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
   return proc;
 }

-struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
-                         uintptr_t entry) {
+struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+                         uintptr_t argument_ptr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  spin_lock_ctx_t ctxprt, ctxrs;
-  int rid;
+  spin_lock_ctx_t ctxprt;

   struct proc* proc = malloc (sizeof (*proc));
   if (proc == NULL)
@@ -124,54 +75,13 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_
   spin_lock (&proto->lock, &ctxprt);

-  proc->pd = proto->pd;
-  proc->mappings = proto->mappings;
-  atomic_fetch_add (&proto->pd->refs, 1);
-
-  proc->resources = proto->resources;
-  rw_spin_write_lock (&proc->resources->lock, &ctxrs);
-  atomic_fetch_add (&proc->resources->refs, 1);
-
-  struct rb_node_link* rnode;
-  rbtree_first (&proc->resources->tree, rnode);
-  while (rnode) {
-    struct rb_node_link* next;
-    rbtree_next (rnode, next);
-    struct proc_resource* resource =
-        rbtree_entry (rnode, struct proc_resource, local_resource_tree_link);
-    atomic_fetch_add (&resource->refs, 1);
-    rnode = next;
-  }
-  rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
+  proc->procgroup = proto->procgroup;
+  procgroup_attach (proc->procgroup, proc);

   spin_unlock (&proto->lock, &ctxprt);

-  uintptr_t vstack_bottom = vstack_top - stack_size;
-
-  uintptr_t pstack_bottom = mm_v2p (proc->pd, vstack_bottom, MM_PD_LOCK);
-  if (pstack_bottom == 0) {
-    free (proc);
-    return NULL;
-  }
-
-  struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
-                                                 .managed = false};
-  rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-  struct proc_resource* kstk_r =
-      proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
-  if (kstk_r == NULL) {
-    free (proc);
-    return NULL;
-  }
-  proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
-  proc->pdata.user_stack = pstack_bottom + stack_size;
+  uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
+  proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;

   proc->pdata.regs.ss = GDT_UDATA | 0x03;
   proc->pdata.regs.rsp = (uint64_t)vstack_top;
@@ -179,68 +89,50 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_
   proc->pdata.regs.cs = GDT_UCODE | 0x03;
   proc->pdata.regs.rip = (uint64_t)entry;

+  proc->uvaddr_argument = argument_ptr;
+  proc_init_tls (proc);
+
   return proc;
 }

 void proc_cleanup (struct proc* proc) {
-  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  spin_lock_ctx_t ctxprpd, ctxsq, ctxpr;
-
-  spin_lock (&proc->lock, &ctxpr);
-
-  /* clean suspension queue entries */
-  struct list_node_link *sq_link, *sq_link_tmp;
-  list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
-    struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
-    struct proc_suspension_q* sq = sq_entry->sq;
-    spin_lock (&sq->lock, &ctxsq);
-    /* remove from sq's list */
-    list_remove (sq->proc_list, &sq_entry->sq_link);
-    /* remove from proc's list */
-    list_remove (proc->sq_entries, &sq_entry->proc_link);
-    spin_unlock (&sq->lock, &ctxsq);
-    free (sq_entry);
-  }
-
-  spin_unlock (&proc->lock, &ctxpr);
-
-  /* clean resources */
-  proc_cleanup_resources (proc);
-
-  /* clean virtual address space */
-  if (atomic_fetch_sub (&proc->pd->refs, 1) == 1) {
-    DEBUG ("PID %d Free virtual address space\n", proc->pid);
-    struct list_node_link *mapping_link, *mapping_link_tmp;
-    spin_lock (&proc->pd->lock, &ctxprpd);
-    list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
-      struct proc_mapping* mapping =
-          list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
-      list_remove (proc->mappings, mapping_link);
-      free (mapping);
-    }
-    pmm_free (proc->pd->cr3_paddr, 1);
-    spin_unlock (&proc->pd->lock, &ctxprpd);
-    free (proc->pd);
-  }
-
-  /* clean kstack */
-  pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
-            KSTACK_SIZE / PAGE_SIZE);
-
-  /* clean ustack */
-  if ((proc->flags & PROC_USTK_PREALLOC))
-    pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);
-
-  DEBUG ("PID %d Free stacks\n", proc->pid);
+  proc_sqs_cleanup (proc);
+  proc_mutexes_cleanup (proc);
+  pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
+  procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
+  procgroup_detach (proc->procgroup, proc);

   /* clean the process */
   free (proc);
 }
+
+void proc_init_tls (struct proc* proc) {
+  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+
+  if (proc->procgroup->tls.tls_tmpl == NULL)
+    return;
+
+  size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
+  size_t pages = proc->procgroup->tls.tls_tmpl_pages;
+
+  uintptr_t tls_paddr;
+  uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
+  uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
+
+  uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
+  memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
+  memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
+
+  uintptr_t ktcb = k_tls_addr + tls_size;
+  uintptr_t utcb = tls_vaddr + tls_size;
+  *(uintptr_t*)ktcb = utcb;
+
+  proc->pdata.fs_base = utcb;
+  proc->pdata.tls_vaddr = tls_vaddr;
+}
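proc_init_tls () lays the block out in the x86-64 variant-II style: the template (.tdata plus zeroed .tbss) sits below the thread control block, the TCB's first word is its own user address, and FS.base points at the TCB, so TLS variables live at negative offsets from FS. A sketch of the resulting picture and of reading it back from userspace, assuming only what the code above establishes:

/*   tls_vaddr                          tls_vaddr + tls_size == fs_base
 *   | .tdata image | .tbss (zeroed)  | self-pointer (TCB) |            */
#include <stdint.h>

static inline uintptr_t tcb_self (void) {
  uintptr_t self;
  /* %fs:0 holds the TCB's own user address, as written by proc_init_tls () */
  __asm__ ("mov %%fs:0, %0" : "=r"(self));
  return self;
}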

@@ -4,17 +4,19 @@
 #include <amd64/intr.h>
 #include <libk/std.h>

-/// Top of userspace process' stack
+/* Top of userspace process' stack */
 #define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL

-/// Size of userspace process' stack
+/* Size of userspace process' stack */
 #define USTACK_SIZE (256 * PAGE_SIZE)

+/* procgroup_map () base address */
+#define PROC_MAP_BASE 0x0000700000000000
+
-/// Platform-dependent process data
+/* Platform-dependent process data */
 struct proc_platformdata {
   struct saved_regs regs;
-  uintptr_t user_stack;
   uintptr_t kernel_stack;
-  uint64_t gs_base;
+  uint64_t fs_base;
+  uintptr_t tls_vaddr;
 };

 #endif // _KERNEL_AMD64_PROC_H

kernel/amd64/procgroup.h (new file)

@@ -0,0 +1,13 @@
+#ifndef _KERNEL_AMD64_PROCGROUP_H
+#define _KERNEL_AMD64_PROCGROUP_H
+
+#include <libk/std.h>
+
+struct procgroup_tls {
+  uint8_t* tls_tmpl;
+  size_t tls_tmpl_size;
+  size_t tls_tmpl_total_size;
+  size_t tls_tmpl_pages;
+};
+
+#endif // _KERNEL_AMD64_PROCGROUP_H

@@ -14,9 +14,10 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu
   thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
   thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
+  amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);

   spin_unlock (&proc->lock, &ctxpr);
   spin_unlock (cpu_lock, ctxcpu);

-  amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd->cr3_paddr);
+  amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
 }

@@ -13,7 +13,7 @@
 extern void amd64_syscall_entry (void);

-int amd64_syscall_dispatch (void* stack_ptr) {
+uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
   spin_lock_ctx_t ctxcpu, ctxpr;
   amd64_load_kernel_cr3 ();
@@ -35,8 +35,7 @@ int amd64_syscall_dispatch (void* stack_ptr) {
     return -ST_SYSCALL_NOT_FOUND;
   }

-  int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
-  return result;
+  return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
 }

 void syscall_init (void) {

@@ -2,7 +2,7 @@
 #include <libk/list.h>
 #include <libk/std.h>
 #include <mm/liballoc.h>
-#include <sync/rw_spin_lock.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>

 #if defined(__x86_64__)
@@ -12,7 +12,7 @@
 struct irq* irq_table[0x100];

-static rw_spin_lock_t irqs_lock;
+static spin_lock_t irqs_lock = SPIN_LOCK_INIT;

 bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
   spin_lock_ctx_t ctxiqa;
@@ -26,9 +26,9 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
   irq->arg = arg;
   irq->irq_num = irq_num;

-  rw_spin_write_lock (&irqs_lock, &ctxiqa);
+  spin_lock (&irqs_lock, &ctxiqa);
   irq_table[irq_num] = irq;
-  rw_spin_write_unlock (&irqs_lock, &ctxiqa);
+  spin_unlock (&irqs_lock, &ctxiqa);

   return true;
 }
@@ -36,11 +36,11 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
 struct irq* irq_find (uint32_t irq_num) {
   spin_lock_ctx_t ctxiqa;

-  rw_spin_read_lock (&irqs_lock, &ctxiqa);
+  spin_lock (&irqs_lock, &ctxiqa);
   struct irq* irq = irq_table[irq_num];
-  rw_spin_read_unlock (&irqs_lock, &ctxiqa);
+  spin_unlock (&irqs_lock, &ctxiqa);

   return irq;
 }

@@ -1,20 +0,0 @@
-#ifndef _KERNEL_PROC_KPPROC_FB_H
-#define _KERNEL_PROC_KPPROC_FB_H
-
-#include <aux/compiler.h>
-#include <libk/std.h>
-
-/* data to expose as a kpproc resource */
-struct kpproc_fb {
-  uintptr_t paddr;
-  uint64_t w, h, pitch;
-  uint16_t bpp;
-  uint8_t red_mask_size;
-  uint8_t red_mask_shift;
-  uint8_t green_mask_size;
-  uint8_t green_mask_shift;
-  uint8_t blue_mask_size;
-  uint8_t blue_mask_shift;
-};
-
-#endif // _KERNEL_PROC_KPPROC_FB_H

@@ -2,5 +2,9 @@ Lock hierarchy for process scheduling:
 1. proc_tree_lock
 2. cpu->lock
-3. proc->lock
-4. sq->lock
+3. procgroup->lock
+4. proc->lock
+5. sq->lock
+
+1. procgroup_tree_lock
+2. procgroup->lock
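A sketch of what the documented order means in code; the function is illustrative and assumes only the lock fields named above:

static void lock_order_sketch (struct proc* proc) {
  spin_lock_ctx_t ctx1, ctx2, ctx3, ctx4;
  spin_lock (&proc_tree_lock, &ctx1);        /* 1. proc_tree_lock */
  spin_lock (&proc->cpu->lock, &ctx2);       /* 2. cpu->lock */
  spin_lock (&proc->procgroup->lock, &ctx3); /* 3. procgroup->lock */
  spin_lock (&proc->lock, &ctx4);            /* 4. proc->lock */
  /* 5. sq->lock would nest innermost; release in reverse order */
  spin_unlock (&proc->lock, &ctx4);
  spin_unlock (&proc->procgroup->lock, &ctx3);
  spin_unlock (&proc->cpu->lock, &ctx2);
  spin_unlock (&proc_tree_lock, &ctx1);
}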

@@ -1,33 +0,0 @@
-#include <libk/std.h>
-#include <mm/pmm.h>
-#include <proc/mem.h>
-#include <proc/proc.h>
-#include <proc/resource.h>
-#include <sync/spin_lock.h>
-
-bool proc_create_resource_mem (struct proc_resource_mem* mem, struct proc_resource_mem_init* init) {
-  if (init->pages == 0)
-    return false;
-
-  if (init->managed) {
-    mem->paddr = init->paddr;
-    mem->managed = true;
-  } else {
-    uintptr_t paddr = pmm_alloc (init->pages);
-    if (paddr == PMM_ALLOC_ERR)
-      return false;
-    mem->paddr = paddr;
-    mem->managed = false;
-  }
-
-  mem->pages = mem->alive_pages = init->pages;
-  return true;
-}
-
-void proc_cleanup_resource_mem (struct proc* proc, struct proc_resource* resource) {
-  (void)proc;
-
-  if (!resource->u.mem.managed)
-    pmm_free (resource->u.mem.paddr, resource->u.mem.pages);
-}

@@ -1,27 +0,0 @@
-#ifndef _KERNEL_PROC_MEM_H
-#define _KERNEL_PROC_MEM_H
-
-#include <libk/std.h>
-
-struct proc;
-struct proc_resource;
-
-struct proc_resource_mem {
-  struct proc_resource* resource;
-  uintptr_t paddr;
-  size_t pages;
-  ptrdiff_t alive_pages;
-  bool managed;
-};
-
-struct proc_resource_mem_init {
-  uintptr_t paddr;
-  size_t pages;
-  bool managed;
-};
-
-bool proc_create_resource_mem (struct proc_resource_mem* mem, struct proc_resource_mem_init* init);
-void proc_cleanup_resource_mem (struct proc* proc, struct proc_resource* resource);
-
-#endif // _KERNEL_PROC_MEM_H

@@ -5,99 +5,54 @@
 #include <mm/liballoc.h>
 #include <proc/mutex.h>
 #include <proc/proc.h>
+#include <proc/suspension_q.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/smp.h>
 #include <sys/spin_lock.h>

-static void proc_mutex_suspend (struct proc* proc, struct proc_suspension_q* sq,
-                                spin_lock_t* resource_lock, spin_lock_ctx_t* ctxrl) {
-  spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
-  struct cpu* cpu = proc->cpu;
-
-  struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
-  if (!sq_entry) {
-    spin_unlock (resource_lock, ctxrl);
-    return;
-  }
-
-  sq_entry->proc = proc;
-  sq_entry->sq = sq;
-
-  spin_lock (&cpu->lock, &ctxcpu);
-  spin_lock (&proc->lock, &ctxpr);
-  spin_lock (&sq->lock, &ctxsq);
-  spin_unlock (resource_lock, ctxrl);
-
-  atomic_store (&proc->state, PROC_SUSPENDED);
-
-  /* append to sq's list */
-  list_append (sq->proc_list, &sq_entry->sq_link);
-  /* append to proc's list */
-  list_append (proc->sq_entries, &sq_entry->proc_link);
-
-  list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
-  atomic_fetch_sub (&cpu->proc_run_q_count, 1);
-
-  if (cpu->proc_current == proc)
-    cpu->proc_current = NULL;
-  proc->cpu = NULL;
-
-  spin_unlock (&sq->lock, &ctxsq);
-  spin_unlock (&proc->lock, &ctxpr);
-  spin_unlock (&cpu->lock, &ctxcpu);
-
-  cpu_request_sched (cpu);
-}
-
-static void proc_mutex_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
-  spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
-  struct cpu* cpu = cpu_find_lightest ();
-  struct proc_suspension_q* sq = sq_entry->sq;
-
-  spin_lock (&cpu->lock, &ctxcpu);
-  spin_lock (&proc->lock, &ctxpr);
-  spin_lock (&sq->lock, &ctxsq);
-
-  /* remove from sq's list */
-  list_remove (sq->proc_list, &sq_entry->sq_link);
-  /* remove from proc's list */
-  list_remove (proc->sq_entries, &sq_entry->proc_link);
-
-  proc->cpu = cpu;
-  if (proc->sq_entries == NULL)
-    atomic_store (&proc->state, PROC_READY);
-
-  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
-  atomic_fetch_add (&cpu->proc_run_q_count, 1);
-
-  spin_unlock (&sq->lock, &ctxsq);
-  spin_unlock (&proc->lock, &ctxpr);
-  spin_unlock (&cpu->lock, &ctxcpu);
-
-  free (sq_entry);
-  cpu_request_sched (cpu);
-}
-
-bool proc_create_resource_mutex (struct proc_mutex* mutex) {
-  memset (mutex, 0, sizeof (*mutex));
-  return true;
-}
-
-void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resource) {
+void proc_mutexes_cleanup (struct proc* proc) {
+  spin_lock_ctx_t ctxpg, ctxrs;
+
+  spin_lock (&proc->procgroup->lock, &ctxpg);
+
+  struct rb_node_link* rnode;
+  rbtree_first (&proc->procgroup->resource_tree, rnode);
+  while (rnode) {
+    struct rb_node_link* next;
+    rbtree_next (rnode, next);
+    struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
+    rnode = next;
+    spin_lock (&resource->lock, &ctxrs);
+    if (resource->type != PR_MUTEX) {
+      spin_unlock (&resource->lock, &ctxrs);
+      continue;
+    }
+    if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
+      spin_unlock (&resource->lock, &ctxrs);
+      proc_mutex_unlock (proc, &resource->u.mutex);
+    }
+  }
+
+  spin_unlock (&proc->procgroup->lock, &ctxpg);
+}
+
+bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
   struct proc_mutex* mutex = &resource->u.mutex;
   spin_lock_ctx_t ctxmt, ctxsq;

   spin_lock (&mutex->resource->lock, &ctxmt);
   spin_lock (&mutex->suspension_q.lock, &ctxsq);

+  bool reschedule = PROC_NO_RESCHEDULE;
+
   while (mutex->suspension_q.proc_list != NULL) {
     struct list_node_link* node = mutex->suspension_q.proc_list;
     struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
@@ -107,7 +62,7 @@ void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resou
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);

-    proc_mutex_resume (suspended_proc, sq_entry);
+    reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);

     /* reacquire */
     spin_lock (&mutex->resource->lock, &ctxmt);
@@ -119,23 +74,23 @@ void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resou
   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);
+
+  return reschedule;
 }

-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
   spin_lock_ctx_t ctxmt;

-  for (;;) {
-    spin_lock (&mutex->resource->lock, &ctxmt);
+  spin_lock (&mutex->resource->lock, &ctxmt);

   if (!mutex->locked || mutex->owner == proc) {
     mutex->locked = true;
     mutex->owner = proc;
     spin_unlock (&mutex->resource->lock, &ctxmt);
-    return;
+    return PROC_NO_RESCHEDULE;
   }
-
-    proc_mutex_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
-  }
+
+  return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
 }

 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
@@ -145,7 +100,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
   if (mutex->owner != proc) {
     spin_unlock (&mutex->resource->lock, &ctxmt);
-    return false;
+    return PROC_NO_RESCHEDULE;
   }

   spin_lock (&mutex->suspension_q.lock, &ctxsq);
@@ -162,9 +117,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);

-    proc_mutex_resume (resumed_proc, sq_entry);
-
-    return true;
+    return proc_sq_resume (resumed_proc, sq_entry);
   }

   mutex->locked = false;
@@ -173,5 +126,5 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);

-  return true;
+  return PROC_NEED_RESCHEDULE;
 }
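proc_mutex_lock ()/proc_mutex_unlock () no longer loop or reschedule internally; they return a hint the caller forwards to the scheduler. A sketch of a consuming call site, assuming only the PROC_NEED_RESCHEDULE contract from the code above (the wrapper itself is illustrative):

static void mutex_lock_call_site_sketch (struct proc* caller, struct proc_mutex* mutex) {
  if (proc_mutex_lock (caller, mutex) == PROC_NEED_RESCHEDULE)
    proc_sched (); /* the caller parked on the suspension queue; pick another proc */
}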

@@ -15,9 +15,9 @@ struct proc_mutex {
   struct proc* owner;
 };

-bool proc_create_resource_mutex (struct proc_mutex* mutex);
-void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resource);
-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
+bool proc_cleanup_resource_mutex (struct proc_resource* resource);
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
+void proc_mutexes_cleanup (struct proc* proc);

 #endif // _KERNEL_PROC_MUTEX_H

@@ -9,11 +9,10 @@
 #include <limine/requests.h>
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
-#include <proc/kpproc_fb.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <rd/rd.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
@@ -29,108 +28,16 @@
 #define SCHED_REAP_FREQ 10

 static struct rb_node_link* proc_tree = NULL;
-static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
 static atomic_int sched_cycles = 0;

-/* kernel pseudo process */
-static struct proc kpproc;
-
 static bool proc_check_elf (uint8_t* elf) {
   if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
     return false;
   return true;
 }

-bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
-               uint32_t flags) {
-  spin_lock_ctx_t ctxprpd;
-
-  struct proc_mapping* mapping = malloc (sizeof (*mapping));
-  if (mapping == NULL)
-    return false;
-
-  mapping->paddr = start_paddr;
-  mapping->vaddr = start_vaddr;
-  mapping->size = pages * PAGE_SIZE;
-
-  flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */
-
-  spin_lock (&proc->pd->lock, &ctxprpd);
-
-  list_append (proc->mappings, &mapping->proc_mappings_link);
-
-  for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
-       vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
-    mm_map_page (proc->pd, ppage, vpage, flags);
-  }
-
-  spin_unlock (&proc->pd->lock, &ctxprpd);
-
-  return true;
-}
-
-bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
-  size_t unmap_size = pages * PAGE_SIZE;
-  uintptr_t end_vaddr = start_vaddr + unmap_size;
-  struct list_node_link *mapping_link, *mapping_link_tmp;
-  bool used_tail_mapping = false;
-  spin_lock_ctx_t ctxprpd;
-
-  struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
-  if (tail_mapping == NULL)
-    return false;
-
-  spin_lock (&proc->pd->lock, &ctxprpd);
-
-  list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
-    struct proc_mapping* mapping =
-        list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
-    uintptr_t m_end = mapping->vaddr + mapping->size;
-
-    /* check overlap */
-    if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
-      /* split in the middle */
-      if ((start_vaddr > mapping->vaddr) && (end_vaddr < m_end)) {
-        tail_mapping->vaddr = end_vaddr;
-        tail_mapping->paddr = mapping->paddr + (end_vaddr - mapping->vaddr);
-        tail_mapping->size = m_end - end_vaddr;
-        mapping->size = start_vaddr - mapping->vaddr;
-        list_insert_after (proc->mappings, &mapping->proc_mappings_link,
-                           &tail_mapping->proc_mappings_link);
-        used_tail_mapping = true;
-        break;
-      } else if ((start_vaddr <= mapping->vaddr) && (end_vaddr < m_end)) { /* shrink left */
-        size_t diff = end_vaddr - mapping->vaddr;
-        mapping->vaddr += diff;
-        mapping->paddr += diff;
-        mapping->size -= diff;
-      } else if ((start_vaddr > mapping->vaddr) && (end_vaddr >= m_end)) { /* shrink right */
-        mapping->size = start_vaddr - mapping->vaddr;
-      } else { /* full overlap */
-        list_remove (proc->mappings, &mapping->proc_mappings_link);
-        free (mapping);
-      }
-    }
-  }
-
-  if (!used_tail_mapping)
-    free (tail_mapping);
-
-  for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
-    mm_unmap_page (proc->pd, vpage, 0);
-  }
-
-  spin_unlock (&proc->pd->lock, &ctxprpd);
-
-  return true;
-}
-
 struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
   struct elf_aux aux;
@@ -155,25 +62,37 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
       size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);

-      struct proc_resource_mem_init mem_init = {.pages = blks};
-      int rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-      struct proc_resource* r =
-          proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&mem_init);
-      if (r == NULL) {
-        DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
-      }
-      uintptr_t p_addr = r->u.mem.paddr;
-
-      memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
-      memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
-              (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
-
       uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
       if (phdr->p_flags & PF_W)
         pg_flags |= MM_PG_RW;

-      proc_map (proc, p_addr, v_addr, blks, pg_flags);
+      uintptr_t p_addr;
+      procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
+
+      memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
+      memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
+              (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
+    } break;
+    case PT_TLS: {
+#if defined(__x86_64__)
+      if (phdr->p_memsz > 0) {
+        size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
+        size_t tls_size = align_up (phdr->p_memsz, tls_align);
+        size_t tls_total_needed = tls_size + sizeof (uintptr_t);
+        size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
+
+        proc->procgroup->tls.tls_tmpl_pages = blks;
+        proc->procgroup->tls.tls_tmpl_size = tls_size;
+        proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
+        proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
+        memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
+        memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
+                phdr->p_filesz);
+
+        proc_init_tls (proc);
+      }
+#endif
     } break;
   }
 }
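A worked instance of the PT_TLS sizing above, with assumed header values:

/* assume p_memsz = 0x104, p_align = 0x10:
 *   tls_size         = align_up (0x104, 0x10)       = 0x110
 *   tls_total_needed = 0x110 + sizeof (uintptr_t)   = 0x118  (room for the TCB self-pointer)
 *   blks             = div_align_up (0x118, 0x1000) = 1 page for the template */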
@@ -185,7 +104,6 @@ struct proc* proc_spawn_rd (char* name) {
   struct rd_file* rd_file = rd_get_file (name);

   bool ok = proc_check_elf (rd_file->content);
-  DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
   if (!ok)
     return NULL;
@@ -197,9 +115,9 @@ struct proc* proc_find_pid (int pid) {
   spin_lock_ctx_t ctxprtr;
   struct proc* proc = NULL;

-  rw_spin_read_lock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);
   rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
-  rw_spin_read_unlock (&proc_tree_lock, &ctxprtr);
+  spin_unlock (&proc_tree_lock, &ctxprtr);

   return proc;
 }
@@ -208,21 +126,20 @@ void proc_register (struct proc* proc, struct cpu* cpu1) {
   spin_lock_ctx_t ctxcpu, ctxprtr;

   proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
-  DEBUG ("Assigning CPU %d to PID %d\n", proc->cpu->id, proc->pid);
   struct cpu* cpu = proc->cpu;

-  rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-  rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-  rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
-
+  spin_lock (&proc_tree_lock, &ctxprtr);
   spin_lock (&cpu->lock, &ctxcpu);

-  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
-  atomic_fetch_add (&cpu->proc_run_q_count, 1);
+  rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
+  atomic_fetch_add (&cpu->proc_run_q_count, 1);
+  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);

   if (cpu->proc_current == NULL)
     cpu->proc_current = proc;

+  spin_unlock (&proc_tree_lock, &ctxprtr);
   spin_unlock (&cpu->lock, &ctxcpu);
 }
@@ -261,7 +178,7 @@ static void proc_reap (void) {
   spin_lock_ctx_t ctxprtr;
   spin_lock_ctx_t ctxpr;

-  rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);

   struct rb_node_link* node;
   rbtree_first (&proc_tree, node);
@@ -281,7 +198,7 @@ static void proc_reap (void) {
     node = next;
   }

-  rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+  spin_unlock (&proc_tree_lock, &ctxprtr);

   struct list_node_link *reap_link, *reap_link_tmp;
   list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -348,66 +265,12 @@ static void proc_irq_sched (void* arg, void* regs) {
   proc_sched ();
 }

-static void proc_kpproc_init (void) {
-  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-
-  memset (&kpproc, 0, sizeof (kpproc));
-  kpproc.lock = SPIN_LOCK_INIT;
-  kpproc.state = PROC_PSEUDO;
-  kpproc.pid = 0;
-
-  kpproc.resources = malloc (sizeof (*kpproc.resources));
-  kpproc.resources->tree = NULL;
-  kpproc.resources->lock = RW_SPIN_LOCK_INIT;
-  kpproc.resources->refs = 1;
-  kpproc.resources->sys_rids = 0;
-
-  kpproc.pd = mm_get_kernel_pd ();
-  kpproc.cpu = thiscpu;
-
-  rbtree_insert (struct proc, &proc_tree, &kpproc.proc_tree_link, proc_tree_link, pid);
-
-  /* prepare kernel resources */
-  {
-    /* frame buffer */
-    struct limine_framebuffer_response* fb = limine_framebuffer_request.response;
-    struct kpproc_fb fb_info = {
-        .paddr = (uintptr_t)fb->framebuffers[0]->address - (uintptr_t)hhdm->offset,
-        .w = fb->framebuffers[0]->width,
-        .h = fb->framebuffers[0]->height,
-        .pitch = fb->framebuffers[0]->pitch,
-        .bpp = fb->framebuffers[0]->bpp,
-        .red_mask_size = fb->framebuffers[0]->red_mask_size,
-        .red_mask_shift = fb->framebuffers[0]->red_mask_shift,
-        .green_mask_size = fb->framebuffers[0]->green_mask_size,
-        .green_mask_shift = fb->framebuffers[0]->green_mask_shift,
-        .blue_mask_size = fb->framebuffers[0]->blue_mask_size,
-        .blue_mask_shift = fb->framebuffers[0]->blue_mask_shift,
-    };
-    DEBUG ("Framebuffer address %p\n", fb_info.paddr);
-
-    size_t pages = align_up (sizeof (fb_info), PAGE_SIZE) / PAGE_SIZE;
-    uintptr_t fb_info_memblk_paddr = pmm_alloc (pages);
-    memcpy ((struct kpproc_fb*)((uintptr_t)hhdm->offset + fb_info_memblk_paddr), &fb_info,
-            sizeof (fb_info));
-
-    struct proc_resource_mem_init mem_init = {
-        .pages = pages, .paddr = fb_info_memblk_paddr, .managed = true};
-    proc_create_resource (&kpproc, 0, PR_MEM, RV_PUBLIC, &mem_init);
-  }
-}
-
 void proc_init (void) {
 #if defined(__x86_64__)
   irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
   irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
 #endif

-  proc_kpproc_init ();
-
   struct proc* spin_proc = proc_spawn_rd ("spin.exe");
   proc_register (spin_proc, thiscpu);
@@ -415,6 +278,6 @@ void proc_init (void) {
   proc_register (init, NULL);

   spin_lock_ctx_t ctxcpu;
-  spin_lock (&init->cpu->lock, &ctxcpu);
-  do_sched (init, &init->cpu->lock, &ctxcpu);
+  spin_lock (&spin_proc->cpu->lock, &ctxcpu);
+  do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
 }

@@ -6,9 +6,9 @@
 #include <libk/list.h>
 #include <libk/rbtree.h>
 #include <libk/std.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <proc/suspension_q.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>
@@ -17,60 +17,37 @@
 #include <amd64/proc.h> /* USTACK_SIZE */
 #endif

+#define PROC_NEED_RESCHEDULE true
+#define PROC_NO_RESCHEDULE false
+
 /* process states */
 #define PROC_READY 0
 #define PROC_DEAD 1
 #define PROC_SUSPENDED 2
-#define PROC_PSEUDO 3

 /* process flags */
 #define PROC_USTK_PREALLOC (1 << 0)

 struct cpu;

-struct proc_mapping {
-  struct list_node_link proc_mappings_link;
-  uintptr_t paddr;
-  uintptr_t vaddr;
-  size_t size;
-};
-
-struct proc_resources {
-  atomic_int refs;
-  atomic_int sys_rids;
-  struct rb_node_link* tree;
-  rw_spin_lock_t lock;
-};
-
-struct proc_sq_entry {
-  struct list_node_link sq_link;
-  struct list_node_link proc_link;
-  struct proc* proc;
-  struct proc_suspension_q* sq;
-};
-
 struct proc {
   int pid;
   struct rb_node_link proc_tree_link;
+  struct rb_node_link procgroup_memb_tree_link;
   struct list_node_link cpu_run_q_link;
   struct list_node_link reap_link;
   struct list_node_link* sq_entries;
+  struct procgroup* procgroup;

-  struct list_node_link* mappings; /* pd.lock implicitly protects this field */
   struct proc_platformdata pdata;
   uint32_t flags;
-  struct pd* pd;
   spin_lock_t lock;
   struct cpu* cpu;
   atomic_int state;
-  struct proc_resources* resources;
+  uintptr_t uvaddr_argument;
 };

 void proc_sched (void);
 void proc_kill (struct proc* proc);
-bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
-               uint32_t flags);
-bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages);
 struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
 void proc_register (struct proc* proc, struct cpu* cpu);
 struct proc* proc_find_pid (int pid);

kernel/proc/procgroup.c (new file, +218 lines)

@@ -0,0 +1,218 @@
#include <libk/rbtree.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
static struct rb_node_link* procgroup_tree = NULL;
static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
static atomic_int pgids = 0;
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr) {
spin_lock_ctx_t ctxpg;
spin_lock (&procgroup->lock, &ctxpg);
vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
struct proc_mapping* mapping = malloc (sizeof (*mapping));
if (mapping == NULL) {
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}
uintptr_t paddr = pmm_alloc (pages);
if (paddr == PMM_ALLOC_ERR) {
free (mapping);
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}
if (out_paddr != NULL)
*out_paddr = paddr;
mapping->paddr = paddr;
mapping->vaddr = vaddr;
mapping->size = pages * PAGE_SIZE;
procgroup->map_base += pages * PAGE_SIZE;
list_append (procgroup->mappings, &mapping->proc_mappings_link);
for (uintptr_t vpage = vaddr, ppage = paddr; vpage < vaddr + pages * PAGE_SIZE;
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
mm_map_page (&procgroup->pd, ppage, vpage, flags);
}
spin_unlock (&procgroup->lock, &ctxpg);
return vaddr;
}
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false;
spin_lock_ctx_t ctxpg;
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;
spin_lock (&procgroup->lock, &ctxpg);
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
uintptr_t m_start = mapping->vaddr;
uintptr_t m_end = mapping->vaddr + mapping->size;
/* check overlap */
if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
uintptr_t free_vend = (end_vaddr < m_end) ? end_vaddr : m_end;
size_t free_size = free_vend - free_vstart;
uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);
pmm_free (ppage_to_free, free_size / PAGE_SIZE);
/* split in the middle */
if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
tail_mapping->vaddr = end_vaddr;
tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
tail_mapping->size = m_end - end_vaddr;
mapping->size = start_vaddr - m_start;
list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
&tail_mapping->proc_mappings_link);
used_tail_mapping = true;
break;
} else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) { /* shrink left */
size_t diff = end_vaddr - m_start;
mapping->vaddr += diff;
mapping->paddr += diff;
mapping->size -= diff;
} else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) { /* shrink right */
mapping->size = start_vaddr - m_start;
} else { /* full overlap */
list_remove (procgroup->mappings, &mapping->proc_mappings_link);
free (mapping);
}
}
}
if (!used_tail_mapping)
free (tail_mapping);
for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
mm_unmap_page (&procgroup->pd, vpage);
}
spin_unlock (&procgroup->lock, &ctxpg);
return true;
}
struct procgroup* procgroup_create (void) {
spin_lock_ctx_t ctxpgtr;
struct procgroup* procgroup = malloc (sizeof (*procgroup));
if (procgroup == NULL) {
return NULL;
}
procgroup->refs = 0;
procgroup->memb_proc_tree = NULL;
procgroup->lock = SPIN_LOCK_INIT;
procgroup->pgid = atomic_fetch_add (&pgids, 1);
procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
procgroup->map_base = PROC_MAP_BASE;
spin_lock (&procgroup_tree_lock, &ctxpgtr);
rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
procgroup_tree_link, pgid);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
return procgroup;
}
void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxpr;
spin_lock (&procgroup->lock, &ctxpg);
spin_lock (&proc->lock, &ctxpr);
rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
procgroup_memb_tree_link, pid);
atomic_fetch_add (&procgroup->refs, 1);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
}
void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr;
spin_lock (&procgroup->lock, &ctxpg);
spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
int refs = atomic_fetch_sub (&procgroup->refs, 1);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
if (refs == 1) {
spin_lock (&procgroup_tree_lock, &ctxpgtr);
spin_lock (&procgroup->lock, &ctxpg);
rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
spin_unlock (&procgroup->lock, &ctxpg);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
/* delete resources */
struct rb_node_link* rnode;
rbtree_first (&procgroup->resource_tree, rnode);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
struct proc_resource* resource =
rbtree_entry (rnode, struct proc_resource, resource_tree_link);
rnode = next;
proc_delete_resource (resource);
}
struct list_node_link *mapping_link, *mapping_link_tmp;
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
free (mapping);
}
pmm_free (procgroup->pd.cr3_paddr, 1);
free (procgroup->tls.tls_tmpl);
free (procgroup);
}
}
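
A brief usage sketch of the mapping API above (the caller and buffer size are hypothetical): passing vaddr == 0 lets the group's bump pointer at map_base choose the address, and out_paddr optionally reports the backing physical range.

#include <proc/procgroup.h>
#include <sys/mm.h>

/* Hypothetical caller, for illustration only. */
static uintptr_t group_alloc_shared (struct procgroup* pg) {
    uintptr_t paddr;
    /* vaddr == 0: procgroup_map () picks the next slot at pg->map_base */
    uintptr_t vaddr =
        procgroup_map (pg, 0, 4, MM_PG_PRESENT | MM_PG_RW | MM_PG_USER, &paddr);
    return vaddr; /* 0 on allocation failure */
}

static void group_free_shared (struct procgroup* pg, uintptr_t vaddr) {
    /* procgroup_unmap () also returns the physical pages to the PMM */
    procgroup_unmap (pg, vaddr, 4);
}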

kernel/proc/procgroup.h (new file, +43 lines)

@@ -0,0 +1,43 @@
#ifndef _KERNEL_PROC_PROCGROUP_H
#define _KERNEL_PROC_PROCGROUP_H
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/procgroup.h>
struct proc;
struct proc_mapping {
struct list_node_link proc_mappings_link;
uintptr_t paddr;
uintptr_t vaddr;
size_t size;
};
struct procgroup {
int pgid;
struct rb_node_link procgroup_tree_link;
struct rb_node_link* memb_proc_tree;
spin_lock_t lock;
atomic_int refs;
struct rb_node_link* resource_tree;
atomic_int sys_rids;
struct pd pd;
struct list_node_link* mappings;
uintptr_t map_base;
struct procgroup_tls tls;
};
struct procgroup* procgroup_create (void);
void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr);
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
#endif // _KERNEL_PROC_PROCGROUP_H
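
For orientation, a sketch of the intended lifecycle using the declarations above (the pairing and error handling are illustrative): the refcount moves with attach/detach, and the group's resources, mappings, page directory, and TLS template are freed when the last member detaches.

#include <proc/proc.h>
#include <proc/procgroup.h>

/* Put two processes into one shared address space (illustrative). */
static bool spawn_pair (struct proc* a, struct proc* b) {
    struct procgroup* pg = procgroup_create ();
    if (pg == NULL)
        return false;
    procgroup_attach (pg, a); /* refs: 0 -> 1 */
    procgroup_attach (pg, b); /* refs: 1 -> 2 */
    a->procgroup = pg;        /* attach does not set the back-pointer itself */
    b->procgroup = pg;
    return true;
}
/* Each exiting member calls procgroup_detach (); the last one frees the group. */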

kernel/proc/resource.c

@@ -7,169 +7,53 @@
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
+ #include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
- static struct rb_node_link* resource_tree = NULL;
- static rw_spin_lock_t resource_tree_lock = RW_SPIN_LOCK_INIT;
- void proc_cleanup_resources (struct proc* proc) {
- spin_lock_ctx_t ctxrs;
- rw_spin_write_lock (&proc->resources->lock, &ctxrs);
- struct rb_node_link* rnode;
- rbtree_first (&proc->resources->tree, rnode);
- while (rnode) {
- struct rb_node_link* next;
- rbtree_next (rnode, next);
- struct proc_resource* resource =
- rbtree_entry (rnode, struct proc_resource, local_resource_tree_link);
- rnode = next;
- proc_drop_resource (proc, resource, false);
- }
- rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
- if (atomic_fetch_sub (&proc->resources->refs, 1) == 1) {
- free (proc->resources);
- }
- }
- void proc_drop_resource (struct proc* proc, struct proc_resource* resource, bool lock) {
- spin_lock_ctx_t ctxrs;
- if (atomic_fetch_sub (&resource->refs, 1) == 1) {
- DEBUG ("resource=%p created_by=%d vis=%d type=%d rid=%d refs=%d\n", resource,
- resource->created_by_pid, resource->visibility, resource->type, resource->rid,
- atomic_load (&resource->refs));
- switch (resource->visibility) {
- case RV_PRIVATE: {
- if (lock)
- rw_spin_write_lock (&proc->resources->lock, &ctxrs);
- rbtree_delete (&proc->resources->tree, &resource->local_resource_tree_link);
- if (lock)
- rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
- } break;
- case RV_PUBLIC: {
- if (lock)
- rw_spin_write_lock (&resource_tree_lock, &ctxrs);
- rbtree_delete (&resource_tree, &resource->global_resource_tree_link);
- if (lock)
- rw_spin_write_unlock (&resource_tree_lock, &ctxrs);
- } break;
- default: {
- assert (0);
- } break;
- }
- resource->ops.cleanup (proc, resource);
- free (resource);
- }
- }
- struct proc_resource* proc_find_resource (struct proc* proc, int rid, int vis) {
+ struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid) {
+ spin_lock_ctx_t ctxpg;
struct proc_resource* resource = NULL;
- spin_lock_ctx_t ctxrs;
- switch (vis) {
- case RV_PRIVATE: {
- /* User wants to create a private resource, so search locally */
- rw_spin_read_lock (&proc->resources->lock, &ctxrs);
- rbtree_find (struct proc_resource, &proc->resources->tree, rid, resource,
- local_resource_tree_link, rid);
- rw_spin_read_unlock (&proc->resources->lock, &ctxrs);
- } break;
- case RV_PUBLIC: {
- /* User wants to create a public resource, so search globally */
- rw_spin_read_lock (&resource_tree_lock, &ctxrs);
- rbtree_find (struct proc_resource, &resource_tree, rid, resource, global_resource_tree_link,
- rid);
- rw_spin_read_unlock (&resource_tree_lock, &ctxrs);
- } break;
- default: {
- assert (0);
- } break;
- }
+ spin_lock (&procgroup->lock, &ctxpg);
+ rbtree_find (struct proc_resource, &procgroup->resource_tree, rid, resource, resource_tree_link,
+ rid);
+ spin_unlock (&procgroup->lock, &ctxpg);
return resource;
}
- struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
- void* data) {
- spin_lock_ctx_t ctxrs;
- /* Check if resource RID already exists */
- struct proc_resource* resource_check = proc_find_resource (proc, rid, vis);
- /* Resource was found either way, so it already exists */
- if (resource_check != NULL)
- return NULL;
- /* create the resource */
- struct proc_resource* resource = malloc (sizeof (*resource));
+ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) {
+ spin_lock_ctx_t ctxpg;
+ struct proc_resource* resource;
+ resource = proc_find_resource (procgroup, rid);
+ if (resource != NULL)
+ return resource;
+ resource = malloc (sizeof (*resource));
if (resource == NULL)
return NULL;
memset (resource, 0, sizeof (*resource));
resource->lock = SPIN_LOCK_INIT;
- resource->type = type;
- resource->refs = 1;
+ resource->ops.cleanup = &proc_cleanup_resource_mutex;
+ resource->u.mutex.resource = resource;
resource->rid = rid;
- resource->visibility = vis;
- resource->created_by_pid = proc->pid;
+ resource->type = PR_MUTEX;
- switch (resource->type) {
- case PR_MEM: {
- struct proc_resource_mem_init* mem_init = data;
- proc_create_resource_mem (&resource->u.mem, mem_init);
- resource->ops.cleanup = &proc_cleanup_resource_mem;
- resource->u.mem.resource = resource;
- DEBUG ("PR_MEM resource=%p created_by=%d, type=%d rid=%d paddr=%p, pages=%zu\n", resource,
- resource->created_by_pid, resource->type, resource->rid, resource->u.mem.paddr,
- resource->u.mem.pages);
- } break;
- case PR_MUTEX: {
- proc_create_resource_mutex (&resource->u.mutex);
- resource->ops.cleanup = &proc_cleanup_resource_mutex;
- resource->u.mutex.resource = resource;
- DEBUG ("PR_MUTEX resource=%p created_by=%d type=%d rid=%d\n", resource,
- resource->created_by_pid, resource->type, resource->rid);
- } break;
- default: {
- free (resource);
- return NULL;
- } break;
- }
- switch (resource->visibility) {
- case RV_PRIVATE: {
- rw_spin_write_lock (&proc->resources->lock, &ctxrs);
- rbtree_insert (struct proc_resource, &proc->resources->tree,
- &resource->local_resource_tree_link, local_resource_tree_link, rid);
- rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
- } break;
- case RV_PUBLIC: {
- rw_spin_write_lock (&resource_tree_lock, &ctxrs);
- rbtree_insert (struct proc_resource, &resource_tree, &resource->global_resource_tree_link,
- global_resource_tree_link, rid);
- rw_spin_write_unlock (&resource_tree_lock, &ctxrs);
- } break;
- default: {
- assert (0);
- } break;
- }
+ spin_lock (&procgroup->lock, &ctxpg);
+ rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
+ resource_tree_link, rid);
+ spin_unlock (&procgroup->lock, &ctxpg);
return resource;
}
+ bool proc_delete_resource (struct proc_resource* resource) {
+ bool reschedule = resource->ops.cleanup (resource);
+ free (resource);
+ return reschedule;
+ }

kernel/proc/resource.h

@@ -4,40 +4,29 @@
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
- #include <proc/mem.h>
#include <proc/mutex.h>
#include <sync/spin_lock.h>
- #define PR_MEM 0
#define PR_MUTEX 1
- #define RV_PRIVATE 0
- #define RV_PUBLIC 1
struct proc;
+ struct procgroup;
struct proc_resource {
int type;
int rid;
- int visibility;
spin_lock_t lock;
- atomic_int refs;
- struct rb_node_link global_resource_tree_link;
- struct rb_node_link local_resource_tree_link;
+ struct rb_node_link resource_tree_link;
union {
- struct proc_resource_mem mem;
struct proc_mutex mutex;
} u;
struct {
- void (*cleanup) (struct proc* proc, struct proc_resource* resource);
+ bool (*cleanup) (struct proc_resource* resource);
} ops;
- int created_by_pid;
};
- struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
- void* data);
- struct proc_resource* proc_find_resource (struct proc* proc, int rid, int vis);
- void proc_drop_resource (struct proc* proc, struct proc_resource* resource, bool lock);
- void proc_cleanup_resources (struct proc* proc);
+ struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
+ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
+ bool proc_delete_resource (struct proc_resource* resource);
#endif // _KERNEL_PROC_RESOURCE_H

kernel/proc/src.mk

@@ -1,9 +1,11 @@
c += proc/proc.c \
proc/resource.c \
proc/mutex.c \
- proc/mem.c
+ proc/procgroup.c \
+ proc/suspension_q.c
o += proc/proc.o \
proc/resource.o \
proc/mutex.o \
- proc/mem.o
+ proc/procgroup.o \
+ proc/suspension_q.o

kernel/proc/suspension_q.c (new file, +111 lines)

@@ -0,0 +1,111 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return PROC_NO_RESCHEDULE;
}
sq_entry->proc = proc;
sq_entry->sq = sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
spin_unlock (resource_lock, ctxrl);
atomic_store (&proc->state, PROC_SUSPENDED);
/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);
/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
proc->cpu = NULL;
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
return PROC_NEED_RESCHEDULE;
}
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
proc->cpu = cpu;
if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
free (sq_entry);
return PROC_NEED_RESCHEDULE;
}
void proc_sqs_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;
spin_lock (&proc->lock, &ctxpr);
/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
spin_unlock (&sq->lock, &ctxsq);
free (sq_entry);
}
spin_unlock (&proc->lock, &ctxpr);
}
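
A sketch of how a blocking primitive is expected to drive this queue (the contended flag is illustrative; the mutex code is the real caller): the caller hands over the lock protecting its resource, proc_sq_suspend releases it only after the CPU, proc, and queue locks are held, and the returned boolean tells the syscall layer whether a proc_sched () call is needed.

#include <proc/proc.h>
#include <proc/suspension_q.h>

/* Hypothetical wait path for a resource with an embedded suspension q. */
static bool resource_wait (struct proc* self, struct proc_suspension_q* sq,
                           spin_lock_t* res_lock, spin_lock_ctx_t* ctx,
                           bool contended) {
    if (!contended) {
        spin_unlock (res_lock, ctx); /* nothing to wait for */
        return PROC_NO_RESCHEDULE;
    }
    /* parks self on sq and drops res_lock without a wakeup window */
    return proc_sq_suspend (self, sq, res_lock, ctx);
}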

kernel/proc/suspension_q.h

@@ -4,9 +4,23 @@
#include <libk/list.h>
#include <sync/spin_lock.h>
+ struct proc;
struct proc_suspension_q {
struct list_node_link* proc_list;
spin_lock_t lock;
};
+ struct proc_sq_entry {
+ struct list_node_link sq_link;
+ struct list_node_link proc_link;
+ struct proc* proc;
+ struct proc_suspension_q* sq;
+ };
+ void proc_sqs_cleanup (struct proc* proc);
+ bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+ spin_lock_ctx_t* ctxrl);
+ bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
#endif // _KERNEL_PROC_SUSPENTION_Q_H

kernel/sync/rw_spin_lock.c (deleted)

@@ -1,67 +0,0 @@
#include <libk/assert.h>
#include <libk/std.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/irq.h>
#include <sys/spin_lock.h>
#define WRITER_WAIT (1U << 31)
#define READER_MASK (~WRITER_WAIT)
void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t value;
irq_save (ctx);
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);
if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
memory_order_relaxed)) {
return;
}
}
spin_lock_relax ();
}
}
void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
assert ((old & READER_MASK) > 0);
irq_restore (ctx);
}
void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t value;
irq_save (ctx);
/* announce writer */
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);
if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
memory_order_acquire, memory_order_relaxed))
break;
} else {
spin_lock_relax ();
}
}
/* wait for readers */
for (;;) {
value = atomic_load_explicit (rw, memory_order_acquire);
if ((value & READER_MASK) == 0)
return;
spin_lock_relax ();
}
}
void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
atomic_store_explicit (rw, 0, memory_order_release);
irq_restore (ctx);
}

kernel/sync/rw_spin_lock.h (deleted)

@@ -1,17 +0,0 @@
#ifndef _KERNEL_SYNC_RW_SPIN_LOCK_H
#define _KERNEL_SYNC_RW_SPIN_LOCK_H
#include <libk/std.h>
#include <sync/spin_lock.h>
#include <sys/spin_lock.h>
#define RW_SPIN_LOCK_INIT 0
typedef _Atomic (uint32_t) rw_spin_lock_t;
void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H

kernel/sync/src.mk

@@ -1,5 +1,3 @@
- c += sync/spin_lock.c \
- sync/rw_spin_lock.c
+ c += sync/spin_lock.c
- o += sync/spin_lock.o \
- sync/rw_spin_lock.o
+ o += sync/spin_lock.o

kernel/sys/mm.h

@@ -2,6 +2,7 @@
#define _KERNEL_SYS_MM_H
#include <libk/std.h>
+ #include <sync/spin_lock.h>
#if defined(__x86_64__)
#include <amd64/mm.h>
@@ -10,21 +11,18 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
- #define MM_PD_RELOAD (1 << 30)
- #define MM_PD_LOCK (1 << 31)
uintptr_t mm_alloc_user_pd_phys (void);
- void mm_reload (void);
+ void mm_kernel_lock (spin_lock_ctx_t* ctx);
+ void mm_kernel_unlock (spin_lock_ctx_t* ctx);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
- void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
+ void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
- void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
+ void mm_unmap_kernel_page (uintptr_t vaddr);
- void mm_lock_kernel (void);
- void mm_unlock_kernel (void);
- bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags);
+ bool mm_validate (struct pd* pd, uintptr_t vaddr);
- bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags);
+ bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
- uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags);
+ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
- uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags);
+ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
struct pd* mm_get_kernel_pd (void);
void mm_init (void);
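
As a pointer to how the slimmed-down signatures compose (mirroring the sys_get_user_buffer path shown later in this diff; the helper name is invented): validation and translation now take the pd alone, with locking handled by whoever owns the enclosing procgroup.

#include <sys/mm.h>

/* Translate a user pointer; assumes the caller holds the lock guarding pd. */
static uintptr_t user_ptr_to_phys (struct pd* pd, uintptr_t uvaddr, size_t size) {
    if (!mm_validate_buffer (pd, uvaddr, size))
        return 0; /* unmapped or partially mapped range */
    return mm_v2p (pd, uvaddr);
}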

kernel/sys/proc.h

@@ -6,8 +6,9 @@
struct proc;
struct proc* proc_from_elf (uint8_t* elf_contents);
- struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
- uintptr_t entry);
+ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+ uintptr_t argument_ptr);
void proc_cleanup (struct proc* proc);
+ void proc_init_tls (struct proc* proc);
#endif // _KERNEL_SYS_PROC_H

kernel/sys/procgroup.h (new file, +8 lines)

@@ -0,0 +1,8 @@
#ifndef _KERNEL_SYS_PROCGROUP_H
#define _KERNEL_SYS_PROCGROUP_H
#if defined(__x86_64__)
#include <amd64/procgroup.h>
#endif
#endif // _KERNEL_SYS_PROCGROUP_H

kernel/syscall/syscall.c

@@ -5,9 +5,9 @@
#include <m/status.h>
#include <m/syscall_defs.h>
#include <mm/pmm.h>
- #include <proc/mem.h>
#include <proc/mutex.h>
#include <proc/proc.h>
+ #include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
@@ -16,23 +16,26 @@
#include <syscall/syscall.h>
#define DEFINE_SYSCALL(name) \
- int name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, uintptr_t UNUSED a2, \
- uintptr_t UNUSED a3, uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
+ uintptr_t name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, \
+ uintptr_t UNUSED a2, uintptr_t UNUSED a3, uintptr_t UNUSED a4, \
+ uintptr_t UNUSED a5, uintptr_t UNUSED a6)
+ #define SYSRESULT(x) ((uintptr_t)(x))
static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
- spin_lock_ctx_t ctxprpd;
+ spin_lock_ctx_t ctxpg;
- spin_lock (&proc->pd->lock, &ctxprpd);
+ spin_lock (&proc->procgroup->lock, &ctxpg);
- if (!mm_validate_buffer (proc->pd, (uintptr_t)uvaddr, size, 0)) {
+ if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
- spin_unlock (&proc->pd->lock, &ctxprpd);
+ spin_unlock (&proc->procgroup->lock, &ctxpg);
return NULL;
}
- uintptr_t out_paddr = mm_v2p (proc->pd, uvaddr, 0);
+ uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);
- spin_unlock (&proc->pd->lock, &ctxprpd);
+ spin_unlock (&proc->procgroup->lock, &ctxpg);
uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;
@@ -42,50 +45,26 @@ static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t si
/* int quit (void) */
DEFINE_SYSCALL (sys_quit) {
proc_kill (proc);
- return ST_OK;
+ return SYSRESULT (ST_OK);
}
/* int test (void) */
DEFINE_SYSCALL (sys_test) {
char c = (char)a1;
DEBUG ("test syscall from %d! %c\n", proc->pid, c);
- return ST_OK;
+ return SYSRESULT (ST_OK);
}
- /* int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags) */
+ /* int map (uintptr_t vaddr, size_t pages, uint32_t flags) */
DEFINE_SYSCALL (sys_map) {
- spin_lock_ctx_t ctxrs;
- int mem_rid = (int)a1;
- int vis = (int)a2;
- uintptr_t vaddr = a3;
- uint32_t flags = (uint32_t)a4;
+ uintptr_t vaddr = a1;
+ size_t pages = (size_t)a2;
+ uint32_t flags = (uint32_t)a3;
if (vaddr % PAGE_SIZE != 0)
- return -ST_UNALIGNED;
+ return SYSRESULT (-ST_UNALIGNED);
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- struct proc_resource* mem_resource = proc_find_resource (proc, mem_rid, vis);
- if (mem_resource == NULL) {
- return -ST_NOT_FOUND;
- }
- spin_lock (&mem_resource->lock, &ctxrs);
- if (mem_resource->type != PR_MEM) {
- spin_unlock (&mem_resource->lock, &ctxrs);
- return -ST_BAD_RESOURCE;
- }
- uintptr_t paddr = mem_resource->u.mem.paddr;
- size_t pages = mem_resource->u.mem.pages;
- spin_unlock (&mem_resource->lock, &ctxrs);
- return proc_map (proc, paddr, vaddr, pages, flags) ? ST_OK : -ST_OOM_ERROR;
+ return SYSRESULT (procgroup_map (proc->procgroup, vaddr, pages, flags, NULL));
}
/* int unmap (uintptr_t vaddr, size_t pages) */
@@ -94,163 +73,94 @@ DEFINE_SYSCALL (sys_unmap) {
size_t pages = (size_t)a2;
if (vaddr % PAGE_SIZE != 0)
- return -ST_UNALIGNED;
+ return SYSRESULT (-ST_UNALIGNED);
- return proc_unmap (proc, vaddr, pages) ? ST_OK : -ST_OOM_ERROR;
+ return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
}
- /* int create_mem (int rid, int vis, size_t pages) */
- DEFINE_SYSCALL (sys_create_mem) {
- int rid = (int)a1;
- int vis = (int)a2;
- size_t pages = (size_t)a3;
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- if (pages == 0)
- return ST_OK;
- int rid1 = rid < 0 ? atomic_fetch_add (&proc->resources->sys_rids, 1) : rid;
- struct proc_resource_mem_init mem_init = {.managed = false, .pages = pages};
- struct proc_resource* mem_resource = proc_create_resource (proc, rid1, PR_MEM, vis, &mem_init);
- if (mem_resource == NULL)
- return -ST_OOM_ERROR;
- return mem_resource->rid;
- }
- /* int unlink_mem (int rid, int vis, size_t pages) */
- DEFINE_SYSCALL (sys_unlink_mem) {
- spin_lock_ctx_t ctxrs;
- int rid = (int)a1;
- int vis = (int)a2;
- size_t pages = (size_t)a3;
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- struct proc_resource* mem_resource = proc_find_resource (proc, rid, vis);
- if (mem_resource == NULL)
- return -ST_NOT_FOUND;
- spin_lock (&mem_resource->lock, &ctxrs);
- if (mem_resource->type != PR_MEM) {
- spin_unlock (&mem_resource->lock, &ctxrs);
- return -ST_BAD_RESOURCE;
- }
- mem_resource->u.mem.alive_pages -= pages;
- if (mem_resource->u.mem.alive_pages < 0) {
- spin_unlock (&mem_resource->lock, &ctxrs);
- proc_drop_resource (proc, mem_resource, true);
- }
- return ST_OK;
- }
- /* int clone (uintptr_t vstack_top, size_t stack_size, void* entry) */
+ /* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
DEFINE_SYSCALL (sys_clone) {
uintptr_t vstack_top = a1;
- size_t stack_size = (size_t)a2;
- uintptr_t entry = a3;
+ uintptr_t entry = a2;
+ uintptr_t argument_ptr = a3;
- struct proc* new = proc_clone (proc, vstack_top, stack_size, entry);
+ struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);
- DEBUG ("new=%p\n", new);
if (new == NULL) {
- return -ST_OOM_ERROR;
+ return SYSRESULT (-ST_OOM_ERROR);
}
int pid = new->pid;
proc_register (new, NULL);
- return pid;
+ return SYSRESULT (pid);
}
+ /* void* argument_ptr (void) */
+ DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
/* int sched (void) */
DEFINE_SYSCALL (sys_sched) {
proc_sched ();
- return ST_OK;
+ return SYSRESULT (ST_OK);
}
- /* int create_mutex (int mutex_rid, int vis) */
- DEFINE_SYSCALL (sys_create_mutex) {
+ /* int mutex_create (int mutex_rid) */
+ DEFINE_SYSCALL (sys_mutex_create) {
int mutex_rid = (int)a1;
- int vis = (int)a2;
- if (mutex_rid < 0)
- return -ST_BAD_RESOURCE;
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- struct proc_resource* mutex_resource =
- proc_create_resource (proc, mutex_rid, PR_MUTEX, vis, NULL);
+ struct proc_resource* mutex_resource = proc_create_resource_mutex (proc->procgroup, mutex_rid);
if (mutex_resource == NULL)
- return -ST_OOM_ERROR;
+ return SYSRESULT (-ST_OOM_ERROR);
- return mutex_resource->rid;
+ return SYSRESULT (mutex_resource->rid);
}
- /* int unlink_mutex (int mutex_rid, int vis) */
- DEFINE_SYSCALL (sys_unlink_mutex) {
+ /* int mutex_delete (int mutex_rid) */
+ DEFINE_SYSCALL (sys_mutex_delete) {
int mutex_rid = (int)a1;
- int vis = (int)a2;
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- struct proc_resource* mutex_resource = proc_find_resource (proc, mutex_rid, vis);
+ struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
if (mutex_resource == NULL)
- return -ST_NOT_FOUND;
+ return SYSRESULT (-ST_NOT_FOUND);
- proc_drop_resource (proc, mutex_resource, true);
+ if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
+ proc_sched ();
- return ST_OK;
+ return SYSRESULT (ST_OK);
}
- /* int lock_mutex (int mutex_rid, int vis) */
- DEFINE_SYSCALL (sys_lock_mutex) {
+ /* int mutex_lock (int mutex_rid) */
+ DEFINE_SYSCALL (sys_mutex_lock) {
int mutex_rid = (int)a1;
- int vis = (int)a2;
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- struct proc_resource* mutex_resource = proc_find_resource (proc, mutex_rid, vis);
+ struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
if (mutex_resource == NULL)
- return -ST_NOT_FOUND;
+ return SYSRESULT (-ST_NOT_FOUND);
- proc_mutex_lock (proc, &mutex_resource->u.mutex);
+ if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+ proc_sched ();
- return ST_OK;
+ return SYSRESULT (ST_OK);
}
- /* int unlock_mutex (int mutex_rid, int vis) */
- DEFINE_SYSCALL (sys_unlock_mutex) {
+ /* int mutex_unlock (int mutex_rid) */
+ DEFINE_SYSCALL (sys_mutex_unlock) {
int mutex_rid = (int)a1;
- int vis = (int)a2;
- if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
- return -ST_BAD_RESOURCE;
- struct proc_resource* mutex_resource = proc_find_resource (proc, mutex_rid, vis);
+ struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
if (mutex_resource == NULL)
- return -ST_NOT_FOUND;
+ return SYSRESULT (-ST_NOT_FOUND);
- return proc_mutex_unlock (proc, &mutex_resource->u.mutex) ? ST_OK : -ST_PERMISSION_ERROR;
+ if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+ proc_sched ();
+ return SYSRESULT (ST_OK);
}
static syscall_handler_func_t handler_table[] = {
@@ -259,13 +169,12 @@ static syscall_handler_func_t handler_table[] = {
[SYS_MAP] = &sys_map,
[SYS_UNMAP] = &sys_unmap,
[SYS_CLONE] = &sys_clone,
+ [SYS_ARGUMENT_PTR] = &sys_argument_ptr,
[SYS_SCHED] = &sys_sched,
- [SYS_CREATE_MEM] = &sys_create_mem,
- [SYS_UNLINK_MEM] = &sys_unlink_mem,
- [SYS_CREATE_MUTEX] = &sys_create_mutex,
- [SYS_UNLINK_MUTEX] = &sys_unlink_mutex,
- [SYS_LOCK_MUTEX] = &sys_lock_mutex,
- [SYS_UNLOCK_MUTEX] = &sys_unlock_mutex,
+ [SYS_MUTEX_CREATE] = &sys_mutex_create,
+ [SYS_MUTEX_DELETE] = &sys_mutex_delete,
+ [SYS_MUTEX_LOCK] = &sys_mutex_lock,
+ [SYS_MUTEX_UNLOCK] = &sys_mutex_unlock,
};
syscall_handler_func_t syscall_find_handler (int syscall_num) {
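
As a worked example of the reworked handler shape (SYS_PING and sys_ping are hypothetical, built only from the macros above): handlers now return uintptr_t so pointer-sized results survive, scalar statuses are wrapped in SYSRESULT, and blocking paths translate PROC_NEED_RESCHEDULE into an explicit proc_sched () call before returning.

/* int ping (char c) -- illustrative only, not part of the table above */
DEFINE_SYSCALL (sys_ping) {
    char c = (char)a1;
    DEBUG ("ping from pid %d: %c\n", proc->pid, c);
    return SYSRESULT (ST_OK);
}
/* registration would add: [SYS_PING] = &sys_ping, */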

kernel/syscall/syscall.h

@@ -4,8 +4,9 @@
#include <libk/std.h>
#include <proc/proc.h>
- typedef int (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1, uintptr_t a2,
- uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6);
+ typedef uintptr_t (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1,
+ uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5,
+ uintptr_t a6);
syscall_handler_func_t syscall_find_handler (int syscall_num);

libmsl/alloc/liballoc.c

@@ -6,41 +6,17 @@
#define LIBALLOC_MUTEX 500
- static uintptr_t liballoc_map_base = PROC_MAP_BASE;
- static int mem_rid_base = 1000000;
- void liballoc_init (void) { create_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
- void liballoc_deinit (void) { unlink_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
- int liballoc_lock (void) { return lock_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
- int liballoc_unlock (void) { return unlock_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
- void* liballoc_alloc (int pages, int* mem_rid) {
- uintptr_t current_base = liballoc_map_base;
- *mem_rid = create_mem (mem_rid_base++, RV_PRIVATE, pages);
- if (*mem_rid < 0) {
- return NULL;
- }
- if (map (*mem_rid, RV_PRIVATE, current_base, MAP_FLAGS | MAP_RW) < 0) {
- unlink_mem (*mem_rid, RV_PRIVATE, pages);
- return NULL;
- }
- uintptr_t old_base = current_base;
- current_base += pages * PAGE_SIZE;
- return (void*)old_base;
- }
- int liballoc_free (void* ptr, int pages, int mem_rid) {
- unmap ((uintptr_t)ptr, pages);
- unlink_mem (mem_rid, RV_PRIVATE, pages);
- return 0;
- }
+ void liballoc_init (void) { mutex_create (LIBALLOC_MUTEX); }
+ void liballoc_deinit (void) { mutex_delete (LIBALLOC_MUTEX); }
+ int liballoc_lock (void) { return mutex_lock (LIBALLOC_MUTEX); }
+ int liballoc_unlock (void) { return mutex_unlock (LIBALLOC_MUTEX); }
+ void* liballoc_alloc (int pages) { return map (0, pages, MAP_FLAGS | MAP_RW); }
+ int liballoc_free (void* ptr, int pages) { return unmap ((uintptr_t)ptr, pages); }
/** Durand's Ridiculously Amazing Super Duper Memory functions. */
@@ -207,7 +183,6 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
unsigned int pages;
unsigned int usage;
struct boundary_tag* tag;
- int mem_rid;
// This is how much space is required.
usage = size + sizeof (struct boundary_tag);
@@ -221,7 +196,7 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
if (pages < (unsigned int)l_pageCount)
pages = l_pageCount;
- tag = (struct boundary_tag*)liballoc_alloc (pages, &mem_rid);
+ tag = (struct boundary_tag*)liballoc_alloc (pages);
if (tag == NULL)
return NULL; // uh oh, we ran out of memory.
@@ -230,7 +205,6 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
tag->size = size;
tag->real_size = pages * l_pageSize;
tag->index = -1;
- tag->mem_rid = mem_rid;
tag->next = NULL;
tag->prev = NULL;
@@ -353,7 +327,7 @@ void free (void* ptr) {
if (pages < (unsigned int)l_pageCount)
pages = l_pageCount;
- liballoc_free (tag, pages, tag->mem_rid);
+ liballoc_free (tag, pages);
liballoc_unlock ();
return;
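
The rewritten hooks lean entirely on the kernel choosing the address; a short sketch of the same contract used directly (grab_pages is hypothetical).

#include <m/system.h>

/* Anonymous pages, kernel-chosen vaddr -- what liballoc_alloc () relies on. */
static void* grab_pages (size_t pages) {
    return map (0, pages, MAP_FLAGS | MAP_RW); /* 0/NULL on failure */
}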

libmsl/alloc/liballoc.h

@@ -41,8 +41,6 @@ struct boundary_tag {
struct boundary_tag* next; //< Linked list info.
struct boundary_tag* prev; //< Linked list info.
- int mem_rid;
};
/** This function is supposed to lock the memory data structures. It
@@ -69,7 +67,7 @@ extern int liballoc_unlock (void);
* \return NULL if the pages were not allocated.
* \return A pointer to the allocated memory.
*/
- extern void* liballoc_alloc (int pages, int* mem_rid);
+ extern void* liballoc_alloc (int pages);
/** This frees previously allocated memory. The void* parameter passed
* to the function is the exact same value returned from a previous
@@ -79,7 +77,7 @@ extern void* liballoc_alloc (int pages, int* mem_rid);
*
* \return 0 if the memory was successfully freed.
*/
- extern int liballoc_free (void* ptr, int pages, int mem_rid);
+ extern int liballoc_free (void* ptr, int pages);
void* malloc (size_t); //< The standard function.
void* realloc (void*, size_t); //< The standard function.

libmsl/amd64/syscall.c

@@ -2,8 +2,8 @@
#include <stddef.h>
#include <stdint.h>
- int amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
+ uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6) {
uint64_t result;
__asm__ volatile ("movq %[a4], %%r10\n"
"movq %[a5], %%r8\n"
@@ -13,5 +13,5 @@ int amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, ui
: "a"(syscall_num), "D"(a1), "S"(a2),
"d"(a3), [a4] "r"(a4), [a5] "r"(a5), [a6] "r"(a6)
: "r10", "r8", "r9", "r11", "rcx", "cc", "memory");
- return (int)result;
+ return result;
}
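
The widened return type is what makes pointer-returning syscalls viable; a minimal sketch (wrapper name invented, header path assumed from the repo layout) of why the old int return would have truncated a canonical 64-bit user address:

#include <amd64/syscall.h>
#include <m/syscall_defs.h>

/* The full 64-bit result now survives; (int)result used to truncate it. */
static void* raw_argument_ptr (void) {
    return (void*)amd64_syscall (SYS_ARGUMENT_PTR, 0, 0, 0, 0, 0, 0);
}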

libmsl/amd64/syscall.h

@@ -3,7 +3,7 @@
#include <stdint.h>
- int amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
+ uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6);
#endif // _LIBMSL_AMD64_SYSCALL_H

libmsl/m/system.c

@@ -15,28 +15,22 @@ int test (char c) { return do_syscall (SYS_TEST, c); }
int sched (void) { return do_syscall (SYS_SCHED, 0); }
- int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags) {
- return do_syscall (SYS_MAP, mem_rid, vis, vaddr, flags);
- }
+ void* map (uintptr_t vaddr, size_t pages, uint32_t flags) {
+ return (void*)do_syscall (SYS_MAP, vaddr, pages, flags);
+ }
int unmap (uintptr_t vaddr, size_t pages) { return do_syscall (SYS_UNMAP, vaddr, pages); }
- int create_mem (int mem_rid, int vis, size_t pages) {
- return do_syscall (SYS_CREATE_MEM, mem_rid, vis, pages);
- }
- int unlink_mem (int mem_rid, int vis, size_t pages) {
- return do_syscall (SYS_UNLINK_MEM, mem_rid, vis, pages);
- }
- int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void)) {
- return do_syscall (SYS_CLONE, vstack_top, stack_size, entry);
- }
- int create_mutex (int mutex_rid, int vis) { return do_syscall (SYS_CREATE_MUTEX, mutex_rid, vis); }
- int unlink_mutex (int mutex_rid, int vis) { return do_syscall (SYS_UNLINK_MUTEX, mutex_rid, vis); }
- int lock_mutex (int mutex_rid, int vis) { return do_syscall (SYS_LOCK_MUTEX, mutex_rid, vis); }
- int unlock_mutex (int mutex_rid, int vis) { return do_syscall (SYS_UNLOCK_MUTEX, mutex_rid, vis); }
+ int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr) {
+ return do_syscall (SYS_CLONE, vstack_top, entry, argument_ptr);
+ }
+ int mutex_create (int mutex_rid) { return do_syscall (SYS_MUTEX_CREATE, mutex_rid); }
+ int mutex_delete (int mutex_rid) { return do_syscall (SYS_MUTEX_DELETE, mutex_rid); }
+ int mutex_lock (int mutex_rid) { return do_syscall (SYS_MUTEX_LOCK, mutex_rid); }
+ int mutex_unlock (int mutex_rid) { return do_syscall (SYS_MUTEX_UNLOCK, mutex_rid); }
+ void* argument_ptr (void) { return (void*)do_syscall (SYS_ARGUMENT_PTR, 0); }
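
Taken together, the new wrappers support passing an argument through clone; a small hedged sketch (the rid and names are illustrative):

#include <m/system.h>

#define DEMO_MUTEX 7 /* arbitrary rid */

static void worker (void) {
    char* msg = argument_ptr (); /* pointer passed to clone () below */
    mutex_lock (DEMO_MUTEX);
    test (msg[0]); /* debug-print under the lock */
    mutex_unlock (DEMO_MUTEX);
    quit ();
}

static int start_worker (uintptr_t stack_top) {
    static char msg[] = "w";
    mutex_create (DEMO_MUTEX);
    return clone (stack_top, &worker, msg); /* returns the new pid */
}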

libmsl/m/system.h

@@ -5,8 +5,7 @@
#include <stdint.h>
#if defined(__x86_64__)
- #define PROC_MAP_BASE 0x0000700000000000
- #define PAGE_SIZE 4096
+ #define PAGE_SIZE 4096
#endif
#define MAP_PRESENT (1 << 0)
@@ -14,20 +13,16 @@
#define MAP_USER (1 << 2)
#define MAP_FLAGS (MAP_PRESENT | MAP_USER)
- #define RV_PRIVATE 0
- #define RV_PUBLIC 1
int quit (void);
int test (char c);
int sched (void);
- int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags);
+ void* map (uintptr_t vaddr, size_t pages, uint32_t flags);
int unmap (uintptr_t vaddr, size_t pages);
- int create_mem (int mem_rid, int vis, size_t pages);
- int unlink_mem (int mem_rid, int vis, size_t pages);
- int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void));
- int create_mutex (int mutex_rid, int vis);
- int unlink_mutex (int mutex_rid, int vis);
- int lock_mutex (int mutex_rid, int vis);
- int unlock_mutex (int mutex_rid, int vis);
+ int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr);
+ int mutex_create (int mutex_rid);
+ int mutex_delete (int mutex_rid);
+ int mutex_lock (int mutex_rid);
+ int mutex_unlock (int mutex_rid);
+ void* argument_ptr (void);
#endif // _LIBMSL_M_SYSTEM_H

libmsl/proc/.gitignore (new file, +1 line)

@@ -0,0 +1 @@
*.o

libmsl/proc/local.h (new file, +6 lines)

@@ -0,0 +1,6 @@
#ifndef _LIBMSL_PROC_TLS_H
#define _LIBMSL_PROC_TLS_H
#define LOCAL __thread
#endif // _LIBMSL_PROC_TLS_H
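
One line shows the intent (the variable name is illustrative): each process gets its own copy, materialized from the group's TLS template at clone time.

#include <proc/local.h>

LOCAL int per_proc_counter; /* lives in .tdata/.tbss, one instance per process */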

libmsl/proc/proc.c (new file, +19 lines)

@@ -0,0 +1,19 @@
#include <alloc/liballoc.h>
#include <m/status.h>
#include <m/system.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
int process_spawn (process_func_t func, void* argument_ptr) {
void* stack = malloc (PROC_STACK_SIZE);
if (stack == NULL)
return -ST_OOM_ERROR;
uintptr_t top = (uintptr_t)stack + PROC_STACK_SIZE;
return clone (top, func, argument_ptr);
}
int process_quit (void) { return quit (); }
void* process_argument (void) { return argument_ptr (); }

libmsl/proc/proc.h (new file, +14 lines)

@@ -0,0 +1,14 @@
#ifndef _LIBMSL_PROC_PROC_H
#define _LIBMSL_PROC_PROC_H
#include <m/system.h>
#define PROC_STACK_SIZE (256 * PAGE_SIZE)
typedef void (*process_func_t) (void);
int process_spawn (process_func_t func, void* argument_ptr);
int process_quit (void);
void* process_argument (void);
#endif // _LIBMSL_PROC_PROC_H
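
A usage sketch for the wrapper API above (entry function and argument are illustrative). Note that process_spawn () never frees the malloc'd stack itself; it is reclaimed along with the rest of the group's mappings.

#include <proc/proc.h>

static void hello (void) {
    char* who = process_argument ();
    test (who[0]); /* debug-print via the kernel */
    process_quit ();
}

static int spawn_hello (void) {
    static char who[] = "h";
    return process_spawn (&hello, who); /* new pid, or -ST_OOM_ERROR */
}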

libmsl/proc/src.mk (new file, +3 lines)

@@ -0,0 +1,3 @@
c += proc/proc.c
o += proc/proc.o

libmsl/src.mk

@@ -3,3 +3,4 @@ include init/src.mk
include m/src.mk
include string/src.mk
include alloc/src.mk
+ include proc/src.mk

make/libmsl.mk

@@ -7,4 +7,4 @@ clean_libmsl:
format_libmsl:
make -C libmsl platform=$(platform) format
- .PHONY: all_libmsl clean_libmsl
+ .PHONY: all_libmsl clean_libmsl format_libmsl