Compare commits: b388b30b24...master

13 Commits
| SHA1 |
|---|
| 38e26a9c12 |
| 124aa12f5b |
| d2f5c032d9 |
| 73e42588fb |
| e78bfb9984 |
| d2a88b3641 |
| fdda2e2df8 |
| 388418a718 |
| 1c64d608bd |
| 3d23187acf |
| a3b62ebd3d |
| 8bda300f6a |
| cf51600c6a |
Makefile (2 changed lines)
@@ -4,4 +4,4 @@ include make/apps.mk
include make/kernel.mk
include make/dist.mk
include make/docs.mk
include make/libc.mk
include make/libmsl.mk
@@ -6,6 +6,8 @@ PHDRS {
text PT_LOAD;
rodata PT_LOAD;
data PT_LOAD;
bss PT_LOAD;
tls PT_TLS;
}

SECTIONS {
@@ -13,31 +15,52 @@ SECTIONS {

.text : {
*(.text .text.*)
*(.ltext .ltext.*)
} :text

. = ALIGN(CONSTANT(MAXPAGESIZE));
. = ALIGN(0x1000);

.rodata : {
*(.rodata .rodata.*)
} :rodata

.note.gnu.build-id : {
*(.note.gnu.build-id)
} :rodata

. = ALIGN(CONSTANT(MAXPAGESIZE));
. = ALIGN(0x1000);

.data : {
*(.data .data.*)
*(.ldata .ldata.*)
} :data

. = ALIGN(0x1000);

__bss_start = .;

.bss : {
*(.bss .bss.*)
} :data
*(.lbss .lbss.*)
} :bss

__bss_end = .;

. = ALIGN(0x1000);

__tdata_start = .;

.tdata : {
*(.tdata .tdata.*)
} :tls

__tdata_end = .;

__tbss_start = .;

.tbss : {
*(.tbss .tbss.*)
} :tls

__tbss_end = .;

__tls_size = __tbss_end - __tdata_start;

/DISCARD/ : {
*(.eh_frame*)
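The hunk above adds the TLS template boundaries (`__tdata_start`, `__tdata_end`, `__tbss_start`, `__tbss_end`, `__tls_size`) to the script. For orientation, a minimal sketch of how such linker-provided symbols are typically consumed from C; the extern declarations mirror the script's names, but the consuming functions are hypothetical and not part of this diff:

```c
/* Hypothetical consumers of the TLS symbols defined by the linker script. */
extern char __tdata_start[], __tdata_end[], __tbss_end[];

/* Bytes to reserve per thread: initialized .tdata image plus zeroed .tbss. */
static inline unsigned long tls_block_size (void) {
    return (unsigned long)(__tbss_end - __tdata_start); /* same value as __tls_size */
}

/* Bytes to copy from the kernel image when seeding a new thread's TLS. */
static inline unsigned long tls_init_size (void) {
    return (unsigned long)(__tdata_end - __tdata_start);
}
```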
@@ -1,4 +1,4 @@
cpu: model=p4_prescott_celeron_336
cpu: model=p4_prescott_celeron_336, ips=200000000

memory: guest=4096 host=2048

@@ -9,6 +9,7 @@ ata0: enabled=1
ata0-master: type=cdrom, path=mop3.iso, status=inserted
com1: enabled=1, mode=file, dev=bochs-com1.txt
pci: enabled=1, chipset=i440fx
clock: sync=realtime, time0=local

boot: cdrom
@@ -7,9 +7,10 @@
#define SYS_UNMAP 4
#define SYS_CLONE 5
#define SYS_SCHED 6
#define SYS_CREATE_MUTEX 7
#define SYS_UNLINK_MUTEX 8
#define SYS_LOCK_MUTEX 9
#define SYS_UNLOCK_MUTEX 10
#define SYS_MUTEX_CREATE 7
#define SYS_MUTEX_DELETE 8
#define SYS_MUTEX_LOCK 9
#define SYS_MUTEX_UNLOCK 10
#define SYS_ARGUMENT_PTR 11

#endif // _M_SYSCALL_DEFS_H
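For orientation, a hedged sketch of userspace wrappers over the renamed syscall numbers, consistent with the `mutex_create`/`mutex_lock`/`mutex_unlock`/`argument_ptr` calls used by `init/init.c` below; the `syscall0`/`syscall1` trampolines are assumptions, not part of this diff:

```c
/* Hypothetical libc-side wrappers; syscall0/syscall1 are assumed trampolines. */
static inline int mutex_create (int rid) { return (int)syscall1 (SYS_MUTEX_CREATE, rid); }
static inline int mutex_lock   (int rid) { return (int)syscall1 (SYS_MUTEX_LOCK, rid); }
static inline int mutex_unlock (int rid) { return (int)syscall1 (SYS_MUTEX_UNLOCK, rid); }
static inline void* argument_ptr (void)  { return (void*)syscall0 (SYS_ARGUMENT_PTR); }
```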
init/init.c (140 changed lines)
@@ -1,136 +1,46 @@
#include <alloc/liballoc.h>
#include <limits.h>
#include <m/status.h>
#include <m/system.h>
#include <proc/local.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
#include <string/string.h>

#define EXAMPLE 2

#if EXAMPLE == 1

#define MUTEX 2000

void app_thread1 (void);
LOCAL volatile char letter = 'c';

int spawn (void (*fn) (void)) {
size_t stack_size = 256 * PAGE_SIZE;
void* stack = malloc (stack_size);
if (stack == NULL)
return -ST_OOM_ERROR;
void app_proc (void) {
char arg_letter = (char)(uintptr_t)argument_ptr ();

uintptr_t stack_top = (uintptr_t)stack + stack_size;
return clone (stack_top, stack_size, fn);
letter = arg_letter;

for (;;) {
mutex_lock (MUTEX);

for (int i = 0; i < 3; i++)
test (letter);

mutex_unlock (MUTEX);
}

process_quit ();
}

void app_main (void) {
create_mutex (MUTEX);
mutex_create (MUTEX);

spawn (&app_thread1);
letter = 'a';

lock_mutex (MUTEX);

for (int i = 0; i < 3; i++)
test ('a');

unlock_mutex (MUTEX);
}

void app_thread1 (void) {
lock_mutex (MUTEX);

for (int i = 0; i < 3; i++)
test ('b');

unlock_mutex (MUTEX);

quit ();
}
#elif EXAMPLE == 2

#define MUTEX 2000

void app_thread1 (void);

int spawn (void (*fn) (void)) {
size_t stack_size = 256 * PAGE_SIZE;
void* stack = malloc (stack_size);
if (stack == NULL)
return -ST_OOM_ERROR;

uintptr_t stack_top = (uintptr_t)stack + stack_size;
return clone (stack_top, stack_size, fn);
}

void app_main (void) {
create_mutex (MUTEX);

spawn (&app_thread1);
process_spawn (&app_proc, (void*)'a');
process_spawn (&app_proc, (void*)'b');
process_spawn (&app_proc, (void*)'c');

for (;;) {
lock_mutex (MUTEX);
mutex_lock (MUTEX);

for (int i = 0; i < 3; i++)
test ('a');
test (letter);

unlock_mutex (MUTEX);
mutex_unlock (MUTEX);
}
}

void app_thread1 (void) {
for (;;) {
lock_mutex (MUTEX);

for (int i = 0; i < 3; i++)
test ('b');

unlock_mutex (MUTEX);
}

quit ();
}
#elif EXAMPLE == 3

#define MUTEX 2000

void app_thread1 (void);

int spawn (void (*fn) (void)) {
size_t stack_size = 256 * PAGE_SIZE;
void* stack = malloc (stack_size);
if (stack == NULL)
return -ST_OOM_ERROR;

uintptr_t stack_top = (uintptr_t)stack + stack_size;
return clone (stack_top, stack_size, fn);
}

void app_main (void) {
create_mutex (MUTEX);

spawn (&app_thread1);

for (;;) {
lock_mutex (MUTEX);

for (int i = 0; i < 3; i++)
test ('a');

quit ();
}
}

void app_thread1 (void) {
for (;;) {
lock_mutex (MUTEX);

for (int i = 0; i < 3; i++)
test ('b');

unlock_mutex (MUTEX);
}

quit ();
}
#endif
@@ -4,7 +4,7 @@
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
@@ -38,7 +38,7 @@

struct ioapic {
struct acpi_madt_ioapic table_data;
rw_spin_lock_t lock;
spin_lock_t lock;
uintptr_t mmio_base;
};

@@ -59,10 +59,10 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
spin_lock_ctx_t ctxioar;

rw_spin_read_lock (&ioapic->lock, &ctxioar);
spin_lock (&ioapic->lock, &ctxioar);
*(volatile uint32_t*)ioapic->mmio_base = reg;
uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
rw_spin_read_unlock (&ioapic->lock, &ctxioar);
spin_unlock (&ioapic->lock, &ctxioar);
return ret;
}

@@ -70,10 +70,10 @@ static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
spin_lock_ctx_t ctxioaw;

rw_spin_write_lock (&ioapic->lock, &ctxioaw);
spin_lock (&ioapic->lock, &ctxioaw);
*(volatile uint32_t*)ioapic->mmio_base = reg;
*(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
rw_spin_write_unlock (&ioapic->lock, &ctxioaw);
spin_unlock (&ioapic->lock, &ctxioaw);
}

/* Find an IOAPIC corresposting to provided IRQ */
@@ -160,9 +160,9 @@ void amd64_ioapic_init (void) {
struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
MM_PG_PRESENT | MM_PG_RW);
ioapics[ioapic_entries++] = (struct ioapic){
.lock = RW_SPIN_LOCK_INIT,
.lock = SPIN_LOCK_INIT,
.table_data = *ioapic_table_data,
.mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
};
@@ -246,8 +246,7 @@ void amd64_lapic_init (uint32_t us) {
uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;

mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base,
MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);

amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));


@@ -47,8 +47,6 @@ void bootmain (void) {
amd64_ioapic_init ();
amd64_hpet_init ();

mm_init2 ();

smp_init ();

proc_init ();

@@ -129,8 +129,7 @@ void amd64_hpet_init (void) {
hpet_paddr = (uintptr_t)hpet->address.address;

struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);

uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

@@ -22,10 +22,12 @@ struct pg_index {
} PACKED;

/* Kernel page directory */
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
static spin_lock_ctx_t ctxkpd;
/* Lock needed to sync between map/unmap operations and TLB shootdown */
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
static struct pd kernel_pd;
static spin_lock_t kernel_pd_lock;

void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }

void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }

/* Get current value of CR3 register */
static uintptr_t amd64_current_cr3 (void) {
@@ -112,15 +114,7 @@ static void amd64_reload_cr3 (void) {

/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock_ctx_t ctxmm, ctxpd;

spin_lock (&mm_lock, &ctxmm);

struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;

if (flags & MM_PD_LOCK)
spin_lock (&pd->lock, &ctxpd);

uint64_t amd64_flags = amd64_mm_resolve_flags (flags);

@@ -129,69 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag

uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
if (pml3 == NULL)
goto done;
return;

uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
if (pml2 == NULL)
goto done;
return;

uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
if (pml1 == NULL)
goto done;
return;

uint64_t* pte = &pml1[pg_index.pml1];

*pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
do_reload = true;

done:
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();

if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock, &ctxpd);

spin_unlock (&mm_lock, &ctxmm);
}

/* Map a page into kernel page directory */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
mm_map_page (&kernel_pd, paddr, vaddr, flags);
amd64_reload_cr3 ();
}

/* Unmap a virtual address. TLB needs to be flushed afterwards */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock_ctx_t ctxmm, ctxpd;

spin_lock (&mm_lock, &ctxmm);

void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;

if (flags & MM_PD_LOCK)
spin_lock (&pd->lock, &ctxpd);

uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);

uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
return;

uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
return;

uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
return;

uint64_t* pte = &pml1[pg_index.pml1];

if ((*pte) & AMD64_PG_PRESENT) {
if ((*pte) & AMD64_PG_PRESENT)
*pte = 0;
do_reload = true;
}

if (amd64_mm_is_table_empty (pml1)) {
uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -210,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
}
}
}

done:
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();

if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock, &ctxpd);

spin_unlock (&mm_lock, &ctxmm);
}

/* Unmap a page from kernel page directory */
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
mm_unmap_page (&kernel_pd, vaddr, flags);
void mm_unmap_kernel_page (uintptr_t vaddr) {
mm_unmap_page (&kernel_pd, vaddr);
amd64_reload_cr3 ();
}

/* Lock kernel page directory */
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }

/* Unlock kernel page directory */
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }

/* Allocate a userspace-ready page directory */
uintptr_t mm_alloc_user_pd_phys (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -250,26 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
return cr3;
}

/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
void mm_reload (void) {
struct limine_mp_response* mp = limine_mp_request.response;

for (size_t i = 0; i < mp->cpu_count; i++) {
amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
}
}

bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock_ctx_t ctxmm, ctxpd;

spin_lock (&mm_lock, &ctxmm);

bool mm_validate (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool ret = false;

if (flags & MM_PD_LOCK)
spin_lock (&pd->lock, &ctxpd);

uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);

@@ -289,45 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
ret = (pte & AMD64_PG_PRESENT) != 0;

done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock, &ctxpd);

spin_unlock (&mm_lock, &ctxmm);

return ret;
}

bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
bool ok = true;
spin_lock_ctx_t ctxpd;

if (flags & MM_PD_LOCK)
spin_lock (&pd->lock, &ctxpd);

for (size_t i = 0; i < size; i++) {
ok = mm_validate (pd, vaddr + i, 0);
ok = mm_validate (pd, vaddr + i);
if (!ok)
goto done;
}

done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock, &ctxpd);

return ok;
}

uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
spin_lock_ctx_t ctxmm, ctxpd;

spin_lock (&mm_lock, &ctxmm);

uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0;

if (flags & MM_PD_LOCK)
spin_lock (&pd->lock, &ctxpd);

uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);

for (size_t i4 = 0; i4 < 512; i4++) {
@@ -358,25 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
}

done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock, &ctxpd);

spin_unlock (&mm_lock, &ctxmm);

return ret;
}

uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock_ctx_t ctxmm, ctxpd;

spin_lock (&mm_lock, &ctxmm);

uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0;

if (flags & MM_PD_LOCK)
spin_lock (&pd->lock, &ctxpd);

uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);

@@ -400,25 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));

done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock, &ctxpd);

spin_unlock (&mm_lock, &ctxmm);

return ret;
}

/* TLB shootdown IRQ handler */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
(void)arg, (void)regs;

amd64_reload_cr3 ();
DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
}

/* Continue initializing memory management subsystem for AMD64 after the essential parts were
* initialized */
void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN); }

/* Initialize essentials for the AMD64 memory management subsystem */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
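Taken together, these mm.c hunks drop the per-call `MM_PD_LOCK`/`MM_PD_RELOAD` flag handling: callers now serialize through `mm_kernel_lock`/`mm_kernel_unlock` (or the procgroup lock) and broadcast the TLB shootdown themselves via `mm_reload`. A hedged sketch of the resulting call pattern; the remap function is hypothetical and not part of this diff:

```c
/* Hypothetical call site under the new discipline: lock, unmap/map, unlock,
 * then IPI every CPU so it reloads CR3 in amd64_tlb_shootdown_irq. */
static void remap_kernel_page (uintptr_t paddr, uintptr_t vaddr) {
    spin_lock_ctx_t ctx;

    mm_kernel_lock (&ctx);
    mm_unmap_kernel_page (vaddr);
    mm_map_kernel_page (paddr, vaddr, MM_PG_PRESENT | MM_PG_RW);
    mm_kernel_unlock (&ctx);

    mm_reload (); /* TLB shootdown broadcast to all CPUs */
}
```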
@@ -7,11 +7,9 @@
#define PAGE_SIZE 4096

struct pd {
spin_lock_t lock;
uintptr_t cr3_paddr;
};

void amd64_load_kernel_cr3 (void);
void mm_init2 (void);

#endif // _KERNEL_AMD64_MM_H

@@ -1,6 +1,7 @@
#include <amd64/gdt.h>
#include <amd64/proc.h>
#include <aux/elf.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
@@ -8,15 +9,15 @@
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/proc.h>

/* 0 is kpproc */
static atomic_int pids = 1;
static atomic_int pids = 0;

struct proc* proc_from_elf (uint8_t* elf_contents) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -57,8 +58,8 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
return proc;
}

struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
uintptr_t entry) {
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
uintptr_t argument_ptr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxprt;

@@ -88,39 +89,50 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_
proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = (uint64_t)entry;

proc->uvaddr_argument = argument_ptr;

proc_init_tls (proc);

return proc;
}

void proc_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;

spin_lock (&proc->lock, &ctxpr);

/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;

spin_lock (&sq->lock, &ctxsq);

/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);

/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);

spin_unlock (&sq->lock, &ctxsq);

free (sq_entry);
}

spin_unlock (&proc->lock, &ctxpr);

procgroup_detach (proc->procgroup, proc);
proc_sqs_cleanup (proc);
proc_mutexes_cleanup (proc);

pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);

procgroup_detach (proc->procgroup, proc);

/* clean the process */
free (proc);
}

void proc_init_tls (struct proc* proc) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

if (proc->procgroup->tls.tls_tmpl == NULL)
return;

size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
size_t pages = proc->procgroup->tls.tls_tmpl_pages;

uintptr_t tls_paddr;
uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;

uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);

uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;

memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);

uintptr_t ktcb = k_tls_addr + tls_size;
uintptr_t utcb = tls_vaddr + tls_size;

*(uintptr_t*)ktcb = utcb;

proc->pdata.fs_base = utcb;
proc->pdata.tls_vaddr = tls_vaddr;
}
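A sketch of the per-thread block that `proc_init_tls` above assembles — this reads like x86-64 TLS variant 2; the diagram is an inference from the code, not something stated in the diff:

```c
/*  tls_vaddr                       tls_vaddr + tls_size (= utcb = fs_base)
 *  |                               |
 *  +-------------------------------+----------------------+
 *  | template copy (.tdata/.tbss)  | TCB: self-pointer ... |
 *  +-------------------------------+----------------------+
 *
 * The first word of the TCB holds its own user address, so a userspace
 * load of %fs:0 yields the TCB pointer, as the x86-64 psABI expects.
 */
```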
@@ -15,7 +15,8 @@
struct proc_platformdata {
struct saved_regs regs;
uintptr_t kernel_stack;
uint64_t gs_base;
uint64_t fs_base;
uintptr_t tls_vaddr;
};

#endif // _KERNEL_AMD64_PROC_H
kernel/amd64/procgroup.h (new file, 13 lines)
@@ -0,0 +1,13 @@
#ifndef _KERNEL_AMD64_PROCGRPUP_H
#define _KERNEL_AMD64_PROCGRPUP_H

#include <libk/std.h>

struct procgroup_tls {
uint8_t* tls_tmpl;
size_t tls_tmpl_size;
size_t tls_tmpl_total_size;
size_t tls_tmpl_pages;
};

#endif // _KERNEL_AMD64_PROCGRPUP_H
@@ -14,6 +14,7 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu

thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);

spin_unlock (&proc->lock, &ctxpr);
spin_unlock (cpu_lock, ctxcpu);

@@ -2,7 +2,7 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>

#if defined(__x86_64__)
@@ -12,7 +12,7 @@

struct irq* irq_table[0x100];

static rw_spin_lock_t irqs_lock;
static spin_lock_t irqs_lock = SPIN_LOCK_INIT;

bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
spin_lock_ctx_t ctxiqa;
@@ -26,9 +26,9 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
irq->arg = arg;
irq->irq_num = irq_num;

rw_spin_write_lock (&irqs_lock, &ctxiqa);
spin_lock (&irqs_lock, &ctxiqa);
irq_table[irq_num] = irq;
rw_spin_write_unlock (&irqs_lock, &ctxiqa);
spin_unlock (&irqs_lock, &ctxiqa);

return true;
}
@@ -36,11 +36,11 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
struct irq* irq_find (uint32_t irq_num) {
spin_lock_ctx_t ctxiqa;

rw_spin_read_lock (&irqs_lock, &ctxiqa);
spin_lock (&irqs_lock, &ctxiqa);

struct irq* irq = irq_table[irq_num];

rw_spin_read_unlock (&irqs_lock, &ctxiqa);
spin_unlock (&irqs_lock, &ctxiqa);

return irq;
}
@@ -5,93 +5,54 @@
#include <mm/liballoc.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>

static void proc_mutex_suspend (struct proc* proc, struct proc_suspension_q* sq,
spin_lock_t* resource_lock, spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
void proc_mutexes_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxrs;

struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return;
spin_lock (&proc->procgroup->lock, &ctxpg);

struct rb_node_link* rnode;
rbtree_first (&proc->procgroup->resource_tree, rnode);

while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);

struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);

rnode = next;

spin_lock (&resource->lock, &ctxrs);

if (resource->type != PR_MUTEX) {
spin_unlock (&resource->lock, &ctxrs);
continue;
}

if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
spin_unlock (&resource->lock, &ctxrs);

proc_mutex_unlock (proc, &resource->u.mutex);
}
}

sq_entry->proc = proc;
sq_entry->sq = sq;

spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);

spin_unlock (resource_lock, ctxrl);

atomic_store (&proc->state, PROC_SUSPENDED);

/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);

/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);

list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);

if (cpu->proc_current == proc)
cpu->proc_current = NULL;

proc->cpu = NULL;

spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);

cpu_request_sched (cpu);
spin_unlock (&proc->procgroup->lock, &ctxpg);
}

static void proc_mutex_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;

spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);

/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);

/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);

proc->cpu = cpu;

if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);

list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);

spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);

free (sq_entry);

cpu_request_sched (cpu);
}

void proc_cleanup_resource_mutex (struct proc_resource* resource) {
bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
struct proc_mutex* mutex = &resource->u.mutex;
spin_lock_ctx_t ctxmt, ctxsq;

spin_lock (&mutex->resource->lock, &ctxmt);
spin_lock (&mutex->suspension_q.lock, &ctxsq);

bool reschedule = PROC_NO_RESCHEDULE;

while (mutex->suspension_q.proc_list != NULL) {
struct list_node_link* node = mutex->suspension_q.proc_list;
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
@@ -101,7 +62,7 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);

proc_mutex_resume (suspended_proc, sq_entry);
reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);

/* reacquire */
spin_lock (&mutex->resource->lock, &ctxmt);
@@ -113,23 +74,23 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {

spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);

return reschedule;
}

void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
spin_lock_ctx_t ctxmt;

for (;;) {
spin_lock (&mutex->resource->lock, &ctxmt);
spin_lock (&mutex->resource->lock, &ctxmt);

if (!mutex->locked || mutex->owner == proc) {
mutex->locked = true;
mutex->owner = proc;
spin_unlock (&mutex->resource->lock, &ctxmt);
return;
}

proc_mutex_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
if (!mutex->locked || mutex->owner == proc) {
mutex->locked = true;
mutex->owner = proc;
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NO_RESCHEDULE;
}

return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
}

bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
@@ -139,7 +100,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {

if (mutex->owner != proc) {
spin_unlock (&mutex->resource->lock, &ctxmt);
return false;
return PROC_NO_RESCHEDULE;
}

spin_lock (&mutex->suspension_q.lock, &ctxsq);
@@ -156,9 +117,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);

proc_mutex_resume (resumed_proc, sq_entry);

return true;
return proc_sq_resume (resumed_proc, sq_entry);
}

mutex->locked = false;
@@ -167,5 +126,5 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);

return true;
return PROC_NEED_RESCHEDULE;
}

@@ -15,8 +15,9 @@ struct proc_mutex {
struct proc* owner;
};

void proc_cleanup_resource_mutex (struct proc_resource* resource);
void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_cleanup_resource_mutex (struct proc_resource* resource);
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
void proc_mutexes_cleanup (struct proc* proc);

#endif // _KERNEL_PROC_MUTEX_H
@@ -13,7 +13,6 @@
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <rd/rd.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
@@ -29,7 +28,7 @@
#define SCHED_REAP_FREQ 10

static struct rb_node_link* proc_tree = NULL;
static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;

static atomic_int sched_cycles = 0;

@@ -74,6 +73,27 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
} break;
case PT_TLS: {
#if defined(__x86_64__)
if (phdr->p_memsz > 0) {
size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
size_t tls_size = align_up (phdr->p_memsz, tls_align);
size_t tls_total_needed = tls_size + sizeof (uintptr_t);
size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
proc->procgroup->tls.tls_tmpl_pages = blks;
proc->procgroup->tls.tls_tmpl_size = tls_size;
proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;

proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);

memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
phdr->p_filesz);

proc_init_tls (proc);
}
#endif
} break;
}
}

@@ -84,7 +104,6 @@ struct proc* proc_spawn_rd (char* name) {
struct rd_file* rd_file = rd_get_file (name);

bool ok = proc_check_elf (rd_file->content);
DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));

if (!ok)
return NULL;
@@ -96,9 +115,9 @@ struct proc* proc_find_pid (int pid) {
spin_lock_ctx_t ctxprtr;
struct proc* proc = NULL;

rw_spin_read_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&proc_tree_lock, &ctxprtr);
rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
rw_spin_read_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&proc_tree_lock, &ctxprtr);

return proc;
}
@@ -107,21 +126,20 @@ void proc_register (struct proc* proc, struct cpu* cpu1) {
spin_lock_ctx_t ctxcpu, ctxprtr;

proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
DEBUG ("Assigning CPU %d to PID %d\n", proc->cpu->id, proc->pid);

struct cpu* cpu = proc->cpu;

rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);

spin_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&cpu->lock, &ctxcpu);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);

rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);

atomic_fetch_add (&cpu->proc_run_q_count, 1);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == NULL)
cpu->proc_current = proc;

spin_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&cpu->lock, &ctxcpu);
}

@@ -160,7 +178,7 @@ static void proc_reap (void) {
spin_lock_ctx_t ctxprtr;
spin_lock_ctx_t ctxpr;

rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&proc_tree_lock, &ctxprtr);

struct rb_node_link* node;
rbtree_first (&proc_tree, node);
@@ -180,7 +198,7 @@ static void proc_reap (void) {
node = next;
}

rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&proc_tree_lock, &ctxprtr);

struct list_node_link *reap_link, *reap_link_tmp;
list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -260,6 +278,6 @@ void proc_init (void) {
proc_register (init, NULL);

spin_lock_ctx_t ctxcpu;
spin_lock (&init->cpu->lock, &ctxcpu);
do_sched (init, &init->cpu->lock, &ctxcpu);
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}
@@ -9,7 +9,6 @@
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>

@@ -18,6 +17,9 @@
#include <amd64/proc.h> /* USTACK_SIZE */
#endif

#define PROC_NEED_RESCHEDULE true
#define PROC_NO_RESCHEDULE false

/* process states */
#define PROC_READY 0
#define PROC_DEAD 1
@@ -28,13 +30,6 @@

struct cpu;

struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};

struct proc {
int pid;
struct rb_node_link proc_tree_link;
@@ -48,6 +43,7 @@ struct proc {
spin_lock_t lock;
struct cpu* cpu;
atomic_int state;
uintptr_t uvaddr_argument;
};

void proc_sched (void);
@@ -4,30 +4,34 @@
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>

static struct rb_node_link* procgroup_tree = NULL;
static rw_spin_lock_t procgroup_tree_lock = RW_SPIN_LOCK_INIT;
static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
static atomic_int pgids = 0;

uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr) {
spin_lock_ctx_t ctxprpd;
spin_lock_ctx_t ctxpg;

vaddr = (vaddr == 0) ? PROC_MAP_BASE : vaddr;
spin_lock (&procgroup->lock, &ctxpg);

vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;

struct proc_mapping* mapping = malloc (sizeof (*mapping));

if (mapping == NULL)
if (mapping == NULL) {
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}

uintptr_t paddr = pmm_alloc (pages);

if (paddr == PMM_ALLOC_ERR) {
free (mapping);
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}

@@ -38,9 +42,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
mapping->vaddr = vaddr;
mapping->size = pages * PAGE_SIZE;

flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */

spin_lock (&procgroup->pd.lock, &ctxprpd);
procgroup->map_base += pages * PAGE_SIZE;

list_append (procgroup->mappings, &mapping->proc_mappings_link);

@@ -49,7 +51,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
mm_map_page (&procgroup->pd, ppage, vpage, flags);
}

spin_unlock (&procgroup->pd.lock, &ctxprpd);
spin_unlock (&procgroup->lock, &ctxpg);

return vaddr;
}
@@ -57,15 +59,17 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;

struct list_node_link *mapping_link, *mapping_link_tmp;

bool used_tail_mapping = false;
spin_lock_ctx_t ctxprpd;
spin_lock_ctx_t ctxpg;

struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;

spin_lock (&procgroup->pd.lock, &ctxprpd);
spin_lock (&procgroup->lock, &ctxpg);

list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
@@ -115,10 +119,10 @@ bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t
free (tail_mapping);

for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
mm_unmap_page (&procgroup->pd, vpage, 0);
mm_unmap_page (&procgroup->pd, vpage);
}

spin_unlock (&procgroup->pd.lock, &ctxprpd);
spin_unlock (&procgroup->lock, &ctxpg);

return true;
}
@@ -135,13 +139,13 @@ struct procgroup* procgroup_create (void) {
procgroup->memb_proc_tree = NULL;
procgroup->lock = SPIN_LOCK_INIT;
procgroup->pgid = atomic_fetch_add (&pgids, 1);
procgroup->pd.lock = SPIN_LOCK_INIT;
procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
procgroup->map_base = PROC_MAP_BASE;

rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
spin_lock (&procgroup_tree_lock, &ctxpgtr);
rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
procgroup_tree_link, pgid);
rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);

return procgroup;
}
@@ -155,7 +159,6 @@ void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
procgroup_memb_tree_link, pid);
atomic_fetch_add (&procgroup->refs, 1);
DEBUG ("procgrpup attach PID %d to PGID %d\n", proc->pid, procgroup->pgid);

spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
@@ -169,21 +172,20 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {

rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
int refs = atomic_fetch_sub (&procgroup->refs, 1);
DEBUG ("procgrpup detach PID %d to PGID %d\n", proc->pid, procgroup->pgid);

spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);

if (refs == 1) {
rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
spin_lock (&procgroup_tree_lock, &ctxpgtr);
spin_lock (&procgroup->lock, &ctxpg);

rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);

spin_unlock (&procgroup->lock, &ctxpg);
rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);

/* unlink resources */
/* delete resources */
struct rb_node_link* rnode;
rbtree_first (&procgroup->resource_tree, rnode);
while (rnode) {
@@ -195,7 +197,7 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {

rnode = next;

proc_resource_unlink (resource);
proc_delete_resource (resource);
}

struct list_node_link *mapping_link, *mapping_link_tmp;
@@ -209,10 +211,8 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {

pmm_free (procgroup->pd.cr3_paddr, 1);

free (procgroup->tls.tls_tmpl);

free (procgroup);
}
}

int procgroup_get_sys_rid (struct procgroup* procgroup) {
return atomic_fetch_add (&procgroup->sys_rids, 1);
}
@@ -7,6 +7,7 @@
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/procgroup.h>

struct proc;

@@ -27,13 +28,14 @@ struct procgroup {
struct rb_node_link* resource_tree;
atomic_int sys_rids;
struct pd pd;
struct list_node_link* mappings; /* protected by pd.lock */
struct list_node_link* mappings;
uintptr_t map_base;
struct procgroup_tls tls;
};

struct procgroup* procgroup_create (void);
void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
int procgroup_get_sys_rid (struct procgroup* procgroup);
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr);
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
@@ -42,7 +42,6 @@ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, i
resource->u.mutex.resource = resource;
resource->rid = rid;
resource->type = PR_MUTEX;
resource->refs = 1;

spin_lock (&procgroup->lock, &ctxpg);
rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
@@ -52,9 +51,9 @@ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, i
return resource;
}

void proc_resource_unlink (struct proc_resource* resource) {
if (atomic_fetch_sub (&resource->refs, 1) == 1) {
resource->ops.cleanup (resource);
free (resource);
}
bool proc_delete_resource (struct proc_resource* resource) {
bool reschedule = resource->ops.cleanup (resource);
free (resource);

return reschedule;
}

@@ -15,19 +15,18 @@ struct procgroup;
struct proc_resource {
int type;
int rid;
atomic_int refs;
spin_lock_t lock;
struct rb_node_link resource_tree_link;
union {
struct proc_mutex mutex;
} u;
struct {
void (*cleanup) (struct proc_resource* resource);
bool (*cleanup) (struct proc_resource* resource);
} ops;
};

struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
void proc_resource_unlink (struct proc_resource* resource);
bool proc_delete_resource (struct proc_resource* resource);

#endif // _KERNEL_PROC_RESOURCE_H
@@ -1,9 +1,11 @@
c += proc/proc.c \
proc/resource.c \
proc/mutex.c \
proc/procgroup.c
proc/procgroup.c \
proc/suspension_q.c

o += proc/proc.o \
proc/resource.o \
proc/mutex.o \
proc/procgroup.o
proc/procgroup.o \
proc/suspension_q.o
kernel/proc/suspension_q.c (new file, 111 lines)
@@ -0,0 +1,111 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>

bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;

struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return PROC_NO_RESCHEDULE;
}

sq_entry->proc = proc;
sq_entry->sq = sq;

spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);

spin_unlock (resource_lock, ctxrl);

atomic_store (&proc->state, PROC_SUSPENDED);

/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);

/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);

list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);

if (cpu->proc_current == proc)
cpu->proc_current = NULL;

proc->cpu = NULL;

spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);

return PROC_NEED_RESCHEDULE;
}

bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;

spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);

/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);

/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);

proc->cpu = cpu;

if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);

list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);

spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);

free (sq_entry);

return PROC_NEED_RESCHEDULE;
}

void proc_sqs_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;

spin_lock (&proc->lock, &ctxpr);

/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;

spin_lock (&sq->lock, &ctxsq);

/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);

/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);

spin_unlock (&sq->lock, &ctxsq);

free (sq_entry);
}

spin_unlock (&proc->lock, &ctxpr);
}
@@ -4,9 +4,23 @@
#include <libk/list.h>
#include <sync/spin_lock.h>

struct proc;

struct proc_suspension_q {
struct list_node_link* proc_list;
spin_lock_t lock;
};

struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};

void proc_sqs_cleanup (struct proc* proc);
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl);
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);

#endif // _KERNEL_PROC_SUSPENTION_Q_H
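The new suspension-queue API has a specific locking contract: the caller enters holding the resource lock, and `proc_sq_suspend` releases it on every path and reports whether the scheduler must run. A minimal sketch of a waiter built on it, mirroring `proc_mutex_lock`; `resource_is_ready` is a hypothetical predicate, not part of this diff:

```c
/* Hypothetical blocking wait following the proc_sq_suspend contract:
 * enter with resource->lock held; the call drops it before returning. */
static bool resource_wait (struct proc* proc, struct proc_resource* resource) {
    spin_lock_ctx_t ctx;

    spin_lock (&resource->lock, &ctx);

    if (resource_is_ready (resource)) { /* hypothetical predicate */
        spin_unlock (&resource->lock, &ctx);
        return PROC_NO_RESCHEDULE;
    }

    /* Queues proc, drops resource->lock, asks the caller to reschedule. */
    return proc_sq_suspend (proc, &resource->u.mutex.suspension_q,
                            &resource->lock, &ctx);
}
```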
@@ -1,67 +0,0 @@
#include <libk/assert.h>
#include <libk/std.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/irq.h>
#include <sys/spin_lock.h>

#define WRITER_WAIT (1U << 31)
#define READER_MASK (~WRITER_WAIT)

void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t value;

irq_save (ctx);

for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);

if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
memory_order_relaxed)) {
return;
}
}

spin_lock_relax ();
}
}

void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
assert ((old & READER_MASK) > 0);
irq_restore (ctx);
}

void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
uint32_t value;

irq_save (ctx);

/* announce writer */
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);

if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
memory_order_acquire, memory_order_relaxed))
break;
} else {
spin_lock_relax ();
}
}

/* wait for readers */
for (;;) {
value = atomic_load_explicit (rw, memory_order_acquire);
if ((value & READER_MASK) == 0)
return;

spin_lock_relax ();
}
}

void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
atomic_store_explicit (rw, 0, memory_order_release);
irq_restore (ctx);
}
@@ -1,17 +0,0 @@
#ifndef _KERNEL_SYNC_RW_SPIN_LOCK_H
#define _KERNEL_SYNC_RW_SPIN_LOCK_H

#include <libk/std.h>
#include <sync/spin_lock.h>
#include <sys/spin_lock.h>

#define RW_SPIN_LOCK_INIT 0

typedef _Atomic (uint32_t) rw_spin_lock_t;

void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);

#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H
@@ -1,5 +1,3 @@
c += sync/spin_lock.c \
sync/rw_spin_lock.c
c += sync/spin_lock.c

o += sync/spin_lock.o \
sync/rw_spin_lock.o
o += sync/spin_lock.o
@@ -2,6 +2,7 @@
#define _KERNEL_SYS_MM_H

#include <libk/std.h>
#include <sync/spin_lock.h>

#if defined(__x86_64__)
#include <amd64/mm.h>
@@ -10,21 +11,18 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
#define MM_PD_RELOAD (1 << 30)
#define MM_PD_LOCK (1 << 31)

uintptr_t mm_alloc_user_pd_phys (void);
void mm_reload (void);
void mm_kernel_lock (spin_lock_ctx_t* ctx);
void mm_kernel_unlock (spin_lock_ctx_t* ctx);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
void mm_lock_kernel (void);
void mm_unlock_kernel (void);
bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
void mm_unmap_kernel_page (uintptr_t vaddr);
bool mm_validate (struct pd* pd, uintptr_t vaddr);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
struct pd* mm_get_kernel_pd (void);
void mm_init (void);
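
(A hypothetical kernel-side use of the MM_PG_* bits above; the addresses are made up, and this assumes the flag-taking mm_map_page variant listed in this header.)

void example_map_user_page (struct pd* pd) {
    uintptr_t paddr = 0x200000; /* some free physical frame (made up) */
    uintptr_t vaddr = 0x400000; /* where the process should see it (made up) */

    /* present, writable, and accessible from user mode */
    mm_map_page (pd, paddr, vaddr, MM_PG_PRESENT | MM_PG_RW | MM_PG_USER);
}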
@@ -6,8 +6,9 @@
struct proc;

struct proc* proc_from_elf (uint8_t* elf_contents);
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
                         uintptr_t entry);
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
                         uintptr_t argument_ptr);
void proc_cleanup (struct proc* proc);
void proc_init_tls (struct proc* proc);

#endif // _KERNEL_SYS_PROC_H
8
kernel/sys/procgroup.h
Normal file
8
kernel/sys/procgroup.h
Normal file
@@ -0,0 +1,8 @@
#ifndef _KERNEL_SYS_PROCGROUP_H
#define _KERNEL_SYS_PROCGROUP_H

#if defined(__x86_64__)
#include <amd64/procgroup.h>
#endif

#endif // _KERNEL_SYS_PROCGROUP_H
@@ -24,18 +24,18 @@

static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    spin_lock_ctx_t ctxprpd;
    spin_lock_ctx_t ctxpg;

    spin_lock (&proc->procgroup->pd.lock, &ctxprpd);
    spin_lock (&proc->procgroup->lock, &ctxpg);

    if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size, 0)) {
        spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
    if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
        spin_unlock (&proc->procgroup->lock, &ctxpg);
        return NULL;
    }

    uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr, 0);
    uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);

    spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
    spin_unlock (&proc->procgroup->lock, &ctxpg);

    uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;
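
(The last line above is the usual higher-half direct-map step: once the user buffer's physical address is known, the kernel reaches it by adding the HHDM base. A toy illustration with made-up values; the real offset comes from limine_hhdm_request.response.)

uintptr_t hhdm_offset = 0xffff800000000000; /* assumed HHDM base */
uintptr_t paddr = 0x123000;                 /* made-up physical address */
uintptr_t kvaddr = hhdm_offset + paddr;     /* 0xffff800000123000 */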
@@ -78,15 +78,13 @@ DEFINE_SYSCALL (sys_unmap) {
    return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
}

/* int clone (uintptr_t vstack_top, size_t stack_size, void* entry) */
/* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
DEFINE_SYSCALL (sys_clone) {
    uintptr_t vstack_top = a1;
    size_t stack_size = (size_t)a2;
    uintptr_t entry = a3;
    uintptr_t entry = a2;
    uintptr_t argument_ptr = a3;

    struct proc* new = proc_clone (proc, vstack_top, stack_size, entry);

    DEBUG ("new=%p\n", new);
    struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);

    if (new == NULL) {
        return SYSRESULT (-ST_OOM_ERROR);
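
(Kernel side, a1..a3 now carry vstack_top, entry and argument_ptr. A hypothetical userspace call through the libmsl wrapper declared later in this diff; worker and the argument value are made up.)

extern void worker (void);

int example_spawn (void* stack, size_t stack_size) {
    uintptr_t vstack_top = (uintptr_t)stack + stack_size; /* the stack grows down */
    return clone (vstack_top, &worker, (void*)0x2a);      /* negative return = -ST_* error */
}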
@@ -99,14 +97,17 @@ DEFINE_SYSCALL (sys_clone) {
    return SYSRESULT (pid);
}

/* void* argument_ptr (void) */
DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }

/* int sched (void) */
DEFINE_SYSCALL (sys_sched) {
    proc_sched ();
    return SYSRESULT (ST_OK);
}

/* int create_mutex (int mutex_rid) */
DEFINE_SYSCALL (sys_create_mutex) {
/* int mutex_create (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_create) {
    int mutex_rid = (int)a1;

    struct proc_resource* mutex_resource = proc_create_resource_mutex (proc->procgroup, mutex_rid);
@@ -117,8 +118,8 @@ DEFINE_SYSCALL (sys_create_mutex) {
    return SYSRESULT (mutex_resource->rid);
}

/* int unlink_mutex (int mutex_rid) */
DEFINE_SYSCALL (sys_unlink_mutex) {
/* int mutex_delete (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_delete) {
    int mutex_rid = (int)a1;

    struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
@@ -126,11 +127,14 @@ DEFINE_SYSCALL (sys_unlink_mutex) {
    if (mutex_resource == NULL)
        return SYSRESULT (-ST_NOT_FOUND);

    if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
        proc_sched ();

    return SYSRESULT (ST_OK);
}

/* int lock_mutex (int mutex_rid) */
DEFINE_SYSCALL (sys_lock_mutex) {
/* int mutex_lock (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_lock) {
    int mutex_rid = (int)a1;

    struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
@@ -138,13 +142,14 @@ DEFINE_SYSCALL (sys_lock_mutex) {
    if (mutex_resource == NULL)
        return SYSRESULT (-ST_NOT_FOUND);

    proc_mutex_lock (proc, &mutex_resource->u.mutex);
    if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
        proc_sched ();

    return SYSRESULT (ST_OK);
}

/* int unlock_mutex (int mutex_rid) */
DEFINE_SYSCALL (sys_unlock_mutex) {
/* int mutex_unlock (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_unlock) {
    int mutex_rid = (int)a1;

    struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
@@ -152,8 +157,10 @@ DEFINE_SYSCALL (sys_unlock_mutex) {
    if (mutex_resource == NULL)
        return SYSRESULT (-ST_NOT_FOUND);

    return SYSRESULT (proc_mutex_unlock (proc, &mutex_resource->u.mutex) ? ST_OK
                                                                         : -ST_PERMISSION_ERROR);
    if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
        proc_sched ();

    return SYSRESULT (ST_OK);
}

static syscall_handler_func_t handler_table[] = {
@@ -162,11 +169,12 @@ static syscall_handler_func_t handler_table[] = {
    [SYS_MAP] = &sys_map,
    [SYS_UNMAP] = &sys_unmap,
    [SYS_CLONE] = &sys_clone,
    [SYS_ARGUMENT_PTR] = &sys_argument_ptr,
    [SYS_SCHED] = &sys_sched,
    [SYS_CREATE_MUTEX] = &sys_create_mutex,
    [SYS_UNLINK_MUTEX] = &sys_unlink_mutex,
    [SYS_LOCK_MUTEX] = &sys_lock_mutex,
    [SYS_UNLOCK_MUTEX] = &sys_unlock_mutex,
    [SYS_MUTEX_CREATE] = &sys_mutex_create,
    [SYS_MUTEX_DELETE] = &sys_mutex_delete,
    [SYS_MUTEX_LOCK] = &sys_mutex_lock,
    [SYS_MUTEX_UNLOCK] = &sys_mutex_unlock,
};

syscall_handler_func_t syscall_find_handler (int syscall_num) {
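
(Taken together, the renamed syscalls give userspace a plain create/lock/unlock/delete lifecycle. A hypothetical use of the wrappers declared later in this diff; the resource id is made up.)

#define EXAMPLE_MUTEX 1000 /* made-up rid */

void example_critical_section (void) {
    mutex_create (EXAMPLE_MUTEX);

    mutex_lock (EXAMPLE_MUTEX);   /* may reschedule while contended */
    /* ... touch shared state ... */
    mutex_unlock (EXAMPLE_MUTEX); /* may wake a waiter and reschedule */

    mutex_delete (EXAMPLE_MUTEX);
}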
@@ -6,15 +6,13 @@

#define LIBALLOC_MUTEX 500

static int liballoc_mutex;
void liballoc_init (void) { mutex_create (LIBALLOC_MUTEX); }

void liballoc_init (void) { liballoc_mutex = create_mutex (LIBALLOC_MUTEX); }
void liballoc_deinit (void) { mutex_delete (LIBALLOC_MUTEX); }

void liballoc_deinit (void) { unlink_mutex (liballoc_mutex); }
int liballoc_lock (void) { return mutex_lock (LIBALLOC_MUTEX); }

int liballoc_lock (void) { return lock_mutex (liballoc_mutex); }

int liballoc_unlock (void) { return unlock_mutex (liballoc_mutex); }
int liballoc_unlock (void) { return mutex_unlock (LIBALLOC_MUTEX); }

void* liballoc_alloc (int pages) { return map (0, pages, MAP_FLAGS | MAP_RW); }
@@ -21,18 +21,16 @@ void* map (uintptr_t vaddr, size_t pages, uint32_t flags) {

int unmap (uintptr_t vaddr, size_t pages) { return do_syscall (SYS_UNMAP, vaddr, pages); }

int create_mem (int mem_rid, size_t pages) { return do_syscall (SYS_CREATE_MEM, mem_rid, pages); }

int unlink_mem (int mem_rid, size_t pages) { return do_syscall (SYS_UNLINK_MEM, mem_rid, pages); }

int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void)) {
    return do_syscall (SYS_CLONE, vstack_top, stack_size, entry);
int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr) {
    return do_syscall (SYS_CLONE, vstack_top, entry, argument_ptr);
}

int create_mutex (int mutex_rid) { return do_syscall (SYS_CREATE_MUTEX, mutex_rid); }
int mutex_create (int mutex_rid) { return do_syscall (SYS_MUTEX_CREATE, mutex_rid); }

int unlink_mutex (int mutex_rid) { return do_syscall (SYS_UNLINK_MUTEX, mutex_rid); }
int mutex_delete (int mutex_rid) { return do_syscall (SYS_MUTEX_DELETE, mutex_rid); }

int lock_mutex (int mutex_rid) { return do_syscall (SYS_LOCK_MUTEX, mutex_rid); }
int mutex_lock (int mutex_rid) { return do_syscall (SYS_MUTEX_LOCK, mutex_rid); }

int unlock_mutex (int mutex_rid) { return do_syscall (SYS_UNLOCK_MUTEX, mutex_rid); }
int mutex_unlock (int mutex_rid) { return do_syscall (SYS_MUTEX_UNLOCK, mutex_rid); }

void* argument_ptr (void) { return (void*)do_syscall (SYS_ARGUMENT_PTR, 0); }
@@ -18,10 +18,11 @@ int test (char c);
int sched (void);
void* map (uintptr_t vaddr, size_t pages, uint32_t flags);
int unmap (uintptr_t vaddr, size_t pages);
int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void));
int create_mutex (int mutex_rid);
int unlink_mutex (int mutex_rid);
int lock_mutex (int mutex_rid);
int unlock_mutex (int mutex_rid);
int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr);
int mutex_create (int mutex_rid);
int mutex_delete (int mutex_rid);
int mutex_lock (int mutex_rid);
int mutex_unlock (int mutex_rid);
void* argument_ptr (void);

#endif // _LIBMSL_M_SYSTEM_H
1
libmsl/proc/.gitignore
vendored
Normal file
1
libmsl/proc/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
*.o
6
libmsl/proc/local.h
Normal file
6
libmsl/proc/local.h
Normal file
@@ -0,0 +1,6 @@
#ifndef _LIBMSL_PROC_TLS_H
#define _LIBMSL_PROC_TLS_H

#define LOCAL __thread

#endif // _LIBMSL_PROC_TLS_H
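
(LOCAL expands to __thread, so a variable marked with it gets one per-thread instance. A trivial hypothetical use:)

#include <proc/local.h>

LOCAL int example_counter; /* each cloned thread sees its own copy */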
19
libmsl/proc/proc.c
Normal file
19
libmsl/proc/proc.c
Normal file
@@ -0,0 +1,19 @@
#include <alloc/liballoc.h>
#include <m/status.h>
#include <m/system.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>

int process_spawn (process_func_t func, void* argument_ptr) {
    void* stack = malloc (PROC_STACK_SIZE);
    if (stack == NULL)
        return -ST_OOM_ERROR;

    uintptr_t top = (uintptr_t)stack + PROC_STACK_SIZE;
    return clone (top, func, argument_ptr);
}

int process_quit (void) { return quit (); }

void* process_argument (void) { return argument_ptr (); }
14
libmsl/proc/proc.h
Normal file
14
libmsl/proc/proc.h
Normal file
@@ -0,0 +1,14 @@
#ifndef _LIBMSL_PROC_PROC_H
#define _LIBMSL_PROC_PROC_H

#include <m/system.h>

#define PROC_STACK_SIZE (256 * PAGE_SIZE)

typedef void (*process_func_t) (void);

int process_spawn (process_func_t func, void* argument_ptr);
int process_quit (void);
void* process_argument (void);

#endif // _LIBMSL_PROC_PROC_H
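
(A hypothetical caller of the new process API: spawn a worker with a small integer argument smuggled through the opaque pointer, and recover it on the other side.)

#include <proc/proc.h>
#include <stdint.h>

void example_entry (void) {
    int id = (int)(uintptr_t)process_argument (); /* the value passed to process_spawn */
    (void)id;
    process_quit ();
}

int example_spawn_worker (void) {
    /* returns the new pid, or -ST_OOM_ERROR if the stack allocation failed */
    return process_spawn (&example_entry, (void*)(uintptr_t)7);
}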
3
libmsl/proc/src.mk
Normal file
3
libmsl/proc/src.mk
Normal file
@@ -0,0 +1,3 @@
c += proc/proc.c

o += proc/proc.o
@@ -3,3 +3,4 @@ include init/src.mk
include m/src.mk
include string/src.mk
include alloc/src.mk
include proc/src.mk
@@ -7,4 +7,4 @@ clean_libmsl:
format_libmsl:
	make -C libmsl platform=$(platform) format

.PHONY: all_libmsl clean_libmsl
.PHONY: all_libmsl clean_libmsl format_libmsl