Compare commits

fff51321bc ... master (25 commits)

38e26a9c12
124aa12f5b
d2f5c032d9
73e42588fb
e78bfb9984
d2a88b3641
fdda2e2df8
388418a718
1c64d608bd
3d23187acf
a3b62ebd3d
8bda300f6a
cf51600c6a
b388b30b24
600886a7ee
67b66f2b39
18f791222e
5e16bb647c
a68373e4ee
8650010992
95f590fb3b
7bb3b77ede
c26fd3cb2b
fea0999726
7eceecf6e3
Makefile (2 changed lines)

@@ -4,4 +4,4 @@ include make/apps.mk
 include make/kernel.mk
 include make/dist.mk
 include make/docs.mk
-include make/libc.mk
+include make/libmsl.mk

@@ -6,6 +6,8 @@ PHDRS {
     text PT_LOAD;
     rodata PT_LOAD;
     data PT_LOAD;
+    bss PT_LOAD;
+    tls PT_TLS;
 }
 
 SECTIONS {
@@ -13,32 +15,53 @@ SECTIONS {
 
     .text : {
         *(.text .text.*)
+        *(.ltext .ltext.*)
     } :text
 
-    . = ALIGN(CONSTANT(MAXPAGESIZE));
+    . = ALIGN(0x1000);
 
     .rodata : {
         *(.rodata .rodata.*)
     } :rodata
 
-    .note.gnu.build-id : {
-        *(.note.gnu.build-id)
-    } :rodata
-
-    . = ALIGN(CONSTANT(MAXPAGESIZE));
+    . = ALIGN(0x1000);
 
     .data : {
         *(.data .data.*)
+        *(.ldata .ldata.*)
     } :data
 
+    . = ALIGN(0x1000);
+
     __bss_start = .;
 
     .bss : {
         *(.bss .bss.*)
-    } :data
+        *(.lbss .lbss.*)
+    } :bss
 
     __bss_end = .;
 
+    . = ALIGN(0x1000);
+
+    __tdata_start = .;
+
+    .tdata : {
+        *(.tdata .tdata.*)
+    } :tls
+
+    __tdata_end = .;
+
+    __tbss_start = .;
+
+    .tbss : {
+        *(.tbss .tbss.*)
+    } :tls
+
+    __tbss_end = .;
+
+    __tls_size = __tbss_end - __tdata_start;
 
     /DISCARD/ : {
         *(.eh_frame*)
         *(.note .note.*)

@@ -1,4 +1,4 @@
-cpu: model=p4_prescott_celeron_336
+cpu: model=p4_prescott_celeron_336, ips=200000000
 
 memory: guest=4096 host=2048
 
@@ -9,6 +9,7 @@ ata0: enabled=1
 ata0-master: type=cdrom, path=mop3.iso, status=inserted
 com1: enabled=1, mode=file, dev=bochs-com1.txt
 pci: enabled=1, chipset=i440fx
+clock: sync=realtime, time0=local
 
 boot: cdrom
 

@@ -7,7 +7,10 @@
 #define SYS_UNMAP        4
 #define SYS_CLONE        5
 #define SYS_SCHED        6
-#define SYS_CREATE_MEM   7
-#define SYS_UNLINK_MEM   8
+#define SYS_MUTEX_CREATE 7
+#define SYS_MUTEX_DELETE 8
+#define SYS_MUTEX_LOCK   9
+#define SYS_MUTEX_UNLOCK 10
+#define SYS_ARGUMENT_PTR 11
 
 #endif // _M_SYSCALL_DEFS_H

init/init.c (53 changed lines)

@@ -1,25 +1,46 @@
-#include <alloc/liballoc.h>
 #include <limits.h>
-#include <m/status.h>
-#include <m/system.h>
+#include <proc/local.h>
+#include <proc/proc.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <string/string.h>
 
-#define EXAMPLE 1
+#define MUTEX 2000
 
-#if EXAMPLE == 1
-void app_main (void) {
-    test ('a');
-    test ('a');
-    test ('a');
+LOCAL volatile char letter = 'c';
 
-    int* xs = malloc (1024 * sizeof (*xs));
-    memset (xs, 123, 1024 * sizeof (*xs));
-    free (xs);
+void app_proc (void) {
+    char arg_letter = (char)(uintptr_t)argument_ptr ();
 
-    test ('a');
-    test ('a');
-    test ('a');
+    letter = arg_letter;
+
+    for (;;) {
+        mutex_lock (MUTEX);
+
+        for (int i = 0; i < 3; i++)
+            test (letter);
+
+        mutex_unlock (MUTEX);
+    }
+
+    process_quit ();
+}
+
+void app_main (void) {
+    mutex_create (MUTEX);
+
+    letter = 'a';
+
+    process_spawn (&app_proc, (void*)'a');
+    process_spawn (&app_proc, (void*)'b');
+    process_spawn (&app_proc, (void*)'c');
+
+    for (;;) {
+        mutex_lock (MUTEX);
+
+        for (int i = 0; i < 3; i++)
+            test (letter);
+
+        mutex_unlock (MUTEX);
+    }
 }
-#endif

@@ -1,3 +1,3 @@
-S += init.S
+c += init.c
 
 o += init.o

@@ -4,7 +4,7 @@
 #include <amd64/msr.h>
 #include <libk/std.h>
 #include <limine/requests.h>
-#include <sync/rw_spin_lock.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
 #include <sys/spin.h>
@@ -38,7 +38,7 @@
 
 struct ioapic {
     struct acpi_madt_ioapic table_data;
-    rw_spin_lock_t lock;
+    spin_lock_t lock;
     uintptr_t mmio_base;
 };
 
@@ -59,10 +59,10 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
 static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
     spin_lock_ctx_t ctxioar;
 
-    rw_spin_read_lock (&ioapic->lock, &ctxioar);
+    spin_lock (&ioapic->lock, &ctxioar);
     *(volatile uint32_t*)ioapic->mmio_base = reg;
     uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
-    rw_spin_read_unlock (&ioapic->lock, &ctxioar);
+    spin_unlock (&ioapic->lock, &ctxioar);
     return ret;
 }
 
@@ -70,10 +70,10 @@ static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
 static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
     spin_lock_ctx_t ctxioaw;
 
-    rw_spin_write_lock (&ioapic->lock, &ctxioaw);
+    spin_lock (&ioapic->lock, &ctxioaw);
     *(volatile uint32_t*)ioapic->mmio_base = reg;
     *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
-    rw_spin_write_unlock (&ioapic->lock, &ctxioaw);
+    spin_unlock (&ioapic->lock, &ctxioaw);
 }
 
 /* Find an IOAPIC corresposting to provided IRQ */
@@ -82,8 +82,6 @@ static struct ioapic* amd64_ioapic_find (uint32_t irq) {
 
     for (size_t i = 0; i < ioapic_entries; i++) {
         ioapic = &ioapics[i];
-        /* uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset +
-         * (uintptr_t)ioapic->table_data.address, 1); */
         uint32_t version = amd64_ioapic_read (ioapic, 1);
         uint32_t max = ((version >> 16) & 0xFF);
 
@@ -162,9 +160,9 @@ void amd64_ioapic_init (void) {
         struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
         mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
                             (uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
-                            MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
+                            MM_PG_PRESENT | MM_PG_RW);
         ioapics[ioapic_entries++] = (struct ioapic){
-            .lock = RW_SPIN_LOCK_INIT,
+            .lock = SPIN_LOCK_INIT,
             .table_data = *ioapic_table_data,
             .mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
         };
@@ -233,7 +231,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
 static void amd64_lapic_start (uint32_t ticks) {
     amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
     amd64_lapic_write (LAPIC_TIMICT, ticks);
-    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17) | (1 << 16));
+    amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
 }
 
 /*
@@ -248,8 +246,7 @@ void amd64_lapic_init (uint32_t us) {
     uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
     thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
 
-    mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base,
-                        MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
+    mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);
 
     amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
 
@@ -270,5 +267,5 @@ void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
     }
 
     amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
-    amd64_lapic_write (LAPIC_ICR, vec);
+    amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
 }

@@ -9,6 +9,7 @@
 #include <irq/irq.h>
 #include <libk/std.h>
 #include <limine/limine.h>
+#include <limine/requests.h>
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
 #include <proc/proc.h>
@@ -29,7 +30,9 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
 * the necessary platform-dependent subsystems/drivers and jump into the init app.
 */
 void bootmain (void) {
-    struct cpu* bsp_cpu = cpu_make ();
+    struct limine_mp_response* mp = limine_mp_request.response;
+
+    struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);
 
     amd64_init (bsp_cpu, false);
     syscall_init ();
@@ -46,8 +49,6 @@ void bootmain (void) {
 
     smp_init ();
 
-    mm_init2 ();
-
     proc_init ();
 
     for (;;)

@@ -129,8 +129,7 @@ void amd64_hpet_init (void) {
     hpet_paddr = (uintptr_t)hpet->address.address;
 
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
-                        MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
+    mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
 
     uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
     hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

@@ -165,10 +165,21 @@ static void amd64_intr_exception (struct saved_regs* regs) {
 
 /* Handle incoming interrupt, dispatch IRQ handlers. */
 void amd64_intr_handler (void* stack_ptr) {
+    spin_lock_ctx_t ctxcpu, ctxpr;
+
     amd64_load_kernel_cr3 ();
 
     struct saved_regs* regs = stack_ptr;
 
+    spin_lock (&thiscpu->lock, &ctxcpu);
+    struct proc* proc_current = thiscpu->proc_current;
+    spin_lock (&proc_current->lock, &ctxpr);
+
+    memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));
+
+    spin_unlock (&proc_current->lock, &ctxpr);
+    spin_unlock (&thiscpu->lock, &ctxcpu);
+
     if (regs->trap <= 31) {
         amd64_intr_exception (regs);
     } else {
@@ -177,13 +188,7 @@ void amd64_intr_handler (void* stack_ptr) {
         struct irq* irq = irq_find (regs->trap);
 
         if (irq != NULL) {
-            if ((irq->flags & IRQ_INTERRUPT_SAFE))
-                __asm__ volatile ("sti");
-
             irq->func (irq->arg, stack_ptr);
-
-            if ((irq->flags & IRQ_INTERRUPT_SAFE))
-                __asm__ volatile ("cli");
         }
     }
 }
@@ -214,15 +219,3 @@ void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
 
 /* Restore interrupt state */
 void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
-
-/* Map custom IRQ mappings to legacy IRQs */
-uint32_t amd64_resolve_irq (uint32_t irq) {
-    static const uint32_t mappings[] = {
-        [SCHED_PREEMPT_TIMER] = 0,
-        [TLB_SHOOTDOWN] = 6,
-        [CPU_REQUEST_SCHED] = 3,
-        [CPU_SPURIOUS] = 5,
-    };
-
-    return mappings[irq];
-}

@@ -32,7 +32,6 @@ struct saved_regs {
 } PACKED;
 
 void amd64_load_idt (void);
-uint32_t amd64_resolve_irq (uint32_t irq);
 void amd64_intr_init (void);
 
 #endif // _KERNEL_AMD64_INTR_H

@@ -22,10 +22,12 @@ struct pg_index {
 } PACKED;
 
 /* Kernel page directory */
-static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
-static spin_lock_ctx_t ctxkpd;
-/* Lock needed to sync between map/unmap operations and TLB shootdown */
-static spin_lock_t mm_lock = SPIN_LOCK_INIT;
+static struct pd kernel_pd;
+static spin_lock_t kernel_pd_lock;
+
+void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
+
+void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
 
 /* Get current value of CR3 register */
 static uintptr_t amd64_current_cr3 (void) {
@@ -112,15 +114,7 @@ static void amd64_reload_cr3 (void) {
 
 /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
 void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    bool do_reload = false;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
 
@@ -129,69 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
 
     uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
     if (pml3 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
     if (pml2 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
     if (pml1 == NULL)
-        goto done;
+        return;
 
     uint64_t* pte = &pml1[pg_index.pml1];
 
     *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
-    do_reload = true;
-
-done:
-    if (do_reload && (flags & MM_PD_RELOAD))
-        amd64_reload_cr3 ();
-
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
 }
 
 /* Map a page into kernel page directory */
 void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
     mm_map_page (&kernel_pd, paddr, vaddr, flags);
+    amd64_reload_cr3 ();
 }
 
 /* Unmap a virtual address. TLB needs to be flushed afterwards */
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    bool do_reload = false;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
 
     uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
     if (pml3 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
     if (pml2 == NULL)
-        goto done;
+        return;
 
     uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
     if (pml1 == NULL)
-        goto done;
+        return;
 
     uint64_t* pte = &pml1[pg_index.pml1];
 
-    if ((*pte) & AMD64_PG_PRESENT) {
+    if ((*pte) & AMD64_PG_PRESENT)
         *pte = 0;
-        do_reload = true;
-    }
 
     if (amd64_mm_is_table_empty (pml1)) {
         uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -210,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
             }
         }
     }
-
-done:
-    if (do_reload && (flags & MM_PD_RELOAD))
-        amd64_reload_cr3 ();
-
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
 }
 
 /* Unmap a page from kernel page directory */
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
-    mm_unmap_page (&kernel_pd, vaddr, flags);
+void mm_unmap_kernel_page (uintptr_t vaddr) {
+    mm_unmap_page (&kernel_pd, vaddr);
+    amd64_reload_cr3 ();
 }
 
-/* Lock kernel page directory */
-void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock, &ctxkpd); }
-
-/* Unlock kernel page directory */
-void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock, &ctxkpd); }
-
 /* Allocate a userspace-ready page directory */
 uintptr_t mm_alloc_user_pd_phys (void) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -250,26 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
     return cr3;
 }
 
-/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
-void mm_reload (void) {
-    struct limine_mp_response* mp = limine_mp_request.response;
-
-    for (size_t i = 0; i < mp->cpu_count; i++) {
-        amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
-    }
-}
-
-bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+bool mm_validate (struct pd* pd, uintptr_t vaddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     bool ret = false;
 
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
-
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
 
@@ -289,45 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
     ret = (pte & AMD64_PG_PRESENT) != 0;
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
-
     return ret;
 }
 
-bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
     bool ok = true;
-    spin_lock_ctx_t ctxpd;
-
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
 
     for (size_t i = 0; i < size; i++) {
-        ok = mm_validate (pd, vaddr + i, 0);
+        ok = mm_validate (pd, vaddr + i);
         if (!ok)
             goto done;
     }
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
     return ok;
 }
 
-uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;
 
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
-
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
 
     for (size_t i4 = 0; i4 < 512; i4++) {
@@ -358,25 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
     }
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
-
     return ret;
 }
 
-uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
-    spin_lock_ctx_t ctxmm, ctxpd;
-
-    spin_lock (&mm_lock, &ctxmm);
-
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
     uintptr_t ret = 0;
 
-    if (flags & MM_PD_LOCK)
-        spin_lock (&pd->lock, &ctxpd);
-
     uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
     struct pg_index pg_index = amd64_mm_page_index (vaddr);
 
@@ -400,27 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
     ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
 
 done:
-    if (flags & MM_PD_LOCK)
-        spin_unlock (&pd->lock, &ctxpd);
-
-    spin_unlock (&mm_lock, &ctxmm);
-
     return ret;
 }
 
-/* TLB shootdown IRQ handler */
-static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
-    (void)arg, (void)regs;
-
-    amd64_reload_cr3 ();
-    DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
-}
-
-/* Continue initializing memory management subsystem for AMD64 after the essential parts were
- * initialized */
-void mm_init2 (void) {
-    irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
-}
-
 /* Initialize essentials for the AMD64 memory management subsystem */
 void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

@@ -7,12 +7,9 @@
 #define PAGE_SIZE 4096
 
 struct pd {
-    spin_lock_t lock;
     uintptr_t cr3_paddr;
-    atomic_int refs;
 };
 
 void amd64_load_kernel_cr3 (void);
-void mm_init2 (void);
 
 #endif // _KERNEL_AMD64_MM_H

@@ -1,6 +1,7 @@
 #include <amd64/gdt.h>
 #include <amd64/proc.h>
 #include <aux/elf.h>
+#include <libk/align.h>
 #include <libk/list.h>
 #include <libk/rbtree.h>
 #include <libk/std.h>
@@ -8,18 +9,18 @@
 #include <limine/requests.h>
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
+#include <proc/mutex.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/proc.h>
 
-/* 0 is kpproc */
-static atomic_int pids = 1;
+static atomic_int pids = 0;
 
 struct proc* proc_from_elf (uint8_t* elf_contents) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    int rid;
 
     struct proc* proc = malloc (sizeof (*proc));
     if (proc == NULL)
@@ -31,67 +32,18 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
     atomic_store (&proc->state, PROC_READY);
     proc->pid = atomic_fetch_add (&pids, 1);
 
-    proc->resources = malloc (sizeof (*proc->resources));
-    if (proc->resources == NULL) {
+    proc->procgroup = procgroup_create ();
+    if (proc->procgroup == NULL) {
         free (proc);
         return NULL;
     }
-    proc->resources->tree = NULL;
-    proc->resources->lock = RW_SPIN_LOCK_INIT;
-    proc->resources->refs = 1;
-    proc->resources->sys_rids = 0;
+    procgroup_attach (proc->procgroup, proc);
 
-    proc->pd = malloc (sizeof (*proc->pd));
-    if (proc->pd == NULL) {
-        free (proc->resources);
-        free (proc);
-        return NULL;
-    }
+    uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
+    proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
 
-    proc->pd->lock = SPIN_LOCK_INIT;
-    proc->pd->refs = 1;
-    proc->pd->cr3_paddr = mm_alloc_user_pd_phys ();
-    if (proc->pd->cr3_paddr == 0) {
-        free (proc->pd);
-        free (proc->resources);
-        free (proc);
-        return NULL;
-    }
-
-    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
-                                                   .managed = false};
-    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-    struct proc_resource* kstk_r =
-        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
-    if (kstk_r == NULL) {
-        pmm_free (proc->pd->cr3_paddr, 1);
-        free (proc->pd);
-        free (proc->resources);
-        free (proc);
-        return NULL;
-    }
-
-    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
-
-    struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE,
-                                                   .managed = false};
-    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-    struct proc_resource* ustk_r =
-        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
-    if (ustk_r == NULL) {
-        kstk_r->ops.cleanup (proc, kstk_r);
-        free (kstk_r);
-        pmm_free (proc->pd->cr3_paddr, 1);
-        free (proc->pd);
-        free (proc->resources);
-        free (proc);
-        return NULL;
-    }
-
-    proc->pdata.user_stack = ustk_r->u.mem.paddr;
-
-    proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
-              MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
+    procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
+                   MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
 
     proc->flags |= PROC_USTK_PREALLOC;
 
@@ -106,11 +58,10 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
     return proc;
 }
 
-struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
-                         uintptr_t entry) {
+struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+                         uintptr_t argument_ptr) {
     struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    spin_lock_ctx_t ctxprt, ctxrs;
-    int rid;
+    spin_lock_ctx_t ctxprt;
 
     struct proc* proc = malloc (sizeof (*proc));
     if (proc == NULL)
@@ -124,54 +75,13 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_
 
     spin_lock (&proto->lock, &ctxprt);
 
-    proc->pd = proto->pd;
-    proc->mappings = proto->mappings;
-    atomic_fetch_add (&proto->pd->refs, 1);
-
-    proc->resources = proto->resources;
-
-    rw_spin_write_lock (&proc->resources->lock, &ctxrs);
-
-    atomic_fetch_add (&proc->resources->refs, 1);
-
-    struct rb_node_link* rnode;
-    rbtree_first (&proc->resources->tree, rnode);
-    while (rnode) {
-        struct rb_node_link* next;
-        rbtree_next (rnode, next);
-
-        struct proc_resource* resource =
-            rbtree_entry (rnode, struct proc_resource, local_resource_tree_link);
-        atomic_fetch_add (&resource->refs, 1);
-
-        rnode = next;
-    }
-
-    rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
+    proc->procgroup = proto->procgroup;
+    procgroup_attach (proc->procgroup, proc);
 
     spin_unlock (&proto->lock, &ctxprt);
 
-    uintptr_t vstack_bottom = vstack_top - stack_size;
-
-    uintptr_t pstack_bottom = mm_v2p (proc->pd, vstack_bottom, MM_PD_LOCK);
-    if (pstack_bottom == 0) {
-        free (proc);
-        return NULL;
-    }
-
-    struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE,
-                                                   .managed = false};
-    rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-    struct proc_resource* kstk_r =
-        proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
-    if (kstk_r == NULL) {
-        free (proc);
-        return NULL;
-    }
-
-    proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
-
-    proc->pdata.user_stack = pstack_bottom + stack_size;
+    uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
+    proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
 
     proc->pdata.regs.ss = GDT_UDATA | 0x03;
     proc->pdata.regs.rsp = (uint64_t)vstack_top;
@@ -179,40 +89,50 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_
     proc->pdata.regs.cs = GDT_UCODE | 0x03;
     proc->pdata.regs.rip = (uint64_t)entry;
 
+    proc->uvaddr_argument = argument_ptr;
+
+    proc_init_tls (proc);
+
     return proc;
 }
 
 void proc_cleanup (struct proc* proc) {
-    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-    spin_lock_ctx_t ctxprpd;
+    proc_sqs_cleanup (proc);
+    proc_mutexes_cleanup (proc);
 
-    proc_cleanup_resources (proc);
+    pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
+    procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
 
-    if (atomic_fetch_sub (&proc->pd->refs, 1) == 1) {
-        DEBUG ("PID %d Free virtual address space\n", proc->pid);
-        struct list_node_link *mapping_link, *mapping_link_tmp;
-        spin_lock (&proc->pd->lock, &ctxprpd);
-
-        list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
-            struct proc_mapping* mapping =
-                list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
-
-            list_remove (proc->mappings, mapping_link);
-            free (mapping);
-        }
-
-        pmm_free (proc->pd->cr3_paddr, 1);
-        spin_unlock (&proc->pd->lock, &ctxprpd);
-        free (proc->pd);
-    }
-
-    pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
-              KSTACK_SIZE / PAGE_SIZE);
-
-    if ((proc->flags & PROC_USTK_PREALLOC))
-        pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);
-
-    DEBUG ("PID %d Free stacks\n", proc->pid);
+    procgroup_detach (proc->procgroup, proc);
 
+    /* clean the process */
     free (proc);
 }
+
+void proc_init_tls (struct proc* proc) {
+    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
+
+    if (proc->procgroup->tls.tls_tmpl == NULL)
+        return;
+
+    size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
+    size_t pages = proc->procgroup->tls.tls_tmpl_pages;
+
+    uintptr_t tls_paddr;
+    uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
+
+    uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
+
+    uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
+
+    memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
+    memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
+
+    uintptr_t ktcb = k_tls_addr + tls_size;
+    uintptr_t utcb = tls_vaddr + tls_size;
+
+    *(uintptr_t*)ktcb = utcb;
+
+    proc->pdata.fs_base = utcb;
+    proc->pdata.tls_vaddr = tls_vaddr;
+}

@@ -4,17 +4,19 @@
 #include <amd64/intr.h>
 #include <libk/std.h>
 
-/// Top of userspace process' stack
+/* Top of userspace process' stack */
 #define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
-/// Size of userspace process' stack
+/* Size of userspace process' stack */
 #define USTACK_SIZE (256 * PAGE_SIZE)
+/* proc_map () base address */
+#define PROC_MAP_BASE 0x0000700000000000
 
-/// Platform-dependent process data
+/* Platform-dependent process data */
 struct proc_platformdata {
     struct saved_regs regs;
-    uintptr_t user_stack;
     uintptr_t kernel_stack;
-    uint64_t gs_base;
+    uint64_t fs_base;
+    uintptr_t tls_vaddr;
 };
 
 #endif // _KERNEL_AMD64_PROC_H

kernel/amd64/procgroup.h (new file, 13 lines)

@@ -0,0 +1,13 @@
+#ifndef _KERNEL_AMD64_PROCGRPUP_H
+#define _KERNEL_AMD64_PROCGRPUP_H
+
+#include <libk/std.h>
+
+struct procgroup_tls {
+    uint8_t* tls_tmpl;
+    size_t tls_tmpl_size;
+    size_t tls_tmpl_total_size;
+    size_t tls_tmpl_pages;
+};
+
+#endif // _KERNEL_AMD64_PROCGRPUP_H

@@ -3,14 +3,21 @@
 #include <amd64/sched.h>
 #include <libk/std.h>
 #include <proc/proc.h>
+#include <sync/spin_lock.h>
 #include <sys/mm.h>
 #include <sys/smp.h>
 
-void do_sched (struct proc* proc) {
-    __asm__ volatile ("cli");
+void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
+    spin_lock_ctx_t ctxpr;
+
+    spin_lock (&proc->lock, &ctxpr);
 
     thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
     thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
+    amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
 
-    amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd->cr3_paddr);
+    spin_unlock (&proc->lock, &ctxpr);
+    spin_unlock (cpu_lock, ctxcpu);
+
+    amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
 }

@@ -8,8 +8,10 @@
 #include <libk/string.h>
 #include <limine/requests.h>
 #include <mm/liballoc.h>
+#include <proc/proc.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/syscall.h>
 
@@ -21,7 +23,7 @@ static struct cpu cpus[CPUS_MAX];
 static atomic_int cpu_init_count;
 
 /// Allocate a CPU structure
-struct cpu* cpu_make (void) {
+struct cpu* cpu_make (uint64_t lapic_id) {
     int id = atomic_fetch_add (&cpu_counter, 1);
 
     struct cpu* cpu = &cpus[id];
@@ -29,6 +31,7 @@ struct cpu* cpu_make (void) {
     memset (cpu, 0, sizeof (*cpu));
     cpu->lock = SPIN_LOCK_INIT;
     cpu->id = id;
+    cpu->lapic_id = lapic_id;
 
     amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
 
@@ -41,21 +44,36 @@ struct cpu* cpu_get (void) {
 }
 
 void cpu_request_sched (struct cpu* cpu) {
-    struct limine_mp_response* mp = limine_mp_request.response;
+    if (cpu == thiscpu) {
+        proc_sched ();
+        return;
+    }
 
-    for (size_t i = 0; i < mp->cpu_count; i++) {
-        if (cpu->id == i) {
-            amd64_lapic_ipi (mp->cpus[i]->lapic_id, CPU_REQUEST_SCHED);
-            break;
+    amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
+}
+
+struct cpu* cpu_find_lightest (void) {
+    struct cpu* cpu = &cpus[0];
+
+    int load = atomic_load (&cpu->proc_run_q_count);
+
+    for (unsigned int i = 1; i < cpu_counter; i++) {
+        struct cpu* new_cpu = &cpus[i];
+        int new_load = atomic_load (&new_cpu->proc_run_q_count);
+        if (new_load < load) {
+            load = new_load;
+            cpu = new_cpu;
         }
     }
+
+    return cpu;
 }
 
 /// Bootstrap code for non-BSP CPUs
 static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
     amd64_load_kernel_cr3 ();
 
-    struct cpu* cpu = cpu_make ();
+    struct cpu* cpu = cpu_make (mp_info->lapic_id);
 
     amd64_init (cpu, true); /* gdt + idt */
     syscall_init ();
@@ -64,12 +82,14 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
 
     DEBUG ("CPU %u is online!\n", thiscpu->id);
 
-    __asm__ volatile ("sti");
-
     atomic_fetch_sub (&cpu_init_count, 1);
 
-    for (;;)
-        ;
+    struct proc* spin_proc = proc_spawn_rd ("spin.exe");
+    proc_register (spin_proc, thiscpu);
+
+    spin_lock_ctx_t ctxcpu;
+    spin_lock (&spin_proc->cpu->lock, &ctxcpu);
+    do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
 }
 
 /// Initialize SMP subsystem for AMD64. Start AP CPUs
@@ -81,7 +101,7 @@ void smp_init (void) {
     cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
 
     for (size_t i = 0; i < mp->cpu_count; i++) {
-        if (mp->cpus[i]->lapic_id != thiscpu->id) {
+        if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
            DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
            mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
         }

@@ -2,6 +2,7 @@
 #define _KERNEL_AMD64_SMP_H
 
 #include <amd64/gdt.h>
+#include <amd64/intr.h>
 #include <amd64/tss.h>
 #include <aux/compiler.h>
 #include <libk/rbtree.h>
@@ -23,17 +24,20 @@ struct cpu {
 
     uintptr_t lapic_mmio_base;
     uint64_t lapic_ticks;
+    uint64_t lapic_id;
     uint32_t id;
 
     spin_lock_t lock;
 
-    struct rb_node_link* proc_run_q;
+    struct list_node_link* proc_run_q;
     struct proc* proc_current;
+    atomic_int proc_run_q_count;
 };
 
-struct cpu* cpu_make (void);
+struct cpu* cpu_make (uint64_t lapic_id);
 struct cpu* cpu_get (void);
 void cpu_request_sched (struct cpu* cpu);
+struct cpu* cpu_find_lightest (void);
 
 #define thiscpu (cpu_get ())
 

@@ -3,6 +3,7 @@
 #include <amd64/mm.h>
 #include <amd64/msr-index.h>
 #include <amd64/msr.h>
+#include <libk/string.h>
 #include <m/status.h>
 #include <m/syscall_defs.h>
 #include <proc/proc.h>
@@ -12,26 +13,29 @@
 
 extern void amd64_syscall_entry (void);
 
-int amd64_syscall_dispatch (void* stack_ptr) {
-    amd64_load_kernel_cr3 ();
+uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
+    spin_lock_ctx_t ctxcpu, ctxpr;
+
+    amd64_load_kernel_cr3 ();
     struct saved_regs* regs = stack_ptr;
 
+    spin_lock (&thiscpu->lock, &ctxcpu);
+    struct proc* caller = thiscpu->proc_current;
+    spin_lock (&caller->lock, &ctxpr);
+
+    memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
+
+    spin_unlock (&caller->lock, &ctxpr);
+    spin_unlock (&thiscpu->lock, &ctxcpu);
+
     int syscall_num = regs->rax;
     syscall_handler_func_t func = syscall_find_handler (syscall_num);
 
-    if (func == NULL)
+    if (func == NULL) {
         return -ST_SYSCALL_NOT_FOUND;
+    }
 
-    struct proc* caller = thiscpu->proc_current;
-
-    __asm__ volatile ("sti");
-
-    int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
-
-    __asm__ volatile ("cli");
-
-    return result;
+    return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
 }
 
 void syscall_init (void) {

@@ -4,6 +4,8 @@
 
 .global amd64_syscall_entry
 amd64_syscall_entry:
+    cli
+
     movq %rsp, %gs:0
     movq %gs:8, %rsp
 

@@ -2,7 +2,7 @@
 #include <libk/list.h>
 #include <libk/std.h>
 #include <mm/liballoc.h>
-#include <sync/rw_spin_lock.h>
+#include <sync/spin_lock.h>
 #include <sys/debug.h>
 
 #if defined(__x86_64__)
@@ -12,9 +12,9 @@
 
 struct irq* irq_table[0x100];
 
-static rw_spin_lock_t irqs_lock;
+static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
 
-bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
+bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
     spin_lock_ctx_t ctxiqa;
 
     struct irq* irq = malloc (sizeof (*irq));
@@ -25,16 +25,10 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
     irq->func = func;
     irq->arg = arg;
     irq->irq_num = irq_num;
-    irq->flags = flags;
 
-    rw_spin_write_lock (&irqs_lock, &ctxiqa);
+    spin_lock (&irqs_lock, &ctxiqa);
     irq_table[irq_num] = irq;
-    rw_spin_write_unlock (&irqs_lock, &ctxiqa);
-
-#if defined(__x86_64__)
-    uint8_t resolution = amd64_resolve_irq (irq_num);
-    amd64_ioapic_route_irq (irq_num, resolution, 0, amd64_lapic_id ());
-#endif
+    spin_unlock (&irqs_lock, &ctxiqa);
 
     return true;
 }
@@ -42,11 +36,11 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
 struct irq* irq_find (uint32_t irq_num) {
     spin_lock_ctx_t ctxiqa;
 
-    rw_spin_read_lock (&irqs_lock, &ctxiqa);
+    spin_lock (&irqs_lock, &ctxiqa);
 
     struct irq* irq = irq_table[irq_num];
 
-    rw_spin_read_unlock (&irqs_lock, &ctxiqa);
+    spin_unlock (&irqs_lock, &ctxiqa);
 
     return irq;
 }

|||||||
@@ -4,9 +4,6 @@
|
|||||||
#include <libk/list.h>
|
#include <libk/list.h>
|
||||||
#include <libk/std.h>
|
#include <libk/std.h>
|
||||||
|
|
||||||
#define IRQ_INTERRUPT_SAFE (1 << 0)
|
|
||||||
#define IRQ_INTERRUPT_UNSAFE (1 << 1)
|
|
||||||
|
|
||||||
typedef void (*irq_func_t) (void* arg, void* regs);
|
typedef void (*irq_func_t) (void* arg, void* regs);
|
||||||
|
|
||||||
struct irq {
|
struct irq {
|
||||||
@@ -15,10 +12,9 @@ struct irq {
|
|||||||
irq_func_t func;
|
irq_func_t func;
|
||||||
void* arg;
|
void* arg;
|
||||||
uint32_t irq_num;
|
uint32_t irq_num;
|
||||||
uint32_t flags;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags);
|
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
|
||||||
struct irq* irq_find (uint32_t irq_num);
|
struct irq* irq_find (uint32_t irq_num);
|
||||||
|
|
||||||
#endif // _KERNEL_IRQ_IRQ_H
|
#endif // _KERNEL_IRQ_IRQ_H
|
||||||
|
|||||||
@@ -1,20 +0,0 @@
-#ifndef _KERNEL_PROC_KPPROC_FB_H
-#define _KERNEL_PROC_KPPROC_FB_H
-
-#include <aux/compiler.h>
-#include <libk/std.h>
-
-/* data to expose as a kpproc resource */
-struct kpproc_fb {
-    uintptr_t paddr;
-    uint64_t w, h, pitch;
-    uint16_t bpp;
-    uint8_t red_mask_size;
-    uint8_t red_mask_shift;
-    uint8_t green_mask_size;
-    uint8_t green_mask_shift;
-    uint8_t blue_mask_size;
-    uint8_t blue_mask_shift;
-};
-
-#endif // _KERNEL_PROC_KPPROC_FB_H

kernel/proc/locks.txt (new file, 10 lines)

@@ -0,0 +1,10 @@
+Lock hierarchy for process scheduling:
+
+1. proc_tree_lock
+2. cpu->lock
+3. procgroup->lock
+4. proc->lock
+5. sq->lock
+
+1. procgroup_tree_lock
+2. procgroup->lock

@@ -1,33 +0,0 @@
-#include <libk/std.h>
-#include <mm/pmm.h>
-#include <proc/mem.h>
-#include <proc/proc.h>
-#include <proc/resource.h>
-#include <sync/spin_lock.h>
-
-bool proc_create_resource_mem (struct proc_resource_mem* mem, struct proc_resource_mem_init* init) {
-    if (init->pages == 0)
-        return false;
-
-    if (init->managed) {
-        mem->paddr = init->paddr;
-        mem->managed = true;
-    } else {
-        uintptr_t paddr = pmm_alloc (init->pages);
-        if (paddr == PMM_ALLOC_ERR)
-            return false;
-        mem->paddr = paddr;
-        mem->managed = false;
-    }
-
-    mem->pages = mem->alive_pages = init->pages;
-
-    return true;
-}
-
-void proc_cleanup_resource_mem (struct proc* proc, struct proc_resource* resource) {
-    (void)proc;
-
-    if (!resource->u.mem.managed)
-        pmm_free (resource->u.mem.paddr, resource->u.mem.pages);
-}

kernel/proc/mem.h (deleted)
@@ -1,27 +0,0 @@
-#ifndef _KERNEL_PROC_MEM_H
-#define _KERNEL_PROC_MEM_H
-
-#include <libk/std.h>
-
-struct proc;
-struct proc_resource;
-
-struct proc_resource_mem {
-  struct proc_resource* resource;
-
-  uintptr_t paddr;
-  size_t pages;
-  ptrdiff_t alive_pages;
-  bool managed;
-};
-
-struct proc_resource_mem_init {
-  uintptr_t paddr;
-  size_t pages;
-  bool managed;
-};
-
-bool proc_create_resource_mem (struct proc_resource_mem* mem, struct proc_resource_mem_init* init);
-void proc_cleanup_resource_mem (struct proc* proc, struct proc_resource* resource);
-
-#endif // _KERNEL_PROC_MEM_H
kernel/proc/mutex.c
@@ -2,41 +2,95 @@
 #include <libk/rbtree.h>
 #include <libk/std.h>
 #include <libk/string.h>
+#include <mm/liballoc.h>
 #include <proc/mutex.h>
 #include <proc/proc.h>
+#include <proc/suspension_q.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
+#include <sys/smp.h>
+#include <sys/spin_lock.h>

-bool proc_create_resource_mutex (struct proc_mutex* mutex) {
-  memset (mutex, 0, sizeof (*mutex));
-
-  return true;
-}
+void proc_mutexes_cleanup (struct proc* proc) {
+  spin_lock_ctx_t ctxpg, ctxrs;
+
+  spin_lock (&proc->procgroup->lock, &ctxpg);
+
+  struct rb_node_link* rnode;
+  rbtree_first (&proc->procgroup->resource_tree, rnode);
+
+  while (rnode) {
+    struct rb_node_link* next;
+    rbtree_next (rnode, next);
+
+    struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
+
+    rnode = next;
+
+    spin_lock (&resource->lock, &ctxrs);
+
+    if (resource->type != PR_MUTEX) {
+      spin_unlock (&resource->lock, &ctxrs);
+      continue;
+    }

-void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resource) {
+    if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
+      spin_unlock (&resource->lock, &ctxrs);
+
+      proc_mutex_unlock (proc, &resource->u.mutex);
+    }
+  }
+
+  spin_unlock (&proc->procgroup->lock, &ctxpg);
+}
+
+bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
   struct proc_mutex* mutex = &resource->u.mutex;
+  spin_lock_ctx_t ctxmt, ctxsq;

-  proc_mutex_unlock (proc, mutex);
+  spin_lock (&mutex->resource->lock, &ctxmt);
+  spin_lock (&mutex->suspension_q.lock, &ctxsq);
+
+  bool reschedule = PROC_NO_RESCHEDULE;
+
+  while (mutex->suspension_q.proc_list != NULL) {
+    struct list_node_link* node = mutex->suspension_q.proc_list;
+    struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
+    struct proc* suspended_proc = sq_entry->proc;
+
+    /* we will relock during resume */
+    spin_unlock (&mutex->suspension_q.lock, &ctxsq);
+    spin_unlock (&mutex->resource->lock, &ctxmt);
+
+    reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);
+
+    /* reacquire */
+    spin_lock (&mutex->resource->lock, &ctxmt);
+    spin_lock (&mutex->suspension_q.lock, &ctxsq);
+  }

-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
+  mutex->locked = false;
+  mutex->owner = NULL;
+
+  spin_unlock (&mutex->suspension_q.lock, &ctxsq);
+  spin_unlock (&mutex->resource->lock, &ctxmt);
+
+  return reschedule;
+}
+
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
   spin_lock_ctx_t ctxmt;

-try:
   spin_lock (&mutex->resource->lock, &ctxmt);

   if (!mutex->locked || mutex->owner == proc) {
     mutex->locked = true;
     mutex->owner = proc;
     spin_unlock (&mutex->resource->lock, &ctxmt);
-    return;
+    return PROC_NO_RESCHEDULE;
   }

-  spin_unlock (&mutex->resource->lock, &ctxmt);
-
-  proc_suspend (proc, &mutex->suspension_q);
-
-  goto try;
+  return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
 }

 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
@@ -46,26 +100,24 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {

   if (mutex->owner != proc) {
     spin_unlock (&mutex->resource->lock, &ctxmt);
-    return false;
+    return PROC_NO_RESCHEDULE;
   }

   spin_lock (&mutex->suspension_q.lock, &ctxsq);

-  struct proc* resumed_proc = NULL;
-  struct rb_node_link* node;
-  rbtree_first (&mutex->suspension_q.proc_tree, node);
+  struct list_node_link* node = mutex->suspension_q.proc_list;

   if (node) {
-    resumed_proc = rbtree_entry (node, struct proc, suspension_link);
+    struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
+    struct proc* resumed_proc = sq_entry->proc;
+
     mutex->owner = resumed_proc;
     mutex->locked = true;

     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);

-    proc_resume (resumed_proc);
-
-    return true;
+    return proc_sq_resume (resumed_proc, sq_entry);
   }

   mutex->locked = false;
@@ -74,5 +126,5 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);

-  return true;
+  return PROC_NEED_RESCHEDULE;
 }
kernel/proc/mutex.h
@@ -15,9 +15,9 @@ struct proc_mutex {
   struct proc* owner;
 };

-bool proc_create_resource_mutex (struct proc_mutex* mutex);
-void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resource);
-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
+bool proc_cleanup_resource_mutex (struct proc_resource* resource);
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
+void proc_mutexes_cleanup (struct proc* proc);

 #endif // _KERNEL_PROC_MUTEX_H
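With this change the mutex functions no longer reschedule themselves (the old version looped with goto around proc_suspend); they return a hint the caller must act on. A minimal caller sketch; the wrapper name is an assumption, not code from this diff:

/* hypothetical syscall-level wrapper illustrating the new contract:
   PROC_NEED_RESCHEDULE means "a run queue changed, pick a new proc" */
void example_sys_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
  if (proc_mutex_lock (proc, mutex) == PROC_NEED_RESCHEDULE)
    proc_sched (); /* suspension already enqueued us; switch away now */
}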
kernel/proc/proc.c
@@ -9,11 +9,10 @@
 #include <limine/requests.h>
 #include <mm/liballoc.h>
 #include <mm/pmm.h>
-#include <proc/kpproc_fb.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <rd/rd.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
@@ -28,117 +27,17 @@

 #define SCHED_REAP_FREQ 10

-/*
- * Lock hierachy:
- * - proc_tree_lock
- * - cpu->lock
- * - proc->lock
- * - suspension_q->lock
- */
-
 static struct rb_node_link* proc_tree = NULL;
-static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;

 static atomic_int sched_cycles = 0;

-/* kernel pseudo process */
-static struct proc kpproc;
-
 static bool proc_check_elf (uint8_t* elf) {
   if (!((elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F')))
     return false;
   return true;
 }

-bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
-               uint32_t flags) {
-  spin_lock_ctx_t ctxprpd;
-  struct proc_mapping* mapping = malloc (sizeof (*mapping));
-
-  if (mapping == NULL)
-    return false;
-
-  mapping->paddr = start_paddr;
-  mapping->vaddr = start_vaddr;
-  mapping->size = pages * PAGE_SIZE;
-
-  flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */
-
-  spin_lock (&proc->pd->lock, &ctxprpd);
-
-  list_append (proc->mappings, &mapping->proc_mappings_link);
-
-  for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
-       vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
-    mm_map_page (proc->pd, ppage, vpage, flags);
-  }
-
-  spin_unlock (&proc->pd->lock, &ctxprpd);
-
-  return true;
-}
-
-bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
-  size_t unmap_size = pages * PAGE_SIZE;
-  uintptr_t end_vaddr = start_vaddr + unmap_size;
-  struct list_node_link *mapping_link, *mapping_link_tmp;
-  bool used_tail_mapping = false;
-  spin_lock_ctx_t ctxprpd;
-
-  struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
-  if (tail_mapping == NULL)
-    return false;
-
-  spin_lock (&proc->pd->lock, &ctxprpd);
-
-  list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
-    struct proc_mapping* mapping =
-        list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
-
-    uintptr_t m_end = mapping->vaddr + mapping->size;
-
-    /* check overlap */
-    if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
-      /* split in the middle */
-      if ((start_vaddr > mapping->vaddr) && (end_vaddr < m_end)) {
-        tail_mapping->vaddr = end_vaddr;
-        tail_mapping->paddr = mapping->paddr + (end_vaddr - mapping->vaddr);
-        tail_mapping->size = m_end - end_vaddr;
-
-        mapping->size = start_vaddr - mapping->vaddr;
-
-        list_insert_after (proc->mappings, &mapping->proc_mappings_link,
-                           &tail_mapping->proc_mappings_link);
-
-        used_tail_mapping = true;
-
-        break;
-      } else if ((start_vaddr <= mapping->vaddr) && (end_vaddr < m_end)) { /* shrink left */
-        size_t diff = end_vaddr - mapping->vaddr;
-        mapping->vaddr += diff;
-        mapping->paddr += diff;
-        mapping->size -= diff;
-      } else if ((start_vaddr > mapping->vaddr) && (end_vaddr >= m_end)) { /* shrink right */
-        mapping->size = start_vaddr - mapping->vaddr;
-      } else { /* full overlap */
-        list_remove (proc->mappings, &mapping->proc_mappings_link);
-        free (mapping);
-      }
-    }
-  }
-
-  if (!used_tail_mapping)
-    free (tail_mapping);
-
-  for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
-    mm_unmap_page (proc->pd, vpage, 0);
-  }
-
-  spin_unlock (&proc->pd->lock, &ctxprpd);
-
-  return true;
-}
-
 struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
   struct elf_aux aux;

@@ -163,25 +62,37 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {

       size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);

-      struct proc_resource_mem_init mem_init = {.pages = blks};
-      int rid = atomic_fetch_add (&proc->resources->sys_rids, 1);
-      struct proc_resource* r =
-          proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&mem_init);
-      if (r == NULL) {
-        DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
-      }
-
-      uintptr_t p_addr = r->u.mem.paddr;
-
-      memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
-      memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
-              (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
-
       uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
       if (phdr->p_flags & PF_W)
         pg_flags |= MM_PG_RW;

-      proc_map (proc, p_addr, v_addr, blks, pg_flags);
+      uintptr_t p_addr;
+      procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
+
+      memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
+      memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
+              (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
+    } break;
+    case PT_TLS: {
+#if defined(__x86_64__)
+      if (phdr->p_memsz > 0) {
+        size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
+        size_t tls_size = align_up (phdr->p_memsz, tls_align);
+        size_t tls_total_needed = tls_size + sizeof (uintptr_t);
+        size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
+        proc->procgroup->tls.tls_tmpl_pages = blks;
+        proc->procgroup->tls.tls_tmpl_size = tls_size;
+        proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
+
+        proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
+        memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
+
+        memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
+                phdr->p_filesz);
+
+        proc_init_tls (proc);
+      }
+#endif
     } break;
   }
 }
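The extra `sizeof (uintptr_t)` on top of the aligned TLS size matches the x86-64 (variant II) TLS ABI, where the word at the thread pointer holds its own address. `proc_init_tls` is not shown in this diff, so the sketch below is an assumption about the intended layout, not its actual body:

/* hedged sketch of a variant-II TLS block consistent with the sizes above */
static uintptr_t example_tls_place (uint8_t* block, const struct procgroup_tls* tls) {
  /* TLS template data sits below the thread pointer */
  uintptr_t tp = (uintptr_t)block + tls->tls_tmpl_size;
  *(uintptr_t*)tp = tp; /* %fs:0 conventionally points at itself */
  return tp;            /* caller would load this into FSBASE */
}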
@@ -189,11 +100,10 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
   return aux;
 }

-static struct proc* proc_spawn_rd (char* name) {
+struct proc* proc_spawn_rd (char* name) {
   struct rd_file* rd_file = rd_get_file (name);

   bool ok = proc_check_elf (rd_file->content);
-  DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));

   if (!ok)
     return NULL;
@@ -205,61 +115,61 @@ struct proc* proc_find_pid (int pid) {
   spin_lock_ctx_t ctxprtr;
   struct proc* proc = NULL;

-  rw_spin_read_lock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);
   rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
-  rw_spin_read_unlock (&proc_tree_lock, &ctxprtr);
+  spin_unlock (&proc_tree_lock, &ctxprtr);

   return proc;
 }

-void proc_register (struct proc* proc, struct cpu* cpu) {
+void proc_register (struct proc* proc, struct cpu* cpu1) {
   spin_lock_ctx_t ctxcpu, ctxprtr;

-  proc->cpu = cpu;
+  proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();

+  struct cpu* cpu = proc->cpu;
+
+  spin_lock (&proc_tree_lock, &ctxprtr);
   spin_lock (&cpu->lock, &ctxcpu);
-  rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
+
+  rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
+
+  atomic_fetch_add (&cpu->proc_run_q_count, 1);
+  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
   if (cpu->proc_current == NULL)
     cpu->proc_current = proc;

+  spin_unlock (&proc_tree_lock, &ctxprtr);
   spin_unlock (&cpu->lock, &ctxcpu);
-
-  rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-  rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-  rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
 }

 /* caller holds cpu->lock */
 static struct proc* proc_find_sched (struct cpu* cpu) {
-  struct rb_node_link* node = NULL;
-  struct proc* current = cpu->proc_current;
-  struct proc* proc = NULL;
-
-  if (current)
-    rbtree_next (&current->cpu_run_q_link, node);
-
-  if (!node)
-    rbtree_first (&cpu->proc_run_q, node);
-
-  if (!node)
+  if (!cpu->proc_run_q)
     return NULL;

-  struct rb_node_link* first = node;
+  struct list_node_link *current, *start;
+
+  if (cpu->proc_current)
+    current = cpu->proc_current->cpu_run_q_link.next;
+  else
+    current = cpu->proc_run_q;
+
+  if (!current)
+    current = cpu->proc_run_q;
+
+  start = current;

   do {
-    proc = rbtree_entry (node, struct proc, cpu_run_q_link);
+    struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);

     if (atomic_load (&proc->state) == PROC_READY)
       return proc;

-    rbtree_next (node, node);
-
-    if (!node)
-      rbtree_first (&cpu->proc_run_q, node);
-
-  } while (node != first);
-
-  return ((atomic_load (&current->state) == PROC_READY) ? current : NULL);
+    current = current->next ? current->next : cpu->proc_run_q;
+  } while (current != start);
+
+  return NULL;
 }

 static void proc_reap (void) {
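The run queue is now a circular scan over a linked list instead of an rbtree walk: start one past `proc_current`, wrap at the tail, and stop after one full lap. The same rotation in isolation, as a self-contained sketch with a generic node type (illustrative only):

/* minimal round-robin over a NULL-terminated list, wrapping to the head;
   mirrors the loop shape of proc_find_sched */
struct node { struct node* next; bool ready; };

static struct node* example_next_ready (struct node* head, struct node* cur) {
  if (!head)
    return NULL;
  struct node* start = (cur && cur->next) ? cur->next : head;
  struct node* it = start;
  do {
    if (it->ready)
      return it;
    it = it->next ? it->next : head; /* wrap */
  } while (it != start);
  return NULL; /* one full lap, nothing runnable */
}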
@@ -268,7 +178,7 @@ static void proc_reap (void) {
   spin_lock_ctx_t ctxprtr;
   spin_lock_ctx_t ctxpr;

-  rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+  spin_lock (&proc_tree_lock, &ctxprtr);

   struct rb_node_link* node;
   rbtree_first (&proc_tree, node);
@@ -281,15 +191,14 @@
     if (atomic_load (&proc->state) == PROC_DEAD) {
       spin_lock (&proc->lock, &ctxpr);
       rbtree_delete (&proc_tree, &proc->proc_tree_link);
-      spin_unlock (&proc->lock, &ctxpr);

       list_append (reap_list, &proc->reap_link);
+      spin_unlock (&proc->lock, &ctxpr);
     }

     node = next;
   }

-  rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+  spin_unlock (&proc_tree_lock, &ctxprtr);

   struct list_node_link *reap_link, *reap_link_tmp;
   list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -301,8 +210,8 @@
   }
 }

-void proc_sched (void* regs) {
-  spin_lock_ctx_t ctxcpu, ctxpr;
+void proc_sched (void) {
+  spin_lock_ctx_t ctxcpu;

   int s_cycles = atomic_fetch_add (&sched_cycles, 1);

@@ -314,21 +223,12 @@

   spin_lock (&cpu->lock, &ctxcpu);

-  struct proc* prev = cpu->proc_current;
-
-  if (prev != NULL) {
-    spin_lock (&prev->lock, &ctxpr);
-    prev->pdata.regs = *(struct saved_regs*)regs;
-    spin_unlock (&prev->lock, &ctxpr);
-  }
-
   next = proc_find_sched (cpu);

   if (next) {
     cpu->proc_current = next;
-    spin_unlock (&cpu->lock, &ctxcpu);

-    do_sched (next);
+    do_sched (next, &cpu->lock, &ctxcpu);
   } else {
     cpu->proc_current = NULL;
     spin_unlock (&cpu->lock, &ctxcpu);
@@ -343,11 +243,13 @@ void proc_kill (struct proc* proc) {

   spin_lock (&proc->lock, &ctxpr);
   atomic_store (&proc->state, PROC_DEAD);
+  proc->cpu = NULL;
   spin_unlock (&proc->lock, &ctxpr);

   spin_lock (&cpu->lock, &ctxcpu);

-  rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
+  list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_sub (&cpu->proc_run_q_count, 1);
   if (cpu->proc_current == proc)
     cpu->proc_current = NULL;

@@ -358,118 +260,24 @@ void proc_kill (struct proc* proc) {
   cpu_request_sched (cpu);
 }

-void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
-  spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
-  struct cpu* cpu = proc->cpu;
-
-  spin_lock (&proc->lock, &ctxpr);
-  atomic_store (&proc->state, PROC_SUSPENDED);
-  proc->suspension_q = sq;
-  spin_unlock (&proc->lock, &ctxpr);
-
-  /* remove from run q */
-  spin_lock (&cpu->lock, &ctxcpu);
-
-  rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
-  if (cpu->proc_current == proc)
-    cpu->proc_current = NULL;
-  spin_unlock (&cpu->lock, &ctxcpu);
-
-  spin_lock (&sq->lock, &ctxsq);
-  rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
-  spin_unlock (&sq->lock, &ctxsq);
-
-  cpu_request_sched (cpu);
-}
-
-void proc_resume (struct proc* proc) {
-  spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
-  struct cpu* cpu = proc->cpu;
-  struct proc_suspension_q* sq = proc->suspension_q;
-
-  spin_lock (&sq->lock, &ctxsq);
-  rbtree_delete (&sq->proc_tree, &proc->suspension_link);
-  spin_unlock (&sq->lock, &ctxsq);
-
-  spin_lock (&proc->lock, &ctxpr);
-  proc->suspension_q = NULL;
-  atomic_store (&proc->state, PROC_READY);
-  spin_unlock (&proc->lock, &ctxpr);
-
-  spin_lock (&cpu->lock, &ctxcpu);
-  rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
-  spin_unlock (&cpu->lock, &ctxcpu);
-
-  cpu_request_sched (cpu);
-}
-
 static void proc_irq_sched (void* arg, void* regs) {
   (void)arg;
-  proc_sched (regs);
+  proc_sched ();
 }

-static void proc_kpproc_init (void) {
-  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-
-  memset (&kpproc, 0, sizeof (kpproc));
-
-  kpproc.lock = SPIN_LOCK_INIT;
-  kpproc.state = PROC_PSEUDO;
-  kpproc.pid = 0;
-
-  kpproc.resources = malloc (sizeof (*kpproc.resources));
-  kpproc.resources->tree = NULL;
-  kpproc.resources->lock = RW_SPIN_LOCK_INIT;
-  kpproc.resources->refs = 1;
-  kpproc.resources->sys_rids = 0;
-
-  kpproc.pd = mm_get_kernel_pd ();
-  kpproc.cpu = thiscpu;
-
-  rbtree_insert (struct proc, &proc_tree, &kpproc.proc_tree_link, proc_tree_link, pid);
-
-  /* prepare kernel resources */
-  {
-    /* frame buffer */
-
-    struct limine_framebuffer_response* fb = limine_framebuffer_request.response;
-    struct kpproc_fb fb_info = {
-        .paddr = (uintptr_t)fb->framebuffers[0]->address - (uintptr_t)hhdm->offset,
-        .w = fb->framebuffers[0]->width,
-        .h = fb->framebuffers[0]->height,
-        .pitch = fb->framebuffers[0]->pitch,
-        .bpp = fb->framebuffers[0]->bpp,
-        .red_mask_size = fb->framebuffers[0]->red_mask_size,
-        .red_mask_shift = fb->framebuffers[0]->red_mask_shift,
-        .green_mask_size = fb->framebuffers[0]->green_mask_size,
-        .green_mask_shift = fb->framebuffers[0]->green_mask_shift,
-        .blue_mask_size = fb->framebuffers[0]->blue_mask_size,
-        .blue_mask_shift = fb->framebuffers[0]->blue_mask_shift,
-    };
-
-    DEBUG ("Framebuffer address %p\n", fb_info.paddr);
-
-    size_t pages = align_up (sizeof (fb_info), PAGE_SIZE) / PAGE_SIZE;
-    uintptr_t fb_info_memblk_paddr = pmm_alloc (pages);
-    memcpy ((struct kpproc_fb*)((uintptr_t)hhdm->offset + fb_info_memblk_paddr), &fb_info,
-            sizeof (fb_info));
-
-    struct proc_resource_mem_init mem_init = {
-        .pages = pages, .paddr = fb_info_memblk_paddr, .managed = true};
-    proc_create_resource (&kpproc, 0, PR_MEM, RV_PUBLIC, &mem_init);
-  }
-}
-
 void proc_init (void) {
 #if defined(__x86_64__)
-  irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
-  irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_SAFE);
+  irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
+  irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
 #endif

-  proc_kpproc_init ();
+  struct proc* spin_proc = proc_spawn_rd ("spin.exe");
+  proc_register (spin_proc, thiscpu);

   struct proc* init = proc_spawn_rd ("init.exe");
-  proc_register (init, thiscpu);
+  proc_register (init, NULL);

-  do_sched (init);
+  spin_lock_ctx_t ctxcpu;
+  spin_lock (&spin_proc->cpu->lock, &ctxcpu);
+  do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
 }
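`proc_register (proc, NULL)` now lets the kernel pick a CPU. `cpu_find_lightest` is referenced but its body is not part of this diff, so the following is only a plausible shape, based on the `proc_run_q_count` counter the diff maintains:

/* hypothetical load-balancing pick; signature and array are assumptions */
struct cpu* example_find_lightest (struct cpu* cpus, size_t n) {
  struct cpu* best = &cpus[0];
  for (size_t i = 1; i < n; i++)
    if (atomic_load (&cpus[i].proc_run_q_count) < atomic_load (&best->proc_run_q_count))
      best = &cpus[i]; /* fewest queued processes wins */
  return best;
}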
kernel/proc/proc.h
@@ -6,9 +6,9 @@
 #include <libk/list.h>
 #include <libk/rbtree.h>
 #include <libk/std.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <proc/suspension_q.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>

@@ -17,59 +17,41 @@
 #include <amd64/proc.h> /* USTACK_SIZE */
 #endif

+#define PROC_NEED_RESCHEDULE true
+#define PROC_NO_RESCHEDULE false
+
 /* process states */
 #define PROC_READY 0
 #define PROC_DEAD 1
 #define PROC_SUSPENDED 2
-#define PROC_PSEUDO 3

+/* process flags */
 #define PROC_USTK_PREALLOC (1 << 0)

 struct cpu;

-struct proc_mapping {
-  struct list_node_link proc_mappings_link;
-
-  uintptr_t paddr;
-  uintptr_t vaddr;
-  size_t size;
-};
-
-struct proc_resources {
-  atomic_int refs;
-  atomic_int sys_rids;
-  struct rb_node_link* tree;
-  rw_spin_lock_t lock;
-};
-
 struct proc {
   int pid;
   struct rb_node_link proc_tree_link;
-  struct rb_node_link cpu_run_q_link;
-  struct rb_node_link suspension_link;
+  struct rb_node_link procgroup_memb_tree_link;
+  struct list_node_link cpu_run_q_link;
   struct list_node_link reap_link;
-  struct list_node_link* mappings; /* pd.lock implicitly protects this field */
+  struct list_node_link* sq_entries;
+  struct procgroup* procgroup;
   struct proc_platformdata pdata;
   uint32_t flags;
-  struct pd* pd;
   spin_lock_t lock;
   struct cpu* cpu;
   atomic_int state;
-  struct proc_suspension_q* suspension_q;
-  struct proc_resources* resources;
+  uintptr_t uvaddr_argument;
 };

-void proc_suspend (struct proc* proc, struct proc_suspension_q* sq);
-void proc_resume (struct proc* proc);
-void proc_sched (void* regs);
+void proc_sched (void);
 void proc_kill (struct proc* proc);
-bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
-               uint32_t flags);
-bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages);
 struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
 void proc_register (struct proc* proc, struct cpu* cpu);
 struct proc* proc_find_pid (int pid);
+struct proc* proc_spawn_rd (char* name);
 void proc_init (void);

 #endif // _KERNEL_PROC_PROC_H
kernel/proc/procgroup.c (new file, 218 lines)
@@ -0,0 +1,218 @@
+#include <libk/rbtree.h>
+#include <libk/std.h>
+#include <mm/liballoc.h>
+#include <mm/pmm.h>
+#include <proc/proc.h>
+#include <proc/procgroup.h>
+#include <sync/spin_lock.h>
+#include <sys/debug.h>
+#include <sys/mm.h>
+
+static struct rb_node_link* procgroup_tree = NULL;
+static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
+static atomic_int pgids = 0;
+
+uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
+                         uintptr_t* out_paddr) {
+  spin_lock_ctx_t ctxpg;
+
+  spin_lock (&procgroup->lock, &ctxpg);
+
+  vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
+
+  struct proc_mapping* mapping = malloc (sizeof (*mapping));
+
+  if (mapping == NULL) {
+    spin_unlock (&procgroup->lock, &ctxpg);
+    return 0;
+  }
+
+  uintptr_t paddr = pmm_alloc (pages);
+
+  if (paddr == PMM_ALLOC_ERR) {
+    free (mapping);
+    spin_unlock (&procgroup->lock, &ctxpg);
+    return 0;
+  }
+
+  if (out_paddr != NULL)
+    *out_paddr = paddr;
+
+  mapping->paddr = paddr;
+  mapping->vaddr = vaddr;
+  mapping->size = pages * PAGE_SIZE;
+
+  procgroup->map_base += pages * PAGE_SIZE;
+
+  list_append (procgroup->mappings, &mapping->proc_mappings_link);
+
+  for (uintptr_t vpage = vaddr, ppage = paddr; vpage < vaddr + pages * PAGE_SIZE;
+       vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
+    mm_map_page (&procgroup->pd, ppage, vpage, flags);
+  }
+
+  spin_unlock (&procgroup->lock, &ctxpg);
+
+  return vaddr;
+}
+
+bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
+  size_t unmap_size = pages * PAGE_SIZE;
+  uintptr_t end_vaddr = start_vaddr + unmap_size;
+
+  struct list_node_link *mapping_link, *mapping_link_tmp;
+
+  bool used_tail_mapping = false;
+  spin_lock_ctx_t ctxpg;
+
+  struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
+  if (tail_mapping == NULL)
+    return false;
+
+  spin_lock (&procgroup->lock, &ctxpg);
+
+  list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
+    struct proc_mapping* mapping =
+        list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
+
+    uintptr_t m_start = mapping->vaddr;
+    uintptr_t m_end = mapping->vaddr + mapping->size;
+
+    /* check overlap */
+    if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
+      uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
+      uintptr_t free_vend = (end_vaddr < m_end) ? end_vaddr : m_end;
+      size_t free_size = free_vend - free_vstart;
+
+      uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);
+      pmm_free (ppage_to_free, free_size / PAGE_SIZE);
+
+      /* split in the middle */
+      if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
+        tail_mapping->vaddr = end_vaddr;
+        tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
+        tail_mapping->size = m_end - end_vaddr;
+
+        mapping->size = start_vaddr - m_start;
+
+        list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
+                           &tail_mapping->proc_mappings_link);
+
+        used_tail_mapping = true;
+
+        break;
+      } else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) { /* shrink left */
+        size_t diff = end_vaddr - m_start;
+        mapping->vaddr += diff;
+        mapping->paddr += diff;
+        mapping->size -= diff;
+      } else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) { /* shrink right */
+        mapping->size = start_vaddr - m_start;
+      } else { /* full overlap */
+        list_remove (procgroup->mappings, &mapping->proc_mappings_link);
+        free (mapping);
+      }
+    }
+  }
+
+  if (!used_tail_mapping)
+    free (tail_mapping);
+
+  for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
+    mm_unmap_page (&procgroup->pd, vpage);
+  }
+
+  spin_unlock (&procgroup->lock, &ctxpg);
+
+  return true;
+}
+
+struct procgroup* procgroup_create (void) {
+  spin_lock_ctx_t ctxpgtr;
+
+  struct procgroup* procgroup = malloc (sizeof (*procgroup));
+  if (procgroup == NULL) {
+    return NULL;
+  }
+
+  procgroup->refs = 0;
+  procgroup->memb_proc_tree = NULL;
+  procgroup->lock = SPIN_LOCK_INIT;
+  procgroup->pgid = atomic_fetch_add (&pgids, 1);
+  procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
+  procgroup->map_base = PROC_MAP_BASE;
+
+  spin_lock (&procgroup_tree_lock, &ctxpgtr);
+  rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
+                 procgroup_tree_link, pgid);
+  spin_unlock (&procgroup_tree_lock, &ctxpgtr);
+
+  return procgroup;
+}
+
+void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
+  spin_lock_ctx_t ctxpg, ctxpr;
+
+  spin_lock (&procgroup->lock, &ctxpg);
+  spin_lock (&proc->lock, &ctxpr);
+
+  rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
+                 procgroup_memb_tree_link, pid);
+  atomic_fetch_add (&procgroup->refs, 1);
+
+  spin_unlock (&proc->lock, &ctxpr);
+  spin_unlock (&procgroup->lock, &ctxpg);
+}
+
+void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
+  spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr;
+
+  spin_lock (&procgroup->lock, &ctxpg);
+  spin_lock (&proc->lock, &ctxpr);
+
+  rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
+  int refs = atomic_fetch_sub (&procgroup->refs, 1);
+
+  spin_unlock (&proc->lock, &ctxpr);
+  spin_unlock (&procgroup->lock, &ctxpg);
+
+  if (refs == 1) {
+    spin_lock (&procgroup_tree_lock, &ctxpgtr);
+    spin_lock (&procgroup->lock, &ctxpg);
+
+    rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
+
+    spin_unlock (&procgroup->lock, &ctxpg);
+    spin_unlock (&procgroup_tree_lock, &ctxpgtr);
+
+    /* delete resources */
+    struct rb_node_link* rnode;
+    rbtree_first (&procgroup->resource_tree, rnode);
+    while (rnode) {
+      struct rb_node_link* next;
+      rbtree_next (rnode, next);
+
+      struct proc_resource* resource =
+          rbtree_entry (rnode, struct proc_resource, resource_tree_link);
+
+      rnode = next;
+
+      proc_delete_resource (resource);
+    }
+
+    struct list_node_link *mapping_link, *mapping_link_tmp;
+    list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
+      struct proc_mapping* mapping =
+          list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
+
+      pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
+      free (mapping);
+    }
+
+    pmm_free (procgroup->pd.cr3_paddr, 1);
+
+    free (procgroup->tls.tls_tmpl);
+
+    free (procgroup);
+  }
+}
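Passing `vaddr == 0` asks `procgroup_map` to place the region at the group's bump pointer (`map_base`), which only ever grows. A usage sketch; the flag combination mirrors the ELF loader above and `pg` is a hypothetical procgroup:

/* map four anonymous pages at the next free group address; 0 means failure */
uintptr_t paddr;
uintptr_t va = procgroup_map (pg, 0, 4, MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, &paddr);
if (va == 0)
  DEBUG ("procgroup_map failed\n");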
kernel/proc/procgroup.h (new file, 43 lines)
@@ -0,0 +1,43 @@
+#ifndef _KERNEL_PROC_PROCGROUP_H
+#define _KERNEL_PROC_PROCGROUP_H
+
+#include <libk/list.h>
+#include <libk/rbtree.h>
+#include <libk/std.h>
+#include <proc/resource.h>
+#include <sync/spin_lock.h>
+#include <sys/mm.h>
+#include <sys/procgroup.h>
+
+struct proc;
+
+struct proc_mapping {
+  struct list_node_link proc_mappings_link;
+
+  uintptr_t paddr;
+  uintptr_t vaddr;
+  size_t size;
+};
+
+struct procgroup {
+  int pgid;
+  struct rb_node_link procgroup_tree_link;
+  struct rb_node_link* memb_proc_tree;
+  spin_lock_t lock;
+  atomic_int refs;
+  struct rb_node_link* resource_tree;
+  atomic_int sys_rids;
+  struct pd pd;
+  struct list_node_link* mappings;
+  uintptr_t map_base;
+  struct procgroup_tls tls;
+};
+
+struct procgroup* procgroup_create (void);
+void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
+void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
+uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
+                         uintptr_t* out_paddr);
+bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
+
+#endif // _KERNEL_PROC_PROCGROUP_H
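`refs` counts attached processes, and `procgroup_detach` tears the whole group down when the count it decremented was 1. A hedged lifecycle sketch tying the pieces together; example_spawn and example_exit are illustrations, not functions from this diff:

struct proc* example_spawn (struct proc* p) {
  struct procgroup* pg = procgroup_create ();
  if (pg == NULL)
    return NULL;
  procgroup_attach (pg, p); /* refs: 0 -> 1 */
  p->procgroup = pg;
  return p;
}

void example_exit (struct proc* p) {
  procgroup_detach (p->procgroup, p); /* last detach frees mappings, pd, TLS */
}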
kernel/proc/resource.c
@@ -7,169 +7,53 @@
 #include <mm/pmm.h>
 #include <proc/mutex.h>
 #include <proc/proc.h>
+#include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>

-static struct rb_node_link* resource_tree = NULL;
-static rw_spin_lock_t resource_tree_lock = RW_SPIN_LOCK_INIT;
-
-void proc_cleanup_resources (struct proc* proc) {
-  spin_lock_ctx_t ctxrs;
-
-  rw_spin_write_lock (&proc->resources->lock, &ctxrs);
-
-  struct rb_node_link* rnode;
-  rbtree_first (&proc->resources->tree, rnode);
-  while (rnode) {
-    struct rb_node_link* next;
-    rbtree_next (rnode, next);
-
-    struct proc_resource* resource =
-        rbtree_entry (rnode, struct proc_resource, local_resource_tree_link);
-
-    rnode = next;
-
-    proc_drop_resource (proc, resource, false);
-  }
-
-  rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
-
-  if (atomic_fetch_sub (&proc->resources->refs, 1) == 1) {
-    free (proc->resources);
-  }
-}
-
-void proc_drop_resource (struct proc* proc, struct proc_resource* resource, bool lock) {
-  spin_lock_ctx_t ctxrs;
-
-  DEBUG ("resource=%p created_by=%d vis=%d type=%d rid=%d refs=%d\n", resource,
-         resource->created_by_pid, resource->visibility, resource->type, resource->rid,
-         atomic_load (&resource->refs));
-
-  if (atomic_fetch_sub (&resource->refs, 1) == 1) {
-    switch (resource->visibility) {
-      case RV_PRIVATE: {
-        if (lock)
-          rw_spin_write_lock (&proc->resources->lock, &ctxrs);
-
-        rbtree_delete (&proc->resources->tree, &resource->local_resource_tree_link);
-
-        if (lock)
-          rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
-      } break;
-      case RV_PUBLIC: {
-        if (lock)
-          rw_spin_write_lock (&resource_tree_lock, &ctxrs);
-
-        rbtree_delete (&resource_tree, &resource->global_resource_tree_link);
-
-        if (lock)
-          rw_spin_write_unlock (&resource_tree_lock, &ctxrs);
-      } break;
-      default: {
-        assert (0);
-      } break;
-    }
-
-    resource->ops.cleanup (proc, resource);
-    free (resource);
-  }
-}
-
-struct proc_resource* proc_find_resource (struct proc* proc, int rid, int vis) {
+struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid) {
+  spin_lock_ctx_t ctxpg;
   struct proc_resource* resource = NULL;
-  spin_lock_ctx_t ctxrs;

-  switch (vis) {
-    case RV_PRIVATE: {
-      /* User wants to create a private resource, so search locally */
-      rw_spin_read_lock (&proc->resources->lock, &ctxrs);
-      rbtree_find (struct proc_resource, &proc->resources->tree, rid, resource,
-                   local_resource_tree_link, rid);
-      rw_spin_read_unlock (&proc->resources->lock, &ctxrs);
-    } break;
-    case RV_PUBLIC: {
-      /* User wants to create a public resource, so search globally */
-      rw_spin_read_lock (&resource_tree_lock, &ctxrs);
-      rbtree_find (struct proc_resource, &resource_tree, rid, resource, global_resource_tree_link,
-                   rid);
-      rw_spin_read_unlock (&resource_tree_lock, &ctxrs);
-    } break;
-    default: {
-      assert (0);
-    } break;
-  }
+  spin_lock (&procgroup->lock, &ctxpg);
+  rbtree_find (struct proc_resource, &procgroup->resource_tree, rid, resource, resource_tree_link,
+               rid);
+  spin_unlock (&procgroup->lock, &ctxpg);

   return resource;
 }

-struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
-                                            void* data) {
-  spin_lock_ctx_t ctxrs;
+struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) {
+  spin_lock_ctx_t ctxpg;
+  struct proc_resource* resource;

-  /* Check if resource RID already exists */
-  struct proc_resource* resource_check = proc_find_resource (proc, rid, vis);
-
-  /* Resource was found either way, so it already exists */
-  if (resource_check != NULL)
-    return NULL;
-
-  /* create the resource */
-
-  struct proc_resource* resource = malloc (sizeof (*resource));
+  resource = proc_find_resource (procgroup, rid);
+  if (resource != NULL)
+    return resource;
+
+  resource = malloc (sizeof (*resource));
   if (resource == NULL)
     return NULL;

   memset (resource, 0, sizeof (*resource));

   resource->lock = SPIN_LOCK_INIT;
-  resource->type = type;
-  resource->refs = 1;
-  resource->rid = rid;
-  resource->visibility = vis;
-  resource->created_by_pid = proc->pid;
-
-  switch (resource->type) {
-    case PR_MEM: {
-      struct proc_resource_mem_init* mem_init = data;
-      proc_create_resource_mem (&resource->u.mem, mem_init);
-      resource->ops.cleanup = &proc_cleanup_resource_mem;
-      resource->u.mem.resource = resource;
-      DEBUG ("PR_MEM resource=%p created_by=%d, type=%d rid=%d paddr=%p, pages=%zu\n", resource,
-             resource->created_by_pid, resource->type, resource->rid, resource->u.mem.paddr,
-             resource->u.mem.pages);
-    } break;
-    case PR_MUTEX: {
-      proc_create_resource_mutex (&resource->u.mutex);
-      resource->ops.cleanup = &proc_cleanup_resource_mutex;
-      resource->u.mutex.resource = resource;
-      DEBUG ("PR_MUTEX resource=%p created_by=%d type=%d rid=%d\n", resource,
-             resource->created_by_pid, resource->type, resource->rid);
-    } break;
-    default: {
-      free (resource);
-      return NULL;
-    } break;
-  }
+  resource->ops.cleanup = &proc_cleanup_resource_mutex;
+  resource->u.mutex.resource = resource;
+  resource->rid = rid;
+  resource->type = PR_MUTEX;

-  switch (resource->visibility) {
-    case RV_PRIVATE: {
-      rw_spin_write_lock (&proc->resources->lock, &ctxrs);
-      rbtree_insert (struct proc_resource, &proc->resources->tree,
-                     &resource->local_resource_tree_link, local_resource_tree_link, rid);
-      rw_spin_write_unlock (&proc->resources->lock, &ctxrs);
-    } break;
-    case RV_PUBLIC: {
-      rw_spin_write_lock (&resource_tree_lock, &ctxrs);
-      rbtree_insert (struct proc_resource, &resource_tree, &resource->global_resource_tree_link,
-                     global_resource_tree_link, rid);
-      rw_spin_write_unlock (&resource_tree_lock, &ctxrs);
-    } break;
-    default: {
-      assert (0);
-    } break;
-  }
+  spin_lock (&procgroup->lock, &ctxpg);
+  rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
+                 resource_tree_link, rid);
+  spin_unlock (&procgroup->lock, &ctxpg);

   return resource;
 }
+
+bool proc_delete_resource (struct proc_resource* resource) {
+  bool reschedule = resource->ops.cleanup (resource);
+  free (resource);
+
+  return reschedule;
+}
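Resources now live in the procgroup's tree, only mutexes remain as a resource type, and creation is idempotent per rid (an existing resource is returned rather than rejected). Usage sketch; the rid value is arbitrary:

/* create-or-find a mutex resource in the caller's group, then lock it */
struct proc_resource* r = proc_create_resource_mutex (proc->procgroup, 42);
if (r != NULL && proc_mutex_lock (proc, &r->u.mutex) == PROC_NEED_RESCHEDULE)
  proc_sched (); /* we were enqueued on the mutex's suspension queue */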
kernel/proc/resource.h
@@ -4,40 +4,29 @@
 #include <libk/list.h>
 #include <libk/rbtree.h>
 #include <libk/std.h>
-#include <proc/mem.h>
 #include <proc/mutex.h>
 #include <sync/spin_lock.h>

-#define PR_MEM 0
 #define PR_MUTEX 1

-#define RV_PRIVATE 0
-#define RV_PUBLIC 1
-
 struct proc;
+struct procgroup;

 struct proc_resource {
   int type;
   int rid;
-  int visibility;
   spin_lock_t lock;
-  atomic_int refs;
-  struct rb_node_link global_resource_tree_link;
-  struct rb_node_link local_resource_tree_link;
+  struct rb_node_link resource_tree_link;
   union {
-    struct proc_resource_mem mem;
     struct proc_mutex mutex;
   } u;
   struct {
-    void (*cleanup) (struct proc* proc, struct proc_resource* resource);
+    bool (*cleanup) (struct proc_resource* resource);
   } ops;
-  int created_by_pid;
 };

-struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
-                                            void* data);
-struct proc_resource* proc_find_resource (struct proc* proc, int rid, int vis);
-void proc_drop_resource (struct proc* proc, struct proc_resource* resource, bool lock);
-void proc_cleanup_resources (struct proc* proc);
+struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
+struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
+bool proc_delete_resource (struct proc_resource* resource);

 #endif // _KERNEL_PROC_RESOURCE_H
@@ -1,9 +1,11 @@
 c += proc/proc.c \
      proc/resource.c \
      proc/mutex.c \
-     proc/mem.c
+     proc/procgroup.c \
+     proc/suspension_q.c

 o += proc/proc.o \
      proc/resource.o \
      proc/mutex.o \
-     proc/mem.o
+     proc/procgroup.o \
+     proc/suspension_q.o
kernel/proc/suspension_q.c (new file, 111 lines)
@@ -0,0 +1,111 @@
+#include <libk/list.h>
+#include <libk/std.h>
+#include <mm/liballoc.h>
+#include <proc/proc.h>
+#include <proc/resource.h>
+#include <proc/suspension_q.h>
+#include <sync/spin_lock.h>
+#include <sys/smp.h>
+#include <sys/spin_lock.h>
+
+bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+                      spin_lock_ctx_t* ctxrl) {
+  spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
+  struct cpu* cpu = proc->cpu;
+
+  struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
+  if (!sq_entry) {
+    spin_unlock (resource_lock, ctxrl);
+    return PROC_NO_RESCHEDULE;
+  }
+
+  sq_entry->proc = proc;
+  sq_entry->sq = sq;
+
+  spin_lock (&cpu->lock, &ctxcpu);
+  spin_lock (&proc->lock, &ctxpr);
+  spin_lock (&sq->lock, &ctxsq);
+
+  spin_unlock (resource_lock, ctxrl);
+
+  atomic_store (&proc->state, PROC_SUSPENDED);
+
+  /* append to sq's list */
+  list_append (sq->proc_list, &sq_entry->sq_link);
+
+  /* append to proc's list */
+  list_append (proc->sq_entries, &sq_entry->proc_link);
+
+  list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_sub (&cpu->proc_run_q_count, 1);
+
+  if (cpu->proc_current == proc)
+    cpu->proc_current = NULL;
+
+  proc->cpu = NULL;
+
+  spin_unlock (&sq->lock, &ctxsq);
+  spin_unlock (&proc->lock, &ctxpr);
+  spin_unlock (&cpu->lock, &ctxcpu);
+
+  return PROC_NEED_RESCHEDULE;
+}
+
+bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
+  spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
+  struct cpu* cpu = cpu_find_lightest ();
+  struct proc_suspension_q* sq = sq_entry->sq;
+
+  spin_lock (&cpu->lock, &ctxcpu);
+  spin_lock (&proc->lock, &ctxpr);
+  spin_lock (&sq->lock, &ctxsq);
+
+  /* remove from sq's list */
+  list_remove (sq->proc_list, &sq_entry->sq_link);
+
+  /* remove from proc's list */
+  list_remove (proc->sq_entries, &sq_entry->proc_link);
+
+  proc->cpu = cpu;
+
+  if (proc->sq_entries == NULL)
+    atomic_store (&proc->state, PROC_READY);
+
+  list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
+  atomic_fetch_add (&cpu->proc_run_q_count, 1);
+
+  spin_unlock (&sq->lock, &ctxsq);
+  spin_unlock (&proc->lock, &ctxpr);
+  spin_unlock (&cpu->lock, &ctxcpu);
+
+  free (sq_entry);
+
+  return PROC_NEED_RESCHEDULE;
+}
+
+void proc_sqs_cleanup (struct proc* proc) {
+  spin_lock_ctx_t ctxsq, ctxpr;
+
+  spin_lock (&proc->lock, &ctxpr);
+
+  /* clean suspension queue entries */
+  struct list_node_link *sq_link, *sq_link_tmp;
+  list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
+    struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
+    struct proc_suspension_q* sq = sq_entry->sq;
+
+    spin_lock (&sq->lock, &ctxsq);
+
+    /* remove from sq's list */
+    list_remove (sq->proc_list, &sq_entry->sq_link);
+
+    /* remove from proc's list */
+    list_remove (proc->sq_entries, &sq_entry->proc_link);
+
+    spin_unlock (&sq->lock, &ctxsq);
+
+    free (sq_entry);
+  }
+
+  spin_unlock (&proc->lock, &ctxpr);
+}
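
Note on usage: proc_sq_suspend takes the caller's resource_lock still held and only releases it after the cpu, proc, and queue locks are taken, so a concurrent proc_sq_resume cannot observe the process half-enqueued. A minimal sketch of a blocking primitive built on this API; struct my_resource with its busy flag and waiters queue is an illustrative stand-in, not something in this patch:

    /* Hypothetical blocking acquire: res->lock is passed in locked and is
       released inside proc_sq_suspend once the process is safely queued. */
    bool my_resource_acquire (struct proc* proc, struct my_resource* res) {
      spin_lock_ctx_t ctx;

      spin_lock (&res->lock, &ctx);

      if (res->busy)
        return proc_sq_suspend (proc, &res->waiters, &res->lock, &ctx);

      res->busy = true;
      spin_unlock (&res->lock, &ctx);
      return PROC_NO_RESCHEDULE;
    }

A PROC_NEED_RESCHEDULE return tells the caller to enter the scheduler, which is exactly what the mutex syscalls further down do with proc_sched ().
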
@@ -1,12 +1,26 @@
#ifndef _KERNEL_PROC_SUSPENTION_Q_H
#define _KERNEL_PROC_SUSPENTION_Q_H

-#include <libk/rbtree.h>
+#include <libk/list.h>
#include <sync/spin_lock.h>

+struct proc;
+
struct proc_suspension_q {
-  struct rb_node_link* proc_tree;
+  struct list_node_link* proc_list;
  spin_lock_t lock;
};

+struct proc_sq_entry {
+  struct list_node_link sq_link;
+  struct list_node_link proc_link;
+  struct proc* proc;
+  struct proc_suspension_q* sq;
+};
+
+void proc_sqs_cleanup (struct proc* proc);
+bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+                      spin_lock_ctx_t* ctxrl);
+bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
+
#endif // _KERNEL_PROC_SUSPENTION_Q_H

@@ -1,67 +0,0 @@
-#include <libk/assert.h>
-#include <libk/std.h>
-#include <sync/rw_spin_lock.h>
-#include <sys/debug.h>
-#include <sys/irq.h>
-#include <sys/spin_lock.h>
-
-#define WRITER_WAIT (1U << 31)
-#define READER_MASK (~WRITER_WAIT)
-
-void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  uint32_t value;
-
-  irq_save (ctx);
-
-  for (;;) {
-    value = atomic_load_explicit (rw, memory_order_relaxed);
-
-    if ((value & WRITER_WAIT) == 0) {
-      if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
-                                                 memory_order_relaxed)) {
-        return;
-      }
-    }
-
-    spin_lock_relax ();
-  }
-}
-
-void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
-  assert ((old & READER_MASK) > 0);
-  irq_restore (ctx);
-}
-
-void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  uint32_t value;
-
-  irq_save (ctx);
-
-  /* announce writer */
-  for (;;) {
-    value = atomic_load_explicit (rw, memory_order_relaxed);
-
-    if ((value & WRITER_WAIT) == 0) {
-      if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
-                                                 memory_order_acquire, memory_order_relaxed))
-        break;
-    } else {
-      spin_lock_relax ();
-    }
-  }
-
-  /* wait for readers */
-  for (;;) {
-    value = atomic_load_explicit (rw, memory_order_acquire);
-    if ((value & READER_MASK) == 0)
-      return;
-
-    spin_lock_relax ();
-  }
-}
-
-void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx) {
-  atomic_store_explicit (rw, 0, memory_order_release);
-  irq_restore (ctx);
-}

@@ -1,17 +0,0 @@
-#ifndef _KERNEL_SYNC_RW_SPIN_LOCK_H
-#define _KERNEL_SYNC_RW_SPIN_LOCK_H
-
-#include <libk/std.h>
-#include <sync/spin_lock.h>
-#include <sys/spin_lock.h>
-
-#define RW_SPIN_LOCK_INIT 0
-
-typedef _Atomic (uint32_t) rw_spin_lock_t;
-
-void rw_spin_read_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-void rw_spin_read_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-void rw_spin_write_lock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-void rw_spin_write_unlock (rw_spin_lock_t* rw, spin_lock_ctx_t* ctx);
-
-#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H

@@ -1,5 +1,3 @@
-c += sync/spin_lock.c \
-     sync/rw_spin_lock.c
+c += sync/spin_lock.c

-o += sync/spin_lock.o \
-     sync/rw_spin_lock.o
+o += sync/spin_lock.o

@@ -2,6 +2,7 @@
#define _KERNEL_SYS_MM_H

#include <libk/std.h>
+#include <sync/spin_lock.h>

#if defined(__x86_64__)
#include <amd64/mm.h>
@@ -10,21 +11,18 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
-#define MM_PD_RELOAD (1 << 30)
-#define MM_PD_LOCK (1 << 31)

uintptr_t mm_alloc_user_pd_phys (void);
-void mm_reload (void);
+void mm_kernel_lock (spin_lock_ctx_t* ctx);
+void mm_kernel_unlock (spin_lock_ctx_t* ctx);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
-void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
-void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
-void mm_lock_kernel (void);
-void mm_unlock_kernel (void);
-bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags);
-bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags);
-uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags);
-uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags);
+void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
+void mm_unmap_kernel_page (uintptr_t vaddr);
+bool mm_validate (struct pd* pd, uintptr_t vaddr);
+bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
+uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
+uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
struct pd* mm_get_kernel_pd (void);
void mm_init (void);

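Note: the MM_PD_RELOAD/MM_PD_LOCK bits and the uint32_t flags tail on the unmap, validate, and translate helpers are gone; kernel page-directory locking is now explicit through mm_kernel_lock/mm_kernel_unlock with a spin_lock_ctx_t, matching the other spin-lock APIs in this tree. A sketch of a caller under the new interface (map_kernel_mmio is a hypothetical helper, not part of this patch):

    void map_kernel_mmio (uintptr_t paddr, uintptr_t vaddr) {
      spin_lock_ctx_t ctx;

      mm_kernel_lock (&ctx);
      mm_map_kernel_page (paddr, vaddr, MM_PG_PRESENT | MM_PG_RW);
      mm_kernel_unlock (&ctx);
    }
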
@@ -6,8 +6,9 @@
struct proc;

struct proc* proc_from_elf (uint8_t* elf_contents);
-struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, size_t stack_size,
-                         uintptr_t entry);
+struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+                         uintptr_t argument_ptr);
void proc_cleanup (struct proc* proc);
+void proc_init_tls (struct proc* proc);

#endif // _KERNEL_SYS_PROC_H

kernel/sys/procgroup.h (new file, 8 lines)
@@ -0,0 +1,8 @@
+#ifndef _KERNEL_SYS_PROCGROUP_H
+#define _KERNEL_SYS_PROCGROUP_H
+
+#if defined(__x86_64__)
+#include <amd64/procgroup.h>
+#endif
+
+#endif // _KERNEL_SYS_PROCGROUP_H

@@ -4,6 +4,6 @@
#include <libk/std.h>
#include <proc/proc.h>

-void do_sched (struct proc* proc);
+void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu);

#endif // _KERNEL_SYS_SCHED_H

@@ -5,9 +5,9 @@
#include <m/status.h>
#include <m/syscall_defs.h>
#include <mm/pmm.h>
-#include <proc/mem.h>
#include <proc/mutex.h>
#include <proc/proc.h>
+#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
@@ -16,23 +16,26 @@
#include <syscall/syscall.h>

#define DEFINE_SYSCALL(name) \
-  int name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, uintptr_t UNUSED a2, \
-            uintptr_t UNUSED a3, uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
+  uintptr_t name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, \
+                  uintptr_t UNUSED a2, uintptr_t UNUSED a3, uintptr_t UNUSED a4, \
+                  uintptr_t UNUSED a5, uintptr_t UNUSED a6)

+#define SYSRESULT(x) ((uintptr_t)(x))
+
static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
-  spin_lock_ctx_t ctxprpd;
+  spin_lock_ctx_t ctxpg;

-  spin_lock (&proc->pd->lock, &ctxprpd);
+  spin_lock (&proc->procgroup->lock, &ctxpg);

-  if (!mm_validate_buffer (proc->pd, (uintptr_t)uvaddr, size, 0)) {
-    spin_unlock (&proc->pd->lock, &ctxprpd);
+  if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
+    spin_unlock (&proc->procgroup->lock, &ctxpg);
    return NULL;
  }

-  uintptr_t out_paddr = mm_v2p (proc->pd, uvaddr, 0);
+  uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);

-  spin_unlock (&proc->pd->lock, &ctxprpd);
+  spin_unlock (&proc->procgroup->lock, &ctxpg);

  uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;

@@ -42,50 +45,26 @@ static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t si
/* int quit (void) */
DEFINE_SYSCALL (sys_quit) {
  proc_kill (proc);
-  return ST_OK;
+  return SYSRESULT (ST_OK);
}

/* int test (void) */
DEFINE_SYSCALL (sys_test) {
  char c = (char)a1;
  DEBUG ("test syscall from %d! %c\n", proc->pid, c);
-  return ST_OK;
+  return SYSRESULT (ST_OK);
}

-/* int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags) */
+/* int map (uintptr_t vaddr, size_t pages, uint32_t flags) */
DEFINE_SYSCALL (sys_map) {
-  spin_lock_ctx_t ctxrs;
-
-  int mem_rid = (int)a1;
-  int vis = (int)a2;
-  uintptr_t vaddr = a3;
-  uint32_t flags = (uint32_t)a4;
+  uintptr_t vaddr = a1;
+  size_t pages = (size_t)a2;
+  uint32_t flags = (uint32_t)a3;

  if (vaddr % PAGE_SIZE != 0)
-    return -ST_UNALIGNED;
+    return SYSRESULT (-ST_UNALIGNED);

-  if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
-    return -ST_BAD_RESOURCE;
-
-  struct proc_resource* mem_resource = proc_find_resource (proc, mem_rid, vis);
-
-  if (mem_resource == NULL) {
-    return -ST_NOT_FOUND;
-  }
-
-  spin_lock (&mem_resource->lock, &ctxrs);
-
-  if (mem_resource->type != PR_MEM) {
-    spin_unlock (&mem_resource->lock, &ctxrs);
-    return -ST_BAD_RESOURCE;
-  }
-
-  uintptr_t paddr = mem_resource->u.mem.paddr;
-  size_t pages = mem_resource->u.mem.pages;
-
-  spin_unlock (&mem_resource->lock, &ctxrs);
-
-  return proc_map (proc, paddr, vaddr, pages, flags) ? ST_OK : -ST_OOM_ERROR;
+  return SYSRESULT (procgroup_map (proc->procgroup, vaddr, pages, flags, NULL));
}

/* int unmap (uintptr_t vaddr, size_t pages) */
@@ -94,93 +73,94 @@ DEFINE_SYSCALL (sys_unmap) {
  size_t pages = (size_t)a2;

  if (vaddr % PAGE_SIZE != 0)
-    return -ST_UNALIGNED;
+    return SYSRESULT (-ST_UNALIGNED);

-  return proc_unmap (proc, vaddr, pages) ? ST_OK : -ST_OOM_ERROR;
+  return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
}

-/* int create_mem (int rid, int vis, size_t pages) */
-DEFINE_SYSCALL (sys_create_mem) {
-  int rid = (int)a1;
-  int vis = (int)a2;
-  size_t pages = (size_t)a3;
-
-  if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
-    return -ST_BAD_RESOURCE;
-
-  if (pages == 0)
-    return ST_OK;
-
-  int rid1 = rid < 0 ? atomic_fetch_add (&proc->resources->sys_rids, 1) : rid;
-
-  struct proc_resource_mem_init mem_init = {.managed = false, .pages = pages};
-  struct proc_resource* mem_resource = proc_create_resource (proc, rid1, PR_MEM, vis, &mem_init);
-
-  if (mem_resource == NULL)
-    return -ST_OOM_ERROR;
-
-  return mem_resource->rid;
-}
-
-/* int unlink_mem (int rid, int vis, size_t pages) */
-DEFINE_SYSCALL (sys_unlink_mem) {
-  spin_lock_ctx_t ctxrs;
-
-  int rid = (int)a1;
-  int vis = (int)a2;
-  size_t pages = (size_t)a3;
-
-  if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
-    return -ST_BAD_RESOURCE;
-
-  struct proc_resource* mem_resource = proc_find_resource (proc, rid, vis);
-
-  if (mem_resource == NULL)
-    return -ST_NOT_FOUND;
-
-  spin_lock (&mem_resource->lock, &ctxrs);
-
-  if (mem_resource->type != PR_MEM) {
-    spin_unlock (&mem_resource->lock, &ctxrs);
-    return -ST_BAD_RESOURCE;
-  }
-
-  mem_resource->u.mem.alive_pages -= pages;
-  if (mem_resource->u.mem.alive_pages < 0) {
-    spin_unlock (&mem_resource->lock, &ctxrs);
-    proc_drop_resource (proc, mem_resource, true);
-  }
-
-  return ST_OK;
-}
-
-/* int clone (uintptr_t vstack_top, size_t stack_size, void* entry) */
+/* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
DEFINE_SYSCALL (sys_clone) {
  uintptr_t vstack_top = a1;
-  size_t stack_size = (size_t)a2;
-  uintptr_t entry = a3;
+  uintptr_t entry = a2;
+  uintptr_t argument_ptr = a3;

-  struct cpu* cpu = proc->cpu;
-
-  struct proc* new = proc_clone (proc, vstack_top, stack_size, entry);
-
-  DEBUG ("new=%p\n", new);
+  struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);

  if (new == NULL) {
-    return -ST_OOM_ERROR;
+    return SYSRESULT (-ST_OOM_ERROR);
  }

  int pid = new->pid;

-  proc_register (new, cpu);
+  proc_register (new, NULL);

-  return pid;
+  return SYSRESULT (pid);
}

-/* int proc_sched (void) */
+/* void* argument_ptr (void) */
+DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
+
+/* int sched (void) */
DEFINE_SYSCALL (sys_sched) {
-  proc_sched (regs);
-  return ST_OK;
+  proc_sched ();
+  return SYSRESULT (ST_OK);
}
+
+/* int mutex_create (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_create) {
+  int mutex_rid = (int)a1;
+
+  struct proc_resource* mutex_resource = proc_create_resource_mutex (proc->procgroup, mutex_rid);
+
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_OOM_ERROR);
+
+  return SYSRESULT (mutex_resource->rid);
+}
+
+/* int mutex_delete (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_delete) {
+  int mutex_rid = (int)a1;
+
+  struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
+
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_NOT_FOUND);
+
+  if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+
+  return SYSRESULT (ST_OK);
+}
+
+/* int mutex_lock (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_lock) {
+  int mutex_rid = (int)a1;
+
+  struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
+
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_NOT_FOUND);
+
+  if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+
+  return SYSRESULT (ST_OK);
+}
+
+/* int mutex_unlock (int mutex_rid) */
+DEFINE_SYSCALL (sys_mutex_unlock) {
+  int mutex_rid = (int)a1;
+
+  struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
+
+  if (mutex_resource == NULL)
+    return SYSRESULT (-ST_NOT_FOUND);
+
+  if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+
+  return SYSRESULT (ST_OK);
+}

static syscall_handler_func_t handler_table[] = {
@@ -189,9 +169,12 @@ static syscall_handler_func_t handler_table[] = {
  [SYS_MAP] = &sys_map,
  [SYS_UNMAP] = &sys_unmap,
  [SYS_CLONE] = &sys_clone,
+  [SYS_ARGUMENT_PTR] = &sys_argument_ptr,
  [SYS_SCHED] = &sys_sched,
-  [SYS_CREATE_MEM] = &sys_create_mem,
-  [SYS_UNLINK_MEM] = &sys_unlink_mem,
+  [SYS_MUTEX_CREATE] = &sys_mutex_create,
+  [SYS_MUTEX_DELETE] = &sys_mutex_delete,
+  [SYS_MUTEX_LOCK] = &sys_mutex_lock,
+  [SYS_MUTEX_UNLOCK] = &sys_mutex_unlock,
};

syscall_handler_func_t syscall_find_handler (int syscall_num) {

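Note: with handlers widened to uintptr_t, SYSRESULT is the one cast point for both negative status codes and pointer-sized results (sys_argument_ptr's user vaddr now fits without truncation). Adding a syscall is now a DEFINE_SYSCALL body plus a handler_table slot; a hypothetical example (SYS_GETPID and its semantics are illustrative only, not part of this patch):

    /* int getpid (void) */
    DEFINE_SYSCALL (sys_getpid) { return SYSRESULT (proc->pid); }

together with [SYS_GETPID] = &sys_getpid, in handler_table and a matching SYS_GETPID number in the shared m/syscall_defs.h.
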
@@ -4,8 +4,9 @@
#include <libk/std.h>
#include <proc/proc.h>

-typedef int (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1, uintptr_t a2,
-                                       uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6);
+typedef uintptr_t (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1,
+                                             uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5,
+                                             uintptr_t a6);

syscall_handler_func_t syscall_find_handler (int syscall_num);

@@ -4,39 +4,19 @@
#include <alloc/liballoc.h>
#include <m/system.h>

-static uintptr_t liballoc_map_base = PROC_MAP_BASE;
-static int mem_rid_base = 1000000;
+#define LIBALLOC_MUTEX 500

-void liballoc_init (void) {}
+void liballoc_init (void) { mutex_create (LIBALLOC_MUTEX); }

-int liballoc_lock (void) { return 0; }
+void liballoc_deinit (void) { mutex_delete (LIBALLOC_MUTEX); }

-int liballoc_unlock (void) { return 0; }
+int liballoc_lock (void) { return mutex_lock (LIBALLOC_MUTEX); }

-void* liballoc_alloc (int pages, int* mem_rid) {
-  uintptr_t current_base = liballoc_map_base;
-
-  *mem_rid = create_mem (mem_rid_base++, RV_PRIVATE, pages);
-  if (*mem_rid < 0) {
-    return NULL;
-  }
-
-  if (map (*mem_rid, RV_PRIVATE, current_base, MAP_FLAGS | MAP_RW) < 0) {
-    unlink_mem (*mem_rid, RV_PRIVATE, pages);
-    return NULL;
-  }
-
-  uintptr_t old_base = current_base;
-  current_base += pages * PAGE_SIZE;
-
-  return (void*)old_base;
-}
-
-int liballoc_free (void* ptr, int pages, int mem_rid) {
-  unmap ((uintptr_t)ptr, pages);
-  unlink_mem (mem_rid, RV_PRIVATE, pages);
-  return 0;
-}
+int liballoc_unlock (void) { return mutex_unlock (LIBALLOC_MUTEX); }
+
+void* liballoc_alloc (int pages) { return map (0, pages, MAP_FLAGS | MAP_RW); }
+
+int liballoc_free (void* ptr, int pages) { return unmap ((uintptr_t)ptr, pages); }

/** Durand's Ridiculously Amazing Super Duper Memory functions. */
@@ -203,7 +183,6 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
  unsigned int pages;
  unsigned int usage;
  struct boundary_tag* tag;
-  int mem_rid;

  // This is how much space is required.
  usage = size + sizeof (struct boundary_tag);
@@ -217,7 +196,7 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
  if (pages < (unsigned int)l_pageCount)
    pages = l_pageCount;

-  tag = (struct boundary_tag*)liballoc_alloc (pages, &mem_rid);
+  tag = (struct boundary_tag*)liballoc_alloc (pages);

  if (tag == NULL)
    return NULL; // uh oh, we ran out of memory.
@@ -226,7 +205,6 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
  tag->size = size;
  tag->real_size = pages * l_pageSize;
  tag->index = -1;
-  tag->mem_rid = mem_rid;

  tag->next = NULL;
  tag->prev = NULL;
@@ -349,7 +327,7 @@ void free (void* ptr) {
  if (pages < (unsigned int)l_pageCount)
    pages = l_pageCount;

-  liballoc_free (tag, pages, tag->mem_rid);
+  liballoc_free (tag, pages);

  liballoc_unlock ();
  return;

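Note: the page backend no longer threads a mem_rid through every allocation; liballoc_alloc just calls map (0, pages, MAP_FLAGS | MAP_RW), which presumably lets the kernel pick the address (sys_map receives vaddr = 0), and the lock hooks delegate to a process-group mutex with the fixed resource id 500. The practical effect is that malloc/free become safe to call from several clones of the same process group, e.g. (worker is an illustrative sketch):

    static void worker (void) {
      char* buf = malloc (64);  /* boundary tags serialized by LIBALLOC_MUTEX */
      /* ... */
      free (buf);
      quit ();
    }
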
@@ -41,8 +41,6 @@ struct boundary_tag {

  struct boundary_tag* next; //< Linked list info.
  struct boundary_tag* prev; //< Linked list info.
-
-  int mem_rid;
};

/** This function is supposed to lock the memory data structures. It
@@ -69,7 +67,7 @@ extern int liballoc_unlock (void);
 * \return NULL if the pages were not allocated.
 * \return A pointer to the allocated memory.
 */
-extern void* liballoc_alloc (int pages, int* mem_rid);
+extern void* liballoc_alloc (int pages);

/** This frees previously allocated memory. The void* parameter passed
 * to the function is the exact same value returned from a previous
@@ -79,7 +77,7 @@ extern void* liballoc_alloc (int pages);
 *
 * \return 0 if the memory was successfully freed.
 */
-extern int liballoc_free (void* ptr, int pages, int mem_rid);
+extern int liballoc_free (void* ptr, int pages);

void* malloc (size_t);          //< The standard function.
void* realloc (void*, size_t);  //< The standard function.
@@ -87,6 +85,7 @@ void* calloc (size_t, size_t); //< The standard function.
void free (void*);              //< The standard function.

void liballoc_init (void);
+void liballoc_deinit (void);

#ifdef __cplusplus
}

@@ -2,7 +2,7 @@
#include <stddef.h>
#include <stdint.h>

-int amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
+uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
                   uintptr_t a5, uintptr_t a6) {
  uint64_t result;
  __asm__ volatile ("movq %[a4], %%r10\n"
@@ -13,5 +13,5 @@ int amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, ui
                    : "a"(syscall_num), "D"(a1), "S"(a2),
                      "d"(a3), [a4] "r"(a4), [a5] "r"(a5), [a6] "r"(a6)
                    : "r10", "r8", "r9", "r11", "rcx", "cc", "memory");
-  return (int)result;
+  return result;
}

@@ -3,7 +3,7 @@

#include <stdint.h>

-int amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
+uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
                   uintptr_t a5, uintptr_t a6);

#endif // _LIBMSL_AMD64_SYSCALL_H

@@ -17,8 +17,7 @@ static void clear_bss (void) {
void __premain (void) {
  clear_bss ();
  liballoc_init ();

  app_main ();
+  liballoc_deinit ();
  quit ();
}

@@ -9,24 +9,28 @@

#define do_syscall(...) do_syscall1 (__VA_ARGS__, 0, 0, 0, 0, 0, 0)

-int quit (void) { return do_syscall (SYS_QUIT); }
+int quit (void) { return do_syscall (SYS_QUIT, 0); }

int test (char c) { return do_syscall (SYS_TEST, c); }

-int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags) {
-  return do_syscall (SYS_MAP, mem_rid, vis, vaddr, flags);
-}
+int sched (void) { return do_syscall (SYS_SCHED, 0); }
+
+void* map (uintptr_t vaddr, size_t pages, uint32_t flags) {
+  return (void*)do_syscall (SYS_MAP, vaddr, pages, flags);
+}

int unmap (uintptr_t vaddr, size_t pages) { return do_syscall (SYS_UNMAP, vaddr, pages); }

-int create_mem (int mem_rid, int vis, size_t pages) {
-  return do_syscall (SYS_CREATE_MEM, mem_rid, vis, pages);
-}
+int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr) {
+  return do_syscall (SYS_CLONE, vstack_top, entry, argument_ptr);
+}

-int unlink_mem (int mem_rid, int vis, size_t pages) {
-  return do_syscall (SYS_UNLINK_MEM, mem_rid, vis, pages);
-}
+int mutex_create (int mutex_rid) { return do_syscall (SYS_MUTEX_CREATE, mutex_rid); }

-int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void)) {
-  return do_syscall (SYS_CLONE, vstack_top, stack_size, entry);
-}
+int mutex_delete (int mutex_rid) { return do_syscall (SYS_MUTEX_DELETE, mutex_rid); }
+
+int mutex_lock (int mutex_rid) { return do_syscall (SYS_MUTEX_LOCK, mutex_rid); }
+
+int mutex_unlock (int mutex_rid) { return do_syscall (SYS_MUTEX_UNLOCK, mutex_rid); }
+
+void* argument_ptr (void) { return (void*)do_syscall (SYS_ARGUMENT_PTR, 0); }

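Taken together, the reworked wrappers give a user program anonymous mapping, thread-style clone with an argument pointer, and named mutexes. An end-to-end sketch (the shared counter, the 4-page stack, and mutex id 1 are arbitrary choices for illustration):

    #include <m/system.h>

    #define MUX 1

    static void child (void) {
      int* counter = argument_ptr ();
      mutex_lock (MUX);
      (*counter)++;
      mutex_unlock (MUX);
      quit ();
    }

    void app_main (void) {
      int* counter = map (0, 1, MAP_FLAGS | MAP_RW);
      void* stack = map (0, 4, MAP_FLAGS | MAP_RW);

      mutex_create (MUX);
      clone ((uintptr_t)stack + 4 * PAGE_SIZE, child, counter);
    }
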
@@ -5,7 +5,6 @@
#include <stdint.h>

#if defined(__x86_64__)
-#define PROC_MAP_BASE 0x0000700000000000
#define PAGE_SIZE 4096
#endif

@@ -14,15 +13,16 @@
#define MAP_USER (1 << 2)
#define MAP_FLAGS (MAP_PRESENT | MAP_USER)

-#define RV_PRIVATE 0
-#define RV_PUBLIC 1
-
int quit (void);
int test (char c);
-int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags);
+int sched (void);
+void* map (uintptr_t vaddr, size_t pages, uint32_t flags);
int unmap (uintptr_t vaddr, size_t pages);
-int create_mem (int mem_rid, int vis, size_t pages);
-int unlink_mem (int mem_rid, int vis, size_t pages);
-int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void));
+int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr);
+int mutex_create (int mutex_rid);
+int mutex_delete (int mutex_rid);
+int mutex_lock (int mutex_rid);
+int mutex_unlock (int mutex_rid);
+void* argument_ptr (void);

#endif // _LIBMSL_M_SYSTEM_H

libmsl/proc/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
+*.o

libmsl/proc/local.h (new file, 6 lines)
@@ -0,0 +1,6 @@
+#ifndef _LIBMSL_PROC_TLS_H
+#define _LIBMSL_PROC_TLS_H
+
+#define LOCAL __thread
+
+#endif // _LIBMSL_PROC_TLS_H
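
LOCAL is a thin alias for the compiler's __thread storage class, giving each cloned process its own copy of a variable (seeded on the kernel side by the new proc_init_tls in kernel/sys/proc.h above). Illustrative use, with a hypothetical variable name:

    #include <proc/local.h>

    /* every cloned process sees its own instance */
    LOCAL int last_status;
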
libmsl/proc/proc.c (new file, 19 lines)
@@ -0,0 +1,19 @@
+#include <alloc/liballoc.h>
+#include <m/status.h>
+#include <m/system.h>
+#include <proc/proc.h>
+#include <stddef.h>
+#include <stdint.h>
+
+int process_spawn (process_func_t func, void* argument_ptr) {
+  void* stack = malloc (PROC_STACK_SIZE);
+  if (stack == NULL)
+    return -ST_OOM_ERROR;
+
+  uintptr_t top = (uintptr_t)stack + PROC_STACK_SIZE;
+  return clone (top, func, argument_ptr);
+}
+
+int process_quit (void) { return quit (); }
+
+void* process_argument (void) { return argument_ptr (); }
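
process_spawn rolls stack allocation and clone into a single call; note the stack comes from malloc and, in this version, is not reclaimed when the child quits. Usage sketch (worker and its argument are illustrative):

    static void worker (void) {
      int* value = process_argument ();
      /* ... use *value ... */
      process_quit ();
    }

    void app_main (void) {
      static int value = 42;
      process_spawn (worker, &value);
    }
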
libmsl/proc/proc.h (new file, 14 lines)
@@ -0,0 +1,14 @@
+#ifndef _LIBMSL_PROC_PROC_H
+#define _LIBMSL_PROC_PROC_H
+
+#include <m/system.h>
+
+#define PROC_STACK_SIZE 256 * PAGE_SIZE
+
+typedef void (*process_func_t) (void);
+
+int process_spawn (process_func_t func, void* argument_ptr);
+int process_quit (void);
+void* process_argument (void);
+
+#endif // _LIBMSL_PROC_PROC_H

libmsl/proc/src.mk (new file, 3 lines)
@@ -0,0 +1,3 @@
+c += proc/proc.c
+
+o += proc/proc.o

@@ -3,3 +3,4 @@ include init/src.mk
include m/src.mk
include string/src.mk
include alloc/src.mk
+include proc/src.mk

@@ -1,4 +1,4 @@
-apps := init
+apps := init spin

all_apps:
	@for d in $(apps); do make -C $$d platform=$(platform) all; done

@@ -7,4 +7,4 @@ clean_libmsl:
format_libmsl:
	make -C libmsl platform=$(platform) format

-.PHONY: all_libmsl clean_libmsl
+.PHONY: all_libmsl clean_libmsl format_libmsl

spin/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
+*.o
+*.exe

spin/Makefile (new file, 1 line)
@@ -0,0 +1 @@
+include ../make/user.mk

spin/app.mk (new file, 1 line)
@@ -0,0 +1 @@
+app := spin.exe

spin/spin.c (new file, 4 lines)
@@ -0,0 +1,4 @@
+void app_main (void) {
+  for (;;)
+    ;
+}

spin/src.mk (new file, 3 lines)
@@ -0,0 +1,3 @@
+c += spin.c
+
+o += spin.o