Compare commits

..

43 Commits

SHA1 Message Date
38e26a9c12 Implement argument_ptr () syscall for handling process arguments
2026-01-30 14:05:47 +01:00
124aa12f5b Redesign scheduling points
2026-01-30 02:36:27 +01:00
d2f5c032d9 Fix TLS alignment issues, works on BOCHS now too!
2026-01-29 18:18:24 +01:00
73e42588fb Fix BOCHS clock
2026-01-29 15:04:06 +01:00
e78bfb9984 Move suspension q code into proc/suspension_q.c
2026-01-29 01:52:18 +01:00
d2a88b3641 Move suspension q's cleanup to proc/suspension_q.c 2026-01-29 01:43:01 +01:00
fdda2e2df8 Unlock mutexes on process death 2026-01-29 01:38:44 +01:00
388418a718 Nice wrappers around process management
2026-01-29 00:08:54 +01:00
1c64d608bd Rename make/libc.mk -> make/libmsl.mk
2026-01-28 23:57:28 +01:00
3d23187acf Implement userspace TLS, remove RW Locks 2026-01-28 23:52:48 +01:00
a3b62ebd3d Clean up AMD64 memory management code, remove dependency on pd.lock 2026-01-27 19:03:03 +01:00
8bda300f6a Fix sys_clone () wrong argument bug
2026-01-27 18:05:02 +01:00
cf51600c6a Cleanup syscalls
2026-01-27 17:34:43 +01:00
b388b30b24 Redesign userspace memory management
2026-01-27 17:04:08 +01:00
600886a7ee Organize resources into process groups 2026-01-27 14:18:05 +01:00
67b66f2b39 Implement proper mutex cleanup
2026-01-25 23:10:12 +01:00
18f791222e Remove dead process from it's suspension queues 2026-01-25 22:39:29 +01:00
5e16bb647c Multiple process suspension queues 2026-01-25 22:10:04 +01:00
a68373e4ee Dynamically assign cpu upon mutex unlock
2026-01-25 20:39:51 +01:00
8650010992 Fix user CPU context saving
2026-01-25 17:39:34 +01:00
95f590fb3b multi-cpu scheduling WIP 2026-01-25 15:54:00 +01:00
7bb3b77ede Disable kernel preemption, fix requesting rescheduling
2026-01-22 19:32:15 +01:00
c26fd3cb2b Fix scheduler locking hierarchy 2026-01-22 15:59:29 +01:00
fea0999726 Fix scheduler starvation, use lists for scheduling
2026-01-22 11:54:52 +01:00
7eceecf6e3 Add mutex syscalls 2026-01-20 22:18:43 +01:00
fff51321bc Redesign syscalls
2026-01-20 20:46:34 +01:00
a29233f853 Rename proc_spawn_thread to proc_clone 2026-01-19 22:01:44 +01:00
38a43b59b0 Resolve strange IRQ issues which cause the scheduler to behave weirdly (IRQ mapping)
2026-01-19 01:51:34 +01:00
ddafc4eb19 Rewrite resource subsystem 2026-01-18 20:50:45 +01:00
4f7077d458 Move mutex and mem create/cleanup functions into mutex.c and mem.c respectively
2026-01-16 22:13:17 +01:00
9a7dbf0594 Properly implement liballoc_free () 2026-01-16 22:09:16 +01:00
ab8093cc6c CI install pymdown-extensions from pip
2026-01-16 20:28:26 +01:00
ddbb66b5e4 Docs processes overview 2026-01-16 20:26:23 +01:00
11a1eb52aa Move status codes into a separate header
2026-01-16 19:07:32 +01:00
a054257336 Port liballoc to userspace 2026-01-16 18:50:40 +01:00
9fc8521e63 sys_proc_mutex_unlock () automatically reschedule at the end
2026-01-16 00:28:46 +01:00
711da8aeab Implement proc_spawn_thread syscall, fix proc_resume and proc_suspend
2026-01-16 00:26:37 +01:00
ebd9f0cac6 Let the user application decide upon the resource ID (RID)
2026-01-14 23:19:39 +01:00
7cd5623d36 Use reference counting to track filetime of process PD
2026-01-14 23:11:06 +01:00
270ff507d4 Implement lock IRQ nesting via stack variables/contexts
2026-01-14 22:11:56 +01:00
55166f9d5f syscall doesn't need RPL 3 bits on kernel code
2026-01-14 21:21:20 +01:00
e5cc3a64d3 Fix syscall return value - preserve RAX register
2026-01-14 20:58:00 +01:00
2ab308d678 Drop m_ prefix from libmsl 2026-01-14 20:56:09 +01:00
88 changed files with 1898 additions and 1095 deletions

View File

@@ -25,7 +25,7 @@ jobs:
- name: Install mkdocs
run: |
pip install --upgrade pip
pip install mkdocs mkdocs-material
pip install mkdocs mkdocs-material pymdown-extensions
- name: Build
run: make docs

View File

@@ -4,4 +4,4 @@ include make/apps.mk
include make/kernel.mk
include make/dist.mk
include make/docs.mk
include make/libc.mk
include make/libmsl.mk

View File

@@ -6,6 +6,8 @@ PHDRS {
text PT_LOAD;
rodata PT_LOAD;
data PT_LOAD;
bss PT_LOAD;
tls PT_TLS;
}
SECTIONS {
@@ -13,32 +15,53 @@ SECTIONS {
.text : {
*(.text .text.*)
*(.ltext .ltext.*)
} :text
. = ALIGN(CONSTANT(MAXPAGESIZE));
. = ALIGN(0x1000);
.rodata : {
*(.rodata .rodata.*)
} :rodata
.note.gnu.build-id : {
*(.note.gnu.build-id)
} :rodata
. = ALIGN(CONSTANT(MAXPAGESIZE));
. = ALIGN(0x1000);
.data : {
*(.data .data.*)
*(.ldata .ldata.*)
} :data
. = ALIGN(0x1000);
__bss_start = .;
.bss : {
*(.bss .bss.*)
} :data
*(.lbss .lbss.*)
} :bss
__bss_end = .;
. = ALIGN(0x1000);
__tdata_start = .;
.tdata : {
*(.tdata .tdata.*)
} :tls
__tdata_end = .;
__tbss_start = .;
.tbss : {
*(.tbss .tbss.*)
} :tls
__tbss_end = .;
__tls_size = __tbss_end - __tdata_start;
/DISCARD/ : {
*(.eh_frame*)
*(.note .note.*)

View File

@@ -1,4 +1,4 @@
cpu: model=p4_prescott_celeron_336
cpu: model=p4_prescott_celeron_336, ips=200000000
memory: guest=4096 host=2048
@@ -9,6 +9,7 @@ ata0: enabled=1
ata0-master: type=cdrom, path=mop3.iso, status=inserted
com1: enabled=1, mode=file, dev=bochs-com1.txt
pci: enabled=1, chipset=i440fx
clock: sync=realtime, time0=local
boot: cdrom

Binary image file added (not shown); 118 KiB.

Binary image file added (not shown); 51 KiB.

View File

@@ -0,0 +1,30 @@
# Overview of processes in MOP3
## What is a process?
A process is a structure that represents the internal state of a user application's environment. This includes
the necessary stacks, code, data and other resources. A process usually has its own address space, but in certain
circumstances it may share it with another process.
## Only processes vs. processes-threads model
### Overview
MOP3 doesn't have a process-thread separation. Usually in operating systems you'd have a "process", which consists
of multiple worker threads; for example, a single-threaded application is a process consisting of one worker. In MOP3
we do things a little differently. There are only processes, but some processes may work within the same pool of
(generally speaking) "resources", such as a shared address space, shared memory allocations, mutexes and so on. An
application then consists not of threads, but of processes loosely tied together via this shared data.
#### Processes-threads model diagram
![Processes-threads model](assets/images/processes-threads.png)
#### Only processes model diagram
![Only processes model](assets/images/only-processes.png)
## Scheduling
MOP3 uses a round-robin scheduler. For now, priorities are unimplemented, i.e. every process has
equal priority, but this may change in the future.
A good explanation of round-robin scheduling can be found in the OSDev wiki's [Scheduling Algorithms](https://wiki.osdev.org/Scheduling_Algorithms#Round_Robin) article.
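To make the only-processes model concrete, here is a minimal userspace sketch in the style of the apps/init program changed later in this compare. It assumes the libmsl wrappers used there (process_spawn (), mutex_create (), mutex_lock ()/mutex_unlock (), argument_ptr () and test ()); it is an illustration of the model, not a definitive API reference.

#include <m/proc.h>
#include <stdint.h>

#define MUTEX 1000 /* application-chosen resource ID (RID) */

/* Sibling process: not a thread, but a process sharing the group's resources. */
static void worker (void) {
    /* per-process argument handed over at process_spawn () time */
    char letter = (char)(uintptr_t)argument_ptr ();
    for (;;) {
        mutex_lock (MUTEX);   /* suspends the caller while another process holds the mutex */
        test (letter);        /* debug-print syscall */
        mutex_unlock (MUTEX); /* may resume a suspended sibling */
    }
}

void app_main (void) {
    mutex_create (MUTEX);                /* mutexes are group-wide shared resources */
    process_spawn (&worker, (void*)'a'); /* spawn siblings in the same process group */
    process_spawn (&worker, (void*)'b');
    for (;;)
        ;
}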

include/m/status.h (new file)

@@ -0,0 +1,13 @@
#ifndef _M_STATUS_H
#define _M_STATUS_H
#define ST_OK 0
#define ST_SYSCALL_NOT_FOUND 1
#define ST_UNALIGNED 2
#define ST_OOM_ERROR 3
#define ST_NOT_FOUND 4
#define ST_BAD_ADDRESS_SPACE 5
#define ST_PERMISSION_ERROR 6
#define ST_BAD_RESOURCE 7
#endif // _M_STATUS_H

View File

@@ -1,22 +1,16 @@
#ifndef _M_SYSCALL_DEFS_H
#define _M_SYSCALL_DEFS_H
#define SYS_PROC_QUIT 1
#define SYS_PROC_TEST 2
#define SYS_PROC_MAP 3
#define SYS_PROC_UNMAP 4
#define SYS_PROC_CREATE_RESOURCE_MEM 5
#define SYS_PROC_DROP_RESOURCE 6
#define SYS_PROC_CREATE_RESOURCE_MUTEX 7
#define SYS_PROC_MUTEX_LOCK 8
#define SYS_PROC_MUTEX_UNLOCK 9
#define SR_OK 0
#define SR_SYSCALL_NOT_FOUND 1
#define SR_UNALIGNED 2
#define SR_OOM_ERROR 3
#define SR_NOT_FOUND 4
#define SR_BAD_ADDRESS_SPACE 5
#define SR_PERMISSION_ERROR 6
#define SYS_QUIT 1
#define SYS_TEST 2
#define SYS_MAP 3
#define SYS_UNMAP 4
#define SYS_CLONE 5
#define SYS_SCHED 6
#define SYS_MUTEX_CREATE 7
#define SYS_MUTEX_DELETE 8
#define SYS_MUTEX_LOCK 9
#define SYS_MUTEX_UNLOCK 10
#define SYS_ARGUMENT_PTR 11
#endif // _M_SYSCALL_DEFS_H

View File

@@ -1,75 +1,46 @@
#include <limits.h>
#include <m/proc.h>
#include <proc/local.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
#include <string/string.h>
char c = 'a';
#define MUTEX 2000
LOCAL volatile char letter = 'c';
void app_proc (void) {
char arg_letter = (char)(uintptr_t)argument_ptr ();
letter = arg_letter;
for (;;) {
mutex_lock (MUTEX);
for (int i = 0; i < 3; i++)
test (letter);
mutex_unlock (MUTEX);
}
process_quit ();
}
void app_main (void) {
/* uintptr_t out_paddr; */
/* int mem_rid = m_proc_create_resource_mem (16, RV_PRIVATE, &out_paddr); */
mutex_create (MUTEX);
/* m_proc_map (out_paddr, M_PROC_MAP_BASE, 16, PM_PRESENT | PM_RW | PM_USER); */
letter = 'a';
/* memset ((void*)M_PROC_MAP_BASE, 0, M_PAGE_SIZE * 16); */
process_spawn (&app_proc, (void*)'a');
process_spawn (&app_proc, (void*)'b');
process_spawn (&app_proc, (void*)'c');
/* m_proc_unmap (M_PROC_MAP_BASE, 16); */
for (;;) {
mutex_lock (MUTEX);
/* m_proc_drop_resource (mem_rid); */
for (int i = 0; i < 3; i++)
test (letter);
/* m_proc_test (); */
/* int mutex_rid = m_proc_create_resource_mutex (RV_PRIVATE); */
/* m_proc_mutex_lock (mutex_rid); */
/* m_proc_test (); */
/* m_proc_mutex_unlock (mutex_rid); */
if (c > 'z')
c = 'a';
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
m_proc_test (c++);
__asm__ volatile ("dupa: nop; nop; nop; jmp dupa");
/* for (volatile uint64_t i = 0; i < 1000*1000*100; i++); */
/* if (c > 'z') */
/* c = 'a'; */
/* m_proc_test ('k'); */
/* m_proc_test ('l'); */
/* m_proc_test ('m'); */
/* m_proc_test ('n'); */
/* m_proc_test ('o'); */
/* m_proc_test ('p'); */
/* m_proc_test ('r'); */
/* m_proc_test ('s'); */
/* m_proc_test ('t'); */
/* m_proc_test ('u'); */
mutex_unlock (MUTEX);
}
}

View File

@@ -1,3 +1,3 @@
S += init.S
c += init.c
o += init.o

View File

@@ -4,7 +4,7 @@
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/spin.h>
@@ -38,7 +38,7 @@
struct ioapic {
struct acpi_madt_ioapic table_data;
rw_spin_lock_t lock;
spin_lock_t lock;
uintptr_t mmio_base;
};
@@ -57,29 +57,31 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT;
/* Read IOAPIC */
static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) {
rw_spin_read_lock (&ioapic->lock);
spin_lock_ctx_t ctxioar;
spin_lock (&ioapic->lock, &ctxioar);
*(volatile uint32_t*)ioapic->mmio_base = reg;
uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10);
rw_spin_read_unlock (&ioapic->lock);
spin_unlock (&ioapic->lock, &ctxioar);
return ret;
}
/* Write IOAPIC */
static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) {
rw_spin_write_lock (&ioapic->lock);
spin_lock_ctx_t ctxioaw;
spin_lock (&ioapic->lock, &ctxioaw);
*(volatile uint32_t*)ioapic->mmio_base = reg;
*(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value;
rw_spin_write_unlock (&ioapic->lock);
spin_unlock (&ioapic->lock, &ctxioaw);
}
/* Find an IOAPIC corresposting to provided IRQ */
static struct ioapic* amd64_ioapic_find (uint8_t irq) {
static struct ioapic* amd64_ioapic_find (uint32_t irq) {
struct ioapic* ioapic = NULL;
for (size_t i = 0; i < ioapic_entries; i++) {
ioapic = &ioapics[i];
/* uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset +
* (uintptr_t)ioapic->table_data.address, 1); */
uint32_t version = amd64_ioapic_read (ioapic, 1);
uint32_t max = ((version >> 16) & 0xFF);
@@ -99,7 +101,7 @@ static struct ioapic* amd64_ioapic_find (uint8_t irq) {
* flags - IOAPIC redirection flags.
* lapic_id - Local APIC that will receive the interrupt.
*/
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
struct ioapic* ioapic = NULL;
struct acpi_madt_interrupt_source_override* override;
bool found_override = false;
@@ -115,13 +117,13 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);
if (found_override) {
uint8_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
calc_flags |= (uint64_t)mode << 15;
calc_flags |= (uint64_t)polarity << 13;
}
uint8_t gsi = found_override ? override->gsi : irq;
uint32_t gsi = found_override ? override->gsi : irq;
ioapic = amd64_ioapic_find (gsi);
@@ -158,9 +160,9 @@ void amd64_ioapic_init (void) {
struct acpi_madt_ioapic* ioapic_table_data = (struct acpi_madt_ioapic*)current;
mm_map_kernel_page ((uintptr_t)ioapic_table_data->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
MM_PG_PRESENT | MM_PG_RW);
ioapics[ioapic_entries++] = (struct ioapic){
.lock = RW_SPIN_LOCK_INIT,
.lock = SPIN_LOCK_INIT,
.table_data = *ioapic_table_data,
.mmio_base = ((uintptr_t)hhdm->offset + (uintptr_t)ioapic_table_data->address),
};
@@ -201,7 +203,9 @@ void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
* us - Period length in microseconds
*/
static uint32_t amd64_lapic_calibrate (uint32_t us) {
spin_lock (&lapic_calibration_lock);
spin_lock_ctx_t ctxlacb;
spin_lock (&lapic_calibration_lock, &ctxlacb);
amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
@@ -214,7 +218,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
DEBUG ("timer ticks = %u\n", ticks);
spin_unlock (&lapic_calibration_lock);
spin_unlock (&lapic_calibration_lock, &ctxlacb);
return ticks;
}
@@ -227,7 +231,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE);
amd64_lapic_write (LAPIC_TIMICT, ticks);
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17) | (1 << 16));
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
}
/*
@@ -242,8 +246,7 @@ void amd64_lapic_init (uint32_t us) {
uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base,
MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW);
amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
@@ -257,12 +260,12 @@ void amd64_lapic_init (uint32_t us) {
* lapic_id - Target Local APIC
* vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
*/
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
/* wait for previous IPI to finish */
while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) {
__asm__ volatile ("pause");
}
amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
amd64_lapic_write (LAPIC_ICR, vec);
amd64_lapic_write (LAPIC_ICR, vec | (1 << 14));
}

View File

@@ -3,12 +3,12 @@
#include <libk/std.h>
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_init (void);
uint32_t amd64_lapic_id (void);
void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec);
void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
void amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H

View File

@@ -9,6 +9,7 @@
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
@@ -29,7 +30,9 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
* the necessary platform-dependent subsystems/drivers and jump into the init app.
*/
void bootmain (void) {
struct cpu* bsp_cpu = cpu_make ();
struct limine_mp_response* mp = limine_mp_request.response;
struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id);
amd64_init (bsp_cpu, false);
syscall_init ();
@@ -46,8 +49,6 @@ void bootmain (void) {
smp_init ();
mm_init2 ();
proc_init ();
for (;;)

View File

@@ -35,6 +35,8 @@ static void amd64_debug_serial_write (char x) {
* Formatted printing to serial. serial_lock ensures that all prints are atomic.
*/
void debugprintf (const char* fmt, ...) {
spin_lock_ctx_t ctxdbgp;
if (!debug_init)
return;
@@ -50,14 +52,14 @@ void debugprintf (const char* fmt, ...) {
const char* p = buffer;
spin_lock (&serial_lock);
spin_lock (&serial_lock, &ctxdbgp);
while (*p) {
amd64_debug_serial_write (*p);
p++;
}
spin_unlock (&serial_lock);
spin_unlock (&serial_lock, &ctxdbgp);
}
/* Initialize serial */

View File

@@ -60,8 +60,9 @@ static void amd64_hpet_write32 (uint32_t reg, uint32_t value) {
static uint64_t amd64_hpet_read_counter (void) {
uint64_t value;
spin_lock_ctx_t ctxhrc;
spin_lock (&hpet_lock);
spin_lock (&hpet_lock, &ctxhrc);
if (!hpet_32bits)
value = amd64_hpet_read64 (HPET_MCVR);
@@ -76,13 +77,15 @@ static uint64_t amd64_hpet_read_counter (void) {
value = ((uint64_t)hi1 << 32) | lo;
}
spin_unlock (&hpet_lock);
spin_unlock (&hpet_lock, &ctxhrc);
return value;
}
static void amd64_hpet_write_counter (uint64_t value) {
spin_lock (&hpet_lock);
spin_lock_ctx_t ctxhwc;
spin_lock (&hpet_lock, &ctxhwc);
if (!hpet_32bits)
amd64_hpet_write64 (HPET_MCVR, value);
@@ -91,7 +94,7 @@ static void amd64_hpet_write_counter (uint64_t value) {
amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32));
}
spin_unlock (&hpet_lock);
spin_unlock (&hpet_lock, &ctxhwc);
}
/* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being
@@ -126,8 +129,7 @@ void amd64_hpet_init (void) {
hpet_paddr = (uintptr_t)hpet->address.address;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
uint64_t caps = amd64_hpet_read64 (HPET_GCIDR);
hpet_32bits = (caps & (1 << 13)) ? 0 : 1;

View File

@@ -157,7 +157,7 @@ static void amd64_intr_exception (struct saved_regs* regs) {
regs->rbx);
if (regs->cs == (GDT_UCODE | 0x03)) {
proc_kill (thiscpu->proc_current, regs);
proc_kill (thiscpu->proc_current);
} else {
spin ();
}
@@ -165,10 +165,21 @@ static void amd64_intr_exception (struct saved_regs* regs) {
/* Handle incoming interrupt, dispatch IRQ handlers. */
void amd64_intr_handler (void* stack_ptr) {
spin_lock_ctx_t ctxcpu, ctxpr;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* proc_current = thiscpu->proc_current;
spin_lock (&proc_current->lock, &ctxpr);
memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&proc_current->lock, &ctxpr);
spin_unlock (&thiscpu->lock, &ctxcpu);
if (regs->trap <= 31) {
amd64_intr_exception (regs);
} else {
@@ -177,13 +188,7 @@ void amd64_intr_handler (void* stack_ptr) {
struct irq* irq = irq_find (regs->trap);
if (irq != NULL) {
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("sti");
irq->func (irq->arg, stack_ptr);
if ((irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("cli");
}
}
}
@@ -210,27 +215,7 @@ static void amd64_irq_restore_flags (uint64_t rflags) {
}
/* Save current interrupt state */
void irq_save (void) {
int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 0)
thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
}
void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
/* Restore interrupt state */
void irq_restore (void) {
int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
/* Map custom IRQ mappings to legacy IRQs */
uint8_t amd64_resolve_irq (uint8_t irq) {
static const uint8_t mappings[] = {
[SCHED_PREEMPT_TIMER] = 0,
[TLB_SHOOTDOWN] = 1,
[CPU_REQUEST_SCHED] = 2,
[CPU_SPURIOUS] = 3,
};
return mappings[irq];
}
void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }

View File

@@ -32,7 +32,6 @@ struct saved_regs {
} PACKED;
void amd64_load_idt (void);
uint8_t amd64_resolve_irq (uint8_t irq);
void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H

View File

@@ -11,12 +11,10 @@
#include <sys/mm.h>
#include <sys/smp.h>
/* Present flag */
#define AMD64_PG_PRESENT (1 << 0)
/* Writable flag */
#define AMD64_PG_RW (1 << 1)
/* User-accessible flag */
#define AMD64_PG_USER (1 << 2)
#define AMD64_PG_HUGE (1 << 7)
/* Auxilary struct for page directory walking */
struct pg_index {
@@ -24,9 +22,12 @@ struct pg_index {
} PACKED;
/* Kernel page directory */
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
/* Lock needed to sync between map/unmap operations and TLB shootdown */
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
static struct pd kernel_pd;
static spin_lock_t kernel_pd_lock;
void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); }
/* Get current value of CR3 register */
static uintptr_t amd64_current_cr3 (void) {
@@ -44,6 +45,8 @@ void amd64_load_kernel_cr3 (void) {
}
}
struct pd* mm_get_kernel_pd (void) { return &kernel_pd; }
/* Extract PML info from virtual address */
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
struct pg_index ret;
@@ -63,9 +66,12 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
if (entry & AMD64_PG_PRESENT)
if (entry & AMD64_PG_PRESENT) {
if (entry & AMD64_PG_HUGE)
return NULL;
paddr = entry & ~0xFFFULL;
else {
} else {
if (!alloc)
return NULL;
@@ -108,13 +114,7 @@ static void amd64_reload_cr3 (void) {
/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t amd64_flags = amd64_mm_resolve_flags (flags);
@@ -123,67 +123,50 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
if (pml3 == NULL)
goto done;
return;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
if (pml2 == NULL)
goto done;
return;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
if (pml1 == NULL)
goto done;
return;
uint64_t* pte = &pml1[pg_index.pml1];
*pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
do_reload = true;
done:
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
}
/* Map a page into kernel page directory */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
mm_map_page (&kernel_pd, paddr, vaddr, flags);
amd64_reload_cr3 ();
}
/* Unmap a virtual address. TLB needs to be flushed afterwards */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
return;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
return;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
return;
uint64_t* pte = &pml1[pg_index.pml1];
if ((*pte) & AMD64_PG_PRESENT) {
if ((*pte) & AMD64_PG_PRESENT)
*pte = 0;
do_reload = true;
}
if (amd64_mm_is_table_empty (pml1)) {
uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL;
@@ -202,28 +185,14 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
}
}
}
done:
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
}
/* Unmap a page from kernel page directory */
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
mm_unmap_page (&kernel_pd, vaddr, flags);
void mm_unmap_kernel_page (uintptr_t vaddr) {
mm_unmap_page (&kernel_pd, vaddr);
amd64_reload_cr3 ();
}
/* Lock kernel page directory */
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
/* Unlock kernel page directory */
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
/* Allocate a userspace-ready page directory */
uintptr_t mm_alloc_user_pd_phys (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -242,24 +211,10 @@ uintptr_t mm_alloc_user_pd_phys (void) {
return cr3;
}
/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
void mm_reload (void) {
struct limine_mp_response* mp = limine_mp_request.response;
for (size_t i = 0; i < mp->cpu_count; i++) {
amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
}
}
bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
bool mm_validate (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool ret = false;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -279,42 +234,26 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
ret = (pte & AMD64_PG_PRESENT) != 0;
done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
return ret;
}
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags) {
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size) {
bool ok = true;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
for (size_t i = 0; i < size; i++) {
ok = mm_validate (pd, vaddr + i, 0);
ok = mm_validate (pd, vaddr + i);
if (!ok)
goto done;
}
done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
return ok;
}
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
spin_lock (&mm_lock);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
for (size_t i4 = 0; i4 < 512; i4++) {
@@ -345,23 +284,13 @@ uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags) {
}
done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
return ret;
}
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t ret = 0;
if (flags & MM_PD_LOCK)
spin_lock (&pd->lock);
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
@@ -385,27 +314,8 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
ret = ((pte & ~0xFFFULL) | (vaddr & 0xFFFULL));
done:
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
return ret;
}
/* TLB shootdown IRQ handler */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
(void)arg, (void)regs;
amd64_reload_cr3 ();
DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
}
/* Continue initializing memory management subsystem for AMD64 after the essential parts were
* initialized */
void mm_init2 (void) {
irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
}
/* Initialize essentials for the AMD64 memory management subsystem */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -7,11 +7,9 @@
#define PAGE_SIZE 4096
struct pd {
spin_lock_t lock;
uintptr_t cr3_paddr;
};
void amd64_load_kernel_cr3 (void);
void mm_init2 (void);
#endif // _KERNEL_AMD64_MM_H

View File

@@ -1,5 +1,7 @@
#include <amd64/gdt.h>
#include <amd64/proc.h>
#include <aux/elf.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
@@ -7,12 +9,15 @@
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/proc.h>
static atomic_int pids = 1;
static atomic_int pids = 0;
struct proc* proc_from_elf (uint8_t* elf_contents) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -27,39 +32,20 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
atomic_store (&proc->state, PROC_READY);
proc->pid = atomic_fetch_add (&pids, 1);
proc->pd.lock = SPIN_LOCK_INIT;
proc->pd.cr3_paddr = mm_alloc_user_pd_phys ();
if (proc->pd.cr3_paddr == 0) {
proc->procgroup = procgroup_create ();
if (proc->procgroup == NULL) {
free (proc);
return NULL;
}
procgroup_attach (proc->procgroup, proc);
int kstk_rid = atomic_fetch_add (&proc->rids, 1);
struct proc_resource_mem_init kstk_mem_init = {.pages = KSTACK_SIZE / PAGE_SIZE};
struct proc_resource* kstk_r =
proc_create_resource (proc, kstk_rid, PR_MEM, RV_PRIVATE, (void*)&kstk_mem_init);
if (kstk_r == NULL) {
free (proc);
return NULL;
}
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
proc->pdata.kernel_stack = kstk_r->u.mem.paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
procgroup_map (proc->procgroup, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW, NULL);
int ustk_rid = atomic_fetch_add (&proc->rids, 1);
struct proc_resource_mem_init ustk_mem_init = {.pages = USTACK_SIZE / PAGE_SIZE};
struct proc_resource* ustk_r =
proc_create_resource (proc, ustk_rid, PR_MEM, RV_PRIVATE, (void*)&ustk_mem_init);
if (ustk_r == NULL) {
kstk_r->ops.cleanup (proc, kstk_r);
free (kstk_r);
free (proc);
return NULL;
}
proc->pdata.user_stack = ustk_r->u.mem.paddr;
proc_map (proc, proc->pdata.user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
proc->flags |= PROC_USTK_PREALLOC;
struct elf_aux aux = proc_load_segments (proc, elf_contents);
@@ -72,29 +58,81 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
return proc;
}
void proc_cleanup (struct proc* proc) {
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
uintptr_t argument_ptr) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxprt;
proc_cleanup_resources (proc);
struct proc* proc = malloc (sizeof (*proc));
if (proc == NULL)
return NULL;
struct list_node_link *mapping_link, *mapping_link_tmp;
spin_lock (&proc->pd.lock);
memset (proc, 0, sizeof (*proc));
list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
proc->lock = SPIN_LOCK_INIT;
atomic_store (&proc->state, PROC_READY);
proc->pid = atomic_fetch_add (&pids, 1);
list_remove (proc->mappings, mapping_link);
free (mapping);
spin_lock (&proto->lock, &ctxprt);
proc->procgroup = proto->procgroup;
procgroup_attach (proc->procgroup, proc);
spin_unlock (&proto->lock, &ctxprt);
uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE;
proc->pdata.regs.ss = GDT_UDATA | 0x03;
proc->pdata.regs.rsp = (uint64_t)vstack_top;
proc->pdata.regs.rflags = 0x202;
proc->pdata.regs.cs = GDT_UCODE | 0x03;
proc->pdata.regs.rip = (uint64_t)entry;
proc->uvaddr_argument = argument_ptr;
proc_init_tls (proc);
return proc;
}
spin_unlock (&proc->pd.lock);
void proc_cleanup (struct proc* proc) {
proc_sqs_cleanup (proc);
proc_mutexes_cleanup (proc);
pmm_free (proc->pd.cr3_paddr, 1);
pmm_free (proc->pdata.kernel_stack, KSTACK_SIZE / PAGE_SIZE);
procgroup_unmap (proc->procgroup, proc->pdata.tls_vaddr, proc->procgroup->tls.tls_tmpl_pages);
pmm_free (proc->pdata.kernel_stack - (uintptr_t)hhdm->offset - KSTACK_SIZE,
KSTACK_SIZE / PAGE_SIZE);
pmm_free (proc->pdata.user_stack, USTACK_SIZE / PAGE_SIZE);
procgroup_detach (proc->procgroup, proc);
/* clean the process */
free (proc);
}
void proc_init_tls (struct proc* proc) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
if (proc->procgroup->tls.tls_tmpl == NULL)
return;
size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
size_t pages = proc->procgroup->tls.tls_tmpl_pages;
uintptr_t tls_paddr;
uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;
uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);
uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;
memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
uintptr_t ktcb = k_tls_addr + tls_size;
uintptr_t utcb = tls_vaddr + tls_size;
*(uintptr_t*)ktcb = utcb;
proc->pdata.fs_base = utcb;
proc->pdata.tls_vaddr = tls_vaddr;
}

View File

@@ -4,17 +4,19 @@
#include <amd64/intr.h>
#include <libk/std.h>
/// Top of userspace process' stack
/* Top of userspace process' stack */
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
/// Size of userspace process' stack
/* Size of userspace process' stack */
#define USTACK_SIZE (256 * PAGE_SIZE)
/* proc_map () base address */
#define PROC_MAP_BASE 0x0000700000000000
/// Platform-dependent process data
/* Platform-dependent process data */
struct proc_platformdata {
struct saved_regs regs;
uintptr_t user_stack;
uintptr_t kernel_stack;
uint64_t gs_base;
uint64_t fs_base;
uintptr_t tls_vaddr;
};
#endif // _KERNEL_AMD64_PROC_H

kernel/amd64/procgroup.h (new file)

@@ -0,0 +1,13 @@
#ifndef _KERNEL_AMD64_PROCGRPUP_H
#define _KERNEL_AMD64_PROCGRPUP_H
#include <libk/std.h>
struct procgroup_tls {
uint8_t* tls_tmpl;
size_t tls_tmpl_size;
size_t tls_tmpl_total_size;
size_t tls_tmpl_pages;
};
#endif // _KERNEL_AMD64_PROCGRPUP_H

View File

@@ -3,14 +3,21 @@
#include <amd64/sched.h>
#include <libk/std.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/smp.h>
void do_sched (struct proc* proc) {
__asm__ volatile ("cli");
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) {
spin_lock_ctx_t ctxpr;
spin_lock (&proc->lock, &ctxpr);
thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd.cr3_paddr);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (cpu_lock, ctxcpu);
amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr);
}

View File

@@ -8,8 +8,10 @@
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscall.h>
@@ -21,7 +23,7 @@ static struct cpu cpus[CPUS_MAX];
static atomic_int cpu_init_count;
/// Allocate a CPU structure
struct cpu* cpu_make (void) {
struct cpu* cpu_make (uint64_t lapic_id) {
int id = atomic_fetch_add (&cpu_counter, 1);
struct cpu* cpu = &cpus[id];
@@ -29,7 +31,7 @@ struct cpu* cpu_make (void) {
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
cpu->self = cpu;
cpu->lapic_id = lapic_id;
amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
@@ -42,21 +44,36 @@ struct cpu* cpu_get (void) {
}
void cpu_request_sched (struct cpu* cpu) {
struct limine_mp_response* mp = limine_mp_request.response;
if (cpu == thiscpu) {
proc_sched ();
return;
}
for (size_t i = 0; i < mp->cpu_count; i++) {
if (cpu->id == i) {
amd64_lapic_ipi (mp->cpus[i]->lapic_id, CPU_REQUEST_SCHED);
break;
amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED);
}
struct cpu* cpu_find_lightest (void) {
struct cpu* cpu = &cpus[0];
int load = atomic_load (&cpu->proc_run_q_count);
for (unsigned int i = 1; i < cpu_counter; i++) {
struct cpu* new_cpu = &cpus[i];
int new_load = atomic_load (&new_cpu->proc_run_q_count);
if (new_load < load) {
load = new_load;
cpu = new_cpu;
}
}
return cpu;
}
/// Bootstrap code for non-BSP CPUs
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
amd64_load_kernel_cr3 ();
struct cpu* cpu = cpu_make ();
struct cpu* cpu = cpu_make (mp_info->lapic_id);
amd64_init (cpu, true); /* gdt + idt */
syscall_init ();
@@ -65,12 +82,14 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
DEBUG ("CPU %u is online!\n", thiscpu->id);
__asm__ volatile ("sti");
atomic_fetch_sub (&cpu_init_count, 1);
for (;;)
;
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}
/// Initialize SMP subsystem for AMD64. Start AP CPUs
@@ -82,7 +101,7 @@ void smp_init (void) {
cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
for (size_t i = 0; i < mp->cpu_count; i++) {
if (mp->cpus[i]->lapic_id != thiscpu->id) {
if (mp->cpus[i]->lapic_id != thiscpu->lapic_id) {
DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
}

View File

@@ -2,6 +2,7 @@
#define _KERNEL_AMD64_SMP_H
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/rbtree.h>
@@ -15,7 +16,6 @@ struct cpu {
/* for syscall instruction */
uintptr_t syscall_user_stack;
uintptr_t syscall_kernel_stack;
struct cpu* self;
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);
@@ -24,22 +24,20 @@ struct cpu {
uintptr_t lapic_mmio_base;
uint64_t lapic_ticks;
uint64_t lapic_id;
uint32_t id;
struct {
uint64_t rflags;
atomic_int nesting;
} irq_ctx;
spin_lock_t lock;
struct rb_node_link* proc_run_q;
struct list_node_link* proc_run_q;
struct proc* proc_current;
atomic_int proc_run_q_count;
};
struct cpu* cpu_make (void);
struct cpu* cpu_make (uint64_t lapic_id);
struct cpu* cpu_get (void);
void cpu_request_sched (struct cpu* cpu);
struct cpu* cpu_find_lightest (void);
#define thiscpu (cpu_get ())

View File

@@ -3,6 +3,8 @@
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/string.h>
#include <m/status.h>
#include <m/syscall_defs.h>
#include <proc/proc.h>
#include <sys/debug.h>
@@ -11,31 +13,33 @@
extern void amd64_syscall_entry (void);
int amd64_syscall_dispatch (void* stack_ptr) {
amd64_load_kernel_cr3 ();
uintptr_t amd64_syscall_dispatch (void* stack_ptr) {
spin_lock_ctx_t ctxcpu, ctxpr;
amd64_load_kernel_cr3 ();
struct saved_regs* regs = stack_ptr;
spin_lock (&thiscpu->lock, &ctxcpu);
struct proc* caller = thiscpu->proc_current;
spin_lock (&caller->lock, &ctxpr);
memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs));
spin_unlock (&caller->lock, &ctxpr);
spin_unlock (&thiscpu->lock, &ctxcpu);
int syscall_num = regs->rax;
syscall_handler_func_t func = syscall_find_handler (syscall_num);
if (func == NULL)
return -SR_SYSCALL_NOT_FOUND;
if (func == NULL) {
return -ST_SYSCALL_NOT_FOUND;
}
struct proc* caller = thiscpu->proc_current;
__asm__ volatile ("sti");
int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
__asm__ volatile ("cli");
return result;
return func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
}
void syscall_init (void) {
amd64_wrmsr (MSR_STAR,
((uint64_t)(GDT_KCODE | 0x03) << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48));
amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry);
amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9));
amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE);

View File

@@ -4,6 +4,8 @@
.global amd64_syscall_entry
amd64_syscall_entry:
cli
movq %rsp, %gs:0
movq %gs:8, %rsp
@@ -37,7 +39,7 @@ amd64_syscall_entry:
movq %rbp, %rsp
popq %rax; movq %rax, %cr3
popq %rbx; movq %rbx, %cr3
pop_regs_skip_rax

View File

@@ -2,7 +2,8 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#if defined(__x86_64__)
#include <amd64/apic.h>
@@ -11,9 +12,11 @@
struct irq* irq_table[0x100];
static rw_spin_lock_t irqs_lock;
static spin_lock_t irqs_lock = SPIN_LOCK_INIT;
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) {
spin_lock_ctx_t ctxiqa;
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
struct irq* irq = malloc (sizeof (*irq));
if (irq == NULL) {
return false;
@@ -22,26 +25,22 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint3
irq->func = func;
irq->arg = arg;
irq->irq_num = irq_num;
irq->flags = flags;
rw_spin_write_lock (&irqs_lock);
spin_lock (&irqs_lock, &ctxiqa);
irq_table[irq_num] = irq;
rw_spin_write_unlock (&irqs_lock);
#if defined(__x86_64__)
uint8_t resolution = amd64_resolve_irq (irq_num);
amd64_ioapic_route_irq (irq_num, resolution, 0, amd64_lapic_id ());
#endif
spin_unlock (&irqs_lock, &ctxiqa);
return true;
}
struct irq* irq_find (uint32_t irq_num) {
rw_spin_read_lock (&irqs_lock);
spin_lock_ctx_t ctxiqa;
spin_lock (&irqs_lock, &ctxiqa);
struct irq* irq = irq_table[irq_num];
rw_spin_read_unlock (&irqs_lock);
spin_unlock (&irqs_lock, &ctxiqa);
return irq;
}

View File

@@ -4,9 +4,6 @@
#include <libk/list.h>
#include <libk/std.h>
#define IRQ_INTERRUPT_SAFE (1 << 0)
#define IRQ_INTERRUPT_UNSAFE (1 << 1)
typedef void (*irq_func_t) (void* arg, void* regs);
struct irq {
@@ -15,10 +12,9 @@ struct irq {
irq_func_t func;
void* arg;
uint32_t irq_num;
uint32_t flags;
};
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags);
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num);
struct irq* irq_find (uint32_t irq_num);
#endif // _KERNEL_IRQ_IRQ_H

View File

@@ -8,6 +8,7 @@
if (!(x)) { \
DEBUG ("%s ssertion failed\n", #x); \
spin (); \
__builtin_unreachable (); \
} \
} while (0)

View File

@@ -20,3 +20,4 @@ DECL_REQ (memmap, MEMMAP);
DECL_REQ (rsdp, RSDP);
DECL_REQ (mp, MP);
DECL_REQ (module, MODULE);
DECL_REQ (framebuffer, FRAMEBUFFER);

View File

@@ -10,5 +10,6 @@ EXTERN_REQ (memmap);
EXTERN_REQ (rsdp);
EXTERN_REQ (mp);
EXTERN_REQ (module);
EXTERN_REQ (framebuffer);
#endif // _KERNEL_LIMINE_REQUESTS_H

View File

@@ -11,13 +11,13 @@
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;
int liballoc_lock (void) {
spin_lock (&_liballoc_lock);
int liballoc_lock (void* ctx) {
spin_lock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
return 0;
}
int liballoc_unlock (void) {
spin_unlock (&_liballoc_lock);
int liballoc_unlock (void* ctx) {
spin_unlock (&_liballoc_lock, (spin_lock_ctx_t*)ctx);
return 0;
}
@@ -243,8 +243,9 @@ void* malloc (size_t size) {
int index;
void* ptr;
struct boundary_tag* tag = NULL;
spin_lock_ctx_t ctxliba;
liballoc_lock ();
liballoc_lock (&ctxliba);
if (l_initialized == 0) {
for (index = 0; index < MAXEXP; index++) {
@@ -272,7 +273,7 @@ void* malloc (size_t size) {
// No page found. Make one.
if (tag == NULL) {
if ((tag = allocate_new_tag (size)) == NULL) {
liballoc_unlock ();
liballoc_unlock (&ctxliba);
return NULL;
}
@@ -305,23 +306,24 @@ void* malloc (size_t size) {
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
liballoc_unlock ();
liballoc_unlock (&ctxliba);
return ptr;
}
void free (void* ptr) {
int index;
struct boundary_tag* tag;
spin_lock_ctx_t ctxliba;
if (ptr == NULL)
return;
liballoc_lock ();
liballoc_lock (&ctxliba);
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
if (tag->magic != LIBALLOC_MAGIC) {
liballoc_unlock (); // release the lock
liballoc_unlock (&ctxliba); // release the lock
return;
}
@@ -354,7 +356,7 @@ void free (void* ptr) {
liballoc_free (tag, pages);
liballoc_unlock ();
liballoc_unlock (&ctxliba);
return;
}
@@ -365,7 +367,7 @@ void free (void* ptr) {
insert_tag (tag, index);
liballoc_unlock ();
liballoc_unlock (&ctxliba);
}
void* calloc (size_t nobj, size_t size) {
@@ -385,6 +387,7 @@ void* realloc (void* p, size_t size) {
void* ptr;
struct boundary_tag* tag;
int real_size;
spin_lock_ctx_t ctxliba;
if (size == 0) {
free (p);
@@ -394,11 +397,11 @@ void* realloc (void* p, size_t size) {
return malloc (size);
if (&liballoc_lock != NULL)
liballoc_lock (); // lockit
liballoc_lock (&ctxliba); // lockit
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
real_size = tag->size;
if (&liballoc_unlock != NULL)
liballoc_unlock ();
liballoc_unlock (&ctxliba);
if ((size_t)real_size > size)
real_size = size;

View File

@@ -47,7 +47,7 @@ struct boundary_tag {
* \return 0 if the lock was acquired successfully. Anything else is
* failure.
*/
extern int liballoc_lock ();
extern int liballoc_lock (void* ctx);
/** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it
@@ -55,7 +55,7 @@ extern int liballoc_lock ();
*
* \return 0 if the lock was successfully released.
*/
extern int liballoc_unlock ();
extern int liballoc_unlock (void* ctx);
/** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages

View File

@@ -100,6 +100,8 @@ static size_t pmm_find_free_space (struct pmm_region* pmm_region, size_t nblks)
}
physaddr_t pmm_alloc (size_t nblks) {
spin_lock_ctx_t ctxpmmr;
for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
struct pmm_region* pmm_region = &pmm.regions[region];
@@ -107,7 +109,7 @@ physaddr_t pmm_alloc (size_t nblks) {
if (!(pmm_region->flags & PMM_REGION_ACTIVE))
continue;
spin_lock (&pmm_region->lock);
spin_lock (&pmm_region->lock, &ctxpmmr);
/* Find starting bit of the free bit range */
size_t bit = pmm_find_free_space (pmm_region, nblks);
@@ -116,18 +118,19 @@ physaddr_t pmm_alloc (size_t nblks) {
if (bit != (size_t)-1) {
/* Mark it */
bm_set_region (&pmm_region->bm, bit, nblks);
spin_unlock (&pmm_region->lock);
spin_unlock (&pmm_region->lock, &ctxpmmr);
return pmm_region->membase + bit * PAGE_SIZE;
}
spin_unlock (&pmm_region->lock);
spin_unlock (&pmm_region->lock, &ctxpmmr);
}
return PMM_ALLOC_ERR;
}
void pmm_free (physaddr_t p_addr, size_t nblks) {
spin_lock_ctx_t ctxpmmr;
/* Round down to nearest page boundary */
physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE);
@@ -145,11 +148,11 @@ void pmm_free (physaddr_t p_addr, size_t nblks) {
size_t bit = div_align_up (addr, PAGE_SIZE);
spin_lock (&pmm_region->lock);
spin_lock (&pmm_region->lock, &ctxpmmr);
bm_clear_region (&pmm_region->bm, bit, nblks);
spin_unlock (&pmm_region->lock);
spin_unlock (&pmm_region->lock, &ctxpmmr);
break;
}

kernel/proc/locks.txt (new file)

@@ -0,0 +1,10 @@
Lock hierarchy for process scheduling:
1. proc_tree_lock
2. cpu->lock
3. procgroup->lock
4. proc->lock
5. sq->lock
1. procgroup_tree_lock
2. procgroup->lock
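For the first hierarchy, a minimal sketch of what nested acquisition looks like with the spin_lock ()/spin_lock_ctx_t API introduced in this changeset; the structure names match the kernel sources above, but the exact call site is hypothetical.

/* Take the scheduling locks strictly in the documented order and release
 * them in reverse, so two CPUs can never acquire them in conflicting order. */
spin_lock_ctx_t ctxcpu, ctxpg, ctxpr;

spin_lock (&thiscpu->lock, &ctxcpu);        /* 2. cpu->lock */
spin_lock (&proc->procgroup->lock, &ctxpg); /* 3. procgroup->lock */
spin_lock (&proc->lock, &ctxpr);            /* 4. proc->lock */

/* ... manipulate the run queue / process state here ... */

spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&proc->procgroup->lock, &ctxpg);
spin_unlock (&thiscpu->lock, &ctxcpu);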

View File

@@ -1,42 +1,130 @@
#include <libk/assert.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
#include <mm/liballoc.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
while (atomic_flag_test_and_set_explicit (&mutex->flag, memory_order_acquire))
proc_suspend (proc, &mutex->suspension_q);
void proc_mutexes_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxrs;
/* taken */
spin_lock (&proc->procgroup->lock, &ctxpg);
struct rb_node_link* rnode;
rbtree_first (&proc->procgroup->resource_tree, rnode);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
rnode = next;
spin_lock (&resource->lock, &ctxrs);
if (resource->type != PR_MUTEX) {
spin_unlock (&resource->lock, &ctxrs);
continue;
}
if (resource->u.mutex.owner == proc && resource->u.mutex.locked) {
spin_unlock (&resource->lock, &ctxrs);
proc_mutex_unlock (proc, &resource->u.mutex);
}
}
spin_unlock (&proc->procgroup->lock, &ctxpg);
}
bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
struct proc_mutex* mutex = &resource->u.mutex;
spin_lock_ctx_t ctxmt, ctxsq;
spin_lock (&mutex->resource->lock, &ctxmt);
spin_lock (&mutex->suspension_q.lock, &ctxsq);
bool reschedule = PROC_NO_RESCHEDULE;
while (mutex->suspension_q.proc_list != NULL) {
struct list_node_link* node = mutex->suspension_q.proc_list;
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
struct proc* suspended_proc = sq_entry->proc;
/* we will relock during resume */
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);
/* reacquire */
spin_lock (&mutex->resource->lock, &ctxmt);
spin_lock (&mutex->suspension_q.lock, &ctxsq);
}
mutex->locked = false;
mutex->owner = NULL;
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
return reschedule;
}
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
spin_lock_ctx_t ctxmt;
spin_lock (&mutex->resource->lock, &ctxmt);
if (!mutex->locked || mutex->owner == proc) {
mutex->locked = true;
mutex->owner = proc;
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NO_RESCHEDULE;
}
return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
}
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
if (mutex->owner != proc)
return false;
spin_lock_ctx_t ctxmt, ctxsq;
atomic_flag_clear_explicit (&mutex->flag, memory_order_release);
spin_lock (&mutex->resource->lock, &ctxmt);
struct proc* resumed_proc;
struct rb_node_link* node;
rbtree_first (&mutex->suspension_q.proc_tree, node);
while (node) {
struct rb_node_link* next;
rbtree_next (node, next);
resumed_proc = rbtree_entry (node, struct proc, suspension_link);
proc_resume (resumed_proc);
node = next;
if (mutex->owner != proc) {
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NO_RESCHEDULE;
}
assert (mutex->suspension_q.proc_tree == NULL);
spin_lock (&mutex->suspension_q.lock, &ctxsq);
return true;
struct list_node_link* node = mutex->suspension_q.proc_list;
if (node) {
struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);
struct proc* resumed_proc = sq_entry->proc;
mutex->owner = resumed_proc;
mutex->locked = true;
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
return proc_sq_resume (resumed_proc, sq_entry);
}
mutex->locked = false;
mutex->owner = NULL;
spin_unlock (&mutex->suspension_q.lock, &ctxsq);
spin_unlock (&mutex->resource->lock, &ctxmt);
return PROC_NEED_RESCHEDULE;
}

View File

@@ -3,17 +3,21 @@
#include <libk/std.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
struct proc;
struct proc_resource;
struct proc_mutex {
atomic_flag flag;
struct proc_resource* resource;
bool locked;
struct proc_suspension_q suspension_q;
struct proc* owner;
};
void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_cleanup_resource_mutex (struct proc_resource* resource);
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
void proc_mutexes_cleanup (struct proc* proc);
#endif // _KERNEL_PROC_MUTEX_H

View File

@@ -10,9 +10,9 @@
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <rd/rd.h>
#include <sync/rw_spin_lock.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
@@ -25,18 +25,10 @@
#include <amd64/intr_defs.h>
#endif
#define SCHED_REAP_FREQ 200
/*
* Lock hierachy:
* - proc_tree_lock
* - cpu->lock
* - proc->lock
* - suspension_q->lock
*/
#define SCHED_REAP_FREQ 10
static struct rb_node_link* proc_tree = NULL;
static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
static atomic_int sched_cycles = 0;
@@ -46,93 +38,6 @@ static bool proc_check_elf (uint8_t* elf) {
return true;
}
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags) {
struct proc_mapping* mapping = malloc (sizeof (*mapping));
if (mapping == NULL)
return false;
mapping->paddr = start_paddr;
mapping->vaddr = start_vaddr;
mapping->size = pages * PAGE_SIZE;
flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */
spin_lock (&proc->pd.lock);
list_append (proc->mappings, &mapping->proc_mappings_link);
for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
mm_map_page (&proc->pd, ppage, vpage, flags);
}
spin_unlock (&proc->pd.lock);
return true;
}
bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false;
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;
spin_lock (&proc->pd.lock);
list_foreach (proc->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
uintptr_t m_end = mapping->vaddr + mapping->size;
/* check overlap */
if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
/* split in the middle */
if ((start_vaddr > mapping->vaddr) && (end_vaddr < m_end)) {
tail_mapping->vaddr = end_vaddr;
tail_mapping->paddr = mapping->paddr + (end_vaddr - mapping->vaddr);
tail_mapping->size = m_end - end_vaddr;
mapping->size = start_vaddr - mapping->vaddr;
list_insert_after (proc->mappings, &mapping->proc_mappings_link,
&tail_mapping->proc_mappings_link);
used_tail_mapping = true;
break;
} else if ((start_vaddr <= mapping->vaddr) && (end_vaddr < m_end)) { /* shrink left */
size_t diff = end_vaddr - mapping->vaddr;
mapping->vaddr += diff;
mapping->paddr += diff;
mapping->size -= diff;
} else if ((start_vaddr > mapping->vaddr) && (end_vaddr >= m_end)) { /* shrink right */
mapping->size = start_vaddr - mapping->vaddr;
} else { /* full overlap */
list_remove (proc->mappings, &mapping->proc_mappings_link);
free (mapping);
}
}
}
if (!used_tail_mapping)
free (tail_mapping);
for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
mm_unmap_page (&proc->pd, vpage, 0);
}
spin_unlock (&proc->pd.lock);
return true;
}
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
struct elf_aux aux;
@@ -157,25 +62,37 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
int rid = atomic_fetch_add (&proc->rids, 1);
struct proc_resource_mem_init mem_init = {.pages = blks};
struct proc_resource* r =
proc_create_resource (proc, rid, PR_MEM, RV_PRIVATE, (void*)&mem_init);
if (r == NULL) {
DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
}
uintptr_t p_addr = r->u.mem.paddr;
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
if (phdr->p_flags & PF_W)
pg_flags |= MM_PG_RW;
proc_map (proc, p_addr, v_addr, blks, pg_flags);
uintptr_t p_addr;
procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
} break;
case PT_TLS: {
#if defined(__x86_64__)
if (phdr->p_memsz > 0) {
size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
size_t tls_size = align_up (phdr->p_memsz, tls_align);
size_t tls_total_needed = tls_size + sizeof (uintptr_t);
size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
proc->procgroup->tls.tls_tmpl_pages = blks;
proc->procgroup->tls.tls_tmpl_size = tls_size;
proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
phdr->p_filesz);
proc_init_tls (proc);
}
#endif
} break;
}
}
@@ -183,11 +100,10 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
return aux;
}
static struct proc* proc_spawn_rd (char* name) {
struct proc* proc_spawn_rd (char* name) {
struct rd_file* rd_file = rd_get_file (name);
bool ok = proc_check_elf (rd_file->content);
DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
if (!ok)
return NULL;
@@ -195,59 +111,74 @@ static struct proc* proc_spawn_rd (char* name) {
return proc_from_elf (rd_file->content);
}
static void proc_register (struct proc* proc, struct cpu* cpu) {
proc->cpu = cpu;
struct proc* proc_find_pid (int pid) {
spin_lock_ctx_t ctxprtr;
struct proc* proc = NULL;
spin_lock (&cpu->lock);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
spin_lock (&proc_tree_lock, &ctxprtr);
rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
spin_unlock (&proc_tree_lock, &ctxprtr);
return proc;
}
void proc_register (struct proc* proc, struct cpu* cpu1) {
spin_lock_ctx_t ctxcpu, ctxprtr;
proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
struct cpu* cpu = proc->cpu;
spin_lock (&proc_tree_lock, &ctxprtr);
spin_lock (&cpu->lock, &ctxcpu);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == NULL)
cpu->proc_current = proc;
spin_unlock (&cpu->lock);
rw_spin_write_lock (&proc_tree_lock);
rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
rw_spin_write_unlock (&proc_tree_lock);
spin_unlock (&proc_tree_lock, &ctxprtr);
spin_unlock (&cpu->lock, &ctxcpu);
}
/* caller holds cpu->lock */
static struct proc* proc_find_sched (struct cpu* cpu) {
struct rb_node_link* node = NULL;
struct proc* current = cpu->proc_current;
struct proc* proc = NULL;
if (current)
rbtree_next (&current->cpu_run_q_link, node);
if (!node)
rbtree_first (&cpu->proc_run_q, node);
if (!node)
if (!cpu->proc_run_q)
return NULL;
struct rb_node_link* first = node;
struct list_node_link *current, *start;
if (cpu->proc_current)
current = cpu->proc_current->cpu_run_q_link.next;
else
current = cpu->proc_run_q;
if (!current)
current = cpu->proc_run_q;
start = current;
do {
proc = rbtree_entry (node, struct proc, cpu_run_q_link);
struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
if (atomic_load (&proc->state) == PROC_READY)
return proc;
rbtree_next (node, node);
current = current->next ? current->next : cpu->proc_run_q;
} while (current != start);
if (!node)
rbtree_first (&cpu->proc_run_q, node);
} while (node != first);
return ((atomic_load (&current->state) == PROC_READY) ? current : NULL);
return NULL;
}
static void proc_reap (void) {
struct proc* proc = NULL;
struct list_node_link* reap_list = NULL;
spin_lock_ctx_t ctxprtr;
spin_lock_ctx_t ctxpr;
rw_spin_write_lock (&proc_tree_lock);
spin_lock (&proc_tree_lock, &ctxprtr);
struct rb_node_link* node;
rbtree_first (&proc_tree, node);
@@ -258,17 +189,16 @@ static void proc_reap (void) {
proc = rbtree_entry (node, struct proc, proc_tree_link);
if (atomic_load (&proc->state) == PROC_DEAD) {
spin_lock (&proc->lock);
spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&proc_tree, &proc->proc_tree_link);
spin_unlock (&proc->lock);
list_append (reap_list, &proc->reap_link);
spin_unlock (&proc->lock, &ctxpr);
}
node = next;
}
rw_spin_write_unlock (&proc_tree_lock);
spin_unlock (&proc_tree_lock, &ctxprtr);
struct list_node_link *reap_link, *reap_link_tmp;
list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -280,7 +210,9 @@ static void proc_reap (void) {
}
}
void proc_sched (void* regs) {
void proc_sched (void) {
spin_lock_ctx_t ctxcpu;
int s_cycles = atomic_fetch_add (&sched_cycles, 1);
if (s_cycles % SCHED_REAP_FREQ == 0)
@@ -289,116 +221,63 @@ void proc_sched (void* regs) {
struct proc* next = NULL;
struct cpu* cpu = thiscpu;
spin_lock (&cpu->lock);
struct proc* prev = cpu->proc_current;
if (prev != NULL) {
spin_lock (&prev->lock);
prev->pdata.regs = *(struct saved_regs*)regs;
spin_unlock (&prev->lock);
}
spin_lock (&cpu->lock, &ctxcpu);
next = proc_find_sched (cpu);
if (next) {
cpu->proc_current = next;
spin_unlock (&cpu->lock);
do_sched (next);
do_sched (next, &cpu->lock, &ctxcpu);
} else {
cpu->proc_current = NULL;
spin_unlock (&cpu->lock);
spin_unlock (&cpu->lock, &ctxcpu);
spin ();
}
}
void proc_kill (struct proc* proc, void* regs) {
void proc_kill (struct proc* proc) {
spin_lock_ctx_t ctxpr, ctxcpu;
struct cpu* cpu = proc->cpu;
spin_lock (&proc->lock);
spin_lock (&proc->lock, &ctxpr);
atomic_store (&proc->state, PROC_DEAD);
spin_unlock (&proc->lock);
proc->cpu = NULL;
spin_unlock (&proc->lock, &ctxpr);
spin_lock (&cpu->lock);
spin_lock (&cpu->lock, &ctxcpu);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
spin_unlock (&cpu->lock);
spin_unlock (&cpu->lock, &ctxcpu);
DEBUG ("killed PID %d\n", proc->pid);
if (cpu == thiscpu)
proc_sched (regs);
else
cpu_request_sched (cpu);
}
void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
struct cpu* cpu = proc->cpu;
spin_lock (&proc->lock);
atomic_store (&proc->state, PROC_SUSPENDED);
proc->suspension_q = sq;
spin_unlock (&proc->lock);
/* remove from run q */
spin_lock (&cpu->lock);
rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
spin_unlock (&cpu->lock);
spin_lock (&sq->lock);
rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
spin_unlock (&sq->lock);
cpu_request_sched (cpu);
}
void proc_resume (struct proc* proc) {
struct cpu* cpu = proc->cpu;
struct proc_suspension_q* sq = proc->suspension_q;
spin_lock (&sq->lock);
rbtree_delete (&sq->proc_tree, &proc->suspension_link);
spin_unlock (&sq->lock);
spin_lock (&proc->lock);
proc->suspension_q = NULL;
atomic_store (&proc->state, PROC_READY);
spin_unlock (&proc->lock);
spin_lock (&cpu->lock);
rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
spin_unlock (&cpu->lock);
cpu_request_sched (cpu);
}
static void proc_irq_sched (void* arg, void* regs) {
(void)arg;
#if defined(__x86_64__)
struct saved_regs* s_regs = regs;
/* Only schedule, when we came from usermode */
if ((s_regs->cs & 0x03))
proc_sched (regs);
#endif
proc_sched ();
}
void proc_init (void) {
struct proc* init = proc_spawn_rd ("init.exe");
proc_register (init, thiscpu);
#if defined(__x86_64__)
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_UNSAFE);
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_UNSAFE);
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
#endif
do_sched (init);
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
proc_register (spin_proc, thiscpu);
struct proc* init = proc_spawn_rd ("init.exe");
proc_register (init, NULL);
spin_lock_ctx_t ctxcpu;
spin_lock (&spin_proc->cpu->lock, &ctxcpu);
do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
}

View File

@@ -6,6 +6,7 @@
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
@@ -16,51 +17,41 @@
#include <amd64/proc.h> /* USTACK_SIZE */
#endif
/* Process is ready to run */
#define PROC_NEED_RESCHEDULE true
#define PROC_NO_RESCHEDULE false
/* process states */
#define PROC_READY 0
/* Process marked for garbage collection */
#define PROC_DEAD 1
/* Process is suspended */
#define PROC_SUSPENDED 2
#define PROC_RESOURCES_MAX 1024
/* process flags */
#define PROC_USTK_PREALLOC (1 << 0)
struct cpu;
struct proc_mapping {
struct list_node_link proc_mappings_link;
uintptr_t paddr;
uintptr_t vaddr;
size_t size;
};
struct proc {
int pid;
struct rb_node_link proc_tree_link;
struct rb_node_link cpu_run_q_link;
struct rb_node_link suspension_link;
struct rb_node_link procgroup_memb_tree_link;
struct list_node_link cpu_run_q_link;
struct list_node_link reap_link;
struct list_node_link* mappings; /* pd.lock implicitly protects this field */
struct list_node_link* sq_entries;
struct procgroup* procgroup;
struct proc_platformdata pdata;
struct pd pd;
uint32_t flags;
spin_lock_t lock;
struct cpu* cpu;
atomic_int state;
struct rb_node_link* resource_tree;
atomic_int rids;
struct proc_suspension_q* suspension_q;
uintptr_t uvaddr_argument;
};
void proc_suspend (struct proc* proc, struct proc_suspension_q* sq);
void proc_resume (struct proc* proc);
void proc_sched (void* regs);
void proc_kill (struct proc* proc, void* regs);
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags);
bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages);
void proc_sched (void);
void proc_kill (struct proc* proc);
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
void proc_register (struct proc* proc, struct cpu* cpu);
struct proc* proc_find_pid (int pid);
struct proc* proc_spawn_rd (char* name);
void proc_init (void);
#endif // _KERNEL_PROC_PROC_H

218
kernel/proc/procgroup.c Normal file
View File

@@ -0,0 +1,218 @@
#include <libk/rbtree.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
static struct rb_node_link* procgroup_tree = NULL;
static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
static atomic_int pgids = 0;
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr) {
spin_lock_ctx_t ctxpg;
spin_lock (&procgroup->lock, &ctxpg);
vaddr = (vaddr == 0) ? procgroup->map_base : vaddr;
struct proc_mapping* mapping = malloc (sizeof (*mapping));
if (mapping == NULL) {
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}
uintptr_t paddr = pmm_alloc (pages);
if (paddr == PMM_ALLOC_ERR) {
free (mapping);
spin_unlock (&procgroup->lock, &ctxpg);
return 0;
}
if (out_paddr != NULL)
*out_paddr = paddr;
mapping->paddr = paddr;
mapping->vaddr = vaddr;
mapping->size = pages * PAGE_SIZE;
procgroup->map_base += pages * PAGE_SIZE;
list_append (procgroup->mappings, &mapping->proc_mappings_link);
for (uintptr_t vpage = vaddr, ppage = paddr; vpage < vaddr + pages * PAGE_SIZE;
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
mm_map_page (&procgroup->pd, ppage, vpage, flags);
}
spin_unlock (&procgroup->lock, &ctxpg);
return vaddr;
}
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
size_t unmap_size = pages * PAGE_SIZE;
uintptr_t end_vaddr = start_vaddr + unmap_size;
struct list_node_link *mapping_link, *mapping_link_tmp;
bool used_tail_mapping = false;
spin_lock_ctx_t ctxpg;
struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
if (tail_mapping == NULL)
return false;
spin_lock (&procgroup->lock, &ctxpg);
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
uintptr_t m_start = mapping->vaddr;
uintptr_t m_end = mapping->vaddr + mapping->size;
/* check overlap */
if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
uintptr_t free_vend = (end_vaddr < m_end) ? end_vaddr : m_end;
size_t free_size = free_vend - free_vstart;
uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);
pmm_free (ppage_to_free, free_size / PAGE_SIZE);
/* split in the middle */
if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
tail_mapping->vaddr = end_vaddr;
tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
tail_mapping->size = m_end - end_vaddr;
mapping->size = start_vaddr - m_start;
list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
&tail_mapping->proc_mappings_link);
used_tail_mapping = true;
break;
} else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) { /* shrink left */
size_t diff = end_vaddr - m_start;
mapping->vaddr += diff;
mapping->paddr += diff;
mapping->size -= diff;
} else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) { /* shrink right */
mapping->size = start_vaddr - m_start;
} else { /* full overlap */
list_remove (procgroup->mappings, &mapping->proc_mappings_link);
free (mapping);
}
}
}
if (!used_tail_mapping)
free (tail_mapping);
for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
mm_unmap_page (&procgroup->pd, vpage);
}
spin_unlock (&procgroup->lock, &ctxpg);
return true;
}
struct procgroup* procgroup_create (void) {
spin_lock_ctx_t ctxpgtr;
struct procgroup* procgroup = malloc (sizeof (*procgroup));
if (procgroup == NULL) {
return NULL;
}
procgroup->refs = 0;
procgroup->memb_proc_tree = NULL;
procgroup->lock = SPIN_LOCK_INIT;
procgroup->pgid = atomic_fetch_add (&pgids, 1);
procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
procgroup->map_base = PROC_MAP_BASE;
spin_lock (&procgroup_tree_lock, &ctxpgtr);
rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
procgroup_tree_link, pgid);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
return procgroup;
}
void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxpr;
spin_lock (&procgroup->lock, &ctxpg);
spin_lock (&proc->lock, &ctxpr);
rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
procgroup_memb_tree_link, pid);
atomic_fetch_add (&procgroup->refs, 1);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
}
void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr;
spin_lock (&procgroup->lock, &ctxpg);
spin_lock (&proc->lock, &ctxpr);
rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
int refs = atomic_fetch_sub (&procgroup->refs, 1);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&procgroup->lock, &ctxpg);
if (refs == 1) {
spin_lock (&procgroup_tree_lock, &ctxpgtr);
spin_lock (&procgroup->lock, &ctxpg);
rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
spin_unlock (&procgroup->lock, &ctxpg);
spin_unlock (&procgroup_tree_lock, &ctxpgtr);
/* delete resources */
struct rb_node_link* rnode;
rbtree_first (&procgroup->resource_tree, rnode);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
struct proc_resource* resource =
rbtree_entry (rnode, struct proc_resource, resource_tree_link);
rnode = next;
proc_delete_resource (resource);
}
struct list_node_link *mapping_link, *mapping_link_tmp;
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
struct proc_mapping* mapping =
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
free (mapping);
}
pmm_free (procgroup->pd.cr3_paddr, 1);
free (procgroup->tls.tls_tmpl);
free (procgroup);
}
}
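A rough sketch of how these procgroup calls compose (illustrative only; pg, proc and the 4-page mapping are placeholders, error handling is omitted, and the real wiring lives in the process-creation path):

    struct procgroup* pg = procgroup_create ();
    procgroup_attach (pg, proc);                  /* refs: 0 -> 1 */
    uintptr_t paddr;
    uintptr_t vaddr = procgroup_map (pg, 0, 4, MM_PG_PRESENT | MM_PG_USER | MM_PG_RW, &paddr);
    /* ... use the mapping ... */
    procgroup_unmap (pg, vaddr, 4);
    procgroup_detach (pg, proc);                  /* last detach frees mappings, resources and the group */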

43
kernel/proc/procgroup.h Normal file
View File

@@ -0,0 +1,43 @@
#ifndef _KERNEL_PROC_PROCGROUP_H
#define _KERNEL_PROC_PROCGROUP_H
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#include <sys/procgroup.h>
struct proc;
struct proc_mapping {
struct list_node_link proc_mappings_link;
uintptr_t paddr;
uintptr_t vaddr;
size_t size;
};
struct procgroup {
int pgid;
struct rb_node_link procgroup_tree_link;
struct rb_node_link* memb_proc_tree;
spin_lock_t lock;
atomic_int refs;
struct rb_node_link* resource_tree;
atomic_int sys_rids;
struct pd pd;
struct list_node_link* mappings;
uintptr_t map_base;
struct procgroup_tls tls;
};
struct procgroup* procgroup_create (void);
void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
uintptr_t* out_paddr);
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
#endif // _KERNEL_PROC_PROCGROUP_H

View File

@@ -1,4 +1,5 @@
#include <libk/assert.h>
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <libk/string.h>
@@ -6,116 +7,53 @@
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
void proc_cleanup_resources (struct proc* proc) {
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid) {
spin_lock_ctx_t ctxpg;
struct proc_resource* resource = NULL;
struct rb_node_link* rnode;
rbtree_first (&proc->resource_tree, rnode);
spin_lock (&procgroup->lock, &ctxpg);
rbtree_find (struct proc_resource, &procgroup->resource_tree, rid, resource, resource_tree_link,
rid);
spin_unlock (&procgroup->lock, &ctxpg);
while (rnode) {
struct rb_node_link* next;
rbtree_next (rnode, next);
resource = rbtree_entry (rnode, struct proc_resource, proc_resource_tree_link);
proc_drop_resource (proc, resource);
rnode = next;
return resource;
}
assert (proc->resource_tree == NULL);
}
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) {
spin_lock_ctx_t ctxpg;
struct proc_resource* resource;
void proc_drop_resource (struct proc* proc, struct proc_resource* resource) {
DEBUG ("resource=%p, type=%d, rid=%d\n", resource, resource->type, resource->rid);
resource = proc_find_resource (procgroup, rid);
if (resource != NULL)
return resource;
if (atomic_fetch_sub (&resource->refs, 1) == 1) {
spin_lock (&proc->lock);
rbtree_delete (&proc->resource_tree, &resource->proc_resource_tree_link);
spin_unlock (&proc->lock);
resource->ops.cleanup (proc, resource);
free (resource);
}
}
static bool proc_create_resource_mem (struct proc_resource_mem* mem,
struct proc_resource_mem_init* init) {
if (init->pages == 0)
return false;
uintptr_t paddr = pmm_alloc (init->pages);
if (paddr == PMM_ALLOC_ERR)
return false;
mem->paddr = paddr;
mem->pages = init->pages;
return true;
}
static void proc_cleanup_resource_mem (struct proc* proc, struct proc_resource* resource) {
(void)proc;
pmm_free (resource->u.mem.paddr, resource->u.mem.pages);
}
static bool proc_create_resource_mutex (struct proc_mutex* mutex) {
memset (mutex, 0, sizeof (*mutex));
return true;
}
static void proc_cleanup_resource_mutex (struct proc* proc, struct proc_resource* resource) {
struct proc_mutex* mutex = &resource->u.mutex;
proc_mutex_unlock (proc, mutex);
}
struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
void* data) {
/* Check if resource RID already exists */
struct proc_resource* resource_check;
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource_check,
proc_resource_tree_link, rid);
if (resource_check != NULL)
return NULL;
struct proc_resource* resource = malloc (sizeof (*resource));
resource = malloc (sizeof (*resource));
if (resource == NULL)
return NULL;
memset (resource, 0, sizeof (*resource));
resource->lock = SPIN_LOCK_INIT;
resource->type = type;
resource->refs = 1;
resource->rid = rid;
resource->visibility = vis;
switch (resource->type) {
case PR_MEM: {
struct proc_resource_mem_init* mem_init = data;
proc_create_resource_mem (&resource->u.mem, mem_init);
resource->ops.cleanup = &proc_cleanup_resource_mem;
DEBUG ("PR_MEM resource=%p type=%d rid=%d paddr=%p, pages=%zu\n", resource, resource->type,
resource->rid, resource->u.mem.paddr, resource->u.mem.pages);
} break;
case PR_MUTEX: {
proc_create_resource_mutex (&resource->u.mutex);
resource->ops.cleanup = &proc_cleanup_resource_mutex;
DEBUG ("PR_MUTEX resource=%p, type=%d rid=%d\n", resource, resource->type, resource->rid);
} break;
default: {
free (resource);
return NULL;
} break;
}
resource->u.mutex.resource = resource;
resource->rid = rid;
resource->type = PR_MUTEX;
spin_lock (&proc->lock);
rbtree_insert (struct proc_resource, &proc->resource_tree, &resource->proc_resource_tree_link,
proc_resource_tree_link, rid);
spin_unlock (&proc->lock);
spin_lock (&procgroup->lock, &ctxpg);
rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
resource_tree_link, rid);
spin_unlock (&procgroup->lock, &ctxpg);
return resource;
}
bool proc_delete_resource (struct proc_resource* resource) {
bool reschedule = resource->ops.cleanup (resource);
free (resource);
return reschedule;
}

View File

@@ -1,47 +1,32 @@
#ifndef _KERNEL_PROC_RESOURCE_H
#define _KERNEL_PROC_RESOURCE_H
#include <libk/list.h>
#include <libk/rbtree.h>
#include <libk/std.h>
#include <proc/mutex.h>
#include <sync/spin_lock.h>
#define PR_MEM 0
#define PR_MUTEX 1
#define RV_PRIVATE 0
#define RV_PUBLIC 1
struct proc;
struct proc_resource_mem {
uintptr_t paddr;
size_t pages;
};
struct proc_resource_mem_init {
size_t pages;
};
struct procgroup;
struct proc_resource {
int type;
int rid;
int visibility;
spin_lock_t lock;
atomic_int refs;
struct rb_node_link proc_resource_tree_link;
struct rb_node_link resource_tree_link;
union {
struct proc_resource_mem mem;
struct proc_mutex mutex;
} u;
struct {
void (*cleanup) (struct proc* proc, struct proc_resource* resource);
bool (*cleanup) (struct proc_resource* resource);
} ops;
};
struct proc_resource* proc_create_resource (struct proc* proc, int rid, int type, int vis,
void* data);
void proc_drop_resource (struct proc* proc, struct proc_resource* resource);
void proc_cleanup_resources (struct proc* proc);
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
bool proc_delete_resource (struct proc_resource* resource);
#endif // _KERNEL_PROC_RESOURCE_H

View File

@@ -1,7 +1,11 @@
c += proc/proc.c \
proc/resource.c \
proc/mutex.c
proc/mutex.c \
proc/procgroup.c \
proc/suspension_q.c
o += proc/proc.o \
proc/resource.o \
proc/mutex.o
proc/mutex.o \
proc/procgroup.o \
proc/suspension_q.o

111
kernel/proc/suspension_q.c Normal file
View File

@@ -0,0 +1,111 @@
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/resource.h>
#include <proc/suspension_q.h>
#include <sync/spin_lock.h>
#include <sys/smp.h>
#include <sys/spin_lock.h>
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl) {
spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
struct cpu* cpu = proc->cpu;
struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
if (!sq_entry) {
spin_unlock (resource_lock, ctxrl);
return PROC_NO_RESCHEDULE;
}
sq_entry->proc = proc;
sq_entry->sq = sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
spin_unlock (resource_lock, ctxrl);
atomic_store (&proc->state, PROC_SUSPENDED);
/* append to sq's list */
list_append (sq->proc_list, &sq_entry->sq_link);
/* append to proc's list */
list_append (proc->sq_entries, &sq_entry->proc_link);
list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_sub (&cpu->proc_run_q_count, 1);
if (cpu->proc_current == proc)
cpu->proc_current = NULL;
proc->cpu = NULL;
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
return PROC_NEED_RESCHEDULE;
}
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
struct cpu* cpu = cpu_find_lightest ();
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&cpu->lock, &ctxcpu);
spin_lock (&proc->lock, &ctxpr);
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
proc->cpu = cpu;
if (proc->sq_entries == NULL)
atomic_store (&proc->state, PROC_READY);
list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
atomic_fetch_add (&cpu->proc_run_q_count, 1);
spin_unlock (&sq->lock, &ctxsq);
spin_unlock (&proc->lock, &ctxpr);
spin_unlock (&cpu->lock, &ctxcpu);
free (sq_entry);
return PROC_NEED_RESCHEDULE;
}
void proc_sqs_cleanup (struct proc* proc) {
spin_lock_ctx_t ctxsq, ctxpr;
spin_lock (&proc->lock, &ctxpr);
/* clean suspension queue entries */
struct list_node_link *sq_link, *sq_link_tmp;
list_foreach (proc->sq_entries, sq_link, sq_link_tmp) {
struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link);
struct proc_suspension_q* sq = sq_entry->sq;
spin_lock (&sq->lock, &ctxsq);
/* remove from sq's list */
list_remove (sq->proc_list, &sq_entry->sq_link);
/* remove from proc's list */
list_remove (proc->sq_entries, &sq_entry->proc_link);
spin_unlock (&sq->lock, &ctxsq);
free (sq_entry);
}
spin_unlock (&proc->lock, &ctxpr);
}
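Note that proc_sq_suspend () takes the caller's already-held resource lock together with its context and releases it only after the cpu, proc and queue locks are acquired, which avoids a window where a wakeup could be lost. A caller on the mutex path therefore looks roughly like this (a sketch of the shape of such a caller, not the actual proc_mutex_lock ()):

    spin_lock_ctx_t ctxmt;
    spin_lock (&mutex->resource->lock, &ctxmt);
    /* mutex already held by someone else: park this process;
       proc_sq_suspend () drops resource->lock for us */
    return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);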

View File

@@ -1,12 +1,26 @@
#ifndef _KERNEL_PROC_SUSPENTION_Q_H
#define _KERNEL_PROC_SUSPENTION_Q_H
#include <libk/rbtree.h>
#include <libk/list.h>
#include <sync/spin_lock.h>
struct proc;
struct proc_suspension_q {
struct rb_node_link* proc_tree;
struct list_node_link* proc_list;
spin_lock_t lock;
};
struct proc_sq_entry {
struct list_node_link sq_link;
struct list_node_link proc_link;
struct proc* proc;
struct proc_suspension_q* sq;
};
void proc_sqs_cleanup (struct proc* proc);
bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
spin_lock_ctx_t* ctxrl);
bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
#endif // _KERNEL_PROC_SUSPENTION_Q_H

View File

@@ -1,59 +0,0 @@
#include <libk/assert.h>
#include <libk/std.h>
#include <sync/rw_spin_lock.h>
#include <sys/debug.h>
#include <sys/spin_lock.h>
#define WRITER_WAIT (1U << 31)
#define READER_MASK (~WRITER_WAIT)
void rw_spin_read_lock (rw_spin_lock_t* rw) {
uint32_t value;
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);
if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, value + 1, memory_order_acquire,
memory_order_relaxed)) {
return;
}
}
spin_lock_relax ();
}
}
void rw_spin_read_unlock (rw_spin_lock_t* rw) {
uint32_t old = atomic_fetch_sub_explicit (rw, 1, memory_order_release);
assert ((old & READER_MASK) > 0);
}
void rw_spin_write_lock (rw_spin_lock_t* rw) {
uint32_t value;
/* announce writer */
for (;;) {
value = atomic_load_explicit (rw, memory_order_relaxed);
if ((value & WRITER_WAIT) == 0) {
if (atomic_compare_exchange_weak_explicit (rw, &value, (value | WRITER_WAIT),
memory_order_acquire, memory_order_relaxed))
break;
} else
spin_lock_relax ();
}
/* wait for readers */
for (;;) {
value = atomic_load_explicit (rw, memory_order_acquire);
if ((value & READER_MASK) == 0)
return;
spin_lock_relax ();
}
}
void rw_spin_write_unlock (rw_spin_lock_t* rw) {
atomic_store_explicit (rw, 0, memory_order_release);
}

View File

@@ -1,16 +0,0 @@
#ifndef _KERNEL_SYNC_RW_SPIN_LOCK_H
#define _KERNEL_SYNC_RW_SPIN_LOCK_H
#include <libk/std.h>
#include <sync/spin_lock.h>
#define RW_SPIN_LOCK_INIT 0
typedef _Atomic (uint32_t) rw_spin_lock_t;
void rw_spin_read_lock (rw_spin_lock_t* rw);
void rw_spin_read_unlock (rw_spin_lock_t* rw);
void rw_spin_write_lock (rw_spin_lock_t* rw);
void rw_spin_write_unlock (rw_spin_lock_t* rw);
#endif // _KERNEL_SYNC_RW_SPIN_LOCK_H

View File

@@ -3,15 +3,15 @@
#include <sys/irq.h>
#include <sys/spin_lock.h>
void spin_lock (spin_lock_t* sl) {
irq_save ();
void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
irq_save (ctx);
while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire))
spin_lock_relax ();
}
void spin_unlock (spin_lock_t* sl) {
void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx) {
atomic_flag_clear_explicit (sl, memory_order_release);
irq_restore ();
irq_restore (ctx);
}
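Every call site now passes a per-acquisition spin_lock_ctx_t in which irq_save ()/irq_restore () stash the saved interrupt flags, so the usage pattern becomes (some_lock stands in for any spin_lock_t):

    spin_lock_ctx_t ctx;
    spin_lock (&some_lock, &ctx);
    /* critical section */
    spin_unlock (&some_lock, &ctx);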

View File

@@ -2,12 +2,13 @@
#define _KERNEL_SYNC_SPIN_LOCK_H
#include <libk/std.h>
#include <sys/spin_lock.h>
#define SPIN_LOCK_INIT ATOMIC_FLAG_INIT
typedef atomic_flag spin_lock_t;
void spin_lock (spin_lock_t* sl);
void spin_unlock (spin_lock_t* sl);
void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx);
#endif // _KERNEL_SYNC_SPIN_LOCK_H

View File

@@ -1,5 +1,3 @@
c += sync/spin_lock.c \
sync/rw_spin_lock.c
c += sync/spin_lock.c
o += sync/spin_lock.o \
sync/rw_spin_lock.o
o += sync/spin_lock.o

View File

@@ -1,7 +1,9 @@
#ifndef _KERNEL_SYS_IRQ_H
#define _KERNEL_SYS_IRQ_H
void irq_save (void);
void irq_restore (void);
#include <sys/spin_lock.h>
void irq_save (spin_lock_ctx_t* ctx);
void irq_restore (spin_lock_ctx_t* ctx);
#endif // _KERNEL_SYS_IRQ_H

View File

@@ -2,6 +2,7 @@
#define _KERNEL_SYS_MM_H
#include <libk/std.h>
#include <sync/spin_lock.h>
#if defined(__x86_64__)
#include <amd64/mm.h>
@@ -10,21 +11,19 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
#define MM_PD_RELOAD (1 << 30)
#define MM_PD_LOCK (1 << 31)
uintptr_t mm_alloc_user_pd_phys (void);
void mm_reload (void);
void mm_kernel_lock (spin_lock_ctx_t* ctx);
void mm_kernel_unlock (spin_lock_ctx_t* ctx);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
void mm_lock_kernel (void);
void mm_unlock_kernel (void);
bool mm_validate (struct pd* pd, uintptr_t vaddr, uint32_t flags);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size, uint32_t flags);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr, uint32_t flags);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr);
void mm_unmap_kernel_page (uintptr_t vaddr);
bool mm_validate (struct pd* pd, uintptr_t vaddr);
bool mm_validate_buffer (struct pd* pd, uintptr_t vaddr, size_t size);
uintptr_t mm_p2v (struct pd* pd, uintptr_t paddr);
uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr);
struct pd* mm_get_kernel_pd (void);
void mm_init (void);
#endif // _KERNEL_SYS_MM_H

View File

@@ -6,6 +6,9 @@
struct proc;
struct proc* proc_from_elf (uint8_t* elf_contents);
struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
uintptr_t argument_ptr);
void proc_cleanup (struct proc* proc);
void proc_init_tls (struct proc* proc);
#endif // _KERNEL_SYS_PROC_H

8
kernel/sys/procgroup.h Normal file
View File

@@ -0,0 +1,8 @@
#ifndef _KERNEL_SYS_PROCGROUP_H
#define _KERNEL_SYS_PROCGROUP_H
#if defined(__x86_64__)
#include <amd64/procgroup.h>
#endif
#endif // _KERNEL_SYS_PROCGROUP_H

View File

@@ -4,6 +4,6 @@
#include <libk/std.h>
#include <proc/proc.h>
void do_sched (struct proc* proc);
void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu);
#endif // _KERNEL_SYS_SCHED_H

View File

@@ -1,6 +1,12 @@
#ifndef _KERNEL_SYS_SPIN_LOCK_H
#define _KERNEL_SYS_SPIN_LOCK_H
#include <libk/std.h>
#if defined(__x86_64__)
typedef uint64_t spin_lock_ctx_t;
#endif
void spin_lock_relax (void);
#endif // _KERNEL_SYS_SPIN_LOCK_H

View File

@@ -1,168 +1,180 @@
#include <aux/compiler.h>
#include <libk/assert.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <m/status.h>
#include <m/syscall_defs.h>
#include <mm/pmm.h>
#include <proc/mutex.h>
#include <proc/proc.h>
#include <proc/procgroup.h>
#include <proc/resource.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/proc.h>
#include <syscall/syscall.h>
#define DEFINE_SYSCALL(name) \
int name (struct proc* proc, void* UNUSED regs, uintptr_t UNUSED a1, uintptr_t UNUSED a2, \
uintptr_t UNUSED a3, uintptr_t UNUSED a4, uintptr_t UNUSED a5, uintptr_t UNUSED a6)
uintptr_t name (struct proc* UNUSED proc, void* UNUSED regs, uintptr_t UNUSED a1, \
uintptr_t UNUSED a2, uintptr_t UNUSED a3, uintptr_t UNUSED a4, \
uintptr_t UNUSED a5, uintptr_t UNUSED a6)
/* int proc_quit (void) */
DEFINE_SYSCALL (sys_proc_quit) {
proc_kill (proc, regs);
return SR_OK;
#define SYSRESULT(x) ((uintptr_t)(x))
static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
spin_lock_ctx_t ctxpg;
spin_lock (&proc->procgroup->lock, &ctxpg);
if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) {
spin_unlock (&proc->procgroup->lock, &ctxpg);
return NULL;
}
/* int proc_test (void) */
DEFINE_SYSCALL (sys_proc_test) {
uintptr_t out_paddr = mm_v2p (&proc->procgroup->pd, uvaddr);
spin_unlock (&proc->procgroup->lock, &ctxpg);
uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;
return (void*)out_kvaddr;
}
/* int quit (void) */
DEFINE_SYSCALL (sys_quit) {
proc_kill (proc);
return SYSRESULT (ST_OK);
}
/* int test (void) */
DEFINE_SYSCALL (sys_test) {
char c = (char)a1;
DEBUG ("test syscall! %c\n", c);
return SR_OK;
DEBUG ("test syscall from %d! %c\n", proc->pid, c);
return SYSRESULT (ST_OK);
}
/* int proc_map (uintptr_t paddr, uintptr_t vaddr, size_t pages, uint32_t flags) */
DEFINE_SYSCALL (sys_proc_map) {
uintptr_t paddr = a1;
uintptr_t vaddr = a2;
size_t pages = (size_t)a3;
uint32_t flags = (uint32_t)a4;
/* int map (uintptr_t vaddr, size_t pages, uint32_t flags) */
DEFINE_SYSCALL (sys_map) {
uintptr_t vaddr = a1;
size_t pages = (size_t)a2;
uint32_t flags = (uint32_t)a3;
if (vaddr % PAGE_SIZE != 0)
return -SR_UNALIGNED;
return SYSRESULT (-ST_UNALIGNED);
if (paddr % PAGE_SIZE != 0)
return -SR_UNALIGNED;
bool ok = proc_map (proc, paddr, vaddr, pages, flags);
return ok ? SR_OK : -SR_OOM_ERROR;
return SYSRESULT (procgroup_map (proc->procgroup, vaddr, pages, flags, NULL));
}
/* int proc_unmap (uintptr_t vaddr, size_t pages) */
DEFINE_SYSCALL (sys_proc_unmap) {
/* int unmap (uintptr_t vaddr, size_t pages) */
DEFINE_SYSCALL (sys_unmap) {
uintptr_t vaddr = a1;
size_t pages = (size_t)a2;
if (vaddr % PAGE_SIZE != 0)
return -SR_UNALIGNED;
return SYSRESULT (-ST_UNALIGNED);
bool ok = proc_unmap (proc, vaddr, pages);
return ok ? SR_OK : -SR_OOM_ERROR;
return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
}
/* int proc_create_resource_mem (size_t pages, int vis, uintptr_t* out_paddr) */
DEFINE_SYSCALL (sys_proc_create_resource_mem) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
/* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
DEFINE_SYSCALL (sys_clone) {
uintptr_t vstack_top = a1;
uintptr_t entry = a2;
uintptr_t argument_ptr = a3;
size_t pages = (size_t)a1;
int vis = (int)a2;
uintptr_t* out_paddr_buf = (uintptr_t*)a3;
struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);
spin_lock (&proc->pd.lock);
uintptr_t out_paddr_buf_paddr = mm_v2p (&proc->pd, (uintptr_t)out_paddr_buf, 0);
if (!mm_validate_buffer (&proc->pd, (uintptr_t)out_paddr_buf, sizeof (uintptr_t), 0)) {
spin_unlock (&proc->pd.lock);
return -SR_BAD_ADDRESS_SPACE;
if (new == NULL) {
return SYSRESULT (-ST_OOM_ERROR);
}
spin_unlock (&proc->pd.lock);
int pid = new->pid;
uintptr_t* out_paddr_buf_vaddr = (uintptr_t*)((uintptr_t)hhdm->offset + out_paddr_buf_paddr);
proc_register (new, NULL);
int rid = atomic_fetch_add (&proc->rids, 1);
struct proc_resource_mem_init mem_init = {.pages = pages};
struct proc_resource* r = proc_create_resource (proc, rid, PR_MEM, vis, &mem_init);
if (r != NULL) {
*out_paddr_buf_vaddr = r->u.mem.paddr;
return r->rid;
} else {
return -SR_OOM_ERROR;
}
return SYSRESULT (pid);
}
/* int proc_create_resource_mutex (int vis) */
DEFINE_SYSCALL (sys_proc_create_resource_mutex) {
int vis = (int)a1;
/* void* argument_ptr (void) */
DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
int rid = atomic_fetch_add (&proc->rids, 1);
struct proc_resource* r = proc_create_resource (proc, rid, PR_MUTEX, vis, NULL);
if (r != NULL)
return r->rid;
else
return -SR_OOM_ERROR;
/* int sched (void) */
DEFINE_SYSCALL (sys_sched) {
proc_sched ();
return SYSRESULT (ST_OK);
}
/* int proc_mutex_lock (int mutex_rid) */
DEFINE_SYSCALL (sys_proc_mutex_lock) {
int rid = (int)a1;
/* int mutex_create (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_create) {
int mutex_rid = (int)a1;
struct proc_resource* resource;
spin_lock (&proc->lock);
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link,
rid);
spin_unlock (&proc->lock);
struct proc_resource* mutex_resource = proc_create_resource_mutex (proc->procgroup, mutex_rid);
if (resource == NULL)
return -SR_NOT_FOUND;
if (mutex_resource == NULL)
return SYSRESULT (-ST_OOM_ERROR);
proc_mutex_lock (proc, &resource->u.mutex);
return SR_OK;
return SYSRESULT (mutex_resource->rid);
}
DEFINE_SYSCALL (sys_proc_mutex_unlock) {
int rid = (int)a1;
/* int mutex_delete (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_delete) {
int mutex_rid = (int)a1;
struct proc_resource* resource;
spin_lock (&proc->lock);
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link,
rid);
spin_unlock (&proc->lock);
struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
if (resource == NULL)
return -SR_NOT_FOUND;
if (mutex_resource == NULL)
return SYSRESULT (-ST_NOT_FOUND);
return proc_mutex_unlock (proc, &resource->u.mutex) ? SR_OK : -SR_PERMISSION_ERROR;
if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
proc_sched ();
return SYSRESULT (ST_OK);
}
/* int proc_drop_resource (int rid) */
DEFINE_SYSCALL (sys_proc_drop_resource) {
int rid = (int)a1;
/* int mutex_lock (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_lock) {
int mutex_rid = (int)a1;
struct proc_resource* resource;
spin_lock (&proc->lock);
rbtree_find (struct proc_resource, &proc->resource_tree, rid, resource, proc_resource_tree_link,
rid);
spin_unlock (&proc->lock);
struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
if (resource == NULL)
return -SR_NOT_FOUND;
if (mutex_resource == NULL)
return SYSRESULT (-ST_NOT_FOUND);
proc_drop_resource (proc, resource);
if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
proc_sched ();
return SR_OK;
return SYSRESULT (ST_OK);
}
/* int mutex_unlock (int mutex_rid) */
DEFINE_SYSCALL (sys_mutex_unlock) {
int mutex_rid = (int)a1;
struct proc_resource* mutex_resource = proc_find_resource (proc->procgroup, mutex_rid);
if (mutex_resource == NULL)
return SYSRESULT (-ST_NOT_FOUND);
if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
proc_sched ();
return SYSRESULT (ST_OK);
}
static syscall_handler_func_t handler_table[] = {
[SYS_PROC_QUIT] = &sys_proc_quit,
[SYS_PROC_TEST] = &sys_proc_test,
[SYS_PROC_MAP] = &sys_proc_map,
[SYS_PROC_UNMAP] = &sys_proc_unmap,
[SYS_PROC_CREATE_RESOURCE_MEM] = &sys_proc_create_resource_mem,
[SYS_PROC_DROP_RESOURCE] = &sys_proc_drop_resource,
[SYS_PROC_CREATE_RESOURCE_MUTEX] = &sys_proc_create_resource_mutex,
[SYS_PROC_MUTEX_LOCK] = &sys_proc_mutex_lock,
[SYS_PROC_MUTEX_UNLOCK] = &sys_proc_mutex_unlock,
[SYS_QUIT] = &sys_quit,
[SYS_TEST] = &sys_test,
[SYS_MAP] = &sys_map,
[SYS_UNMAP] = &sys_unmap,
[SYS_CLONE] = &sys_clone,
[SYS_ARGUMENT_PTR] = &sys_argument_ptr,
[SYS_SCHED] = &sys_sched,
[SYS_MUTEX_CREATE] = &sys_mutex_create,
[SYS_MUTEX_DELETE] = &sys_mutex_delete,
[SYS_MUTEX_LOCK] = &sys_mutex_lock,
[SYS_MUTEX_UNLOCK] = &sys_mutex_unlock,
};
syscall_handler_func_t syscall_find_handler (int syscall_num) {

View File

@@ -4,8 +4,9 @@
#include <libk/std.h>
#include <proc/proc.h>
typedef int (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1, uintptr_t a2,
uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6);
typedef uintptr_t (*syscall_handler_func_t) (struct proc* proc, void* regs, uintptr_t a1,
uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5,
uintptr_t a6);
syscall_handler_func_t syscall_find_handler (int syscall_num);

1
libmsl/alloc/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.o

386
libmsl/alloc/liballoc.c Normal file
View File

@@ -0,0 +1,386 @@
/* liballoc breaks when optimized too aggressively, e.g. with clang's -Oz */
#pragma clang optimize off
#include <alloc/liballoc.h>
#include <m/system.h>
#define LIBALLOC_MUTEX 500
void liballoc_init (void) { mutex_create (LIBALLOC_MUTEX); }
void liballoc_deinit (void) { mutex_delete (LIBALLOC_MUTEX); }
int liballoc_lock (void) { return mutex_lock (LIBALLOC_MUTEX); }
int liballoc_unlock (void) { return mutex_unlock (LIBALLOC_MUTEX); }
void* liballoc_alloc (int pages) { return map (0, pages, MAP_FLAGS | MAP_RW); }
int liballoc_free (void* ptr, int pages) { return unmap ((uintptr_t)ptr, pages); }
/** Durand's Ridiculously Amazing Super Duper Memory functions. */
// #define DEBUG
#define LIBALLOC_MAGIC 0xc001c0de
#define MAXCOMPLETE 5
#define MAXEXP 32
#define MINEXP 8
#define MODE_BEST 0
#define MODE_INSTANT 1
#define MODE MODE_BEST
struct boundary_tag* l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
static int l_initialized = 0; //< Flag to indicate initialization.
static int l_pageSize = PAGE_SIZE; //< Individual page size
static int l_pageCount = 16; //< Minimum number of pages to allocate.
// *********** HELPER FUNCTIONS *******************************
/** Returns the exponent required to manage 'size' amount of memory.
*
* Returns n where 2^n <= size < 2^(n+1)
*/
static inline int getexp (unsigned int size) {
if (size < (1 << MINEXP)) {
return -1; // Smaller than the quantum.
}
int shift = MINEXP;
while (shift < MAXEXP) {
if ((1 << shift) > size)
break;
shift += 1;
}
return shift - 1;
}
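/* Worked example (with MINEXP = 8): getexp (4096) advances shift until
 * (1 << 13) = 8192 > 4096, then returns 13 - 1 = 12, i.e. 2^12 <= 4096 < 2^13. */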
static void* liballoc_memset (void* s, int c, size_t n) {
size_t i;
for (i = 0; i < n; i++)
((char*)s)[i] = c;
return s;
}
static void* liballoc_memcpy (void* s1, const void* s2, size_t n) {
char* cdest;
char* csrc;
unsigned int* ldest = (unsigned int*)s1;
unsigned int* lsrc = (unsigned int*)s2;
while (n >= sizeof (unsigned int)) {
*ldest++ = *lsrc++;
n -= sizeof (unsigned int);
}
cdest = (char*)ldest;
csrc = (char*)lsrc;
while (n > 0) {
*cdest++ = *csrc++;
n -= 1;
}
return s1;
}
static inline void insert_tag (struct boundary_tag* tag, int index) {
int realIndex;
if (index < 0) {
realIndex = getexp (tag->real_size - sizeof (struct boundary_tag));
if (realIndex < MINEXP)
realIndex = MINEXP;
} else
realIndex = index;
tag->index = realIndex;
if (l_freePages[realIndex] != NULL) {
l_freePages[realIndex]->prev = tag;
tag->next = l_freePages[realIndex];
}
l_freePages[realIndex] = tag;
}
static inline void remove_tag (struct boundary_tag* tag) {
if (l_freePages[tag->index] == tag)
l_freePages[tag->index] = tag->next;
if (tag->prev != NULL)
tag->prev->next = tag->next;
if (tag->next != NULL)
tag->next->prev = tag->prev;
tag->next = NULL;
tag->prev = NULL;
tag->index = -1;
}
static inline struct boundary_tag* melt_left (struct boundary_tag* tag) {
struct boundary_tag* left = tag->split_left;
left->real_size += tag->real_size;
left->split_right = tag->split_right;
if (tag->split_right != NULL)
tag->split_right->split_left = left;
return left;
}
static inline struct boundary_tag* absorb_right (struct boundary_tag* tag) {
struct boundary_tag* right = tag->split_right;
remove_tag (right); // Remove right from free pages.
tag->real_size += right->real_size;
tag->split_right = right->split_right;
if (right->split_right != NULL)
right->split_right->split_left = tag;
return tag;
}
static inline struct boundary_tag* split_tag (struct boundary_tag* tag) {
unsigned int remainder = tag->real_size - sizeof (struct boundary_tag) - tag->size;
struct boundary_tag* new_tag =
(struct boundary_tag*)((uintptr_t)tag + sizeof (struct boundary_tag) + tag->size);
new_tag->magic = LIBALLOC_MAGIC;
new_tag->real_size = remainder;
new_tag->next = NULL;
new_tag->prev = NULL;
new_tag->split_left = tag;
new_tag->split_right = tag->split_right;
if (new_tag->split_right != NULL)
new_tag->split_right->split_left = new_tag;
tag->split_right = new_tag;
tag->real_size -= new_tag->real_size;
insert_tag (new_tag, -1);
return new_tag;
}
// ***************************************************************
static struct boundary_tag* allocate_new_tag (unsigned int size) {
unsigned int pages;
unsigned int usage;
struct boundary_tag* tag;
// This is how much space is required.
usage = size + sizeof (struct boundary_tag);
// Perfect amount of space
pages = usage / l_pageSize;
if ((usage % l_pageSize) != 0)
pages += 1;
// Make sure it's >= the minimum size.
if (pages < (unsigned int)l_pageCount)
pages = l_pageCount;
tag = (struct boundary_tag*)liballoc_alloc (pages);
if (tag == NULL)
return NULL; // uh oh, we ran out of memory.
tag->magic = LIBALLOC_MAGIC;
tag->size = size;
tag->real_size = pages * l_pageSize;
tag->index = -1;
tag->next = NULL;
tag->prev = NULL;
tag->split_left = NULL;
tag->split_right = NULL;
return tag;
}
void* malloc (size_t size) {
int index;
void* ptr;
struct boundary_tag* tag = NULL;
liballoc_lock ();
if (l_initialized == 0) {
for (index = 0; index < MAXEXP; index++) {
l_freePages[index] = NULL;
l_completePages[index] = 0;
}
l_initialized = 1;
}
index = getexp (size) + MODE;
if (index < MINEXP)
index = MINEXP;
// Find one big enough.
tag = l_freePages[index]; // Start at the front of the list.
while (tag != NULL) {
// If there's enough space in this tag.
if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
break;
}
tag = tag->next;
}
// No page found. Make one.
if (tag == NULL) {
if ((tag = allocate_new_tag (size)) == NULL) {
liballoc_unlock ();
return NULL;
}
index = getexp (tag->real_size - sizeof (struct boundary_tag));
} else {
remove_tag (tag);
if ((tag->split_left == NULL) && (tag->split_right == NULL))
l_completePages[index] -= 1;
}
// We have a free page. Remove it from the free pages list.
tag->size = size;
// Removed... see if we can re-use the excess space.
unsigned int remainder =
tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder
if (((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
int childIndex = getexp (remainder);
if (childIndex >= 0) {
struct boundary_tag* new_tag = split_tag (tag);
(void)new_tag;
}
}
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
liballoc_unlock ();
return ptr;
}
void free (void* ptr) {
int index;
struct boundary_tag* tag;
if (ptr == NULL)
return;
liballoc_lock ();
tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));
if (tag->magic != LIBALLOC_MAGIC) {
liballoc_unlock (); // release the lock
return;
}
// MELT LEFT...
while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
tag = melt_left (tag);
remove_tag (tag);
}
// MELT RIGHT...
while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
tag = absorb_right (tag);
}
// Where is it going back to?
index = getexp (tag->real_size - sizeof (struct boundary_tag));
if (index < MINEXP)
index = MINEXP;
// A whole, empty block?
if ((tag->split_left == NULL) && (tag->split_right == NULL)) {
if (l_completePages[index] == MAXCOMPLETE) {
// Too many standing by to keep. Free this one.
unsigned int pages = tag->real_size / l_pageSize;
if ((tag->real_size % l_pageSize) != 0)
pages += 1;
if (pages < (unsigned int)l_pageCount)
pages = l_pageCount;
liballoc_free (tag, pages);
liballoc_unlock ();
return;
}
l_completePages[index] += 1; // Increase the count of complete pages.
}
// ..........
insert_tag (tag, index);
liballoc_unlock ();
}
void* calloc (size_t nobj, size_t size) {
int real_size;
void* p;
real_size = nobj * size;
p = malloc (real_size);
liballoc_memset (p, 0, real_size);
return p;
}
void* realloc (void* p, size_t size) {
void* ptr;
struct boundary_tag* tag;
int real_size;
if (size == 0) {
free (p);
return NULL;
}
if (p == NULL)
return malloc (size);
if (&liballoc_lock != NULL)
liballoc_lock (); // lockit
tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
real_size = tag->size;
if (&liballoc_unlock != NULL)
liballoc_unlock ();
if ((size_t)real_size > size)
real_size = size;
ptr = malloc (size);
liballoc_memcpy (ptr, p, real_size);
free (p);
return ptr;
}

94
libmsl/alloc/liballoc.h Normal file
View File

@@ -0,0 +1,94 @@
#ifndef _LIBALLOC_H
#define _LIBALLOC_H
#include <stddef.h>
#include <stdint.h>
#define _ALLOC_SKIP_DEFINE
// If we are told to not define our own size_t, then we
// skip the define.
#ifndef _ALLOC_SKIP_DEFINE
#ifndef _HAVE_SIZE_T
#define _HAVE_SIZE_T
typedef unsigned int size_t;
#endif
#ifndef NULL
#define NULL 0
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** This is a boundary tag which is prepended to the
* page or section of a page which we have allocated. It is
* used to identify valid memory blocks that the
* application is trying to free.
*/
struct boundary_tag {
unsigned int magic; //< It's a kind of ...
unsigned int size; //< Requested size.
unsigned int real_size; //< Actual size.
int index; //< Location in the page table.
struct boundary_tag* split_left; //< Linked-list info for broken pages.
struct boundary_tag* split_right; //< The same.
struct boundary_tag* next; //< Linked list info.
struct boundary_tag* prev; //< Linked list info.
};
/** This function is supposed to lock the memory data structures. It
* could be as simple as disabling interrupts or acquiring a spinlock.
* It's up to you to decide.
*
* \return 0 if the lock was acquired successfully. Anything else is
* failure.
*/
extern int liballoc_lock (void);
/** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it
* had acquired a spinlock, it releases the spinlock, etc.
*
* \return 0 if the lock was successfully released.
*/
extern int liballoc_unlock (void);
/** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages
* required. The page size was set up in the liballoc_init function.
*
* \return NULL if the pages were not allocated.
* \return A pointer to the allocated memory.
*/
extern void* liballoc_alloc (int pages);
/** This frees previously allocated memory. The void* parameter passed
* to the function is the exact same value returned from a previous
* liballoc_alloc call.
*
* The integer value is the number of pages to free.
*
* \return 0 if the memory was successfully freed.
*/
extern int liballoc_free (void* ptr, int pages);
void* malloc (size_t); //< The standard function.
void* realloc (void*, size_t); //< The standard function.
void* calloc (size_t, size_t); //< The standard function.
void free (void*); //< The standard function.
void liballoc_init (void);
void liballoc_deinit (void);
#ifdef __cplusplus
}
#endif
#endif

3
libmsl/alloc/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += alloc/liballoc.c
o += alloc/liballoc.o

View File

@@ -2,7 +2,7 @@
#include <stddef.h>
#include <stdint.h>
int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6) {
uint64_t result;
__asm__ volatile ("movq %[a4], %%r10\n"
@@ -13,5 +13,5 @@ int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3
: "a"(syscall_num), "D"(a1), "S"(a2),
"d"(a3), [a4] "r"(a4), [a5] "r"(a5), [a6] "r"(a6)
: "r10", "r8", "r9", "r11", "rcx", "cc", "memory");
return (int)result;
return result;
}

View File

@@ -3,7 +3,7 @@
#include <stdint.h>
int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4,
uintptr_t a5, uintptr_t a6);
#endif // _LIBMSL_AMD64_SYSCALL_H

View File

@@ -1,4 +1,5 @@
#include <m/proc.h>
#include <alloc/liballoc.h>
#include <m/system.h>
#include <stdint.h>
extern volatile uint8_t __bss_start[];
@@ -6,7 +7,7 @@ extern volatile uint8_t __bss_end[];
extern void app_main (void);
static void msl_clear_bss (void) {
static void clear_bss (void) {
uint8_t* p = (uint8_t*)__bss_start;
while (p < __bss_end) {
*p++ = 0;
@@ -14,9 +15,9 @@ static void msl_clear_bss (void) {
}
void __premain (void) {
msl_clear_bss ();
clear_bss ();
liballoc_init ();
app_main ();
m_proc_quit ();
liballoc_deinit ();
quit ();
}

View File

@@ -1,37 +0,0 @@
#include <m/syscall.h>
#include <m/syscall_defs.h>
#include <stddef.h>
#include <stdint.h>
int m_proc_quit (void) { return m_syscall (SYS_PROC_QUIT, 0, 0, 0, 0, 0, 0); }
int m_proc_test (char c) { return m_syscall (SYS_PROC_TEST, (uintptr_t)c, 0, 0, 0, 0, 0); }
int m_proc_map (uintptr_t paddr, uintptr_t vaddr, size_t pages, uint32_t flags) {
return m_syscall (SYS_PROC_MAP, paddr, vaddr, (uintptr_t)pages, (uintptr_t)flags, 0, 0);
}
int m_proc_unmap (uintptr_t vaddr, size_t pages) {
return m_syscall (SYS_PROC_UNMAP, vaddr, (uintptr_t)pages, 0, 0, 0, 0);
}
int m_proc_create_resource_mem (size_t pages, int vis, uintptr_t* out_paddr) {
return m_syscall (SYS_PROC_CREATE_RESOURCE_MEM, (uintptr_t)pages, (uintptr_t)vis,
(uintptr_t)out_paddr, 0, 0, 0);
}
int m_proc_drop_resource (int rid) {
return m_syscall (SYS_PROC_DROP_RESOURCE, (uintptr_t)rid, 0, 0, 0, 0, 0);
}
int m_proc_create_resource_mutex (int vis) {
return m_syscall (SYS_PROC_CREATE_RESOURCE_MUTEX, (uintptr_t)vis, 0, 0, 0, 0, 0);
}
int m_proc_mutex_lock (int mutex_rid) {
return m_syscall (SYS_PROC_MUTEX_LOCK, (uintptr_t)mutex_rid, 0, 0, 0, 0, 0);
}
int m_proc_mutex_unlock (int mutex_rid) {
return m_syscall (SYS_PROC_MUTEX_UNLOCK, (uintptr_t)mutex_rid, 0, 0, 0, 0, 0);
}

View File

@@ -1,29 +0,0 @@
#ifndef _LIBMSL_M_PROC_H
#define _LIBMSL_M_PROC_H
#if defined(__x86_64__)
#define M_PROC_MAP_BASE 0x0000700000000000
#define M_PAGE_SIZE 4096
#endif
#define PM_PRESENT (1 << 0)
#define PM_RW (1 << 1)
#define PM_USER (1 << 2)
#define RV_PRIVATE 0
#define RV_PUBLIC 1
#include <stddef.h>
#include <stdint.h>
int m_proc_quit (void);
int m_proc_test (char c);
int m_proc_map (uintptr_t paddr, uintptr_t vaddr, size_t pages, uint32_t flags);
int m_proc_unmap (uintptr_t vaddr, size_t pages);
int m_proc_create_resource_mem (size_t pages, int vis, uintptr_t* out_paddr);
int m_proc_drop_resource (int rid);
int m_proc_create_resource_mutex (int vis);
int m_proc_mutex_lock (int mutex_rid);
int m_proc_mutex_unlock (int mutex_rid);
#endif // _LIBMSL_M_PROC_H

View File

@@ -1,3 +1,3 @@
c += m/proc.c
c += m/system.c
o += m/proc.o
o += m/system.o

View File

@@ -5,7 +5,7 @@
#if defined(__x86_64__)
#include <amd64/syscall.h>
#define m_syscall msl_amd64_syscall
#define syscall amd64_syscall
#endif
#endif // _LIBMSL_M_SYSCALL_H

36
libmsl/m/system.c Normal file
View File

@@ -0,0 +1,36 @@
#include <m/syscall.h>
#include <m/system.h>
#include <stddef.h>
#include <stdint.h>
#define do_syscall1(id, a1, a2, a3, a4, a5, a6, ...) \
syscall (id, (uintptr_t)a1, (uintptr_t)a2, (uintptr_t)a3, (uintptr_t)a4, (uintptr_t)a5, \
(uintptr_t)a6)
#define do_syscall(...) do_syscall1 (__VA_ARGS__, 0, 0, 0, 0, 0, 0)
int quit (void) { return do_syscall (SYS_QUIT, 0); }
int test (char c) { return do_syscall (SYS_TEST, c); }
int sched (void) { return do_syscall (SYS_SCHED, 0); }
void* map (uintptr_t vaddr, size_t pages, uint32_t flags) {
return (void*)do_syscall (SYS_MAP, vaddr, pages, flags);
}
int unmap (uintptr_t vaddr, size_t pages) { return do_syscall (SYS_UNMAP, vaddr, pages); }
int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr) {
return do_syscall (SYS_CLONE, vstack_top, entry, argument_ptr);
}
int mutex_create (int mutex_rid) { return do_syscall (SYS_MUTEX_CREATE, mutex_rid); }
int mutex_delete (int mutex_rid) { return do_syscall (SYS_MUTEX_DELETE, mutex_rid); }
int mutex_lock (int mutex_rid) { return do_syscall (SYS_MUTEX_LOCK, mutex_rid); }
int mutex_unlock (int mutex_rid) { return do_syscall (SYS_MUTEX_UNLOCK, mutex_rid); }
void* argument_ptr (void) { return (void*)do_syscall (SYS_ARGUMENT_PTR, 0); }
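
The do_syscall () helper is what keeps these wrappers short: the caller names only the arguments it needs, the trailing zeros in do_syscall () pad the rest, and do_syscall1 () drops any surplus into its variadic tail. Hand-expanded for illustration:

/* How a two-argument wrapper call expands:
 *
 *   do_syscall (SYS_UNMAP, vaddr, pages)
 *     -> do_syscall1 (SYS_UNMAP, vaddr, pages, 0, 0, 0, 0, 0, 0)
 *     -> syscall (SYS_UNMAP, (uintptr_t)vaddr, (uintptr_t)pages,
 *                 (uintptr_t)0, (uintptr_t)0, (uintptr_t)0, (uintptr_t)0)
 *
 * The two leftover zeros land in do_syscall1 ()'s "..." and are discarded,
 * and "syscall" itself resolves to amd64_syscall through m/syscall.h.
 */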

28
libmsl/m/system.h Normal file
View File

@@ -0,0 +1,28 @@
#ifndef _LIBMSL_M_SYSTEM_H
#define _LIBMSL_M_SYSTEM_H
#include <stddef.h>
#include <stdint.h>
#if defined(__x86_64__)
#define PAGE_SIZE 4096
#endif
#define MAP_PRESENT (1 << 0)
#define MAP_RW (1 << 1)
#define MAP_USER (1 << 2)
#define MAP_FLAGS (MAP_PRESENT | MAP_USER)
int quit (void);
int test (char c);
int sched (void);
void* map (uintptr_t vaddr, size_t pages, uint32_t flags);
int unmap (uintptr_t vaddr, size_t pages);
int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr);
int mutex_create (int mutex_rid);
int mutex_delete (int mutex_rid);
int mutex_lock (int mutex_rid);
int mutex_unlock (int mutex_rid);
void* argument_ptr (void);
#endif // _LIBMSL_M_SYSTEM_H
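
The mutex wrappers round out the API: a mutex is a process resource addressed by an rid and is created and deleted explicitly. A hypothetical critical section follows; whether mutex_create () registers the rid handed in or the kernel picks one is not visible in this change set, so the sketch simply assumes the caller supplies a small fixed id.

#include <m/system.h>

enum { DEMO_MUTEX_RID = 1 };   /* assumed identifier, not from the repo */

/* Hypothetical guarded update of some shared state. */
void guarded_work (void) {
    mutex_create (DEMO_MUTEX_RID);
    mutex_lock (DEMO_MUTEX_RID);
    /* ... touch data shared with other clones ... */
    mutex_unlock (DEMO_MUTEX_RID);
    mutex_delete (DEMO_MUTEX_RID);
}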

1
libmsl/proc/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.o

6
libmsl/proc/local.h Normal file
View File

@@ -0,0 +1,6 @@
#ifndef _LIBMSL_PROC_TLS_H
#define _LIBMSL_PROC_TLS_H
#define LOCAL __thread
#endif // _LIBMSL_PROC_TLS_H
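
LOCAL is a thin alias for the compiler's __thread storage class, so every clone sees its own copy of a variable declared with it. A tiny hypothetical use; the counter and function names are made up:

#include <proc/local.h>

/* Each thread keeps a private copy of this counter thanks to LOCAL. */
static LOCAL int events_handled = 0;

void note_event (void) { events_handled++; }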

19
libmsl/proc/proc.c Normal file
View File

@@ -0,0 +1,19 @@
#include <alloc/liballoc.h>
#include <m/status.h>
#include <m/system.h>
#include <proc/proc.h>
#include <stddef.h>
#include <stdint.h>
int process_spawn (process_func_t func, void* argument_ptr) {
void* stack = malloc (PROC_STACK_SIZE);
if (stack == NULL)
return -ST_OOM_ERROR;
uintptr_t top = (uintptr_t)stack + PROC_STACK_SIZE;
return clone (top, func, argument_ptr);
}
int process_quit (void) { return quit (); }
void* process_argument (void) { return argument_ptr (); }

14
libmsl/proc/proc.h Normal file
View File

@@ -0,0 +1,14 @@
#ifndef _LIBMSL_PROC_PROC_H
#define _LIBMSL_PROC_PROC_H
#include <m/system.h>
#define PROC_STACK_SIZE (256 * PAGE_SIZE)
typedef void (*process_func_t) (void);
int process_spawn (process_func_t func, void* argument_ptr);
int process_quit (void);
void* process_argument (void);
#endif // _LIBMSL_PROC_PROC_H
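
Put together, proc.h gives an application a one-call way to start a worker on a freshly malloc'd 256-page stack and pass it a pointer, which the worker reads back through process_argument (). A sketch of typical use; worker_main, greeting and the test () call are illustrative, not code from this change set.

#include <m/system.h>
#include <proc/proc.h>

/* Hypothetical worker: fetch the pointer handed to process_spawn () and
 * exit explicitly when done. */
static void worker_main (void) {
    char* msg = process_argument ();
    test (msg[0]);               /* assumed: report the first byte via SYS_TEST */
    process_quit ();
}

void app_main (void) {
    static char greeting[] = "hello";
    process_spawn (worker_main, greeting);
}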

3
libmsl/proc/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += proc/proc.c
o += proc/proc.o

View File

@@ -2,3 +2,5 @@ include $(platform)/src.mk
include init/src.mk
include m/src.mk
include string/src.mk
include alloc/src.mk
include proc/src.mk

View File

@@ -1,4 +1,4 @@
apps := init
apps := init spin
all_apps:
@for d in $(apps); do make -C $$d platform=$(platform) all; done

View File

@@ -7,4 +7,4 @@ clean_libmsl:
format_libmsl:
make -C libmsl platform=$(platform) format
.PHONY: all_libmsl clean_libmsl
.PHONY: all_libmsl clean_libmsl format_libmsl

View File

@@ -1,7 +1,12 @@
site_name: MOP3 documentation
theme:
name: readthedocs
name: material
highlightjs: true
repo_url: https://git.kamkow1lair.pl/kamkow1/mop3.git
markdown_extensions:
- attr_list
- md_in_html
- pymdownx.blocks.caption

2
spin/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.o
*.exe

1
spin/Makefile Normal file
View File

@@ -0,0 +1 @@
include ../make/user.mk

1
spin/app.mk Normal file
View File

@@ -0,0 +1 @@
app := spin.exe

4
spin/spin.c Normal file
View File

@@ -0,0 +1,4 @@
void app_main (void) {
for (;;)
;
}

3
spin/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += spin.c
o += spin.o