Rewrite init app in C, introduce MSL (MOP3 System Library)

2026-01-04 01:11:31 +01:00
parent 2c954a9ca9
commit e077d322f4
57 changed files with 214 additions and 120 deletions


@@ -1,6 +1,7 @@
 platform ?= amd64
-include apps.mk
-include kernel.mk
-include dist.mk
-include docs.mk
+include make/apps.mk
+include make/kernel.mk
+include make/dist.mk
+include make/docs.mk
+include make/libc.mk


@@ -31,10 +31,14 @@ SECTIONS {
     *(.data .data.*)
   } :data
+  __bss_start = .;
   .bss : {
     *(.bss .bss.*)
   } :data
+  __bss_end = .;
   /DISCARD/ : {
     *(.eh_frame*)
     *(.note .note.*)


@@ -8,6 +8,7 @@ else
     make -B all_kernel
 fi
+make -B all_libmsl
 make -B all_apps
 make -B all_dist
 ./aux/limine_iso_amd64.sh


@@ -5,8 +5,9 @@ cflags += -nostdinc \
   -std=c11 \
   -pedantic \
   -Wall \
-  -Wextra \
-  -mcmodel=kernel
+  -Wextra
+cflags += -isystem ../include
 ldflags += -ffreestanding \
   -nostdlib \


@@ -1,5 +1,5 @@
-#ifndef _KERNEL_SYSCALL_DEFS_H
-#define _KERNEL_SYSCALL_DEFS_H
+#ifndef _M_SYSCALL_DEFS_H
+#define _M_SYSCALL_DEFS_H
 #define SYS_PROC_QUIT 1
 #define SYS_PROC_TEST 2
@@ -7,4 +7,4 @@
 #define SR_OK 0
 #define SR_SYSCALL_NOT_FOUND 1
-#endif // _KERNEL_SYSCALL_DEFS_H
+#endif // _M_SYSCALL_DEFS_H


@@ -1 +1 @@
-include ../user.mk
+include ../make/user.mk


@@ -1,12 +0,0 @@
-.global _start
-_start:
-    pushq $123
-    addq $8, %rsp
-    movq $2, %rax
-    syscall
-    movq $1, %rax
-    syscall
-    jmp _start

init/init.c Normal file

@@ -0,0 +1,9 @@
#include <limits.h>
#include <m/proc.h>

void app_main (void) {
    m_proc_test ();
    m_proc_test ();
    m_proc_test ();
    m_proc_test ();
}
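For context, m_proc_test () comes from the new MSL library added below: it is a thin wrapper that issues the SYS_PROC_TEST syscall. A sketch of what a single call boils down to on amd64 (purely illustrative, using definitions that appear later in this diff):

    /* m_proc_test ()  ->  m_syscall (SYS_PROC_TEST, 0, ...)  ->  raw `syscall` instruction */
    msl_amd64_syscall (SYS_PROC_TEST /* = 2 */, 0, 0, 0, 0, 0, 0);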


@@ -235,7 +235,7 @@ void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick);
  * @return amount of ticsk in a given period
  */
 static uint32_t amd64_lapic_calibrate (uint32_t us) {
-    amd64_lapic_write (LAPIC_DCR, 0x03);
+    amd64_lapic_write (LAPIC_DCR, 0x0B);
     amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
@@ -256,7 +256,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
  * Initial tick count
  */
 static void amd64_lapic_start (uint32_t ticks) {
-    amd64_lapic_write (LAPIC_DCR, 0x03);
+    amd64_lapic_write (LAPIC_DCR, 0x0B);
     amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
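For reference, the two values written to LAPIC_DCR are divide-configuration encodings: 0x03 selects divide-by-16 and 0x0B selects divide-by-1, so the timer now counts every clock instead of every sixteenth. A hedged sketch (macro names are illustrative, not from this repo):

    /* LAPIC Divide Configuration Register encodings, per the Intel SDM */
    #define LAPIC_DCR_DIV_16 0x03 /* previous setting: one tick per 16 clocks */
    #define LAPIC_DCR_DIV_1  0x0B /* new setting: one tick per clock */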


@@ -35,7 +35,6 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
  */
 void bootmain (void) {
     struct cpu* bsp_cpu = cpu_make ();
-    amd64_thiscpu_set_init ();
     amd64_init (bsp_cpu, false);
     syscall_init ();
@@ -52,10 +51,6 @@ void bootmain (void) {
     smp_init ();
-    /* busy wait for cpus to come online */
-    for (volatile int i = 0; i < INT_MAX; i++)
-        ;
     mm_init2 ();
     proc_init ();


@@ -28,8 +28,8 @@
 static bool hpet_32bits = 1;
 /// Physical address for HPET MMIO
 static uintptr_t hpet_paddr;
-/// HPET nanoseconds for conversion
-static uint64_t hpet_clock_nano;
+/// HPET period in femtoseconds
+static uint64_t hpet_period_fs;
 /// Lock, which protects concurrent access. See \ref amd64/smp.c
 static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
@@ -54,32 +54,22 @@ static void amd64_hpet_write (uint32_t reg, uint64_t value) {
 /// Read current value of \ref HPET_MCVR register.
 static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }
-/**
- * @brief Get current HPET timestamp in nanoseconds
- *
- * @param lock
- *   if true, hold \ref hpet_lock
- */
-uint64_t amd64_hpet_current_nano (bool lock) {
-    if (lock)
-        spin_lock (&hpet_lock);
-    uint64_t t = amd64_hpet_timestamp () * hpet_clock_nano;
-    if (lock)
-        spin_unlock (&hpet_lock);
-    return t;
-}
 /// Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being held.
 void amd64_hpet_sleep_micro (uint64_t us) {
     spin_lock (&hpet_lock);
     uint64_t start = amd64_hpet_timestamp ();
-    uint64_t conv = us * 1000;
-    while (((amd64_hpet_timestamp () - start) * hpet_clock_nano) < conv)
+    uint64_t target_fs = us * 1000000000ULL;
+    for (;;) {
+        uint64_t current = amd64_hpet_timestamp ();
+        uint64_t dt = current - start;
+        if ((dt * hpet_period_fs) >= target_fs)
+            break;
         __asm__ volatile ("pause" ::: "memory");
+    }
     spin_unlock (&hpet_lock);
 }
@@ -114,7 +104,5 @@ void amd64_hpet_init (void) {
         gcidr = (((uint64_t)high << 32) | low);
     }
-    uint64_t period_fs = (gcidr >> 32);
-    hpet_clock_nano = period_fs / 1000000;
+    hpet_period_fs = (gcidr >> 32);
 }
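The sleep loop now stays in the HPET's native unit: the general-capabilities register reports the counter period in femtoseconds, and 1 us = 10^9 fs. A minimal sketch of the conversion, assuming a period value like the one read in amd64_hpet_init () (the helper name is hypothetical):

    /* hypothetical helper: how many HPET ticks make up `us` microseconds */
    static uint64_t us_to_hpet_ticks (uint64_t us, uint64_t period_fs) {
        return (us * 1000000000ULL) / period_fs; /* 1 us = 1,000,000,000 fs */
    }

The committed code avoids the division by comparing elapsed_ticks * hpet_period_fs against us * 10^9 directly.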


@@ -3,7 +3,6 @@
 #include <libk/std.h>
-uint64_t amd64_hpet_current_nano (bool lock);
 void amd64_hpet_sleep_micro (uint64_t us);
 void amd64_hpet_init (void);


@@ -65,8 +65,6 @@ static void amd64_gdt_init (struct cpu* cpu) {
"movw %%ax, %%ds\n" "movw %%ax, %%ds\n"
"movw %%ax, %%es\n" "movw %%ax, %%es\n"
"movw %%ax, %%ss\n" "movw %%ax, %%ss\n"
"movw %%ax, %%fs\n"
"movw %%ax, %%gs\n"
: :
: [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA) : [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
: "rax", "memory"); : "rax", "memory");


@@ -175,8 +175,6 @@ static void amd64_intr_exception (struct saved_regs* regs) {
 void amd64_intr_handler (void* stack_ptr) {
     struct saved_regs* regs = stack_ptr;
-    amd64_load_kernel_cr3 ();
     if (regs->trap <= 31) {
         amd64_intr_exception (regs);
     } else {
@@ -219,10 +217,6 @@ static void amd64_irq_restore_flags (uint64_t rflags) {
 /// Save current interrupt state
 void irq_save (void) {
-    /* before smp init. */
-    if (thiscpu == NULL)
-        return;
     int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
     if (prev == 0)
         thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
@@ -230,10 +224,6 @@ void irq_save (void) {
 /// Restore interrupt state
 void irq_restore (void) {
-    /* before smp init. */
-    if (thiscpu == NULL)
-        return;
     int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
     if (prev == 1)
         amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);


@@ -22,9 +22,6 @@
     ;\
     movq %rsp, %rdi; \
     ;\
-    movq %cr3, %rax; \
-    pushq %rax; \
-    ;\
     movq %rsp, %rbp; \
     ;\
     subq $8, %rsp; \
@@ -34,9 +31,6 @@
     ;\
     movq %rbp, %rsp; \
     ;\
-    popq %rax; \
-    movq %rax, %cr3; \
-    ;\
     pop_regs; \
     addq $16, %rsp; \
     ;\


@@ -9,6 +9,6 @@
 void do_sched (struct proc* proc) {
     thiscpu->tss.rsp0 = proc->pdata.kernel_stack;
     thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack;
-    amd64_wrmsr (MSR_GS_BASE, (uint64_t)proc->pdata.gs_base);
     amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->pd.cr3_paddr);
 }


@@ -13,38 +13,31 @@
 #include <sys/syscall.h>
 /// Cpu ID counter
-static uint32_t cpu_counter = 0;
+static atomic_uint cpu_counter = 0;
-/// Lock for \ref cpu_counter
-static spin_lock_t cpu_counter_lock = SPIN_LOCK_INIT;
 /// The CPUs
 static struct cpu cpus[CPUS_MAX];
-static bool thiscpu_init = false;
+static atomic_int cpu_init_count;
-void amd64_thiscpu_set_init (void) { thiscpu_init = true; }
 /// Allocate a CPU structure
 struct cpu* cpu_make (void) {
-    spin_lock (&cpu_counter_lock);
-    int id = cpu_counter++;
-    spin_unlock (&cpu_counter_lock);
+    int id = atomic_fetch_add (&cpu_counter, 1);
     struct cpu* cpu = &cpus[id];
     memset (cpu, 0, sizeof (*cpu));
     cpu->lock = SPIN_LOCK_INIT;
     cpu->id = id;
+    cpu->self = cpu;
-    amd64_wrmsr (MSR_SHADOW_GS_BASE, (uint64_t)cpu);
+    amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu);
     return cpu;
 }
 struct cpu* cpu_get (void) {
-    if (!thiscpu_init)
-        return NULL;
-    return (struct cpu*)amd64_rdmsr (MSR_SHADOW_GS_BASE);
+    struct cpu* ptr = (struct cpu*)amd64_rdmsr (MSR_GS_BASE);
+    return ptr;
 }
 /// Bootstrap code for non-BSP CPUs
@@ -56,27 +49,36 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
     amd64_init (cpu, true); /* gdt + idt */
     syscall_init ();
-    thiscpu->lapic_ticks = amd64_lapic_init (2500);
+    thiscpu->lapic_ticks = amd64_lapic_init (10000);
     amd64_lapic_tick (thiscpu->lapic_ticks);
     DEBUG ("CPU %u is online!\n", thiscpu->id);
     __asm__ volatile ("sti");
+    atomic_fetch_sub (&cpu_init_count, 1);
     for (;;)
         ;
 }
 /// Initialize SMP subsystem for AMD64. Start AP CPUs
 void smp_init (void) {
-    thiscpu->lapic_ticks = amd64_lapic_init (2500);
+    thiscpu->lapic_ticks = amd64_lapic_init (10000);
     struct limine_mp_response* mp = limine_mp_request.response;
+    cpu_init_count = mp->cpu_count - 1; /* Don't include BSP */
     for (size_t i = 0; i < mp->cpu_count; i++) {
         if (mp->cpus[i]->lapic_id != thiscpu->id) {
             DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
             mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
         }
     }
+    while (atomic_load (&cpu_init_count) > 0)
+        ;
+    DEBUG ("All CPUs are online\n");
 }
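The INT_MAX busy-wait removed from bootmain above is replaced here by an explicit handshake: the BSP publishes how many APs it expects, each AP decrements the counter once it is online, and the BSP spins until the counter reaches zero. A stripped-down sketch of the same pattern (names are illustrative, not from this repo):

    #include <stdatomic.h>

    static atomic_int pending;                      /* BSP sets this to the AP count  */
    void ap_entry (void) { /* ...init... */ atomic_fetch_sub (&pending, 1); }
    void bsp_wait (void) { while (atomic_load (&pending) > 0) ; /* all APs online */ }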


@@ -13,6 +13,7 @@ struct cpu {
     /* for syscall instruction */
     uintptr_t syscall_user_stack;
     uintptr_t syscall_kernel_stack;
+    struct cpu* self;
     volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
     volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16);
     volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);


@@ -6,7 +6,7 @@
 #include <proc/proc.h>
 #include <sys/debug.h>
 #include <sys/smp.h>
-#include <syscall/defs.h>
+#include <m/syscall_defs.h>
 #include <syscall/syscall.h>
 extern void amd64_syscall_entry (void);
@@ -14,8 +14,6 @@ extern void amd64_syscall_entry (void);
 int amd64_syscall_dispatch (void* stack_ptr) {
     struct saved_regs* regs = stack_ptr;
-    amd64_load_kernel_cr3 ();
     int syscall_num = regs->rax;
     syscall_handler_func_t func = syscall_find_handler (syscall_num);


@@ -5,7 +5,6 @@
 .global amd64_syscall_entry
 amd64_syscall_entry:
     cli
-    swapgs
     movq %rsp, %gs:0
     movq %gs:8, %rsp
@@ -20,15 +19,10 @@ amd64_syscall_entry:
     push_regs
-    swapgs
     cld
     movq %rsp, %rdi
-    movq %cr3, %rax
-    pushq %rax
     movq %rsp, %rbp
     subq $8, %rsp
@@ -38,20 +32,9 @@ amd64_syscall_entry:
     movq %rbp, %rsp
-    popq %rax
-    movq %rax, %cr3
     pop_regs
-    swapgs
-    addq $16, %rsp
-    popq %rcx
-    addq $8, %rsp
-    popq %r11
-    addq $16, %rsp
+    addq $56, %rsp
     movq %gs:0, %rsp
-    swapgs
     sysretq


@@ -8,7 +8,7 @@ cflags += -nostdinc \
   -Wextra \
   -mcmodel=kernel
-cflags += -isystem . -isystem c_headers/include
+cflags += -isystem . -isystem ../include
 cflags += -DPRINTF_INCLUDE_CONFIG_H=1 \
   -D_ALLOC_SKIP_DEFINE


@@ -7,11 +7,7 @@ void debugprintf (const char* fmt, ...);
 #define DEBUG(fmt, ...) \
     do { \
-        if (thiscpu != NULL) { \
         debugprintf ("(CPU %u) %s:%d: " fmt, thiscpu->id, __func__, __LINE__, ##__VA_ARGS__); \
-        } else { \
-            debugprintf ("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__); \
-        } \
     } while (0)
 #endif // _KERNEL_SYS_DEBUG_H


@@ -2,7 +2,7 @@
 #include <libk/std.h>
 #include <proc/proc.h>
 #include <sys/debug.h>
-#include <syscall/defs.h>
+#include <m/syscall_defs.h>
 #include <syscall/syscall.h>
 #define DEFINE_SYSCALL(name) \

libmsl/Makefile Normal file

@@ -0,0 +1,28 @@
cc := clang
o :=
c :=
cflags := -isystem .
buildtype ?= release

include src.mk
include ../generic/flags.mk
include ../$(platform)/flags.mk

all: build/libmsl.a

build/libmsl.a: $(o)
	llvm-ar rcs $@ $^

%.o: %.c
	$(cc) -c -o $@ $(cflags) $<

%.o: %.S
	$(cc) -c -o $@ $(cflags) $<

clean:
	rm -f $(o) build/libmsl.a

format:
	clang-format -i $$(git ls-files '*.c' '*.h')

.PHONY: all clean format

libmsl/amd64/.gitignore vendored Normal file

@@ -0,0 +1 @@
*.o

libmsl/amd64/_start.S Normal file

@@ -0,0 +1,8 @@
.global _start

_start:
    xorq %rbp, %rbp
    movq %rsp, %rbp
    andq $-16, %rsp
    subq $8, %rsp
    callq __premain

libmsl/amd64/src.mk Normal file

@@ -0,0 +1,6 @@
c += amd64/syscall.c
S += amd64/_start.S
o += amd64/_start.o \
amd64/syscall.o

libmsl/amd64/syscall.c Normal file

@@ -0,0 +1,23 @@
#include <stddef.h>
#include <stdint.h>

#include <amd64/syscall.h>

int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6) {
    uint64_t result;

    __asm__ volatile (
        "movq %1, %%rax\n"
        "movq %2, %%rdi\n"
        "movq %3, %%rsi\n"
        "movq %4, %%rdx\n"
        "movq %5, %%r10\n"
        "movq %6, %%r8\n"
        "movq %7, %%r9\n"
        "syscall\n"
        "movq %%rax, %0\n"
        : "=r"(result)
        : "r"((uint64_t)syscall_num), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5), "r"(a6)
        : "memory", "cc", "rcx", "r11");
    return (int)result;
}
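The wrapper above fixes the register convention the library uses for syscalls: the call number travels in rax and the six arguments in rdi, rsi, rdx, r10, r8 and r9; rcx and r11 are listed as clobbers because the syscall instruction itself overwrites them with the return RIP and RFLAGS. Summed up as a comment for quick reference:

    /* Register use in msl_amd64_syscall:
     *   rax = syscall number, return value    r10 = a4
     *   rdi = a1                              r8  = a5
     *   rsi = a2                              r9  = a6
     *   rdx = a3                              rcx, r11 = clobbered by `syscall` */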

libmsl/amd64/syscall.h Normal file

@@ -0,0 +1,8 @@
#ifndef _LIBMSL_AMD64_SYSCALL_H
#define _LIBMSL_AMD64_SYSCALL_H
#include <stdint.h>
int msl_amd64_syscall (int syscall_num, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6);
#endif // _LIBMSL_AMD64_SYSCALL_H

libmsl/init/.gitignore vendored Normal file

@@ -0,0 +1 @@
*.o

libmsl/init/__premain.c Normal file

@@ -0,0 +1,22 @@
#include <stdint.h>

#include <m/proc.h>

extern volatile uint8_t __bss_start[];
extern volatile uint8_t __bss_end[];

extern void app_main (void);

static void msl_clear_bss (void) {
    uint8_t *p = (uint8_t*)__bss_start;
    while (p < __bss_end) {
        *p++ = 0;
    }
}

void __premain (void) {
    msl_clear_bss ();
    app_main ();
    m_proc_quit ();
}
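Taken together with the pieces above, this file fixes the user-space startup path introduced by the commit. A sketch of the call chain as wired up in this diff (the __bss_start/__bss_end symbols are presumably the ones added to the linker script near the top of this diff):

    /* _start (libmsl/amd64/_start.S): set up the stack, then call __premain
     * __premain (libmsl/init/__premain.c): zero .bss via __bss_start/__bss_end,
     *     call app_main, then m_proc_quit ()
     * app_main (init/init.c): the application's entry point */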

libmsl/init/src.mk Normal file

@@ -0,0 +1,3 @@
c += init/__premain.c
o += init/__premain.o

libmsl/m/.gitignore vendored Normal file

@@ -0,0 +1 @@
*.o

libmsl/m/proc.c Normal file

@@ -0,0 +1,10 @@
#include <m/syscall.h>
#include <m/syscall_defs.h>

int m_proc_quit (void) {
    return m_syscall (SYS_PROC_QUIT, 0, 0, 0, 0, 0, 0);
}

int m_proc_test (void) {
    return m_syscall (SYS_PROC_TEST, 0, 0, 0, 0, 0, 0);
}

libmsl/m/proc.h Normal file

@@ -0,0 +1,8 @@
#ifndef _LIBMSL_M_PROC_H
#define _LIBMSL_M_PROC_H
int m_proc_quit (void);
int m_proc_test (void);
#endif // _LIBMSL_M_PROC_H

libmsl/m/src.mk Normal file

@@ -0,0 +1,3 @@
c += m/proc.c
o += m/proc.o

libmsl/m/syscall.h Normal file

@@ -0,0 +1,11 @@
#ifndef _LIBMSL_M_SYSCALL_H
#define _LIBMSL_M_SYSCALL_H
#include <m/syscall_defs.h>
#if defined(__x86_64__)
#include <amd64/syscall.h>
#define m_syscall msl_amd64_syscall
#endif
#endif // _LIBMSL_M_SYSCALL_H

libmsl/src.mk Normal file

@@ -0,0 +1,3 @@
include $(platform)/src.mk
include init/src.mk
include m/src.mk

make/libc.mk Normal file

@@ -0,0 +1,10 @@
all_libmsl:
	make -C libmsl platform=$(platform) all

clean_libmsl:
	make -C libmsl platform=$(platform) clean

format_libmsl:
	make -C libmsl platform=$(platform) format

.PHONY: all_libmsl clean_libmsl


@@ -1,8 +1,8 @@
 cc := clang
 o :=
 c :=
-ldflags :=
-cflags :=
+ldflags := -L ../libmsl/build -l:libmsl.a
+cflags := -isystem ../libmsl
 include src.mk
 include app.mk