Compare commits: e78bfb9984...master (4 commits: 38e26a9c12, 124aa12f5b, d2f5c032d9, 73e42588fb)
@@ -18,20 +18,20 @@ SECTIONS {
     *(.ltext .ltext.*)
   } :text

-  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  . = ALIGN(0x1000);

   .rodata : {
     *(.rodata .rodata.*)
   } :rodata

-  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  . = ALIGN(0x1000);

   .data : {
     *(.data .data.*)
     *(.ldata .ldata.*)
   } :data

-  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  . = ALIGN(0x1000);

   __bss_start = .;

@@ -42,7 +42,7 @@

   __bss_end = .;

-  . = ALIGN(CONSTANT(MAXPAGESIZE));
+  . = ALIGN(0x1000);

   __tdata_start = .;
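The alignment change matters because the kernel maps these sections with 4 KiB pages: CONSTANT(MAXPAGESIZE) expands to whatever maximum page size the toolchain assumes (commonly 2 MiB for x86-64 ELF), while ALIGN(0x1000) pins each boundary to the 4 KiB PAGE_SIZE the mapping code uses. A minimal, hedged sanity check; only the linker-provided symbols come from the script above, the function itself is illustrative:

```c
#include <stdint.h>

#define PAGE_SIZE 0x1000u

/* Symbols defined by the linker script above. */
extern char __bss_start[], __tdata_start[];

/* Illustrative boot-time check (not in the repository): section starts
 * that follow an ALIGN(0x1000) directive must land on a 4 KiB page
 * boundary, which is what per-page mapping code assumes. */
static int sections_page_aligned (void) {
    return ((uintptr_t)__bss_start % PAGE_SIZE) == 0 &&
           ((uintptr_t)__tdata_start % PAGE_SIZE) == 0;
}
```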
@@ -1,4 +1,4 @@
-cpu: model=p4_prescott_celeron_336
+cpu: model=p4_prescott_celeron_336, ips=200000000

 memory: guest=4096 host=2048

@@ -9,6 +9,7 @@ ata0: enabled=1
 ata0-master: type=cdrom, path=mop3.iso, status=inserted
+com1: enabled=1, mode=file, dev=bochs-com1.txt
 pci: enabled=1, chipset=i440fx
 clock: sync=realtime, time0=local

 boot: cdrom
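The new com1 line makes Bochs capture everything the guest writes to the COM1 UART into bochs-com1.txt on the host, and ips=200000000 fixes the emulated instructions-per-second estimate so the realtime clock sync stays steady. As a hedged sketch of the guest side, a polled COM1 writer might look like the following; this is not code from the repository, just the standard port 0x3F8 interface:

```c
#include <stdint.h>

#define COM1_PORT 0x3F8

static inline void outb (uint16_t port, uint8_t val) {
    __asm__ volatile ("outb %0, %1" : : "a" (val), "Nd" (port));
}

static inline uint8_t inb (uint16_t port) {
    uint8_t val;
    __asm__ volatile ("inb %1, %0" : "=a" (val) : "Nd" (port));
    return val;
}

static void com1_putc (char c) {
    /* wait until the transmit holding register is empty (LSR bit 5) */
    while ((inb (COM1_PORT + 5) & 0x20) == 0)
        ;
    outb (COM1_PORT, (uint8_t)c);
}
```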
@@ -11,5 +11,6 @@
 #define SYS_MUTEX_DELETE 8
 #define SYS_MUTEX_LOCK 9
 #define SYS_MUTEX_UNLOCK 10
+#define SYS_ARGUMENT_PTR 11

 #endif // _M_SYSCALL_DEFS_H
init/init.c (14 changed lines)
@@ -7,10 +7,12 @@

 #define MUTEX 2000

-LOCAL char letter = 'c';
+LOCAL volatile char letter = 'c';

-void app_proc1 (void) {
-  letter = 'b';
+void app_proc (void) {
+  char arg_letter = (char)(uintptr_t)argument_ptr ();
+
+  letter = arg_letter;

   for (;;) {
     mutex_lock (MUTEX);

@@ -29,7 +31,9 @@ void app_main (void) {

   letter = 'a';

-  process_spawn (&app_proc1);
+  process_spawn (&app_proc, (void*)'a');
+  process_spawn (&app_proc, (void*)'b');
+  process_spawn (&app_proc, (void*)'c');

   for (;;) {
     mutex_lock (MUTEX);

@@ -37,8 +41,6 @@ void app_main (void) {
     for (int i = 0; i < 3; i++)
       test (letter);

-    process_quit ();
-
     mutex_unlock (MUTEX);
   }
 }
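Two things change in the demo program: the hard-coded app_proc1 becomes a parameterized app_proc that is spawned three times with different arguments, and letter becomes volatile because several processes sharing the address space now write it. A hedged illustration of what the qualifier prevents (assuming LOCAL expands to static; nothing below is from the source tree):

```c
#define LOCAL static   /* assumption: LOCAL hides the storage class */

LOCAL volatile char letter = 'c';

/* Without volatile, a compiler that sees no writes to 'letter' on this
 * path may hoist the load out of the loop and spin on a stale register
 * copy; volatile forces a fresh load from memory on every iteration. */
static void wait_for (char expected) {
    while (letter != expected)
        ;
}
```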
@@ -58,7 +58,8 @@ struct proc* proc_from_elf (uint8_t* elf_contents) {
   return proc;
 }

-struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry) {
+struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+                         uintptr_t argument_ptr) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
   spin_lock_ctx_t ctxprt;

@@ -88,6 +89,8 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent
   proc->pdata.regs.cs = GDT_UCODE | 0x03;
   proc->pdata.regs.rip = (uint64_t)entry;

+  proc->uvaddr_argument = argument_ptr;
+
   proc_init_tls (proc);

   return proc;
@@ -109,22 +112,25 @@ void proc_cleanup (struct proc* proc) {
 void proc_init_tls (struct proc* proc) {
   struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

   if (proc->procgroup->tls.tls_tmpl == NULL)
     return;

   size_t tls_size = proc->procgroup->tls.tls_tmpl_size;
+  size_t pages = proc->procgroup->tls.tls_tmpl_pages;

   uintptr_t tls_paddr;
   uint32_t flags = MM_PG_USER | MM_PG_PRESENT | MM_PG_RW;

-  uintptr_t tls_vaddr =
-      procgroup_map (proc->procgroup, 0, proc->procgroup->tls.tls_tmpl_pages, flags, &tls_paddr);
+  uintptr_t tls_vaddr = procgroup_map (proc->procgroup, 0, pages, flags, &tls_paddr);

   uintptr_t k_tls_addr = (uintptr_t)hhdm->offset + tls_paddr;

+  memset ((void*)k_tls_addr, 0, pages * PAGE_SIZE);
+  memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
+
   uintptr_t ktcb = k_tls_addr + tls_size;
   uintptr_t utcb = tls_vaddr + tls_size;

-  memset ((void*)k_tls_addr, 0, tls_size);
-  memcpy ((void*)k_tls_addr, (void*)proc->procgroup->tls.tls_tmpl, tls_size);
-
   *(uintptr_t*)ktcb = utcb;

   proc->pdata.fs_base = utcb;
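The rewritten proc_init_tls zeroes the whole mapping (pages * PAGE_SIZE) before copying the template in, and keeps the usual x86-64 TLS layout: the TLS image at the bottom of the mapping, the thread control block (TCB) right after it, FS.base pointing at the TCB, and the TCB's first word holding its own user-space address. A hedged sketch of how user code can use that self-pointer (GCC/Clang inline assembly, illustrative only):

```c
#include <stdint.h>

/* Illustrative only: with proc->pdata.fs_base pointing at the TCB and
 * *(uintptr_t*)tcb == tcb (the self-pointer written by proc_init_tls),
 * reading fs:0 from user mode yields the TCB's own virtual address. */
static inline uintptr_t current_tcb (void) {
    uintptr_t tcb;
    __asm__ volatile ("movq %%fs:0, %0" : "=r" (tcb));
    return tcb;
}
```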
@@ -44,13 +44,15 @@ void proc_mutexes_cleanup (struct proc* proc) {
   spin_unlock (&proc->procgroup->lock, &ctxpg);
 }

-void proc_cleanup_resource_mutex (struct proc_resource* resource) {
+bool proc_cleanup_resource_mutex (struct proc_resource* resource) {
   struct proc_mutex* mutex = &resource->u.mutex;
   spin_lock_ctx_t ctxmt, ctxsq;

   spin_lock (&mutex->resource->lock, &ctxmt);
   spin_lock (&mutex->suspension_q.lock, &ctxsq);

+  bool reschedule = PROC_NO_RESCHEDULE;
+
   while (mutex->suspension_q.proc_list != NULL) {
     struct list_node_link* node = mutex->suspension_q.proc_list;
     struct proc_sq_entry* sq_entry = list_entry (node, struct proc_sq_entry, sq_link);

@@ -60,7 +62,7 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);

-    proc_sq_resume (suspended_proc, sq_entry);
+    reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry);

     /* reacquire */
     spin_lock (&mutex->resource->lock, &ctxmt);

@@ -72,23 +74,23 @@ void proc_cleanup_resource_mutex (struct proc_resource* resource) {

   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);
+
+  return reschedule;
 }

-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
   spin_lock_ctx_t ctxmt;

-  for (;;) {
-    spin_lock (&mutex->resource->lock, &ctxmt);
+  spin_lock (&mutex->resource->lock, &ctxmt);

-    if (!mutex->locked || mutex->owner == proc) {
-      mutex->locked = true;
-      mutex->owner = proc;
-      spin_unlock (&mutex->resource->lock, &ctxmt);
-      return;
-    }
+  if (!mutex->locked || mutex->owner == proc) {
+    mutex->locked = true;
+    mutex->owner = proc;
+    spin_unlock (&mutex->resource->lock, &ctxmt);
+    return PROC_NO_RESCHEDULE;
+  }

-    proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
-  }
+  return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt);
 }

 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {

@@ -98,7 +100,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {

   if (mutex->owner != proc) {
     spin_unlock (&mutex->resource->lock, &ctxmt);
-    return false;
+    return PROC_NO_RESCHEDULE;
   }

   spin_lock (&mutex->suspension_q.lock, &ctxsq);

@@ -115,9 +117,7 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);

-    proc_sq_resume (resumed_proc, sq_entry);
-
-    return true;
+    return proc_sq_resume (resumed_proc, sq_entry);
   }

   mutex->locked = false;

@@ -126,5 +126,5 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
   spin_unlock (&mutex->suspension_q.lock, &ctxsq);
   spin_unlock (&mutex->resource->lock, &ctxmt);

-  return true;
+  return PROC_NEED_RESCHEDULE;
 }
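From here on the mutex paths no longer request a CPU reschedule themselves; they report, through the bool return and the PROC_NEED_RESCHEDULE / PROC_NO_RESCHEDULE values introduced further down in proc.h, whether the caller should reschedule once all resource locks are dropped. The syscall handlers later in this compare consume it as sketched below; the wrapper function is hypothetical, the prototypes match the diff:

```c
#include <stdbool.h>

/* Values introduced by this change (see the proc.h hunk below). */
#define PROC_NEED_RESCHEDULE true
#define PROC_NO_RESCHEDULE false

struct proc;
struct proc_mutex;

/* Prototypes as they now appear in the mutex header. */
bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
void proc_sched (void);

/* Hedged sketch of the caller-side pattern: the blocking primitive only
 * reports that the current process was suspended (or that someone was
 * woken); the caller then performs a single reschedule afterwards. */
static void lock_then_maybe_reschedule (struct proc* proc, struct proc_mutex* mutex) {
    if (proc_mutex_lock (proc, mutex) == PROC_NEED_RESCHEDULE)
        proc_sched ();
}
```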
@@ -15,8 +15,8 @@ struct proc_mutex {
   struct proc* owner;
 };

-void proc_cleanup_resource_mutex (struct proc_resource* resource);
-void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
+bool proc_cleanup_resource_mutex (struct proc_resource* resource);
+bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex);
 bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex);
 void proc_mutexes_cleanup (struct proc* proc);
@@ -75,21 +75,23 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
     } break;
     case PT_TLS: {
 #if defined(__x86_64__)
-      size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
-      size_t tls_size = phdr->p_memsz;
-      size_t tls_total_needed = tls_size + sizeof (uintptr_t);
-      size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
-      proc->procgroup->tls.tls_tmpl_pages = blks;
-      proc->procgroup->tls.tls_tmpl_size = tls_size;
-      proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
+      if (phdr->p_memsz > 0) {
+        size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
+        size_t tls_size = align_up (phdr->p_memsz, tls_align);
+        size_t tls_total_needed = tls_size + sizeof (uintptr_t);
+        size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
+        proc->procgroup->tls.tls_tmpl_pages = blks;
+        proc->procgroup->tls.tls_tmpl_size = tls_size;
+        proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;

-      proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
-      memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
+        proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
+        memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);

-      memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
-              phdr->p_filesz);
+        memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
+                phdr->p_filesz);

-      proc_init_tls (proc);
+        proc_init_tls (proc);
+      }
 #endif
     } break;
   }
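Besides skipping empty PT_TLS segments, the key change is that the template size is now rounded up to the segment alignment before the TCB word is appended. A small worked example; the helper bodies are plausible stand-ins for the align_up and div_align_up names used in the diff, and the header values are invented:

```c
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 0x1000u

/* Assumed implementations of the helpers named in the diff. */
static size_t align_up (size_t v, size_t a)     { return (v + a - 1) / a * a; }
static size_t div_align_up (size_t v, size_t a) { return (v + a - 1) / a; }

static void tls_sizing_example (void) {
    size_t p_memsz = 13, p_align = 8;   /* hypothetical PT_TLS header values */

    size_t tls_size = align_up (p_memsz, p_align);            /* 16                      */
    size_t tls_total_needed = tls_size + sizeof (uintptr_t);  /* 24: room for the TCB    */
    size_t blks = div_align_up (tls_total_needed, PAGE_SIZE); /* 1 page for the template */

    (void)blks;
}
```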
@@ -276,6 +278,6 @@ void proc_init (void) {
   proc_register (init, NULL);

   spin_lock_ctx_t ctxcpu;
-  spin_lock (&init->cpu->lock, &ctxcpu);
-  do_sched (init, &init->cpu->lock, &ctxcpu);
+  spin_lock (&spin_proc->cpu->lock, &ctxcpu);
+  do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu);
 }
@@ -17,6 +17,9 @@
 #include <amd64/proc.h> /* USTACK_SIZE */
 #endif

+#define PROC_NEED_RESCHEDULE true
+#define PROC_NO_RESCHEDULE false
+
 /* process states */
 #define PROC_READY 0
 #define PROC_DEAD 1

@@ -40,6 +43,7 @@ struct proc {
   spin_lock_t lock;
   struct cpu* cpu;
   atomic_int state;
+  uintptr_t uvaddr_argument;
 };

 void proc_sched (void);
@@ -51,7 +51,9 @@ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, i
   return resource;
 }

-void proc_delete_resource (struct proc_resource* resource) {
-  resource->ops.cleanup (resource);
+bool proc_delete_resource (struct proc_resource* resource) {
+  bool reschedule = resource->ops.cleanup (resource);
   free (resource);
+
+  return reschedule;
 }
@@ -21,12 +21,12 @@ struct proc_resource {
     struct proc_mutex mutex;
   } u;
   struct {
-    void (*cleanup) (struct proc_resource* resource);
+    bool (*cleanup) (struct proc_resource* resource);
   } ops;
 };

 struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
 struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
-void proc_delete_resource (struct proc_resource* resource);
+bool proc_delete_resource (struct proc_resource* resource);

 #endif // _KERNEL_PROC_RESOURCE_H
@@ -8,7 +8,7 @@
 #include <sys/smp.h>
 #include <sys/spin_lock.h>

-void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
                       spin_lock_ctx_t* ctxrl) {
   spin_lock_ctx_t ctxpr, ctxcpu, ctxsq;
   struct cpu* cpu = proc->cpu;

@@ -16,7 +16,7 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
   struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry));
   if (!sq_entry) {
     spin_unlock (resource_lock, ctxrl);
-    return;
+    return PROC_NO_RESCHEDULE;
   }

   sq_entry->proc = proc;

@@ -48,10 +48,10 @@ void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock
   spin_unlock (&proc->lock, &ctxpr);
   spin_unlock (&cpu->lock, &ctxcpu);

-  cpu_request_sched (cpu);
+  return PROC_NEED_RESCHEDULE;
 }

-void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
+bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {
   spin_lock_ctx_t ctxsq, ctxpr, ctxcpu;
   struct cpu* cpu = cpu_find_lightest ();
   struct proc_suspension_q* sq = sq_entry->sq;

@@ -80,7 +80,7 @@ void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry) {

   free (sq_entry);

-  cpu_request_sched (cpu);
+  return PROC_NEED_RESCHEDULE;
 }

 void proc_sqs_cleanup (struct proc* proc) {

@@ -19,8 +19,8 @@ struct proc_sq_entry {
 };

 void proc_sqs_cleanup (struct proc* proc);
-void proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
+bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock,
                       spin_lock_ctx_t* ctxrl);
-void proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);
+bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry);

 #endif // _KERNEL_PROC_SUSPENTION_Q_H
@@ -6,7 +6,8 @@
 struct proc;

 struct proc* proc_from_elf (uint8_t* elf_contents);
-struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry);
+struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry,
+                         uintptr_t argument_ptr);
 void proc_cleanup (struct proc* proc);
 void proc_init_tls (struct proc* proc);
@@ -78,12 +78,13 @@ DEFINE_SYSCALL (sys_unmap) {
   return SYSRESULT (procgroup_unmap (proc->procgroup, vaddr, pages));
 }

-/* int clone (uintptr_t vstack_top, void* entry) */
+/* int clone (uintptr_t vstack_top, void* entry, void* argument_ptr) */
 DEFINE_SYSCALL (sys_clone) {
   uintptr_t vstack_top = a1;
   uintptr_t entry = a2;
+  uintptr_t argument_ptr = a3;

-  struct proc* new = proc_clone (proc, vstack_top, entry);
+  struct proc* new = proc_clone (proc, vstack_top, entry, argument_ptr);

   if (new == NULL) {
     return SYSRESULT (-ST_OOM_ERROR);

@@ -96,6 +97,9 @@ DEFINE_SYSCALL (sys_clone) {
   return SYSRESULT (pid);
 }

+/* void* argument_ptr (void) */
+DEFINE_SYSCALL (sys_argument_ptr) { return proc->uvaddr_argument; }
+
 /* int sched (void) */
 DEFINE_SYSCALL (sys_sched) {
   proc_sched ();

@@ -123,7 +127,8 @@ DEFINE_SYSCALL (sys_mutex_delete) {
   if (mutex_resource == NULL)
     return SYSRESULT (-ST_NOT_FOUND);

-  proc_delete_resource (mutex_resource);
+  if (proc_delete_resource (mutex_resource) == PROC_NEED_RESCHEDULE)
+    proc_sched ();

   return SYSRESULT (ST_OK);
 }

@@ -137,7 +142,8 @@ DEFINE_SYSCALL (sys_mutex_lock) {
   if (mutex_resource == NULL)
     return SYSRESULT (-ST_NOT_FOUND);

-  proc_mutex_lock (proc, &mutex_resource->u.mutex);
+  if (proc_mutex_lock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+    proc_sched ();

   return SYSRESULT (ST_OK);
 }

@@ -151,8 +157,10 @@ DEFINE_SYSCALL (sys_mutex_unlock) {
   if (mutex_resource == NULL)
     return SYSRESULT (-ST_NOT_FOUND);

-  return SYSRESULT (proc_mutex_unlock (proc, &mutex_resource->u.mutex) ? ST_OK
-                                                                       : -ST_PERMISSION_ERROR);
+  if (proc_mutex_unlock (proc, &mutex_resource->u.mutex) == PROC_NEED_RESCHEDULE)
+    proc_sched ();
+
+  return SYSRESULT (ST_OK);
 }

 static syscall_handler_func_t handler_table[] = {

@@ -161,6 +169,7 @@ static syscall_handler_func_t handler_table[] = {
   [SYS_MAP] = &sys_map,
   [SYS_UNMAP] = &sys_unmap,
   [SYS_CLONE] = &sys_clone,
+  [SYS_ARGUMENT_PTR] = &sys_argument_ptr,
   [SYS_SCHED] = &sys_sched,
   [SYS_MUTEX_CREATE] = &sys_mutex_create,
   [SYS_MUTEX_DELETE] = &sys_mutex_delete,
@@ -21,8 +21,8 @@ void* map (uintptr_t vaddr, size_t pages, uint32_t flags) {

 int unmap (uintptr_t vaddr, size_t pages) { return do_syscall (SYS_UNMAP, vaddr, pages); }

-int clone (uintptr_t vstack_top, void (*entry) (void)) {
-  return do_syscall (SYS_CLONE, vstack_top, entry);
+int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr) {
+  return do_syscall (SYS_CLONE, vstack_top, entry, argument_ptr);
 }

 int mutex_create (int mutex_rid) { return do_syscall (SYS_MUTEX_CREATE, mutex_rid); }

@@ -32,3 +32,5 @@ int mutex_delete (int mutex_rid) { return do_syscall (SYS_MUTEX_DELETE, mutex_ri
 int mutex_lock (int mutex_rid) { return do_syscall (SYS_MUTEX_LOCK, mutex_rid); }

 int mutex_unlock (int mutex_rid) { return do_syscall (SYS_MUTEX_UNLOCK, mutex_rid); }
+
+void* argument_ptr (void) { return (void*)do_syscall (SYS_ARGUMENT_PTR, 0); }
@@ -18,10 +18,11 @@ int test (char c);
 int sched (void);
 void* map (uintptr_t vaddr, size_t pages, uint32_t flags);
 int unmap (uintptr_t vaddr, size_t pages);
-int clone (uintptr_t vstack_top, void (*entry) (void));
+int clone (uintptr_t vstack_top, void (*entry) (void), void* argument_ptr);
 int mutex_create (int mutex_rid);
 int mutex_delete (int mutex_rid);
 int mutex_lock (int mutex_rid);
 int mutex_unlock (int mutex_rid);
+void* argument_ptr (void);

 #endif // _LIBMSL_M_SYSTEM_H
@@ -5,13 +5,15 @@
 #include <stddef.h>
 #include <stdint.h>

-int process_spawn (process_func_t func) {
+int process_spawn (process_func_t func, void* argument_ptr) {
   void* stack = malloc (PROC_STACK_SIZE);
   if (stack == NULL)
     return -ST_OOM_ERROR;

   uintptr_t top = (uintptr_t)stack + PROC_STACK_SIZE;
-  return clone (top, func);
+  return clone (top, func, argument_ptr);
 }

 int process_quit (void) { return quit (); }
+
+void* process_argument (void) { return argument_ptr (); }
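Putting the userspace pieces together, the API after this change can be used as sketched below. The declarations are the ones from the libmsl headers in this compare; the worker function and its tags are hypothetical:

```c
#include <stdint.h>

/* Declarations as they appear in the libmsl headers after this change. */
typedef void (*process_func_t) (void);
int   process_spawn (process_func_t func, void* argument_ptr);
int   process_quit (void);
void* process_argument (void);

/* Hypothetical worker, for illustration only: it recovers the
 * pointer-sized value that was handed to process_spawn. */
static void worker (void) {
    char tag = (char)(uintptr_t)process_argument ();
    (void)tag;               /* e.g. print or test (tag) */
    process_quit ();
}

static void spawn_three_workers (void) {
    /* any pointer-sized value travels clone(a3) -> proc->uvaddr_argument
       -> SYS_ARGUMENT_PTR, so plain characters can be passed through */
    process_spawn (&worker, (void*)'a');
    process_spawn (&worker, (void*)'b');
    process_spawn (&worker, (void*)'c');
}
```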
@@ -7,7 +7,8 @@

 typedef void (*process_func_t) (void);

-int process_spawn (process_func_t func);
+int process_spawn (process_func_t func, void* argument_ptr);
 int process_quit (void);
+void* process_argument (void);

 #endif // _LIBMSL_PROC_PROC_H