Compare commits: fff51321bc ... fea0999726

2 commits: fea0999726, 7eceecf6e3
@@ -9,5 +9,9 @@
 #define SYS_SCHED 6
 #define SYS_CREATE_MEM 7
 #define SYS_UNLINK_MEM 8
+#define SYS_CREATE_MUTEX 9
+#define SYS_UNLINK_MUTEX 10
+#define SYS_LOCK_MUTEX 11
+#define SYS_UNLOCK_MUTEX 12
 
 #endif // _M_SYSCALL_DEFS_H

init/init.c (64 changed lines)
@@ -6,20 +6,66 @@
 #include <stdint.h>
 #include <string/string.h>
 
-#define EXAMPLE 1
+#define EXAMPLE 2
 
 #if EXAMPLE == 1
 
-void app_main (void) {
-    test ('a');
-    test ('a');
-    test ('a');
-    int* xs = malloc (1024 * sizeof (*xs));
-    memset (xs, 123, 1024 * sizeof (*xs));
-    free (xs);
-
-    test ('a');
-    test ('a');
-    test ('a');
-}
+void app_thread1 (void) {
+    test ('b');
+    quit ();
+}
+
+int spawn (void (*fn) (void)) {
+    size_t stack_size = 256 * PAGE_SIZE;
+    void* stack = malloc (stack_size);
+    if (stack == NULL)
+        return -ST_OOM_ERROR;
+
+    uintptr_t stack_top = (uintptr_t)stack + stack_size;
+    return clone (stack_top, stack_size, fn);
+}
+
+void app_main (void) { spawn (&app_thread1); }
+#elif EXAMPLE == 2
+
+#define MUTEX 2000
+
+void app_thread1 (void);
+
+int spawn (void (*fn) (void)) {
+    size_t stack_size = 256 * PAGE_SIZE;
+    void* stack = malloc (stack_size);
+    if (stack == NULL)
+        return -ST_OOM_ERROR;
+
+    uintptr_t stack_top = (uintptr_t)stack + stack_size;
+    return clone (stack_top, stack_size, fn);
+}
+
+void app_main (void) {
+    create_mutex (MUTEX, RV_PRIVATE);
+
+    spawn (&app_thread1);
+
+    for (;;) {
+        /* lock_mutex (MUTEX, RV_PRIVATE); */
+
+        for (int i = 0; i < 3; i++)
+            test ('a');
+
+        /* unlock_mutex (MUTEX, RV_PRIVATE); */
+    }
+}
+
+void app_thread1 (void) {
+    for (int i = 0; i < 3; i++)
+        test ('b');
+    for (;;) {
+        /* lock_mutex (MUTEX, RV_PRIVATE); */
+
+        /* unlock_mutex (MUTEX, RV_PRIVATE); */
+    }
+
+    quit ();
+}
 #endif

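For reference, EXAMPLE 2 above ships with the lock_mutex/unlock_mutex calls commented out, so the 'a' and 'b' bursts from the two threads may still interleave. A minimal sketch of app_main's loop with the calls enabled (assuming that is the intended use of the new syscalls; this is not part of the commit):

```c
/* Hedged sketch: app_main's loop from EXAMPLE 2 with the mutex actually used.
 * MUTEX, RV_PRIVATE, lock_mutex, unlock_mutex and test all come from the diff above. */
for (;;) {
    lock_mutex (MUTEX, RV_PRIVATE);

    for (int i = 0; i < 3; i++)
        test ('a');

    unlock_mutex (MUTEX, RV_PRIVATE);
}
```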
@@ -27,7 +27,7 @@ struct cpu {
 
     spin_lock_t lock;
 
-    struct rb_node_link* proc_run_q;
+    struct list_node_link* proc_run_q;
     struct proc* proc_current;
 };
 
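The per-CPU run queue switches from an rbtree keyed by pid to an intrusive linked list. The libk/list.h API itself is not part of this compare; a minimal sketch of what the scheduler changes below appear to assume (field and macro shapes are guesses based on usage):

```c
/* Assumed shape of the list primitives used by the scheduler changes below.
 * Only the members actually referenced in this diff (next, list_entry) are shown;
 * list_append/list_remove may need additional bookkeeping not sketched here. */
#include <stddef.h>

struct list_node_link {
    struct list_node_link* next;
};

/* container_of-style accessor, mirroring how rbtree_entry was used before */
#define list_entry(node, type, member) \
    ((type*) ((char*) (node) - offsetof (type, member)))
```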
@@ -25,12 +25,8 @@ int amd64_syscall_dispatch (void* stack_ptr) {
 
     struct proc* caller = thiscpu->proc_current;
 
-    __asm__ volatile ("sti");
-
     int result = func (caller, regs, regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8, regs->r9);
 
-    __asm__ volatile ("cli");
-
     return result;
 }
 
@@ -34,6 +34,7 @@ try:
 
     spin_unlock (&mutex->resource->lock, &ctxmt);
 
+    DEBUG ("proc->pid=%d\n", proc->pid);
     proc_suspend (proc, &mutex->suspension_q);
 
     goto try;

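Only the suspend-and-retry tail of proc_mutex_lock is visible in this hunk. A hedged reconstruction of the surrounding retry loop, purely to show where the added DEBUG line sits; the acquire branch and the exact signature are assumptions, not code from this commit:

```c
/* Hedged reconstruction; only the spin_unlock/DEBUG/proc_suspend/goto lines
 * are actually in this commit.  Fields (locked, owner, resource, suspension_q)
 * and the (proc, mutex) signature are taken from other hunks in this compare. */
void proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex) {
    spin_lock_ctx_t ctxmt;

try:
    spin_lock (&mutex->resource->lock, &ctxmt);

    if (!mutex->locked) {                       /* assumed acquire branch */
        mutex->locked = true;
        mutex->owner  = proc;
        spin_unlock (&mutex->resource->lock, &ctxmt);
        return;
    }

    spin_unlock (&mutex->resource->lock, &ctxmt);

    DEBUG ("proc->pid=%d\n", proc->pid);
    proc_suspend (proc, &mutex->suspension_q);   /* sleep until an unlock resumes us */

    goto try;                                    /* re-check ownership after resume */
}
```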
@@ -52,11 +53,10 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
     spin_lock (&mutex->suspension_q.lock, &ctxsq);
 
     struct proc* resumed_proc = NULL;
-    struct rb_node_link* node;
-    rbtree_first (&mutex->suspension_q.proc_tree, node);
+    struct list_node_link* node = mutex->suspension_q.proc_list;
 
     if (node) {
-        resumed_proc = rbtree_entry (node, struct proc, suspension_link);
+        resumed_proc = list_entry (node, struct proc, suspension_link);
         mutex->owner = resumed_proc;
         mutex->locked = true;
 
@@ -28,14 +28,6 @@
 
 #define SCHED_REAP_FREQ 10
 
-/*
- * Lock hierachy:
- * - proc_tree_lock
- * - cpu->lock
- * - proc->lock
- * - suspension_q->lock
- */
-
 static struct rb_node_link* proc_tree = NULL;
 static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
 
@@ -217,49 +209,46 @@ void proc_register (struct proc* proc, struct cpu* cpu) {
 
     proc->cpu = cpu;
 
+    rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
+    rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+
     spin_lock (&cpu->lock, &ctxcpu);
-    rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
+    list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
 
     if (cpu->proc_current == NULL)
         cpu->proc_current = proc;
 
     spin_unlock (&cpu->lock, &ctxcpu);
-
-    rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-    rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
 }
 
 /* caller holds cpu->lock */
 static struct proc* proc_find_sched (struct cpu* cpu) {
-    struct rb_node_link* node = NULL;
-    struct proc* current = cpu->proc_current;
-    struct proc* proc = NULL;
-
-    if (current)
-        rbtree_next (&current->cpu_run_q_link, node);
-
-    if (!node)
-        rbtree_first (&cpu->proc_run_q, node);
-
-    if (!node)
+    if (!cpu->proc_run_q)
         return NULL;
 
-    struct rb_node_link* first = node;
+    struct list_node_link *current, *start;
+
+    if (cpu->proc_current)
+        current = cpu->proc_current->cpu_run_q_link.next;
+    else
+        current = cpu->proc_run_q;
+
+    if (!current)
+        current = cpu->proc_run_q;
+
+    start = current;
 
     do {
-        proc = rbtree_entry (node, struct proc, cpu_run_q_link);
+        struct proc* proc = list_entry (current, struct proc, cpu_run_q_link);
 
         if (atomic_load (&proc->state) == PROC_READY)
             return proc;
 
-        rbtree_next (node, node);
-        if (!node)
-            rbtree_first (&cpu->proc_run_q, node);
-
-    } while (node != first);
-
-    return ((atomic_load (&current->state) == PROC_READY) ? current : NULL);
+        current = current->next ? current->next : cpu->proc_run_q;
+    } while (current != start);
+
+    return NULL;
 }
 
 static void proc_reap (void) {

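The rewritten proc_find_sched walks the run queue as a circular list: it starts just after proc_current, wraps from the tail back to the head, and gives up after exactly one lap. A standalone toy program illustrating the same wrap-around walk (not kernel code):

```c
/* Toy, user-space illustration of the wrap-around walk used by proc_find_sched. */
#include <stdio.h>

struct node { int id; struct node* next; };

static struct node* next_or_head (struct node* n, struct node* head) {
    return n->next ? n->next : head;   /* same wrap rule as the kernel loop */
}

int main (void) {
    struct node c = { 3, NULL };
    struct node b = { 2, &c };
    struct node a = { 1, &b };
    struct node* head = &a;

    struct node* start = next_or_head (&b, head); /* "current" was b, so start at c */
    struct node* cur = start;
    do {
        printf ("visit %d\n", cur->id);           /* prints 3, 1, 2 */
        cur = next_or_head (cur, head);
    } while (cur != start);
    return 0;
}
```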
@@ -281,9 +270,8 @@ static void proc_reap (void) {
         if (atomic_load (&proc->state) == PROC_DEAD) {
             spin_lock (&proc->lock, &ctxpr);
             rbtree_delete (&proc_tree, &proc->proc_tree_link);
-            spin_unlock (&proc->lock, &ctxpr);
-
             list_append (reap_list, &proc->reap_link);
+            spin_unlock (&proc->lock, &ctxpr);
         }
 
         node = next;
@@ -318,7 +306,7 @@ void proc_sched (void* regs) {
 
     if (prev != NULL) {
         spin_lock (&prev->lock, &ctxpr);
-        prev->pdata.regs = *(struct saved_regs*)regs;
+        memcpy (&prev->pdata.regs, regs, sizeof (struct saved_regs));
         spin_unlock (&prev->lock, &ctxpr);
     }
 
@@ -347,7 +335,7 @@ void proc_kill (struct proc* proc) {
 
     spin_lock (&cpu->lock, &ctxcpu);
 
-    rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
+    list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
     if (cpu->proc_current == proc)
         cpu->proc_current = NULL;
 
@@ -370,13 +358,13 @@ void proc_suspend (struct proc* proc, struct proc_suspension_q* sq) {
     /* remove from run q */
     spin_lock (&cpu->lock, &ctxcpu);
 
-    rbtree_delete (&cpu->proc_run_q, &proc->cpu_run_q_link);
+    list_remove (cpu->proc_run_q, &proc->cpu_run_q_link);
     if (cpu->proc_current == proc)
         cpu->proc_current = NULL;
     spin_unlock (&cpu->lock, &ctxcpu);
 
     spin_lock (&sq->lock, &ctxsq);
-    rbtree_insert (struct proc, &sq->proc_tree, &proc->suspension_link, suspension_link, pid);
+    list_append (sq->proc_list, &proc->suspension_link);
     spin_unlock (&sq->lock, &ctxsq);
 
     cpu_request_sched (cpu);
@@ -388,7 +376,7 @@ void proc_resume (struct proc* proc) {
     struct proc_suspension_q* sq = proc->suspension_q;
 
     spin_lock (&sq->lock, &ctxsq);
-    rbtree_delete (&sq->proc_tree, &proc->suspension_link);
+    list_remove (sq->proc_list, &proc->suspension_link);
     spin_unlock (&sq->lock, &ctxsq);
 
     spin_lock (&proc->lock, &ctxpr);
@@ -397,7 +385,7 @@ void proc_resume (struct proc* proc) {
     spin_unlock (&proc->lock, &ctxpr);
 
     spin_lock (&cpu->lock, &ctxcpu);
-    rbtree_insert (struct proc, &cpu->proc_run_q, &proc->cpu_run_q_link, cpu_run_q_link, pid);
+    list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
    spin_unlock (&cpu->lock, &ctxcpu);
 
     cpu_request_sched (cpu);

@@ -45,8 +45,8 @@ struct proc_resources {
 struct proc {
     int pid;
     struct rb_node_link proc_tree_link;
-    struct rb_node_link cpu_run_q_link;
-    struct rb_node_link suspension_link;
+    struct list_node_link cpu_run_q_link;
+    struct list_node_link suspension_link;
     struct list_node_link reap_link;
 
     struct list_node_link* mappings; /* pd.lock implicitly protects this field */

@@ -43,11 +43,11 @@ void proc_cleanup_resources (struct proc* proc) {
 void proc_drop_resource (struct proc* proc, struct proc_resource* resource, bool lock) {
     spin_lock_ctx_t ctxrs;
 
+    if (atomic_fetch_sub (&resource->refs, 1) == 1) {
     DEBUG ("resource=%p created_by=%d vis=%d type=%d rid=%d refs=%d\n", resource,
            resource->created_by_pid, resource->visibility, resource->type, resource->rid,
            atomic_load (&resource->refs));
 
-    if (atomic_fetch_sub (&resource->refs, 1) == 1) {
         switch (resource->visibility) {
             case RV_PRIVATE: {
                 if (lock)

@@ -1,11 +1,11 @@
 #ifndef _KERNEL_PROC_SUSPENTION_Q_H
 #define _KERNEL_PROC_SUSPENTION_Q_H
 
-#include <libk/rbtree.h>
+#include <libk/list.h>
 #include <sync/spin_lock.h>
 
 struct proc_suspension_q {
-    struct rb_node_link* proc_tree;
+    struct list_node_link* proc_list;
     spin_lock_t lock;
 };
 

@@ -177,12 +177,84 @@ DEFINE_SYSCALL (sys_clone) {
     return pid;
 }
 
-/* int proc_sched (void) */
+/* int sched (void) */
 DEFINE_SYSCALL (sys_sched) {
     proc_sched (regs);
     return ST_OK;
 }
 
+/* int create_mutex (int mutex_rid, int vis) */
+DEFINE_SYSCALL (sys_create_mutex) {
+    int mutex_rid = (int)a1;
+    int vis = (int)a2;
+
+    if (mutex_rid < 0)
+        return -ST_BAD_RESOURCE;
+
+    if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
+        return -ST_BAD_RESOURCE;
+
+    struct proc_resource* mutex_resource =
+        proc_create_resource (proc, mutex_rid, PR_MUTEX, vis, NULL);
+
+    if (mutex_resource == NULL)
+        return -ST_OOM_ERROR;
+
+    return mutex_resource->rid;
+}
+
+/* int unlink_mutex (int mutex_rid, int vis) */
+DEFINE_SYSCALL (sys_unlink_mutex) {
+    int mutex_rid = (int)a1;
+    int vis = (int)a2;
+
+    if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
+        return -ST_BAD_RESOURCE;
+
+    struct proc_resource* mutex_resource = proc_find_resource (proc, mutex_rid, vis);
+
+    if (mutex_resource == NULL)
+        return -ST_NOT_FOUND;
+
+    proc_drop_resource (proc, mutex_resource, true);
+
+    return ST_OK;
+}
+
+/* int lock_mutex (int mutex_rid, int vis) */
+DEFINE_SYSCALL (sys_lock_mutex) {
+    int mutex_rid = (int)a1;
+    int vis = (int)a2;
+
+    if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
+        return -ST_BAD_RESOURCE;
+
+    struct proc_resource* mutex_resource = proc_find_resource (proc, mutex_rid, vis);
+
+    if (mutex_resource == NULL)
+        return -ST_NOT_FOUND;
+
+    proc_mutex_lock (proc, &mutex_resource->u.mutex);
+
+    return ST_OK;
+}
+
+/* int unlock_mutex (int mutex_rid, int vis) */
+DEFINE_SYSCALL (sys_unlock_mutex) {
+    int mutex_rid = (int)a1;
+    int vis = (int)a2;
+
+    if (!(vis == RV_PUBLIC || vis == RV_PRIVATE))
+        return -ST_BAD_RESOURCE;
+
+    struct proc_resource* mutex_resource = proc_find_resource (proc, mutex_rid, vis);
+
+    if (mutex_resource == NULL)
+        return -ST_NOT_FOUND;
+
+    return proc_mutex_unlock (proc, &mutex_resource->u.mutex) ? ST_OK : -ST_PERMISSION_ERROR;
+}
+
 static syscall_handler_func_t handler_table[] = {
     [SYS_QUIT] = &sys_quit,
     [SYS_TEST] = &sys_test,

@@ -192,6 +264,10 @@ static syscall_handler_func_t handler_table[] = {
     [SYS_SCHED] = &sys_sched,
     [SYS_CREATE_MEM] = &sys_create_mem,
     [SYS_UNLINK_MEM] = &sys_unlink_mem,
+    [SYS_CREATE_MUTEX] = &sys_create_mutex,
+    [SYS_UNLINK_MUTEX] = &sys_unlink_mutex,
+    [SYS_LOCK_MUTEX] = &sys_lock_mutex,
+    [SYS_UNLOCK_MUTEX] = &sys_unlock_mutex,
 };
 
 syscall_handler_func_t syscall_find_handler (int syscall_num) {

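handler_table uses designated initializers, so any syscall number without an entry is simply a NULL slot. The body of syscall_find_handler is not shown in this compare; a lookup consistent with the table above might look like the following (a hedged sketch, not the repository's actual code):

```c
/* Hedged sketch only; the real syscall_find_handler is outside this diff. */
syscall_handler_func_t syscall_find_handler (int syscall_num) {
    if (syscall_num < 0
        || syscall_num >= (int) (sizeof (handler_table) / sizeof (handler_table[0])))
        return NULL;

    return handler_table[syscall_num]; /* NULL for unassigned slots */
}
```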
@@ -4,14 +4,18 @@
 #include <alloc/liballoc.h>
 #include <m/system.h>
 
+#define LIBALLOC_MUTEX 500
+
 static uintptr_t liballoc_map_base = PROC_MAP_BASE;
 static int mem_rid_base = 1000000;
 
-void liballoc_init (void) {}
+void liballoc_init (void) { create_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
 
-int liballoc_lock (void) { return 0; }
+void liballoc_deinit (void) { unlink_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
 
-int liballoc_unlock (void) { return 0; }
+int liballoc_lock (void) { return lock_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
+
+int liballoc_unlock (void) { return unlock_mutex (LIBALLOC_MUTEX, RV_PRIVATE); }
 
 void* liballoc_alloc (int pages, int* mem_rid) {
     uintptr_t current_base = liballoc_map_base;

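With this change every malloc/free in user space brackets the allocator's bookkeeping with the private LIBALLOC_MUTEX instead of the old no-op hooks. liballoc's internals are not part of this compare; the expected hook ordering, roughly, and purely as an illustration:

```c
/* Illustrative only: how liballoc is expected to drive the hooks above.
 * toy_alloc_path is a made-up name, not liballoc's real allocation routine. */
#include <stddef.h>

void* toy_alloc_path (size_t bytes) {
    if (liballoc_lock () != 0)     /* now lock_mutex (LIBALLOC_MUTEX, RV_PRIVATE) */
        return NULL;

    void* block = NULL;
    /* ... allocator bookkeeping, possibly calling liballoc_alloc () ... */
    (void) bytes;

    liballoc_unlock ();            /* now unlock_mutex (LIBALLOC_MUTEX, RV_PRIVATE) */
    return block;
}
```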
@@ -87,6 +87,7 @@ void* calloc (size_t, size_t); //< The standard function.
 void free (void*); //< The standard function.
 
 void liballoc_init (void);
+void liballoc_deinit (void);
 
 #ifdef __cplusplus
 }

@@ -17,8 +17,7 @@ static void clear_bss (void) {
 void __premain (void) {
     clear_bss ();
     liballoc_init ();
-
     app_main ();
-
+    liballoc_deinit ();
     quit ();
 }

@@ -9,10 +9,12 @@
 
 #define do_syscall(...) do_syscall1 (__VA_ARGS__, 0, 0, 0, 0, 0, 0)
 
-int quit (void) { return do_syscall (SYS_QUIT); }
+int quit (void) { return do_syscall (SYS_QUIT, 0); }
 
 int test (char c) { return do_syscall (SYS_TEST, c); }
 
+int sched (void) { return do_syscall (SYS_SCHED, 0); }
+
 int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags) {
     return do_syscall (SYS_MAP, mem_rid, vis, vaddr, flags);
 }

@@ -30,3 +32,11 @@ int unlink_mem (int mem_rid, int vis, size_t pages) {
 int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void)) {
     return do_syscall (SYS_CLONE, vstack_top, stack_size, entry);
 }
+
+int create_mutex (int mutex_rid, int vis) { return do_syscall (SYS_CREATE_MUTEX, mutex_rid, vis); }
+
+int unlink_mutex (int mutex_rid, int vis) { return do_syscall (SYS_UNLINK_MUTEX, mutex_rid, vis); }
+
+int lock_mutex (int mutex_rid, int vis) { return do_syscall (SYS_LOCK_MUTEX, mutex_rid, vis); }
+
+int unlock_mutex (int mutex_rid, int vis) { return do_syscall (SYS_UNLOCK_MUTEX, mutex_rid, vis); }

@@ -19,10 +19,15 @@
 
 int quit (void);
 int test (char c);
+int sched (void);
 int map (int mem_rid, int vis, uintptr_t vaddr, uint32_t flags);
 int unmap (uintptr_t vaddr, size_t pages);
 int create_mem (int mem_rid, int vis, size_t pages);
 int unlink_mem (int mem_rid, int vis, size_t pages);
 int clone (uintptr_t vstack_top, size_t stack_size, void (*entry) (void));
+int create_mutex (int mutex_rid, int vis);
+int unlink_mutex (int mutex_rid, int vis);
+int lock_mutex (int mutex_rid, int vis);
+int unlock_mutex (int mutex_rid, int vis);
 
 #endif // _LIBMSL_M_SYSTEM_H

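Putting the new user-space API together, a hedged end-to-end usage sketch (not code from this commit): the rid value 2000 and RV_PRIVATE follow the EXAMPLE 2 code in init/init.c from this same compare, and RV_PRIVATE is assumed to be visible to user code, as it is there; the error codes are the ones checked in the kernel handlers above.

```c
#include <m/system.h>

/* Hedged usage sketch of the new wrappers; mutex_demo is a made-up function. */
void mutex_demo (void) {
    int rid = create_mutex (2000, RV_PRIVATE);
    if (rid < 0)
        return;                      /* -ST_BAD_RESOURCE or -ST_OOM_ERROR */

    lock_mutex (rid, RV_PRIVATE);    /* may suspend until the current owner unlocks */
    /* ... critical section ... */
    unlock_mutex (rid, RV_PRIVATE);

    unlink_mutex (rid, RV_PRIVATE);  /* drop the process's reference to the mutex */
}
```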