Implement userspace TLS, remove RW Locks
@@ -13,7 +13,6 @@
 #include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <rd/rd.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
@@ -29,7 +28,7 @@
 #define SCHED_REAP_FREQ 10
 
 static struct rb_node_link* proc_tree = NULL;
-static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
 
 static atomic_int sched_cycles = 0;
 
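Note on the locking API: every rw_spin_* call in these files is replaced by the plain spin_lock/spin_unlock pair, so former readers such as proc_find_pid now serialize with writers; the locking gets simpler at the cost of some read concurrency. The kernel's real definitions live in <sync/spin_lock.h> and are not shown in this diff; the sketch below only illustrates the general shape assumed here (a test-and-set lock taking a per-call context, with interrupt-state handling omitted).

#include <stdatomic.h>

/* Rough sketch only, not the kernel's actual <sync/spin_lock.h>. In a real kernel
 * the ctx argument would typically also save and restore the caller's interrupt
 * state; that part is elided here. */
typedef struct { unsigned long saved_flags; } spin_lock_ctx_t;
typedef struct { atomic_flag locked; } spin_lock_t;
#define SPIN_LOCK_INIT { ATOMIC_FLAG_INIT }

static inline void spin_lock (spin_lock_t* l, spin_lock_ctx_t* ctx) {
    (void)ctx;                                   /* interrupt save elided in this sketch */
    while (atomic_flag_test_and_set_explicit (&l->locked, memory_order_acquire))
        ;                                        /* busy-wait until the lock is free */
}

static inline void spin_unlock (spin_lock_t* l, spin_lock_ctx_t* ctx) {
    (void)ctx;
    atomic_flag_clear_explicit (&l->locked, memory_order_release);
}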
@@ -75,6 +74,23 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
                     (void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
         } break;
+        case PT_TLS: {
+#if defined(__x86_64__)
+            size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
+            size_t tls_size = phdr->p_memsz;
+            size_t tls_total_needed = tls_size + sizeof (uintptr_t);
+            size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
+            proc->procgroup->tls.tls_tmpl_pages = blks;
+            proc->procgroup->tls.tls_tmpl_size = tls_size;
+            proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
+
+            proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
+            memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
+
+            memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
+                    phdr->p_filesz);
+
+            proc_init_tls (proc);
+#endif
+        } break;
         }
     }
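The extra sizeof (uintptr_t) reserved in tls_total_needed points at the usual x86_64 (variant II) TLS layout: the word at the thread pointer holds its own address, and the TLS image sits immediately below it. proc_init_tls is not part of this diff; the sketch below, with hypothetical names, only illustrates that layout, not the kernel's actual implementation (which would place the block in user memory and load the resulting value into FS.base).

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustration only: build one TLS block from a zero-padded PT_TLS template and
 * return the thread-pointer value. Names and allocation strategy are assumptions. */
static uintptr_t tls_block_init (const void* tmpl, size_t tmpl_size) {
    uint8_t* block = calloc (1, tmpl_size + sizeof (uintptr_t));
    memcpy (block, tmpl, tmpl_size);       /* .tdata/.tbss image goes below the TP */

    uintptr_t tp = (uintptr_t)block + tmpl_size;
    *(uintptr_t*)tp = tp;                  /* the word at %fs:0 holds the TP itself */
    return tp;
}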
@@ -86,7 +102,6 @@ struct proc* proc_spawn_rd (char* name) {
     struct rd_file* rd_file = rd_get_file (name);
 
     bool ok = proc_check_elf (rd_file->content);
     DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
 
     if (!ok)
         return NULL;
@@ -98,9 +113,9 @@ struct proc* proc_find_pid (int pid) {
     spin_lock_ctx_t ctxprtr;
     struct proc* proc = NULL;
 
-    rw_spin_read_lock (&proc_tree_lock, &ctxprtr);
+    spin_lock (&proc_tree_lock, &ctxprtr);
     rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
-    rw_spin_read_unlock (&proc_tree_lock, &ctxprtr);
+    spin_unlock (&proc_tree_lock, &ctxprtr);
 
     return proc;
 }
@@ -109,21 +124,20 @@ void proc_register (struct proc* proc, struct cpu* cpu1) {
     spin_lock_ctx_t ctxcpu, ctxprtr;
 
     proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
     DEBUG ("Assigning CPU %d to PID %d\n", proc->cpu->id, proc->pid);
 
     struct cpu* cpu = proc->cpu;
 
-    rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-    rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
-
+    spin_lock (&proc_tree_lock, &ctxprtr);
     spin_lock (&cpu->lock, &ctxcpu);
-    list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
-    atomic_fetch_add (&cpu->proc_run_q_count, 1);
 
+    rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
 
+    atomic_fetch_add (&cpu->proc_run_q_count, 1);
+    list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
     if (cpu->proc_current == NULL)
         cpu->proc_current = proc;
 
+    spin_unlock (&proc_tree_lock, &ctxprtr);
     spin_unlock (&cpu->lock, &ctxcpu);
 }
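With the RW lock gone, the tree insert and the run-queue update above now sit in one critical section under both proc_tree_lock and cpu->lock. Two plain spin locks cannot deadlock against each other as long as every path acquires them in the same order, tree lock first, per-CPU lock second, which is the order used here. A minimal sketch of that rule, reusing the lock calls from this diff and assuming the kernel's own headers for the proc and cpu types:

/* Sketch, not in this commit: any path that needs both locks should follow this order. */
static void with_tree_and_cpu_locked (struct cpu* cpu) {
    spin_lock_ctx_t ctxprtr, ctxcpu;

    spin_lock (&proc_tree_lock, &ctxprtr);   /* 1st: global process tree   */
    spin_lock (&cpu->lock, &ctxcpu);         /* 2nd: this CPU's run queue  */

    /* ... rbtree_insert / list_append / counter updates go here ... */

    spin_unlock (&proc_tree_lock, &ctxprtr);
    spin_unlock (&cpu->lock, &ctxcpu);
}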
@@ -162,7 +176,7 @@ static void proc_reap (void) {
     spin_lock_ctx_t ctxprtr;
     spin_lock_ctx_t ctxpr;
 
-    rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+    spin_lock (&proc_tree_lock, &ctxprtr);
 
     struct rb_node_link* node;
     rbtree_first (&proc_tree, node);
@@ -182,7 +196,7 @@ static void proc_reap (void) {
         node = next;
     }
 
-    rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+    spin_unlock (&proc_tree_lock, &ctxprtr);
 
     struct list_node_link *reap_link, *reap_link_tmp;
     list_foreach (reap_list, reap_link, reap_link_tmp) {
@@ -9,7 +9,6 @@
 #include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <proc/suspension_q.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>
 
@@ -4,13 +4,12 @@
 #include <mm/pmm.h>
 #include <proc/proc.h>
 #include <proc/procgroup.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
 
 static struct rb_node_link* procgroup_tree = NULL;
-static rw_spin_lock_t procgroup_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t procgroup_tree_lock = SPIN_LOCK_INIT;
 static atomic_int pgids = 0;
 
 uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
@@ -143,10 +142,10 @@ struct procgroup* procgroup_create (void) {
     procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();
     procgroup->map_base = PROC_MAP_BASE;
 
-    rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
+    spin_lock (&procgroup_tree_lock, &ctxpgtr);
     rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
                    procgroup_tree_link, pgid);
-    rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);
+    spin_unlock (&procgroup_tree_lock, &ctxpgtr);
 
     return procgroup;
 }
@@ -160,7 +159,6 @@ void procgroup_attach (struct procgroup* procgroup, struct proc* proc) {
     rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
                    procgroup_memb_tree_link, pid);
     atomic_fetch_add (&procgroup->refs, 1);
     DEBUG ("procgrpup attach PID %d to PGID %d\n", proc->pid, procgroup->pgid);
 
     spin_unlock (&proc->lock, &ctxpr);
     spin_unlock (&procgroup->lock, &ctxpg);
@@ -174,19 +172,18 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
 
     rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
     int refs = atomic_fetch_sub (&procgroup->refs, 1);
     DEBUG ("procgrpup detach PID %d to PGID %d\n", proc->pid, procgroup->pgid);
 
     spin_unlock (&proc->lock, &ctxpr);
     spin_unlock (&procgroup->lock, &ctxpg);
 
     if (refs == 1) {
-        rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
+        spin_lock (&procgroup_tree_lock, &ctxpgtr);
         spin_lock (&procgroup->lock, &ctxpg);
 
         rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
 
         spin_unlock (&procgroup->lock, &ctxpg);
-        rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);
+        spin_unlock (&procgroup_tree_lock, &ctxpgtr);
 
         /* delete resources */
         struct rb_node_link* rnode;
@@ -214,6 +211,8 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
 
         pmm_free (procgroup->pd.cr3_paddr, 1);
 
+        free (procgroup->tls.tls_tmpl);
+
         free (procgroup);
     }
 }
@@ -7,6 +7,7 @@
 #include <proc/resource.h>
 #include <sync/spin_lock.h>
 #include <sys/mm.h>
+#include <sys/procgroup.h>
 
 struct proc;
 
@@ -29,6 +30,7 @@ struct procgroup {
     struct pd pd;
     struct list_node_link* mappings;
     uintptr_t map_base;
+    struct procgroup_tls tls;
 };
 
 struct procgroup* procgroup_create (void);
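For reference, the loader hunk earlier touches four fields of the new tls member. Their definition lives in <sys/procgroup.h>, which this diff does not include; a plausible shape, inferred only from the uses shown above:

#include <stddef.h>

/* Inferred from usage in proc_load_segments(); the real definition in
 * <sys/procgroup.h> is not part of this diff and may differ. */
struct procgroup_tls {
    void*  tls_tmpl;            /* master copy of the PT_TLS image, zero-padded   */
    size_t tls_tmpl_pages;      /* pages allocated for the template               */
    size_t tls_tmpl_size;       /* p_memsz of the PT_TLS segment                  */
    size_t tls_tmpl_total_size; /* p_memsz plus one uintptr_t for the TP word     */
};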