Implement userspace TLS, remove RW Locks

2026-01-28 23:52:48 +01:00
parent a3b62ebd3d
commit 3d23187acf
17 changed files with 135 additions and 157 deletions


@@ -13,7 +13,6 @@
 #include <proc/procgroup.h>
 #include <proc/resource.h>
 #include <rd/rd.h>
-#include <sync/rw_spin_lock.h>
 #include <sync/spin_lock.h>
 #include <sys/debug.h>
 #include <sys/mm.h>
@@ -29,7 +28,7 @@
 #define SCHED_REAP_FREQ 10
 static struct rb_node_link* proc_tree = NULL;
-static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
+static spin_lock_t proc_tree_lock = SPIN_LOCK_INIT;
 static atomic_int sched_cycles = 0;
@@ -75,6 +74,23 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
 				(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
 		} break;
+		case PT_TLS: {
+#if defined(__x86_64__)
+			size_t tls_align = phdr->p_align ? phdr->p_align : sizeof (uintptr_t);
+			size_t tls_size = phdr->p_memsz;
+			size_t tls_total_needed = tls_size + sizeof (uintptr_t);
+			size_t blks = div_align_up (tls_total_needed, PAGE_SIZE);
+			proc->procgroup->tls.tls_tmpl_pages = blks;
+			proc->procgroup->tls.tls_tmpl_size = tls_size;
+			proc->procgroup->tls.tls_tmpl_total_size = tls_total_needed;
+			proc->procgroup->tls.tls_tmpl = malloc (blks * PAGE_SIZE);
+			memset (proc->procgroup->tls.tls_tmpl, 0, blks * PAGE_SIZE);
+			memcpy (proc->procgroup->tls.tls_tmpl, (void*)((uintptr_t)elf + phdr->p_offset),
+				phdr->p_filesz);
+			proc_init_tls (proc);
+#endif
+		} break;
 	}
 }
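
Note on the PT_TLS layout: the extra sizeof (uintptr_t) reserved past p_memsz matches the x86-64 TLS ABI (variant II), where the thread pointer sits at the end of the TLS image and %fs:0 must hold its own address. tls_align is computed but unused in this hunk, presumably consumed by proc_init_tls, whose body is not part of this diff. A minimal sketch of what it could do, assuming the tls fields above and a hypothetical cpu_write_fs_base() wrapper around MSR_FS_BASE; a real implementation would map the area into the user address space and honor tls_align, both elided here:

	/* Sketch only: make a per-thread copy of the template, place the
	 * self-pointer just past the TLS data, and point the FS base at it
	 * so that %fs:0 == thread pointer (TLS variant II). */
	static void proc_init_tls_sketch (struct proc* proc) {
		size_t pages = proc->procgroup->tls.tls_tmpl_pages;
		uint8_t* area = malloc (pages * PAGE_SIZE);
		memcpy (area, proc->procgroup->tls.tls_tmpl,
			proc->procgroup->tls.tls_tmpl_total_size);
		uintptr_t* self = (uintptr_t*)(area + proc->procgroup->tls.tls_tmpl_size);
		*self = (uintptr_t)self;              /* variant II self-pointer */
		cpu_write_fs_base ((uintptr_t)self);  /* hypothetical MSR_FS_BASE write */
	}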
@@ -86,7 +102,6 @@ struct proc* proc_spawn_rd (char* name) {
 	struct rd_file* rd_file = rd_get_file (name);
 	bool ok = proc_check_elf (rd_file->content);
 	DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
 	if (!ok)
 		return NULL;
@@ -98,9 +113,9 @@ struct proc* proc_find_pid (int pid) {
 	spin_lock_ctx_t ctxprtr;
 	struct proc* proc = NULL;
-	rw_spin_read_lock (&proc_tree_lock, &ctxprtr);
+	spin_lock (&proc_tree_lock, &ctxprtr);
 	rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid);
-	rw_spin_read_unlock (&proc_tree_lock, &ctxprtr);
+	spin_unlock (&proc_tree_lock, &ctxprtr);
 	return proc;
 }
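
With the RW lock gone, concurrent proc_find_pid readers now serialize on proc_tree_lock instead of running in parallel. The ctx argument threaded through every call suggests the lock saves and restores interrupt state; a sketch of that assumed contract, not this kernel's actual implementation (irq_save_disable and irq_restore are hypothetical helpers):

	#include <stdatomic.h>

	typedef struct { atomic_flag flag; } spin_lock_t;  /* assumed layout */
	typedef unsigned long spin_lock_ctx_t;             /* assumed: saved IRQ flags */

	void spin_lock (spin_lock_t* l, spin_lock_ctx_t* ctx) {
		*ctx = irq_save_disable ();  /* hypothetical: disable IRQs, return old state */
		while (atomic_flag_test_and_set_explicit (&l->flag, memory_order_acquire))
			;  /* spin until the holder releases */
	}

	void spin_unlock (spin_lock_t* l, spin_lock_ctx_t* ctx) {
		atomic_flag_clear_explicit (&l->flag, memory_order_release);
		irq_restore (*ctx);  /* hypothetical: restore saved IRQ state */
	}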
@@ -109,21 +124,20 @@ void proc_register (struct proc* proc, struct cpu* cpu1) {
 	spin_lock_ctx_t ctxcpu, ctxprtr;
 	proc->cpu = cpu1 != NULL ? cpu1 : cpu_find_lightest ();
 	DEBUG ("Assigning CPU %d to PID %d\n", proc->cpu->id, proc->pid);
 	struct cpu* cpu = proc->cpu;
-	rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
-	rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
-	rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+	spin_lock (&proc_tree_lock, &ctxprtr);
 	spin_lock (&cpu->lock, &ctxcpu);
-	list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
-	atomic_fetch_add (&cpu->proc_run_q_count, 1);
+	rbtree_insert (struct proc, &proc_tree, &proc->proc_tree_link, proc_tree_link, pid);
+	atomic_fetch_add (&cpu->proc_run_q_count, 1);
+	list_append (cpu->proc_run_q, &proc->cpu_run_q_link);
 	if (cpu->proc_current == NULL)
 		cpu->proc_current = proc;
+	spin_unlock (&proc_tree_lock, &ctxprtr);
 	spin_unlock (&cpu->lock, &ctxcpu);
 }
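
The reordering above also changes lock nesting: proc_tree_lock is now acquired before cpu->lock, and both are held while the tree and run queue are updated together, so registration is atomic with respect to lookups. Any other path that takes both locks must follow the same order to avoid an AB-BA deadlock; a sketch with a hypothetical helper that is not part of this commit:

	/* Sketch: same lock order as proc_register above. */
	static void proc_requeue_sketch (struct proc* proc, struct cpu* dst) {
		spin_lock_ctx_t ctxprtr, ctxcpu;
		spin_lock (&proc_tree_lock, &ctxprtr);  /* 1st: global tree lock */
		spin_lock (&dst->lock, &ctxcpu);        /* 2nd: per-CPU lock */
		/* ... move proc between run queues, keep the tree consistent ... */
		spin_unlock (&proc_tree_lock, &ctxprtr);
		spin_unlock (&dst->lock, &ctxcpu);
	}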
@@ -162,7 +176,7 @@ static void proc_reap (void) {
 	spin_lock_ctx_t ctxprtr;
 	spin_lock_ctx_t ctxpr;
-	rw_spin_write_lock (&proc_tree_lock, &ctxprtr);
+	spin_lock (&proc_tree_lock, &ctxprtr);
 	struct rb_node_link* node;
 	rbtree_first (&proc_tree, node);
@@ -182,7 +196,7 @@
 		node = next;
 	}
-	rw_spin_write_unlock (&proc_tree_lock, &ctxprtr);
+	spin_unlock (&proc_tree_lock, &ctxprtr);
 	struct list_node_link *reap_link, *reap_link_tmp;
 	list_foreach (reap_list, reap_link, reap_link_tmp) {