diff --git a/kernel/amd64/apic.c b/kernel/amd64/apic.c index 0c45ae0..56f0ea7 100644 --- a/kernel/amd64/apic.c +++ b/kernel/amd64/apic.c @@ -57,23 +57,19 @@ static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT; /* Read IOAPIC */ static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) { - spin_lock_ctx_t ctxioar; - - spin_lock (&ioapic->lock, &ctxioar); + spin_lock (&ioapic->lock); *(volatile uint32_t*)ioapic->mmio_base = reg; uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10); - spin_unlock (&ioapic->lock, &ctxioar); + spin_unlock (&ioapic->lock); return ret; } /* Write IOAPIC */ static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) { - spin_lock_ctx_t ctxioaw; - - spin_lock (&ioapic->lock, &ctxioaw); + spin_lock (&ioapic->lock); *(volatile uint32_t*)ioapic->mmio_base = reg; *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value; - spin_unlock (&ioapic->lock, &ctxioaw); + spin_unlock (&ioapic->lock); } /* Find an IOAPIC corresposting to provided IRQ */ @@ -203,9 +199,7 @@ void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); } * us - Period length in microseconds */ static uint32_t amd64_lapic_calibrate (uint32_t us) { - spin_lock_ctx_t ctxlacb; - - spin_lock (&lapic_calibration_lock, &ctxlacb); + spin_lock (&lapic_calibration_lock); amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE); @@ -218,7 +212,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) { uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT); DEBUG ("timer ticks = %u\n", ticks); - spin_unlock (&lapic_calibration_lock, &ctxlacb); + spin_unlock (&lapic_calibration_lock); return ticks; } diff --git a/kernel/amd64/debug.c b/kernel/amd64/debug.c index aa0381d..a311973 100644 --- a/kernel/amd64/debug.c +++ b/kernel/amd64/debug.c @@ -35,8 +35,6 @@ static void amd64_debug_serial_write (char x) { * Formatted printing to serial. serial_lock ensures that all prints are atomic. */ void debugprintf (const char* fmt, ...) 
{ - spin_lock_ctx_t ctxdbgp; - if (!debug_init) return; @@ -52,14 +50,14 @@ void debugprintf (const char* fmt, ...) { const char* p = buffer; - spin_lock (&serial_lock, &ctxdbgp); + spin_lock (&serial_lock); while (*p) { amd64_debug_serial_write (*p); p++; } - spin_unlock (&serial_lock, &ctxdbgp); + spin_unlock (&serial_lock); } /* Initialize serial */ diff --git a/kernel/amd64/hpet.c b/kernel/amd64/hpet.c index 14d84c3..f99986b 100644 --- a/kernel/amd64/hpet.c +++ b/kernel/amd64/hpet.c @@ -60,9 +60,7 @@ static void amd64_hpet_write32 (uint32_t reg, uint32_t value) { static uint64_t amd64_hpet_read_counter (void) { uint64_t value; - spin_lock_ctx_t ctxhrc; - - spin_lock (&hpet_lock, &ctxhrc); + spin_lock (&hpet_lock); if (!hpet_32bits) value = amd64_hpet_read64 (HPET_MCVR); @@ -77,15 +75,13 @@ static uint64_t amd64_hpet_read_counter (void) { value = ((uint64_t)hi1 << 32) | lo; } - spin_unlock (&hpet_lock, &ctxhrc); + spin_unlock (&hpet_lock); return value; } static void amd64_hpet_write_counter (uint64_t value) { - spin_lock_ctx_t ctxhwc; - - spin_lock (&hpet_lock, &ctxhwc); + spin_lock (&hpet_lock); if (!hpet_32bits) amd64_hpet_write64 (HPET_MCVR, value); @@ -94,7 +90,7 @@ static void amd64_hpet_write_counter (uint64_t value) { amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32)); } - spin_unlock (&hpet_lock, &ctxhwc); + spin_unlock (&hpet_lock); } /* Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being diff --git a/kernel/amd64/intr.c b/kernel/amd64/intr.c index 4c4f004..ae8f472 100644 --- a/kernel/amd64/intr.c +++ b/kernel/amd64/intr.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -167,20 +166,18 @@ static void amd64_intr_exception (struct saved_regs* regs) { /* Handle incoming interrupt, dispatch IRQ handlers. 
*/ void amd64_intr_handler (void* stack_ptr) { - spin_lock_ctx_t ctxcpu, ctxpr; - amd64_load_kernel_cr3 (); struct saved_regs* regs = stack_ptr; - spin_lock (&thiscpu->lock, &ctxcpu); + spin_lock (&thiscpu->lock); struct proc* proc_current = thiscpu->proc_current; - spin_lock (&proc_current->lock, &ctxpr); + spin_lock (&proc_current->lock); memcpy (&proc_current->pdata.regs, regs, sizeof (struct saved_regs)); - spin_unlock (&proc_current->lock, &ctxpr); - spin_unlock (&thiscpu->lock, &ctxcpu); + spin_unlock (&proc_current->lock); + spin_unlock (&thiscpu->lock); if (regs->trap <= 31) { amd64_intr_exception (regs); @@ -200,24 +197,3 @@ void amd64_intr_init (void) { amd64_init_pic (); amd64_idt_init (); } - -/* Aux. */ - -/* Save RFLAGS of the current CPU */ -static uint64_t amd64_irq_save_flags (void) { - uint64_t rflags; - __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc"); - return rflags; -} - -/* Restore interrupts (IF bit) from RFLAGS */ -static void amd64_irq_restore_flags (uint64_t rflags) { - if (rflags & (1ULL << 9)) - __asm__ volatile ("sti"); -} - -/* Save current interrupt state */ -void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); } - -/* Restore interrupt state */ -void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); } diff --git a/kernel/amd64/mm.c b/kernel/amd64/mm.c index d5f5b4e..c91d015 100644 --- a/kernel/amd64/mm.c +++ b/kernel/amd64/mm.c @@ -25,9 +25,9 @@ struct pg_index { static struct pd kernel_pd; static spin_lock_t kernel_pd_lock; -void mm_kernel_lock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); } +void mm_kernel_lock (void) { spin_lock (&kernel_pd_lock); } -void mm_kernel_unlock (spin_lock_ctx_t* ctx) { spin_lock (&kernel_pd_lock, ctx); } +void mm_kernel_unlock (void) { spin_unlock (&kernel_pd_lock); } /* Get current value of CR3 register */ static uintptr_t amd64_current_cr3 (void) { diff --git a/kernel/amd64/proc.c b/kernel/amd64/proc.c index bf4c139..48df073 
100644 --- a/kernel/amd64/proc.c +++ b/kernel/amd64/proc.c @@ -61,7 +61,6 @@ struct proc* proc_from_elf (uint8_t* elf_contents) { struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t entry, uintptr_t argument_ptr) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; - spin_lock_ctx_t ctxprt; struct proc* proc = malloc (sizeof (*proc)); if (proc == NULL) @@ -73,12 +72,12 @@ struct proc* proc_clone (struct proc* proto, uintptr_t vstack_top, uintptr_t ent atomic_store (&proc->state, PROC_READY); proc->pid = atomic_fetch_add (&pids, 1); - spin_lock (&proto->lock, &ctxprt); + spin_lock (&proto->lock); proc->procgroup = proto->procgroup; procgroup_attach (proc->procgroup, proc); - spin_unlock (&proto->lock, &ctxprt); + spin_unlock (&proto->lock); uintptr_t kstack_paddr = pmm_alloc (KSTACK_SIZE / PAGE_SIZE); proc->pdata.kernel_stack = kstack_paddr + (uintptr_t)hhdm->offset + KSTACK_SIZE; diff --git a/kernel/amd64/sched1.c b/kernel/amd64/sched1.c index 903f83e..d642165 100644 --- a/kernel/amd64/sched1.c +++ b/kernel/amd64/sched1.c @@ -7,17 +7,15 @@ #include #include -void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu) { - spin_lock_ctx_t ctxpr; - - spin_lock (&proc->lock, &ctxpr); +void do_sched (struct proc* proc, spin_lock_t* cpu_lock) { + spin_lock (&proc->lock); thiscpu->tss.rsp0 = proc->pdata.kernel_stack; thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack; amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base); - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (cpu_lock, ctxcpu); + spin_unlock (&proc->lock); + spin_unlock (cpu_lock); amd64_do_sched ((void*)&proc->pdata.regs, (void*)proc->procgroup->pd.cr3_paddr); } diff --git a/kernel/amd64/smp.c b/kernel/amd64/smp.c index a825915..73bc217 100644 --- a/kernel/amd64/smp.c +++ b/kernel/amd64/smp.c @@ -91,9 +91,8 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) { struct cpu* spin_cpu = thiscpu; proc_register (spin_proc, &spin_cpu); - 
spin_lock_ctx_t ctxcpu; - spin_lock (&spin_proc->cpu->lock, &ctxcpu); - do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu); + spin_lock (&spin_proc->cpu->lock); + do_sched (spin_proc, &spin_proc->cpu->lock); } /// Initialize SMP subsystem for AMD64. Start AP CPUs diff --git a/kernel/amd64/syscall.c b/kernel/amd64/syscall.c index f9c5275..27d1517 100644 --- a/kernel/amd64/syscall.c +++ b/kernel/amd64/syscall.c @@ -14,20 +14,18 @@ extern void amd64_syscall_entry (void); uintptr_t amd64_syscall_dispatch (void* stack_ptr) { - spin_lock_ctx_t ctxcpu, ctxpr; - amd64_load_kernel_cr3 (); struct saved_regs* regs = stack_ptr; - spin_lock (&thiscpu->lock, &ctxcpu); + spin_lock (&thiscpu->lock); struct proc* caller = thiscpu->proc_current; int caller_pid = caller->pid; - spin_lock (&caller->lock, &ctxpr); + spin_lock (&caller->lock); memcpy (&caller->pdata.regs, regs, sizeof (struct saved_regs)); - spin_unlock (&caller->lock, &ctxpr); - spin_unlock (&thiscpu->lock, &ctxcpu); + spin_unlock (&caller->lock); + spin_unlock (&thiscpu->lock); int syscall_num = regs->rax; syscall_handler_func_t func = syscall_find_handler (syscall_num); @@ -45,9 +43,9 @@ uintptr_t amd64_syscall_dispatch (void* stack_ptr) { caller = proc_find_pid (caller_pid); if (caller != NULL) { - spin_lock (&caller->lock, &ctxpr); + spin_lock (&caller->lock); caller->pdata.regs.rax = r; - spin_unlock (&caller->lock, &ctxpr); + spin_unlock (&caller->lock); } if (reschedule) diff --git a/kernel/irq/irq.c b/kernel/irq/irq.c index b145e39..36e0f9c 100644 --- a/kernel/irq/irq.c +++ b/kernel/irq/irq.c @@ -15,8 +15,6 @@ struct irq* irq_table[0x100]; static spin_lock_t irqs_lock = SPIN_LOCK_INIT; bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) { - spin_lock_ctx_t ctxiqa; - struct irq* irq = malloc (sizeof (*irq)); if (irq == NULL) { return false; @@ -26,21 +24,19 @@ bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num) { irq->arg = arg; irq->irq_num = irq_num; - spin_lock 
(&irqs_lock, &ctxiqa); + spin_lock (&irqs_lock); irq_table[irq_num] = irq; - spin_unlock (&irqs_lock, &ctxiqa); + spin_unlock (&irqs_lock); return true; } struct irq* irq_find (uint32_t irq_num) { - spin_lock_ctx_t ctxiqa; - - spin_lock (&irqs_lock, &ctxiqa); + spin_lock (&irqs_lock); struct irq* irq = irq_table[irq_num]; - spin_unlock (&irqs_lock, &ctxiqa); + spin_unlock (&irqs_lock); return irq; } diff --git a/kernel/mm/liballoc.c b/kernel/mm/liballoc.c index 02f2751..45fa8a7 100644 --- a/kernel/mm/liballoc.c +++ b/kernel/mm/liballoc.c @@ -11,13 +11,13 @@ spin_lock_t _liballoc_lock = SPIN_LOCK_INIT; -int liballoc_lock (void* ctx) { - spin_lock (&_liballoc_lock, (spin_lock_ctx_t*)ctx); +int liballoc_lock (void) { + spin_lock (&_liballoc_lock); return 0; } -int liballoc_unlock (void* ctx) { - spin_unlock (&_liballoc_lock, (spin_lock_ctx_t*)ctx); +int liballoc_unlock (void) { + spin_unlock (&_liballoc_lock); return 0; } @@ -243,9 +243,8 @@ void* malloc (size_t size) { int index; void* ptr; struct boundary_tag* tag = NULL; - spin_lock_ctx_t ctxliba; - liballoc_lock (&ctxliba); + liballoc_lock (); if (l_initialized == 0) { for (index = 0; index < MAXEXP; index++) { @@ -273,7 +272,7 @@ void* malloc (size_t size) { // No page found. Make one. 
if (tag == NULL) { if ((tag = allocate_new_tag (size)) == NULL) { - liballoc_unlock (&ctxliba); + liballoc_unlock (); return NULL; } @@ -306,24 +305,23 @@ void* malloc (size_t size) { ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag)); - liballoc_unlock (&ctxliba); + liballoc_unlock (); return ptr; } void free (void* ptr) { int index; struct boundary_tag* tag; - spin_lock_ctx_t ctxliba; if (ptr == NULL) return; - liballoc_lock (&ctxliba); + liballoc_lock (); tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag)); if (tag->magic != LIBALLOC_MAGIC) { - liballoc_unlock (&ctxliba); // release the lock + liballoc_unlock (); // release the lock return; } @@ -356,7 +354,7 @@ void free (void* ptr) { liballoc_free (tag, pages); - liballoc_unlock (&ctxliba); + liballoc_unlock (); return; } @@ -367,7 +365,7 @@ void free (void* ptr) { insert_tag (tag, index); - liballoc_unlock (&ctxliba); + liballoc_unlock (); } void* calloc (size_t nobj, size_t size) { @@ -387,7 +385,6 @@ void* realloc (void* p, size_t size) { void* ptr; struct boundary_tag* tag; int real_size; - spin_lock_ctx_t ctxliba; if (size == 0) { free (p); @@ -397,11 +394,11 @@ void* realloc (void* p, size_t size) { return malloc (size); if (&liballoc_lock != NULL) - liballoc_lock (&ctxliba); // lockit + liballoc_lock (); // lockit tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag)); real_size = tag->size; if (&liballoc_unlock != NULL) - liballoc_unlock (&ctxliba); + liballoc_unlock (); if ((size_t)real_size > size) real_size = size; diff --git a/kernel/mm/liballoc.h b/kernel/mm/liballoc.h index b9c3682..163d27b 100644 --- a/kernel/mm/liballoc.h +++ b/kernel/mm/liballoc.h @@ -47,7 +47,7 @@ struct boundary_tag { * \return 0 if the lock was acquired successfully. Anything else is * failure. */ -extern int liballoc_lock (void* ctx); +extern int liballoc_lock (void); /** This function unlocks what was previously locked by the liballoc_lock * function. 
If it disabled interrupts, it enables interrupts. If it @@ -55,7 +55,7 @@ extern int liballoc_lock (void* ctx); * * \return 0 if the lock was successfully released. */ -extern int liballoc_unlock (void* ctx); +extern int liballoc_unlock (void); /** This is the hook into the local system which allocates pages. It * accepts an integer parameter which is the number of pages diff --git a/kernel/mm/pmm.c b/kernel/mm/pmm.c index 136d712..d4561ef 100644 --- a/kernel/mm/pmm.c +++ b/kernel/mm/pmm.c @@ -100,8 +100,6 @@ static size_t pmm_find_free_space (struct pmm_region* pmm_region, size_t nblks) } physaddr_t pmm_alloc (size_t nblks) { - spin_lock_ctx_t ctxpmmr; - for (size_t region = 0; region < PMM_REGIONS_MAX; region++) { struct pmm_region* pmm_region = &pmm.regions[region]; @@ -109,7 +107,7 @@ physaddr_t pmm_alloc (size_t nblks) { if (!(pmm_region->flags & PMM_REGION_ACTIVE)) continue; - spin_lock (&pmm_region->lock, &ctxpmmr); + spin_lock (&pmm_region->lock); /* Find starting bit of the free bit range */ size_t bit = pmm_find_free_space (pmm_region, nblks); @@ -118,19 +116,18 @@ physaddr_t pmm_alloc (size_t nblks) { if (bit != (size_t)-1) { /* Mark it */ bm_set_region (&pmm_region->bm, bit, nblks); - spin_unlock (&pmm_region->lock, &ctxpmmr); + spin_unlock (&pmm_region->lock); return pmm_region->membase + bit * PAGE_SIZE; } - spin_unlock (&pmm_region->lock, &ctxpmmr); + spin_unlock (&pmm_region->lock); } return PMM_ALLOC_ERR; } void pmm_free (physaddr_t p_addr, size_t nblks) { - spin_lock_ctx_t ctxpmmr; /* Round down to nearest page boundary */ physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE); @@ -148,11 +145,11 @@ void pmm_free (physaddr_t p_addr, size_t nblks) { size_t bit = div_align_up (addr, PAGE_SIZE); - spin_lock (&pmm_region->lock, &ctxpmmr); + spin_lock (&pmm_region->lock); bm_clear_region (&pmm_region->bm, bit, nblks); - spin_unlock (&pmm_region->lock, &ctxpmmr); + spin_unlock (&pmm_region->lock); break; } diff --git a/kernel/proc/mutex.c 
b/kernel/proc/mutex.c index 587e62a..e2abf30 100644 --- a/kernel/proc/mutex.c +++ b/kernel/proc/mutex.c @@ -12,9 +12,7 @@ #include void proc_mutexes_cleanup (struct proc* proc) { - spin_lock_ctx_t ctxpg, ctxrs; - - spin_lock (&proc->procgroup->lock, &ctxpg); + spin_lock (&proc->procgroup->lock); struct rb_node_link* rnode; rbtree_first (&proc->procgroup->resource_tree, rnode); @@ -27,30 +25,29 @@ void proc_mutexes_cleanup (struct proc* proc) { rnode = next; - spin_lock (&resource->lock, &ctxrs); + spin_lock (&resource->lock); if (resource->type != PR_MUTEX) { - spin_unlock (&resource->lock, &ctxrs); + spin_unlock (&resource->lock); continue; } if (resource->u.mutex.owner == proc && resource->u.mutex.locked) { - spin_unlock (&resource->lock, &ctxrs); + spin_unlock (&resource->lock); struct cpu* reschedule_cpu; proc_mutex_unlock (proc, &resource->u.mutex, &reschedule_cpu); } } - spin_unlock (&proc->procgroup->lock, &ctxpg); + spin_unlock (&proc->procgroup->lock); } bool proc_cleanup_resource_mutex (struct proc_resource* resource, struct cpu** reschedule_cpu) { struct proc_mutex* mutex = &resource->u.mutex; - spin_lock_ctx_t ctxmt, ctxsq; - spin_lock (&mutex->resource->lock, &ctxmt); - spin_lock (&mutex->suspension_q.lock, &ctxsq); + spin_lock (&mutex->resource->lock); + spin_lock (&mutex->suspension_q.lock); bool reschedule = PROC_NO_RESCHEDULE; @@ -60,52 +57,47 @@ bool proc_cleanup_resource_mutex (struct proc_resource* resource, struct cpu** r struct proc* suspended_proc = sq_entry->proc; /* we will relock during resume */ - spin_unlock (&mutex->suspension_q.lock, &ctxsq); - spin_unlock (&mutex->resource->lock, &ctxmt); + spin_unlock (&mutex->suspension_q.lock); + spin_unlock (&mutex->resource->lock); reschedule = reschedule || proc_sq_resume (suspended_proc, sq_entry, reschedule_cpu); /* reacquire */ - spin_lock (&mutex->resource->lock, &ctxmt); - spin_lock (&mutex->suspension_q.lock, &ctxsq); + spin_lock (&mutex->resource->lock); + spin_lock 
(&mutex->suspension_q.lock); } mutex->locked = false; mutex->owner = NULL; - spin_unlock (&mutex->suspension_q.lock, &ctxsq); - spin_unlock (&mutex->resource->lock, &ctxmt); + spin_unlock (&mutex->suspension_q.lock); + spin_unlock (&mutex->resource->lock); return reschedule; } bool proc_mutex_lock (struct proc* proc, struct proc_mutex* mutex, struct cpu** reschedule_cpu) { - spin_lock_ctx_t ctxmt; - - spin_lock (&mutex->resource->lock, &ctxmt); + spin_lock (&mutex->resource->lock); if (!mutex->locked || mutex->owner == proc) { mutex->locked = true; mutex->owner = proc; - spin_unlock (&mutex->resource->lock, &ctxmt); + spin_unlock (&mutex->resource->lock); return PROC_NO_RESCHEDULE; } - return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, &ctxmt, - reschedule_cpu); + return proc_sq_suspend (proc, &mutex->suspension_q, &mutex->resource->lock, reschedule_cpu); } bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex, struct cpu** reschedule_cpu) { - spin_lock_ctx_t ctxmt, ctxsq; - - spin_lock (&mutex->resource->lock, &ctxmt); + spin_lock (&mutex->resource->lock); if (mutex->owner != proc) { - spin_unlock (&mutex->resource->lock, &ctxmt); + spin_unlock (&mutex->resource->lock); return PROC_NO_RESCHEDULE; } - spin_lock (&mutex->suspension_q.lock, &ctxsq); + spin_lock (&mutex->suspension_q.lock); struct list_node_link* node = mutex->suspension_q.proc_list; @@ -116,8 +108,8 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex, struct cpu* mutex->owner = resumed_proc; mutex->locked = true; - spin_unlock (&mutex->suspension_q.lock, &ctxsq); - spin_unlock (&mutex->resource->lock, &ctxmt); + spin_unlock (&mutex->suspension_q.lock); + spin_unlock (&mutex->resource->lock); return proc_sq_resume (resumed_proc, sq_entry, reschedule_cpu); } @@ -125,8 +117,8 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex, struct cpu* mutex->locked = false; mutex->owner = NULL; - spin_unlock (&mutex->suspension_q.lock, 
&ctxsq); - spin_unlock (&mutex->resource->lock, &ctxmt); + spin_unlock (&mutex->suspension_q.lock); + spin_unlock (&mutex->resource->lock); return PROC_NO_RESCHEDULE; } diff --git a/kernel/proc/proc.c b/kernel/proc/proc.c index c0fe48c..ff09e45 100644 --- a/kernel/proc/proc.c +++ b/kernel/proc/proc.c @@ -105,6 +105,8 @@ struct proc* proc_spawn_rd (char* name) { bool ok = proc_check_elf (rd_file->content); + DEBUG ("Spawning %s, elf header %s\n", name, ok ? "ok" : "bad"); + if (!ok) return NULL; @@ -112,24 +114,21 @@ struct proc* proc_spawn_rd (char* name) { } struct proc* proc_find_pid (int pid) { - spin_lock_ctx_t ctxprtr; struct proc* proc = NULL; - spin_lock (&proc_tree_lock, &ctxprtr); + spin_lock (&proc_tree_lock); rbtree_find (struct proc, &proc_tree, pid, proc, proc_tree_link, pid); - spin_unlock (&proc_tree_lock, &ctxprtr); + spin_unlock (&proc_tree_lock); return proc; } bool proc_register (struct proc* proc, struct cpu** reschedule_cpu) { - spin_lock_ctx_t ctxcpu, ctxprtr, ctxpr; - struct cpu* cpu = *reschedule_cpu != NULL ? 
*reschedule_cpu : cpu_find_lightest (); - spin_lock (&proc_tree_lock, &ctxprtr); - spin_lock (&cpu->lock, &ctxcpu); - spin_lock (&proc->lock, &ctxpr); + spin_lock (&proc_tree_lock); + spin_lock (&cpu->lock); + spin_lock (&proc->lock); proc->cpu = cpu; @@ -140,9 +139,9 @@ bool proc_register (struct proc* proc, struct cpu** reschedule_cpu) { if (cpu->proc_current == NULL) cpu->proc_current = proc; - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (&cpu->lock, &ctxcpu); - spin_unlock (&proc_tree_lock, &ctxprtr); + spin_unlock (&proc->lock); + spin_unlock (&cpu->lock); + spin_unlock (&proc_tree_lock); *reschedule_cpu = cpu; @@ -181,10 +180,8 @@ static struct proc* proc_find_sched (struct cpu* cpu) { static void proc_reap (void) { struct proc* proc = NULL; struct list_node_link* reap_list = NULL; - spin_lock_ctx_t ctxprtr; - spin_lock_ctx_t ctxpr; - spin_lock (&proc_tree_lock, &ctxprtr); + spin_lock (&proc_tree_lock); struct rb_node_link* node; rbtree_first (&proc_tree, node); @@ -195,16 +192,16 @@ static void proc_reap (void) { proc = rbtree_entry (node, struct proc, proc_tree_link); if (atomic_load (&proc->state) == PROC_DEAD) { - spin_lock (&proc->lock, &ctxpr); + spin_lock (&proc->lock); rbtree_delete (&proc_tree, &proc->proc_tree_link); list_append (reap_list, &proc->reap_link); - spin_unlock (&proc->lock, &ctxpr); + spin_unlock (&proc->lock); } node = next; } - spin_unlock (&proc_tree_lock, &ctxprtr); + spin_unlock (&proc_tree_lock); struct list_node_link *reap_link, *reap_link_tmp; list_foreach (reap_list, reap_link, reap_link_tmp) { @@ -217,8 +214,6 @@ static void proc_reap (void) { } void proc_sched (void) { - spin_lock_ctx_t ctxcpu; - int s_cycles = atomic_fetch_add (&sched_cycles, 1); if (s_cycles % SCHED_REAP_FREQ == 0) @@ -227,31 +222,29 @@ void proc_sched (void) { struct proc* next = NULL; struct cpu* cpu = thiscpu; - spin_lock (&cpu->lock, &ctxcpu); + spin_lock (&cpu->lock); next = proc_find_sched (cpu); if (next) { cpu->proc_current = next; - do_sched 
(next, &cpu->lock, &ctxcpu); + do_sched (next, &cpu->lock); } else { cpu->proc_current = NULL; - spin_unlock (&cpu->lock, &ctxcpu); + spin_unlock (&cpu->lock); spin (); } } bool proc_kill (struct proc* proc, struct cpu** reschedule_cpu) { - spin_lock_ctx_t ctxpr, ctxcpu; - - spin_lock (&proc->lock, &ctxpr); + spin_lock (&proc->lock); struct cpu* cpu = proc->cpu; - spin_unlock (&proc->lock, &ctxpr); + spin_unlock (&proc->lock); - spin_lock (&cpu->lock, &ctxcpu); - spin_lock (&proc->lock, &ctxpr); + spin_lock (&cpu->lock); + spin_lock (&proc->lock); atomic_store (&proc->state, PROC_DEAD); proc->cpu = NULL; @@ -261,8 +254,8 @@ bool proc_kill (struct proc* proc, struct cpu** reschedule_cpu) { if (cpu->proc_current == proc) cpu->proc_current = NULL; - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (&cpu->lock, &ctxcpu); + spin_unlock (&proc->lock); + spin_unlock (&cpu->lock); DEBUG ("killed PID %d\n", proc->pid); @@ -290,7 +283,6 @@ void proc_init (void) { struct cpu* init_cpu = thiscpu; proc_register (init, &init_cpu); - spin_lock_ctx_t ctxcpu; - spin_lock (&spin_proc->cpu->lock, &ctxcpu); - do_sched (spin_proc, &spin_proc->cpu->lock, &ctxcpu); + spin_lock (&spin_proc->cpu->lock); + do_sched (spin_proc, &spin_proc->cpu->lock); } diff --git a/kernel/proc/procgroup.c b/kernel/proc/procgroup.c index 3fc5fab..b14445d 100644 --- a/kernel/proc/procgroup.c +++ b/kernel/proc/procgroup.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -14,16 +15,14 @@ static atomic_int pgids = 0; uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags, uintptr_t* out_paddr) { - spin_lock_ctx_t ctxpg; - - spin_lock (&procgroup->lock, &ctxpg); + spin_lock (&procgroup->lock); vaddr = (vaddr == 0) ? 
procgroup->map_base : vaddr; struct proc_mapping* mapping = malloc (sizeof (*mapping)); if (mapping == NULL) { - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&procgroup->lock); return 0; } @@ -31,7 +30,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa if (paddr == PMM_ALLOC_ERR) { free (mapping); - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&procgroup->lock); return 0; } @@ -51,7 +50,7 @@ uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pa mm_map_page (&procgroup->pd, ppage, vpage, flags); } - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&procgroup->lock); return vaddr; } @@ -63,13 +62,12 @@ bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t struct list_node_link *mapping_link, *mapping_link_tmp; bool used_tail_mapping = false; - spin_lock_ctx_t ctxpg; struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping)); if (tail_mapping == NULL) return false; - spin_lock (&procgroup->lock, &ctxpg); + spin_lock (&procgroup->lock); list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) { struct proc_mapping* mapping = @@ -122,19 +120,19 @@ bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t mm_unmap_page (&procgroup->pd, vpage); } - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&procgroup->lock); return true; } struct procgroup* procgroup_create (void) { - spin_lock_ctx_t ctxpgtr; - struct procgroup* procgroup = malloc (sizeof (*procgroup)); if (procgroup == NULL) { return NULL; } + memset (procgroup, 0, sizeof (*procgroup)); + procgroup->refs = 0; procgroup->memb_proc_tree = NULL; procgroup->lock = SPIN_LOCK_INIT; @@ -142,48 +140,44 @@ struct procgroup* procgroup_create (void) { procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys (); procgroup->map_base = PROC_MAP_BASE; - spin_lock (&procgroup_tree_lock, &ctxpgtr); + spin_lock (&procgroup_tree_lock); rbtree_insert (struct procgroup, 
&procgroup_tree, &procgroup->procgroup_tree_link, procgroup_tree_link, pgid); - spin_unlock (&procgroup_tree_lock, &ctxpgtr); + spin_unlock (&procgroup_tree_lock); return procgroup; } void procgroup_attach (struct procgroup* procgroup, struct proc* proc) { - spin_lock_ctx_t ctxpg, ctxpr; - - spin_lock (&procgroup->lock, &ctxpg); - spin_lock (&proc->lock, &ctxpr); + spin_lock (&procgroup->lock); + spin_lock (&proc->lock); rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link, procgroup_memb_tree_link, pid); atomic_fetch_add (&procgroup->refs, 1); - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&proc->lock); + spin_unlock (&procgroup->lock); } void procgroup_detach (struct procgroup* procgroup, struct proc* proc) { - spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr; - - spin_lock (&procgroup->lock, &ctxpg); - spin_lock (&proc->lock, &ctxpr); + spin_lock (&procgroup->lock); + spin_lock (&proc->lock); rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link); int refs = atomic_fetch_sub (&procgroup->refs, 1); - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&proc->lock); + spin_unlock (&procgroup->lock); if (refs == 1) { - spin_lock (&procgroup_tree_lock, &ctxpgtr); - spin_lock (&procgroup->lock, &ctxpg); + spin_lock (&procgroup_tree_lock); + spin_lock (&procgroup->lock); rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link); - spin_unlock (&procgroup->lock, &ctxpg); - spin_unlock (&procgroup_tree_lock, &ctxpgtr); + spin_unlock (&procgroup->lock); + spin_unlock (&procgroup_tree_lock); /* delete resources */ struct rb_node_link* rnode; diff --git a/kernel/proc/resource.c b/kernel/proc/resource.c index e95cc88..760e0b1 100644 --- a/kernel/proc/resource.c +++ b/kernel/proc/resource.c @@ -13,19 +13,17 @@ #include struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid) { - spin_lock_ctx_t ctxpg; struct 
proc_resource* resource = NULL; - spin_lock (&procgroup->lock, &ctxpg); + spin_lock (&procgroup->lock); rbtree_find (struct proc_resource, &procgroup->resource_tree, rid, resource, resource_tree_link, rid); - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&procgroup->lock); return resource; } struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) { - spin_lock_ctx_t ctxpg; struct proc_resource* resource; resource = proc_find_resource (procgroup, rid); @@ -43,10 +41,10 @@ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, i resource->rid = rid; resource->type = PR_MUTEX; - spin_lock (&procgroup->lock, &ctxpg); + spin_lock (&procgroup->lock); rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link, resource_tree_link, rid); - spin_unlock (&procgroup->lock, &ctxpg); + spin_unlock (&procgroup->lock); return resource; } diff --git a/kernel/proc/suspension_q.c b/kernel/proc/suspension_q.c index 627511b..c209e5c 100644 --- a/kernel/proc/suspension_q.c +++ b/kernel/proc/suspension_q.c @@ -9,24 +9,23 @@ #include bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock, - spin_lock_ctx_t* ctxrl, struct cpu** reschedule_cpu) { - spin_lock_ctx_t ctxpr, ctxcpu, ctxsq; + struct cpu** reschedule_cpu) { struct cpu* cpu = proc->cpu; struct proc_sq_entry* sq_entry = malloc (sizeof (*sq_entry)); if (!sq_entry) { - spin_unlock (resource_lock, ctxrl); + spin_unlock (resource_lock); return PROC_NO_RESCHEDULE; } sq_entry->proc = proc; sq_entry->sq = sq; - spin_lock (&cpu->lock, &ctxcpu); - spin_lock (&proc->lock, &ctxpr); - spin_lock (&sq->lock, &ctxsq); + spin_lock (&cpu->lock); + spin_lock (&proc->lock); + spin_lock (&sq->lock); - spin_unlock (resource_lock, ctxrl); + spin_unlock (resource_lock); atomic_store (&proc->state, PROC_SUSPENDED); @@ -44,9 +43,9 @@ bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock 
proc->cpu = NULL; - spin_unlock (&sq->lock, &ctxsq); - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (&cpu->lock, &ctxcpu); + spin_unlock (&sq->lock); + spin_unlock (&proc->lock); + spin_unlock (&cpu->lock); *reschedule_cpu = cpu; @@ -55,13 +54,12 @@ bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry, struct cpu** reschedule_cpu) { - spin_lock_ctx_t ctxsq, ctxpr, ctxcpu; struct cpu* cpu = cpu_find_lightest (); struct proc_suspension_q* sq = sq_entry->sq; - spin_lock (&cpu->lock, &ctxcpu); - spin_lock (&proc->lock, &ctxpr); - spin_lock (&sq->lock, &ctxsq); + spin_lock (&cpu->lock); + spin_lock (&proc->lock); + spin_lock (&sq->lock); /* remove from sq's list */ list_remove (sq->proc_list, &sq_entry->sq_link); @@ -77,9 +75,9 @@ bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry, list_append (cpu->proc_run_q, &proc->cpu_run_q_link); atomic_fetch_add (&cpu->proc_run_q_count, 1); - spin_unlock (&sq->lock, &ctxsq); - spin_unlock (&proc->lock, &ctxpr); - spin_unlock (&cpu->lock, &ctxcpu); + spin_unlock (&sq->lock); + spin_unlock (&proc->lock); + spin_unlock (&cpu->lock); free (sq_entry); @@ -89,9 +87,7 @@ bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry, } void proc_sqs_cleanup (struct proc* proc) { - spin_lock_ctx_t ctxsq, ctxpr; - - spin_lock (&proc->lock, &ctxpr); + spin_lock (&proc->lock); /* clean suspension queue entries */ struct list_node_link *sq_link, *sq_link_tmp; @@ -99,7 +95,7 @@ void proc_sqs_cleanup (struct proc* proc) { struct proc_sq_entry* sq_entry = list_entry (sq_link, struct proc_sq_entry, proc_link); struct proc_suspension_q* sq = sq_entry->sq; - spin_lock (&sq->lock, &ctxsq); + spin_lock (&sq->lock); /* remove from sq's list */ list_remove (sq->proc_list, &sq_entry->sq_link); @@ -107,10 +103,10 @@ void proc_sqs_cleanup (struct proc* proc) { /* remove from proc's list */ list_remove (proc->sq_entries, 
&sq_entry->proc_link); - spin_unlock (&sq->lock, &ctxsq); + spin_unlock (&sq->lock); free (sq_entry); } - spin_unlock (&proc->lock, &ctxpr); + spin_unlock (&proc->lock); } diff --git a/kernel/proc/suspension_q.h b/kernel/proc/suspension_q.h index ae5972a..321b054 100644 --- a/kernel/proc/suspension_q.h +++ b/kernel/proc/suspension_q.h @@ -21,7 +21,7 @@ struct proc_sq_entry { void proc_sqs_cleanup (struct proc* proc); bool proc_sq_suspend (struct proc* proc, struct proc_suspension_q* sq, spin_lock_t* resource_lock, - spin_lock_ctx_t* ctxrl, struct cpu** reschedule_cpu); + struct cpu** reschedule_cpu); bool proc_sq_resume (struct proc* proc, struct proc_sq_entry* sq_entry, struct cpu** reschedule_cpu); diff --git a/kernel/sync/spin_lock.c b/kernel/sync/spin_lock.c index 6d4d1f0..9889096 100644 --- a/kernel/sync/spin_lock.c +++ b/kernel/sync/spin_lock.c @@ -1,17 +1,10 @@ #include #include -#include #include -void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx) { - irq_save (ctx); - +void spin_lock (spin_lock_t* sl) { while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire)) spin_lock_relax (); } -void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx) { - atomic_flag_clear_explicit (sl, memory_order_release); - - irq_restore (ctx); -} +void spin_unlock (spin_lock_t* sl) { atomic_flag_clear_explicit (sl, memory_order_release); } diff --git a/kernel/sync/spin_lock.h b/kernel/sync/spin_lock.h index 167a3ba..bafadd5 100644 --- a/kernel/sync/spin_lock.h +++ b/kernel/sync/spin_lock.h @@ -8,7 +8,7 @@ typedef atomic_flag spin_lock_t; -void spin_lock (spin_lock_t* sl, spin_lock_ctx_t* ctx); -void spin_unlock (spin_lock_t* sl, spin_lock_ctx_t* ctx); +void spin_lock (spin_lock_t* sl); +void spin_unlock (spin_lock_t* sl); #endif // _KERNEL_SYNC_SPIN_LOCK_H diff --git a/kernel/sys/irq.h b/kernel/sys/irq.h deleted file mode 100644 index 7116322..0000000 --- a/kernel/sys/irq.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _KERNEL_SYS_IRQ_H -#define _KERNEL_SYS_IRQ_H - 
-#include - -void irq_save (spin_lock_ctx_t* ctx); -void irq_restore (spin_lock_ctx_t* ctx); - -#endif // _KERNEL_SYS_IRQ_H diff --git a/kernel/sys/mm.h b/kernel/sys/mm.h index 1f88f2e..70cfdd9 100644 --- a/kernel/sys/mm.h +++ b/kernel/sys/mm.h @@ -13,8 +13,8 @@ #define MM_PG_USER (1 << 2) uintptr_t mm_alloc_user_pd_phys (void); -void mm_kernel_lock (spin_lock_ctx_t* ctx); -void mm_kernel_unlock (spin_lock_ctx_t* ctx); +void mm_kernel_lock (void); +void mm_kernel_unlock (void); void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags); void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags); void mm_unmap_page (struct pd* pd, uintptr_t vaddr); diff --git a/kernel/sys/sched.h b/kernel/sys/sched.h index 7207ad9..09bb475 100644 --- a/kernel/sys/sched.h +++ b/kernel/sys/sched.h @@ -4,6 +4,6 @@ #include #include -void do_sched (struct proc* proc, spin_lock_t* cpu_lock, spin_lock_ctx_t* ctxcpu); +void do_sched (struct proc* proc, spin_lock_t* cpu_lock); #endif // _KERNEL_SYS_SCHED_H diff --git a/kernel/sys/spin_lock.h b/kernel/sys/spin_lock.h index c5113b8..902d57a 100644 --- a/kernel/sys/spin_lock.h +++ b/kernel/sys/spin_lock.h @@ -3,10 +3,6 @@ #include -#if defined(__x86_64__) -typedef uint64_t spin_lock_ctx_t; -#endif - void spin_lock_relax (void); #endif // _KERNEL_SYS_SPIN_LOCK_H diff --git a/kernel/syscall/syscall.c b/kernel/syscall/syscall.c index f85e3f2..9dc8471 100644 --- a/kernel/syscall/syscall.c +++ b/kernel/syscall/syscall.c @@ -25,18 +25,17 @@ static void* sys_get_user_buffer (struct proc* proc, uintptr_t uvaddr, size_t size) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; - spin_lock_ctx_t ctxpg; - spin_lock (&proc->procgroup->lock, &ctxpg); + spin_lock (&proc->procgroup->lock); if (!mm_validate_buffer (&proc->procgroup->pd, (uintptr_t)uvaddr, size)) { - spin_unlock (&proc->procgroup->lock, &ctxpg); + spin_unlock (&proc->procgroup->lock); return NULL; } uintptr_t out_paddr = mm_v2p 
(&proc->procgroup->pd, uvaddr); - spin_unlock (&proc->procgroup->lock, &ctxpg); + spin_unlock (&proc->procgroup->lock); uintptr_t out_kvaddr = (uintptr_t)hhdm->offset + out_paddr;