Resolve strange IRQ issues that caused the scheduler to behave erratically (IRQ mapping)
All checks were successful
Build documentation / build-and-deploy (push) Successful in 52s
init/init.c (10 changed lines)
@@ -14,9 +14,14 @@ int mutex_rid;
 void mythread (void) {
     for (int j = 0; j < 10; j++) {
+        proc_mutex_lock (mutex_rid, RV_PRIVATE);
+
         for (size_t i = 0; i < 3; i++)
             proc_test ('b');
+
+        proc_mutex_unlock (mutex_rid, RV_PRIVATE);
+
         for (volatile int i = 0; i < 200 * 1000 * 1000; i++)
             ;
     }
 
     proc_quit ();
@@ -39,9 +44,14 @@ void app_main (void) {
 
     for (int j = 0; j < 10; j++) {
+        proc_mutex_lock (mutex_rid, RV_PRIVATE);
+
         for (size_t i = 0; i < 3; i++)
             proc_test ('a');
+
+        proc_mutex_unlock (mutex_rid, RV_PRIVATE);
+
         for (volatile int i = 0; i < 400 * 1000 * 1000; i++)
             ;
     }
 
     for (;;)
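
The two test loops above are identical apart from the character they print and the length of the busy-wait delay. A minimal sketch of how they could be factored into one helper; mutex_burst, burst_char and delay are illustrative names not found in this commit, while proc_mutex_lock/unlock, proc_test, RV_PRIVATE and mutex_rid come from the diff itself:

    static void mutex_burst (char burst_char, int delay) {
        for (int j = 0; j < 10; j++) {
            proc_mutex_lock (mutex_rid, RV_PRIVATE);    /* serialize the burst of proc_test calls */

            for (size_t i = 0; i < 3; i++)
                proc_test (burst_char);

            proc_mutex_unlock (mutex_rid, RV_PRIVATE);  /* let the other thread print its burst */

            for (volatile int i = 0; i < delay; i++)    /* crude busy-wait between bursts */
                ;
        }
    }

With such a helper, mythread would call mutex_burst ('b', 200 * 1000 * 1000) and app_main would call mutex_burst ('a', 400 * 1000 * 1000).
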
@@ -77,7 +77,7 @@ static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t va
 }
 
 /* Find the IOAPIC corresponding to the provided IRQ */
-static struct ioapic* amd64_ioapic_find (uint8_t irq) {
+static struct ioapic* amd64_ioapic_find (uint32_t irq) {
     struct ioapic* ioapic = NULL;
 
     for (size_t i = 0; i < ioapic_entries; i++) {
@@ -103,7 +103,7 @@ static struct ioapic* amd64_ioapic_find (uint8_t irq) {
  * flags - IOAPIC redirection flags.
  * lapic_id - Local APIC that will receive the interrupt.
  */
-void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
+void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) {
     struct ioapic* ioapic = NULL;
     struct acpi_madt_interrupt_source_override* override;
     bool found_override = false;
@@ -119,13 +119,13 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
     uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);
 
     if (found_override) {
-        uint8_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
-        uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
+        uint32_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
+        uint32_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
         calc_flags |= (uint64_t)mode << 15;
         calc_flags |= (uint64_t)polarity << 13;
     }
 
-    uint8_t gsi = found_override ? override->gsi : irq;
+    uint32_t gsi = found_override ? override->gsi : irq;
 
     ioapic = amd64_ioapic_find (gsi);
 
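
For context, calc_flags above is a 64-bit IOAPIC redirection-table entry. A minimal sketch of the bit layout this code relies on (the standard IOAPIC layout; the helper name and parameters are illustrative and not part of this commit):

    #include <stdint.h>  /* or the fixed-width types from libk/std.h in this tree */

    /* Build an IOAPIC redirection entry: vector in bits 0-7, pin polarity in
     * bit 13 (1 = active low), trigger mode in bit 15 (1 = level-triggered),
     * destination LAPIC ID in bits 56-63 (physical destination mode). */
    static uint64_t ioapic_redirection_entry (uint8_t vector, uint8_t dest_lapic_id,
                                              int active_low, int level_triggered) {
        uint64_t entry = (uint64_t)vector;
        entry |= (uint64_t)(active_low & 1) << 13;
        entry |= (uint64_t)(level_triggered & 1) << 15;
        entry |= (uint64_t)dest_lapic_id << 56;
        return entry;
    }

The MADT interrupt source override decoding above follows the same convention: an ACPI polarity field of 0b11 means active low and a trigger field of 0b11 means level-triggered, which is why those cases set bits 13 and 15 of the entry.
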
@@ -263,7 +263,7 @@ void amd64_lapic_init (uint32_t us) {
  * lapic_id - Target Local APIC
  * vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
  */
-void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
+void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) {
     /* wait for previous IPI to finish */
     while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) {
         __asm__ volatile ("pause");
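
The bit 12 polled above is the xAPIC delivery-status bit of the interrupt command register. A rough sketch of what sending an IPI looks like in terms of the standard xAPIC ICR pair; the register offsets and the amd64_lapic_write helper are generic placeholders for illustration, not necessarily this kernel's names:

    #define ICR_LOW   0x300  /* vector in bits 0-7, delivery status in bit 12 */
    #define ICR_HIGH  0x310  /* destination LAPIC ID in bits 24-31 */

    static void lapic_ipi_sketch (uint32_t lapic_id, uint32_t vec) {
        /* wait for any previous IPI to be delivered */
        while (amd64_lapic_read (ICR_LOW) & (1u << 12))
            __asm__ volatile ("pause");

        amd64_lapic_write (ICR_HIGH, lapic_id << 24);  /* select the destination CPU */
        amd64_lapic_write (ICR_LOW, vec & 0xFF);       /* writing the low dword triggers the IPI */
    }
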
@@ -3,12 +3,12 @@
 
 #include <libk/std.h>
 
-void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id);
+void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id);
 void amd64_ioapic_init (void);
 
 uint32_t amd64_lapic_id (void);
 void amd64_lapic_eoi (void);
-void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec);
+void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec);
 void amd64_lapic_init (uint32_t us);
 
 #endif // _KERNEL_AMD64_APIC_H
@@ -216,12 +216,12 @@ void irq_save (spin_lock_ctx_t* ctx) { *ctx = amd64_irq_save_flags (); }
 void irq_restore (spin_lock_ctx_t* ctx) { amd64_irq_restore_flags (*ctx); }
 
 /* Map custom IRQ mappings to legacy IRQs */
-uint8_t amd64_resolve_irq (uint8_t irq) {
-    static const uint8_t mappings[] = {
+uint32_t amd64_resolve_irq (uint32_t irq) {
+    static const uint32_t mappings[] = {
         [SCHED_PREEMPT_TIMER] = 0,
-        [TLB_SHOOTDOWN] = 1,
-        [CPU_REQUEST_SCHED] = 2,
-        [CPU_SPURIOUS] = 3,
+        [TLB_SHOOTDOWN] = 6,
+        [CPU_REQUEST_SCHED] = 3,
+        [CPU_SPURIOUS] = 5,
     };
 
     return mappings[irq];
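
With the corrected table, SCHED_PREEMPT_TIMER resolves to legacy IRQ 0 (the timer line) and the other kernel IRQ identifiers are moved off the low legacy lines. A plausible reason for moving CPU_REQUEST_SCHED off legacy IRQ 2 in particular: firmware commonly provides an interrupt source override that maps legacy IRQ 0 to GSI 2, so the old table could make the preemption timer and the reschedule request share a redirection entry. A minimal usage sketch, assuming a caller wants to route one of these symbolic IRQs through the IOAPIC; the vector base of 32 and the helper name are assumptions for illustration, and the real wiring is not part of this hunk:

    #define IRQ_VECTOR_BASE 32u  /* vectors 0-31 are reserved for CPU exceptions */

    static void route_symbolic_irq (uint32_t sym_irq) {
        uint32_t legacy = amd64_resolve_irq (sym_irq);            /* e.g. SCHED_PREEMPT_TIMER -> 0 */
        amd64_ioapic_route_irq (IRQ_VECTOR_BASE + legacy, legacy, /* vector, legacy IRQ */
                                0,                                /* no extra redirection flags */
                                amd64_lapic_id ());               /* deliver to the current CPU */
    }
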
@@ -32,7 +32,7 @@ struct saved_regs {
 } PACKED;
 
 void amd64_load_idt (void);
-uint8_t amd64_resolve_irq (uint8_t irq);
+uint32_t amd64_resolve_irq (uint32_t irq);
 void amd64_intr_init (void);
 
 #endif // _KERNEL_AMD64_INTR_H
@@ -3,6 +3,7 @@
 #include <libk/std.h>
 #include <mm/liballoc.h>
+#include <sync/rw_spin_lock.h>
 #include <sys/debug.h>
 
 #if defined(__x86_64__)
 #include <amd64/apic.h>
@@ -51,26 +51,27 @@ bool proc_mutex_unlock (struct proc* proc, struct proc_mutex* mutex) {
 
     spin_lock (&mutex->suspension_q.lock, &ctxsq);
 
-    struct proc* resumed_proc;
+    struct proc* resumed_proc = NULL;
     struct rb_node_link* node;
     rbtree_first (&mutex->suspension_q.proc_tree, node);
 
     if (node) {
         resumed_proc = rbtree_entry (node, struct proc, suspension_link);
         mutex->owner = resumed_proc;
         mutex->locked = true;
 
         spin_unlock (&mutex->suspension_q.lock, &ctxsq);
         spin_unlock (&mutex->resource->lock, &ctxmt);
 
         proc_resume (resumed_proc);
 
         return true;
     }
 
-    spin_unlock (&mutex->suspension_q.lock, &ctxsq);
-
     mutex->locked = false;
     mutex->owner = NULL;
 
     spin_unlock (&mutex->suspension_q.lock, &ctxsq);
     spin_unlock (&mutex->resource->lock, &ctxmt);
 
     return true;
@@ -355,9 +355,6 @@ void proc_kill (struct proc* proc, void* regs) {
 
     DEBUG ("killed PID %d\n", proc->pid);
 
     if (cpu == thiscpu)
         proc_sched (regs);
     else
         cpu_request_sched (cpu);
 }
 
@@ -408,13 +405,7 @@ void proc_resume (struct proc* proc) {
 
 static void proc_irq_sched (void* arg, void* regs) {
     (void)arg;
 
 #if defined(__x86_64__)
     struct saved_regs* s_regs = regs;
     /* Only schedule, when we came from usermode */
     if ((s_regs->cs & 0x03))
         proc_sched (regs);
 #endif
 }
 
 static void proc_kpproc_init (void) {
@@ -470,15 +461,15 @@ static void proc_kpproc_init (void) {
 }
 
 void proc_init (void) {
-#if defined(__x86_64__)
-    irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
-    irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_SAFE);
-#endif
-
     proc_kpproc_init ();
 
     struct proc* init = proc_spawn_rd ("init.exe");
     proc_register (init, thiscpu);
 
+#if defined(__x86_64__)
+    irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_UNSAFE);
+    irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED, IRQ_INTERRUPT_UNSAFE);
+#endif
+
     do_sched (init);
 }
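
irq_attach is also the hook point for any other code that wants to run on one of these kernel IRQs. A minimal sketch of the handler shape it expects, modelled on proc_irq_sched above; the handler name and the attached IRQ below are hypothetical:

    /* Handlers receive the opaque argument passed to irq_attach plus a pointer
     * to the saved register frame of the interrupted context. */
    static void my_irq_handler (void* arg, void* regs) {
        (void)arg;
        (void)regs;
        /* subsystem-specific work goes here */
    }

    /* attached during initialisation, mirroring the calls in proc_init:
     *   irq_attach (&my_irq_handler, NULL, SOME_IRQ, IRQ_INTERRUPT_UNSAFE); */
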
@@ -188,8 +188,6 @@ DEFINE_SYSCALL (sys_proc_mutex_unlock) {
     if (result < 0)
         return result;
 
-    proc_sched (regs);
-
     return ST_OK;
 }
 