/* mop3/kernel/amd64/intr.c */

#include <amd64/apic.h>
#include <amd64/gdt.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <sys/debug.h>
#include <sys/irq.h>
#include <sys/smp.h>
#include <sys/spin.h>
/* 8259 PIC defs. */
#define PIC1 0x20
#define PIC2 0xA0
#define PIC1_CMD PIC1
#define PIC1_DATA (PIC1 + 1)
#define PIC2_CMD PIC2
#define PIC2_DATA (PIC2 + 1)
#define PIC_EOI 0x20
#define ICW1_ICW4 0x01
#define ICW1_SINGLE 0x02
#define ICW1_INTVL4 0x04
#define ICW1_LEVEL 0x08
#define ICW1_INIT 0x10
#define ICW4_8086 0x01
#define ICW4_AUTO 0x02
#define ICW4_BUFSLAVE 0x08
#define ICW4_BUFMASTER 0x0C
#define ICW4_SFNM 0x10
#define CASCADE_IRQ 2
/* IDT defs. */
#define IDT_ENTRIES_MAX 256
/// 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table
struct idt_entry {
    uint16_t intrlow;   /* Handler address bits 0..15. */
    uint16_t kernel_cs; /* Kernel code segment selector. */
    uint8_t ist;        /* Interrupt Stack Table index (0 = regular stack). */
    uint8_t attrs;      /* Gate type, DPL and present bit. */
    uint16_t intrmid;   /* Handler address bits 16..31. */
    uint32_t intrhigh;  /* Handler address bits 32..63. */
    uint32_t resv;      /* Reserved; must be zero. */
} PACKED;
struct idt {
    uint16_t limit; /* Size of the table in bytes, minus one. */
    uint64_t base;  /* Linear address of the first entry. */
} PACKED;
/** @cond DOXYGEN_IGNORE */
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
/** @endcond */
static volatile struct idt idt;
/// Remaps and disables the old 8259 PIC, since we'll be using the APIC.
static void amd64_init_pic (void) {
/** @cond DOXYGEN_IGNORE */
#define IO_OP(fn, ...) \
    fn (__VA_ARGS__); \
    amd64_io_wait ()
/** @endcond */
    /* ICW1: begin initialization in cascade mode, ICW4 to follow. */
    IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
    IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
    /* ICW2: remap vector offsets to 0x20/0x28 so PIC interrupts
       can't alias CPU exceptions. */
    IO_OP (amd64_io_outb, PIC1_DATA, 0x20);
    IO_OP (amd64_io_outb, PIC2_DATA, 0x28);
    /* ICW3: the master learns which line carries the slave; the
       slave learns its cascade identity. */
    IO_OP (amd64_io_outb, PIC1_DATA, (1 << CASCADE_IRQ));
    IO_OP (amd64_io_outb, PIC2_DATA, 2);
    /* ICW4: 8086/88 mode. */
    IO_OP (amd64_io_outb, PIC1_DATA, ICW4_8086);
    IO_OP (amd64_io_outb, PIC2_DATA, ICW4_8086);
    /* Disable: mask every line on both PICs. */
    IO_OP (amd64_io_outb, PIC1_DATA, 0xFF);
    IO_OP (amd64_io_outb, PIC2_DATA, 0xFF);
#undef IO_OP
}
/// Set an IDT entry: split the handler address across the three address
/// fields and fill in the selector, IST and attribute bytes.
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
                           uint8_t ist) {
    ent->intrlow = (handler & 0xFFFF);
    ent->kernel_cs = GDT_KCODE;
    ent->ist = ist;
    ent->attrs = flags;
    ent->intrmid = ((handler >> 16) & 0xFFFF);
    ent->intrhigh = ((handler >> 32) & 0xFFFFFFFF);
    ent->resv = 0;
}
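/*
 * Usage sketch: installing a single gate by hand rather than through the
 * IDT_ENTRY macro below. amd64_intr3 is one of the assembly stubs the macro
 * already references; 0x8E marks a present, DPL-0, 64-bit interrupt gate.
 *
 *   extern void amd64_intr3 (void);
 *   amd64_idt_set (&idt_entries[3], (uint64_t)&amd64_intr3, 0x8E, 0);
 */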
/// Load the IDT
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
/// Initialize IDT entries
static void amd64_idt_init (void) {
    memset ((void*)idt_entries, 0, sizeof (idt_entries));
/** @cond DOXYGEN_IGNORE */
/* 0x8E = present, DPL 0, 64-bit interrupt gate. */
#define IDT_ENTRY(n, ist) \
    extern void amd64_intr##n (void); \
    amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
/** @endcond */
    /* clang-format off */
    /* Vectors 0..31: CPU exceptions, handled on the regular stack. */
    IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
    IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
    IDT_ENTRY (8, 0); IDT_ENTRY (9, 0); IDT_ENTRY (10, 0); IDT_ENTRY (11, 0);
    IDT_ENTRY (12, 0); IDT_ENTRY (13, 0); IDT_ENTRY (14, 0); IDT_ENTRY (15, 0);
    IDT_ENTRY (16, 0); IDT_ENTRY (17, 0); IDT_ENTRY (18, 0); IDT_ENTRY (19, 0);
    IDT_ENTRY (20, 0); IDT_ENTRY (21, 0); IDT_ENTRY (22, 0); IDT_ENTRY (23, 0);
    IDT_ENTRY (24, 0); IDT_ENTRY (25, 0); IDT_ENTRY (26, 0); IDT_ENTRY (27, 0);
    IDT_ENTRY (28, 0); IDT_ENTRY (29, 0); IDT_ENTRY (30, 0); IDT_ENTRY (31, 0);
    /* Vectors 32..47 and the kernel's own vectors: interrupts on IST 1. */
    IDT_ENTRY (32, 1); IDT_ENTRY (33, 1); IDT_ENTRY (34, 1); IDT_ENTRY (35, 1);
    IDT_ENTRY (36, 1); IDT_ENTRY (37, 1); IDT_ENTRY (38, 1); IDT_ENTRY (39, 1);
    IDT_ENTRY (40, 1); IDT_ENTRY (41, 1); IDT_ENTRY (42, 1); IDT_ENTRY (43, 1);
    IDT_ENTRY (44, 1); IDT_ENTRY (45, 1); IDT_ENTRY (46, 1); IDT_ENTRY (47, 1);
    IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
    IDT_ENTRY (TLB_SHOOTDOWN, 1);
    /* clang-format on */
#undef IDT_ENTRY
    idt.limit = sizeof (idt_entries) - 1;
    idt.base = (uint64_t)idt_entries;
    amd64_load_idt ();
}
/**
 * @brief Handle a CPU exception and dump the registers. If the incoming CS
 * has CPL 3, kill the offending process.
 *
 * @param regs
 *     saved registers
 */
static void amd64_intr_exception (struct saved_regs* regs) {
    DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);
    /* CR2 holds the faulting address on a page fault; CR3 is the current
       page-table root. */
    uint64_t cr2;
    __asm__ volatile ("movq %%cr2, %0" : "=r"(cr2));
    uint64_t cr3;
    __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3));
    debugprintf ("r15=%016lx r14=%016lx r13=%016lx\n"
                 "r12=%016lx r11=%016lx r10=%016lx\n"
                 "r9 =%016lx r8 =%016lx rbp=%016lx\n"
                 "rdi=%016lx rsi=%016lx rdx=%016lx\n"
                 "rcx=%016lx rax=%016lx trp=%016lx\n"
                 "err=%016lx rip=%016lx cs =%016lx\n"
                 "rfl=%016lx rsp=%016lx ss =%016lx\n"
                 "cr2=%016lx cr3=%016lx rbx=%016lx\n",
                 regs->r15, regs->r14, regs->r13, regs->r12, regs->r11, regs->r10, regs->r9,
                 regs->r8, regs->rbp, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->rax,
                 regs->trap, regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp,
                 regs->ss, cr2, cr3, regs->rbx);
    if (regs->cs == (GDT_UCODE | 0x03)) {
        /* The fault came from ring 3: kill the current process. */
        proc_kill (thiscpu->proc_current);
    } else {
        /* Kernel-mode exception: halt this CPU. */
        spin ();
    }
}
/// Handle an incoming interrupt and dispatch the matching IRQ handler.
void amd64_intr_handler (void* stack_ptr) {
    struct saved_regs* regs = stack_ptr;
    if (regs->trap <= 31) {
        amd64_intr_exception (regs);
    } else {
        amd64_lapic_eoi ();
        struct irq* irq = irq_find (regs->trap);
        if (irq != NULL) {
            /* Interrupt-safe handlers are allowed to run with interrupts
               enabled, so they can themselves be preempted. */
            if ((irq->flags & IRQ_INTERRUPT_SAFE))
                __asm__ volatile ("sti");
            irq->func (irq->arg, stack_ptr);
            if ((irq->flags & IRQ_INTERRUPT_SAFE))
                __asm__ volatile ("cli");
        }
    }
}
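/*
 * Handler shape implied by the dispatch above: irq->func receives the
 * registered argument plus the saved-register stack pointer. A sketch (the
 * handler name is ours, not part of this file):
 *
 *   static void example_irq_handler (void* arg, void* stack_ptr) {
 *       struct saved_regs* regs = stack_ptr;
 *       (void)arg;
 *       (void)regs;
 *   }
 */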
/// Initialize interrupts
void amd64_intr_init (void) {
    amd64_init_pic ();
    amd64_idt_init ();
}
/* Aux. */
/// Save RFLAGS of the current CPU and disable interrupts
static uint64_t amd64_irq_save_flags (void) {
    uint64_t rflags;
    /* Record RFLAGS, then clear IF, in one instruction sequence. */
    __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
    return rflags;
}
/// Restore interrupts (IF bit) from RFLAGS
static void amd64_irq_restore_flags (uint64_t rflags) {
    /* Re-enable interrupts only if IF (bit 9) was set in the saved flags. */
    if (rflags & (1ULL << 9))
        __asm__ volatile ("sti");
}
/// Save the current interrupt state and disable interrupts; calls nest
void irq_save (void) {
    int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
    if (prev == 0)
        thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
}
/// Restore the interrupt state saved by the matching irq_save ()
void irq_restore (void) {
    int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
    if (prev == 1)
        amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
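/*
 * Usage sketch: irq_save ()/irq_restore () pairs nest, so a helper that
 * masks interrupts stays correct even when its caller already has. Only the
 * outermost pair actually touches RFLAGS.IF:
 *
 *   irq_save ();    // outermost: records RFLAGS and executes cli
 *   irq_save ();    // nested: only bumps the per-CPU counter
 *   irq_restore (); // nested: only drops the counter
 *   irq_restore (); // outermost: sti, iff IF was set at the first save
 */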
/// Map custom interrupt vectors to the legacy IRQ numbers they stand for
uint8_t amd64_resolve_irq (uint8_t irq) {
    static const uint8_t mappings[] = {
        [SCHED_PREEMPT_TIMER] = 0,
        [TLB_SHOOTDOWN] = 1,
    };
    return mappings[irq];
}
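/*
 * For example, per the designated initializers above:
 *
 *   amd64_resolve_irq (SCHED_PREEMPT_TIMER); // -> 0
 *   amd64_resolve_irq (TLB_SHOOTDOWN);       // -> 1
 */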