#include <stdint.h>
#include <string.h>
/* Also requires the project's own headers declaring amd64_io_outb(),
 * amd64_io_wait(), struct saved_regs, DEBUG(), and debugprintf(). */

/* 8259 PIC defs. */
#define PIC1 0x20
#define PIC2 0xA0
#define PIC1_CMD PIC1
#define PIC1_DATA (PIC1 + 1)
#define PIC2_CMD PIC2
#define PIC2_DATA (PIC2 + 1)
#define PIC_EOI 0x20

#define ICW1_ICW4 0x01
#define ICW1_SINGLE 0x02
#define ICW1_INTVL4 0x04
#define ICW1_LEVEL 0x08
#define ICW1_INIT 0x10

#define ICW4_8086 0x01
#define ICW4_AUTO 0x02
#define ICW4_BUFSLAVE 0x08
#define ICW4_BUFMASTER 0x0C
#define ICW4_SFNM 0x10

#define CASCADE_IRQ 2

/* IDT defs. */
#define IDT_ENTRIES_MAX 256

struct idt_entry {
        uint16_t intrlow;
        uint16_t kernel_cs;
        uint8_t ist;
        uint8_t attrs;
        uint16_t intrmid;
        uint32_t intrhigh;
        uint32_t resv;
} __attribute__((packed));

struct idt {
        uint16_t limit;
        uint64_t base;
} __attribute__((packed));

__attribute__((aligned(16))) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
static volatile struct idt idt;

extern void amd64_spin(void);

/* Remaps and disables the old 8259 PIC, since we'll be using the APIC. */
static void amd64_init_pic(void)
{
#define IO_OP(fn, ...) fn(__VA_ARGS__); amd64_io_wait()
        /* ICW1: start the init sequence in cascade mode, ICW4 will follow. */
        IO_OP(amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
        IO_OP(amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
        /* ICW2: vector offsets (master 0x20-0x27, slave 0x28-0x2F). */
        IO_OP(amd64_io_outb, PIC1_DATA, 0x20);
        IO_OP(amd64_io_outb, PIC2_DATA, 0x28);
        /* ICW3: slave sits on the master's IRQ2; slave cascade identity is 2. */
        IO_OP(amd64_io_outb, PIC1_DATA, (1 << CASCADE_IRQ));
        IO_OP(amd64_io_outb, PIC2_DATA, 2);
        /* ICW4: 8086/88 mode. */
        IO_OP(amd64_io_outb, PIC1_DATA, ICW4_8086);
        IO_OP(amd64_io_outb, PIC2_DATA, ICW4_8086);

        /* Disable: mask every IRQ line on both PICs. */
        IO_OP(amd64_io_outb, PIC1_DATA, 0xFF);
        IO_OP(amd64_io_outb, PIC2_DATA, 0xFF);
#undef IO_OP
}

static void amd64_idt_set(volatile struct idt_entry *ent, uint64_t handler,
                uint8_t flags)
{
        ent->intrlow = (handler & 0xFFFF);
        ent->kernel_cs = 0x08; // GDT_KCODE (init.c)
        ent->ist = 0;
        ent->attrs = flags;
        ent->intrmid = ((handler >> 16) & 0xFFFF);
        ent->intrhigh = ((handler >> 32) & 0xFFFFFFFF);
        ent->resv = 0;
}

static void amd64_idt_init(void)
{
        memset((void *)idt_entries, 0, sizeof(idt_entries));

        /* 0x8E = present, DPL 0, 64-bit interrupt gate. */
#define IDT_ENTRY(n) \
        extern void amd64_intr ## n(void); \
        amd64_idt_set(&idt_entries[(n)], (uint64_t)&amd64_intr ## n, 0x8E)

        IDT_ENTRY(0);  IDT_ENTRY(1);  IDT_ENTRY(2);  IDT_ENTRY(3);
        IDT_ENTRY(4);  IDT_ENTRY(5);  IDT_ENTRY(6);  IDT_ENTRY(7);
        IDT_ENTRY(8);  IDT_ENTRY(9);  IDT_ENTRY(10); IDT_ENTRY(11);
        IDT_ENTRY(12); IDT_ENTRY(13); IDT_ENTRY(14); IDT_ENTRY(15);
        IDT_ENTRY(16); IDT_ENTRY(17); IDT_ENTRY(18); IDT_ENTRY(19);
        IDT_ENTRY(20); IDT_ENTRY(21); IDT_ENTRY(22); IDT_ENTRY(23);
        IDT_ENTRY(24); IDT_ENTRY(25); IDT_ENTRY(26); IDT_ENTRY(27);
        IDT_ENTRY(28); IDT_ENTRY(29); IDT_ENTRY(30); IDT_ENTRY(31);
        IDT_ENTRY(32); IDT_ENTRY(33); IDT_ENTRY(34); IDT_ENTRY(35);
        IDT_ENTRY(36); IDT_ENTRY(37); IDT_ENTRY(38); IDT_ENTRY(39);
        IDT_ENTRY(40); IDT_ENTRY(41); IDT_ENTRY(42); IDT_ENTRY(43);
        IDT_ENTRY(44); IDT_ENTRY(45); IDT_ENTRY(46); IDT_ENTRY(47);
#undef IDT_ENTRY

        idt.limit = sizeof(idt_entries) - 1;
        idt.base = (uint64_t)idt_entries;

        __asm__ volatile("lidt %0" :: "m"(idt));
        __asm__ volatile("sti");
}

static void amd64_intr_exception(struct saved_regs *regs)
{
        DEBUG("cpu exception %lu (%lu)\n", regs->trap, regs->error);

        uint64_t cr2;
        __asm__ volatile("movq %%cr2, %0" : "=r"(cr2));
        uint64_t cr3;
        __asm__ volatile("movq %%cr3, %0" : "=r"(cr3));

        debugprintf(
                "r15=%016lx r14=%016lx r13=%016lx\n"
                "r12=%016lx r11=%016lx r10=%016lx\n"
                "r9 =%016lx r8 =%016lx rbp=%016lx\n"
                "rdi=%016lx rsi=%016lx rdx=%016lx\n"
                "rcx=%016lx rax=%016lx trp=%016lx\n"
                "err=%016lx rip=%016lx cs =%016lx\n"
                "rfl=%016lx rsp=%016lx ss =%016lx\n"
                "cr2=%016lx cr3=%016lx rbx=%016lx\n",
                regs->r15, regs->r14, regs->r13, regs->r12,
                regs->r11, regs->r10, regs->r9, regs->r8,
                regs->rbp, regs->rdi, regs->rsi, regs->rdx,
                regs->rcx, regs->rax, regs->trap, regs->error,
                regs->rip, regs->cs, regs->rflags, regs->rsp,
                regs->ss, cr2, cr3, regs->rbx
        );

        amd64_spin();
}

void amd64_intr_handler(void *stack_ptr)
{
        struct saved_regs *regs = stack_ptr;

        if (regs->trap <= 31) {
                amd64_intr_exception(regs);
        } else {
                DEBUG("unknown trap %lu\n", regs->trap);
        }
}

void amd64_intr_init(void)
{
        amd64_init_pic();
        amd64_idt_init();
}
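/*
 * The PIC remap above relies on amd64_io_outb() and amd64_io_wait(), which
 * this project is assumed to define elsewhere. For reference, a minimal
 * sketch of the classic implementations (an assumption, not this project's
 * actual code, so it is left out of the build) looks like this:
 */
#if 0
static inline void amd64_io_outb(uint16_t port, uint8_t val)
{
        /* Write one byte to an I/O port. */
        __asm__ volatile("outb %b0, %w1" :: "a"(val), "Nd"(port) : "memory");
}

static inline void amd64_io_wait(void)
{
        /* Writing to an unused port (traditionally 0x80) burns enough time
         * for the 8259 to settle between commands. */
        __asm__ volatile("outb %b0, %w1"
                        :: "a"((uint8_t)0), "Nd"((uint16_t)0x80) : "memory");
}
#endif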