SMP and timer interrupts

This commit is contained in:
2025-12-23 19:50:37 +01:00
parent 259aa732c8
commit c16170e4c2
31 changed files with 1766 additions and 88 deletions

View File

@@ -1,8 +1,11 @@
#include <amd64/apic.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/time.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
@@ -11,12 +14,22 @@
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24
#define LAPIC_ID 0x20 /* ID */
#define LAPIC_EOI 0xB0 /* End of interrupt */
#define LAPIC_SIVR 0xF0 /* Spurious interrupt vector register */
#define LAPIC_ICR 0x300
#define LAPIC_LVTTR 0x320 /* LVT timer register */
#define LAPIC_TIMICT 0x380 /* Initial count register */
#define LAPIC_TIMCCT 0x390 /* Current count register */
#define LAPIC_DCR 0x3E0 /* Divide config register */
static struct acpi_madt_ioapic apics[IOAPICS_MAX];
/* clang-format off */
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
/* clang-format on */
static size_t ioapic_entries = 0;
static size_t intr_src_override_entries = 0;
static uintptr_t lapic_mmio_base = 0;
extern void amd64_spin (void);
@@ -37,13 +50,13 @@ static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
for (size_t i = 0; i < ioapic_entries; i++) {
apic = &apics[i];
uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, 1);
uint32_t max = (version >> 16);
uint32_t max = ((version >> 16) & 0xFF);
if ((apic->gsi_base <= irq) && ((apic->gsi_base + max) > irq))
break;
if ((irq >= apic->gsi_base) && (irq <= (apic->gsi_base + max)))
return apic;
}
return apic;
return NULL;
}
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
@@ -65,7 +78,12 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
if (found_override) {
uint8_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
calc_flags = (lapic_id << 56) | (mode << 15) | (polarity << 14) | (vec & 0xFF) | flags;
calc_flags |= (uint64_t)mode << 15;
calc_flags |= (uint64_t)polarity << 13;
calc_flags |= flags;
} else {
calc_flags |= flags;
}
apic = amd64_ioapic_find (irq);
@@ -137,7 +155,7 @@ void amd64_ioapic_init (void) {
struct acpi_madt_ioapic* ioapic = (struct acpi_madt_ioapic*)current;
mm_map_kernel_page ((uintptr_t)ioapic->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic->address,
MM_PG_USER | MM_PG_RW);
MM_PG_PRESENT | MM_PG_RW);
apics[ioapic_entries++] = *ioapic;
} break;
case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
@@ -150,3 +168,60 @@ void amd64_ioapic_init (void) {
current = (struct acpi_entry_hdr*)((uintptr_t)current + current->length);
}
}
/* Resolve the LAPIC MMIO window (HHDM-mapped by amd64_lapic_init; 0 before that runs). */
static uintptr_t amd64_lapic_base (void) { return lapic_mmio_base; }
/* 32-bit volatile MMIO write to LAPIC register at byte offset `reg`. */
static void amd64_lapic_write (uint32_t reg, uint32_t value) {
    *(volatile uint32_t*)(amd64_lapic_base () + reg) = value;
}
/* 32-bit volatile MMIO read of LAPIC register at byte offset `reg`. */
static uint32_t amd64_lapic_read (uint32_t reg) {
    return *(volatile uint32_t*)(amd64_lapic_base () + reg);
}
/* The local-APIC ID lives in bits 31:24 of the ID register. */
uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }
/* End-of-interrupt: any value written to the EOI register acks the in-service vector. */
void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
/* Re-arm the LAPIC timer by reloading the initial-count register. */
void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick); }
/* Measure how many LAPIC timer ticks elapse in `us` microseconds.
 * Runs the timer masked (LVT bit 16) so no interrupt fires, lets it count
 * down from 0xFFFFFFFF while sleep_micro() waits (HPET-backed, per
 * kernel/amd64/time.c), then returns elapsed = start - current count.
 * NOTE(review): must use the same DCR divider as amd64_lapic_start (it
 * does: both write 0x03) or the calibrated value would be off. */
static uint32_t amd64_lapic_calibrate (uint32_t us) {
    amd64_lapic_write (LAPIC_DCR, 0x03); /* divide configuration; same value as in start */
    amd64_lapic_write (LAPIC_LVTTR, 0x20 | (1 << 16)); /* vector 0x20, masked */
    amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF); /* start the countdown */
    sleep_micro (us);
    uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT);
    return ticks;
}
/* Start the LAPIC timer in periodic mode (LVT bit 17) on vector 0x20 with
 * `ticks` as the reload value; uses the same divider as calibration. */
static void amd64_lapic_start (uint32_t ticks) {
    amd64_lapic_write (LAPIC_DCR, 0x03); /* same divide config as amd64_lapic_calibrate */
    amd64_lapic_write (LAPIC_LVTTR, 0x20 | (1 << 17)); /* vector 0x20, periodic, unmasked */
    amd64_lapic_write (LAPIC_TIMICT, ticks);
}
/* Bring up the local APIC on the calling CPU:
 *  - set the global-enable bit in IA32_APIC_BASE,
 *  - map the LAPIC MMIO page into the HHDM window,
 *  - software-enable via the spurious-interrupt vector register,
 *  - calibrate the timer over `us` microseconds and start it periodic.
 * Returns the calibrated tick count (the periodic reload value). */
uint64_t amd64_lapic_init (uint32_t us) {
    struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
    /* Bit 11 of IA32_APIC_BASE is the APIC global enable. */
    amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11));
    uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
    lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
    mm_map_kernel_page (lapic_paddr, lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK);
    /* Spurious vector 0xFF, bit 8 = APIC software enable. */
    amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
    uint32_t ticks = amd64_lapic_calibrate (us);
    amd64_lapic_start (ticks);
    return ticks;
}

View File

@@ -8,4 +8,9 @@ void amd64_ioapic_mask (uint8_t irq);
void amd64_ioapic_unmask (uint8_t irq);
void amd64_ioapic_init (void);
uint32_t amd64_lapic_id (void);
void amd64_lapic_tick (uint32_t tick);
void amd64_lapic_eoi (void);
uint64_t amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H

View File

@@ -1,13 +1,15 @@
#include <amd64/apic.h>
#include <amd64/debug.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <amd64/debug.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <limine/limine.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <uacpi/uacpi.h>
@@ -15,8 +17,16 @@
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
/* Timer-IRQ callback registered in bootmain(): prints a dot plus the id
 * of the CPU that handled the interrupt. */
void ack (void* arg) {
    (void)arg; /* unused; irq_attach passes NULL */
    debugprintf (". %u\n", thiscpu->id);
}
void bootmain (void) {
amd64_init ();
struct cpu* bsp_cpu = cpu_make ();
cpu_assign (bsp_cpu->id);
amd64_init (bsp_cpu, false);
amd64_debug_init ();
pmm_init ();
mm_init ();
@@ -26,14 +36,11 @@ void bootmain (void) {
amd64_ioapic_init ();
amd64_hpet_init ();
/* int* a = malloc (sizeof (int)); */
/* *a = 6969; */
/* DEBUG ("a=%p, *a=%d\n", a, *a); */
smp_init ();
for (size_t i = 0; i < 1000; i++) {
DEBUG ("i=%zu\n", i);
sleep_micro (1000000);
}
irq_attach (&ack, NULL, 32 + 0);
__asm__ volatile ("sti");
for (;;)
;

View File

@@ -3,11 +3,14 @@
#include <libk/printf.h>
#include <libk/std.h>
#include <libk/string.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#define PORT_COM1 0x03F8
#define BUFFER_SIZE 1024
spin_lock_t serial_lock = SPIN_LOCK_INIT;
static bool amd64_debug_serial_tx_empty (void) {
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
}
@@ -30,10 +33,15 @@ void debugprintf (const char* fmt, ...) {
buffer[sizeof (buffer) - 1] = '\0';
const char* p = buffer;
spin_lock (&serial_lock);
while (*p) {
amd64_debug_serial_write (*p);
p++;
}
spin_unlock (&serial_lock);
}
void amd64_debug_init (void) {

View File

@@ -1,6 +1,10 @@
cflags += --target=x86_64-pc-none-elf \
-mno-sse \
-mno-avx
-mno-sse2 \
-mno-avx \
-mno-mmx \
-mno-80387 \
-mno-red-zone
ldflags += --target=x86_64-pc-none-elf \
-Wl,-zmax-page-size=0x1000

29
kernel/amd64/gdt.h Normal file
View File

@@ -0,0 +1,29 @@
#ifndef _KERNEL_AMD64_GDT_H
#define _KERNEL_AMD64_GDT_H
#include <aux/compiler.h>
#include <libk/std.h>
/* Size of each per-CPU kernel/exception/IRQ stack (see struct cpu). */
#define KSTACK_SIZE (16 * 1024)
/* One 8-byte GDT descriptor in the legacy split-field layout. */
struct gdt_entry {
    uint16_t limitlow; /* limit 15:0 */
    uint16_t baselow;  /* base 15:0 */
    uint8_t basemid;   /* base 23:16 */
    uint8_t access;    /* present / DPL / type bits */
    uint8_t gran;      /* flags + limit 19:16 */
    uint8_t basehigh;  /* base 31:24 */
} PACKED;
/* Operand for lgdt: 16-bit limit + 64-bit linear base. */
struct gdt_ptr {
    uint16_t limit;
    uint64_t base;
} PACKED;
/* Full per-CPU GDT: the 5 legacy segments plus the 16-byte long-mode TSS
 * descriptor, which occupies two consecutive 8-byte slots. */
struct gdt_extended {
    struct gdt_entry old[5];
    struct gdt_entry tsslow;  /* low half of the 64-bit TSS descriptor */
    struct gdt_entry tsshigh; /* high half: carries base bits 63:32 */
} PACKED;
#endif // _KERNEL_AMD64_GDT_H

View File

@@ -1,6 +1,6 @@
#include <amd64/init.h>
#include <amd64/intr.h>
#include <amd64/tss.h>
#include <amd64/smp.h>
#include <aux/compiler.h>
#include <libk/std.h>
#include <libk/string.h>
@@ -14,33 +14,6 @@
#define TSS 0x80
#define TSS_PRESENT 0x89
#define KSTACK_SIZE (8 * 1024)
struct gdt_entry {
uint16_t limitlow;
uint16_t baselow;
uint8_t basemid;
uint8_t access;
uint8_t gran;
uint8_t basehigh;
} PACKED;
struct gdt_ptr {
uint16_t limit;
uint64_t base;
} PACKED;
struct gdt_extended {
struct gdt_entry old[5];
struct gdt_entry tsslow;
struct gdt_entry tsshigh;
} PACKED;
ALIGNED (16) static volatile uint8_t kernel_stack[KSTACK_SIZE];
ALIGNED (16) static volatile uint8_t except_stack[KSTACK_SIZE];
ALIGNED (16) static volatile uint8_t irq_stack[KSTACK_SIZE];
ALIGNED (16) static volatile struct gdt_extended gdt;
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
uint8_t acc, uint8_t gran) {
ent->baselow = (base & 0xFFFF);
@@ -51,39 +24,39 @@ static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32
ent->access = acc;
}
static void amd64_gdt_init (void) {
volatile struct tss* tss = amd64_get_tss ();
static void amd64_gdt_init (struct cpu* cpu) {
volatile struct tss* tss = &cpu->tss;
volatile struct gdt_extended* gdt = &cpu->gdt;
memset ((void*)&gdt, 0, sizeof (gdt));
memset ((void*)kernel_stack, 0, sizeof (kernel_stack));
memset ((void*)gdt, 0, sizeof (*gdt));
memset ((void*)tss, 0, sizeof (*tss));
tss->iopb_off = sizeof (*tss);
tss->rsp0 = (uint64_t)((uintptr_t)kernel_stack + sizeof (kernel_stack));
tss->ist[0] = (uint64_t)((uintptr_t)except_stack + sizeof (except_stack));
tss->ist[1] = (uint64_t)((uintptr_t)irq_stack + sizeof (irq_stack));
tss->rsp0 = (uint64_t)((uintptr_t)cpu->kernel_stack + sizeof (cpu->kernel_stack));
tss->ist[0] = (uint64_t)((uintptr_t)cpu->except_stack + sizeof (cpu->except_stack));
tss->ist[1] = (uint64_t)((uintptr_t)cpu->irq_stack + sizeof (cpu->irq_stack));
uint64_t tssbase = (uint64_t)&tss;
uint64_t tssbase = (uint64_t)tss;
uint64_t tsslimit = sizeof (*tss) - 1;
amd64_gdt_set (&gdt.old[0], 0, 0, 0, 0);
amd64_gdt_set (&gdt.old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set (&gdt.old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set (&gdt.old[3], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set (&gdt.old[4], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set (&gdt.tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0);
amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0);
amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0);
amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xFA, 0xA0);
amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xF2, 0xC0);
amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);
uint32_t tssbasehigh = (tssbase >> 32);
gdt.tsshigh.limitlow = (tssbasehigh & 0xFFFF);
gdt.tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
gdt.tsshigh.basemid = 0;
gdt.tsshigh.basehigh = 0;
gdt.tsshigh.access = 0;
gdt.tsshigh.gran = 0;
gdt->tsshigh.limitlow = (tssbasehigh & 0xFFFF);
gdt->tsshigh.baselow = (tssbasehigh >> 16) & 0xFFFF;
gdt->tsshigh.basemid = 0;
gdt->tsshigh.basehigh = 0;
gdt->tsshigh.access = 0;
gdt->tsshigh.gran = 0;
struct gdt_ptr gdtr;
gdtr.limit = sizeof (gdt) - 1;
gdtr.base = (uint64_t)&gdt;
gdtr.limit = sizeof (*gdt) - 1;
gdtr.base = (uint64_t)gdt;
__asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");
__asm__ volatile ("pushq %[kcode]\n"
@@ -102,7 +75,10 @@ static void amd64_gdt_init (void) {
__asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
}
void amd64_init (void) {
amd64_gdt_init ();
amd64_intr_init ();
/* Per-CPU architecture init: always builds and loads this CPU's GDT+TSS.
 * APs pass load_idt=true and only reload the already-built shared IDT;
 * the BSP passes false and runs the full interrupt init (PIC + IDT). */
void amd64_init (struct cpu* cpu, bool load_idt) {
    amd64_gdt_init (cpu);
    if (load_idt)
        amd64_load_idt ();
    else
        amd64_intr_init ();
}

View File

@@ -1,6 +1,8 @@
#ifndef _KERNEL_AMD64_INIT_H
#define _KERNEL_AMD64_INIT_H
void amd64_init (void);
#include <amd64/smp.h>
void amd64_init (struct cpu* cpu, bool load_idt);
#endif // _KERNEL_AMD64_INIT_H

View File

@@ -1,9 +1,13 @@
#include <amd64/apic.h>
#include <amd64/intr.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <sys/debug.h>
#include <sys/irq.h>
#include <sys/smp.h>
/* 8259 PIC defs. */
#define PIC1 0x20
@@ -77,20 +81,23 @@ static void amd64_init_pic (void) {
#undef IO_OP
}
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags, uint8_t ist) {
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
uint8_t ist) {
ent->intrlow = (handler & 0xFFFF);
ent->kernel_cs = 0x08; // GDT_KCODE (init.c)
ent->ist = 0;
ent->ist = ist;
ent->attrs = flags;
ent->intrmid = ((handler >> 16) & 0xFFFF);
ent->intrhigh = ((handler >> 32) & 0xFFFFFFFF);
ent->resv = 0;
}
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
static void amd64_idt_init (void) {
memset ((void*)idt_entries, 0, sizeof (idt_entries));
#define IDT_ENTRY(n, ist) \
#define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
IDT_ENTRY (0, 0);
@@ -146,8 +153,7 @@ static void amd64_idt_init (void) {
idt.limit = sizeof (idt_entries) - 1;
idt.base = (uint64_t)idt_entries;
__asm__ volatile ("lidt %0" ::"m"(idt));
__asm__ volatile ("sti");
amd64_load_idt ();
}
static void amd64_intr_exception (struct saved_regs* regs) {
@@ -179,6 +185,14 @@ void amd64_intr_handler (void* stack_ptr) {
if (regs->trap <= 31) {
amd64_intr_exception (regs);
} else if (regs->trap >= 32) {
amd64_lapic_eoi ();
__asm__ volatile ("sti");
irq_invoke_each (regs->trap);
__asm__ volatile ("cli");
} else {
DEBUG ("unknown trap %lu\n", regs->trap);
}
@@ -188,3 +202,36 @@ void amd64_intr_init (void) {
amd64_init_pic ();
amd64_idt_init ();
}
/* Aux. */
/* Snapshot RFLAGS and disable interrupts; returns the pre-cli flags. */
static uint64_t amd64_irq_save_flags (void) {
    uint64_t rflags;
    __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
    return rflags;
}
/* Re-enable interrupts only if IF (RFLAGS bit 9) was set when saved. */
static void amd64_irq_restore_flags (uint64_t rflags) {
    if (rflags & (1ULL << 9))
        __asm__ volatile ("sti");
}
/* Nested interrupt-disable for the current CPU: only the outermost caller
 * (nesting 0 -> 1) saves RFLAGS and masks IRQs; deeper calls just bump the
 * per-CPU counter.  Paired with irq_restore(). */
void irq_save (void) {
    /* before smp init. */
    if (thiscpu == NULL)
        return;
    int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
    if (prev == 0)
        thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
    /* NOTE(review): an IRQ may fire between the fetch_add and the cli in
     * amd64_irq_save_flags; this is safe only if every handler balances
     * its own save/restore pairs — confirm that invariant holds. */
}
/* Undo one irq_save(); the outermost caller (nesting 1 -> 0) restores the
 * saved RFLAGS, re-enabling interrupts only if they were enabled before. */
void irq_restore (void) {
    /* before smp init. */
    if (thiscpu == NULL)
        return;
    int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
    if (prev == 1)
        amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}

View File

@@ -31,6 +31,7 @@ struct saved_regs {
uint64_t ss;
} PACKED;
void amd64_load_idt (void);
void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H

View File

@@ -1,8 +1,5 @@
.extern amd64_intr_handler
dupa:
jmp dupa
#define err(z) \
pushq $z;
@@ -36,7 +33,7 @@ dupa:
popq %r10; \
popq %r9; \
popq %r8; \
pushq %rbx; \
popq %rbx; \
popq %rbp; \
popq %rdi; \
popq %rsi; \
@@ -52,9 +49,12 @@ dupa:
push_regs; \
cld; \
movq %rsp, %rdi; \
movq %rsp, %rax; \
subq $8, %rsp; \
andq $~0xF, %rsp; \
movq %rax, (%rsp); \
callq amd64_intr_handler; \
movq %rdi, %rsp; \
movq (%rsp), %rsp; \
pop_regs; \
addq $16, %rsp; \
iretq;

View File

@@ -24,6 +24,10 @@ static uintptr_t amd64_current_cr3 (void) {
return cr3;
}
/* Switch the calling CPU onto the kernel page tables (used by APs during
 * bring-up, which start on Limine's bootstrap address space). */
void amd64_load_kernel_cr3 (void) {
    __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
}
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
struct pg_index ret;

View File

@@ -11,4 +11,6 @@ struct pd {
uintptr_t cr3_paddr;
};
void amd64_load_kernel_cr3 (void);
#endif // _KERNEL_AMD64_MM_H

1093
kernel/amd64/msr-index.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -3,6 +3,9 @@
#include <libk/std.h>
#define MSR_FS_BASE 0xC0000100
#define MSR_GS_BASE 0xC0000101
uint64_t amd64_rdmsr (uint32_t msr);
void amd64_wrmsr (uint32_t msr, uint64_t value);

72
kernel/amd64/smp.c Normal file
View File

@@ -0,0 +1,72 @@
#include <amd64/apic.h>
#include <amd64/init.h>
#include <amd64/mm.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/smp.h>
static uint32_t cpu_counter = 0;
static spin_lock_t cpu_counter_lock = SPIN_LOCK_INIT;
static struct cpu cpus[CPUS_MAX];
struct cpu* cpu_make (void) {
spin_lock (&cpu_counter_lock);
int id = cpu_counter++;
spin_unlock (&cpu_counter_lock);
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->id = id;
return cpu_get (id);
}
/* Look up a CPU slot by logical id; NULL for ids outside the table. */
struct cpu* cpu_get (uint32_t id) {
    return (id < CPUS_MAX) ? &cpus[id] : NULL;
}
/* This CPU's logical id, read back from IA32_GS_BASE (set by cpu_assign). */
uint32_t cpu_id (void) { return (uint32_t)amd64_rdmsr (MSR_GS_BASE); }
/* Publish the calling CPU's logical id by storing it in IA32_GS_BASE. */
void cpu_assign (uint32_t id) { amd64_wrmsr (MSR_GS_BASE, (uint64_t)id); }
/* Limine AP entry point (runs once per secondary CPU): switch onto the
 * kernel page tables, claim a per-CPU slot, load this CPU's GDT/TSS plus
 * the shared IDT, start the periodic LAPIC timer, then idle with
 * interrupts enabled.  Never returns. */
static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) {
    amd64_load_kernel_cr3 ();
    struct cpu* cpu = cpu_make ();
    /* NOTE(review): cpu_make() returns NULL once CPUS_MAX slots are used;
     * dereferencing cpu->id would then fault — consider parking the AP. */
    cpu_assign (cpu->id);
    amd64_init (cpu, true); /* gdt + idt */
    thiscpu->lapic_ticks = amd64_lapic_init (2500);
    amd64_lapic_tick (thiscpu->lapic_ticks);
    DEBUG ("CPU %u is online!\n", thiscpu->id);
    __asm__ volatile ("sti");
    for (;;)
        ;
}
/* Start the BSP's periodic LAPIC timer, then release every application
 * processor to amd64_smp_bootstrap via Limine's MP goto_address.
 * Fix: the BSP-skip test compared each entry's LAPIC id against
 * thiscpu->id, which is our *sequential* cpu counter, not a LAPIC id —
 * it only worked by accident when both happened to be 0.  Compare
 * against the BSP's LAPIC id reported by the Limine MP response. */
void smp_init (void) {
    thiscpu->lapic_ticks = amd64_lapic_init (2500);
    struct limine_mp_response* mp = limine_mp_request.response;
    for (size_t i = 0; i < mp->cpu_count; i++) {
        /* Skip the bootstrap processor: compare in LAPIC-id space. */
        if (mp->cpus[i]->lapic_id == mp->bsp_lapic_id)
            continue;
        DEBUG ("Trying CPU %u\n", mp->cpus[i]->lapic_id);
        mp->cpus[i]->goto_address = &amd64_smp_bootstrap;
    }
}

32
kernel/amd64/smp.h Normal file
View File

@@ -0,0 +1,32 @@
#ifndef _KERNEL_AMD64_SMP_H
#define _KERNEL_AMD64_SMP_H
#include <amd64/gdt.h>
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/std.h>
/* Upper bound on supported CPUs (size of the static table in smp.c). */
#define CPUS_MAX 32
/* Per-CPU state; the slot index doubles as the CPU's logical id. */
struct cpu {
    uint64_t lapic_ticks; /* periodic LAPIC timer reload value from calibration */
    uint32_t id;          /* logical id (== index into the cpu table) */
    struct {
        uint64_t rflags;    /* RFLAGS saved by the outermost irq_save() */
        atomic_int nesting; /* irq_save()/irq_restore() nesting depth */
    } irq_ctx;
    volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16); /* TSS rsp0 */
    volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16); /* TSS IST1 */
    volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16);    /* TSS IST2 */
    volatile struct gdt_extended gdt ALIGNED (16); /* this CPU's GDT */
    volatile struct tss tss;
};
struct cpu* cpu_make (void);
struct cpu* cpu_get (uint32_t id);
void cpu_assign (uint32_t id);
uint32_t cpu_id (void);
#define thiscpu (cpu_get (cpu_id ()))
#endif // _KERNEL_AMD64_SMP_H

View File

@@ -9,7 +9,8 @@ c += amd64/bootmain.c \
amd64/msr.c \
amd64/hpet.c \
amd64/mm.c \
amd64/time.c
amd64/time.c \
amd64/smp.c
S += amd64/intr_stub.S \
amd64/spin.S
@@ -27,4 +28,5 @@ o += amd64/bootmain.o \
amd64/msr.o \
amd64/hpet.o \
amd64/mm.o \
amd64/time.o
amd64/time.o \
amd64/smp.o

View File

@@ -1,7 +1,5 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <sys/time.h>
#include <amd64/hpet.h>
void sleep_micro (size_t us) {
amd64_hpet_sleep_micro (us);
}
void sleep_micro (size_t us) { amd64_hpet_sleep_micro (us); }

1
kernel/irq/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.o

59
kernel/irq/irq.c Normal file
View File

@@ -0,0 +1,59 @@
#include <irq/irq.h>
#include <libk/list.h>
#include <libk/std.h>
#include <mm/liballoc.h>
#include <sync/spin_lock.h>
#if defined(__x86_64__)
#include <amd64/apic.h>
#endif
/* TODO: figure out a generic way to work with IRQs */
static struct irq* irqs = NULL;
static spin_lock_t irqs_lock;
/* Register `func(arg)` as a handler for interrupt vector `irq_num`, and
 * (on x86-64) route the matching GSI through the I/O APIC to this CPU's
 * LAPIC.  Returns false on invalid arguments or allocation failure.
 * Fix: vectors 0..31 are CPU exceptions — the `irq_num - 0x20` GSI
 * computation below would underflow for them, so reject such vectors. */
bool irq_attach (void (*func) (void*), void* arg, uint32_t irq_num) {
    if (func == NULL || irq_num < 0x20)
        return false;
    struct irq* irq = malloc (sizeof (*irq));
    if (irq == NULL) {
        return false;
    }
    irq->func = func;
    irq->arg = arg;
    irq->irq_num = irq_num;
    spin_lock (&irqs_lock);
    linklist_append (struct irq*, irqs, irq);
    spin_unlock (&irqs_lock);
#if defined(__x86_64__)
    amd64_ioapic_route_irq (irq_num, irq_num - 0x20, 0, amd64_lapic_id ());
#endif
    return true;
}
/* Unregister every handler whose callback equals `func`.
 * Fix: the removed nodes were malloc'd in irq_attach() but never freed —
 * each detach leaked one struct irq.  Free them after unlinking; the
 * foreach macro caches the next pointer in irq_tmp, so freeing the
 * current node mid-iteration is safe. */
void irq_detach (void (*func) (void*)) {
    spin_lock (&irqs_lock);
    struct irq *irq, *irq_tmp;
    linklist_foreach (irqs, irq, irq_tmp) {
        if ((uintptr_t)irq->func == (uintptr_t)func) {
            linklist_remove (struct irq*, irqs, irq);
            free (irq);
        }
    }
    spin_unlock (&irqs_lock);
}
/* Call every handler registered for vector `irq_num`.
 * Invoked from interrupt context (amd64_intr_handler re-enables IRQs and
 * sends the LAPIC EOI before calling in).
 * NOTE(review): handlers run with irqs_lock held — a handler calling
 * irq_attach/irq_detach would self-deadlock on this CPU; confirm no
 * handler does, or snapshot the matching entries before invoking. */
void irq_invoke_each (uint32_t irq_num) {
    spin_lock (&irqs_lock);
    struct irq *irq, *irq_tmp;
    linklist_foreach (irqs, irq, irq_tmp) {
        if (irq->irq_num == irq_num)
            irq->func (irq->arg);
    }
    spin_unlock (&irqs_lock);
}

18
kernel/irq/irq.h Normal file
View File

@@ -0,0 +1,18 @@
#ifndef _KERNEL_IRQ_IRQ_H
#define _KERNEL_IRQ_IRQ_H
#include <libk/std.h>
/* One registered interrupt handler; nodes form a singly-linked list. */
struct irq {
    struct irq* next;     /* next handler in the global list */
    void (*func) (void*); /* callback invoked from IRQ context */
    void* arg;            /* opaque argument passed to func */
    uint32_t irq_num;     /* interrupt vector this handler serves */
};
/* Register func(arg) on vector irq_num; returns false on failure. */
bool irq_attach (void (*func) (void*), void* arg, uint32_t irq_num);
/* Remove all handlers whose callback equals func. */
void irq_detach (void (*func) (void*));
/* Invoke every handler registered for vector irq_num. */
void irq_invoke_each (uint32_t irq_num);
#endif // _KERNEL_IRQ_IRQ_H

3
kernel/irq/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += irq/irq.c
o += irq/irq.o

208
kernel/libk/list.h Normal file
View File

@@ -0,0 +1,208 @@
#ifndef _KERNEL_LIBK_LIST_H
#define _KERNEL_LIBK_LIST_H
/* Append `new` at the tail of a doubly-linked list (O(n) tail walk).
 * `type` is the node pointer type (e.g. `struct foo*`); NULL `new` is a no-op. */
#define dlinklist_append(type, head, new) \
    do { \
        if ((new) != NULL) { \
            (new)->next = NULL; \
            if ((head) != NULL) { \
                type __tmp = (head); \
                while (__tmp->next != NULL) { \
                    __tmp = __tmp->next; \
                } \
                __tmp->next = (new); \
                (new)->prev = __tmp; \
            } else { \
                (new)->prev = NULL; \
                (head) = (new); \
            } \
        } \
    } while (0)
/* Push `new` at the front in O(1); `head` is updated.  NULL `new` is a no-op. */
#define dlinklist_prepend(head, new) \
    do { \
        if ((new) != NULL) { \
            (new)->prev = NULL; \
            (new)->next = (head); \
            if ((head) != NULL) { \
                (head)->prev = (new); \
            } \
            (head) = (new); \
        } \
    } while (0)
/* Unlink `ele` and clear its links.  Assumes `ele` is actually on the list
 * headed by `head`: a node with prev == NULL is treated as the head. */
#define dlinklist_remove(head, ele) \
    do { \
        if ((ele) != NULL) { \
            if ((ele)->prev != NULL) { \
                (ele)->prev->next = (ele)->next; \
            } else { \
                (head) = (ele)->next; \
            } \
            if ((ele)->next != NULL) { \
                (ele)->next->prev = (ele)->prev; \
            } \
            (ele)->next = NULL; \
            (ele)->prev = NULL; \
        } \
    } while (0)
/* Linear search: `out` = first node whose member `propname` == propvalue,
 * or NULL if no match. */
#define dlinklist_find(type, head, out, propname, propvalue) \
    do { \
        (out) = NULL; \
        type __tmp = (head); \
        while (__tmp) { \
            if (__tmp->propname == (propvalue)) { \
                (out) = __tmp; \
                break; \
            } \
            __tmp = __tmp->next; \
        } \
    } while (0)
/* Removal-safe iteration: `tmp` caches the next pointer before the body
 * runs, so the body may unlink/free `var`. */
#define dlinklist_foreach(head, var, tmp) \
    for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
         var = tmp, tmp = (var ? var->next : NULL))
/* As dlinklist_foreach, additionally counting iterations in `idx`. */
#define dlinklist_foreach_index(head, var, tmp, idx) \
    for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
         var = tmp, tmp = (var ? var->next : NULL), (idx)++)
/* As dlinklist_foreach_index, stopping after at most `max` nodes. */
#define dlinklist_foreach_index_limit(head, var, tmp, idx, max) \
    for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
         var = tmp, tmp = (var ? var->next : NULL), (idx)++)
/* `out` = last node of the list, or NULL when empty (O(n) walk). */
#define dlinklist_back(type, head, out) \
    do { \
        (out) = NULL; \
        if ((head) != NULL) { \
            type __tmp = (head); \
            while (__tmp->next != NULL) { \
                __tmp = __tmp->next; \
            } \
            (out) = __tmp; \
        } \
    } while (0)
/* `out` = first node reachable by walking prev from `head`, or NULL.
 * (Equals `head` itself when `head` really is the list head.) */
#define dlinklist_front(type, head, out) \
    do { \
        (out) = NULL; \
        if ((head) != NULL) { \
            type __tmp = (head); \
            while (__tmp->prev != NULL) { \
                __tmp = __tmp->prev; \
            } \
            (out) = __tmp; \
        } \
    } while (0)
/* Insert `new` after `pos`; with pos == NULL it only seeds an empty list.
 * NOTE(review): pos == NULL with a non-empty list silently does nothing,
 * and the empty-list branch dereferences `new` without a NULL check —
 * confirm callers never pass NULL `new` there. */
#define dlinklist_insert_after(head, pos, new) \
    do { \
        if ((pos) != NULL && (new) != NULL) { \
            (new)->prev = (pos); \
            (new)->next = (pos)->next; \
            if ((pos)->next != NULL) { \
                (pos)->next->prev = (new); \
            } \
            (pos)->next = (new); \
        } else if ((pos) == NULL && (head) == NULL) { \
            (new)->prev = NULL; \
            (new)->next = NULL; \
            (head) = (new); \
        } \
    } while (0)
/* Insert `new` before `pos`, updating `head` when `pos` was the head;
 * with pos == NULL it only seeds an empty list (same caveats as above). */
#define dlinklist_insert_before(head, pos, new) \
    do { \
        if ((pos) != NULL && (new) != NULL) { \
            (new)->next = (pos); \
            (new)->prev = (pos)->prev; \
            if ((pos)->prev != NULL) { \
                (pos)->prev->next = (new); \
            } else { \
                (head) = (new); \
            } \
            (pos)->prev = (new); \
        } else if ((pos) == NULL && (head) == NULL) { \
            (new)->prev = NULL; \
            (new)->next = NULL; \
            (head) = (new); \
        } \
    } while (0)
/* Append `new` at the tail of a singly-linked list (O(n) tail walk).
 * `type` is the node pointer type (e.g. `struct foo*`), `head` the list
 * head lvalue; a NULL `new` is a no-op.
 * Fix: the internal temporary was named `__tmp` — identifiers beginning
 * with a double underscore are reserved for the implementation (C11
 * 7.1.3), so use a plain `ll_` prefix.  Also hoists the duplicated
 * `(new)->next = NULL` out of both branches. */
#define linklist_append(type, head, new) \
    do { \
        if ((new) != NULL) { \
            (new)->next = NULL; \
            if ((head) != NULL) { \
                type ll_tail = (head); \
                while (ll_tail->next != NULL) { \
                    ll_tail = ll_tail->next; \
                } \
                ll_tail->next = (new); \
            } else { \
                (head) = (new); \
            } \
        } \
    } while (0)
/* Unlink `ele` from a singly-linked list by walking from `head`.
 * A node not on the list, a NULL `ele`, or an empty list are all no-ops;
 * on success the removed node's `next` is cleared.
 * Fix: the internal temporaries were named `__cur`/`__prev` — double
 * underscore identifiers are reserved for the implementation (C11 7.1.3). */
#define linklist_remove(type, head, ele) \
    do { \
        if ((head) != NULL && (ele) != NULL) { \
            type ll_cur = (head); \
            type ll_prev = NULL; \
            while (ll_cur != NULL && ll_cur != (ele)) { \
                ll_prev = ll_cur; \
                ll_cur = ll_cur->next; \
            } \
            if (ll_cur == (ele)) { \
                if (ll_prev != NULL) { \
                    ll_prev->next = ll_cur->next; \
                } else { \
                    (head) = ll_cur->next; \
                } \
                (ele)->next = NULL; \
            } \
        } \
    } while (0)
/* Linear search: `out` = first node whose member `propname` == propvalue,
 * or NULL if no match. */
#define linklist_find(type, head, out, propname, propvalue) \
    do { \
        (out) = NULL; \
        type __tmp = (head); \
        while (__tmp) { \
            if (__tmp->propname == (propvalue)) { \
                (out) = __tmp; \
                break; \
            } \
            __tmp = __tmp->next; \
        } \
    } while (0)
/* Removal-safe iteration: `tmp` caches the next pointer before the body
 * runs, so the body may unlink/free `var`. */
#define linklist_foreach(head, var, tmp) \
    for (var = (head), tmp = (var ? var->next : NULL); var != NULL; \
         var = tmp, tmp = (var ? var->next : NULL))
/* As linklist_foreach, additionally counting iterations in `idx`. */
#define linklist_foreach_index(head, var, tmp, idx) \
    for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL; \
         var = tmp, tmp = (var ? var->next : NULL), (idx)++)
/* As linklist_foreach_index, stopping after at most `max` nodes. */
#define linklist_foreach_index_limit(head, var, tmp, idx, max) \
    for ((idx) = 0, var = (head), tmp = (var ? var->next : NULL); var != NULL && (idx) < (max); \
         var = tmp, tmp = (var ? var->next : NULL), (idx)++)
/* `out` = last node of the list, or NULL when empty (O(n) walk). */
#define linklist_back(type, head, out) \
    do { \
        (out) = NULL; \
        if ((head) != NULL) { \
            type __tmp = (head); \
            while (__tmp->next != NULL) { \
                __tmp = __tmp->next; \
            } \
            (out) = __tmp; \
        } \
    } while (0)
#endif // _KERNEL_LIBK_LIST_H

View File

@@ -18,3 +18,4 @@ volatile uint64_t limine_requests_end_marker[] = LIMINE_REQUESTS_END_MARKER;
DECL_REQ (hhdm, HHDM);
DECL_REQ (memmap, MEMMAP);
DECL_REQ (rsdp, RSDP);
DECL_REQ (mp, MP);

View File

@@ -8,5 +8,6 @@
EXTERN_REQ (hhdm);
EXTERN_REQ (memmap);
EXTERN_REQ (rsdp);
EXTERN_REQ (mp);
#endif // _KERNEL_LIMINE_REQUESTS_H

View File

@@ -4,3 +4,4 @@ include sync/src.mk
include mm/src.mk
include limine/src.mk
include uACPI/src.mk
include irq/src.mk

View File

@@ -1,10 +1,17 @@
#include <libk/std.h>
#include <sync/spin_lock.h>
#include <sys/irq.h>
#include <sys/spin_lock.h>
/* Acquire `sl`: first masks IRQs on this CPU (nested, via irq_save) so the
 * holder cannot be interrupted while spinning or inside the critical
 * section, then spins on the atomic flag with acquire ordering. */
void spin_lock (spin_lock_t* sl) {
    irq_save ();
    while (atomic_flag_test_and_set_explicit (sl, memory_order_acquire))
        spin_lock_relax ();
}
void spin_unlock (spin_lock_t* sl) { atomic_flag_clear_explicit (sl, memory_order_release); }
/* Release `sl` with release ordering, then undo the matching irq_save()
 * (interrupts are re-enabled only when the outermost nesting level exits). */
void spin_unlock (spin_lock_t* sl) {
    atomic_flag_clear_explicit (sl, memory_order_release);
    irq_restore ();
}

7
kernel/sys/irq.h Normal file
View File

@@ -0,0 +1,7 @@
#ifndef _KERNEL_SYS_IRQ_H
#define _KERNEL_SYS_IRQ_H
/* Arch-provided nested IRQ masking: irq_save() disables interrupts on the
 * current CPU (counting nesting depth); irq_restore() re-enables them once
 * the outermost caller exits.  Both are no-ops before per-CPU data is up. */
void irq_save (void);
void irq_restore (void);
#endif // _KERNEL_SYS_IRQ_H

10
kernel/sys/smp.h Normal file
View File

@@ -0,0 +1,10 @@
#ifndef _KERNEL_SYS_SMP_H
#define _KERNEL_SYS_SMP_H
/* Pull in the arch's per-CPU definitions (struct cpu, thiscpu, ...). */
#if defined(__x86_64__)
#include <amd64/smp.h>
#endif
/* Bring all secondary CPUs online; called once by the BSP after timers are up. */
void smp_init (void);
#endif // _KERNEL_SYS_SMP_H