From c68b00f2ea31d3631332803dc871543ff162b763 Mon Sep 17 00:00:00 2001 From: kamkow1 Date: Fri, 20 Feb 2026 15:33:16 +0100 Subject: [PATCH] Remove amd64_ platform prefix --- kernel/amd64/apic.c | 74 ++++++++++++++++++------------------- kernel/amd64/apic.h | 12 +++--- kernel/amd64/bootmain.c | 8 ++-- kernel/amd64/debug.c | 34 ++++++++--------- kernel/amd64/debug.h | 2 +- kernel/amd64/hpet.c | 42 ++++++++++----------- kernel/amd64/hpet.h | 4 +- kernel/amd64/init.c | 26 ++++++------- kernel/amd64/init.h | 2 +- kernel/amd64/intr.c | 67 +++++++++++++++++---------------- kernel/amd64/intr.h | 4 +- kernel/amd64/intr_stub.S | 8 ++-- kernel/amd64/io.c | 24 +++++------- kernel/amd64/io.h | 18 ++++----- kernel/amd64/mm.c | 66 ++++++++++++++++----------------- kernel/amd64/mm.h | 2 +- kernel/amd64/msr.c | 4 +- kernel/amd64/msr.h | 4 +- kernel/amd64/sched.S | 4 +- kernel/amd64/sched.h | 2 +- kernel/amd64/sched1.c | 4 +- kernel/amd64/smp.c | 18 ++++----- kernel/amd64/spin.S | 6 +-- kernel/amd64/syscall.c | 14 +++---- kernel/amd64/syscallentry.S | 8 ++-- kernel/amd64/time.c | 2 +- kernel/device/ps2_kb.c | 18 ++++----- kernel/sys/spin.h | 5 +-- 28 files changed, 236 insertions(+), 246 deletions(-) diff --git a/kernel/amd64/apic.c b/kernel/amd64/apic.c index 0b91bdf..10bb7d8 100644 --- a/kernel/amd64/apic.c +++ b/kernel/amd64/apic.c @@ -56,7 +56,7 @@ static size_t intr_src_override_entries = 0; static spin_lock_t lapic_calibration_lock = SPIN_LOCK_INIT; /* Read IOAPIC */ -static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) { +static uint32_t ioapic_read (struct ioapic* ioapic, uint32_t reg) { spin_lock (&ioapic->lock); *(volatile uint32_t*)ioapic->mmio_base = reg; uint32_t ret = *(volatile uint32_t*)(ioapic->mmio_base + 0x10); @@ -65,7 +65,7 @@ static uint32_t amd64_ioapic_read (struct ioapic* ioapic, uint32_t reg) { } /* Write IOAPIC */ -static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t value) { +static void ioapic_write (struct 
ioapic* ioapic, uint32_t reg, uint32_t value) { spin_lock (&ioapic->lock); *(volatile uint32_t*)ioapic->mmio_base = reg; *(volatile uint32_t*)(ioapic->mmio_base + 0x10) = value; @@ -73,12 +73,12 @@ static void amd64_ioapic_write (struct ioapic* ioapic, uint32_t reg, uint32_t va } /* Find an IOAPIC corresposting to provided IRQ */ -static struct ioapic* amd64_ioapic_find (uint32_t irq) { +static struct ioapic* ioapic_find (uint32_t irq) { struct ioapic* ioapic = NULL; for (size_t i = 0; i < ioapic_entries; i++) { ioapic = &ioapics[i]; - uint32_t version = amd64_ioapic_read (ioapic, 1); + uint32_t version = ioapic_read (ioapic, 1); uint32_t max = ((version >> 16) & 0xFF); if ((irq >= ioapic->table_data.gsi_base) && (irq <= (ioapic->table_data.gsi_base + max))) @@ -97,7 +97,7 @@ static struct ioapic* amd64_ioapic_find (uint32_t irq) { * flags - IOAPIC redirection flags. * lapic_id - Local APIC that will receive the interrupt. */ -void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) { +void ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id) { struct ioapic* ioapic = NULL; struct acpi_madt_interrupt_source_override* override; bool found_override = false; @@ -121,7 +121,7 @@ void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_ uint32_t gsi = found_override ? 
override->gsi : irq; - ioapic = amd64_ioapic_find (gsi); + ioapic = ioapic_find (gsi); DEBUG ("%p\n", ioapic); if (ioapic == NULL) @@ -129,12 +129,12 @@ void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_ uint32_t irq_reg = ((gsi - ioapic->table_data.gsi_base) * 2) + 0x10; - amd64_ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32)); - amd64_ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags); + ioapic_write (ioapic, irq_reg + 1, (uint32_t)(calc_flags >> 32)); + ioapic_write (ioapic, irq_reg, (uint32_t)calc_flags); } /* Find and initialize the IOAPIC */ -void amd64_ioapic_init (void) { +void ioapic_init (void) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; struct uacpi_table apic_table; @@ -176,41 +176,39 @@ void amd64_ioapic_init (void) { } /* Get MMIO base of Local APIC */ -static uintptr_t amd64_lapic_base (void) { return thiscpu->lapic_mmio_base; } +static uintptr_t lapic_base (void) { return thiscpu->lapic_mmio_base; } /* Write Local APIC */ -static void amd64_lapic_write (uint32_t reg, uint32_t value) { - *(volatile uint32_t*)(amd64_lapic_base () + reg) = value; +static void lapic_write (uint32_t reg, uint32_t value) { + *(volatile uint32_t*)(lapic_base () + reg) = value; } /* Read Local APIC */ -static uint32_t amd64_lapic_read (uint32_t reg) { - return *(volatile uint32_t*)(amd64_lapic_base () + reg); -} +static uint32_t lapic_read (uint32_t reg) { return *(volatile uint32_t*)(lapic_base () + reg); } /* Get ID of Local APIC */ -uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; } +uint32_t lapic_id (void) { return lapic_read (LAPIC_ID) >> 24; } /* Send End of interrupt command to Local APIC */ -void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); } +void lapic_eoi (void) { lapic_write (LAPIC_EOI, 0); } /* * Calibrate Local APIC to send interrupts in a set interval. 
* * us - Period length in microseconds */ -static uint32_t amd64_lapic_calibrate (uint32_t us) { +static uint32_t lapic_calibrate (uint32_t us) { spin_lock (&lapic_calibration_lock); - amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE); + lapic_write (LAPIC_DCR, DIVIDER_VALUE); - amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16)); - amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF); + lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16)); + lapic_write (LAPIC_TIMICT, 0xFFFFFFFF); sleep_micro (us); - amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16)); - uint32_t ticks = 0xFFFFFFFF - amd64_lapic_read (LAPIC_TIMCCT); + lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (0 << 16)); + uint32_t ticks = 0xFFFFFFFF - lapic_read (LAPIC_TIMCCT); DEBUG ("timer ticks = %u\n", ticks); spin_unlock (&lapic_calibration_lock); @@ -223,30 +221,30 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) { * * ticks - Initial tick count */ -static void amd64_lapic_start (uint32_t ticks) { - amd64_lapic_write (LAPIC_DCR, DIVIDER_VALUE); - amd64_lapic_write (LAPIC_TIMICT, ticks); - amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17)); +static void lapic_start (uint32_t ticks) { + lapic_write (LAPIC_DCR, DIVIDER_VALUE); + lapic_write (LAPIC_TIMICT, ticks); + lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17)); } /* * Initialize Local APIC, configure to send timer interrupts at a given period. See - * amd64_lapic_calibrate and amd64_lapic_start. + * lapic_calibrate and lapic_start. 
*/ -void amd64_lapic_init (uint32_t us) { +void lapic_init (uint32_t us) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; - amd64_wrmsr (MSR_APIC_BASE, amd64_rdmsr (MSR_APIC_BASE) | (1 << 11)); + wrmsr (MSR_APIC_BASE, rdmsr (MSR_APIC_BASE) | (1 << 11)); - uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000; + uintptr_t lapic_paddr = rdmsr (MSR_APIC_BASE) & 0xFFFFF000; thiscpu->lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset; mm_map_kernel_page (lapic_paddr, thiscpu->lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW); - amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8)); + lapic_write (LAPIC_SIVR, 0xFF | (1 << 8)); - thiscpu->lapic_ticks = amd64_lapic_calibrate (us); - amd64_lapic_start (thiscpu->lapic_ticks); + thiscpu->lapic_ticks = lapic_calibrate (us); + lapic_start (thiscpu->lapic_ticks); } /* @@ -255,12 +253,12 @@ void amd64_lapic_init (uint32_t us) { * lapic_id - Target Local APIC * vec - Interrupt vector/IDT stub, which will be invoked by the IPI. */ -void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec) { +void lapic_ipi (uint32_t lapic_id, uint32_t vec) { /* wait for previous IPI to finish */ - while (amd64_lapic_read (LAPIC_ICR) & (1 << 12)) { + while (lapic_read (LAPIC_ICR) & (1 << 12)) { __asm__ volatile ("pause"); } - amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24)); - amd64_lapic_write (LAPIC_ICR, vec); + lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24)); + lapic_write (LAPIC_ICR, vec); } diff --git a/kernel/amd64/apic.h b/kernel/amd64/apic.h index a057068..b26ce18 100644 --- a/kernel/amd64/apic.h +++ b/kernel/amd64/apic.h @@ -3,12 +3,12 @@ #include -void amd64_ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id); -void amd64_ioapic_init (void); +void ioapic_route_irq (uint32_t vec, uint32_t irq, uint64_t flags, uint64_t lapic_id); +void ioapic_init (void); -uint32_t amd64_lapic_id (void); -void amd64_lapic_eoi (void); -void amd64_lapic_ipi (uint32_t lapic_id, uint32_t vec); -void 
amd64_lapic_init (uint32_t us); +uint32_t lapic_id (void); +void lapic_eoi (void); +void lapic_ipi (uint32_t lapic_id, uint32_t vec); +void lapic_init (uint32_t us); #endif // _KERNEL_AMD64_APIC_H diff --git a/kernel/amd64/bootmain.c b/kernel/amd64/bootmain.c index 0b5bbf0..8002e23 100644 --- a/kernel/amd64/bootmain.c +++ b/kernel/amd64/bootmain.c @@ -39,16 +39,16 @@ void bootmain (void) { struct cpu* bsp_cpu = cpu_make (mp->bsp_lapic_id, 0); - amd64_init (bsp_cpu, false); + init_gdt_idt (bsp_cpu, false); syscall_init (); - amd64_debug_init (); + debug_init (); pmm_init (); mm_init (); uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer)); - amd64_ioapic_init (); - amd64_hpet_init (); + ioapic_init (); + hpet_init (); devices_init (); vfs_init (); diff --git a/kernel/amd64/debug.c b/kernel/amd64/debug.c index a311973..22abbcd 100644 --- a/kernel/amd64/debug.c +++ b/kernel/amd64/debug.c @@ -17,25 +17,23 @@ */ static spin_lock_t serial_lock = SPIN_LOCK_INIT; -static bool debug_init = false; +static bool debug_is_init = false; /* Block until TX buffer is empty */ -static bool amd64_debug_serial_tx_empty (void) { - return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20); -} +static bool debug_serial_tx_empty (void) { return (bool)(inb (PORT_COM1 + 5) & 0x20); } /* Write a single character to serial */ -static void amd64_debug_serial_write (char x) { - while (!amd64_debug_serial_tx_empty ()) +static void debug_serial_write (char x) { + while (!debug_serial_tx_empty ()) ; - amd64_io_outb (PORT_COM1, (uint8_t)x); + outb (PORT_COM1, (uint8_t)x); } /* * Formatted printing to serial. serial_lock ensures that all prints are atomic. */ void debugprintf (const char* fmt, ...) { - if (!debug_init) + if (!debug_is_init) return; char buffer[BUFFER_SIZE]; @@ -53,7 +51,7 @@ void debugprintf (const char* fmt, ...) 
{ spin_lock (&serial_lock); while (*p) { - amd64_debug_serial_write (*p); + debug_serial_write (*p); p++; } @@ -61,14 +59,14 @@ void debugprintf (const char* fmt, ...) { } /* Initialize serial */ -void amd64_debug_init (void) { - amd64_io_outb (PORT_COM1 + 1, 0x00); - amd64_io_outb (PORT_COM1 + 3, 0x80); - amd64_io_outb (PORT_COM1 + 0, 0x03); - amd64_io_outb (PORT_COM1 + 1, 0x00); - amd64_io_outb (PORT_COM1 + 3, 0x03); - amd64_io_outb (PORT_COM1 + 2, 0xC7); - amd64_io_outb (PORT_COM1 + 4, 0x0B); +void debug_init (void) { + outb (PORT_COM1 + 1, 0x00); + outb (PORT_COM1 + 3, 0x80); + outb (PORT_COM1 + 0, 0x03); + outb (PORT_COM1 + 1, 0x00); + outb (PORT_COM1 + 3, 0x03); + outb (PORT_COM1 + 2, 0xC7); + outb (PORT_COM1 + 4, 0x0B); - debug_init = true; + debug_is_init = true; } diff --git a/kernel/amd64/debug.h b/kernel/amd64/debug.h index cf9fa50..888f940 100644 --- a/kernel/amd64/debug.h +++ b/kernel/amd64/debug.h @@ -1,6 +1,6 @@ #ifndef _KERNEL_AMD64_DEBUG_H #define _KERNEL_AMD64_DEBUG_H -void amd64_debug_init (void); +void debug_init (void); #endif // _KERNEL_AMD64_DEBUG_H diff --git a/kernel/amd64/hpet.c b/kernel/amd64/hpet.c index f99986b..45ccc2e 100644 --- a/kernel/amd64/hpet.c +++ b/kernel/amd64/hpet.c @@ -31,26 +31,26 @@ static uint64_t hpet_period_fs; static spin_lock_t hpet_lock = SPIN_LOCK_INIT; /* Read a HPET register. Assumes caller holds hpet_lock */ -static uint64_t amd64_hpet_read64 (uint32_t reg) { +static uint64_t hpet_read64 (uint32_t reg) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; return *(volatile uint64_t*)(hpet_vaddr + reg); } -static uint32_t amd64_hpet_read32 (uint32_t reg) { +static uint32_t hpet_read32 (uint32_t reg) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; return *(volatile uint32_t*)(hpet_vaddr + reg); } /* Write a HPET register. 
Assumes caller holds hpet_lock */ -static void amd64_hpet_write64 (uint32_t reg, uint64_t value) { +static void hpet_write64 (uint32_t reg, uint64_t value) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; *(volatile uint64_t*)(hpet_vaddr + reg) = value; } -static void amd64_hpet_write32 (uint32_t reg, uint32_t value) { +static void hpet_write32 (uint32_t reg, uint32_t value) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; *(volatile uint32_t*)(hpet_vaddr + reg) = value; @@ -58,18 +58,18 @@ static void amd64_hpet_write32 (uint32_t reg, uint32_t value) { /* Read current value of HPET_MCVR register. */ -static uint64_t amd64_hpet_read_counter (void) { +static uint64_t hpet_read_counter (void) { uint64_t value; spin_lock (&hpet_lock); if (!hpet_32bits) - value = amd64_hpet_read64 (HPET_MCVR); + value = hpet_read64 (HPET_MCVR); else { uint32_t hi1, lo, hi2; do { - hi1 = amd64_hpet_read32 (HPET_MCVR + 4); - lo = amd64_hpet_read32 (HPET_MCVR + 0); - hi2 = amd64_hpet_read32 (HPET_MCVR + 4); + hi1 = hpet_read32 (HPET_MCVR + 4); + lo = hpet_read32 (HPET_MCVR + 0); + hi2 = hpet_read32 (HPET_MCVR + 4); } while (hi1 != hi2); value = ((uint64_t)hi1 << 32) | lo; @@ -80,14 +80,14 @@ static uint64_t amd64_hpet_read_counter (void) { return value; } -static void amd64_hpet_write_counter (uint64_t value) { +static void hpet_write_counter (uint64_t value) { spin_lock (&hpet_lock); if (!hpet_32bits) - amd64_hpet_write64 (HPET_MCVR, value); + hpet_write64 (HPET_MCVR, value); else { - amd64_hpet_write32 (HPET_MCVR, (uint32_t)value); - amd64_hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32)); + hpet_write32 (HPET_MCVR, (uint32_t)value); + hpet_write32 (HPET_MCVR + 4, (uint32_t)(value >> 32)); } spin_unlock (&hpet_lock); @@ -95,15 +95,15 @@ static void amd64_hpet_write_counter (uint64_t value) { /* Sleep for a given amount of 
microseconds. This time can last longer due to \ref hpet_lock being * held. */ -void amd64_hpet_sleep_micro (uint64_t us) { +void hpet_sleep_micro (uint64_t us) { if (hpet_period_fs == 0) return; uint64_t ticks_to_wait = (us * 1000ULL) / (hpet_period_fs / 1000000ULL); - uint64_t start = amd64_hpet_read_counter (); + uint64_t start = hpet_read_counter (); for (;;) { - uint64_t now = amd64_hpet_read_counter (); + uint64_t now = hpet_read_counter (); if ((now - start) >= ticks_to_wait) break; @@ -113,7 +113,7 @@ void amd64_hpet_sleep_micro (uint64_t us) { } /* Initialize HPET */ -void amd64_hpet_init (void) { +void hpet_init (void) { struct uacpi_table hpet_table; uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table); if (status != UACPI_STATUS_OK) { @@ -127,12 +127,12 @@ void amd64_hpet_init (void) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW); - uint64_t caps = amd64_hpet_read64 (HPET_GCIDR); + uint64_t caps = hpet_read64 (HPET_GCIDR); hpet_32bits = (caps & (1 << 13)) ? 
0 : 1; hpet_period_fs = (uint32_t)(caps >> 32); - amd64_hpet_write64 (HPET_GCR, 0); - amd64_hpet_write_counter (0); - amd64_hpet_write64 (HPET_GCR, 1); + hpet_write64 (HPET_GCR, 0); + hpet_write_counter (0); + hpet_write64 (HPET_GCR, 1); } diff --git a/kernel/amd64/hpet.h b/kernel/amd64/hpet.h index 6841998..1eda515 100644 --- a/kernel/amd64/hpet.h +++ b/kernel/amd64/hpet.h @@ -3,7 +3,7 @@ #include -void amd64_hpet_sleep_micro (uint64_t us); -void amd64_hpet_init (void); +void hpet_sleep_micro (uint64_t us); +void hpet_init (void); #endif // _KERNEL_AMD64_HPET_H diff --git a/kernel/amd64/init.c b/kernel/amd64/init.c index 69e2784..16e406f 100644 --- a/kernel/amd64/init.c +++ b/kernel/amd64/init.c @@ -10,8 +10,8 @@ #define TSS_PRESENT 0x89 /* Set a GDT entry */ -static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit, - uint8_t acc, uint8_t gran) { +static void gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit, uint8_t acc, + uint8_t gran) { ent->baselow = (base & 0xFFFF); ent->basemid = (base >> 16) & 0xFF; ent->basehigh = (base >> 24) & 0xFF; @@ -21,7 +21,7 @@ static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32 } /* Initialize GDT and TSS structures for a given CPU */ -static void amd64_gdt_init (struct cpu* cpu) { +static void gdt_init (struct cpu* cpu) { volatile struct tss* tss = &cpu->tss; volatile struct gdt_extended* gdt = &cpu->gdt; @@ -36,12 +36,12 @@ static void amd64_gdt_init (struct cpu* cpu) { uint64_t tssbase = (uint64_t)tss; uint64_t tsslimit = sizeof (*tss) - 1; - amd64_gdt_set (&gdt->old[0], 0, 0, 0, 0); - amd64_gdt_set (&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0); - amd64_gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0); - amd64_gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0); - amd64_gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0); - amd64_gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0); + gdt_set (&gdt->old[0], 0, 0, 0, 0); + gdt_set 
(&gdt->old[1], 0, 0xFFFFF, 0x9A, 0xA0); + gdt_set (&gdt->old[2], 0, 0xFFFFF, 0x92, 0xC0); + gdt_set (&gdt->old[3], 0, 0xFFFFF, 0xF2, 0xC0); + gdt_set (&gdt->old[4], 0, 0xFFFFF, 0xFA, 0xA0); + gdt_set (&gdt->tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0); uint32_t tssbasehigh = (tssbase >> 32); gdt->tsshigh.limitlow = (tssbasehigh & 0xFFFF); @@ -80,10 +80,10 @@ static void amd64_gdt_init (struct cpu* cpu) { * load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on * the BSP */ -void amd64_init (struct cpu* cpu, bool load_idt) { - amd64_gdt_init (cpu); +void init_gdt_idt (struct cpu* cpu, bool load_idt) { + gdt_init (cpu); if (load_idt) - amd64_load_idt (); + idt_load (); else - amd64_intr_init (); + intr_init (); } diff --git a/kernel/amd64/init.h b/kernel/amd64/init.h index 97225d1..5594e5f 100644 --- a/kernel/amd64/init.h +++ b/kernel/amd64/init.h @@ -3,6 +3,6 @@ #include -void amd64_init (struct cpu* cpu, bool load_idt); +void init_gdt_idt (struct cpu* cpu, bool load_idt); #endif // _KERNEL_AMD64_INIT_H diff --git a/kernel/amd64/intr.c b/kernel/amd64/intr.c index 2916644..a08e456 100644 --- a/kernel/amd64/intr.c +++ b/kernel/amd64/intr.c @@ -62,33 +62,36 @@ ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX]; static volatile struct idt idt; /* Remaps and disables old 8259 PIC, since we'll be using APIC. */ -static void amd64_init_pic (void) { -#define IO_OP(fn, ...) 
\ - fn (__VA_ARGS__); \ - amd64_io_wait () +static void pic_init (void) { + outb (PIC1_CMD, (ICW1_INIT | ICW1_ICW4)); + io_wait (); + outb (PIC2_CMD, (ICW1_INIT | ICW1_ICW4)); + io_wait (); - IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4)); - IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4)); + outb (PIC1_DATA, 0x20); + io_wait (); + outb (PIC2_DATA, 0x28); + io_wait (); - IO_OP (amd64_io_outb, PIC1_DATA, 0x20); - IO_OP (amd64_io_outb, PIC2_DATA, 0x28); + outb (PIC1_DATA, (1 << CASCADE_IRQ)); + io_wait (); + outb (PIC2_DATA, 2); + io_wait (); - IO_OP (amd64_io_outb, PIC1_DATA, (1 << CASCADE_IRQ)); - IO_OP (amd64_io_outb, PIC2_DATA, 2); - - IO_OP (amd64_io_outb, PIC1_DATA, ICW4_8086); - IO_OP (amd64_io_outb, PIC2_DATA, ICW4_8086); + outb (PIC1_DATA, ICW4_8086); + io_wait (); + outb (PIC2_DATA, ICW4_8086); + io_wait (); /* Disable */ - IO_OP (amd64_io_outb, PIC1_DATA, 0xFF); - IO_OP (amd64_io_outb, PIC2_DATA, 0xFF); - -#undef IO_OP + outb (PIC1_DATA, 0xFF); + io_wait (); + outb (PIC2_DATA, 0xFF); + io_wait (); } /* Set IDT entry */ -static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags, - uint8_t ist) { +static void idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags, uint8_t ist) { ent->intrlow = (handler & 0xFFFF); ent->kernel_cs = GDT_KCODE; ent->ist = ist; @@ -99,15 +102,15 @@ static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uin } /* Load the IDT */ -void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); } +void idt_load (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); } /* Initialize IDT entries */ -static void amd64_idt_init (void) { +static void idt_init (void) { memset ((void*)idt_entries, 0, sizeof (idt_entries)); #define IDT_ENTRY(n, ist) \ - extern void amd64_intr##n (void); \ - amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist)) + extern void intr##n (void); \ + idt_set (&idt_entries[(n)], (uint64_t)&intr##n, 0x8E, 
(ist)) /* clang-format off */ IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0); IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0); @@ -131,11 +134,11 @@ static void amd64_idt_init (void) { idt.limit = sizeof (idt_entries) - 1; idt.base = (uint64_t)idt_entries; - amd64_load_idt (); + idt_load (); } /* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */ -static void amd64_intr_exception (struct saved_regs* regs) { +static void intr_exception (struct saved_regs* regs) { DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error); uint64_t cr2; @@ -169,8 +172,8 @@ static void amd64_intr_exception (struct saved_regs* regs) { } /* Handle incoming interrupt, dispatch IRQ handlers. */ -void amd64_intr_handler (void* stack_ptr) { - amd64_load_kernel_cr3 (); +void intr_handler (void* stack_ptr) { + load_kernel_cr3 (); struct saved_regs* regs = stack_ptr; @@ -184,9 +187,9 @@ void amd64_intr_handler (void* stack_ptr) { spin_unlock (&thiscpu->lock); if (regs->trap <= 31) { - amd64_intr_exception (regs); + intr_exception (regs); } else { - amd64_lapic_eoi (); + lapic_eoi (); struct irq* irq = irq_find (regs->trap); @@ -202,7 +205,7 @@ void amd64_intr_handler (void* stack_ptr) { } /* Initialize interrupts */ -void amd64_intr_init (void) { - amd64_init_pic (); - amd64_idt_init (); +void intr_init (void) { + pic_init (); + idt_init (); } diff --git a/kernel/amd64/intr.h b/kernel/amd64/intr.h index 5717a7a..4f77e94 100644 --- a/kernel/amd64/intr.h +++ b/kernel/amd64/intr.h @@ -31,7 +31,7 @@ struct saved_regs { uint64_t ss; } PACKED; -void amd64_load_idt (void); -void amd64_intr_init (void); +void idt_load (void); +void intr_init (void); #endif // _KERNEL_AMD64_INTR_H diff --git a/kernel/amd64/intr_stub.S b/kernel/amd64/intr_stub.S index f668e05..7750c4e 100644 --- a/kernel/amd64/intr_stub.S +++ b/kernel/amd64/intr_stub.S @@ -1,7 +1,7 @@ #include #include -.extern amd64_intr_handler +.extern intr_handler 
#define err(z) \ pushq $z; @@ -11,8 +11,8 @@ pushq $z; #define make_intr_stub(x, n) \ - .global amd64_intr ## n; \ - amd64_intr ## n:; \ + .global intr ## n; \ + intr ## n:; \ x(n); \ cli; \ ; \ @@ -33,7 +33,7 @@ subq $8, %rsp; \ andq $-16, %rsp; \ ; \ - callq amd64_intr_handler; \ + callq intr_handler; \ ; \ movq %rbp, %rsp; \ ; \ diff --git a/kernel/amd64/io.c b/kernel/amd64/io.c index b5980e8..692d238 100644 --- a/kernel/amd64/io.c +++ b/kernel/amd64/io.c @@ -2,50 +2,44 @@ #include /// Perform outb instruction (send 8-bit int) -void amd64_io_outb (uint16_t port, uint8_t v) { - __asm__ volatile ("outb %1, %0" ::"dN"(port), "a"(v)); -} +void outb (uint16_t port, uint8_t v) { __asm__ volatile ("outb %1, %0" ::"dN"(port), "a"(v)); } /// Perform outw instruction (send 16-bit int) -void amd64_io_outw (uint16_t port, uint16_t v) { - __asm__ volatile ("outw %%ax, %%dx" ::"a"(v), "d"(port)); -} +void outw (uint16_t port, uint16_t v) { __asm__ volatile ("outw %%ax, %%dx" ::"a"(v), "d"(port)); } /// Perform outl instruction (send 32-bit int) -void amd64_io_outl (uint16_t port, uint32_t v) { - __asm__ volatile ("outl %%eax, %%dx" ::"d"(port), "a"(v)); -} +void outl (uint16_t port, uint32_t v) { __asm__ volatile ("outl %%eax, %%dx" ::"d"(port), "a"(v)); } /// Perform outsw instruction (send a string) -void amd64_io_outsw (uint16_t port, const void* addr, int cnt) { +void outsw (uint16_t port, const void* addr, int cnt) { __asm__ volatile ("cld; rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port) : "memory", "cc"); } /// Perform inb instruction (receive 8-bit int) -uint8_t amd64_io_inb (uint16_t port) { +uint8_t inb (uint16_t port) { uint8_t r; __asm__ volatile ("inb %1, %0" : "=a"(r) : "dN"(port)); return r; } /// Perform inw instruction (receive 16-bit int) -uint16_t amd64_io_inw (uint16_t port) { +uint16_t inw (uint16_t port) { uint16_t r; __asm__ volatile ("inw %%dx, %%ax" : "=a"(r) : "d"(port)); return r; } /// Perform inl instruction (receive 32-bit int) -uint32_t 
amd64_io_inl (uint16_t port) { +uint32_t inl (uint16_t port) { uint32_t r; __asm__ volatile ("inl %%dx, %%eax" : "=a"(r) : "d"(port)); return r; } /// Perform insw instruction (receive a string) -void amd64_io_insw (uint16_t port, void* addr, int cnt) { +void insw (uint16_t port, void* addr, int cnt) { __asm__ volatile ("cld; rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory", "cc"); } /// output a byte on port 0x80, which does a small IO delay -void amd64_io_wait (void) { amd64_io_outb (0x80, 0); } +void io_wait (void) { outb (0x80, 0); } diff --git a/kernel/amd64/io.h b/kernel/amd64/io.h index aa28f0b..f3d490c 100644 --- a/kernel/amd64/io.h +++ b/kernel/amd64/io.h @@ -3,14 +3,14 @@ #include -void amd64_io_outb (uint16_t port, uint8_t v); -void amd64_io_outw (uint16_t port, uint16_t v); -void amd64_io_outl (uint16_t port, uint32_t v); -void amd64_io_outsw (uint16_t port, const void* addr, int cnt); -uint8_t amd64_io_inb (uint16_t port); -uint16_t amd64_io_inw (uint16_t port); -uint32_t amd64_io_inl (uint16_t port); -void amd64_io_insw (uint16_t port, void* addr, int cnt); -void amd64_io_wait (void); +void outb (uint16_t port, uint8_t v); +void outw (uint16_t port, uint16_t v); +void outl (uint16_t port, uint32_t v); +void outsw (uint16_t port, const void* addr, int cnt); +uint8_t inb (uint16_t port); +uint16_t inw (uint16_t port); +uint32_t inl (uint16_t port); +void insw (uint16_t port, void* addr, int cnt); +void io_wait (void); #endif // _KERNEL_AMD64_IO_H diff --git a/kernel/amd64/mm.c b/kernel/amd64/mm.c index c91d015..485b216 100644 --- a/kernel/amd64/mm.c +++ b/kernel/amd64/mm.c @@ -30,15 +30,15 @@ void mm_kernel_lock (void) { spin_lock (&kernel_pd_lock); } void mm_kernel_unlock (void) { spin_lock (&kernel_pd_lock); } /* Get current value of CR3 register */ -static uintptr_t amd64_current_cr3 (void) { +static uintptr_t current_cr3 (void) { uintptr_t cr3; __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory"); return cr3; } /* Load kernel CR3 as 
current CR3 */ -void amd64_load_kernel_cr3 (void) { - uintptr_t cr3 = amd64_current_cr3 (); +void load_kernel_cr3 (void) { + uintptr_t cr3 = current_cr3 (); if (cr3 != kernel_pd.cr3_paddr) { __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory"); @@ -48,7 +48,7 @@ void amd64_load_kernel_cr3 (void) { struct pd* mm_get_kernel_pd (void) { return &kernel_pd; } /* Extract PML info from virtual address */ -static struct pg_index amd64_mm_page_index (uint64_t vaddr) { +static struct pg_index mm_page_index (uint64_t vaddr) { struct pg_index ret; ret.pml4 = ((vaddr >> 39) & 0x1FF); @@ -60,7 +60,7 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) { } /* Walk paging tables and allocate necessary structures along the way */ -static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) { +static uint64_t* mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) { uint64_t entry = table[entry_idx]; physaddr_t paddr; @@ -87,7 +87,7 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr); } -static bool amd64_mm_is_table_empty (uint64_t* table) { +static bool mm_is_table_empty (uint64_t* table) { for (size_t i = 0; i < 512; i++) { if (table[i] & AMD64_PG_PRESENT) return false; @@ -96,7 +96,7 @@ static bool amd64_mm_is_table_empty (uint64_t* table) { } /* Convert generic memory management subsystem flags into AMD64-specific flags */ -static uint64_t amd64_mm_resolve_flags (uint32_t generic) { +static uint64_t mm_resolve_flags (uint32_t generic) { uint64_t flags = 0; flags |= ((generic & MM_PG_PRESENT) ? 
AMD64_PG_PRESENT : 0); @@ -107,41 +107,41 @@ static uint64_t amd64_mm_resolve_flags (uint32_t generic) { } /* Reload the current CR3 value ON A LOCAL CPU */ -static void amd64_reload_cr3 (void) { +static void reload_cr3 (void) { uint64_t cr3; __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory"); } /* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */ -void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { +void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags1) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; - uint64_t amd64_flags = amd64_mm_resolve_flags (flags); + uint64_t flags = mm_resolve_flags (flags1); uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); - struct pg_index pg_index = amd64_mm_page_index (vaddr); + struct pg_index pg_index = mm_page_index (vaddr); - uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true); + uint64_t* pml3 = mm_next_table (pml4, pg_index.pml4, true); if (pml3 == NULL) return; - uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true); + uint64_t* pml2 = mm_next_table (pml3, pg_index.pml3, true); if (pml2 == NULL) return; - uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true); + uint64_t* pml1 = mm_next_table (pml2, pg_index.pml2, true); if (pml1 == NULL) return; uint64_t* pte = &pml1[pg_index.pml1]; - *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL)); + *pte = ((paddr & ~0xFFFULL) | (flags & 0x7ULL)); } /* Map a page into kernel page directory */ void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { mm_map_page (&kernel_pd, paddr, vaddr, flags); - amd64_reload_cr3 (); + reload_cr3 (); } /* Unmap a virtual address. 
TLB needs to be flushed afterwards */ @@ -149,17 +149,17 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); - struct pg_index pg_index = amd64_mm_page_index (vaddr); + struct pg_index pg_index = mm_page_index (vaddr); - uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false); + uint64_t* pml3 = mm_next_table (pml4, pg_index.pml4, false); if (pml3 == NULL) return; - uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false); + uint64_t* pml2 = mm_next_table (pml3, pg_index.pml3, false); if (pml2 == NULL) return; - uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false); + uint64_t* pml1 = mm_next_table (pml2, pg_index.pml2, false); if (pml1 == NULL) return; @@ -168,17 +168,17 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr) { if ((*pte) & AMD64_PG_PRESENT) *pte = 0; - if (amd64_mm_is_table_empty (pml1)) { + if (mm_is_table_empty (pml1)) { uintptr_t pml1_phys = pml2[pg_index.pml2] & ~0xFFFULL; pmm_free (pml1_phys, 1); pml2[pg_index.pml2] = 0; - if (amd64_mm_is_table_empty (pml2)) { + if (mm_is_table_empty (pml2)) { uintptr_t pml2_phys = pml3[pg_index.pml3] & ~0xFFFULL; pmm_free (pml2_phys, 1); pml3[pg_index.pml3] = 0; - if (amd64_mm_is_table_empty (pml3)) { + if (mm_is_table_empty (pml3)) { uintptr_t pml3_phys = pml4[pg_index.pml4] & ~0xFFFULL; pmm_free (pml3_phys, 1); pml4[pg_index.pml4] = 0; @@ -190,7 +190,7 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr) { /* Unmap a page from kernel page directory */ void mm_unmap_kernel_page (uintptr_t vaddr) { mm_unmap_page (&kernel_pd, vaddr); - amd64_reload_cr3 (); + reload_cr3 (); } /* Allocate a userspace-ready page directory */ @@ -216,17 +216,17 @@ bool mm_validate (struct pd* pd, uintptr_t vaddr) { bool ret = false; uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); - struct pg_index pg_index = amd64_mm_page_index 
(vaddr); + struct pg_index pg_index = mm_page_index (vaddr); - uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false); + uint64_t* pml3 = mm_next_table (pml4, pg_index.pml4, false); if (pml3 == NULL) goto done; - uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false); + uint64_t* pml2 = mm_next_table (pml3, pg_index.pml3, false); if (pml2 == NULL) goto done; - uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false); + uint64_t* pml1 = mm_next_table (pml2, pg_index.pml2, false); if (pml1 == NULL) goto done; @@ -292,17 +292,17 @@ uintptr_t mm_v2p (struct pd* pd, uintptr_t vaddr) { uintptr_t ret = 0; uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset); - struct pg_index pg_index = amd64_mm_page_index (vaddr); + struct pg_index pg_index = mm_page_index (vaddr); - uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false); + uint64_t* pml3 = mm_next_table (pml4, pg_index.pml4, false); if (pml3 == NULL) goto done; - uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false); + uint64_t* pml2 = mm_next_table (pml3, pg_index.pml3, false); if (pml2 == NULL) goto done; - uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false); + uint64_t* pml1 = mm_next_table (pml2, pg_index.pml2, false); if (pml1 == NULL) goto done; @@ -318,4 +318,4 @@ done: } /* Initialize essentials for the AMD64 memory management subsystem */ -void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); } +void mm_init (void) { kernel_pd.cr3_paddr = current_cr3 (); } diff --git a/kernel/amd64/mm.h b/kernel/amd64/mm.h index ef86671..174c0c6 100644 --- a/kernel/amd64/mm.h +++ b/kernel/amd64/mm.h @@ -10,6 +10,6 @@ struct pd { uintptr_t cr3_paddr; }; -void amd64_load_kernel_cr3 (void); +void load_kernel_cr3 (void); #endif // _KERNEL_AMD64_MM_H diff --git a/kernel/amd64/msr.c b/kernel/amd64/msr.c index ccf8a25..667416a 100644 --- a/kernel/amd64/msr.c +++ b/kernel/amd64/msr.c @@ -2,14 +2,14 @@ #include /// Read a model-specific 
register -uint64_t amd64_rdmsr (uint32_t msr) { +uint64_t rdmsr (uint32_t msr) { uint32_t low, high; __asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr)); return ((uint64_t)high << 32 | (uint64_t)low); } /// Write a model-specific register -void amd64_wrmsr (uint32_t msr, uint64_t value) { +void wrmsr (uint32_t msr, uint64_t value) { uint32_t low = (uint32_t)(value & 0xFFFFFFFF); uint32_t high = (uint32_t)(value >> 32); __asm__ volatile ("wrmsr" ::"c"(msr), "a"(low), "d"(high)); diff --git a/kernel/amd64/msr.h b/kernel/amd64/msr.h index d1ccfdf..c21cb7b 100644 --- a/kernel/amd64/msr.h +++ b/kernel/amd64/msr.h @@ -3,7 +3,7 @@ #include -uint64_t amd64_rdmsr (uint32_t msr); -void amd64_wrmsr (uint32_t msr, uint64_t value); +uint64_t rdmsr (uint32_t msr); +void wrmsr (uint32_t msr, uint64_t value); #endif // _KERNEL_AMD64_MSR_H diff --git a/kernel/amd64/sched.S b/kernel/amd64/sched.S index c9bdb7c..19df996 100644 --- a/kernel/amd64/sched.S +++ b/kernel/amd64/sched.S @@ -1,7 +1,7 @@ #include -.global amd64_do_sched -amd64_do_sched: +.global do_sched1 +do_sched1: movq %rsi, %cr3 movq %rdi, %rsp pop_regs diff --git a/kernel/amd64/sched.h b/kernel/amd64/sched.h index 97d453d..46029c2 100644 --- a/kernel/amd64/sched.h +++ b/kernel/amd64/sched.h @@ -2,6 +2,6 @@ #define _KERNEL_AMD64_SCHED_H /// Perform process context switch -void amd64_do_sched (void* regs, void* cr3); +void do_sched1 (void* regs, void* cr3); #endif // _KERNEL_AMD64_SCHED_H diff --git a/kernel/amd64/sched1.c b/kernel/amd64/sched1.c index b9b8e57..7b012ca 100644 --- a/kernel/amd64/sched1.c +++ b/kernel/amd64/sched1.c @@ -13,7 +13,7 @@ void do_sched (struct proc* proc, spin_lock_t* cpu_lock) { thiscpu->tss.rsp0 = proc->pdata.kernel_stack; thiscpu->syscall_kernel_stack = proc->pdata.kernel_stack; - amd64_wrmsr (MSR_FS_BASE, proc->pdata.fs_base); + wrmsr (MSR_FS_BASE, proc->pdata.fs_base); void* cr3 = (void*)proc->procgroup->pd.cr3_paddr; struct saved_regs regs; @@ -22,5 +22,5 @@ void do_sched (struct 
proc* proc, spin_lock_t* cpu_lock) { spin_unlock (&proc->lock); spin_unlock (cpu_lock); - amd64_do_sched ((void*)&regs, cr3); + do_sched1 ((void*)&regs, cr3); } diff --git a/kernel/amd64/smp.c index 4b6e6c3..dc40795 100644 --- a/kernel/amd64/smp.c +++ b/kernel/amd64/smp.c @@ -34,13 +34,13 @@ struct cpu* cpu_make (uint64_t lapic_id, uint64_t acpi_id) { cpu->acpi_id = acpi_id; cpu->lapic_id = lapic_id; - amd64_wrmsr (MSR_GS_BASE, (uint64_t)cpu); + wrmsr (MSR_GS_BASE, (uint64_t)cpu); return cpu; } struct cpu* cpu_get (void) { - struct cpu* ptr = (struct cpu*)amd64_rdmsr (MSR_GS_BASE); + struct cpu* ptr = (struct cpu*)rdmsr (MSR_GS_BASE); return ptr; } @@ -50,7 +50,7 @@ void cpu_request_sched (struct cpu* cpu) { return; } - amd64_lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED); + lapic_ipi (cpu->lapic_id, CPU_REQUEST_SCHED); } struct cpu* cpu_find_lightest (void) { @@ -74,15 +74,15 @@ struct cpu* cpu_find_lightest (void) { } /// Bootstrap code for non-BSP CPUs -static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) { - amd64_load_kernel_cr3 (); +static void smp_bootstrap (struct limine_mp_info* mp_info) { + load_kernel_cr3 (); struct cpu* cpu = cpu_make (mp_info->lapic_id, mp_info->processor_id); - amd64_init (cpu, true); /* gdt + idt */ + init_gdt_idt (cpu, true); /* gdt + idt */ syscall_init (); - amd64_lapic_init (1000); + lapic_init (1000); DEBUG ("CPU %u is online!\n", thiscpu->id); @@ -97,7 +97,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) { /// Initialize SMP subsystem for AMD64. 
Start AP CPUs void smp_init (void) { - amd64_lapic_init (1000); + lapic_init (1000); struct limine_mp_response* mp = limine_mp_request.response; @@ -105,7 +105,7 @@ void smp_init (void) { for (size_t i = 0; i < mp->cpu_count; i++) { if (mp->cpus[i]->processor_id != thiscpu->acpi_id) { - mp->cpus[i]->goto_address = &amd64_smp_bootstrap; + mp->cpus[i]->goto_address = &smp_bootstrap; } } diff --git a/kernel/amd64/spin.S b/kernel/amd64/spin.S index b4322de..3df1d48 100644 --- a/kernel/amd64/spin.S +++ b/kernel/amd64/spin.S @@ -1,4 +1,4 @@ -.global amd64_spin -amd64_spin: +.global spin +spin: hlt - jmp amd64_spin + jmp spin diff --git a/kernel/amd64/syscall.c b/kernel/amd64/syscall.c index cd0f76d..992b6d4 100644 --- a/kernel/amd64/syscall.c +++ b/kernel/amd64/syscall.c @@ -14,10 +14,10 @@ #include #include -extern void amd64_syscall_entry (void); +extern void syscall_entry (void); -uintptr_t amd64_syscall_dispatch (void* stack_ptr) { - amd64_load_kernel_cr3 (); +uintptr_t syscall_dispatch (void* stack_ptr) { + load_kernel_cr3 (); struct saved_regs* regs = stack_ptr; spin_lock (&thiscpu->lock); @@ -57,8 +57,8 @@ uintptr_t amd64_syscall_dispatch (void* stack_ptr) { } void syscall_init (void) { - amd64_wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48)); - amd64_wrmsr (MSR_LSTAR, (uint64_t)&amd64_syscall_entry); - amd64_wrmsr (MSR_SYSCALL_MASK, (1ULL << 9)); - amd64_wrmsr (MSR_EFER, amd64_rdmsr (MSR_EFER) | EFER_SCE); + wrmsr (MSR_STAR, ((uint64_t)GDT_KCODE << 32) | ((uint64_t)(GDT_KDATA | 0x03) << 48)); + wrmsr (MSR_LSTAR, (uint64_t)&syscall_entry); + wrmsr (MSR_SYSCALL_MASK, (1ULL << 9)); + wrmsr (MSR_EFER, rdmsr (MSR_EFER) | EFER_SCE); } diff --git a/kernel/amd64/syscallentry.S b/kernel/amd64/syscallentry.S index dd26bef..658ec7d 100644 --- a/kernel/amd64/syscallentry.S +++ b/kernel/amd64/syscallentry.S @@ -1,9 +1,9 @@ #include -.extern amd64_syscall_dispatch +.extern syscall_dispatch -.global amd64_syscall_entry -amd64_syscall_entry: 
+.global syscall_entry +syscall_entry: cli movq %rsp, %gs:0 @@ -35,7 +35,7 @@ amd64_syscall_entry: subq $8, %rsp andq $-16, %rsp - callq amd64_syscall_dispatch + callq syscall_dispatch movq %rbp, %rsp diff --git a/kernel/amd64/time.c b/kernel/amd64/time.c index 1d928ef..b1a8038 100644 --- a/kernel/amd64/time.c +++ b/kernel/amd64/time.c @@ -3,4 +3,4 @@ #include /// Sleep for given amount of microseconds -void sleep_micro (size_t us) { amd64_hpet_sleep_micro (us); } +void sleep_micro (size_t us) { hpet_sleep_micro (us); } diff --git a/kernel/device/ps2_kb.c b/kernel/device/ps2_kb.c index be433f9..5d1d7b4 100644 --- a/kernel/device/ps2_kb.c +++ b/kernel/device/ps2_kb.c @@ -134,11 +134,11 @@ static int32_t ps2kb_keycode (void) { static uint8_t* charcode[4] = {normalmap, shiftmap, ctlmap, ctlmap}; uint32_t st, data, c; - st = amd64_io_inb (KB_CTL_STATUS); + st = inb (KB_CTL_STATUS); if (!(st & KB_DATA_IN_BUF)) { return -1; } - data = amd64_io_inb (KB_DATA); + data = inb (KB_DATA); if (data == 0xe0) { shift |= KB_E0ESC; @@ -237,22 +237,22 @@ int ps2kb_read_key (struct device* device, struct proc* proc, struct reschedule_ bool ps2kb_init (struct device* device, void* arg) { (void)device, (void)arg; - amd64_ioapic_route_irq (PS2KB, 1, 0, thiscpu->lapic_id); + ioapic_route_irq (PS2KB, 1, 0, thiscpu->lapic_id); irq_attach (&ps2kb_irq, NULL, PS2KB); ringbuffer_init (&ps2kb_ringbuffer, PS2KB_RINGBUFFER_MAX, sizeof (uint8_t)); - while (amd64_io_inb (KB_CTL_STATUS) & KB_DATA_IN_BUF) - amd64_io_inb (KB_DATA); + while (inb (KB_CTL_STATUS) & KB_DATA_IN_BUF) + inb (KB_DATA); - amd64_io_outb (KB_CTL_STATUS, 0x20); + outb (KB_CTL_STATUS, 0x20); - uint8_t cb = amd64_io_inb (KB_DATA); + uint8_t cb = inb (KB_DATA); cb |= 0x01; cb |= 0x40; - amd64_io_outb (KB_CTL_STATUS, 0x60); - amd64_io_outb (KB_DATA, cb); + outb (KB_CTL_STATUS, 0x60); + outb (KB_DATA, cb); return true; } diff --git a/kernel/sys/spin.h b/kernel/sys/spin.h index 41e91ea..7d37c2b 100644 --- a/kernel/sys/spin.h +++ 
b/kernel/sys/spin.h @@ -1,9 +1,6 @@ #ifndef _KERNEL_SYS_SPIN_H #define _KERNEL_SYS_SPIN_H -#if defined(__x86_64__) -extern void amd64_spin (void); -#define spin amd64_spin -#endif +void spin (void); #endif // _KERNEL_SYS_SPIN_H