From 34f1e0ba301cf737b247164bd73e5c8950edb8e1 Mon Sep 17 00:00:00 2001 From: kamkow1 Date: Tue, 30 Dec 2025 16:50:15 +0100 Subject: [PATCH] Document amd64 platform-specific code --- kernel/amd64/apic.c | 14 ++++++++------ kernel/amd64/bootmain.c | 9 +++++++++ kernel/amd64/debug.c | 15 ++++++++++++--- kernel/amd64/gdt.h | 9 +++++++++ kernel/amd64/hpet.c | 35 ++++++++++++++++++++++++++++++----- kernel/amd64/init.c | 9 +++++++++ kernel/amd64/intr.c | 28 +++++++++++++++++++++++++++- kernel/amd64/intr_defs.h | 6 ++++++ kernel/amd64/io.c | 9 +++++++++ kernel/amd64/mm.c | 37 ++++++++++++++++++++++++++++++++++--- kernel/amd64/msr.c | 2 ++ kernel/amd64/proc.h | 5 ++++- kernel/amd64/sched.h | 1 + kernel/amd64/smp.c | 8 ++++++++ kernel/amd64/spin_lock.c | 1 + kernel/amd64/time.c | 1 + kernel/amd64/tss.h | 1 + 17 files changed, 171 insertions(+), 19 deletions(-) diff --git a/kernel/amd64/apic.c b/kernel/amd64/apic.c index 83541e7..5855b48 100644 --- a/kernel/amd64/apic.c +++ b/kernel/amd64/apic.c @@ -16,21 +16,21 @@ #define INTERRUPT_SRC_OVERRIDES_MAX 24 /// ID of Local APIC -#define LAPIC_ID 0x20 +#define LAPIC_ID 0x20 /// End of interrupt register -#define LAPIC_EOI 0xB0 +#define LAPIC_EOI 0xB0 /// Spurious interrupt vector register -#define LAPIC_SIVR 0xF0 +#define LAPIC_SIVR 0xF0 /// Interrupt command register -#define LAPIC_ICR 0x300 +#define LAPIC_ICR 0x300 /// LVT timer register -#define LAPIC_LVTTR 0x320 +#define LAPIC_LVTTR 0x320 /// Timer initial count register #define LAPIC_TIMICT 0x380 /// Timer current count register #define LAPIC_TIMCCT 0x390 /// Divide config register -#define LAPIC_DCR 0x3E0 +#define LAPIC_DCR 0x3E0 /// Table of IOAPICS static struct acpi_madt_ioapic apics[IOAPICS_MAX]; @@ -45,7 +45,9 @@ static size_t intr_src_override_entries = 0; /// Local APIC MMIO base address. It comes from MSR_APIC_BASE static uintptr_t lapic_mmio_base = 0; +/** @cond DOXYGEN_IGNORE */ extern void amd64_spin (void); +/** @endcond */ /// Read IOAPIC static uint32_t amd64_ioapic_read (uintptr_t vaddr, uint32_t reg) { diff --git a/kernel/amd64/bootmain.c b/kernel/amd64/bootmain.c index 77ece9a..1fe0217 100644 --- a/kernel/amd64/bootmain.c +++ b/kernel/amd64/bootmain.c @@ -1,3 +1,5 @@ +/** @file */ + #include #include #include @@ -19,8 +21,15 @@ #define UACPI_MEMORY_BUFFER_MAX 4096 +/** @cond DOXYGEN_IGNORE */ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX]; +/** @endcond */ +/** + * @brief The kernel starts booting here. This is the entry point once + * Limine hands over control. We set up all the necessary platform-dependent + * subsystems/drivers and then jump into the init app.
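+ * On amd64 this covers serial debug output, GDT/TSS/IDT setup, memory
+ * management, interrupts, HPET, APIC and SMP bring-up (see the other files
+ * in this patch). The first step below registers the BSP's \ref cpu
+ * structure via \ref cpu_make and \ref cpu_assign.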
+ */ void bootmain (void) { struct cpu* bsp_cpu = cpu_make (); cpu_assign (bsp_cpu->id); diff --git a/kernel/amd64/debug.c b/kernel/amd64/debug.c index 4bb03cd..1a073f6 100644 --- a/kernel/amd64/debug.c +++ b/kernel/amd64/debug.c @@ -6,21 +6,29 @@ #include #include -#define PORT_COM1 0x03F8 +/// Port for printing to serial +#define PORT_COM1 0x03F8 +/// \ref debugprintf buffer size #define BUFFER_SIZE 1024 +/// Lock ensuring that prints to the serial port are atomic +static spin_lock_t serial_lock = SPIN_LOCK_INIT; -spin_lock_t serial_lock = SPIN_LOCK_INIT; - +/// Check whether the TX buffer is empty static bool amd64_debug_serial_tx_empty (void) { return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20); } +/// Write a single character to serial static void amd64_debug_serial_write (char x) { while (!amd64_debug_serial_tx_empty ()) ; amd64_io_outb (PORT_COM1, (uint8_t)x); } +/** + * @brief Formatted printing to serial. \ref serial_lock ensures that + * all prints are atomic. + */ void debugprintf (const char* fmt, ...) { char buffer[BUFFER_SIZE]; memset (buffer, 0, sizeof (buffer)); @@ -44,6 +52,7 @@ void debugprintf (const char* fmt, ...) { spin_unlock (&serial_lock); } +/// Initialize the serial port void amd64_debug_init (void) { amd64_io_outb (PORT_COM1 + 1, 0x00); amd64_io_outb (PORT_COM1 + 3, 0x80); diff --git a/kernel/amd64/gdt.h b/kernel/amd64/gdt.h index cb1f760..cb6180e 100644 --- a/kernel/amd64/gdt.h +++ b/kernel/amd64/gdt.h @@ -5,8 +5,17 @@ #include #include +/// Size of kernel stack #define KSTACK_SIZE (32 * 1024) +/** + * @file + * + * @brief 64-bit GDT structure. For more info see: + * - https://wiki.osdev.org/Global_Descriptor_Table + * - https://wiki.osdev.org/GDT_Tutorial + */ + struct gdt_entry { uint16_t limitlow; uint16_t baselow; diff --git a/kernel/amd64/hpet.c b/kernel/amd64/hpet.c index 3020eb7..69ce63c 100644 --- a/kernel/amd64/hpet.c +++ b/kernel/amd64/hpet.c @@ -9,19 +9,34 @@ #include #include -#define HPET_MCVR 0xF0 /* Main Counter Value Register */ -#define HPET_GCR 0x10 /* General Configuration Register */ -#define HPET_GCIDR 0x00 /* General Capabilities and ID Register */ +/** + * @file + * + * @brief HPET (High Precision Event Timer) driver code. + * See more at https://wiki.osdev.org/HPET + */ +/// HPET Main Counter Value Register +#define HPET_MCVR 0xF0 +/// HPET General Configuration Register +#define HPET_GCR 0x10 +/// HPET General Capabilities and ID Register +#define HPET_GCIDR 0x00 + +/// Whether we should use 32-bit or 64-bit reads/writes static bool hpet_32bits = 1; +/// Physical address for HPET MMIO static uintptr_t hpet_paddr; +/// Nanoseconds per HPET tick, used for conversions static uint64_t hpet_clock_nano; +/// Lock protecting concurrent HPET access. See \ref amd64/smp.c static spin_lock_t hpet_lock = SPIN_LOCK_INIT; +/** @cond DOXYGEN_IGNORE */ extern void amd64_spin (void); +/** @endcond */ -/* These functions assume hpet_lock is held by the caller! */ - +/// Read a HPET register. Assumes caller holds \ref hpet_lock static uint64_t amd64_hpet_read (uint32_t reg) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; return (hpet_32bits ? *(volatile uint32_t*)(hpet_vaddr + reg) : *(volatile uint64_t*)(hpet_vaddr + reg)); } +/// Write a HPET register.
Assumes caller holds \ref hpet_lock static void amd64_hpet_write (uint32_t reg, uint64_t value) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset; @@ -38,8 +54,15 @@ static void amd64_hpet_write (uint32_t reg, uint64_t value) { *(volatile uint64_t*)(hpet_vaddr + reg) = value; } +/// Read the current value of the \ref HPET_MCVR register. static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); } +/** + * @brief Get current HPET timestamp in nanoseconds + * + * @param lock + * if true, acquire \ref hpet_lock around the read + */ uint64_t amd64_hpet_current_nano (bool lock) { if (lock) spin_lock (&hpet_lock); @@ -52,6 +75,7 @@ uint64_t amd64_hpet_current_nano (bool lock) { return t; } +/// Sleep for a given number of microseconds. The sleep can take longer than requested because \ref hpet_lock is held. void amd64_hpet_sleep_micro (uint64_t us) { spin_lock (&hpet_lock); @@ -63,6 +87,7 @@ void amd64_hpet_sleep_micro (uint64_t us) { spin_unlock (&hpet_lock); } +/// Initialize HPET void amd64_hpet_init (void) { struct uacpi_table hpet_table; uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table); diff --git a/kernel/amd64/init.c b/kernel/amd64/init.c index f135463..422ac51 100644 --- a/kernel/amd64/init.c +++ b/kernel/amd64/init.c @@ -14,6 +14,7 @@ #define TSS 0x80 #define TSS_PRESENT 0x89 +/// Set a GDT entry static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit, uint8_t acc, uint8_t gran) { ent->baselow = (base & 0xFFFF); @@ -24,6 +25,7 @@ static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32 ent->access = acc; } +/// Initialize GDT and TSS structures for a given CPU static void amd64_gdt_init (struct cpu* cpu) { volatile struct tss* tss = &cpu->tss; volatile struct gdt_extended* gdt = &cpu->gdt; @@ -75,6 +77,13 @@ static void amd64_gdt_init (struct cpu* cpu) { __asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS)); } +/** + * @brief Initialize essentials (GDT, TSS, IDT) for a given CPU + * + * @param load_idt + * Whether the IDT needs to be loaded; it only has to be loaded once, on + * the BSP + */ void amd64_init (struct cpu* cpu, bool load_idt) { amd64_gdt_init (cpu); if (load_idt) diff --git a/kernel/amd64/intr.c b/kernel/amd64/intr.c index 89ba50f..fe7dfc9 100644 --- a/kernel/amd64/intr.c +++ b/kernel/amd64/intr.c @@ -37,6 +37,7 @@ #define IDT_ENTRIES_MAX 256 +/// 64-bit IDT entry structure ... ent->intrlow = (handler & 0xFFFF); @@ -93,14 +101,18 @@ static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uin ent->resv = 0; } +/// Load the IDT void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); } +/// Initialize IDT entries static void amd64_idt_init (void) { memset ((void*)idt_entries, 0, sizeof (idt_entries)); + /** @cond DOXYGEN_IGNORE */ #define IDT_ENTRY(n, ist) \ extern void amd64_intr##n (void); \ amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist)) + /** @endcond */ /* clang-format off */ IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0); IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0); @@ -126,6 +138,13 @@ static void amd64_idt_init (void) { amd64_load_idt (); } +/** + * @brief Handle CPU exception and dump registers. If incoming CS has CPL3, kill the + * process.
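+ * The privilege check is the usual CPL idiom (a sketch; the saved CS field
+ * name in struct saved_regs is an assumption):
+ * @code
+ * // CPL is the low two bits of the saved CS selector.
+ * bool from_user = (regs->cs & 3) == 3;
+ * @endcode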
+ * + * @param regs + * saved registers + */ static void amd64_intr_exception (struct saved_regs* regs) { DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error); @@ -154,6 +173,7 @@ static void amd64_intr_exception (struct saved_regs* regs) { } } +/// Handle incoming interrupt, dispatch IRQ handlers. void amd64_intr_handler (void* stack_ptr) { struct saved_regs* regs = stack_ptr; @@ -176,6 +196,7 @@ void amd64_intr_handler (void* stack_ptr) { } } +/// Initialize interrupts void amd64_intr_init (void) { amd64_init_pic (); amd64_idt_init (); } /* Aux. */ +/// Save RFLAGS of the current CPU and disable interrupts static uint64_t amd64_irq_save_flags (void) { uint64_t rflags; __asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc"); return rflags; } +/// Restore interrupts (IF bit) from RFLAGS static void amd64_irq_restore_flags (uint64_t rflags) { if (rflags & (1ULL << 9)) __asm__ volatile ("sti"); } +/// Save current interrupt state void irq_save (void) { /* before smp init. */ if (thiscpu == NULL) @@ -204,6 +228,7 @@ void irq_save (void) { thiscpu->irq_ctx.rflags = amd64_irq_save_flags (); } +/// Restore interrupt state void irq_restore (void) { /* before smp init. */ if (thiscpu == NULL) @@ -214,6 +239,7 @@ void irq_restore (void) { amd64_irq_restore_flags (thiscpu->irq_ctx.rflags); } +/// Resolve custom IRQ numbers to legacy IRQs uint8_t amd64_resolve_irq (uint8_t irq) { static const uint8_t mappings[] = { [SCHED_PREEMPT_TIMER] = 0, diff --git a/kernel/amd64/intr_defs.h b/kernel/amd64/intr_defs.h index 31e5d9f..fac1a7d 100644 --- a/kernel/amd64/intr_defs.h +++ b/kernel/amd64/intr_defs.h @@ -1,6 +1,12 @@ #ifndef _KERNEL_AMD64_INTR_DEFS_H #define _KERNEL_AMD64_INTR_DEFS_H +/** + * @file + * Definitions for custom, nonstandard interrupt vectors (IDT entries). They have to be + * remapped to legacy IRQs by \ref amd64_resolve_irq.
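+ * A handler can be attached to one of these vectors with irq_attach, as is
+ * done for the TLB shootdown in \ref amd64/mm.c (a sketch; the handler name
+ * here is hypothetical):
+ * @code
+ * static void my_preempt_irq (void* arg, void* regs) { (void)arg, (void)regs; }
+ * irq_attach (&my_preempt_irq, NULL, SCHED_PREEMPT_TIMER, IRQ_INTERRUPT_SAFE);
+ * @endcode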
+ */ + #define SCHED_PREEMPT_TIMER 80 #define TLB_SHOOTDOWN 81 diff --git a/kernel/amd64/io.c b/kernel/amd64/io.c index 06174db..b5980e8 100644 --- a/kernel/amd64/io.c +++ b/kernel/amd64/io.c @@ -1,42 +1,51 @@ #include #include +/// Perform outb instruction (send an 8-bit value) void amd64_io_outb (uint16_t port, uint8_t v) { __asm__ volatile ("outb %1, %0" ::"dN"(port), "a"(v)); } +/// Perform outw instruction (send a 16-bit value) void amd64_io_outw (uint16_t port, uint16_t v) { __asm__ volatile ("outw %%ax, %%dx" ::"a"(v), "d"(port)); } +/// Perform outl instruction (send a 32-bit value) void amd64_io_outl (uint16_t port, uint32_t v) { __asm__ volatile ("outl %%eax, %%dx" ::"d"(port), "a"(v)); } +/// Perform outsw instruction (send a sequence of 16-bit words) void amd64_io_outsw (uint16_t port, const void* addr, int cnt) { __asm__ volatile ("cld; rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port) : "memory", "cc"); } +/// Perform inb instruction (receive an 8-bit value) uint8_t amd64_io_inb (uint16_t port) { uint8_t r; __asm__ volatile ("inb %1, %0" : "=a"(r) : "dN"(port)); return r; } +/// Perform inw instruction (receive a 16-bit value) uint16_t amd64_io_inw (uint16_t port) { uint16_t r; __asm__ volatile ("inw %%dx, %%ax" : "=a"(r) : "d"(port)); return r; } +/// Perform inl instruction (receive a 32-bit value) uint32_t amd64_io_inl (uint16_t port) { uint32_t r; __asm__ volatile ("inl %%dx, %%eax" : "=a"(r) : "d"(port)); return r; } +/// Perform insw instruction (receive a sequence of 16-bit words) void amd64_io_insw (uint16_t port, void* addr, int cnt) { __asm__ volatile ("cld; rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory", "cc"); } +/// Output a byte to port 0x80, which introduces a small I/O delay void amd64_io_wait (void) { amd64_io_outb (0x80, 0); } diff --git a/kernel/amd64/mm.c b/kernel/amd64/mm.c index 1ac92a3..30ef6d5 100644 --- a/kernel/amd64/mm.c +++ b/kernel/amd64/mm.c @@ -11,28 +11,36 @@ #include #include +/// Present flag #define AMD64_PG_PRESENT (1 << 0) -#define AMD64_PG_RW (1 << 1) -#define AMD64_PG_USER (1 << 2) +/// Writable flag +#define AMD64_PG_RW (1 << 1) +/// User-accessible flag +#define AMD64_PG_USER (1 << 2) +/// Auxiliary struct for page directory walking struct pg_index { uint16_t pml4, pml3, pml2, pml1; } PACKED; +/// Kernel page directory static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT}; -/* This is needed to sync between map/unmap operations and TLB shootdown.
*/ +/// Lock needed to sync between map/unmap operations and TLB shootdown static spin_lock_t mm_lock = SPIN_LOCK_INIT; +/// Get the current value of the CR3 register static uintptr_t amd64_current_cr3 (void) { uintptr_t cr3; __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory"); return cr3; } +/// Load the kernel CR3 as the current CR3 void amd64_load_kernel_cr3 (void) { __asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory"); } +/// Extract PML indices from a virtual address static struct pg_index amd64_mm_page_index (uint64_t vaddr) { struct pg_index ret; @@ -44,6 +52,7 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) { return ret; } +/// Walk the paging tables, allocating any missing structures along the way static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) { uint64_t entry = table[entry_idx]; physaddr_t paddr; @@ -68,6 +77,7 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr); } +/// Convert generic memory management subsystem flags into AMD64-specific flags static uint64_t amd64_mm_resolve_flags (uint32_t generic) { uint64_t flags = 0; @@ -78,11 +88,16 @@ static uint64_t amd64_mm_resolve_flags (uint32_t generic) { return flags; } +/// Reload the current CR3 value on the local CPU only static void amd64_reload_cr3 (void) { uint64_t cr3; __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory"); } +/** + * @brief Map a physical address to a virtual address with the given flags. The TLB needs + * to be flushed afterwards (see \ref mm_reload). + */ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { spin_lock (&mm_lock); @@ -124,10 +139,12 @@ done: spin_unlock (&mm_lock); } +/// Map a page into the kernel page directory void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) { mm_map_page (&kernel_pd, paddr, vaddr, flags); } +/// Unmap a virtual address. The TLB needs to be flushed afterwards void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) { spin_lock (&mm_lock); @@ -167,14 +184,18 @@ done: spin_unlock (&mm_lock); } +/// Unmap a page from the kernel page directory void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) { mm_unmap_page (&kernel_pd, vaddr, flags); } +/// Lock the kernel page directory void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); } +/// Unlock the kernel page directory void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); } +/// Allocate a userspace-ready page directory uintptr_t mm_alloc_user_pd_phys (void) { struct limine_hhdm_response* hhdm = limine_hhdm_request.response; @@ -192,6 +213,10 @@ uintptr_t mm_alloc_user_pd_phys (void) { return cr3; } +/** + * @brief Reload page tables after a map/unmap operation. This function performs the TLB + * shootdown.
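+ * A typical sequence (a sketch; paddr, vaddr and flags are assumed to come
+ * from the caller):
+ * @code
+ * mm_map_kernel_page (paddr, vaddr, flags); // see above
+ * mm_reload ();                             // broadcast the TLB shootdown
+ * @endcode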
+ */ void mm_reload (void) { spin_lock (&mm_lock); @@ -204,6 +229,7 @@ void mm_reload (void) { spin_unlock (&mm_lock); } +/// TLB shootdown IRQ handler static void amd64_tlb_shootdown_irq (void* arg, void* regs) { (void)arg, (void)regs; @@ -211,8 +237,13 @@ static void amd64_tlb_shootdown_irq (void* arg, void* regs) { DEBUG ("cpu %u TLB shootdown\n", thiscpu->id); } +/** + * @brief Continue initializing the AMD64 memory management subsystem after the + * essential parts have been initialized + */ void mm_init2 (void) { irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE); } +/// Initialize essentials for the AMD64 memory management subsystem void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); } diff --git a/kernel/amd64/msr.c b/kernel/amd64/msr.c index 903ea38..ccf8a25 100644 --- a/kernel/amd64/msr.c +++ b/kernel/amd64/msr.c @@ -1,12 +1,14 @@ #include #include +/// Read a model-specific register uint64_t amd64_rdmsr (uint32_t msr) { uint32_t low, high; __asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr)); return ((uint64_t)high << 32 | (uint64_t)low); } +/// Write a model-specific register void amd64_wrmsr (uint32_t msr, uint64_t value) { uint32_t low = (uint32_t)(value & 0xFFFFFFFF); uint32_t high = (uint32_t)(value >> 32); diff --git a/kernel/amd64/proc.h b/kernel/amd64/proc.h index 57555e8..c900f7a 100644 --- a/kernel/amd64/proc.h +++ b/kernel/amd64/proc.h @@ -4,9 +4,12 @@ #include #include +/// Top of a userspace process's stack #define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL -#define USTACK_SIZE (256 * PAGE_SIZE) +/// Size of a userspace process's stack +#define USTACK_SIZE (256 * PAGE_SIZE) +/// Platform-dependent process data struct proc_platformdata { struct saved_regs regs; uintptr_t syscall_stack; diff --git a/kernel/amd64/sched.h b/kernel/amd64/sched.h index aacf1c4..97d453d 100644 --- a/kernel/amd64/sched.h +++ b/kernel/amd64/sched.h @@ -1,6 +1,7 @@ #ifndef _KERNEL_AMD64_SCHED_H #define _KERNEL_AMD64_SCHED_H +/// Perform a process context switch void amd64_do_sched (void* regs, void* cr3); #endif // _KERNEL_AMD64_SCHED_H diff --git a/kernel/amd64/smp.c b/kernel/amd64/smp.c index 13f06d8..036e340 100644 --- a/kernel/amd64/smp.c +++ b/kernel/amd64/smp.c @@ -11,10 +11,14 @@ #include #include +/// CPU ID counter static uint32_t cpu_counter = 0; +/// Lock for \ref cpu_counter static spin_lock_t cpu_counter_lock = SPIN_LOCK_INIT; +/// Table of all CPUs static struct cpu cpus[CPUS_MAX]; +/// Allocate a CPU structure struct cpu* cpu_make (void) { spin_lock (&cpu_counter_lock); int id = cpu_counter++; @@ -36,10 +40,13 @@ struct cpu* cpu_get (uint32_t id) { return &cpus[id]; } +/// Get the ID of the currently running CPU uint32_t cpu_id (void) { return (uint32_t)amd64_rdmsr (MSR_GS_BASE); } +/// Assign an ID to the currently running CPU void cpu_assign (uint32_t id) { amd64_wrmsr (MSR_GS_BASE, (uint64_t)id); } +/// Bootstrap code for non-BSP CPUs static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) { amd64_load_kernel_cr3 (); @@ -59,6 +66,7 @@ static void amd64_smp_bootstrap (struct limine_mp_info* mp_info) { ; } +/// Initialize the SMP subsystem for AMD64 and
start the APs (application processors). void smp_init (void) { thiscpu->lapic_ticks = amd64_lapic_init (2500); diff --git a/kernel/amd64/spin_lock.c b/kernel/amd64/spin_lock.c index 405b815..a2ecc85 100644 --- a/kernel/amd64/spin_lock.c +++ b/kernel/amd64/spin_lock.c @@ -1,3 +1,4 @@ #include +/// Relax the spinlock busy-wait loop using the AMD64 pause instruction void spin_lock_relax (void) { __asm__ volatile ("pause"); } diff --git a/kernel/amd64/time.c b/kernel/amd64/time.c index 7065d95..1d928ef 100644 --- a/kernel/amd64/time.c +++ b/kernel/amd64/time.c @@ -2,4 +2,5 @@ #include #include +/// Sleep for a given number of microseconds void sleep_micro (size_t us) { amd64_hpet_sleep_micro (us); } diff --git a/kernel/amd64/tss.h b/kernel/amd64/tss.h index b8f7d47..2c54d6b 100644 --- a/kernel/amd64/tss.h +++ b/kernel/amd64/tss.h @@ -4,6 +4,7 @@ #include #include +/// 64-bit TSS structure: https://wiki.osdev.org/Task_State_Segment struct tss { uint32_t resv0; uint64_t rsp0;