Remove Doxygen-style comments, change formatting to wrap comments

2026-01-06 02:04:32 +01:00
parent 902682ac11
commit 7915986902
15 changed files with 469 additions and 510 deletions

View File

@@ -16,49 +16,49 @@
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24
-/// ID of Local APIC
+/* ID of Local APIC */
#define LAPIC_ID 0x20
-/// End of interrupt register
+/* End of interrupt register */
#define LAPIC_EOI 0xB0
-/// Spurious interrupt vector register
+/* Spurious interrupt vector register */
#define LAPIC_SIVR 0xF0
-/// Interrupt command register
+/* Interrupt command register */
#define LAPIC_ICR 0x300
-/// LVT timer register
+/* LVT timer register */
#define LAPIC_LVTTR 0x320
-/// Timer initial count register
+/* Timer initial count register */
#define LAPIC_TIMICT 0x380
-/// Timer current count register
+/* Timer current count register */
#define LAPIC_TIMCCT 0x390
-/// Divide config register
+/* Divide config register */
#define LAPIC_DCR 0x3E0
-/// Table of IOAPICS
+/* Table of IOAPICS */
static struct acpi_madt_ioapic apics[IOAPICS_MAX];
-/// Table of interrupt source overrides
+/* Table of interrupt source overrides */
+/* clang-format off */
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
+/* clang-format on */
-/// Count of actual IOAPIC entries
+/* Count of actual IOAPIC entries */
static size_t ioapic_entries = 0;
-/// Count of actual interrupt source overrides
+/* Count of actual interrupt source overrides */
static size_t intr_src_override_entries = 0;
-/// Local APIC MMIO base address. It comes from MSR_APIC_BASE
+/* Local APIC MMIO base address. It comes from MSR_APIC_BASE */
static uintptr_t lapic_mmio_base = 0;
-/// Read IOAPIC
+/* Read IOAPIC */
static uint32_t amd64_ioapic_read (uintptr_t vaddr, uint32_t reg) {
*(volatile uint32_t*)vaddr = reg;
return *(volatile uint32_t*)(vaddr + 0x10);
}
-/// Write IOAPIC
+/* Write IOAPIC */
static void amd64_ioapic_write (uintptr_t vaddr, uint32_t reg, uint32_t value) {
*(volatile uint32_t*)vaddr = reg;
*(volatile uint32_t*)(vaddr + 0x10) = value;
}
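[Editor's note] The two helpers above implement the IOAPIC's indirect register protocol: the register index goes into the select register at the MMIO base, and data is then read or written through the window at base + 0x10. A hedged sketch (not part of this commit; ioapic_max_redirs is a hypothetical helper) of how the version register would be read through that pair to learn how many redirection entries the chip has:

/* Read IOAPICVER (index 0x01); bits 23:16 hold the highest redirection-entry
 * index. `base` is assumed to be the IOAPIC MMIO base, already mapped. */
static uint32_t ioapic_max_redirs (uintptr_t base) {
    *(volatile uint32_t*)base = 0x01;                  /* select register index */
    uint32_t ver = *(volatile uint32_t*)(base + 0x10); /* read through the window */
    return ((ver >> 16) & 0xFF) + 1;                   /* entry count = max index + 1 */
}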
-/// Find an IOAPIC corresponding to the provided IRQ
+/* Find an IOAPIC corresponding to the provided IRQ */
static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
struct acpi_madt_ioapic* apic = NULL;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -75,21 +75,14 @@ static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
return NULL;
}
-/**
- * @brief Route IRQ to an IDT entry of a given Local APIC.
+/*
+ * Route IRQ to an IDT entry of a given Local APIC.
 *
- * @param vec
- * Interrupt vector number, which will be delivered to the CPU
- *
- * @param irq
- * Legacy IRQ number to be routed. Can be changed by an interrupt source override
+ * vec - Interrupt vector number, which will be delivered to the CPU.
+ * irq - Legacy IRQ number to be routed. Can be changed by an interrupt source override
 * into a different GSI.
 *
- * @param flags
- * IOAPIC redirection flags.
- *
- * @param lapic_id
- * Local APIC that will receive the interrupt.
+ * flags - IOAPIC redirection flags.
+ * lapic_id - Local APIC that will receive the interrupt.
 */
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
struct acpi_madt_ioapic* apic = NULL;
@@ -132,7 +125,7 @@ void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t
(uint32_t)(calc_flags >> 32));
}
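[Editor's note] For context: an IOAPIC redirection entry is a 64-bit value split across two consecutive 32-bit registers (low half: vector and delivery flags; high half: destination APIC ID in bits 31:24), which is why the routine ends with the pair of amd64_ioapic_write calls above. A hedged caller-side sketch, with the vector and IRQ chosen purely for illustration:

/* Route legacy IRQ 1 (PS/2 keyboard) to IDT vector 33 on the current CPU,
 * with default (0) redirection flags. */
amd64_ioapic_route_irq (33, 1, 0, amd64_lapic_id ());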
-/// Mask a given IRQ
+/* Mask a given IRQ */
void amd64_ioapic_mask (uint8_t irq) {
struct acpi_madt_ioapic* apic;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -149,7 +142,7 @@ void amd64_ioapic_mask (uint8_t irq) {
value | (1 << 16));
}
-/// Unmask a given IRQ
+/* Unmask a given IRQ */
void amd64_ioapic_unmask (uint8_t irq) {
struct acpi_madt_ioapic* apic;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -166,7 +159,7 @@ void amd64_ioapic_unmask (uint8_t irq) {
value & ~(1 << 16));
}
-/// Find and initialize the IOAPIC
+/* Find and initialize the IOAPIC */
void amd64_ioapic_init (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -204,35 +197,32 @@ void amd64_ioapic_init (void) {
}
}
-/// Get MMIO base of Local APIC
+/* Get MMIO base of Local APIC */
static uintptr_t amd64_lapic_base (void) { return lapic_mmio_base; }
-/// Write Local APIC
+/* Write Local APIC */
static void amd64_lapic_write (uint32_t reg, uint32_t value) {
*(volatile uint32_t*)(amd64_lapic_base () + reg) = value;
}
-/// Read Local APIC
+/* Read Local APIC */
static uint32_t amd64_lapic_read (uint32_t reg) {
return *(volatile uint32_t*)(amd64_lapic_base () + reg);
}
-/// Get ID of Local APIC
+/* Get ID of Local APIC */
uint32_t amd64_lapic_id (void) { return amd64_lapic_read (LAPIC_ID) >> 24; }
-/// Send End of interrupt command to Local APIC
+/* Send End of interrupt command to Local APIC */
void amd64_lapic_eoi (void) { amd64_lapic_write (LAPIC_EOI, 0); }
-/// Set initial counter value in Local APIC timer
+/* Set initial counter value in Local APIC timer */
void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick); }
-/**
- * @brief Calibrate Local APIC to send interrupts in a set interval.
+/*
+ * Calibrate Local APIC to send interrupts in a set interval.
 *
- * @param us
- * Period length in microseconds
- *
- * @return amount of ticks in a given period
+ * us - Period length in microseconds
+ * Returns the number of ticks in the given period.
*/
static uint32_t amd64_lapic_calibrate (uint32_t us) {
amd64_lapic_write (LAPIC_DCR, 0x0B);
@@ -248,12 +238,10 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
return ticks;
}
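[Editor's note] The calibration pattern here is the usual one: program the divider, start the timer from a large initial count, wait a known interval on an independent clock (the HPET), and take the drop in the count as ticks-per-interval. A hedged sketch of that logic using this file's register names (lapic_ticks_per is a hypothetical name, not this commit's code):

static uint32_t lapic_ticks_per (uint32_t us) {
    amd64_lapic_write (LAPIC_DCR, 0x0B);          /* divide-by-1 */
    amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF); /* start counting down from max */
    amd64_hpet_sleep_micro (us);                  /* known delay measured by the HPET */
    uint32_t cur = amd64_lapic_read (LAPIC_TIMCCT);
    return 0xFFFFFFFFu - cur;                     /* ticks elapsed in `us` microseconds */
}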
-/**
- * @brief Starts a Local APIC, configures LVT timer to
- * send interrupts at \ref SCHED_PREEMPT_TIMER.
+/*
+ * Starts a Local APIC, configures LVT timer to send interrupts at SCHED_PREEMPT_TIMER.
 *
- * @param ticks
- * Initial tick count
+ * ticks - Initial tick count
*/
static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_DCR, 0x0B);
@@ -263,9 +251,9 @@ static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_TIMICT, ticks);
}
-/**
- * @brief Initialize Local APIC, configure to send timer interrupts
- * at a given period. See \ref amd64_lapic_calibrate and \ref amd64_lapic_start.
+/*
+ * Initialize Local APIC, configure to send timer interrupts at a given period. See
+ * amd64_lapic_calibrate and amd64_lapic_start.
*/
uint64_t amd64_lapic_init (uint32_t us) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -287,14 +275,11 @@ uint64_t amd64_lapic_init (uint32_t us) {
return ticks;
}
-/**
- * @brief Send an IPI to a given Local APIC. This will invoke an IDT stub located at vec.
+/*
+ * Send an IPI to a given Local APIC. This will invoke an IDT stub located at vec.
 *
- * @param lapic_id
- * Target Local APIC
- *
- * @param vec
- * Interrupt vector/IDT stub, which will be invoked by the IPI.
+ * lapic_id - Target Local APIC
+ * vec - Interrupt vector/IDT stub, which will be invoked by the IPI.
*/
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
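[Editor's note] The write order matters in this sequence: the destination APIC ID sits in bits 31:24 of the ICR high dword (offset LAPIC_ICR + 0x10), and it is the subsequent write to the low dword that actually sends the IPI. A hedged sketch of the full fixed-delivery sequence the truncated body presumably completes:

amd64_lapic_write (LAPIC_ICR + 0x10, (uint32_t)lapic_id << 24); /* ICR high: destination */
amd64_lapic_write (LAPIC_ICR, vec);                             /* ICR low: vector; this write fires the IPI */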

View File

@@ -1,5 +1,3 @@
-/** @file */
#include <amd64/apic.h>
#include <amd64/debug.h>
#include <amd64/hpet.h>
@@ -24,14 +22,11 @@
#define UACPI_MEMORY_BUFFER_MAX 4096
-/** @cond DOXYGEN_IGNORE */
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
-/** @endcond */
-/**
- * @brief The kernel starts booting here. This is the entry point after
- * Limine hands control. We set up all the necessary platform-dependent
- * subsystems/drivers and jump into the init app.
+/*
+ * The kernel starts booting here. This is the entry point after Limine hands control. We set up all
+ * the necessary platform-dependent subsystems/drivers and jump into the init app.
*/
void bootmain (void) {
struct cpu* bsp_cpu = cpu_make ();

View File

@@ -6,30 +6,33 @@
#include <sync/spin_lock.h>
#include <sys/debug.h>
-/// Port for printing to serial
+/* Port for printing to serial */
+/* TODO: Make this configurable */
#define PORT_COM1 0x03F8
-/// \ref debugprintf buffer size
+/* debugprintf buffer size */
#define BUFFER_SIZE 1024
-/// Lock, which ensures that prints to the serial port are atomic
+/*
+ * Lock, which ensures that prints to the serial port are atomic (i.e. one debugprintf is atomic in
+ * itself).
+ */
static spin_lock_t serial_lock = SPIN_LOCK_INIT;
static bool debug_init = false;
-/// Block until TX buffer is empty
+/* Block until TX buffer is empty */
static bool amd64_debug_serial_tx_empty (void) {
return (bool)(amd64_io_inb (PORT_COM1 + 5) & 0x20);
}
-/// Write a single character to serial
+/* Write a single character to serial */
static void amd64_debug_serial_write (char x) {
while (!amd64_debug_serial_tx_empty ())
;
amd64_io_outb (PORT_COM1, (uint8_t)x);
}
-/**
- * @brief Formatted printing to serial. \ref serial_lock ensures that
- * all prints are atomic.
+/*
+ * Formatted printing to serial. serial_lock ensures that all prints are atomic.
*/
void debugprintf (const char* fmt, ...) {
if (!debug_init)
@@ -57,7 +60,7 @@ void debugprintf (const char* fmt, ...) {
spin_unlock (&serial_lock);
}
-/// Initialize serial
+/* Initialize serial */
void amd64_debug_init (void) {
amd64_io_outb (PORT_COM1 + 1, 0x00);
amd64_io_outb (PORT_COM1 + 3, 0x80);
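[Editor's note] The two writes shown are the start of the standard 16550 bring-up: disable UART interrupts, then set DLAB to expose the divisor latch. The truncated hunk hides the rest; for reference, a hedged sketch of the conventional remainder of such an init (the divisor and framing values are typical choices, not confirmed by this diff):

amd64_io_outb (PORT_COM1 + 0, 0x01); /* with DLAB=1: divisor low byte -> 115200 baud */
amd64_io_outb (PORT_COM1 + 1, 0x00); /* divisor high byte */
amd64_io_outb (PORT_COM1 + 3, 0x03); /* DLAB=0: 8 data bits, no parity, 1 stop bit */
amd64_io_outb (PORT_COM1 + 2, 0xC7); /* enable and clear FIFOs, 14-byte threshold */
amd64_io_outb (PORT_COM1 + 4, 0x0B); /* modem control: DTR, RTS, OUT2 */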

View File

@@ -11,13 +11,11 @@
#define GDT_UDATA 0x20
#define GDT_TSS 0x28
-/// Size of kernel stack
+/* Size of kernel stack */
#define KSTACK_SIZE (32 * 1024)
-/**
- * @file
- *
- * @brief 64-bit GDT structure. For more info see:
+/*
+ * 64-bit GDT structure. For more info see:
* - https://wiki.osdev.org/Global_Descriptor_Table
* - https://wiki.osdev.org/GDT_Tutorial
*/
@@ -31,11 +29,13 @@ struct gdt_entry {
uint8_t basehigh;
} PACKED;
+/* Struct that gets loaded into GDTR */
struct gdt_ptr {
uint16_t limit;
uint64_t base;
} PACKED;
+/* New, extended GDT (we need to extend Limine's GDT) */
struct gdt_extended {
struct gdt_entry old[5];
struct gdt_entry tsslow;

View File

@@ -10,30 +10,27 @@
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>
-/**
- * @file
- *
- * @brief HPET (High Precision Event Timer) driver code.
- * See more at https://wiki.osdev.org/HPET
+/*
+ * HPET (High Precision Event Timer) driver code. See more at https://wiki.osdev.org/HPET
*/
-/// HPET Main Counter Value Register
+/* HPET Main Counter Value Register */
#define HPET_MCVR 0xF0
-/// HPET General Configuration Register
+/* HPET General Configuration Register */
#define HPET_GCR 0x10
-/// HPET General Capabilities and ID Register
+/* HPET General Capabilities and ID Register */
#define HPET_GCIDR 0x00
-/// Set whether we should use 32-bit or 64-bit reads/writes
+/* Set whether we should use 32-bit or 64-bit reads/writes */
static bool hpet_32bits = 1;
-/// Physical address for HPET MMIO
+/* Physical address for HPET MMIO */
static uintptr_t hpet_paddr;
-/// HPET period in femtoseconds
+/* HPET period in femtoseconds */
static uint64_t hpet_period_fs;
-/// Lock, which protects concurrent access. See \ref amd64/smp.c
+/* Lock, which protects concurrent access. See amd64/smp.c */
static spin_lock_t hpet_lock = SPIN_LOCK_INIT;
-/// Read a HPET register. Assumes caller holds \ref hpet_lock
+/* Read a HPET register. Assumes caller holds hpet_lock */
static uint64_t amd64_hpet_read (uint32_t reg) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
@@ -41,7 +38,7 @@ static uint64_t amd64_hpet_read (uint32_t reg) {
: *(volatile uint64_t*)(hpet_vaddr + reg));
}
-/// Write a HPET register. Assumes caller holds \ref hpet_lock
+/* Write a HPET register. Assumes caller holds hpet_lock */
static void amd64_hpet_write (uint32_t reg, uint64_t value) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
@@ -51,10 +48,11 @@ static void amd64_hpet_write (uint32_t reg, uint64_t value) {
*(volatile uint64_t*)(hpet_vaddr + reg) = value;
}
-/// Read current value of \ref HPET_MCVR register.
+/* Read current value of the HPET_MCVR register. */
static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }
-/// Sleep for a given amount of microseconds. This time can last longer due to \ref hpet_lock being held.
+/* Sleep for a given amount of microseconds. This time can last longer due to hpet_lock being
+ * held. */
void amd64_hpet_sleep_micro (uint64_t us) {
spin_lock (&hpet_lock);
@@ -74,7 +72,7 @@ void amd64_hpet_sleep_micro (uint64_t us) {
spin_unlock (&hpet_lock);
}
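[Editor's note] Converting the requested microseconds into main-counter ticks follows directly from hpet_period_fs: the counter advances once per period, and one microsecond is 10^9 femtoseconds. A hedged sketch of that arithmetic (hpet_us_to_ticks is a hypothetical helper, not in this diff):

static uint64_t hpet_us_to_ticks (uint64_t us) {
    /* ticks = elapsed femtoseconds / femtoseconds per tick */
    return (us * 1000000000ULL) / hpet_period_fs;
}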
-/// Initialize HPET
+/* Initialize HPET */
void amd64_hpet_init (void) {
struct uacpi_table hpet_table;
uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);

View File

@@ -9,7 +9,7 @@
#define TSS 0x80
#define TSS_PRESENT 0x89
-/// Set a GDT entry
+/* Set a GDT entry */
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
uint8_t acc, uint8_t gran) {
ent->baselow = (base & 0xFFFF);
@@ -20,7 +20,7 @@ static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32
ent->access = acc;
}
-/// Initialize GDT and TSS structures for a given CPU
+/* Initialize GDT and TSS structures for a given CPU */
static void amd64_gdt_init (struct cpu* cpu) {
volatile struct tss* tss = &cpu->tss;
volatile struct gdt_extended* gdt = &cpu->gdt;
@@ -51,11 +51,13 @@ static void amd64_gdt_init (struct cpu* cpu) {
gdt->tsshigh.access = 0;
gdt->tsshigh.gran = 0;
+/* Load GDTR */
struct gdt_ptr gdtr;
gdtr.limit = sizeof (*gdt) - 1;
gdtr.base = (uint64_t)gdt;
__asm__ volatile ("lgdt %0" ::"m"(gdtr) : "memory");
+/* Reload CS */
__asm__ volatile ("pushq %[kcode]\n"
"lea 1f(%%rip), %%rax\n"
"pushq %%rax\n"
@@ -72,11 +74,10 @@ static void amd64_gdt_init (struct cpu* cpu) {
__asm__ volatile ("ltr %0" ::"r"((uint16_t)GDT_TSS));
}
-/**
- * @brief Initialize essentials (GDT, TSS, IDT) for a given CPU
+/*
+ * Initialize essentials (GDT, TSS, IDT) for a given CPU
 *
- * @param load_idt
- * Tell whether the IDT needs to be loaded. It only has to be loaded once on
+ * load_idt - Tell whether the IDT needs to be loaded. It only has to be loaded once on
* the BSP
*/
void amd64_init (struct cpu* cpu, bool load_idt) {

View File

@@ -39,7 +39,7 @@
#define IDT_ENTRIES_MAX 256
-/// 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table
+/* 64-bit IDT entry structure: https://wiki.osdev.org/Interrupt_Descriptor_Table */
struct idt_entry {
uint16_t intrlow;
uint16_t kernel_cs;
@@ -55,18 +55,14 @@ struct idt {
uint64_t base;
} PACKED;
-/** @cond DOXYGEN_IGNORE */
ALIGNED (16) static volatile struct idt_entry idt_entries[IDT_ENTRIES_MAX];
-/** @endcond */
static volatile struct idt idt;
-/// Remaps and disables old 8259 PIC, since we'll be using APIC.
+/* Remaps and disables old 8259 PIC, since we'll be using APIC. */
static void amd64_init_pic (void) {
-/** @cond DOXYGEN_IGNORE */
#define IO_OP(fn, ...) \
fn (__VA_ARGS__); \
amd64_io_wait ()
-/** @endcond */
IO_OP (amd64_io_outb, PIC1_CMD, (ICW1_INIT | ICW1_ICW4));
IO_OP (amd64_io_outb, PIC2_CMD, (ICW1_INIT | ICW1_ICW4));
@@ -87,7 +83,7 @@ static void amd64_init_pic (void) {
#undef IO_OP
}
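[Editor's note] For orientation, the canonical 8259 sequence this function performs: ICW1 begins initialization, ICW2 moves the vector bases away from the CPU-exception range (0x20/0x28 are the common choices), ICW3 describes the master/slave cascade on IRQ2, ICW4 selects 8086 mode, and a final OCW1 masks every line since the IOAPIC takes over delivery. A hedged sketch with the data ports written as command port + 1 and the io_wait calls elided (the offsets are typical values, not read from this truncated hunk):

amd64_io_outb (PIC1_CMD + 1, 0x20); /* ICW2: master vectors start at 0x20 */
amd64_io_outb (PIC2_CMD + 1, 0x28); /* ICW2: slave vectors start at 0x28 */
amd64_io_outb (PIC1_CMD + 1, 0x04); /* ICW3: slave attached on IRQ2 */
amd64_io_outb (PIC2_CMD + 1, 0x02); /* ICW3: slave cascade identity */
amd64_io_outb (PIC1_CMD + 1, 0x01); /* ICW4: 8086 mode */
amd64_io_outb (PIC2_CMD + 1, 0x01);
amd64_io_outb (PIC1_CMD + 1, 0xFF); /* OCW1: mask all lines */
amd64_io_outb (PIC2_CMD + 1, 0xFF);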
-/// Set IDT entry
+/* Set IDT entry */
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags,
uint8_t ist) {
ent->intrlow = (handler & 0xFFFF);
@@ -99,18 +95,16 @@ static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uin
ent->resv = 0;
}
-/// Load the IDT
+/* Load the IDT */
void amd64_load_idt (void) { __asm__ volatile ("lidt %0" ::"m"(idt)); }
-/// Initialize IDT entries
+/* Initialize IDT entries */
static void amd64_idt_init (void) {
memset ((void*)idt_entries, 0, sizeof (idt_entries));
-/** @cond DOXYGEN_IGNORE */
#define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
-/** @endcond */
/* clang-format off */
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
@@ -136,13 +130,7 @@ static void amd64_idt_init (void) {
amd64_load_idt ();
}
-/**
- * @brief Handle CPU exception and dump registers. If incoming CS has CPL3, kill the
- * process.
- *
- * @param regs
- * saved registers
- */
+/* Handle CPU exception and dump registers. If incoming CS has CPL3, kill the process. */
static void amd64_intr_exception (struct saved_regs* regs) {
DEBUG ("cpu exception %lu (%lu)\n", regs->trap, regs->error);
@@ -171,7 +159,7 @@ static void amd64_intr_exception (struct saved_regs* regs) {
}
}
-/// Handle incoming interrupt, dispatch IRQ handlers.
+/* Handle incoming interrupt, dispatch IRQ handlers. */
void amd64_intr_handler (void* stack_ptr) {
struct saved_regs* regs = stack_ptr;
@@ -194,7 +182,7 @@ void amd64_intr_handler (void* stack_ptr) {
}
}
-/// Initialize interrupts
+/* Initialize interrupts */
void amd64_intr_init (void) {
amd64_init_pic ();
amd64_idt_init ();
@@ -202,34 +190,34 @@ void amd64_intr_init (void) {
/* Aux. */
-/// Save RFLAGS of the current CPU
+/* Save RFLAGS of the current CPU */
static uint64_t amd64_irq_save_flags (void) {
uint64_t rflags;
__asm__ volatile ("pushfq; cli; popq %0" : "=r"(rflags)::"memory", "cc");
return rflags;
}
-/// Restore interrupts (IF bit) from RFLAGS
+/* Restore interrupts (IF bit) from RFLAGS */
static void amd64_irq_restore_flags (uint64_t rflags) {
if (rflags & (1ULL << 9))
__asm__ volatile ("sti");
}
-/// Save current interrupt state
+/* Save current interrupt state */
void irq_save (void) {
int prev = atomic_fetch_add_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 0)
thiscpu->irq_ctx.rflags = amd64_irq_save_flags ();
}
-/// Restore interrupt state
+/* Restore interrupt state */
void irq_restore (void) {
int prev = atomic_fetch_sub_explicit (&thiscpu->irq_ctx.nesting, 1, memory_order_acq_rel);
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
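[Editor's note] The nesting counter makes these calls safely stackable: only the transition from zero snapshots RFLAGS, and only the transition back to zero conditionally re-enables interrupts. A hedged usage sketch (the caller is hypothetical):

void nested_critical_example (void) {
    irq_save ();    /* nesting 0 -> 1: RFLAGS saved, IF cleared */
    irq_save ();    /* nesting 1 -> 2: only the counter moves */
    /* ... interrupt-free work ... */
    irq_restore (); /* 2 -> 1: interrupts stay off */
    irq_restore (); /* 1 -> 0: IF restored from the saved RFLAGS */
}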
-/// Map custom IRQ mappings to legacy IRQs
+/* Map custom IRQ mappings to legacy IRQs */
uint8_t amd64_resolve_irq (uint8_t irq) {
static const uint8_t mappings[] = {
[SCHED_PREEMPT_TIMER] = 0,

View File

@@ -1,11 +1,8 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H
-/**
- * @file
- * Definitions for custom, nonstandard IDT entries. They have to be remapped
- * by \ref amd64_resolve_irq into legacy IRQs.
- */
+/* Definitions for custom, nonstandard IDT entries. They have to be remapped by amd64_resolve_irq
+ * into legacy IRQs. */
#define SCHED_PREEMPT_TIMER 80
#define TLB_SHOOTDOWN 81

View File

@@ -11,36 +11,36 @@
#include <sys/mm.h>
#include <sys/smp.h>
-/// Present flag
+/* Present flag */
#define AMD64_PG_PRESENT (1 << 0)
-/// Writable flag
+/* Writable flag */
#define AMD64_PG_RW (1 << 1)
-/// User-accessible flag
+/* User-accessible flag */
#define AMD64_PG_USER (1 << 2)
-/// Auxiliary struct for page directory walking
+/* Auxiliary struct for page directory walking */
struct pg_index {
uint16_t pml4, pml3, pml2, pml1;
} PACKED;
-/// Kernel page directory
+/* Kernel page directory */
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
-/// Lock needed to sync between map/unmap operations and TLB shootdown
+/* Lock needed to sync between map/unmap operations and TLB shootdown */
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
-/// Get current value of CR3 register
+/* Get current value of CR3 register */
static uintptr_t amd64_current_cr3 (void) {
uintptr_t cr3;
__asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
return cr3;
}
-/// Load kernel CR3 as current CR3
+/* Load kernel CR3 as current CR3 */
void amd64_load_kernel_cr3 (void) {
__asm__ volatile ("movq %0, %%cr3" ::"r"(kernel_pd.cr3_paddr) : "memory");
}
-/// Extract PML info from virtual address
+/* Extract PML info from virtual address */
static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
struct pg_index ret;
@@ -52,7 +52,7 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
return ret;
}
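[Editor's note] The split this helper computes is fixed by the x86-64 4-level paging format: bits 47:39 index the PML4, 38:30 the PDPT, 29:21 the PD, and 20:12 the PT, nine bits per level above the 12-bit page offset. A hedged sketch of the arithmetic the truncated body performs:

ret.pml4 = (vaddr >> 39) & 0x1FF;
ret.pml3 = (vaddr >> 30) & 0x1FF;
ret.pml2 = (vaddr >> 21) & 0x1FF;
ret.pml1 = (vaddr >> 12) & 0x1FF;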
-/// Walk paging tables and allocate necessary structures along the way
+/* Walk paging tables and allocate necessary structures along the way */
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
uint64_t entry = table[entry_idx];
physaddr_t paddr;
@@ -85,7 +85,7 @@ static bool amd64_mm_is_table_empty (uint64_t* table) {
return true;
}
-/// Convert generic memory management subsystem flags into AMD64-specific flags
+/* Convert generic memory management subsystem flags into AMD64-specific flags */
static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
uint64_t flags = 0;
@@ -96,16 +96,13 @@ static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
return flags;
}
-/// Reload the current CR3 value ON A LOCAL CPU
+/* Reload the current CR3 value ON A LOCAL CPU */
static void amd64_reload_cr3 (void) {
uint64_t cr3;
__asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
}
-/**
- * @brief Map physical address to virtual address with flags. TLB needs to be flushed
- * afterwards.
- */
+/* Map physical address to virtual address with flags. TLB needs to be flushed afterwards. */
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
@@ -147,12 +144,12 @@ done:
spin_unlock (&mm_lock);
}
-/// Map a page into kernel page directory
+/* Map a page into kernel page directory */
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
mm_map_page (&kernel_pd, paddr, vaddr, flags);
}
-/// Unmap a virtual address. TLB needs to be flushed afterwards
+/* Unmap a virtual address. TLB needs to be flushed afterwards */
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
@@ -212,18 +209,18 @@ done:
spin_unlock (&mm_lock);
}
-/// Unmap a page from kernel page directory
+/* Unmap a page from kernel page directory */
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
mm_unmap_page (&kernel_pd, vaddr, flags);
}
-/// Lock kernel page directory
+/* Lock kernel page directory */
void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
-/// Unlock kernel page directory
+/* Unlock kernel page directory */
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
-/// Allocate a userspace-ready page directory
+/* Allocate a userspace-ready page directory */
uintptr_t mm_alloc_user_pd_phys (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -241,10 +238,7 @@ uintptr_t mm_alloc_user_pd_phys (void) {
return cr3;
}
-/**
- * @brief Reload after map/unmap operation was performed. This function does the TLB
- * shootdown.
- */
+/* Reload after map/unmap operation was performed. This function does the TLB shootdown. */
void mm_reload (void) {
spin_lock (&mm_lock);
@@ -257,7 +251,7 @@ void mm_reload (void) {
spin_unlock (&mm_lock);
}
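[Editor's note] The intended call pattern pairs every batch of map/unmap operations with one mm_reload so every CPU flushes its TLB once. A hedged usage sketch (paddr, vaddr, and flags are illustrative placeholders, not taken from this diff):

mm_map_kernel_page (paddr, vaddr, flags); /* generic flags, resolved by amd64_mm_resolve_flags */
mm_reload ();                             /* broadcast the TLB shootdown to all CPUs */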
-/// TLB shootdown IRQ handler
+/* TLB shootdown IRQ handler */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
(void)arg, (void)regs;
@@ -265,13 +259,11 @@ static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
}
-/**
- * @brief Continue initializing memory management subsystem for AMD64 after the
- * essential parts were initialized
- */
+/* Continue initializing memory management subsystem for AMD64 after the essential parts were
+ * initialized */
void mm_init2 (void) {
irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
}
-/// Initialize essentials for the AMD64 memory management subsystem
+/* Initialize essentials for the AMD64 memory management subsystem */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -28,15 +28,15 @@ Usage-Guide:
License-Text:
-GNU GENERAL PUBLIC LICENSE
-Version 2, June 1991
+GNU GENERAL PUBLIC LICENSE
+Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
-Preamble
+Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
@@ -85,8 +85,8 @@ patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
-GNU GENERAL PUBLIC LICENSE
+GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
@@ -140,7 +140,7 @@ above, provided that you also meet all of these conditions:
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
@@ -198,7 +198,7 @@ access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
@@ -255,7 +255,7 @@ impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
@@ -285,7 +285,7 @@ make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
-NO WARRANTY
+NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
@@ -307,9 +307,9 @@ YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
-END OF TERMS AND CONDITIONS
-How to Apply These Terms to Your New Programs
+END OF TERMS AND CONDITIONS
+How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it