Compare commits
741d0fb9b0...69feceaaae
2 Commits

| Author | SHA1 | Date | |
|---|---|---|---|
|  | 69feceaaae |  |  |
|  | 7b33d0757a |  |  |
aux/qemu_amd64.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/sh

qemu-system-x86_64 -M q35 -m 4G -serial mon:stdio -cdrom mop3.iso
@@ -22,7 +22,7 @@ AllowShortLoopsOnASingleLine: false
AllowShortBlocksOnASingleLine: Never

# Line breaking
ColumnLimit: 80
ColumnLimit: 100
BreakBeforeBinaryOperators: None
BreakBeforeTernaryOperators: true
BreakStringLiterals: false
kernel/amd64/apic.c (new file, 152 lines)
@@ -0,0 +1,152 @@
#include <amd64/apic.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>

#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24

static struct acpi_madt_ioapic apics[IOAPICS_MAX];
/* clang-format off */
static struct acpi_madt_interrupt_source_override intr_src_overrides[INTERRUPT_SRC_OVERRIDES_MAX];
/* clang-format on */
static size_t ioapic_entries = 0;
static size_t intr_src_override_entries = 0;

extern void amd64_spin (void);

static uint32_t amd64_ioapic_read (uintptr_t vaddr, uint32_t reg) {
  *(volatile uint32_t*)vaddr = reg;
  return *(volatile uint32_t*)(vaddr + 0x10);
}

static void amd64_ioapic_write (uintptr_t vaddr, uint32_t reg, uint32_t value) {
  *(volatile uint32_t*)vaddr = reg;
  *(volatile uint32_t*)(vaddr + 0x10) = value;
}

static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
  struct acpi_madt_ioapic* apic = NULL;
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  for (size_t i = 0; i < ioapic_entries; i++) {
    apic = &apics[i];
    uint32_t version = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, 1);
    uint32_t max = (version >> 16);

    if ((apic->gsi_base <= irq) && ((apic->gsi_base + max) > irq))
      break;
  }

  return apic;
}

void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
  struct acpi_madt_ioapic* apic;
  struct acpi_madt_interrupt_source_override* override;
  bool found_override = false;
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  for (size_t i = 0; i < intr_src_override_entries; i++) {
    override = &intr_src_overrides[i];
    if (override->source == irq) {
      found_override = true;
      break;
    }
  }

  uint64_t calc_flags = (lapic_id << 56) | (flags) | (vec & 0xFF);

  if (found_override) {
    uint8_t polarity = ((override->flags & 0x03) == 0x03) ? 1 : 0;
    uint8_t mode = (((override->flags >> 2) & 0x03) == 0x03) ? 1 : 0;
    calc_flags = (lapic_id << 56) | (mode << 15) | (polarity << 14) | (vec & 0xFF) | flags;
  }

  apic = amd64_ioapic_find (irq);

  if (apic == NULL)
    return;

  uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;

  amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
                      (uint32_t)calc_flags);

  amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg + 1,
                      (uint32_t)(calc_flags >> 32));
}

void amd64_ioapic_mask (uint8_t irq) {
  struct acpi_madt_ioapic* apic;
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  apic = amd64_ioapic_find (irq);

  if (apic == NULL)
    return;

  uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;

  uint32_t value = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg);
  amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
                      value | (1 << 16));
}

void amd64_ioapic_unmask (uint8_t irq) {
  struct acpi_madt_ioapic* apic;
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  apic = amd64_ioapic_find (irq);

  if (apic == NULL)
    return;

  uint32_t irq_reg = ((irq - apic->gsi_base) * 2) + 0x10;

  uint32_t value = amd64_ioapic_read ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg);
  amd64_ioapic_write ((uintptr_t)hhdm->offset + (uintptr_t)apic->address, irq_reg,
                      value & ~(1 << 16));
}

void amd64_ioapic_init (void) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  struct uacpi_table apic_table;
  uacpi_status status = uacpi_table_find_by_signature (ACPI_MADT_SIGNATURE, &apic_table);
  if (status != UACPI_STATUS_OK) {
    DEBUG ("Could not find MADT table!\n");
    amd64_spin ();
  }

  struct acpi_madt* apic = (struct acpi_madt*)apic_table.virt_addr;
  struct acpi_entry_hdr* current = (struct acpi_entry_hdr*)apic->entries;

  for (;;) {
    if ((uintptr_t)current >=
        ((uintptr_t)apic->entries + apic->hdr.length - sizeof (struct acpi_madt)))
      break;

    switch (current->type) {
      case ACPI_MADT_ENTRY_TYPE_IOAPIC: {
        struct acpi_madt_ioapic* ioapic = (struct acpi_madt_ioapic*)current;
        mm_map_kernel_page ((uintptr_t)ioapic->address,
                            (uintptr_t)hhdm->offset + (uintptr_t)ioapic->address,
                            MM_PG_USER | MM_PG_RW);
        apics[ioapic_entries++] = *ioapic;
      } break;
      case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
        struct acpi_madt_interrupt_source_override* override =
            (struct acpi_madt_interrupt_source_override*)current;
        intr_src_overrides[intr_src_override_entries++] = *override;
      } break;
    }

    current = (struct acpi_entry_hdr*)((uintptr_t)current + current->length);
  }
}
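A minimal usage sketch for the routines above (illustrative only, not part of this diff): route the legacy PS/2 keyboard line, IRQ 1, to an interrupt vector on the boot CPU's LAPIC and unmask it. The vector number 0x21 and LAPIC id 0 are assumptions made for the example, as is the assumption that amd64_ioapic_init () has already run earlier in boot.

```c
#include <amd64/apic.h>

/* Hypothetical helper, not present in the commit. Assumes a handler for
 * vector 0x21 is already installed in the IDT. */
static void example_route_keyboard (void) {
  amd64_ioapic_route_irq (0x21 /* vec */, 1 /* irq */, 0 /* extra flags */, 0 /* lapic_id */);
  amd64_ioapic_unmask (1);
}
```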
kernel/amd64/apic.h (new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef _KERNEL_AMD64_APIC_H
#define _KERNEL_AMD64_APIC_H

#include <libk/std.h>

void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id);
void amd64_ioapic_mask (uint8_t irq);
void amd64_ioapic_unmask (uint8_t irq);
void amd64_ioapic_init (void);

#endif // _KERNEL_AMD64_APIC_H
@@ -1,9 +1,12 @@
#include <amd64/apic.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <aux/compiler.h>
#include <limine/limine.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <uacpi/uacpi.h>

#define UACPI_MEMORY_BUFFER_MAX 4096
@@ -13,14 +16,21 @@ ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
void bootmain (void) {
  amd64_init ();
  pmm_init ();
  mm_init ();

  uacpi_setup_early_table_access ((void*)uacpi_memory_buffer,
                                  sizeof (uacpi_memory_buffer));
  uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer));

  amd64_ioapic_init ();
  amd64_hpet_init ();

  int* a = malloc (sizeof (int));
  *a = 6969;
  DEBUG ("a=%p, *a=%d\n", a, *a);

  amd64_hpet_sleep_micro (3000000);

  DEBUG ("woke up!!!\n");

  for (;;)
    ;
}
kernel/amd64/hpet.c (new file, 67 lines)
@@ -0,0 +1,67 @@
#include <amd64/hpet.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <uacpi/acpi.h>
#include <uacpi/status.h>
#include <uacpi/tables.h>
#include <uacpi/uacpi.h>

#define HPET_MCVR 0xF0 /* Main Counter Value Register */
#define HPET_GCR 0x10 /* General Configuration Register */
#define HPET_GCIDR 0x00 /* General Capabilities and ID Register */

static bool hpet_32bits = 1;
static uintptr_t hpet_paddr;
static uint64_t hpet_clock_nano;

extern void amd64_spin (void);

static uint64_t amd64_hpet_read (uint32_t reg) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
  return (hpet_32bits ? *(volatile uint32_t*)(hpet_vaddr + reg)
                      : *(volatile uint64_t*)(hpet_vaddr + reg));
}

static void amd64_hpet_write (uint32_t reg, uint64_t value) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t hpet_vaddr = hpet_paddr + (uintptr_t)hhdm->offset;
  if (hpet_32bits)
    *(volatile uint32_t*)(hpet_vaddr + reg) = (value & 0xFFFFFFFF);
  else
    *(volatile uint64_t*)(hpet_vaddr + reg) = value;
}

static uint64_t amd64_hpet_timestamp (void) { return amd64_hpet_read (HPET_MCVR); }

uint64_t amd64_hpet_current_nano (void) { return amd64_hpet_timestamp () * hpet_clock_nano; }

void amd64_hpet_sleep_micro (uint64_t us) {
  uint64_t start = amd64_hpet_timestamp ();
  uint64_t conv = us * 1000;
  while (((amd64_hpet_timestamp () - start) * hpet_clock_nano) < conv)
    __asm__ volatile ("pause" ::: "memory");
}

void amd64_hpet_init (void) {
  struct uacpi_table hpet_table;
  uacpi_status status = uacpi_table_find_by_signature (ACPI_HPET_SIGNATURE, &hpet_table);
  if (status != UACPI_STATUS_OK) {
    DEBUG ("Could not find HPET table!\n");
    amd64_spin ();
  }

  struct acpi_hpet* hpet = (struct acpi_hpet*)hpet_table.virt_addr;
  hpet_paddr = (uintptr_t)hpet->address.address;

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);

  hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1;

  amd64_hpet_write (HPET_GCR, amd64_hpet_read (HPET_GCR) | 0x01);

  hpet_clock_nano = amd64_hpet_read (HPET_GCIDR + 4) / 1000000;
}
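A short sketch of how the HPET helpers above might be exercised (illustrative, not part of this commit). It assumes amd64_hpet_init () has already been called, as bootmain does, and simply measures a busy-wait with the nanosecond counter:

```c
#include <amd64/hpet.h>
#include <sys/debug.h>

/* Hypothetical example function; the 1000 us duration is arbitrary. */
static void example_measure_sleep (void) {
  uint64_t t0 = amd64_hpet_current_nano ();
  amd64_hpet_sleep_micro (1000); /* busy-wait roughly 1 ms */
  uint64_t t1 = amd64_hpet_current_nano ();
  DEBUG ("slept for about %lu ns\n", t1 - t0);
}
```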
kernel/amd64/hpet.h (new file, 10 lines)
@@ -0,0 +1,10 @@
#ifndef _KERNEL_AMD64_HPET_H
#define _KERNEL_AMD64_HPET_H

#include <libk/std.h>

uint64_t amd64_hpet_current_nano (void);
void amd64_hpet_sleep_micro (uint64_t us);
void amd64_hpet_init (void);

#endif // _KERNEL_AMD64_HPET_H
@@ -40,8 +40,8 @@ struct gdt_extended {
ALIGNED (16) static volatile uint8_t kernel_stack[KSTACK_SIZE];
ALIGNED (16) static volatile struct gdt_extended gdt;

static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base,
                           uint32_t limit, uint8_t acc, uint8_t gran) {
static void amd64_gdt_set (volatile struct gdt_entry* ent, uint32_t base, uint32_t limit,
                           uint8_t acc, uint8_t gran) {
  ent->baselow = (base & 0xFFFF);
  ent->basemid = (base >> 16) & 0xFF;
  ent->basehigh = (base >> 24) & 0xFF;
@@ -68,8 +68,7 @@ static void amd64_gdt_init (void) {
  amd64_gdt_set (&gdt.old[2], 0, 0xFFFFF, 0x92, 0xC0);
  amd64_gdt_set (&gdt.old[3], 0, 0xFFFFF, 0xFA, 0xA0);
  amd64_gdt_set (&gdt.old[4], 0, 0xFFFFF, 0xF2, 0xC0);
  amd64_gdt_set (&gdt.tsslow, (tssbase & 0xFFFFFFFF), tsslimit,
                 TSS_PRESENT | TSS, 0);
  amd64_gdt_set (&gdt.tsslow, (tssbase & 0xFFFFFFFF), tsslimit, TSS_PRESENT | TSS, 0);

  uint32_t tssbasehigh = (tssbase >> 32);
  gdt.tsshigh.limitlow = (tssbasehigh & 0xFFFF);
@@ -77,8 +77,7 @@ static void amd64_init_pic (void) {
#undef IO_OP
}

static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler,
                           uint8_t flags) {
static void amd64_idt_set (volatile struct idt_entry* ent, uint64_t handler, uint8_t flags) {
  ent->intrlow = (handler & 0xFFFF);
  ent->kernel_cs = 0x08; // GDT_KCODE (init.c)
  ent->ist = 0;
@@ -167,10 +166,9 @@ static void amd64_intr_exception (struct saved_regs* regs) {
         "err=%016lx rip=%016lx cs =%016lx\n"
         "rfl=%016lx rsp=%016lx ss =%016lx\n"
         "cr2=%016lx cr3=%016lx rbx=%016lx\n",
         regs->r15, regs->r14, regs->r13, regs->r12, regs->r11, regs->r10,
         regs->r9, regs->r8, regs->rbp, regs->rdi, regs->rsi, regs->rdx,
         regs->rcx, regs->rax, regs->trap, regs->error, regs->rip,
         regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
         regs->r15, regs->r14, regs->r13, regs->r12, regs->r11, regs->r10, regs->r9, regs->r8,
         regs->rbp, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->rax, regs->trap,
         regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
         regs->rbx);

  amd64_spin ();
@@ -14,10 +14,7 @@ void amd64_io_outl (uint16_t port, uint32_t v) {
}

void amd64_io_outsw (uint16_t port, const void* addr, int cnt) {
  __asm__ volatile ("cld; rep outsw"
                    : "+S"(addr), "+c"(cnt)
                    : "d"(port)
                    : "memory", "cc");
  __asm__ volatile ("cld; rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}

uint8_t amd64_io_inb (uint16_t port) {
@@ -39,10 +36,7 @@ uint32_t amd64_io_inl (uint16_t port) {
}

void amd64_io_insw (uint16_t port, void* addr, int cnt) {
  __asm__ volatile ("cld; rep insw"
                    : "+D"(addr), "+c"(cnt)
                    : "d"(port)
                    : "memory", "cc");
  __asm__ volatile ("cld; rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory", "cc");
}

void amd64_io_wait (void) { amd64_io_outb (0x80, 0); }
kernel/amd64/mm.c (new file, 161 lines)
@@ -0,0 +1,161 @@
#include <aux/compiler.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/pmm.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>

#define AMD64_PG_PRESENT (1 << 0)
#define AMD64_PG_RW (1 << 1)
#define AMD64_PG_USER (1 << 2)

#define AMD64_PG_TABLE_ENTRIES_MAX 512

struct pg_index {
  uint16_t pml4, pml3, pml2, pml1;
} PACKED;

struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};

static uintptr_t amd64_current_cr3 (void) {
  uintptr_t cr3;
  __asm__ volatile ("movq %%cr3, %0" : "=r"(cr3)::"memory");
  return cr3;
}

static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
  struct pg_index ret;

  ret.pml4 = ((vaddr >> 39) & 0x1FF);
  ret.pml3 = ((vaddr >> 30) & 0x1FF);
  ret.pml2 = ((vaddr >> 21) & 0x1FF);
  ret.pml1 = ((vaddr >> 12) & 0x1FF);

  return ret;
}

static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
  uint64_t entry = table[entry_idx];
  uint64_t paddr;

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  if (entry & AMD64_PG_PRESENT)
    paddr = entry & ~0xFFFULL;
  else {
    if (!alloc)
      return NULL;

    paddr = pmm_alloc (1);

    if (paddr == 0)
      return NULL;

    memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
    table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW;
  }

  return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
}

static uint64_t amd64_mm_resolve_flags (uint32_t generic) {
  uint64_t flags = 0;

  flags |= ((generic & MM_PG_PRESENT) ? AMD64_PG_PRESENT : 0);
  flags |= ((generic & MM_PG_RW) ? AMD64_PG_RW : 0);
  flags |= ((generic & MM_PG_USER) ? AMD64_PG_USER : 0);

  return flags;
}

static void amd64_reload_cr3 (void) {
  uint64_t cr3;
  __asm__ volatile ("movq %%cr3, %0; movq %0, %%cr3" : "=r"(cr3)::"memory");
}

void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  bool do_reload = false;

  if (flags & MM_PD_LOCK)
    spin_lock (&pd->lock);

  uint64_t amd64_flags = amd64_mm_resolve_flags (flags);

  uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
  struct pg_index pg_index = amd64_mm_page_index (vaddr);

  uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
  if (pml3 == NULL)
    goto done;

  uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
  if (pml2 == NULL)
    goto done;

  uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
  if (pml1 == NULL)
    goto done;

  uint64_t* pte = &pml1[pg_index.pml1];

  *pte = ((paddr & ~0xFFFULL) | (amd64_flags & 0x7ULL));
  do_reload = true;

done:
  if (do_reload)
    amd64_reload_cr3 ();

  if (flags & MM_PD_LOCK)
    spin_unlock (&pd->lock);
}

void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
  mm_map_page (&kernel_pd, paddr, vaddr, flags);
}

void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  bool do_reload = false;

  if (flags & MM_PD_LOCK)
    spin_lock (&pd->lock);

  uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
  struct pg_index pg_index = amd64_mm_page_index (vaddr);

  uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
  if (pml3 == NULL)
    goto done;

  uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
  if (pml2 == NULL)
    goto done;

  uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
  if (pml1 == NULL)
    goto done;

  uint64_t* pte = &pml1[pg_index.pml1];

  *pte &= ~AMD64_PG_PRESENT;
  do_reload = true;

done:
  if (do_reload)
    amd64_reload_cr3 ();

  if (flags & MM_PD_LOCK)
    spin_unlock (&pd->lock);
}

void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
  mm_unmap_page (&kernel_pd, vaddr, flags);
}

void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }

void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }

void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }
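For orientation, a hypothetical mapping call through the generic interface declared in kernel/sys/mm.h (shown further down); it is illustrative only and not part of this commit. The physical address argument and the choice of flags are made up for the example: map one 4 KiB frame at its higher-half direct-map alias with the page-directory lock held, then unmap it again.

```c
#include <limine/requests.h>
#include <sys/mm.h>

/* Hypothetical helper; frame_paddr is assumed to be a page-aligned frame
 * previously obtained from the PMM. */
static void example_map_one_frame (uintptr_t frame_paddr) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t vaddr = (uintptr_t)hhdm->offset + frame_paddr;

  /* MM_PD_LOCK asks mm_map_page/mm_unmap_page to take kernel_pd.lock themselves. */
  mm_map_kernel_page (frame_paddr, vaddr, MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK);
  /* ... use the mapping ... */
  mm_unmap_kernel_page (vaddr, MM_PD_LOCK);
}
```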
@@ -1,6 +1,14 @@
#ifndef _KERNEL_AMD64_MM_H
#define _KERNEL_AMD64_MM_H

#include <libk/std.h>
#include <sync/spin_lock.h>

#define PAGE_SIZE 4096

struct pd {
  spin_lock_t lock;
  uintptr_t cr3_paddr;
};

#endif // _KERNEL_AMD64_MM_H
kernel/amd64/msr.c (new file, 14 lines)
@@ -0,0 +1,14 @@
#include <amd64/msr.h>
#include <libk/std.h>

uint64_t amd64_rdmsr (uint32_t msr) {
  uint32_t low, high;
  __asm__ volatile ("rdmsr" : "=a"(low), "=d"(high) : "c"(msr));
  return ((uint64_t)high << 32 | (uint64_t)low);
}

void amd64_wrmsr (uint32_t msr, uint64_t value) {
  uint32_t low = (uint32_t)(value & 0xFFFFFFFF);
  uint32_t high = (uint32_t)(value >> 32);
  __asm__ volatile ("wrmsr" ::"c"(msr), "a"(low), "d"(high));
}
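A brief sketch of the MSR helpers in use (not part of the commit): read IA32_APIC_BASE and write the same value back. MSR 0x1B is the architectural IA32_APIC_BASE number rather than a constant defined anywhere in this tree.

```c
#include <amd64/msr.h>

#define IA32_APIC_BASE 0x1B /* architectural MSR number; not defined by this commit */

/* Hypothetical example; writing the value back unchanged is a no-op and is
 * shown only to illustrate amd64_wrmsr. */
static uint64_t example_read_apic_base (void) {
  uint64_t apic_base = amd64_rdmsr (IA32_APIC_BASE);
  amd64_wrmsr (IA32_APIC_BASE, apic_base);
  return apic_base;
}
```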
kernel/amd64/msr.h (new file, 9 lines)
@@ -0,0 +1,9 @@
#ifndef _KERNEL_AMD64_MSR_H
#define _KERNEL_AMD64_MSR_H

#include <libk/std.h>

uint64_t amd64_rdmsr (uint32_t msr);
void amd64_wrmsr (uint32_t msr, uint64_t value);

#endif // _KERNEL_AMD64_MSR_H
@@ -4,7 +4,11 @@ c += amd64/bootmain.c \
     amd64/io.c \
     amd64/debug.c \
     amd64/spin_lock.c \
     amd64/intr.c
     amd64/intr.c \
     amd64/apic.c \
     amd64/msr.c \
     amd64/hpet.c \
     amd64/mm.c

S += amd64/intr_stub.S \
     amd64/spin.S
@@ -17,4 +21,8 @@ o += amd64/bootmain.o \
     amd64/spin_lock.o \
     amd64/intr.o \
     amd64/intr_stub.o \
     amd64/spin.o
     amd64/spin.o \
     amd64/apic.o \
     amd64/msr.o \
     amd64/hpet.o \
     amd64/mm.o
@@ -3,8 +3,8 @@

#define DECL_REQ(small, big) \
  SECTION (".limine_requests") \
  struct limine_##small##_request limine_##small##_request = { \
      .id = LIMINE_##big##_REQUEST_ID, .revision = 4}
  struct limine_##small##_request limine_##small##_request = {.id = LIMINE_##big##_REQUEST_ID, \
                                                              .revision = 4}

SECTION (".limine_requests")
volatile uint64_t limine_base_revision[] = LIMINE_BASE_REVISION (4);
@@ -3,8 +3,7 @@

#include <limine/limine.h>

#define EXTERN_REQ(small) \
  extern struct limine_##small##_request limine_##small##_request
#define EXTERN_REQ(small) extern struct limine_##small##_request limine_##small##_request

EXTERN_REQ (hhdm);
EXTERN_REQ (memmap);
@@ -94,8 +94,7 @@ static inline int getexp (unsigned int size) {
  }

#ifdef DEBUG
  printf ("getexp returns %i (%i bytes) for %i size\n", shift - 1,
          (1 << (shift - 1)), size);
  printf ("getexp returns %i (%i bytes) for %i size\n", shift - 1, (1 << (shift - 1)), size);
#endif

  return shift - 1;
@@ -223,12 +222,10 @@ static inline struct boundary_tag* absorb_right (struct boundary_tag* tag) {
}

static inline struct boundary_tag* split_tag (struct boundary_tag* tag) {
  unsigned int remainder =
      tag->real_size - sizeof (struct boundary_tag) - tag->size;
  unsigned int remainder = tag->real_size - sizeof (struct boundary_tag) - tag->size;

  struct boundary_tag* new_tag =
      (struct boundary_tag*)((uintptr_t)tag + sizeof (struct boundary_tag) +
                             tag->size);
      (struct boundary_tag*)((uintptr_t)tag + sizeof (struct boundary_tag) + tag->size);

  new_tag->magic = LIBALLOC_MAGIC;
  new_tag->real_size = remainder;
@@ -285,8 +282,8 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
  tag->split_right = NULL;

#ifdef DEBUG
  printf ("Resource allocated %x of %i pages (%i bytes) for %i size.\n", tag,
          pages, pages * l_pageSize, size);
  printf ("Resource allocated %x of %i pages (%i bytes) for %i size.\n", tag, pages,
          pages * l_pageSize, size);
#endif

  l_allocated += pages * l_pageSize;
@@ -322,11 +319,9 @@ void* malloc (size_t size) {
  tag = l_freePages[index]; // Start at the front of the list.
  while (tag != NULL) {
    // If there's enough space in this tag.
    if ((tag->real_size - sizeof (struct boundary_tag)) >=
        (size + sizeof (struct boundary_tag))) {
    if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
#ifdef DEBUG
      printf ("Tag search found %i >= %i\n",
              (tag->real_size - sizeof (struct boundary_tag)),
      printf ("Tag search found %i >= %i\n", (tag->real_size - sizeof (struct boundary_tag)),
              (size + sizeof (struct boundary_tag)));
#endif
      break;
@@ -363,17 +358,16 @@ void* malloc (size_t size) {
          tag->real_size - size - sizeof (struct boundary_tag), index, 1 << index);
#endif

  unsigned int remainder = tag->real_size - size -
                           sizeof (struct boundary_tag) * 2; // Support a new tag + remainder
  unsigned int remainder =
      tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder

  if (((int)(remainder) >
       0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
  if (((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
    int childIndex = getexp (remainder);

    if (childIndex >= 0) {
#ifdef DEBUG
      printf ("Seems to be splittable: %i >= 2^%i .. %i\n", remainder,
              childIndex, (1 << childIndex));
      printf ("Seems to be splittable: %i >= 2^%i .. %i\n", remainder, childIndex,
              (1 << childIndex));
#endif

      struct boundary_tag* new_tag = split_tag (tag);
@@ -381,8 +375,8 @@ void* malloc (size_t size) {
      (void)new_tag;

#ifdef DEBUG
      printf ("Old tag has become %i bytes, new tag is now %i bytes (%i exp)\n",
              tag->real_size, new_tag->real_size, new_tag->index);
      printf ("Old tag has become %i bytes, new tag is now %i bytes (%i exp)\n", tag->real_size,
              new_tag->real_size, new_tag->index);
#endif
    }
  }
@@ -391,8 +385,7 @@ void* malloc (size_t size) {

#ifdef DEBUG
  l_inuse += size;
  printf ("malloc: %x, %i, %i\n", ptr, (int)l_inuse / 1024,
          (int)l_allocated / 1024);
  printf ("malloc: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024);
  dump_array ();
#endif

@@ -418,15 +411,13 @@ void free (void* ptr) {

#ifdef DEBUG
  l_inuse -= tag->size;
  printf ("free: %x, %i, %i\n", ptr, (int)l_inuse / 1024,
          (int)l_allocated / 1024);
  printf ("free: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024);
#endif

  // MELT LEFT...
  while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
#ifdef DEBUG
    printf (
        "Melting tag left into available memory. Left was %i, becomes %i (%i)\n",
    printf ("Melting tag left into available memory. Left was %i, becomes %i (%i)\n",
            tag->split_left->real_size, tag->split_left->real_size + tag->real_size,
            tag->split_left->real_size);
#endif
@@ -437,8 +428,7 @@ void free (void* ptr) {
  // MELT RIGHT...
  while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
#ifdef DEBUG
    printf (
        "Melting tag right into available memory. This was was %i, becomes %i (%i)\n",
    printf ("Melting tag right into available memory. This was was %i, becomes %i (%i)\n",
            tag->real_size, tag->split_right->real_size + tag->real_size,
            tag->split_right->real_size);
#endif
@@ -481,8 +471,7 @@ void free (void* ptr) {
  insert_tag (tag, index);

#ifdef DEBUG
  printf (
      "Returning tag with %i bytes (requested %i bytes), which has exponent: %i\n",
  printf ("Returning tag with %i bytes (requested %i bytes), which has exponent: %i\n",
          tag->real_size, tag->size, index);
  dump_array ();
#endif

@@ -31,8 +31,8 @@ void pmm_init (void) {
        "framebuffer",
        "acpi tables"};

    DEBUG ("memmap entry: %-25s %p (%zu bytes)\n", entry_strings[entry->type],
           entry->base, entry->length);
    DEBUG ("memmap entry: %-25s %p (%zu bytes)\n", entry_strings[entry->type], entry->base,
           entry->length);

    if (entry->type == LIMINE_MEMMAP_USABLE && region < PMM_REGIONS_MAX) {
      struct pmm_region* pmm_region = &pmm.regions[region];
@@ -87,8 +87,7 @@ void pmm_init (void) {
 * Find free space for a block range. For every bit of the bitmap, we test nblks bits forward.
 * bm_test_region helps us out, because it automatically does range checks. See comments there.
 */
static size_t pmm_find_free_space (struct pmm_region* pmm_region,
                                   size_t nblks) {
static size_t pmm_find_free_space (struct pmm_region* pmm_region, size_t nblks) {
  for (size_t bit = 0; bit < pmm_region->bm.nbits; bit++) {
    if (bm_test_region (&pmm_region->bm, bit, nblks)) {
      continue;
@@ -140,8 +139,7 @@ void pmm_free (physaddr_t p_addr, size_t nblks) {
      continue;

    /* If aligned_p_addr is within the range if this region, it belongs to it. */
    if (aligned_p_addr >= pmm_region->membase &&
        aligned_p_addr < pmm_region->size) {
    if (aligned_p_addr >= pmm_region->membase && aligned_p_addr < pmm_region->size) {
      physaddr_t addr = aligned_p_addr - pmm_region->membase;

      size_t bit = div_align_up (addr, PAGE_SIZE);
@@ -7,6 +7,4 @@ void spin_lock (spin_lock_t* sl) {
    spin_lock_relax ();
}

void spin_unlock (spin_lock_t* sl) {
  atomic_flag_clear_explicit (sl, memory_order_release);
}
void spin_unlock (spin_lock_t* sl) { atomic_flag_clear_explicit (sl, memory_order_release); }
@@ -1,8 +1,23 @@
#ifndef _KERNEL_SYS_MM_H
#define _KERNEL_SYS_MM_H

#include <libk/std.h>

#if defined(__x86_64__)
#include <amd64/mm.h>
#endif

#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
#define MM_PD_LOCK (1 << 31)

void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags);
void mm_lock_kernel (void);
void mm_unlock_kernel (void);
void mm_init (void);

#endif // _KERNEL_SYS_MM_H
@@ -1,7 +1,9 @@
#include <libk/align.h>
#include <libk/std.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <uacpi/kernel_api.h>
#include <uacpi/status.h>

@@ -9,19 +11,23 @@ uacpi_status uacpi_kernel_get_rsdp (uacpi_phys_addr* out_rsdp_address) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  struct limine_rsdp_response* rsdp = limine_rsdp_request.response;

  *out_rsdp_address =
      (uacpi_phys_addr)((uintptr_t)rsdp->address - (uintptr_t)hhdm->offset);
  *out_rsdp_address = (uacpi_phys_addr)((uintptr_t)rsdp->address - (uintptr_t)hhdm->offset);

  return UACPI_STATUS_OK;
}

void* uacpi_kernel_map (uacpi_phys_addr addr, uacpi_size len) {
  (void)len;

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  return (void*)((uintptr_t)hhdm->offset + (uintptr_t)addr);
}

void uacpi_kernel_unmap (void* addr, uacpi_size len) { (void)addr, (void)len; }
void uacpi_kernel_unmap (void* addr, uacpi_size len) {
  (void)addr;
  (void)len;
}

void uacpi_kernel_log (uacpi_log_level level, const uacpi_char* msg) {
  const char* prefix = NULL;