Run first app from ramdisk!

This commit is contained in:
2025-12-29 23:54:21 +01:00
parent c16170e4c2
commit fa7998c323
56 changed files with 5443 additions and 229 deletions

View File

@@ -1,4 +1,5 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
@@ -14,10 +15,10 @@
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24
#define LAPIC_ID 0x20 /* ID */
#define LAPIC_EOI 0xB0 /* End of interrupt */
#define LAPIC_SIVR 0xF0 /* Spurious interrupt vector register */
#define LAPIC_ICR 0x300
#define LAPIC_ID 0x20 /* ID */
#define LAPIC_EOI 0xB0 /* End of interrupt */
#define LAPIC_SIVR 0xF0 /* Spurious interrupt vector register */
#define LAPIC_ICR 0x300 /* Interrupt command register */
#define LAPIC_LVTTR 0x320 /* LVT timer register */
#define LAPIC_TIMICT 0x380 /* Initial count register */
#define LAPIC_TIMCCT 0x390 /* Current count register */
@@ -60,7 +61,7 @@ static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
}
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
struct acpi_madt_ioapic* apic;
struct acpi_madt_ioapic* apic = NULL;
struct acpi_madt_interrupt_source_override* override;
bool found_override = false;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -155,7 +156,7 @@ void amd64_ioapic_init (void) {
struct acpi_madt_ioapic* ioapic = (struct acpi_madt_ioapic*)current;
mm_map_kernel_page ((uintptr_t)ioapic->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic->address,
MM_PG_PRESENT | MM_PG_RW);
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
apics[ioapic_entries++] = *ioapic;
} break;
case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
@@ -188,7 +189,7 @@ void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick);
static uint32_t amd64_lapic_calibrate (uint32_t us) {
amd64_lapic_write (LAPIC_DCR, 0x03);
amd64_lapic_write (LAPIC_LVTTR, 0x20 | (1 << 16));
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);
@@ -202,7 +203,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_DCR, 0x03);
amd64_lapic_write (LAPIC_LVTTR, 0x20 | (1 << 17));
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
amd64_lapic_write (LAPIC_TIMICT, ticks);
}
@@ -215,7 +216,8 @@ uint64_t amd64_lapic_init (uint32_t us) {
uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
mm_map_kernel_page (lapic_paddr, lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK);
mm_map_kernel_page (lapic_paddr, lapic_mmio_base,
MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
@@ -225,3 +227,8 @@ uint64_t amd64_lapic_init (uint32_t us) {
return ticks;
}
/*
 * Send an inter-processor interrupt with vector `vec` to the CPU whose
 * LAPIC id is `lapic_id`. The destination field lives in the high dword
 * of the ICR (offset +0x10) and must be written first; writing the low
 * dword (the vector) triggers delivery.
 */
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
amd64_lapic_write (LAPIC_ICR + 0x10, (lapic_id << 24));
amd64_lapic_write (LAPIC_ICR, vec);
}

View File

@@ -11,6 +11,7 @@ void amd64_ioapic_init (void);
uint32_t amd64_lapic_id (void);
void amd64_lapic_tick (uint32_t tick);
void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec);
uint64_t amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H

View File

@@ -2,11 +2,15 @@
#include <amd64/debug.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
@@ -17,11 +21,6 @@
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
void ack (void* arg) {
(void)arg;
debugprintf (". %u\n", thiscpu->id);
}
void bootmain (void) {
struct cpu* bsp_cpu = cpu_make ();
cpu_assign (bsp_cpu->id);
@@ -31,6 +30,8 @@ void bootmain (void) {
pmm_init ();
mm_init ();
rd_init ();
uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer));
amd64_ioapic_init ();
@@ -38,10 +39,16 @@ void bootmain (void) {
smp_init ();
irq_attach (&ack, NULL, 32 + 0);
/* busy wait for cpus to come online */
for (volatile int i = 0; i < INT_MAX; i++)
;
mm_init2 ();
__asm__ volatile ("sti");
proc_init ();
for (;;)
;
}

View File

@@ -3,8 +3,9 @@
#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>
#define KSTACK_SIZE (16 * 1024)
#define KSTACK_SIZE (32 * 1024)
struct gdt_entry {
uint16_t limitlow;

View File

@@ -75,7 +75,8 @@ void amd64_hpet_init (void) {
hpet_paddr = (uintptr_t)hpet->address.address;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1;

View File

@@ -1,5 +1,6 @@
#include <amd64/apic.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
@@ -100,54 +101,23 @@ static void amd64_idt_init (void) {
#define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
IDT_ENTRY (0, 0);
IDT_ENTRY (1, 0);
IDT_ENTRY (2, 0);
IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0);
IDT_ENTRY (5, 0);
IDT_ENTRY (6, 0);
IDT_ENTRY (7, 0);
IDT_ENTRY (8, 0);
IDT_ENTRY (9, 0);
IDT_ENTRY (10, 0);
IDT_ENTRY (11, 0);
IDT_ENTRY (12, 0);
IDT_ENTRY (13, 0);
IDT_ENTRY (14, 0);
IDT_ENTRY (15, 0);
IDT_ENTRY (16, 0);
IDT_ENTRY (17, 0);
IDT_ENTRY (18, 0);
IDT_ENTRY (19, 0);
IDT_ENTRY (20, 0);
IDT_ENTRY (21, 0);
IDT_ENTRY (22, 0);
IDT_ENTRY (23, 0);
IDT_ENTRY (24, 0);
IDT_ENTRY (25, 0);
IDT_ENTRY (26, 0);
IDT_ENTRY (27, 0);
IDT_ENTRY (28, 0);
IDT_ENTRY (29, 0);
IDT_ENTRY (30, 0);
IDT_ENTRY (31, 0);
IDT_ENTRY (32, 1);
IDT_ENTRY (33, 1);
IDT_ENTRY (34, 1);
IDT_ENTRY (35, 1);
IDT_ENTRY (36, 1);
IDT_ENTRY (37, 1);
IDT_ENTRY (38, 1);
IDT_ENTRY (39, 1);
IDT_ENTRY (40, 1);
IDT_ENTRY (41, 1);
IDT_ENTRY (42, 1);
IDT_ENTRY (43, 1);
IDT_ENTRY (44, 1);
IDT_ENTRY (45, 1);
IDT_ENTRY (46, 1);
IDT_ENTRY (47, 1);
/* clang-format off */
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
IDT_ENTRY (8, 0); IDT_ENTRY (9, 0); IDT_ENTRY (10, 0); IDT_ENTRY (11, 0);
IDT_ENTRY (12, 0); IDT_ENTRY (13, 0); IDT_ENTRY (14, 0); IDT_ENTRY (15, 0);
IDT_ENTRY (16, 0); IDT_ENTRY (17, 0); IDT_ENTRY (18, 0); IDT_ENTRY (19, 0);
IDT_ENTRY (20, 0); IDT_ENTRY (21, 0); IDT_ENTRY (22, 0); IDT_ENTRY (23, 0);
IDT_ENTRY (24, 0); IDT_ENTRY (25, 0); IDT_ENTRY (26, 0); IDT_ENTRY (27, 0);
IDT_ENTRY (28, 0); IDT_ENTRY (29, 0); IDT_ENTRY (30, 0); IDT_ENTRY (31, 0);
IDT_ENTRY (32, 1); IDT_ENTRY (33, 1); IDT_ENTRY (34, 1); IDT_ENTRY (35, 1);
IDT_ENTRY (36, 1); IDT_ENTRY (37, 1); IDT_ENTRY (38, 1); IDT_ENTRY (39, 1);
IDT_ENTRY (40, 1); IDT_ENTRY (41, 1); IDT_ENTRY (42, 1); IDT_ENTRY (43, 1);
IDT_ENTRY (44, 1); IDT_ENTRY (45, 1); IDT_ENTRY (46, 1); IDT_ENTRY (47, 1);
IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
IDT_ENTRY (TLB_SHOOTDOWN, 1);
/* clang-format on */
#undef IDT_ENTRY
idt.limit = sizeof (idt_entries) - 1;
@@ -177,7 +147,11 @@ static void amd64_intr_exception (struct saved_regs* regs) {
regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
regs->rbx);
amd64_spin ();
if (regs->cs == (0x18 | 0x03)) {
proc_kill (thiscpu->proc_current);
} else {
amd64_spin ();
}
}
void amd64_intr_handler (void* stack_ptr) {
@@ -185,16 +159,20 @@ void amd64_intr_handler (void* stack_ptr) {
if (regs->trap <= 31) {
amd64_intr_exception (regs);
} else if (regs->trap >= 32) {
} else {
amd64_lapic_eoi ();
__asm__ volatile ("sti");
struct irq* irq = irq_find (regs->trap);
irq_invoke_each (regs->trap);
if (irq != NULL) {
if (!(irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("sti");
__asm__ volatile ("cli");
} else {
DEBUG ("unknown trap %lu\n", regs->trap);
irq->func (irq->arg, stack_ptr);
if (!(irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("cli");
}
}
}
@@ -235,3 +213,12 @@ void irq_restore (void) {
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
/*
 * Translate an interrupt vector number into the IOAPIC GSI it should be
 * routed from. Legacy ISA vectors (32-47) map linearly onto GSIs 0-15,
 * matching the old `irq_num - 0x20` behavior; the software-defined
 * vectors use a fixed lookup table.
 */
uint8_t amd64_resolve_irq (uint8_t irq) {
  /* Legacy ISA remap: vector 32 -> GSI 0, ..., vector 47 -> GSI 15.
   * The table below is zero-filled in this range, which would wrongly
   * collapse every legacy vector onto GSI 0. */
  if (irq >= 32 && irq <= 47)
    return irq - 32;
  static const uint8_t mappings[] = {
    [SCHED_PREEMPT_TIMER] = 0,
    [TLB_SHOOTDOWN] = 1,
  };
  /* Bound-check instead of indexing past the table for unknown vectors. */
  if (irq < sizeof (mappings) / sizeof (mappings[0]))
    return mappings[irq];
  return 0;
}

View File

@@ -32,6 +32,7 @@ struct saved_regs {
} PACKED;
void amd64_load_idt (void);
uint8_t amd64_resolve_irq (uint8_t irq);
void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H

7
kernel/amd64/intr_defs.h Normal file
View File

@@ -0,0 +1,7 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H
/* Software-assigned interrupt vector numbers, above the legacy 32-47 range. */
#define SCHED_PREEMPT_TIMER 80 /* LAPIC timer vector driving preemptive scheduling */
#define TLB_SHOOTDOWN 81 /* IPI vector for cross-CPU TLB invalidation */
#endif // _KERNEL_AMD64_INTR_DEFS_H

View File

@@ -1,3 +1,5 @@
#include <amd64/intr_defs.h>
.extern amd64_intr_handler
#define err(z) \
@@ -108,3 +110,6 @@ make_intr_stub(no_err, 44)
make_intr_stub(no_err, 45)
make_intr_stub(no_err, 46)
make_intr_stub(no_err, 47)
make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN)

View File

@@ -1,22 +1,27 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/pmm.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#define AMD64_PG_PRESENT (1 << 0)
#define AMD64_PG_RW (1 << 1)
#define AMD64_PG_USER (1 << 2)
#define AMD64_PG_TABLE_ENTRIES_MAX 512
struct pg_index {
uint16_t pml4, pml3, pml2, pml1;
} PACKED;
struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
/* This is needed to sync between map/unmap operations and TLB shootdown. */
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
static uintptr_t amd64_current_cr3 (void) {
uintptr_t cr3;
@@ -41,7 +46,7 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
uint64_t entry = table[entry_idx];
uint64_t paddr;
physaddr_t paddr;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -53,11 +58,11 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool
paddr = pmm_alloc (1);
if (paddr == 0)
if (paddr == PMM_ALLOC_ERR)
return NULL;
memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW;
table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW | AMD64_PG_USER;
}
return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
@@ -79,6 +84,8 @@ static void amd64_reload_cr3 (void) {
}
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
@@ -108,11 +115,13 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
do_reload = true;
done:
if (do_reload)
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
}
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
@@ -120,6 +129,8 @@ void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
}
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
@@ -129,15 +140,15 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
@@ -147,11 +158,13 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
do_reload = true;
done:
if (do_reload)
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
}
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
@@ -162,4 +175,44 @@ void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
uintptr_t mm_alloc_user_pd_phys (void) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
physaddr_t cr3 = pmm_alloc (1);
if (cr3 == PMM_ALLOC_ERR)
return 0;
uint8_t* vu_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + cr3);
memset ((void*)vu_cr3, 0, PAGE_SIZE / 2);
uint8_t* vk_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + (uintptr_t)kernel_pd.cr3_paddr);
memcpy (&vu_cr3[PAGE_SIZE / 2], &vk_cr3[PAGE_SIZE / 2], PAGE_SIZE / 2);
return cr3;
}
/*
 * Broadcast a TLB shootdown IPI to every CPU reported by the bootloader.
 * Holding mm_lock serializes this against concurrent map/unmap calls.
 */
void mm_reload (void) {
  spin_lock (&mm_lock);
  struct limine_mp_response* mp = limine_mp_request.response;
  size_t cpu;
  for (cpu = 0; cpu < mp->cpu_count; cpu++)
    amd64_lapic_ipi (mp->cpus[cpu]->lapic_id, TLB_SHOOTDOWN);
  spin_unlock (&mm_lock);
}
/* TLB shootdown IPI handler: reloads CR3 to flush this CPU's TLB. */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
  (void)arg;
  (void)regs;
  amd64_reload_cr3 ();
  DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
}
/* Second-stage MM init: hook the TLB shootdown vector. Separate from
 * mm_init because the IRQ layer must be up first. Marked interrupt-safe
 * so the handler runs with interrupts masked. */
void mm_init2 (void) {
irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
}
/* Capture the bootloader-provided CR3 as the kernel page directory root. */
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -12,5 +12,6 @@ struct pd {
};
void amd64_load_kernel_cr3 (void);
void mm_init2 (void);
#endif // _KERNEL_AMD64_MM_H

17
kernel/amd64/proc.h Normal file
View File

@@ -0,0 +1,17 @@
#ifndef _KERNEL_AMD64_PROC_H
#define _KERNEL_AMD64_PROC_H
#include <amd64/intr.h>
#include <libk/std.h>
/* Top of the user stack mapping (highest canonical user address, page-aligned). */
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
/* Size of each process' user stack. */
#define USTACK_SIZE (256 * PAGE_SIZE)
/* amd64-specific per-process state. */
struct proc_platformdata {
struct saved_regs regs; /* saved register frame restored by amd64_do_sched */
uintptr_t syscall_stack; /* top of the kernel stack used on syscall entry */
uintptr_t user_stack; /* top of the physical backing for the user stack */
uint64_t fsbase; /* saved FS.base (TLS pointer) — presumably per-thread; confirm usage */
};
#endif // _KERNEL_AMD64_PROC_H

24
kernel/amd64/sched.S Normal file
View File

@@ -0,0 +1,24 @@
/*
 * pop_regs: restore the 15 general-purpose registers in the reverse of
 * the order the interrupt entry path pushed them (r15 popped first).
 */
#define pop_regs \
popq %r15; \
popq %r14; \
popq %r13; \
popq %r12; \
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rbx; \
popq %rbp; \
popq %rdi; \
popq %rsi; \
popq %rdx; \
popq %rcx; \
popq %rax;
.global amd64_do_sched
/*
 * amd64_do_sched(regs in %rdi, cr3 in %rsi):
 * switch address space, point the stack at the saved register frame,
 * restore the GPRs, skip the trap-number/error-code slots, and iretq
 * into the target context. Does not return.
 */
amd64_do_sched:
movq %rsi, %cr3 /* switch to the target page tables */
movq %rdi, %rsp /* rsp -> saved_regs frame */
pop_regs
add $16, %rsp /* discard trap number and error code */
iretq

6
kernel/amd64/sched.h Normal file
View File

@@ -0,0 +1,6 @@
#ifndef _KERNEL_AMD64_SCHED_H
#define _KERNEL_AMD64_SCHED_H
/* Enter a process context: `regs` points at a saved_regs frame, `cr3` is
 * the physical address of the target page directory. Does not return. */
void amd64_do_sched (void* regs, void* cr3);
#endif // _KERNEL_AMD64_SCHED_H

4
kernel/amd64/sched1.c Normal file
View File

@@ -0,0 +1,4 @@
#include <amd64/sched.h>
#include <sys/mm.h>
/* Arch bridge: hand the generic scheduler's frame + page directory to the
 * amd64 context-switch stub, passing the raw CR3 physical address. */
void do_sched (void* regs, struct pd* pd) { amd64_do_sched (regs, (void*)pd->cr3_paddr); }

View File

@@ -23,6 +23,7 @@ struct cpu* cpu_make (void) {
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
return cpu_get (id);

View File

@@ -5,21 +5,31 @@
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>
#define CPUS_MAX 32
/* Per-CPU state; one entry per logical CPU. */
struct cpu {
uint64_t lapic_ticks; /* LAPIC timer tick count from calibration (amd64_lapic_init) */
uint32_t id; /* logical CPU index, assigned by cpu_make */
struct {
uint64_t rflags; /* RFLAGS saved by irq_save for the outermost nesting level */
atomic_int nesting; /* irq_save/irq_restore nesting depth */
} irq_ctx;
uint8_t user_stack[USTACK_SIZE] ALIGNED (16);
volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16); /* presumably the IST stack for exceptions — confirm against TSS setup */
volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16); /* presumably the IST stack for external interrupts — confirm against TSS setup */
volatile struct gdt_extended gdt ALIGNED (16);
volatile struct tss tss;
spin_lock_t lock; /* protects proc_run_q and proc_current */
struct proc* proc_run_q; /* per-CPU run queue (singly linked via proc->next) */
struct proc* proc_current; /* process currently selected on this CPU */
};
struct cpu* cpu_make (void);

View File

@@ -10,10 +10,12 @@ c += amd64/bootmain.c \
amd64/hpet.c \
amd64/mm.c \
amd64/time.c \
amd64/smp.c
amd64/smp.c \
amd64/sched1.c
S += amd64/intr_stub.S \
amd64/spin.S
amd64/spin.S \
amd64/sched.S
o += amd64/bootmain.o \
amd64/init.o \
@@ -29,4 +31,6 @@ o += amd64/bootmain.o \
amd64/hpet.o \
amd64/mm.o \
amd64/time.o \
amd64/smp.o
amd64/smp.o \
amd64/sched.o \
amd64/sched1.o

4548
kernel/aux/elf.h Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -6,6 +6,7 @@
#if defined(__x86_64__)
#include <amd64/apic.h>
#include <amd64/intr.h>
#endif
/* TODO: figure out a generic way to work with IRQs */
@@ -13,7 +14,7 @@
static struct irq* irqs = NULL;
static spin_lock_t irqs_lock;
bool irq_attach (void (*func) (void*), void* arg, uint32_t irq_num) {
bool irq_attach (void (*func) (void*, void*), void* arg, uint32_t irq_num, uint32_t flags) {
struct irq* irq = malloc (sizeof (*irq));
if (irq == NULL) {
return false;
@@ -22,19 +23,21 @@ bool irq_attach (void (*func) (void*), void* arg, uint32_t irq_num) {
irq->func = func;
irq->arg = arg;
irq->irq_num = irq_num;
irq->flags = flags;
spin_lock (&irqs_lock);
linklist_append (struct irq*, irqs, irq);
spin_unlock (&irqs_lock);
#if defined(__x86_64__)
amd64_ioapic_route_irq (irq_num, irq_num - 0x20, 0, amd64_lapic_id ());
uint8_t resolution = amd64_resolve_irq (irq_num);
amd64_ioapic_route_irq (irq_num, resolution, 0, amd64_lapic_id ());
#endif
return true;
}
void irq_detach (void (*func) (void*)) {
void irq_detach (void (*func) (void*, void*)) {
spin_lock (&irqs_lock);
struct irq *irq, *irq_tmp;
@@ -46,14 +49,18 @@ void irq_detach (void (*func) (void*)) {
spin_unlock (&irqs_lock);
}
void irq_invoke_each (uint32_t irq_num) {
struct irq* irq_find (uint32_t irq_num) {
spin_lock (&irqs_lock);
struct irq *irq, *irq_tmp;
linklist_foreach (irqs, irq, irq_tmp) {
if (irq->irq_num == irq_num)
irq->func (irq->arg);
if (irq->irq_num == irq_num) {
spin_unlock (&irqs_lock);
return irq;
}
}
spin_unlock (&irqs_lock);
return NULL;
}

View File

@@ -3,16 +3,21 @@
#include <libk/std.h>
#define IRQ_INTERRUPT_SAFE (1 << 0)
typedef void (*irq_func_t) (void* arg, void* regs);
struct irq {
struct irq* next;
void (*func) (void*);
irq_func_t func;
void* arg;
uint32_t irq_num;
uint32_t flags;
};
bool irq_attach (void (*func) (void*), void* arg, uint32_t irq_num);
void irq_detach (void (*func) (void*));
void irq_invoke_each (uint32_t irq_num);
bool irq_attach (irq_func_t, void* arg, uint32_t irq_num, uint32_t flags);
void irq_detach (irq_func_t func);
struct irq* irq_find (uint32_t irq_num);
#endif // _KERNEL_IRQ_IRQ_H

View File

@@ -45,16 +45,16 @@ bool bm_test (struct bm* bm, size_t k) {
* Set a range of bits in a bitmap. If the requested range falls outside the bitmap, we fail.
*/
bool bm_set_region (struct bm* bm, size_t k, size_t m) {
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
if (((k + m) > bm->nbits) || (k + m) < k)
return false;
for (size_t i = k; i < m; i++) {
for (size_t i = k; i < (k + m); i++) {
bool taken = bm_test (bm, i);
if (taken)
return false;
}
for (size_t i = k; i < m; i++)
for (size_t i = k; i < (k + m); i++)
bm_set (bm, i);
return true;
@@ -64,10 +64,10 @@ bool bm_set_region (struct bm* bm, size_t k, size_t m) {
* Clear a range of bits in a bitmap. The requested range must lie within the bitmap.
*/
void bm_clear_region (struct bm* bm, size_t k, size_t m) {
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
if (((k + m) > bm->nbits) || (k + m) < k)
return;
for (size_t i = k; i < m; i++)
for (size_t i = k; i < (k + m); i++)
bm_clear (bm, i);
}
@@ -78,10 +78,10 @@ void bm_clear_region (struct bm* bm, size_t k, size_t m) {
* useful for implementing the physical memory manager algorithm.
*/
bool bm_test_region (struct bm* bm, size_t k, size_t m) {
if ((k >= m) || (k >= bm->nbits) || (k + m >= bm->nbits))
if (((k + m) > bm->nbits) || (k + m) < k)
return true;
for (size_t i = k; i < m; i++) {
for (size_t i = k; i < (k + m); i++) {
bool test = bm_test (bm, i);
if (test)
return true;

View File

@@ -130,6 +130,66 @@
} \
} while (0)
/* Find the 0-based position of node `ele` in a doubly linked list headed by
 * `head`; `out_idx` receives -1 if the node is not present. Walks only the
 * `next` pointers, so it is identical to the singly-linked variant below and
 * kept separate for API symmetry. */
#define dlinklist_index_of(type, head, ele, out_idx) \
do { \
(out_idx) = -1; \
int __idx = 0; \
type __tmp = (head); \
while (__tmp != NULL) { \
if (__tmp == (ele)) { \
(out_idx) = __idx; \
break; \
} \
__tmp = __tmp->next; \
__idx++; \
} \
} while (0)
/* Find the 0-based position of the first node whose member `propname`
 * equals `propvalue` in a doubly linked list; `out_idx` receives -1 when no
 * node matches. */
#define dlinklist_index_of_prop(type, head, propname, propvalue, out_idx) \
do { \
(out_idx) = -1; \
int __idx = 0; \
type __tmp = (head); \
while (__tmp != NULL) { \
if (__tmp->propname == (propvalue)) { \
(out_idx) = __idx; \
break; \
} \
__tmp = __tmp->next; \
__idx++; \
} \
} while (0)
/* Singly-linked variant of dlinklist_index_of: position of node `ele`, or
 * -1 via `out_idx` if absent. */
#define linklist_index_of(type, head, ele, out_idx) \
do { \
(out_idx) = -1; \
int __idx = 0; \
type __tmp = (head); \
while (__tmp != NULL) { \
if (__tmp == (ele)) { \
(out_idx) = __idx; \
break; \
} \
__tmp = __tmp->next; \
__idx++; \
} \
} while (0)
/* Singly-linked variant of dlinklist_index_of_prop: position of the first
 * node whose `propname` equals `propvalue`, or -1 via `out_idx`. */
#define linklist_index_of_prop(type, head, propname, propvalue, out_idx) \
do { \
(out_idx) = -1; \
int __idx = 0; \
type __tmp = (head); \
while (__tmp != NULL) { \
if (__tmp->propname == (propvalue)) { \
(out_idx) = __idx; \
break; \
} \
__tmp = __tmp->next; \
__idx++; \
} \
} while (0)
#define linklist_append(type, head, new) \
do { \
if ((new) != NULL) { \

View File

@@ -19,3 +19,4 @@ DECL_REQ (hhdm, HHDM);
DECL_REQ (memmap, MEMMAP);
DECL_REQ (rsdp, RSDP);
DECL_REQ (mp, MP);
DECL_REQ (module, MODULE);

View File

@@ -9,5 +9,6 @@ EXTERN_REQ (hhdm);
EXTERN_REQ (memmap);
EXTERN_REQ (rsdp);
EXTERN_REQ (mp);
EXTERN_REQ (module);
#endif // _KERNEL_LIMINE_REQUESTS_H

View File

@@ -6,8 +6,9 @@
#include <mm/pmm.h>
#include <mm/types.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
/* Porting */
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;
int liballoc_lock (void) {
@@ -28,6 +29,7 @@ void* liballoc_alloc (int pages) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
uintptr_t addr = (uintptr_t)(p_addr + hhdm->offset);
return (void*)addr;
}
@@ -55,18 +57,9 @@ int liballoc_free (void* ptr, int pages) {
#define MODE MODE_BEST
#ifdef DEBUG
#include <stdio.h>
#endif
struct boundary_tag* l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
#ifdef DEBUG
unsigned int l_allocated = 0; //< The real amount of memory allocated.
unsigned int l_inuse = 0; //< The amount of memory in use (malloc'ed).
#endif
static int l_initialized = 0; //< Flag to indicate initialization.
static int l_pageSize = 4096; //< Individual page size
static int l_pageCount = 16; //< Minimum number of pages to allocate.
@@ -79,9 +72,6 @@ static int l_pageCount = 16; //< Minimum number of pages to allocate.
*/
static inline int getexp (unsigned int size) {
if (size < (1 << MINEXP)) {
#ifdef DEBUG
printf ("getexp returns -1 for %i less than MINEXP\n", size);
#endif
return -1; // Smaller than the quantum.
}
@@ -93,10 +83,6 @@ static inline int getexp (unsigned int size) {
shift += 1;
}
#ifdef DEBUG
printf ("getexp returns %i (%i bytes) for %i size\n", shift - 1, (1 << (shift - 1)), size);
#endif
return shift - 1;
}
@@ -130,37 +116,6 @@ static void* liballoc_memcpy (void* s1, const void* s2, size_t n) {
return s1;
}
#ifdef DEBUG
static void dump_array () {
int i = 0;
struct boundary_tag* tag = NULL;
printf ("------ Free pages array ---------\n");
printf ("System memory allocated: %i\n", l_allocated);
printf ("Memory in used (malloc'ed): %i\n", l_inuse);
for (i = 0; i < MAXEXP; i++) {
printf ("%.2i(%i): ", i, l_completePages[i]);
tag = l_freePages[i];
while (tag != NULL) {
if (tag->split_left != NULL)
printf ("*");
printf ("%i", tag->real_size);
if (tag->split_right != NULL)
printf ("*");
printf (" ");
tag = tag->next;
}
printf ("\n");
}
printf ("'*' denotes a split to the left/right of a tag\n");
fflush (stdout);
}
#endif
static inline void insert_tag (struct boundary_tag* tag, int index) {
int realIndex;
@@ -281,15 +236,6 @@ static struct boundary_tag* allocate_new_tag (unsigned int size) {
tag->split_left = NULL;
tag->split_right = NULL;
#ifdef DEBUG
printf ("Resource allocated %x of %i pages (%i bytes) for %i size.\n", tag, pages,
pages * l_pageSize, size);
l_allocated += pages * l_pageSize;
printf ("Total memory usage = %i KB\n", (int)((l_allocated / (1024))));
#endif
return tag;
}
@@ -301,9 +247,6 @@ void* malloc (size_t size) {
liballoc_lock ();
if (l_initialized == 0) {
#ifdef DEBUG
printf ("%s\n", "liballoc initializing.");
#endif
for (index = 0; index < MAXEXP; index++) {
l_freePages[index] = NULL;
l_completePages[index] = 0;
@@ -320,10 +263,6 @@ void* malloc (size_t size) {
while (tag != NULL) {
// If there's enough space in this tag.
if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
#ifdef DEBUG
printf ("Tag search found %i >= %i\n", (tag->real_size - sizeof (struct boundary_tag)),
(size + sizeof (struct boundary_tag)));
#endif
break;
}
@@ -351,13 +290,6 @@ void* malloc (size_t size) {
// Removed... see if we can re-use the excess space.
#ifdef DEBUG
printf (
"Found tag with %i bytes available (requested %i bytes, leaving %i), which has exponent: %i (%i bytes)\n",
tag->real_size - sizeof (struct boundary_tag), size,
tag->real_size - size - sizeof (struct boundary_tag), index, 1 << index);
#endif
unsigned int remainder =
tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder
@@ -365,30 +297,14 @@ void* malloc (size_t size) {
int childIndex = getexp (remainder);
if (childIndex >= 0) {
#ifdef DEBUG
printf ("Seems to be splittable: %i >= 2^%i .. %i\n", remainder, childIndex,
(1 << childIndex));
#endif
struct boundary_tag* new_tag = split_tag (tag);
(void)new_tag;
#ifdef DEBUG
printf ("Old tag has become %i bytes, new tag is now %i bytes (%i exp)\n", tag->real_size,
new_tag->real_size, new_tag->index);
#endif
}
}
ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));
#ifdef DEBUG
l_inuse += size;
printf ("malloc: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024);
dump_array ();
#endif
liballoc_unlock ();
return ptr;
}
@@ -409,29 +325,14 @@ void free (void* ptr) {
return;
}
#ifdef DEBUG
l_inuse -= tag->size;
printf ("free: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024);
#endif
// MELT LEFT...
while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
#ifdef DEBUG
printf ("Melting tag left into available memory. Left was %i, becomes %i (%i)\n",
tag->split_left->real_size, tag->split_left->real_size + tag->real_size,
tag->split_left->real_size);
#endif
tag = melt_left (tag);
remove_tag (tag);
}
// MELT RIGHT...
while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
#ifdef DEBUG
printf ("Melting tag right into available memory. This was was %i, becomes %i (%i)\n",
tag->real_size, tag->split_right->real_size + tag->real_size,
tag->split_right->real_size);
#endif
tag = absorb_right (tag);
}
@@ -453,12 +354,6 @@ void free (void* ptr) {
liballoc_free (tag, pages);
#ifdef DEBUG
l_allocated -= pages * l_pageSize;
printf ("Resource freeing %x of %i pages\n", tag, pages);
dump_array ();
#endif
liballoc_unlock ();
return;
}
@@ -470,12 +365,6 @@ void free (void* ptr) {
insert_tag (tag, index);
#ifdef DEBUG
printf ("Returning tag with %i bytes (requested %i bytes), which has exponent: %i\n",
tag->real_size, tag->size, index);
dump_array ();
#endif
liballoc_unlock ();
}

View File

@@ -139,7 +139,8 @@ void pmm_free (physaddr_t p_addr, size_t nblks) {
continue;
/* If aligned_p_addr is within the range of this region, it belongs to it. */
if (aligned_p_addr >= pmm_region->membase && aligned_p_addr < pmm_region->size) {
if (aligned_p_addr >= pmm_region->membase &&
aligned_p_addr < pmm_region->membase + pmm_region->size) {
physaddr_t addr = aligned_p_addr - pmm_region->membase;
size_t bit = div_align_up (addr, PAGE_SIZE);

1
kernel/proc/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.o

245
kernel/proc/proc.c Normal file
View File

@@ -0,0 +1,245 @@
#include <aux/compiler.h>
#include <aux/elf.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/sched.h>
#include <sys/smp.h>
#if defined(__x86_64__)
#include <amd64/intr_defs.h>
#endif
/* Values extracted from an ELF image while loading it (auxv-style). */
struct elf_aux {
uint64_t entry; /* e_entry: program entry point */
uint64_t phdr; /* virtual address of the program header table (from PT_PHDR) */
uint64_t phent; /* e_phentsize: size of one program header */
uint64_t phnum; /* e_phnum: number of program headers */
};
/* Global list of all processes (wrapper nodes) and the lock protecting it. */
static struct procw* procs;
static spin_lock_t procs_lock = SPIN_LOCK_INIT;
/* Check the four-byte ELF magic (0x7F 'E' 'L' 'F') at the start of an image. */
static bool proc_check_elf (uint8_t* elf) {
  static const uint8_t magic[4] = {0x7F, 'E', 'L', 'F'};
  for (size_t i = 0; i < sizeof magic; i++) {
    if (elf[i] != magic[i])
      return false;
  }
  return true;
}
void proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
uint32_t flags) {
struct proc_mapping* mapping = malloc (sizeof (*mapping));
mapping->paddr = start_paddr;
mapping->vaddr = start_vaddr;
mapping->size = pages * PAGE_SIZE;
flags &= ~MM_PD_LOCK; /* clear LOCK flag if present, because we lock manualy */
spin_lock (&proc->pd.lock);
linklist_append (struct proc_mapping*, proc->mappings, mapping);
for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
mm_map_page (&proc->pd, ppage, vpage, flags);
}
spin_unlock (&proc->pd.lock);
}
static struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
struct elf_aux aux;
Elf64_Ehdr* ehdr = (Elf64_Ehdr*)elf;
aux.entry = ehdr->e_entry;
aux.phnum = ehdr->e_phnum;
aux.phent = ehdr->e_phentsize;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
for (uint64_t segment = 0; segment < ehdr->e_phnum; segment++) {
Elf64_Phdr* phdr =
(Elf64_Phdr*)((uintptr_t)elf + ehdr->e_phoff + (ehdr->e_phentsize * segment));
switch (phdr->p_type) {
case PT_PHDR: {
aux.phdr = (uint64_t)phdr->p_vaddr;
} break;
case PT_LOAD: {
uintptr_t v_addr = align_down (phdr->p_vaddr, PAGE_SIZE);
uintptr_t off = phdr->p_vaddr - v_addr;
size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
uintptr_t p_addr = pmm_alloc (blks);
if (p_addr == PMM_ALLOC_ERR)
DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
if (phdr->p_flags & PF_W)
pg_flags |= MM_PG_RW;
proc_map (proc, p_addr, v_addr, blks, pg_flags);
} break;
}
}
return aux;
}
/*
 * Build a new process from an ELF file on the ramdisk.
 * Allocates the page directory, kernel (syscall) stack and user stack,
 * loads the ELF segments, and prepares the initial iretq frame.
 * Returns NULL on any failure (missing file, bad magic, OOM), releasing
 * everything acquired so far.
 */
static struct proc* proc_spawn_rd (char* name) {
  struct rd_file* rd_file = rd_get_file (name);
  /* missing file would have been dereferenced unconditionally */
  if (rd_file == NULL) {
    DEBUG ("proc_spawn_rd: no such ramdisk file '%s'\n", name);
    return NULL;
  }
  bool ok = proc_check_elf (rd_file->content);
  DEBUG ("ELF magic %s\n", (ok ? "OK" : "BAD"));
  if (!ok)
    return NULL;
  struct proc* proc = malloc (sizeof (*proc));
  if (proc == NULL)
    return NULL;
  memset (proc, 0, sizeof (*proc));
#if defined(__x86_64__)
  proc->pd.lock = SPIN_LOCK_INIT;
  proc->pd.cr3_paddr = mm_alloc_user_pd_phys ();
  if (proc->pd.cr3_paddr == 0) {
    free (proc);
    return NULL;
  }
  proc->pdata.syscall_stack = pmm_alloc (KSTACK_SIZE / PAGE_SIZE);
  if (proc->pdata.syscall_stack == PMM_ALLOC_ERR) {
    pmm_free (proc->pd.cr3_paddr, 1); /* was leaked on this path */
    free (proc);
    return NULL;
  }
  proc->pdata.user_stack = pmm_alloc (USTACK_SIZE / PAGE_SIZE);
  if (proc->pdata.user_stack == PMM_ALLOC_ERR) {
    /* the syscall stack is KSTACK_SIZE pages — it was wrongly freed with
     * USTACK_SIZE / PAGE_SIZE before */
    pmm_free (proc->pdata.syscall_stack, KSTACK_SIZE / PAGE_SIZE);
    pmm_free (proc->pd.cr3_paddr, 1);
    free (proc);
    return NULL;
  }
  uintptr_t user_stack = proc->pdata.user_stack;
  /* stacks grow down: store the top addresses */
  proc->pdata.syscall_stack += KSTACK_SIZE;
  proc->pdata.user_stack += USTACK_SIZE;

  proc_map (proc, user_stack, PROC_USTACK_TOP - USTACK_SIZE, USTACK_SIZE / PAGE_SIZE,
            MM_PG_USER | MM_PG_PRESENT | MM_PG_RW);
  struct elf_aux aux = proc_load_segments (proc, rd_file->content);

  /* initial user-mode iretq frame: ring-3 selectors, IF set, entry point */
  proc->pdata.regs.ss = 0x20 | 0x03;
  proc->pdata.regs.rsp = (uint64_t)PROC_USTACK_TOP;
  proc->pdata.regs.rflags = 0x202;
  proc->pdata.regs.cs = 0x18 | 0x03;
  proc->pdata.regs.rip = aux.entry;
  proc->lock = SPIN_LOCK_INIT;
#endif
  return proc;
}
/* Make the process visible globally (procs list) and runnable on this
 * CPU (proc_run_q). Becomes the current process if the CPU is idle. */
static void proc_register (struct proc* proc) {
    struct procw* procw = malloc (sizeof (*procw));
    if (procw == NULL) {
        /* BUG FIX: was a silent return -- the process would exist but
         * never run, with no trace of why. At minimum, report it. */
        DEBUG ("proc_register: out of memory, process not registered!\n");
        return;
    }
    procw->proc = proc;
    proc->procw = procw;
    /* Lock order: global list first, then the per-CPU lock
     * (same order as proc_kill -- keep it to avoid deadlock). */
    spin_lock (&procs_lock);
    spin_lock (&thiscpu->lock);
    linklist_append (struct procw*, procs, procw);
    linklist_append (struct proc*, thiscpu->proc_run_q, proc);
    if (thiscpu->proc_current == NULL)
        thiscpu->proc_current = proc;
    spin_unlock (&thiscpu->lock);
    spin_unlock (&procs_lock);
}
void proc_sched (void) {
spin_lock (&thiscpu->lock);
if (thiscpu->proc_run_q == NULL || thiscpu->proc_current == NULL) {
goto done;
}
thiscpu->proc_current = thiscpu->proc_current->next;
if (thiscpu->proc_current == NULL) {
thiscpu->proc_current = thiscpu->proc_run_q;
}
done:
spin_unlock (&thiscpu->lock);
if (thiscpu->proc_current != NULL) {
do_sched (&thiscpu->proc_current->pdata.regs, &thiscpu->proc_current->pd);
}
#if defined(__x86_64__)
extern void amd64_spin (void);
amd64_spin ();
#endif
}
/* Unlink a process from the global process list and this CPU's run
 * queue, then free its bookkeeping structures.
 * NOTE(review): only thiscpu's run queue is searched -- a process owned
 * by another CPU (see proc->cpu) would stay queued there; confirm
 * callers only ever kill local processes.
 * NOTE(review): the process's page tables, physical stacks and mapped
 * segment frames are not released here -- looks like a leak; verify
 * this is intentional. */
void proc_kill (struct proc* proc) {
spin_lock (&procs_lock);
spin_lock (&thiscpu->lock);
linklist_remove (struct procw*, procs, proc->procw);
linklist_remove (struct proc*, thiscpu->proc_run_q, proc);
if (thiscpu->proc_current == proc)
thiscpu->proc_current = NULL;
spin_unlock (&thiscpu->lock);
spin_unlock (&procs_lock);
/* clean up bookkeeping: procw first, proc holds the only link to it */
free (proc->procw);
free (proc);
}
/* IRQ trampoline: discard the IRQ handler arguments and invoke the
 * scheduler. Attached to the preemption timer in proc_init. */
static void proc_irq_sched (void* arg, void* regs) {
    (void)arg;
    (void)regs;
    proc_sched ();
}
/* Bootstrap userspace: spawn init from the ramdisk, attach the
 * scheduler to the preemption timer and switch to the first process. */
void proc_init (void) {
    struct proc* init = proc_spawn_rd ("init.exe");
    /* BUG FIX: proc_spawn_rd returns NULL on failure (missing file, bad
     * ELF, OOM); the original dereferenced it unconditionally. */
    if (init == NULL) {
        DEBUG ("failed to spawn init.exe!\n");
        return;
    }
    proc_register (init);
#if defined(__x86_64__)
    irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER, 0);
#endif
    do_sched (&init->pdata.regs, &init->pd);
}

49
kernel/proc/proc.h Normal file
View File

@@ -0,0 +1,49 @@
#ifndef _KERNEL_PROC_PROC_H
#define _KERNEL_PROC_PROC_H
#include <aux/compiler.h>
#include <libk/std.h>
#include <sync/spin_lock.h>
#include <sys/mm.h>
#if defined(__x86_64__)
#include <amd64/gdt.h> /* KSTACK_SIZE */
#include <amd64/proc.h> /* USTACK_SIZE */
#endif
struct cpu;
/* One physical -> virtual mapping owned by a process. */
struct proc_mapping {
struct proc_mapping* next;
uintptr_t paddr; /* physical base address */
uintptr_t vaddr; /* virtual base address */
size_t size; /* NOTE(review): unit (bytes vs pages) not evident here -- confirm */
} PACKED; /* NOTE(review): PACKED on a pointer-bearing struct risks misaligned access -- confirm it is required */
struct procw;
/* A schedulable process, linked into one CPU's run queue. */
struct proc {
struct proc* next; /* run-queue link */
struct proc_mapping* mappings; /* pd.lock implicitly protects this field */
struct proc_platformdata pdata; /* arch-specific registers and stacks */
struct pd pd; /* address space (page directory) */
spin_lock_t lock;
struct cpu* cpu; /* NOTE(review): presumably the owning CPU -- not set in visible code */
struct procw* procw; /* link to its global struct */
};
/*
 * struct proc is a member of a CPU's proc_run_q.
 * struct procw is a process wrapper that is a member of
 * a global process list.
 */
struct procw {
struct procw* next;
struct proc* proc;
};
void proc_sched (void);
void proc_kill (struct proc* proc);
void proc_init (void);
#endif // _KERNEL_PROC_PROC_H

3
kernel/proc/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += proc/proc.c
o += proc/proc.o

1
kernel/rd/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.o

81
kernel/rd/rd.c Normal file
View File

@@ -0,0 +1,81 @@
#include <aux/compiler.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <rd/rd.h>
#include <sys/debug.h>
#define RD_FILES_MAX 64
#define RD_PATH "/boot/mop3dist.tar"
static struct rd_file rd_files[RD_FILES_MAX];
/* Look up a ramdisk file by exact name; returns NULL when absent.
 * BUG FIX: the original compared only strlen(filename) bytes, i.e. a
 * prefix match -- querying "init" would return "init.exe". Comparing
 * one extra byte includes the NUL terminator, forcing an exact match. */
struct rd_file* rd_get_file (char* filename) {
    size_t len = strlen (filename) + 1; /* include the terminating NUL */
    for (size_t i = 0; i < RD_FILES_MAX; i++) {
        if ((rd_files[i].hdr != NULL) &&
            (memcmp (rd_files[i].hdr->filename, filename, len) == 0))
            return &rd_files[i];
    }
    return NULL;
}
/* Decode the 12-byte octal ASCII "size" field of a tar header.
 * The field is normally 11 zero-padded octal digits plus a terminator,
 * but some archivers pad with leading spaces instead; the original
 * blindly computed (byte - '0') for all 11 positions, turning a space
 * pad into garbage. Skip padding and stop at the first non-digit. */
static size_t rd_tar_get_size (uint8_t* in) {
    size_t size = 0;
    size_t i = 0;
    while (i < 11 && in[i] == ' ') /* tolerate space padding */
        i++;
    for (; i < 11 && in[i] >= '0' && in[i] <= '7'; i++)
        size = size * 8 + (size_t)(in[i] - '0');
    return size;
}
static size_t rd_tar_parse (uint8_t* addr) {
size_t i;
for (i = 0;; i++) {
struct tar_hdr* hdr = (struct tar_hdr*)addr;
if (hdr->filename[i] == '\0')
break;
size_t size = rd_tar_get_size (hdr->size);
rd_files[i].hdr = hdr;
rd_files[i].content = (uint8_t*)((uintptr_t)hdr + 512);
rd_files[i].size = rd_tar_get_size ((uint8_t*)hdr->size);
DEBUG ("filename=%s\n", hdr->filename);
addr += ((size / 512) + 1) * 512;
if (size % 512)
addr += 512;
}
return i;
}
/* Locate the ramdisk tar module handed over by the bootloader and
 * index its contents. Halts the CPU if the ramdisk is missing, since
 * nothing useful can run without it. */
void rd_init (void) {
    struct limine_module_response* module = limine_module_request.response;
    uint8_t* rd_addr = NULL;
    /* BUG FIX: the bootloader may not answer the module request at all;
     * the original dereferenced a NULL response. */
    if (module == NULL) {
        DEBUG ("no limine module response!\n");
        for (;;)
            ;
    }
    for (size_t i = 0; i < module->module_count; i++) {
        struct limine_file* file = module->modules[i];
        DEBUG ("%s\n", file->path);
        /* Compare the terminating NUL too -- the original prefix match
         * would also accept e.g. "/boot/mop3dist.tar.bak". */
        if (memcmp (file->path, RD_PATH, strlen (RD_PATH) + 1) == 0) {
            rd_addr = file->address;
        }
    }
    if (rd_addr == NULL) {
        DEBUG ("mop3dist.tar NOT FOUND!\n");
        for (;;)
            ;
    }
    rd_tar_parse (rd_addr);
}

27
kernel/rd/rd.h Normal file
View File

@@ -0,0 +1,27 @@
#ifndef _KERNEL_RD_RD_H
#define _KERNEL_RD_RD_H
#include <aux/compiler.h>
#include <libk/std.h>
/* Leading fields of a tar header (one per 512-byte record). Only the
 * fields up to type_flag are declared; the remainder of the 512-byte
 * record is unused here. Numeric fields are ASCII octal. */
struct tar_hdr {
char filename[100]; /* NUL-terminated unless exactly 100 chars long */
uint8_t mode[8];
uint8_t uid[8];
uint8_t gid[8];
uint8_t size[12]; /* file size in bytes, octal ASCII */
uint8_t mtime[12];
uint8_t checksum[8];
uint8_t type_flag;
} PACKED;
/* In-memory index entry for one ramdisk file. */
struct rd_file {
struct tar_hdr* hdr; /* NULL marks an unused slot */
uint8_t* content; /* file data, right after the header record */
size_t size; /* decoded file size in bytes */
};
struct rd_file* rd_get_file (char* filename);
void rd_init (void);
#endif // _KERNEL_RD_RD_H

3
kernel/rd/src.mk Normal file
View File

@@ -0,0 +1,3 @@
c += rd/rd.c
o += rd/rd.o

View File

@@ -5,3 +5,5 @@ include mm/src.mk
include limine/src.mk
include uACPI/src.mk
include irq/src.mk
include rd/src.mk
include proc/src.mk

View File

@@ -10,8 +10,11 @@
#define MM_PG_PRESENT (1 << 0)
#define MM_PG_RW (1 << 1)
#define MM_PG_USER (1 << 2)
#define MM_PD_RELOAD (1 << 30)
#define MM_PD_LOCK (1 << 31)
uintptr_t mm_alloc_user_pd_phys (void);
void mm_reload (void);
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags);
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags);

8
kernel/sys/sched.h Normal file
View File

@@ -0,0 +1,8 @@
#ifndef _KERNEL_SYS_SCHED_H
#define _KERNEL_SYS_SCHED_H
#include <sys/mm.h>
/* Switch execution to the register context in regs under the address
 * space pd.
 * NOTE(review): presumably does not return (performs the context
 * switch) -- callers fall through to an idle spin after it. */
void do_sched (void* regs, struct pd* pd);
#endif // _KERNEL_SYS_SCHED_H