Run first app from ramdisk!

This commit is contained in:
2025-12-29 23:54:21 +01:00
parent c16170e4c2
commit fa7998c323
56 changed files with 5443 additions and 229 deletions

View File

@@ -1,4 +1,5 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/msr-index.h>
#include <amd64/msr.h>
#include <libk/std.h>
@@ -14,10 +15,10 @@
#define IOAPICS_MAX 24
#define INTERRUPT_SRC_OVERRIDES_MAX 24
#define LAPIC_ID 0x20 /* ID */
#define LAPIC_EOI 0xB0 /* End of interrupt */
#define LAPIC_SIVR 0xF0 /* Spurious interrupt vector register */
#define LAPIC_ICR 0x300
#define LAPIC_ID 0x20 /* ID */
#define LAPIC_EOI 0xB0 /* End of interrupt */
#define LAPIC_SIVR 0xF0 /* Spurious interrupt vector register */
#define LAPIC_ICR 0x300 /* Interrupt command register */
#define LAPIC_LVTTR 0x320 /* LVT timer register */
#define LAPIC_TIMICT 0x380 /* Initial count register */
#define LAPIC_TIMCCT 0x390 /* Current count register */
@@ -60,7 +61,7 @@ static struct acpi_madt_ioapic* amd64_ioapic_find (uint8_t irq) {
}
void amd64_ioapic_route_irq (uint8_t vec, uint8_t irq, uint64_t flags, uint64_t lapic_id) {
struct acpi_madt_ioapic* apic;
struct acpi_madt_ioapic* apic = NULL;
struct acpi_madt_interrupt_source_override* override;
bool found_override = false;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -155,7 +156,7 @@ void amd64_ioapic_init (void) {
struct acpi_madt_ioapic* ioapic = (struct acpi_madt_ioapic*)current;
mm_map_kernel_page ((uintptr_t)ioapic->address,
(uintptr_t)hhdm->offset + (uintptr_t)ioapic->address,
MM_PG_PRESENT | MM_PG_RW);
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
apics[ioapic_entries++] = *ioapic;
} break;
case ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE: {
@@ -188,7 +189,7 @@ void amd64_lapic_tick (uint32_t tick) { amd64_lapic_write (LAPIC_TIMICT, tick);
static uint32_t amd64_lapic_calibrate (uint32_t us) {
amd64_lapic_write (LAPIC_DCR, 0x03);
amd64_lapic_write (LAPIC_LVTTR, 0x20 | (1 << 16));
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 16));
amd64_lapic_write (LAPIC_TIMICT, 0xFFFFFFFF);
@@ -202,7 +203,7 @@ static uint32_t amd64_lapic_calibrate (uint32_t us) {
static void amd64_lapic_start (uint32_t ticks) {
amd64_lapic_write (LAPIC_DCR, 0x03);
amd64_lapic_write (LAPIC_LVTTR, 0x20 | (1 << 17));
amd64_lapic_write (LAPIC_LVTTR, SCHED_PREEMPT_TIMER | (1 << 17));
amd64_lapic_write (LAPIC_TIMICT, ticks);
}
@@ -215,7 +216,8 @@ uint64_t amd64_lapic_init (uint32_t us) {
uintptr_t lapic_paddr = amd64_rdmsr (MSR_APIC_BASE) & 0xFFFFF000;
lapic_mmio_base = lapic_paddr + (uintptr_t)hhdm->offset;
mm_map_kernel_page (lapic_paddr, lapic_mmio_base, MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK);
mm_map_kernel_page (lapic_paddr, lapic_mmio_base,
MM_PG_PRESENT | MM_PG_RW | MM_PD_LOCK | MM_PD_RELOAD);
amd64_lapic_write (LAPIC_SIVR, 0xFF | (1 << 8));
@@ -225,3 +227,8 @@ uint64_t amd64_lapic_init (uint32_t us) {
return ticks;
}
/* Send an inter-processor interrupt with vector `vec` to the CPU whose
 * local APIC ID is `lapic_id`.  The destination APIC ID lives in bits
 * 24..31 of the high ICR dword; writing the low ICR dword is what
 * actually dispatches the IPI, so the high dword must be written first. */
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec) {
	/* Cast before shifting: `lapic_id` promotes to (signed) int, and
	 * shifting an ID >= 128 left by 24 would overflow into the sign
	 * bit, which is undefined behavior. */
	amd64_lapic_write (LAPIC_ICR + 0x10, (uint32_t)lapic_id << 24);
	amd64_lapic_write (LAPIC_ICR, vec);
}

View File

@@ -11,6 +11,7 @@ void amd64_ioapic_init (void);
uint32_t amd64_lapic_id (void);
void amd64_lapic_tick (uint32_t tick);
void amd64_lapic_eoi (void);
void amd64_lapic_ipi (uint8_t lapic_id, uint8_t vec);
uint64_t amd64_lapic_init (uint32_t us);
#endif // _KERNEL_AMD64_APIC_H

View File

@@ -2,11 +2,15 @@
#include <amd64/debug.h>
#include <amd64/hpet.h>
#include <amd64/init.h>
#include <amd64/intr_defs.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <limine/limine.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <proc/proc.h>
#include <rd/rd.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
@@ -17,11 +21,6 @@
ALIGNED (16) static uint8_t uacpi_memory_buffer[UACPI_MEMORY_BUFFER_MAX];
void ack (void* arg) {
(void)arg;
debugprintf (". %u\n", thiscpu->id);
}
void bootmain (void) {
struct cpu* bsp_cpu = cpu_make ();
cpu_assign (bsp_cpu->id);
@@ -31,6 +30,8 @@ void bootmain (void) {
pmm_init ();
mm_init ();
rd_init ();
uacpi_setup_early_table_access ((void*)uacpi_memory_buffer, sizeof (uacpi_memory_buffer));
amd64_ioapic_init ();
@@ -38,10 +39,16 @@ void bootmain (void) {
smp_init ();
irq_attach (&ack, NULL, 32 + 0);
/* busy wait for cpus to come online */
for (volatile int i = 0; i < INT_MAX; i++)
;
mm_init2 ();
__asm__ volatile ("sti");
proc_init ();
for (;;)
;
}

View File

@@ -3,8 +3,9 @@
#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>
#define KSTACK_SIZE (16 * 1024)
#define KSTACK_SIZE (32 * 1024)
struct gdt_entry {
uint16_t limitlow;

View File

@@ -75,7 +75,8 @@ void amd64_hpet_init (void) {
hpet_paddr = (uintptr_t)hpet->address.address;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr, MM_PG_PRESENT | MM_PG_RW);
mm_map_kernel_page (hpet_paddr, (uintptr_t)hhdm->offset + hpet_paddr,
MM_PG_PRESENT | MM_PG_RW | MM_PD_RELOAD);
hpet_32bits = (amd64_hpet_read (HPET_GCIDR) & (1 << 13)) ? 0 : 1;

View File

@@ -1,5 +1,6 @@
#include <amd64/apic.h>
#include <amd64/intr.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <irq/irq.h>
@@ -100,54 +101,23 @@ static void amd64_idt_init (void) {
#define IDT_ENTRY(n, ist) \
extern void amd64_intr##n (void); \
amd64_idt_set (&idt_entries[(n)], (uint64_t)&amd64_intr##n, 0x8E, (ist))
IDT_ENTRY (0, 0);
IDT_ENTRY (1, 0);
IDT_ENTRY (2, 0);
IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0);
IDT_ENTRY (5, 0);
IDT_ENTRY (6, 0);
IDT_ENTRY (7, 0);
IDT_ENTRY (8, 0);
IDT_ENTRY (9, 0);
IDT_ENTRY (10, 0);
IDT_ENTRY (11, 0);
IDT_ENTRY (12, 0);
IDT_ENTRY (13, 0);
IDT_ENTRY (14, 0);
IDT_ENTRY (15, 0);
IDT_ENTRY (16, 0);
IDT_ENTRY (17, 0);
IDT_ENTRY (18, 0);
IDT_ENTRY (19, 0);
IDT_ENTRY (20, 0);
IDT_ENTRY (21, 0);
IDT_ENTRY (22, 0);
IDT_ENTRY (23, 0);
IDT_ENTRY (24, 0);
IDT_ENTRY (25, 0);
IDT_ENTRY (26, 0);
IDT_ENTRY (27, 0);
IDT_ENTRY (28, 0);
IDT_ENTRY (29, 0);
IDT_ENTRY (30, 0);
IDT_ENTRY (31, 0);
IDT_ENTRY (32, 1);
IDT_ENTRY (33, 1);
IDT_ENTRY (34, 1);
IDT_ENTRY (35, 1);
IDT_ENTRY (36, 1);
IDT_ENTRY (37, 1);
IDT_ENTRY (38, 1);
IDT_ENTRY (39, 1);
IDT_ENTRY (40, 1);
IDT_ENTRY (41, 1);
IDT_ENTRY (42, 1);
IDT_ENTRY (43, 1);
IDT_ENTRY (44, 1);
IDT_ENTRY (45, 1);
IDT_ENTRY (46, 1);
IDT_ENTRY (47, 1);
/* clang-format off */
IDT_ENTRY (0, 0); IDT_ENTRY (1, 0); IDT_ENTRY (2, 0); IDT_ENTRY (3, 0);
IDT_ENTRY (4, 0); IDT_ENTRY (5, 0); IDT_ENTRY (6, 0); IDT_ENTRY (7, 0);
IDT_ENTRY (8, 0); IDT_ENTRY (9, 0); IDT_ENTRY (10, 0); IDT_ENTRY (11, 0);
IDT_ENTRY (12, 0); IDT_ENTRY (13, 0); IDT_ENTRY (14, 0); IDT_ENTRY (15, 0);
IDT_ENTRY (16, 0); IDT_ENTRY (17, 0); IDT_ENTRY (18, 0); IDT_ENTRY (19, 0);
IDT_ENTRY (20, 0); IDT_ENTRY (21, 0); IDT_ENTRY (22, 0); IDT_ENTRY (23, 0);
IDT_ENTRY (24, 0); IDT_ENTRY (25, 0); IDT_ENTRY (26, 0); IDT_ENTRY (27, 0);
IDT_ENTRY (28, 0); IDT_ENTRY (29, 0); IDT_ENTRY (30, 0); IDT_ENTRY (31, 0);
IDT_ENTRY (32, 1); IDT_ENTRY (33, 1); IDT_ENTRY (34, 1); IDT_ENTRY (35, 1);
IDT_ENTRY (36, 1); IDT_ENTRY (37, 1); IDT_ENTRY (38, 1); IDT_ENTRY (39, 1);
IDT_ENTRY (40, 1); IDT_ENTRY (41, 1); IDT_ENTRY (42, 1); IDT_ENTRY (43, 1);
IDT_ENTRY (44, 1); IDT_ENTRY (45, 1); IDT_ENTRY (46, 1); IDT_ENTRY (47, 1);
IDT_ENTRY (SCHED_PREEMPT_TIMER, 1);
IDT_ENTRY (TLB_SHOOTDOWN, 1);
/* clang-format on */
#undef IDT_ENTRY
idt.limit = sizeof (idt_entries) - 1;
@@ -177,7 +147,11 @@ static void amd64_intr_exception (struct saved_regs* regs) {
regs->error, regs->rip, regs->cs, regs->rflags, regs->rsp, regs->ss, cr2, cr3,
regs->rbx);
amd64_spin ();
if (regs->cs == (0x18 | 0x03)) {
proc_kill (thiscpu->proc_current);
} else {
amd64_spin ();
}
}
void amd64_intr_handler (void* stack_ptr) {
@@ -185,16 +159,20 @@ void amd64_intr_handler (void* stack_ptr) {
if (regs->trap <= 31) {
amd64_intr_exception (regs);
} else if (regs->trap >= 32) {
} else {
amd64_lapic_eoi ();
__asm__ volatile ("sti");
struct irq* irq = irq_find (regs->trap);
irq_invoke_each (regs->trap);
if (irq != NULL) {
if (!(irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("sti");
__asm__ volatile ("cli");
} else {
DEBUG ("unknown trap %lu\n", regs->trap);
irq->func (irq->arg, stack_ptr);
if (!(irq->flags & IRQ_INTERRUPT_SAFE))
__asm__ volatile ("cli");
}
}
}
@@ -235,3 +213,12 @@ void irq_restore (void) {
if (prev == 1)
amd64_irq_restore_flags (thiscpu->irq_ctx.rflags);
}
/* Translate an interrupt vector number into the per-vector slot index
 * used by the irq subsystem.  Only SCHED_PREEMPT_TIMER and
 * TLB_SHOOTDOWN have explicit mappings; every other in-range vector
 * resolves to 0 (zero-initialized table entries). */
uint8_t amd64_resolve_irq (uint8_t irq) {
	static const uint8_t mappings[] = {
		[SCHED_PREEMPT_TIMER] = 0,
		[TLB_SHOOTDOWN] = 1,
	};
	/* Bounds check: the designated initializers size the table to
	 * TLB_SHOOTDOWN + 1 entries, but `irq` can be as large as 255 --
	 * indexing past the table was an out-of-bounds read (UB). */
	if (irq >= sizeof (mappings))
		return 0;
	return mappings[irq];
}

View File

@@ -32,6 +32,7 @@ struct saved_regs {
} PACKED;
void amd64_load_idt (void);
uint8_t amd64_resolve_irq (uint8_t irq);
void amd64_intr_init (void);
#endif // _KERNEL_AMD64_INTR_H

7
kernel/amd64/intr_defs.h Normal file
View File

@@ -0,0 +1,7 @@
#ifndef _KERNEL_AMD64_INTR_DEFS_H
#define _KERNEL_AMD64_INTR_DEFS_H
/* Interrupt vector assignments shared between C and assembly -- this
 * header is included from intr_stub.S, so keep it macro-only. */
#define SCHED_PREEMPT_TIMER 80 /* LAPIC timer vector (scheduler preemption) */
#define TLB_SHOOTDOWN 81 /* IPI vector forcing a CR3 reload on remote CPUs */
#endif // _KERNEL_AMD64_INTR_DEFS_H

View File

@@ -1,3 +1,5 @@
#include <amd64/intr_defs.h>
.extern amd64_intr_handler
#define err(z) \
@@ -108,3 +110,6 @@ make_intr_stub(no_err, 44)
make_intr_stub(no_err, 45)
make_intr_stub(no_err, 46)
make_intr_stub(no_err, 47)
make_intr_stub(no_err, SCHED_PREEMPT_TIMER)
make_intr_stub(no_err, TLB_SHOOTDOWN)

View File

@@ -1,22 +1,27 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <aux/compiler.h>
#include <irq/irq.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/pmm.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>
#include <sys/smp.h>
#define AMD64_PG_PRESENT (1 << 0)
#define AMD64_PG_RW (1 << 1)
#define AMD64_PG_USER (1 << 2)
#define AMD64_PG_TABLE_ENTRIES_MAX 512
struct pg_index {
uint16_t pml4, pml3, pml2, pml1;
} PACKED;
struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
static struct pd kernel_pd = {.lock = SPIN_LOCK_INIT};
/* This is needed to sync between map/unmap operations and TLB shootdown. */
static spin_lock_t mm_lock = SPIN_LOCK_INIT;
static uintptr_t amd64_current_cr3 (void) {
uintptr_t cr3;
@@ -41,7 +46,7 @@ static struct pg_index amd64_mm_page_index (uint64_t vaddr) {
static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool alloc) {
uint64_t entry = table[entry_idx];
uint64_t paddr;
physaddr_t paddr;
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
@@ -53,11 +58,11 @@ static uint64_t* amd64_mm_next_table (uint64_t* table, uint64_t entry_idx, bool
paddr = pmm_alloc (1);
if (paddr == 0)
if (paddr == PMM_ALLOC_ERR)
return NULL;
memset ((void*)((uintptr_t)hhdm->offset + (uintptr_t)paddr), 0, PAGE_SIZE);
table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW;
table[entry_idx] = paddr | AMD64_PG_PRESENT | AMD64_PG_RW | AMD64_PG_USER;
}
return (uint64_t*)((uintptr_t)hhdm->offset + (uintptr_t)paddr);
@@ -79,6 +84,8 @@ static void amd64_reload_cr3 (void) {
}
void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
@@ -108,11 +115,13 @@ void mm_map_page (struct pd* pd, uintptr_t paddr, uintptr_t vaddr, uint32_t flag
do_reload = true;
done:
if (do_reload)
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
}
void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
@@ -120,6 +129,8 @@ void mm_map_kernel_page (uintptr_t paddr, uintptr_t vaddr, uint32_t flags) {
}
void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
spin_lock (&mm_lock);
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
bool do_reload = false;
@@ -129,15 +140,15 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
uint64_t* pml4 = (uint64_t*)(pd->cr3_paddr + (uintptr_t)hhdm->offset);
struct pg_index pg_index = amd64_mm_page_index (vaddr);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, true);
uint64_t* pml3 = amd64_mm_next_table (pml4, pg_index.pml4, false);
if (pml3 == NULL)
goto done;
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, true);
uint64_t* pml2 = amd64_mm_next_table (pml3, pg_index.pml3, false);
if (pml2 == NULL)
goto done;
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, true);
uint64_t* pml1 = amd64_mm_next_table (pml2, pg_index.pml2, false);
if (pml1 == NULL)
goto done;
@@ -147,11 +158,13 @@ void mm_unmap_page (struct pd* pd, uintptr_t vaddr, uint32_t flags) {
do_reload = true;
done:
if (do_reload)
if (do_reload && (flags & MM_PD_RELOAD))
amd64_reload_cr3 ();
if (flags & MM_PD_LOCK)
spin_unlock (&pd->lock);
spin_unlock (&mm_lock);
}
void mm_unmap_kernel_page (uintptr_t vaddr, uint32_t flags) {
@@ -162,4 +175,44 @@ void mm_lock_kernel (void) { spin_lock (&kernel_pd.lock); }
void mm_unlock_kernel (void) { spin_unlock (&kernel_pd.lock); }
/* Allocate and initialize a fresh top-level page table (PML4) for a
 * user process.  The lower half (user address space entries) is zeroed;
 * the upper half is copied from the kernel PML4 so kernel mappings are
 * shared by every process.  Returns the physical address of the new
 * PML4, or 0 on allocation failure. */
uintptr_t mm_alloc_user_pd_phys (void) {
	struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
	physaddr_t cr3 = pmm_alloc (1);
	if (cr3 == PMM_ALLOC_ERR)
		return 0;
	/* The fresh page is written through the higher-half direct map. */
	uint8_t* vu_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + cr3);
	memset ((void*)vu_cr3, 0, PAGE_SIZE / 2);
	uint8_t* vk_cr3 = (uint8_t*)((uintptr_t)hhdm->offset + (uintptr_t)kernel_pd.cr3_paddr);
	/* Upper 256 PML4 entries = kernel half; share them with the new PD. */
	memcpy (&vu_cr3[PAGE_SIZE / 2], &vk_cr3[PAGE_SIZE / 2], PAGE_SIZE / 2);
	return cr3;
}
/* Broadcast a TLB shootdown: send the TLB_SHOOTDOWN IPI to every CPU
 * reported by Limine (including the caller) so each one reloads CR3.
 * mm_lock serializes this against concurrent map/unmap operations. */
void mm_reload (void) {
	spin_lock (&mm_lock);
	struct limine_mp_response* mp = limine_mp_request.response;
	for (size_t i = 0; i < mp->cpu_count; i++) {
		/* NOTE(review): limine's lapic_id is 32-bit but
		 * amd64_lapic_ipi takes uint8_t -- fine for xAPIC IDs < 256;
		 * verify if x2APIC IDs are ever in play. */
		amd64_lapic_ipi (mp->cpus[i]->lapic_id, TLB_SHOOTDOWN);
	}
	spin_unlock (&mm_lock);
}
/* IRQ handler for the TLB_SHOOTDOWN vector: reload CR3 to flush this
 * CPU's (non-global) TLB entries.  Attached with IRQ_INTERRUPT_SAFE,
 * so the dispatcher keeps interrupts disabled while it runs. */
static void amd64_tlb_shootdown_irq (void* arg, void* regs) {
	(void)arg, (void)regs;
	amd64_reload_cr3 ();
	DEBUG ("cpu %u TLB shootdown\n", thiscpu->id);
}
/* Second-stage MM init: runs once the IRQ subsystem is up, attaching
 * the TLB-shootdown IPI handler. */
void mm_init2 (void) {
	irq_attach (&amd64_tlb_shootdown_irq, NULL, TLB_SHOOTDOWN, IRQ_INTERRUPT_SAFE);
}
void mm_init (void) { kernel_pd.cr3_paddr = amd64_current_cr3 (); }

View File

@@ -12,5 +12,6 @@ struct pd {
};
void amd64_load_kernel_cr3 (void);
void mm_init2 (void);
#endif // _KERNEL_AMD64_MM_H

17
kernel/amd64/proc.h Normal file
View File

@@ -0,0 +1,17 @@
#ifndef _KERNEL_AMD64_PROC_H
#define _KERNEL_AMD64_PROC_H
#include <amd64/intr.h>
#include <libk/std.h>
/* Highest user-stack virtual address (top of the canonical lower half,
 * one page below the 128 TiB boundary). */
#define PROC_USTACK_TOP 0x00007FFFFFFFF000ULL
/* User stack size: 256 pages (1 MiB with 4 KiB pages). */
#define USTACK_SIZE (256 * PAGE_SIZE)
/* Per-process architecture-specific state. */
struct proc_platformdata {
	/* Register snapshot in the same layout the interrupt stubs save. */
	struct saved_regs regs;
	uintptr_t syscall_stack; /* kernel stack used on syscall entry -- TODO confirm against syscall path */
	uintptr_t user_stack; /* user-mode stack pointer */
	uint64_t fsbase; /* presumably FS.base for TLS, restored on switch -- verify */
};
#endif // _KERNEL_AMD64_PROC_H

24
kernel/amd64/sched.S Normal file
View File

@@ -0,0 +1,24 @@
/* Pop the general-purpose registers in the reverse of the order the
 * interrupt stubs push them; the sequence must mirror the layout of
 * struct saved_regs -- verify against intr_stub.S when changing either. */
#define pop_regs \
popq %r15; \
popq %r14; \
popq %r13; \
popq %r12; \
popq %r11; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rbx; \
popq %rbp; \
popq %rdi; \
popq %rsi; \
popq %rdx; \
popq %rcx; \
popq %rax;
/* void amd64_do_sched(void *regs /* rdi *" "/, void *cr3 /* rsi *" "/)
 * Switch to the target address space and resume the register frame at
 * `regs` via iretq; never returns.  NOTE(review): `regs` is read AFTER
 * CR3 is switched, so it must be mapped in the target address space
 * (the shared kernel half -- see mm_alloc_user_pd_phys). */
.global amd64_do_sched
amd64_do_sched:
movq %rsi, %cr3 /* load target CR3 (flushes non-global TLB entries) */
movq %rdi, %rsp /* point rsp at the saved register frame */
pop_regs
add $16, %rsp /* presumably skips trap number + error code pushed by the stubs -- confirm */
iretq /* restore rip/cs/rflags/rsp/ss from the frame */

6
kernel/amd64/sched.h Normal file
View File

@@ -0,0 +1,6 @@
#ifndef _KERNEL_AMD64_SCHED_H
#define _KERNEL_AMD64_SCHED_H
/* Implemented in sched.S: load `cr3` and resume the saved register
 * frame at `regs` with iretq; does not return. */
void amd64_do_sched (void* regs, void* cr3);
#endif // _KERNEL_AMD64_SCHED_H

4
kernel/amd64/sched1.c Normal file
View File

@@ -0,0 +1,4 @@
#include <amd64/sched.h>
#include <sys/mm.h>
/* Architecture-independent context-switch entry point: hand the saved
 * register frame and the target page directory's CR3 to the assembly
 * switcher. */
void do_sched (void* regs, struct pd* pd) {
	void* target_cr3 = (void*)pd->cr3_paddr;
	amd64_do_sched (regs, target_cr3);
}

View File

@@ -23,6 +23,7 @@ struct cpu* cpu_make (void) {
struct cpu* cpu = &cpus[id];
memset (cpu, 0, sizeof (*cpu));
cpu->lock = SPIN_LOCK_INIT;
cpu->id = id;
return cpu_get (id);

View File

@@ -5,21 +5,31 @@
#include <amd64/tss.h>
#include <aux/compiler.h>
#include <libk/std.h>
#include <proc/proc.h>
#define CPUS_MAX 32
/* Per-CPU state block; one entry per logical CPU, indexed by `id`. */
struct cpu {
	uint64_t lapic_ticks; /* LAPIC timer ticks per tick interval (from calibration) */
	uint32_t id; /* logical CPU index assigned by cpu_make */
	struct {
		uint64_t rflags; /* RFLAGS saved by the outermost irq_save */
		atomic_int nesting; /* irq_save/irq_restore nesting depth */
	} irq_ctx;
	uint8_t user_stack[USTACK_SIZE] ALIGNED (16);
	volatile uint8_t kernel_stack[KSTACK_SIZE] ALIGNED (16);
	volatile uint8_t except_stack[KSTACK_SIZE] ALIGNED (16); /* presumably IST stack for exceptions -- verify against TSS setup */
	volatile uint8_t irq_stack[KSTACK_SIZE] ALIGNED (16); /* presumably IST stack for external interrupts -- verify */
	volatile struct gdt_extended gdt ALIGNED (16);
	volatile struct tss tss;
	spin_lock_t lock;
	struct proc* proc_run_q; /* head of this CPU's run queue */
	struct proc* proc_current; /* process currently executing here */
};
struct cpu* cpu_make (void);

View File

@@ -10,10 +10,12 @@ c += amd64/bootmain.c \
amd64/hpet.c \
amd64/mm.c \
amd64/time.c \
amd64/smp.c
amd64/smp.c \
amd64/sched1.c
S += amd64/intr_stub.S \
amd64/spin.S
amd64/spin.S \
amd64/sched.S
o += amd64/bootmain.o \
amd64/init.o \
@@ -29,4 +31,6 @@ o += amd64/bootmain.o \
amd64/hpet.o \
amd64/mm.o \
amd64/time.o \
amd64/smp.o
amd64/smp.o \
amd64/sched.o \
amd64/sched1.o