#include <stdint.h>
#include <string.h>
/* NOTE: three more #include directives lost their targets during extraction;
 * they presumably declare struct tss, amd64_get_tss() and amd64_debug_init(). */

/* Segment selectors into the GDT below. */
#define GDT_KCODE 0x08
#define GDT_KDATA 0x10
#define GDT_UCODE 0x18
#define GDT_UDATA 0x20
#define GDT_TSS   0x28

/* TSS descriptor access-byte bits: type 0x9 (available 64-bit TSS) plus the
 * present bit, giving the usual 0x89 when combined. */
#define TSS         0x09
#define TSS_PRESENT 0x80

#define KSTACK_SIZE (8 * 1024)

struct gdt_entry {
    uint16_t limitlow;
    uint16_t baselow;
    uint8_t  basemid;
    uint8_t  access;
    uint8_t  gran;
    uint8_t  basehigh;
} __attribute__((packed));

struct gdt_ptr {
    uint16_t limit;
    uint64_t base;
} __attribute__((packed));

/* The classic five entries plus the 16-byte TSS descriptor, which occupies
 * two consecutive 8-byte slots in long mode. */
struct gdt_extended {
    struct gdt_entry old[5];
    struct gdt_entry tsslow;
    struct gdt_entry tsshigh;
} __attribute__((packed));

__attribute__((aligned(16))) static volatile uint8_t kernel_stack[KSTACK_SIZE];
__attribute__((aligned(16))) static volatile struct gdt_extended gdt;

static void amd64_gdt_set(volatile struct gdt_entry *ent, uint32_t base,
                          uint32_t limit, uint8_t acc, uint8_t gran)
{
    ent->baselow  = base & 0xFFFF;
    ent->basemid  = (base >> 16) & 0xFF;
    ent->basehigh = (base >> 24) & 0xFF;
    ent->limitlow = limit & 0xFFFF;
    ent->gran     = ((limit >> 16) & 0x0F) | (gran & 0xF0);
    ent->access   = acc;
}

static void amd64_gdt_init(void)
{
    volatile struct tss *tss = amd64_get_tss();

    memset((void *)&gdt, 0, sizeof(gdt));
    memset((void *)kernel_stack, 0, sizeof(kernel_stack));
    memset((void *)tss, 0, sizeof(*tss));

    /* No I/O permission bitmap: point the offset past the end of the TSS. */
    tss->iopb_off = sizeof(*tss);
    /* Stack used on privilege-level changes into ring 0; grows downward. */
    tss->rsp0 = (uint64_t)((uintptr_t)kernel_stack + sizeof(kernel_stack));

    /* tss is already a pointer: the descriptor base must be the TSS itself,
     * not the address of the local variable. */
    uint64_t tssbase  = (uint64_t)(uintptr_t)tss;
    uint64_t tsslimit = sizeof(*tss) - 1;

    amd64_gdt_set(&gdt.old[0], 0, 0, 0, 0);             /* null descriptor  */
    amd64_gdt_set(&gdt.old[1], 0, 0xFFFFF, 0x9A, 0xA0); /* kernel code, 64-bit */
    amd64_gdt_set(&gdt.old[2], 0, 0xFFFFF, 0x92, 0xC0); /* kernel data      */
    amd64_gdt_set(&gdt.old[3], 0, 0xFFFFF, 0xFA, 0xA0); /* user code, 64-bit */
    amd64_gdt_set(&gdt.old[4], 0, 0xFFFFF, 0xF2, 0xC0); /* user data        */

    /* Low half of the 16-byte TSS descriptor. */
    amd64_gdt_set(&gdt.tsslow, tssbase & 0xFFFFFFFF, tsslimit,
                  TSS_PRESENT | TSS, 0);

    /* High half: bits 63:32 of the base go in the first four bytes,
     * everything else must be zero. */
    uint32_t tssbasehigh = tssbase >> 32;
    gdt.tsshigh.limitlow = tssbasehigh & 0xFFFF;
    gdt.tsshigh.baselow  = (tssbasehigh >> 16) & 0xFFFF;
    gdt.tsshigh.basemid  = 0;
    gdt.tsshigh.basehigh = 0;
    gdt.tsshigh.access   = 0;
    gdt.tsshigh.gran     = 0;

    struct gdt_ptr gdtr;
    gdtr.limit = sizeof(gdt) - 1;
    gdtr.base  = (uint64_t)&gdt;
    __asm__ volatile("lgdt %0" :: "m"(gdtr) : "memory");

    /* Reload CS with a far return, then reload the data segment registers. */
    __asm__ volatile(
        "pushq %[kcode]\n"
        "lea 1f(%%rip), %%rax\n"
        "pushq %%rax\n"
        "lretq\n"
        "1:\n"
        "movw %[kdata], %%ax\n"
        "movw %%ax, %%ds\n"
        "movw %%ax, %%es\n"
        "movw %%ax, %%ss\n"
        :
        : [kcode] "i"(GDT_KCODE), [kdata] "i"(GDT_KDATA)
        : "rax", "memory");

    /* Load the task register with the TSS selector. */
    __asm__ volatile("ltr %0" :: "r"((uint16_t)GDT_TSS));
}

void amd64_init(void)
{
    amd64_gdt_init();
    amd64_debug_init();
}
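
/*
 * Not shown above: "struct tss" and amd64_get_tss() come from one of the
 * headers whose names were lost.  As a minimal sketch (an assumption, not
 * the project's actual header), a declaration matching the fields used
 * above and the architectural 104-byte 64-bit TSS layout could look like:
 *
 *     struct tss {
 *         uint32_t reserved0;
 *         uint64_t rsp0;        // stack loaded on ring 3 -> ring 0 transitions
 *         uint64_t rsp1;
 *         uint64_t rsp2;
 *         uint64_t reserved1;
 *         uint64_t ist[7];      // interrupt stack table pointers
 *         uint64_t reserved2;
 *         uint16_t reserved3;
 *         uint16_t iopb_off;    // I/O permission bitmap offset (0x66)
 *     } __attribute__((packed));
 *
 *     volatile struct tss *amd64_get_tss(void);
 */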