Files
mop3/kernel/device/idedrv.c
2026-03-12 19:23:47 +01:00

468 lines
13 KiB
C

#include <amd64/io.h>
#include <amd64/intr_defs.h>
#include <amd64/apic.h>
#include <device/device.h>
#include <device/idedrv.h>
#include <device/partitions.h>
#include <devices.h>
#include <libk/std.h>
#include <libk/list.h>
#include <libk/string.h>
#include <mm/liballoc.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <status.h>
#include <irq/irq.h>
/* ATA task-file registers, as offsets from the channel's I/O base port. */
#define IDE_REG_DATA 0x00
#define IDE_REG_ERROR 0x01
#define IDE_REG_SECCOUNT 0x02
#define IDE_REG_LBA0 0x03
#define IDE_REG_LBA1 0x04
#define IDE_REG_LBA2 0x05
#define IDE_REG_DRIVE 0x06
#define IDE_REG_STATUS 0x07 /* on read */
#define IDE_REG_CMD 0x07    /* on write (same offset as STATUS) */
/* Status register bits. */
#define IDE_BSY 0x80  /* device busy -- other bits are invalid while set */
#define IDE_DRDY 0x40 /* device ready */
#define IDE_DF 0x20   /* device fault */
#define IDE_ERR 0x01  /* error (details in the error register) */
#define IDE_DRQ 0x08  /* data request: device ready for a PIO data transfer */
/* ATA command opcodes, 28-bit and 48-bit LBA variants. */
#define IDE_CMD_READ28 0x20
#define IDE_CMD_WRITE28 0x30
#define IDE_CMD_READ48 0x24
#define IDE_CMD_WRITE48 0x34
#define IDE_CMD_FLUSH48 0xEA
#define IDE_CMD_FLUSH28 0xE7
#define IDE_CMD_IDENTIFY 0xEC
/* Poll the channel's status register until the device clears BSY; when `drq`
 * is set, additionally wait for DRQ to rise; when `errcheck` is set, fail if
 * the final status has DF or ERR.  `timeout` is a raw poll-iteration count,
 * not a unit of time.  Returns true on success, false on timeout/error. */
static bool ide_wait (uint16_t io, uint32_t timeout, bool drq, bool errcheck) {
uint32_t i = 0;
uint8_t status;
/* Phase 1: spin until the device drops BSY (other bits are meaningless
 * while BSY is set). */
for (;;) {
status = inb (io + IDE_REG_STATUS);
if (!(status & IDE_BSY))
break;
if (++i >= timeout)
return false;
}
/* Phase 2 (optional): wait for DRQ, bailing early on ERR. */
if (drq) {
i = 0;
while (!(status & IDE_DRQ)) {
if (status & IDE_ERR)
return false;
status = inb (io + IDE_REG_STATUS);
/* NOTE(review): 0xFF is a floating bus (no device).  Returning true here
 * lets the caller proceed and read junk data words -- confirm this is
 * intentional rather than `return false`. */
if (status == 0xFF)
return true;
if (++i >= timeout)
return false;
}
}
if (errcheck && (status & (IDE_DF | IDE_ERR)))
return false;
return true;
}
/* ~400ns settle delay: four reads of the (alternate) status port at `ctrl`.
 * Each port read takes roughly 100ns; drives need this pause after a drive
 * select before their status is valid.  Optimization is disabled so the
 * compiler cannot drop the "unused" reads. */
#pragma clang optimize off
static void ide_delay (uint16_t ctrl) {
inb (ctrl);
inb (ctrl);
inb (ctrl);
inb (ctrl);
}
#pragma clang optimize on
/* Issue the write-cache flush command matching the drive's addressing mode
 * (48-bit or 28-bit LBA). */
static void ide_flush (struct idedrv* idedrv) {
  uint8_t cmd = idedrv->lba48 ? IDE_CMD_FLUSH48 : IDE_CMD_FLUSH28;
  outb (idedrv->io + IDE_REG_CMD, cmd);
}
static void ide_irq (void* arg, void* regs, struct reschedule_ctx* rctx) {
struct idedrv* idedrv = arg;
spin_lock (&idedrv->device->lock);
struct list_node_link* node = idedrv->requests;
if (node == NULL) {
(void)inb (idedrv->io + IDE_REG_STATUS);
spin_unlock (&idedrv->device->lock);
return;
}
struct idedrv_request* req = list_entry (node, struct idedrv_request, requests_link);
struct list_node_link* sqlist_node = req->sq.proc_list;
struct proc_sq_entry* sq_entry = list_entry (sqlist_node, struct proc_sq_entry, sq_link);
struct proc* resumed_proc = sq_entry->proc;
uint8_t status = inb (idedrv->io + IDE_REG_STATUS);
if ((status & (IDE_ERR | IDE_DRQ))) {
list_remove (idedrv->requests, &req->requests_link);
spin_unlock (&idedrv->device->lock);
proc_sq_resume (resumed_proc, sq_entry, rctx);
free (req);
return;
}
spin_lock (&resumed_proc->lock);
if (req->rw == 1) {
if ((status & IDE_DRQ)) {
uint16_t* p = req->outbuffer + (req->sector_done_count * (idedrv->sector_size / 2));
insw (idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
req->sector_done_count++;
}
} else if (req->rw == 2) {
req->sector_done_count++;
if (req->sector_done_count < req->sector_count) {
uint16_t* p = req->outbuffer + (req->sector_done_count * (idedrv->sector_size / 2));
outsw (idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
}
}
if (req->sector_done_count >= req->sector_count) {
if (req->rw == 2)
ide_flush (idedrv);
list_remove (idedrv->requests, &req->requests_link);
spin_unlock (&resumed_proc->lock);
spin_unlock (&idedrv->device->lock);
free (req);
proc_sq_resume (resumed_proc, sq_entry, rctx);
return;
}
spin_unlock (&resumed_proc->lock);
spin_unlock (&idedrv->device->lock);
}
/* Probe for an ATA drive at (io, ctrl, devno) using IDENTIFY DEVICE and
 * fill in `probe`.  IDE_PROBE_AVAIL is set only when IDENTIFY succeeds;
 * on any failure `probe->flags` stays 0. */
void ide_probe (uint16_t io, uint16_t ctrl, uint8_t devno, struct ide_probe* probe) {
probe->flags = 0;
probe->sector_count = 0;
probe->sector_size = 0;
uint16_t identify_buffer[256];
uint8_t status = inb (io + IDE_REG_STATUS);
/* 0xFF = floating bus: nothing attached to this channel. */
if (status == 0xFF)
return;
/* Select master/slave (0xA0 | drive bit) and let the selection settle. */
outb (io + IDE_REG_DRIVE, 0xA0 | (devno << 4));
ide_delay (ctrl);
outb (io + IDE_REG_SECCOUNT, 0);
outb (io + IDE_REG_LBA0, 0);
outb (io + IDE_REG_LBA1, 0);
outb (io + IDE_REG_LBA2, 0);
outb (io + IDE_REG_CMD, IDE_CMD_IDENTIFY);
status = inb (io + IDE_REG_STATUS);
/* Status 0 after IDENTIFY means no device responded. */
if (status == 0)
return;
if (!ide_wait (io, 100000, true, true)) {
return;
}
/* Read the 256-word IDENTIFY DEVICE data block. */
insw (io + IDE_REG_DATA, identify_buffer, 256);
probe->flags |= IDE_PROBE_AVAIL;
/* Word 106: valid when bit 14 is set and bit 15 clear (0x4000 pattern);
 * bit 12 = logical sector size is reported in words 117-118, in 16-bit
 * words. */
if ((identify_buffer[106] & 0xC000) == 0x4000) {
if (identify_buffer[106] & (1 << 12)) {
uint32_t words_per_sector =
(uint32_t)identify_buffer[117] | ((uint32_t)identify_buffer[118] << 16);
probe->sector_size = (size_t)words_per_sector * 2;
}
}
/* Word 83 bit 10: 48-bit address feature set supported. */
if ((identify_buffer[83] & (1 << 10)) != 0)
probe->flags |= IDE_PROBE_LBA48;
if ((probe->flags & IDE_PROBE_LBA48)) {
/* Words 100-103: total addressable sectors for LBA48. */
probe->sector_count =
(size_t)((uint64_t)identify_buffer[100] | ((uint64_t)identify_buffer[101] << 16) |
((uint64_t)identify_buffer[102] << 32) | ((uint64_t)identify_buffer[103] << 48));
} else {
/* Words 60-61: total addressable sectors for LBA28. */
probe->sector_count =
(size_t)((uint64_t)identify_buffer[60] | ((uint64_t)identify_buffer[61] << 16));
}
probe->io = io;
probe->ctrl = ctrl;
probe->devno = devno;
/* Drives that don't report a logical sector size use the 512-byte default. */
if (probe->sector_size == 0)
probe->sector_size = 512;
}
/* Program the task-file registers for a transfer of `sector_count` sectors
 * starting at LBA `sector`.  When `clear` is set, write 0 to the device
 * control register first (clears nIEN so the device raises interrupts).
 * The caller issues the actual command afterwards. */
static void ide_prepare (struct idedrv* idedrv, size_t sector, uint16_t sector_count, bool clear) {
  if (clear)
    outb (idedrv->ctrl, 0x00);
  if (idedrv->lba48) {
    /* LBA48: select the drive in LBA mode (0x40), then write the
     * high-order count/LBA bytes followed by the low-order bytes into the
     * two-deep task-file FIFO. */
    outb (idedrv->io + IDE_REG_DRIVE, 0x40 | (idedrv->devno << 4));
    ide_delay (idedrv->ctrl);
    outb (idedrv->io + IDE_REG_SECCOUNT, (sector_count >> 8) & 0xFF);
    outb (idedrv->io + IDE_REG_LBA0, (sector >> 24) & 0xFF);
    outb (idedrv->io + IDE_REG_LBA1, (sector >> 32) & 0xFF);
    outb (idedrv->io + IDE_REG_LBA2, (sector >> 40) & 0xFF);
    outb (idedrv->io + IDE_REG_SECCOUNT, sector_count & 0xFF);
    outb (idedrv->io + IDE_REG_LBA0, sector & 0xFF);
    outb (idedrv->io + IDE_REG_LBA1, (sector >> 8) & 0xFF);
    outb (idedrv->io + IDE_REG_LBA2, (sector >> 16) & 0xFF);
  } else {
    /* LBA28: LBA bits 24-27 live in the low nibble of the drive register.
     * BUGFIX: mask with 0x0F, not 0xFF -- the wider mask let bits 28-31 of
     * an out-of-range sector corrupt the drive-select/mode bits (0xE0,
     * devno). */
    outb (idedrv->io + IDE_REG_DRIVE, 0xE0 | (idedrv->devno << 4) | ((sector >> 24) & 0x0F));
    ide_delay (idedrv->ctrl);
    /* In the 28-bit command set a sector count of 0 means 256 sectors. */
    uint8_t count = (sector_count == 256) ? 0 : (uint8_t)sector_count;
    outb (idedrv->io + IDE_REG_SECCOUNT, count);
    outb (idedrv->io + IDE_REG_LBA0, sector & 0xFF);
    outb (idedrv->io + IDE_REG_LBA1, (sector >> 8) & 0xFF);
    outb (idedrv->io + IDE_REG_LBA2, (sector >> 16) & 0xFF);
  }
}
/* Device-framework init hook.  Allocates the per-channel driver state from
 * the struct idedrv_init in `arg`, publishes it via device->udata, and
 * routes/attaches the channel interrupt (legacy IRQ 14 for the primary
 * channel, 15 for the secondary).  Returns false on allocation failure. */
bool idedrv_init (struct device* device, void* arg, struct proc* proc, struct reschedule_ctx* rctx) {
  (void)proc, (void)rctx;
  struct idedrv_init* init = arg;
  struct idedrv* idedrv = malloc (sizeof (*idedrv));
  if (idedrv == NULL)
    return false;
  idedrv->device = device;
  idedrv->lba48 = init->lba48;
  idedrv->sector_count = init->sector_count;
  idedrv->sector_size = init->sector_size;
  idedrv->io = init->io;
  idedrv->ctrl = init->ctrl;
  idedrv->devno = init->devno;
  idedrv->primscnd = init->primscnd;
  /* BUGFIX: malloc does not zero memory, and ide_irq / idedrv_fini treat
   * idedrv->requests == NULL as "no pending requests" -- without this
   * initialization the IRQ handler would chase a garbage pointer. */
  idedrv->requests = NULL;
  device->udata = idedrv;
  if (idedrv->primscnd == 1) {
    ioapic_route_irq (IDE_DRIVE_PRIM, 14, 0, thiscpu->lapic_id);
    irq_attach (&ide_irq, idedrv, IDE_DRIVE_PRIM);
  } else if (idedrv->primscnd == 2) {
    ioapic_route_irq (IDE_DRIVE_SCND, 15, 0, thiscpu->lapic_id);
    irq_attach (&ide_irq, idedrv, IDE_DRIVE_SCND);
  }
  return true;
}
/* Device-framework teardown hook: abort every queued request (resuming the
 * process waiting on it), detach the channel IRQ, and free the driver
 * state.  device->udata is left dangling; the framework presumably clears
 * it after this returns. */
void idedrv_fini (struct device* device, struct proc* proc, struct reschedule_ctx* rctx) {
struct idedrv* idedrv = device->udata;
struct list_node_link* req_link, *tmp_req_link;
/* Safe iteration (tmp holds next) because the body removes req_link. */
list_foreach (idedrv->requests, req_link, tmp_req_link) {
struct idedrv_request* req = list_entry (req_link, struct idedrv_request, requests_link);
list_remove (idedrv->requests, &req->requests_link);
struct proc_sq_entry* sq_entry = list_entry (req->sq.proc_list, struct proc_sq_entry, sq_link);
/* NOTE(review): this resumes with the caller's `proc`, while ide_irq
 * resumes with sq_entry->proc (the suspended process).  Confirm the
 * first argument of proc_sq_resume is really meant to differ here. */
proc_sq_resume (proc, sq_entry, rctx);
free (req);
}
irq_detach (idedrv->primscnd == 1 ? IDE_DRIVE_PRIM : IDE_DRIVE_SCND);
free (idedrv);
}
/* Read sectors from the drive using polled PIO.
 *   a1: size_t* -- starting sector (LBA)
 *   a2: size_t* -- number of sectors
 *   a3: destination buffer (sector count * sector size bytes)
 *   a4: unused
 * Returns ST_OK, or a negative status code on bad arguments, out-of-range
 * request, or device error/timeout. */
int idedrv_read (struct device* device, struct proc* proc, struct reschedule_ctx* rctx, void* a1,
void* a2, void* a3, void* a4) {
  (void)proc, (void)rctx, (void)a4;
  if (a1 == NULL || a2 == NULL || a3 == NULL)
    return -ST_BAD_ADDRESS_SPACE;
  size_t sector = *(size_t*)a1;
  size_t sector_count = *(size_t*)a2;
  uint16_t* buffer = a3;
  struct idedrv* idedrv = device->udata;
  /* Overflow-safe form of `sector + sector_count > idedrv->sector_count`
   * (the sum could wrap around SIZE_MAX and defeat the check). */
  if (sector > idedrv->sector_count || sector_count > idedrv->sector_count - sector)
    return -ST_OOB_ERROR;
  spin_lock (&proc->lock);
  bool is_kproc = (proc->flags & PROC_KPROC) != 0;
  spin_unlock (&proc->lock);
  (void)is_kproc; /* only consulted by the disabled IRQ-driven path below */
  /* Polling path (currently used for all processes). */
  ide_prepare (idedrv, sector, sector_count, false);
  uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
  outb (idedrv->io + IDE_REG_CMD, cmd);
  /* BUGFIX: loop index is size_t -- the old uint16_t index wrapped and
   * spun forever for counts >= 65536 (note ide_prepare still takes a
   * uint16_t count, so very large requests remain limited upstream). */
  for (size_t s = 0; s < sector_count; s++) {
    if (!ide_wait (idedrv->io, 100000, true, true))
      return -ST_XDRV_READ_ERROR;
    insw (idedrv->io + IDE_REG_DATA, buffer + (s * (idedrv->sector_size / 2)), (idedrv->sector_size / 2));
  }
  return ST_OK;
  /* IRQ-driven path for non-kernel processes, disabled pending debugging:
  struct idedrv_request* req = malloc (sizeof (*req));
  if (req == NULL)
    return -ST_OOM_ERROR;
  memset (req, 0, sizeof (*req));
  req->outbuffer = buffer;
  req->sector_count = sector_count;
  req->sector_done_count = 0;
  req->rw = 1;
  list_append (idedrv->requests, &req->requests_link);
  proc_sq_suspend (proc, &req->sq, NULL, rctx);
  ide_prepare (idedrv, sector, sector_count, true);
  outb (idedrv->io + IDE_REG_CMD, idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28);
  return ST_OK;
  */
}
/* Write sectors to the drive using polled PIO.
 *   a1: size_t* -- starting sector (LBA)
 *   a2: size_t* -- number of sectors
 *   a3: source buffer (sector count * sector size bytes)
 *   a4: unused
 * Returns ST_OK, or a negative status code on bad arguments, out-of-range
 * request, or device error/timeout.  NOTE: no cache flush is issued on the
 * polling path (ide_flush is only used by the IRQ path). */
int idedrv_write (struct device* device, struct proc* proc, struct reschedule_ctx* rctx, void* a1,
void* a2, void* a3, void* a4) {
  (void)proc, (void)rctx, (void)a4;
  if (a1 == NULL || a2 == NULL || a3 == NULL)
    return -ST_BAD_ADDRESS_SPACE;
  size_t sector = *(size_t*)a1;
  size_t sector_count = *(size_t*)a2;
  uint16_t* buffer = a3;
  struct idedrv* idedrv = device->udata;
  /* Overflow-safe form of `sector + sector_count > idedrv->sector_count`
   * (the sum could wrap around SIZE_MAX and defeat the check). */
  if (sector > idedrv->sector_count || sector_count > idedrv->sector_count - sector)
    return -ST_OOB_ERROR;
  spin_lock (&proc->lock);
  bool is_kproc = (proc->flags & PROC_KPROC) != 0;
  spin_unlock (&proc->lock);
  (void)is_kproc; /* only consulted by the disabled IRQ-driven path below */
  /* Polling path (currently used for all processes). */
  ide_prepare (idedrv, sector, sector_count, false);
  uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
  outb (idedrv->io + IDE_REG_CMD, cmd);
  /* BUGFIX: loop index is size_t -- the old uint16_t index wrapped and
   * spun forever for counts >= 65536 (note ide_prepare still takes a
   * uint16_t count, so very large requests remain limited upstream). */
  for (size_t s = 0; s < sector_count; s++) {
    if (!ide_wait (idedrv->io, 100000, true, true))
      return -ST_XDRV_WRITE_ERROR;
    outsw (idedrv->io + IDE_REG_DATA, buffer + (s * (idedrv->sector_size / 2)), (idedrv->sector_size / 2));
  }
  return ST_OK;
  /* IRQ-driven path for non-kernel processes, disabled pending debugging:
  struct idedrv_request* req = malloc (sizeof (*req));
  if (req == NULL)
    return -ST_OOM_ERROR;
  memset (req, 0, sizeof (*req));
  req->outbuffer = buffer;
  req->sector_count = sector_count;
  req->sector_done_count = 0;
  req->rw = 2;
  list_append (idedrv->requests, &req->requests_link);
  proc_sq_suspend (proc, &req->sq, NULL, rctx);
  ide_prepare (idedrv, sector, sector_count, true);
  outb (idedrv->io + IDE_REG_CMD, idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28);
  if (!ide_wait (idedrv->io, 100000, true, true)) {
    list_remove (idedrv->requests, &req->requests_link);
    struct proc_sq_entry* sq_entry = list_entry (req->sq.proc_list, struct proc_sq_entry, sq_link);
    proc_sq_resume (proc, sq_entry, rctx);
    free (req);
    return -ST_XDRV_WRITE_ERROR;
  }
  outsw (idedrv->io + IDE_REG_DATA, buffer, idedrv->sector_size / 2);
  return ST_OK;
  */
}
/* Report the driver's device-type identifier through a1 (an int*).
 * Returns ST_OK, or -ST_BAD_ADDRESS_SPACE when a1 is NULL. */
int idedrv_get_device_type (struct device* device, struct proc* proc, struct reschedule_ctx* rctx,
void* a1, void* a2, void* a3, void* a4) {
  (void)proc, (void)rctx, (void)device, (void)a2, (void)a3, (void)a4;
  if (a1 == NULL)
    return -ST_BAD_ADDRESS_SPACE;
  *(int*)a1 = XDRV_TYPE_IDEDRV;
  return ST_OK;
}
int idedrv_get_sector_size (struct device* device, struct proc* proc, struct reschedule_ctx* rctx,
void* a1, void* a2, void* a3, void* a4) {
(void)proc, (void)rctx, (void)a2, (void)a3, (void)a4;
if (a1 == NULL)
return -ST_BAD_ADDRESS_SPACE;
size_t* secsize = (size_t*)a1;
struct idedrv* idedrv = device->udata;
*secsize = idedrv->sector_size;
return ST_OK;
}
int idedrv_get_size (struct device* device, struct proc* proc, struct reschedule_ctx* rctx,
void* a1, void* a2, void* a3, void* a4) {
(void)proc, (void)rctx, (void)a2, (void)a3, (void)a4;
if (a1 == NULL)
return -ST_BAD_ADDRESS_SPACE;
size_t* size = (size_t*)a1;
struct idedrv* idedrv = device->udata;
*size = idedrv->sector_size * idedrv->sector_count;
return ST_OK;
}