PCI-IDE interrupt-based driver fixes; works on QEMU

This commit is contained in:
2026-03-15 10:15:01 +01:00
parent 3c10b76b3f
commit 5b432b2b01
6 changed files with 167 additions and 132 deletions

View File

@@ -41,13 +41,12 @@
#define IDE_READ 1
#define IDE_WRITE 2
#define IDE_FLUSH 3
static bool ide_wait (uint16_t io, uint32_t timeout, bool drq, bool errcheck) {
uint32_t i = 0;
uint8_t status;
for (;;) {
while (1) {
status = inb (io + IDE_REG_STATUS);
if (!(status & IDE_BSY))
@@ -57,26 +56,26 @@ static bool ide_wait (uint16_t io, uint32_t timeout, bool drq, bool errcheck) {
return false;
}
if (drq) {
i = 0;
while (!(status & IDE_DRQ) || (status & IDE_BSY)) {
if (status & IDE_ERR)
return false;
status = inb (io + IDE_REG_STATUS);
if (status == 0xFF)
return false;
if (++i >= timeout)
return false;
}
}
if (errcheck && (status & (IDE_DF | IDE_ERR)))
if (errcheck && (status & (IDE_ERR | IDE_DF)))
return false;
return true;
if (!drq)
return true;
i = 0;
while (1) {
status = inb (io + IDE_REG_STATUS);
if (status & (IDE_ERR | IDE_DF))
return false;
if (status & IDE_DRQ)
return true;
if (++i >= timeout)
return false;
}
}
#pragma clang optimize off
@@ -92,60 +91,50 @@ static void ide_irq (void* arg, void* regs, bool user, struct reschedule_ctx* rc
uint64_t fd, fp;
struct idedrv* idedrv = arg;
DEBUG ("1\n");
spin_lock (&idedrv->device->lock, &fd);
struct list_node_link* node = idedrv->requests;
struct idedrv_request* req = idedrv->current_req;
DEBUG ("2\n");
if (node == NULL) {
if (req == NULL) {
(void)inb (idedrv->io + IDE_REG_STATUS);
spin_unlock (&idedrv->device->lock, fd);
return;
}
struct idedrv_request* req = list_entry (node, struct idedrv_request, requests_link);
DEBUG ("3\n");
uint8_t status = inb (idedrv->io + IDE_REG_STATUS);
if (req->type == IDE_FLUSH) {
list_remove (idedrv->requests, &req->requests_link);
spin_unlock (&idedrv->device->lock, fd);
atomic_store (&req->done, 1);
return;
}
if ((status & (IDE_ERR | IDE_DF))) {
list_remove (idedrv->requests, &req->requests_link);
spin_unlock (&idedrv->device->lock, fd);
atomic_store (&req->done, 1);
idedrv->current_req = NULL;
spin_unlock (&idedrv->device->lock, fd);
return;
}
DEBUG ("4\n");
if (!(status & IDE_BSY) && (status & IDE_DRQ)) {
switch (req->type) {
case IDE_READ: {
uint16_t* p = req->buffer + (req->sector_done_count * (idedrv->sector_size / 2));
if ((status & IDE_DRQ) && (req->sector_done_count < req->sector_count)) {
uint16_t* p = req->buffer + (req->sector_done_count * (idedrv->sector_size / 2));
if (req->type == IDE_READ)
insw (idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
req->sector_done_count++;
} break;
case IDE_WRITE: {
req->sector_done_count++;
if (req->sector_done_count < req->sector_count) {
uint16_t* p = req->buffer + (req->sector_done_count * (idedrv->sector_size / 2));
outsw (idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
}
} break;
}
else
outsw (idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
req->sector_done_count++;
DEBUG ("4\n");
}
if (req->sector_done_count >= req->sector_count) {
list_remove (idedrv->requests, &req->requests_link);
spin_unlock (&idedrv->device->lock, fd);
if ((req->sector_done_count >= req->sector_count)) {
atomic_store (&req->done, 1);
return;
idedrv->current_req = NULL;
}
DEBUG ("6\n");
spin_unlock (&idedrv->device->lock, fd);
DEBUG ("7\n");
}
void ide_probe (uint16_t io, uint16_t ctrl, uint8_t devno, struct ide_probe* probe) {
@@ -211,14 +200,21 @@ void ide_probe (uint16_t io, uint16_t ctrl, uint8_t devno, struct ide_probe* pro
probe->sector_size = 512;
}
static void ide_prepare (struct idedrv* idedrv, size_t sector, uint16_t sector_count, bool clear) {
if (clear)
outb (idedrv->ctrl, 0x00);
static void ide_prepare (struct idedrv* idedrv, size_t sector, uint16_t sector_count,
bool irq_enable) {
uint8_t ctrl = inb (idedrv->ctrl);
if (irq_enable)
ctrl &= ~0x02;
else
ctrl |= 0x02;
outb (idedrv->ctrl, ctrl);
if (idedrv->lba48) {
outb (idedrv->io + IDE_REG_DRIVE, 0x40 | (idedrv->devno << 4));
ide_delay (idedrv->ctrl);
ide_wait (idedrv->io, 100000, false, false);
outb (idedrv->io + IDE_REG_SECCOUNT, (sector_count >> 8) & 0xFF);
outb (idedrv->io + IDE_REG_LBA0, (sector >> 24) & 0xFF);
outb (idedrv->io + IDE_REG_LBA1, (sector >> 32) & 0xFF);
@@ -229,9 +225,11 @@ static void ide_prepare (struct idedrv* idedrv, size_t sector, uint16_t sector_c
outb (idedrv->io + IDE_REG_LBA1, (sector >> 8) & 0xFF);
outb (idedrv->io + IDE_REG_LBA2, (sector >> 16) & 0xFF);
} else {
outb (idedrv->io + IDE_REG_DRIVE, 0xE0 | (idedrv->devno << 4) | ((sector >> 24) & 0xFF));
outb (idedrv->io + IDE_REG_DRIVE, 0xE0 | (idedrv->devno << 4) | ((sector >> 24) & 0x0F));
ide_delay (idedrv->ctrl);
ide_wait (idedrv->io, 100000, false, false);
uint8_t count = (sector_count == 256) ? 0 : (uint8_t)sector_count;
outb (idedrv->io + IDE_REG_SECCOUNT, count);
outb (idedrv->io + IDE_REG_LBA0, sector & 0xFF);
@@ -258,17 +256,12 @@ bool idedrv_init (struct device* device, void* arg, struct proc* proc,
idedrv->io = init->io;
idedrv->ctrl = init->ctrl;
idedrv->devno = init->devno;
idedrv->primscnd = init->primscnd;
idedrv->irq = init->irq;
idedrv->current_req = NULL;
device->udata = idedrv;
if (idedrv->primscnd == 1) {
ioapic_route_irq (IDE_DRIVE_PRIM, 14, 0, thiscpu->lapic_id);
irq_attach (&ide_irq, idedrv, IDE_DRIVE_PRIM);
} else if (idedrv->primscnd == 2) {
ioapic_route_irq (IDE_DRIVE_SCND, 15, 0, thiscpu->lapic_id);
irq_attach (&ide_irq, idedrv, IDE_DRIVE_SCND);
}
irq_attach (&ide_irq, idedrv, idedrv->irq);
return true;
}
@@ -276,14 +269,12 @@ bool idedrv_init (struct device* device, void* arg, struct proc* proc,
void idedrv_fini (struct device* device, struct proc* proc, struct reschedule_ctx* rctx) {
struct idedrv* idedrv = device->udata;
struct list_node_link *req_link, *tmp_req_link;
list_foreach (idedrv->requests, req_link, tmp_req_link) {
struct idedrv_request* req = list_entry (req_link, struct idedrv_request, requests_link);
list_remove (idedrv->requests, &req->requests_link);
free (req);
if (idedrv->current_req != NULL) {
free (idedrv->current_req);
idedrv->current_req = NULL;
}
irq_detach (idedrv->primscnd == 1 ? IDE_DRIVE_PRIM : IDE_DRIVE_SCND);
irq_detach (idedrv->irq);
free (idedrv);
}
@@ -300,9 +291,14 @@ int idedrv_read (struct device* device, struct proc* proc, struct reschedule_ctx
struct idedrv* idedrv = device->udata;
DEBUG ("1\n");
if (sector + sector_count > idedrv->sector_count)
return -ST_OOB_ERROR;
if (!ide_wait (idedrv->io, 100000, false, false))
return -ST_XDRV_READ_ERROR;
struct idedrv_request* req = malloc (sizeof (*req));
if (req == NULL)
@@ -313,23 +309,18 @@ int idedrv_read (struct device* device, struct proc* proc, struct reschedule_ctx
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_READ;
DEBUG ("2\n");
list_append (idedrv->requests, &req->requests_link);
idedrv->current_req = req;
ide_prepare (idedrv, sector, sector_count, true);
DEBUG ("3\n");
DEBUG ("4\n");
uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
outb (idedrv->io + IDE_REG_CMD, cmd);
if (!ide_wait (idedrv->io, 100000, true, true)) {
list_remove (idedrv->requests, &req->requests_link);
free (req);
return -ST_XDRV_READ_ERROR;
}
insw (idedrv->io + IDE_REG_DATA, buffer, idedrv->sector_size / 2);
req->sector_done_count = 1;
DEBUG ("5\n");
spin_unlock (&device->lock, *lockflags);
@@ -337,6 +328,7 @@ int idedrv_read (struct device* device, struct proc* proc, struct reschedule_ctx
spin_lock_relax ();
spin_lock (&device->lock, lockflags);
DEBUG ("6\n");
free (req);
@@ -360,33 +352,21 @@ int idedrv_write (struct device* device, struct proc* proc, struct reschedule_ct
if (sector + sector_count > idedrv->sector_count)
return -ST_OOB_ERROR;
struct idedrv_request* req = malloc (sizeof (*req));
if (!ide_wait (idedrv->io, 100000, false, false))
return -ST_XDRV_WRITE_ERROR;
struct idedrv_request* flushreq = malloc (sizeof (*req));
struct idedrv_request* req = malloc (sizeof (*req));
if (req == NULL)
return -ST_OOM_ERROR;
if (flushreq == NULL) {
free (req);
return -ST_OOM_ERROR;
}
memset (req, 0, sizeof (*req));
req->buffer = buffer;
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_WRITE;
list_append (idedrv->requests, &req->requests_link);
memset (flushreq, 0, sizeof (*flushreq));
flushreq->buffer = NULL;
flushreq->sector_count = 0;
flushreq->sector_done_count = 0;
flushreq->type = IDE_FLUSH;
list_append (idedrv->requests, &flushreq->requests_link);
idedrv->current_req = req;
ide_prepare (idedrv, sector, sector_count, true);
@@ -394,7 +374,7 @@ int idedrv_write (struct device* device, struct proc* proc, struct reschedule_ct
outb (idedrv->io + IDE_REG_CMD, cmd);
if (!ide_wait (idedrv->io, 100000, true, true)) {
list_remove (idedrv->requests, &req->requests_link);
idedrv->current_req = NULL;
free (req);
return -ST_XDRV_WRITE_ERROR;
}
@@ -410,20 +390,24 @@ int idedrv_write (struct device* device, struct proc* proc, struct reschedule_ct
spin_lock (&device->lock, lockflags);
free (req);
uint8_t ctrl = inb (idedrv->ctrl);
ctrl |= 0x02;
outb (idedrv->ctrl, ctrl);
if (idedrv->lba48)
outb (idedrv->io + IDE_REG_CMD, IDE_CMD_FLUSH48);
else
outb (idedrv->io + IDE_REG_CMD, IDE_CMD_FLUSH28);
spin_unlock (&device->lock, *lockflags);
uint8_t status;
do {
status = inb (idedrv->io + IDE_REG_STATUS);
} while (status & IDE_BSY);
while (!atomic_load (&flushreq->done))
spin_lock_relax ();
spin_lock (&device->lock, lockflags);
free (req);
free (flushreq);
if (status & (IDE_ERR | IDE_DF))
return -ST_XDRV_WRITE_ERROR;
return ST_OK;
}