idedrv: Implement DMA reading/writing without IRQ support
All checks were successful
Build ISO image / build-and-deploy (push) Successful in 1m35s
Build documentation / build-and-deploy (push) Successful in 38s

This commit is contained in:
2026-04-26 18:47:05 +02:00
parent 05ab0d4c86
commit a2ed5c2b8a
4 changed files with 326 additions and 89 deletions

View File

@@ -36,6 +36,17 @@
#define PCI_BAR_MEM64 0x04
#define PCI_BAR_PREFETCH 0x08
#define PCI_CMD_IOSPACE 0
#define PCI_CMD_MEMSPACE 1
#define PCI_CMD_BUSMASTER 2
#define PCI_CMD_SPECIAL_CYCLES 3
#define PCI_CMD_MEMWRIVENA 4
#define PCI_CMD_VGA_PALT_SNOOP 5
#define PCI_CMD_PARITY_ERR_RESP 6
#define PCI_CMD_SERR 8
#define PCI_CMD_FBBENA 9
#define PCI_CMD_INTRDISABLE 10
#define PCI_CAP_MSI 0x05
struct pci_vendor {

View File

@@ -57,7 +57,9 @@ static void ide_make_device(struct proc* proc, struct reschedule_ctx* rctx,
.ctrl = probe.ctrl,
.devno = probe.devno,
.irq = probe.irq,
.bmbase = probe.bmbase,
.irqs_support = probe.irqs_support,
.bm_support = probe.bm_support,
};
struct device* ide = device_create(DEVICE_TYPE_DRIVE, device_key, ops, lengthof(ops),
@@ -68,7 +70,10 @@ static void ide_make_device(struct proc* proc, struct reschedule_ctx* rctx,
bool pci_ide_init(struct proc* proc, struct reschedule_ctx* rctx, struct pci_info pci_info) {
uint16_t pci_cmd = pci_read16(pci_info.bus, pci_info.slot, pci_info.func, PCI_COMMAND);
uint16_t new_cmd = (pci_cmd | (1 << 0) | (1 << 2)) & ~(1 << 10);
uint16_t new_cmd = pci_cmd;
new_cmd |= (1 << PCI_CMD_IOSPACE);
new_cmd |= (1 << PCI_CMD_BUSMASTER);
new_cmd &= ~(1 << PCI_CMD_INTRDISABLE);
if (pci_cmd != new_cmd) {
pci_write16(pci_info.bus, pci_info.slot, pci_info.func, PCI_COMMAND, new_cmd);
@@ -79,7 +84,14 @@ bool pci_ide_init(struct proc* proc, struct reschedule_ctx* rctx, struct pci_inf
uint8_t progif = pci_read8(pci_info.bus, pci_info.slot, pci_info.func, PCI_PROG_IF);
DEBUG("progif: %s\n", progif_msg[progif]);
uint16_t pcmd, pctrl, scmd, sctrl;
uint16_t pcmd, pctrl, pbmbase, scmd, sctrl, sbmbase;
bool irqs_support = false;
bool bm_support = false;
uint32_t bar4 = pci_read32(pci_info.bus, pci_info.slot, pci_info.func, PCI_BAR4);
uint16_t bmbase = (uint16_t)(bar4 & 0xFFFC);
bm_support = (bmbase != 0) && (bar4 & PCI_BAR_IO);
if ((progif & 0x01)) {
uint32_t bar0 = pci_read32(pci_info.bus, pci_info.slot, pci_info.func, PCI_BAR0);
@@ -99,6 +111,7 @@ bool pci_ide_init(struct proc* proc, struct reschedule_ctx* rctx, struct pci_inf
pcmd = 0x1F0;
pctrl = 0x3F6;
}
pbmbase = bmbase;
if ((progif & 0x04)) {
uint32_t bar2 = pci_read32(pci_info.bus, pci_info.slot, pci_info.func, PCI_BAR2);
@@ -118,8 +131,7 @@ bool pci_ide_init(struct proc* proc, struct reschedule_ctx* rctx, struct pci_inf
scmd = 0x170;
sctrl = 0x376;
}
bool irqs_support = false;
sbmbase = bmbase + 8;
if ((progif & 0x05)) {
irqs_support = false;
@@ -134,17 +146,18 @@ bool pci_ide_init(struct proc* proc, struct reschedule_ctx* rctx, struct pci_inf
}
}
DEBUG("pcmd=%x, pctrl=%x\n", pcmd, pctrl);
DEBUG("scmd=%x, sctrl=%x\n", scmd, sctrl);
DEBUG("IRQ support=%d\n", irqs_support);
DEBUG("pcmd=%x, pctrl=%x, pbmbase=%x\n", pcmd, pctrl, pbmbase);
DEBUG("scmd=%x, sctrl=%x, sbmbase=%x\n", scmd, sctrl, sbmbase);
DEBUG("IRQ support=%d, Bus mastering supported=%d\n", irqs_support, bm_support);
uint16_t channels[2][3] = {{pcmd, pctrl, INTR_IDE_DRIVE_PRIM},
{scmd, sctrl, INTR_IDE_DRIVE_SCND}};
uint16_t channels[2][4] = {{pcmd, pctrl, INTR_IDE_DRIVE_PRIM, pbmbase},
{scmd, sctrl, INTR_IDE_DRIVE_SCND, sbmbase}};
for (size_t i = 0; i < lengthof(channels); i++) {
uint16_t cmd = channels[i][0];
uint16_t ctrl = channels[i][1];
uint8_t irq = channels[i][2];
uint16_t bmbase = channels[i][3];
for (size_t dev = 0; dev < 2; dev++) {
ide_probe(cmd, ctrl, dev, &probe);
@@ -153,6 +166,8 @@ bool pci_ide_init(struct proc* proc, struct reschedule_ctx* rctx, struct pci_inf
probe.io = cmd;
probe.irq = irq;
probe.irqs_support = irqs_support;
probe.bmbase = bmbase;
probe.bm_support = bm_support;
if ((probe.flags & IDE_PROBE_AVAIL))
ide_make_device(proc, rctx, probe);

View File

@@ -1,15 +1,20 @@
#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <device/device.h>
#include <device/storage/idedrv.h>
#include <device/storage/partitions.h>
#include <devices.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/malloc.h>
#include <mm/pmm.h>
#include <page_size.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <status.h>
@@ -31,13 +36,25 @@
#define IDE_ERR 0x01
#define IDE_DRQ 0x08
#define IDE_CMD_READ28 0x20
#define IDE_CMD_WRITE28 0x30
#define IDE_CMD_READ48 0x24
#define IDE_CMD_WRITE48 0x34
#define IDE_CMD_FLUSH48 0xEA
#define IDE_CMD_FLUSH28 0xE7
#define IDE_CMD_IDENTIFY 0xEC
#define IDE_CMD_READ28 0x20
#define IDE_CMD_WRITE28 0x30
#define IDE_CMD_READ48 0x24
#define IDE_CMD_WRITE48 0x34
#define IDE_CMD_FLUSH48 0xEA
#define IDE_CMD_FLUSH28 0xE7
#define IDE_CMD_IDENTIFY 0xEC
#define IDE_CMD_READ_DMA28 0xC8
#define IDE_CMD_WRITE_DMA28 0xCA
#define IDE_CMD_READ_DMA48 0x25
#define IDE_CMD_WRITE_DMA48 0x35
#define IDE_DMA_REG_CMD 0x00
#define IDE_DMA_REG_STATUS 0x02
#define IDE_DMA_REG_PRDT 0x04
#define IDE_DMA_STATUS_ACTIVE 0x01
#define IDE_DMA_STATUS_ERROR 0x02
#define IDE_DMA_STATUS_INTR 0x04
#define IDE_READ 1
#define IDE_WRITE 2
@@ -233,6 +250,8 @@ static void ide_prepare(struct idedrv* idedrv, size_t sector, uint16_t sector_co
}
DEFINE_DEVICE_INIT(idedrv_init) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct idedrv_init* init = arg;
struct idedrv* idedrv = malloc(sizeof(*idedrv));
@@ -240,6 +259,8 @@ DEFINE_DEVICE_INIT(idedrv_init) {
if (idedrv == NULL)
return false;
memset(idedrv, 0, sizeof(*idedrv));
idedrv->device = device;
idedrv->lba48 = init->lba48;
idedrv->sector_count = init->sector_count;
@@ -250,6 +271,32 @@ DEFINE_DEVICE_INIT(idedrv_init) {
idedrv->irq = init->irq;
idedrv->current_req = NULL;
idedrv->irqs_support = init->irqs_support;
idedrv->bmbase = init->bmbase;
idedrv->bm_support = init->bm_support;
if (idedrv->bm_support) {
idedrv->prdt_phys = pmm_alloc(1);
if (idedrv->prdt_phys >= 0xFFFFFFFF) {
pmm_free(idedrv->prdt_phys, 1);
free(idedrv);
return false;
}
idedrv->prdt_entry_count = PAGE_SIZE / sizeof(struct ide_prd_entry);
idedrv->prdt = (struct ide_prd_entry*)((uintptr_t)hhdm->offset + idedrv->prdt_phys);
idedrv->bounce_buffer_phys = pmm_alloc_aligned(16, 16);
if (idedrv->bounce_buffer_phys >= 0xFFFFFFFF) {
pmm_free(idedrv->bounce_buffer_phys, 16);
pmm_free(idedrv->prdt_phys, 1);
free(idedrv);
return false;
}
idedrv->bounce_buffer = (void*)((uintptr_t)hhdm->offset + idedrv->bounce_buffer_phys);
}
device->udata = idedrv;
@@ -270,9 +317,120 @@ DEFINE_DEVICE_FINI(idedrv_fini) {
if (idedrv->irqs_support)
irq_detach(idedrv->irq);
if (idedrv->bm_support) {
pmm_free(idedrv->bounce_buffer_phys, 16);
pmm_free(idedrv->prdt_phys, 16);
}
free(idedrv);
}
/*
 * IRQ-driven PIO read path: allocate a request, publish it on the device,
 * issue the ATA READ command, then release the device lock and spin until
 * the IRQ handler marks the request done.
 *
 * idedrv        - driver state; must be locked by the caller (lockflags
 *                 holds the saved flags for idedrv->device->lock).
 * sector        - first LBA to read.
 * sector_count  - number of sectors to transfer.
 * buffer        - destination; the IRQ handler copies data into it.
 * lockflags     - in/out saved IRQ flags for the device spinlock.
 *
 * Returns ST_OK on success or -ST_OOM_ERROR if the request cannot be
 * allocated. NOTE(review): idedrv->current_req is not cleared here —
 * presumably the IRQ completion handler resets it; confirm.
 */
static int idedrv_do_read_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
                               void* buffer, uint64_t* lockflags) {
    struct idedrv_request* req = malloc(sizeof(*req));
    if (req == NULL)
        return -ST_OOM_ERROR;
    memset(req, 0, sizeof(*req));
    req->buffer = buffer;
    req->sector_count = sector_count;
    req->sector_done_count = 0;
    req->type = IDE_READ;
    /* Publish the request before issuing the command so the IRQ handler
     * can find it as soon as the first interrupt fires. */
    idedrv->current_req = req;
    ide_prepare(idedrv, sector, sector_count, true);
    uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
    outb(idedrv->io + IDE_REG_CMD, cmd);
    /* Drop the device lock while waiting so the IRQ handler can take it
     * to service the transfer; re-acquire before returning to the caller,
     * which still expects the lock held. */
    spin_unlock(&idedrv->device->lock, *lockflags);
    while (!atomic_load(&req->done))
        spin_lock_relax();
    spin_lock(&idedrv->device->lock, lockflags);
    free(req);
    return ST_OK;
}
/*
 * Polled read path (no IRQs). With bus-mastering support the transfer is
 * done via DMA into a driver-owned bounce buffer, chunked so each chunk
 * fits the 16-page bounce buffer; the bus-master status register is polled
 * until the controller deasserts the ACTIVE bit. Without bus mastering the
 * function falls back to polled PIO, reading one sector at a time with
 * insw after waiting for DRQ.
 *
 * Returns ST_OK on success or -ST_XDRV_READ_ERROR on a device/DMA error.
 */
static int idedrv_do_read_no_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
                                  void* buffer) {
    if (idedrv->bm_support) {
        size_t sectors_done = 0;
        while (sectors_done < sector_count) {
            /* Cap each chunk at the bounce-buffer size (16 pages). */
            size_t chunk_sectors = sector_count - sectors_done;
            size_t max_chunk = (16 * PAGE_SIZE) / idedrv->sector_size;
            if (chunk_sectors > max_chunk)
                chunk_sectors = max_chunk;
            size_t byte_count = chunk_sectors * idedrv->sector_size;
            /* Single-entry PRD table: point it at the bounce buffer and
             * mark end-of-table (bit 15 of rsvd_eot).
             * NOTE(review): byte_count is truncated to 16 bits; the caller
             * rejects transfers >= 16*PAGE_SIZE, so this presumably never
             * truncates — confirm that invariant holds for all callers. */
            idedrv->prdt[0].phys_addr = (uint32_t)idedrv->bounce_buffer_phys;
            idedrv->prdt[0].size = (uint16_t)byte_count;
            idedrv->prdt[0].rsvd_eot = 0x8000;
            outl(idedrv->bmbase + IDE_DMA_REG_PRDT, (uint32_t)idedrv->prdt_phys);
            /* 0x08 selects device-to-memory (read) direction; engine not
             * started yet. */
            outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x08);
            /* Writing the INTR/ERROR bits back clears them (write-1-to-clear). */
            uint8_t status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
            outb(idedrv->bmbase + IDE_DMA_REG_STATUS,
                 status | IDE_DMA_STATUS_INTR | IDE_DMA_STATUS_ERROR);
            /* Program the drive's task-file registers, then issue the DMA
             * read command and start the bus-master engine (bit 0). */
            ide_prepare(idedrv, sector + sectors_done, chunk_sectors, false);
            uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ_DMA48 : IDE_CMD_READ_DMA28;
            outb(idedrv->io + IDE_REG_CMD, cmd);
            outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x08 | 0x01);
            /* Poll until the engine goes inactive; bail out (stopping the
             * engine) if the controller reports a DMA error. */
            for (;;) {
                uint8_t bm_status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
                if (!(bm_status & IDE_DMA_STATUS_ACTIVE))
                    break;
                if ((bm_status & IDE_DMA_STATUS_ERROR)) {
                    outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
                    return -ST_XDRV_READ_ERROR;
                }
                spin_lock_relax();
            }
            /* Stop the engine and read the drive status register to
             * acknowledge the transfer. */
            outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
            inb(idedrv->io + IDE_REG_STATUS);
            /* Copy this chunk out of the bounce buffer into the caller's
             * buffer at the matching sector offset. */
            memcpy((void*)((uint16_t*)buffer + (sectors_done * (idedrv->sector_size / 2))),
                   idedrv->bounce_buffer, byte_count);
            sectors_done += chunk_sectors;
        }
    } else {
        /* Polled PIO fallback: wait for DRQ, then read each sector as
         * 16-bit words directly from the data port. */
        ide_prepare(idedrv, sector, sector_count, false);
        uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
        outb(idedrv->io + IDE_REG_CMD, cmd);
        for (uint16_t s = 0; s < sector_count; s++) {
            if (!ide_wait(idedrv->io, 100000, true, true))
                return -ST_XDRV_READ_ERROR;
            uint16_t* p = (uint16_t*)buffer + (s * (idedrv->sector_size / 2));
            insw(idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
        }
    }
    return ST_OK;
}
DEFINE_DEVICE_OP(idedrv_read) {
if (a1 == NULL || a2 == NULL || a3 == NULL)
return -ST_BAD_ADDRESS_SPACE;
@@ -286,51 +444,126 @@ DEFINE_DEVICE_OP(idedrv_read) {
if (sector + sector_count > idedrv->sector_count)
return -ST_OOB_ERROR;
if (idedrv->bm_support && ((sector_count * idedrv->sector_size) >= 16 * PAGE_SIZE))
return -ST_OOB_ERROR;
if (idedrv->current_req != NULL)
return -ST_TRY_AGAIN;
if (!ide_wait(idedrv->io, 100000, false, false))
return -ST_XDRV_READ_ERROR;
if (idedrv->irqs_support) {
struct idedrv_request* req = malloc(sizeof(*req));
if (idedrv->irqs_support)
return idedrv_do_read_irqs(idedrv, sector, sector_count, buffer, lockflags);
else
return idedrv_do_read_no_irqs(idedrv, sector, sector_count, buffer);
}
if (req == NULL)
return -ST_OOM_ERROR;
static int idedrv_do_write_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
void* buffer, uint64_t* lockflags) {
struct idedrv_request* req = malloc(sizeof(*req));
memset(req, 0, sizeof(*req));
req->buffer = buffer;
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_READ;
if (req == NULL)
return -ST_OOM_ERROR;
idedrv->current_req = req;
memset(req, 0, sizeof(*req));
req->buffer = buffer;
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_WRITE;
ide_prepare(idedrv, sector, sector_count, true);
idedrv->current_req = req;
uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
outb(idedrv->io + IDE_REG_CMD, cmd);
ide_prepare(idedrv, sector, sector_count, true);
spin_unlock(&device->lock, *lockflags);
while (!atomic_load(&req->done))
spin_lock_relax();
spin_lock(&device->lock, lockflags);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
outb(idedrv->io + IDE_REG_CMD, cmd);
if (!ide_wait(idedrv->io, 100000, true, true)) {
idedrv->current_req = NULL;
free(req);
return -ST_XDRV_WRITE_ERROR;
}
outsw(idedrv->io + IDE_REG_DATA, buffer, idedrv->sector_size / 2);
req->sector_done_count = 1;
spin_unlock(&idedrv->device->lock, *lockflags);
while (!atomic_load(&req->done))
spin_lock_relax();
spin_lock(&idedrv->device->lock, lockflags);
free(req);
return ST_OK;
}
static int idedrv_do_write_no_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
void* buffer) {
if (idedrv->bm_support) {
memcpy(idedrv->bounce_buffer, buffer, sector_count * idedrv->sector_size);
size_t sectors_done = 0;
while (sectors_done < sector_count) {
size_t chunk_sectors = sector_count - sectors_done;
size_t max_chunk = (16 * PAGE_SIZE) / idedrv->sector_size;
if (chunk_sectors > max_chunk)
chunk_sectors = max_chunk;
size_t byte_count = chunk_sectors * idedrv->sector_size;
idedrv->prdt[0].phys_addr = (uint32_t)idedrv->bounce_buffer_phys;
idedrv->prdt[0].size = (uint16_t)byte_count;
idedrv->prdt[0].rsvd_eot = 0x8000;
uint8_t status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
outb(idedrv->bmbase + IDE_DMA_REG_STATUS,
status | IDE_DMA_STATUS_INTR | IDE_DMA_STATUS_ERROR);
ide_prepare(idedrv, sector + sectors_done, chunk_sectors, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE_DMA48 : IDE_CMD_WRITE_DMA28;
outb(idedrv->io + IDE_REG_CMD, cmd);
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x01);
for (;;) {
uint8_t bm_status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
if (!(bm_status & IDE_DMA_STATUS_ACTIVE))
break;
if ((bm_status & IDE_DMA_STATUS_ERROR)) {
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
return -ST_XDRV_WRITE_ERROR;
}
spin_lock_relax();
}
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
inb(idedrv->io + IDE_REG_STATUS);
sectors_done += chunk_sectors;
}
} else {
ide_prepare(idedrv, sector, sector_count, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
outb(idedrv->io + IDE_REG_CMD, cmd);
for (uint16_t s = 0; s < sector_count; s++) {
if (!ide_wait(idedrv->io, 100000, true, true))
return -ST_XDRV_READ_ERROR;
return -ST_XDRV_WRITE_ERROR;
insw(idedrv->io + IDE_REG_DATA, buffer + (s * (idedrv->sector_size / 2)),
idedrv->sector_size / 2);
outsw(idedrv->io + IDE_REG_DATA, (uint16_t*)buffer + (s * (idedrv->sector_size / 2)),
idedrv->sector_size / 2);
}
}
@@ -338,6 +571,8 @@ DEFINE_DEVICE_OP(idedrv_read) {
}
DEFINE_DEVICE_OP(idedrv_write) {
int ret;
if (a1 == NULL || a2 == NULL || a3 == NULL)
return -ST_BAD_ADDRESS_SPACE;
@@ -350,63 +585,22 @@ DEFINE_DEVICE_OP(idedrv_write) {
if (sector + sector_count > idedrv->sector_count)
return -ST_OOB_ERROR;
if (idedrv->bm_support && ((sector_count * idedrv->sector_size) >= 16 * PAGE_SIZE))
return -ST_OOB_ERROR;
if (idedrv->current_req != NULL)
return -ST_TRY_AGAIN;
if (!ide_wait(idedrv->io, 100000, false, false))
return -ST_XDRV_WRITE_ERROR;
if (idedrv->irqs_support) {
struct idedrv_request* req = malloc(sizeof(*req));
if (idedrv->irqs_support)
ret = idedrv_do_write_irqs(idedrv, sector, sector_count, buffer, lockflags);
else
ret = idedrv_do_write_no_irqs(idedrv, sector, sector_count, buffer);
if (req == NULL)
return -ST_OOM_ERROR;
memset(req, 0, sizeof(*req));
req->buffer = buffer;
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_WRITE;
idedrv->current_req = req;
ide_prepare(idedrv, sector, sector_count, true);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
outb(idedrv->io + IDE_REG_CMD, cmd);
if (!ide_wait(idedrv->io, 100000, true, true)) {
idedrv->current_req = NULL;
free(req);
return -ST_XDRV_WRITE_ERROR;
}
outsw(idedrv->io + IDE_REG_DATA, buffer, idedrv->sector_size / 2);
req->sector_done_count = 1;
spin_unlock(&device->lock, *lockflags);
while (!atomic_load(&req->done))
spin_lock_relax();
spin_lock(&device->lock, lockflags);
free(req);
} else {
ide_prepare(idedrv, sector, sector_count, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
outb(idedrv->io + IDE_REG_CMD, cmd);
for (uint16_t s = 0; s < sector_count; s++) {
if (!ide_wait(idedrv->io, 100000, true, true))
return -ST_XDRV_WRITE_ERROR;
outsw(idedrv->io + IDE_REG_DATA, buffer + (s * (idedrv->sector_size / 2)),
idedrv->sector_size / 2);
}
}
if (ret < 0)
return ret;
uint8_t ctrl = inb(idedrv->ctrl);
ctrl |= 0x02;

View File

@@ -20,7 +20,9 @@ struct idedrv_init {
uint16_t io, ctrl;
uint8_t devno;
uint8_t irq;
uint16_t bmbase;
bool irqs_support;
bool bm_support;
};
struct idedrv_request {
@@ -31,6 +33,12 @@ struct idedrv_request {
atomic_int done;
};
/* Bus-master IDE Physical Region Descriptor (PRD) table entry, as consumed
 * directly by the controller — hence PACKED, no padding allowed. */
struct ide_prd_entry {
    uint32_t phys_addr; /* 32-bit physical address of the data region */
    uint16_t size;      /* byte count of the region; NOTE(review): by BM-IDE
                         * convention 0 means 64 KiB — confirm callers rely
                         * on (or avoid) that encoding */
    uint16_t rsvd_eot;  /* bit 15 = end-of-table marker; remaining bits reserved */
} PACKED;
struct idedrv {
struct device* device;
bool lba48;
@@ -41,6 +49,13 @@ struct idedrv {
uint8_t irq;
struct idedrv_request* current_req;
bool irqs_support;
uint16_t bmbase;
bool bm_support;
struct ide_prd_entry* prdt;
uintptr_t prdt_phys;
size_t prdt_entry_count;
uintptr_t bounce_buffer_phys;
void* bounce_buffer;
};
struct ide_probe {
@@ -50,7 +65,9 @@ struct ide_probe {
uint16_t io, ctrl;
uint8_t devno;
uint8_t irq;
uint16_t bmbase;
bool irqs_support;
bool bm_support;
};
DEFINE_DEVICE_INIT(idedrv_init);