Files
mop3/kernel/device/storage/idedrv.c
kamkow1 e5ebd7f3ba
All checks were successful
Build ISO image / build-and-deploy (push) Successful in 2m21s
Build documentation / build-and-deploy (push) Successful in 54s
Use a big lock for kernel synchronization instead of fine-grained locking
2026-04-27 18:06:02 +02:00

676 lines
17 KiB
C

#include <amd64/apic.h>
#include <amd64/intr_defs.h>
#include <amd64/io.h>
#include <aux/compiler.h>
#include <device/device.h>
#include <device/storage/idedrv.h>
#include <device/storage/partitions.h>
#include <devices.h>
#include <irq/irq.h>
#include <libk/align.h>
#include <libk/list.h>
#include <libk/std.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/malloc.h>
#include <mm/pmm.h>
#include <page_size.h>
#include <proc/proc.h>
#include <proc/reschedule.h>
#include <status.h>
#include <sync/biglock.h>
#include <sys/debug.h>
#include <sys/intr.h>
/* ATA task-file register offsets, relative to the channel's I/O base port. */
#define IDE_REG_DATA 0x00
#define IDE_REG_ERROR 0x01
#define IDE_REG_SECCOUNT 0x02
#define IDE_REG_LBA0 0x03
#define IDE_REG_LBA1 0x04
#define IDE_REG_LBA2 0x05
#define IDE_REG_DRIVE 0x06
/* Offset 0x07 is the status register on read and the command register on write. */
#define IDE_REG_STATUS 0x07
#define IDE_REG_CMD 0x07
/* Status register bits. */
#define IDE_BSY 0x80
#define IDE_DRDY 0x40
#define IDE_DF 0x20
#define IDE_ERR 0x01
#define IDE_DRQ 0x08
/* ATA command opcodes: PIO read/write, cache flush and IDENTIFY,
 * in 28-bit and 48-bit LBA variants. */
#define IDE_CMD_READ28 0x20
#define IDE_CMD_WRITE28 0x30
#define IDE_CMD_READ48 0x24
#define IDE_CMD_WRITE48 0x34
#define IDE_CMD_FLUSH48 0xEA
#define IDE_CMD_FLUSH28 0xE7
#define IDE_CMD_IDENTIFY 0xEC
/* DMA transfer command opcodes (bus-master path). */
#define IDE_CMD_READ_DMA28 0xC8
#define IDE_CMD_WRITE_DMA28 0xCA
#define IDE_CMD_READ_DMA48 0x25
#define IDE_CMD_WRITE_DMA48 0x35
/* Bus-master IDE register offsets, relative to bmbase. */
#define IDE_DMA_REG_CMD 0x00
#define IDE_DMA_REG_STATUS 0x02
#define IDE_DMA_REG_PRDT 0x04
/* Bus-master status register bits. */
#define IDE_DMA_STATUS_ACTIVE 0x01
#define IDE_DMA_STATUS_ERROR 0x02
#define IDE_DMA_STATUS_INTR 0x04
/* Direction tags stored in struct idedrv_request. */
#define IDE_READ 1
#define IDE_WRITE 2
/*
 * Poll the channel status register until the device is ready.
 *
 * First spins until BSY clears (up to `timeout` reads); then, if
 * `errcheck`, fails on ERR/DF in the last status sampled. When `drq`
 * is requested, keeps polling until DRQ is raised, again bounded by
 * `timeout` reads and aborting on ERR/DF.
 *
 * Returns true when the device reached the requested state, false on
 * timeout or device error.
 */
static bool ide_wait(uint16_t io, uint32_t timeout, bool drq, bool errcheck) {
    uint8_t status;
    /* Phase 1: wait for BSY to drop. */
    for (uint32_t spins = 0;;) {
        status = inb(io + IDE_REG_STATUS);
        if ((status & IDE_BSY) == 0)
            break;
        if (++spins >= timeout)
            return false;
    }
    if (errcheck && (status & (IDE_ERR | IDE_DF)) != 0)
        return false;
    if (!drq)
        return true;
    /* Phase 2: wait for DRQ, bailing out on device errors. */
    for (uint32_t spins = 0;;) {
        status = inb(io + IDE_REG_STATUS);
        if ((status & (IDE_ERR | IDE_DF)) != 0)
            return false;
        if ((status & IDE_DRQ) != 0)
            return true;
        if (++spins >= timeout)
            return false;
    }
}
/* ~400ns settle delay after device selection: four reads of the control/alt
 * status port, each taking ~100ns on legacy ISA timing. Optimization is
 * disabled so the compiler cannot drop or merge the redundant reads. */
#pragma clang optimize off
static void ide_delay(uint16_t ctrl) {
inb(ctrl);
inb(ctrl);
inb(ctrl);
inb(ctrl);
}
#pragma clang optimize on
/* Channel interrupt handler: advances the in-flight PIO request one sector
 * per interrupt and completes it when all sectors are transferred. Reading
 * the status register acknowledges the interrupt on the device side. */
static void ide_irq(void* arg, void* regs, bool user, struct reschedule_ctx* rctx) {
(void)user, (void)regs, (void)rctx;
struct idedrv* idedrv = arg;
struct idedrv_request* req = idedrv->current_req;
if (req == NULL) {
/* Spurious/unexpected interrupt: ack by reading status, then bail. */
(void)inb(idedrv->io + IDE_REG_STATUS);
return;
}
uint8_t status = inb(idedrv->io + IDE_REG_STATUS);
if ((status & (IDE_ERR | IDE_DF))) {
/* NOTE(review): the failure is not recorded on the request, so the
 * waiter in idedrv_do_{read,write}_irqs still reports ST_OK. */
atomic_store(&req->done, 1);
idedrv->current_req = NULL;
return;
}
if ((status & IDE_DRQ) && (req->sector_done_count < req->sector_count)) {
/* One sector per interrupt, moved as 16-bit words. */
uint16_t* p = req->buffer + (req->sector_done_count * (idedrv->sector_size / 2));
if (req->type == IDE_READ)
insw(idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
else
outsw(idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
req->sector_done_count++;
}
if ((req->sector_done_count >= req->sector_count)) {
atomic_store(&req->done, 1);
idedrv->current_req = NULL;
}
}
/* Probe one IDE device (devno 0 = master, 1 = slave) on the channel at
 * io/ctrl by issuing ATA IDENTIFY and decoding the result into `probe`.
 * On any failure, probe->flags stays 0 (IDE_PROBE_AVAIL not set). */
void ide_probe(uint16_t io, uint16_t ctrl, uint8_t devno, struct ide_probe* probe) {
probe->flags = 0;
probe->sector_count = 0;
probe->sector_size = 0;
uint16_t identify_buffer[256];
uint8_t status = inb(io + IDE_REG_STATUS);
/* 0xFF on the status port is a floating bus: no drive present. */
if (status == 0xFF)
return;
/* Select the target device, then issue IDENTIFY with a cleared task-file. */
outb(io + IDE_REG_DRIVE, 0xA0 | (devno << 4));
ide_delay(ctrl);
outb(io + IDE_REG_SECCOUNT, 0);
outb(io + IDE_REG_LBA0, 0);
outb(io + IDE_REG_LBA1, 0);
outb(io + IDE_REG_LBA2, 0);
outb(io + IDE_REG_CMD, IDE_CMD_IDENTIFY);
status = inb(io + IDE_REG_STATUS);
/* Status reads back 0 when the selected device does not exist. */
if (status == 0)
return;
if (!ide_wait(io, 90000000, true, true)) {
return;
}
/* IDENTIFY payload is 256 16-bit words. */
insw(io + IDE_REG_DATA, identify_buffer, 256);
probe->flags |= IDE_PROBE_AVAIL;
/* Word 106: when valid (bit 14 set, bit 15 clear) and bit 12 set, words
 * 117-118 hold the logical sector size in 16-bit words. */
if ((identify_buffer[106] & 0xC000) == 0x4000 && identify_buffer[106] & (1 << 12)) {
uint32_t words_per_sector =
(uint32_t)identify_buffer[117] | ((uint32_t)identify_buffer[118] << 16);
probe->sector_size = (size_t)words_per_sector * 2;
}
/* Word 83 bit 10: 48-bit address feature set supported. */
if ((identify_buffer[83] & (1 << 10)) != 0)
probe->flags |= IDE_PROBE_LBA48;
if ((probe->flags & IDE_PROBE_LBA48)) {
/* Words 100-103: total addressable sectors in LBA48 mode. */
probe->sector_count = (size_t)((uint64_t)identify_buffer[100] |
((uint64_t)identify_buffer[101] << 16) |
((uint64_t)identify_buffer[102] << 32) |
((uint64_t)identify_buffer[103] << 48));
} else {
/* Words 60-61: total addressable sectors in LBA28 mode. */
probe->sector_count =
(size_t)((uint64_t)identify_buffer[60] | ((uint64_t)identify_buffer[61] << 16));
}
probe->io = io;
probe->ctrl = ctrl;
probe->devno = devno;
/* Fall back to the classic 512-byte sector when word 106 gave nothing. */
if (probe->sector_size == 0)
probe->sector_size = 512;
}
/* Program the task-file for a transfer of `sector_count` sectors starting at
 * LBA `sector`, and set/clear nIEN (bit 1 of the device control register)
 * per `irq_enable`. The caller issues the actual command byte afterwards. */
static void ide_prepare(struct idedrv* idedrv, size_t sector, uint16_t sector_count,
bool irq_enable) {
uint8_t ctrl = inb(idedrv->ctrl);
if (irq_enable)
ctrl &= ~0x02; /* clear nIEN: device interrupts enabled */
else
ctrl |= 0x02; /* set nIEN: device interrupts masked */
outb(idedrv->ctrl, ctrl);
if (idedrv->lba48) {
/* LBA48: select device in LBA mode (0x40), then write the high-order
 * count/LBA bytes followed by the low-order bytes (order mandated by
 * the 48-bit feature set's two-write register protocol). */
outb(idedrv->io + IDE_REG_DRIVE, 0x40 | (idedrv->devno << 4));
ide_delay(idedrv->ctrl);
ide_wait(idedrv->io, 100000, false, false);
outb(idedrv->io + IDE_REG_SECCOUNT, (sector_count >> 8) & 0xFF);
outb(idedrv->io + IDE_REG_LBA0, (sector >> 24) & 0xFF);
outb(idedrv->io + IDE_REG_LBA1, (sector >> 32) & 0xFF);
outb(idedrv->io + IDE_REG_LBA2, (sector >> 40) & 0xFF);
outb(idedrv->io + IDE_REG_SECCOUNT, sector_count & 0xFF);
outb(idedrv->io + IDE_REG_LBA0, sector & 0xFF);
outb(idedrv->io + IDE_REG_LBA1, (sector >> 8) & 0xFF);
outb(idedrv->io + IDE_REG_LBA2, (sector >> 16) & 0xFF);
} else {
/* LBA28: drive register carries LBA bits 24-27; 0xE0 selects LBA mode. */
outb(idedrv->io + IDE_REG_DRIVE, 0xE0 | (idedrv->devno << 4) | ((sector >> 24) & 0x0F));
ide_delay(idedrv->ctrl);
ide_wait(idedrv->io, 100000, false, false);
/* Count register: 0 encodes a 256-sector transfer. */
uint8_t count = (sector_count == 256) ? 0 : (uint8_t)sector_count;
outb(idedrv->io + IDE_REG_SECCOUNT, count);
outb(idedrv->io + IDE_REG_LBA0, sector & 0xFF);
outb(idedrv->io + IDE_REG_LBA1, (sector >> 8) & 0xFF);
outb(idedrv->io + IDE_REG_LBA2, (sector >> 16) & 0xFF);
}
}
/* Device init: allocate per-drive state from the probe results in `arg`,
 * set up bus-master DMA structures (PRD table + bounce buffer) when
 * supported, and attach the channel IRQ handler. Returns false on OOM or
 * when DMA memory lands outside the 32-bit physical range. */
DEFINE_DEVICE_INIT(idedrv_init) {
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
struct idedrv_init* init = arg;
struct idedrv* idedrv = malloc(sizeof(*idedrv));
if (idedrv == NULL)
return false;
memset(idedrv, 0, sizeof(*idedrv));
idedrv->device = device;
idedrv->lba48 = init->lba48;
idedrv->sector_count = init->sector_count;
idedrv->sector_size = init->sector_size;
idedrv->io = init->io;
idedrv->ctrl = init->ctrl;
idedrv->devno = init->devno;
idedrv->irq = init->irq;
idedrv->current_req = NULL;
idedrv->irqs_support = init->irqs_support;
idedrv->bmbase = init->bmbase;
idedrv->bm_support = init->bm_support;
if (idedrv->bm_support) {
/* One page for the PRD table. Bus-master IDE takes 32-bit physical
 * addresses only, so memory at/above 4 GiB is unusable and freed. */
idedrv->prdt_phys = pmm_alloc(1);
if (idedrv->prdt_phys >= 0xFFFFFFFF) {
pmm_free(idedrv->prdt_phys, 1);
free(idedrv);
return false;
}
idedrv->prdt_entry_count = PAGE_SIZE / sizeof(struct ide_prd_entry);
/* Physical memory is accessed through the Limine higher-half direct map. */
idedrv->prdt = (struct ide_prd_entry*)((uintptr_t)hhdm->offset + idedrv->prdt_phys);
/* 16-page bounce buffer, 16-page aligned; matches the transfer-size cap
 * enforced by idedrv_read/idedrv_write. */
idedrv->bounce_buffer_phys = pmm_alloc_aligned(16, 16);
if (idedrv->bounce_buffer_phys >= 0xFFFFFFFF) {
pmm_free(idedrv->bounce_buffer_phys, 16);
pmm_free(idedrv->prdt_phys, 1);
free(idedrv);
return false;
}
idedrv->bounce_buffer = (void*)((uintptr_t)hhdm->offset + idedrv->bounce_buffer_phys);
}
device->udata = idedrv;
if (idedrv->irqs_support)
irq_attach(&ide_irq, idedrv, idedrv->irq);
return true;
}
/* Device teardown: drop any in-flight request, detach the IRQ handler and
 * release the DMA memory allocated by idedrv_init. */
DEFINE_DEVICE_FINI(idedrv_fini) {
    struct idedrv* idedrv = device->udata;
    if (idedrv->current_req != NULL) {
        free(idedrv->current_req);
        idedrv->current_req = NULL;
    }
    if (idedrv->irqs_support)
        irq_detach(idedrv->irq);
    if (idedrv->bm_support) {
        /* Page counts must mirror idedrv_init: 16 pages for the bounce
         * buffer, but only 1 page was allocated for the PRD table
         * (pmm_alloc(1)) — freeing 16 here corrupted the PMM. */
        pmm_free(idedrv->bounce_buffer_phys, 16);
        pmm_free(idedrv->prdt_phys, 1);
    }
    free(idedrv);
}
/* IRQ-driven PIO read: queue a request, issue the command and let ide_irq
 * move the data; spin (with the big lock released) until the handler flags
 * completion.
 * NOTE(review): an ERR/DF completion in ide_irq also sets `done`, so device
 * errors are reported as ST_OK here — the request carries no error field.
 * There is also no timeout: a lost interrupt hangs this loop forever. */
static int idedrv_do_read_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
void* buffer) {
struct idedrv_request* req = malloc(sizeof(*req));
if (req == NULL)
return -ST_OOM_ERROR;
memset(req, 0, sizeof(*req));
req->buffer = buffer;
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_READ;
idedrv->current_req = req;
ide_prepare(idedrv, sector, sector_count, true);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
outb(idedrv->io + IDE_REG_CMD, cmd);
/* Release the kernel big lock so the IRQ path can make progress. */
biglock_unlock();
while (!atomic_load(&req->done))
spin_lock_relax();
biglock_lock();
free(req);
return ST_OK;
}
/* Polled read path. With bus-master support, DMA into the bounce buffer in
 * chunks of at most 16 pages (one PRD entry each) and copy out to `buffer`;
 * otherwise fall back to polled PIO, one sector at a time. */
static int idedrv_do_read_no_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
void* buffer) {
if (idedrv->bm_support) {
size_t sectors_done = 0;
while (sectors_done < sector_count) {
size_t chunk_sectors = sector_count - sectors_done;
size_t max_chunk = (16 * PAGE_SIZE) / idedrv->sector_size;
if (chunk_sectors > max_chunk)
chunk_sectors = max_chunk;
size_t byte_count = chunk_sectors * idedrv->sector_size;
/* Single PRD entry covering the bounce buffer; 0x8000 in the
 * reserved/EOT word marks the last (only) entry of the table.
 * NOTE(review): the 16-bit size truncation relies on byte_count
 * staying below 64 KiB (0 would encode 64 KiB) — confirm. */
idedrv->prdt[0].phys_addr = (uint32_t)idedrv->bounce_buffer_phys;
idedrv->prdt[0].size = (uint16_t)byte_count;
idedrv->prdt[0].rsvd_eot = 0x8000;
outl(idedrv->bmbase + IDE_DMA_REG_PRDT, (uint32_t)idedrv->prdt_phys);
/* 0x08 = read direction (device -> memory), engine not started yet. */
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x08);
/* Clear stale interrupt/error bits (write-1-to-clear). */
uint8_t status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
outb(idedrv->bmbase + IDE_DMA_REG_STATUS,
status | IDE_DMA_STATUS_INTR | IDE_DMA_STATUS_ERROR);
ide_prepare(idedrv, sector + sectors_done, chunk_sectors, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ_DMA48 : IDE_CMD_READ_DMA28;
outb(idedrv->io + IDE_REG_CMD, cmd);
/* Start the bus-master engine (0x01) keeping read direction. */
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x08 | 0x01);
for (;;) {
uint8_t bm_status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
if (!(bm_status & IDE_DMA_STATUS_ACTIVE))
break;
if ((bm_status & IDE_DMA_STATUS_ERROR)) {
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
return -ST_XDRV_READ_ERROR;
}
spin_lock_relax();
}
/* Stop the engine and ack the device by reading its status. */
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
inb(idedrv->io + IDE_REG_STATUS);
memcpy((void*)((uint16_t*)buffer + (sectors_done * (idedrv->sector_size / 2))),
idedrv->bounce_buffer, byte_count);
sectors_done += chunk_sectors;
}
} else {
/* Polled PIO fallback: wait for DRQ before each sector. */
ide_prepare(idedrv, sector, sector_count, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_READ48 : IDE_CMD_READ28;
outb(idedrv->io + IDE_REG_CMD, cmd);
for (uint16_t s = 0; s < sector_count; s++) {
if (!ide_wait(idedrv->io, 100000, true, true))
return -ST_XDRV_READ_ERROR;
uint16_t* p = (uint16_t*)buffer + (s * (idedrv->sector_size / 2));
insw(idedrv->io + IDE_REG_DATA, p, idedrv->sector_size / 2);
}
}
return ST_OK;
}
/* Device op: read `*a2` sectors starting at LBA `*a1` into buffer `a3`.
 * Dispatches to the IRQ-driven or polled implementation.
 * Returns ST_OK or a negative status code. */
DEFINE_DEVICE_OP(idedrv_read) {
    if (a1 == NULL || a2 == NULL || a3 == NULL)
        return -ST_BAD_ADDRESS_SPACE;
    size_t sector = *(size_t*)a1;
    size_t sector_count = *(size_t*)a2;
    uint16_t* buffer = a3;
    struct idedrv* idedrv = device->udata;
    /* Overflow-safe bounds check: `sector + sector_count` could wrap
     * around SIZE_MAX and slip past a naive comparison. */
    if (sector_count > idedrv->sector_count ||
        sector > idedrv->sector_count - sector_count)
        return -ST_OOB_ERROR;
    /* DMA requests are capped by the 16-page bounce buffer. */
    if (idedrv->bm_support && ((sector_count * idedrv->sector_size) >= 16 * PAGE_SIZE))
        return -ST_OOB_ERROR;
    /* Only one request may be in flight per channel. */
    if (idedrv->current_req != NULL)
        return -ST_TRY_AGAIN;
    if (!ide_wait(idedrv->io, 100000, false, false))
        return -ST_XDRV_READ_ERROR;
    if (idedrv->irqs_support)
        return idedrv_do_read_irqs(idedrv, sector, sector_count, buffer);
    else
        return idedrv_do_read_no_irqs(idedrv, sector, sector_count, buffer);
}
/* IRQ-driven PIO write: the CPU pushes the first sector once DRQ is up;
 * each subsequent sector is pushed by ide_irq on the interrupt the device
 * raises after accepting a sector.
 * NOTE(review): as with reads, ide_irq cannot report ERR/DF back, so a
 * mid-transfer device error still returns ST_OK; the done-wait also has
 * no timeout. */
static int idedrv_do_write_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
void* buffer) {
struct idedrv_request* req = malloc(sizeof(*req));
if (req == NULL)
return -ST_OOM_ERROR;
memset(req, 0, sizeof(*req));
req->buffer = buffer;
req->sector_count = sector_count;
req->sector_done_count = 0;
req->type = IDE_WRITE;
idedrv->current_req = req;
ide_prepare(idedrv, sector, sector_count, true);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
outb(idedrv->io + IDE_REG_CMD, cmd);
/* Wait for DRQ before feeding the first sector ourselves. */
if (!ide_wait(idedrv->io, 100000, true, true)) {
idedrv->current_req = NULL;
free(req);
return -ST_XDRV_WRITE_ERROR;
}
outsw(idedrv->io + IDE_REG_DATA, buffer, idedrv->sector_size / 2);
req->sector_done_count = 1;
/* Release the kernel big lock so the IRQ path can make progress. */
biglock_unlock();
while (!atomic_load(&req->done))
spin_lock_relax();
biglock_lock();
free(req);
return ST_OK;
}
/* Polled write path. With bus-master support, stage the data in the bounce
 * buffer and DMA it out; otherwise fall back to polled PIO per sector.
 * NOTE(review): the bounce buffer is filled once before the chunk loop; if
 * more than one chunk were ever needed, later chunks would resend chunk-0
 * data. idedrv_write caps requests below 16 pages, so exactly one chunk
 * runs today — confirm before lifting that cap. */
static int idedrv_do_write_no_irqs(struct idedrv* idedrv, size_t sector, size_t sector_count,
void* buffer) {
if (idedrv->bm_support) {
memcpy(idedrv->bounce_buffer, buffer, sector_count * idedrv->sector_size);
size_t sectors_done = 0;
while (sectors_done < sector_count) {
size_t chunk_sectors = sector_count - sectors_done;
size_t max_chunk = (16 * PAGE_SIZE) / idedrv->sector_size;
if (chunk_sectors > max_chunk)
chunk_sectors = max_chunk;
size_t byte_count = chunk_sectors * idedrv->sector_size;
/* Single PRD entry over the bounce buffer; 0x8000 marks end-of-table. */
idedrv->prdt[0].phys_addr = (uint32_t)idedrv->bounce_buffer_phys;
idedrv->prdt[0].size = (uint16_t)byte_count;
idedrv->prdt[0].rsvd_eot = 0x8000;
/* Clear stale interrupt/error bits (write-1-to-clear). */
uint8_t status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
outb(idedrv->bmbase + IDE_DMA_REG_STATUS,
status | IDE_DMA_STATUS_INTR | IDE_DMA_STATUS_ERROR);
ide_prepare(idedrv, sector + sectors_done, chunk_sectors, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE_DMA48 : IDE_CMD_WRITE_DMA28;
outb(idedrv->io + IDE_REG_CMD, cmd);
/* Start the engine; direction bit clear = memory -> device. */
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x01);
for (;;) {
uint8_t bm_status = inb(idedrv->bmbase + IDE_DMA_REG_STATUS);
if (!(bm_status & IDE_DMA_STATUS_ACTIVE))
break;
if ((bm_status & IDE_DMA_STATUS_ERROR)) {
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
return -ST_XDRV_WRITE_ERROR;
}
spin_lock_relax();
}
/* Stop the engine and ack the device by reading its status. */
outb(idedrv->bmbase + IDE_DMA_REG_CMD, 0x00);
inb(idedrv->io + IDE_REG_STATUS);
sectors_done += chunk_sectors;
}
} else {
/* Polled PIO fallback: wait for DRQ before each sector. */
ide_prepare(idedrv, sector, sector_count, false);
uint8_t cmd = idedrv->lba48 ? IDE_CMD_WRITE48 : IDE_CMD_WRITE28;
outb(idedrv->io + IDE_REG_CMD, cmd);
for (uint16_t s = 0; s < sector_count; s++) {
if (!ide_wait(idedrv->io, 100000, true, true))
return -ST_XDRV_WRITE_ERROR;
outsw(idedrv->io + IDE_REG_DATA, (uint16_t*)buffer + (s * (idedrv->sector_size / 2)),
idedrv->sector_size / 2);
}
}
return ST_OK;
}
/* Device op: write `*a2` sectors from buffer `a3` starting at LBA `*a1`,
 * then flush the drive's write cache. Returns ST_OK or a negative status. */
DEFINE_DEVICE_OP(idedrv_write) {
    int ret;
    if (a1 == NULL || a2 == NULL || a3 == NULL)
        return -ST_BAD_ADDRESS_SPACE;
    size_t sector = *(size_t*)a1;
    size_t sector_count = *(size_t*)a2;
    uint16_t* buffer = a3;
    struct idedrv* idedrv = device->udata;
    /* Overflow-safe bounds check: `sector + sector_count` could wrap
     * around SIZE_MAX and slip past a naive comparison. */
    if (sector_count > idedrv->sector_count ||
        sector > idedrv->sector_count - sector_count)
        return -ST_OOB_ERROR;
    /* DMA requests are capped by the 16-page bounce buffer. */
    if (idedrv->bm_support && ((sector_count * idedrv->sector_size) >= 16 * PAGE_SIZE))
        return -ST_OOB_ERROR;
    /* Only one request may be in flight per channel. */
    if (idedrv->current_req != NULL)
        return -ST_TRY_AGAIN;
    if (!ide_wait(idedrv->io, 100000, false, false))
        return -ST_XDRV_WRITE_ERROR;
    if (idedrv->irqs_support)
        ret = idedrv_do_write_irqs(idedrv, sector, sector_count, buffer);
    else
        ret = idedrv_do_write_no_irqs(idedrv, sector, sector_count, buffer);
    if (ret < 0)
        return ret;
    /* Flush the write cache with device interrupts masked (nIEN set). */
    uint8_t ctrl = inb(idedrv->ctrl);
    ctrl |= 0x02;
    outb(idedrv->ctrl, ctrl);
    if (idedrv->lba48)
        outb(idedrv->io + IDE_REG_CMD, IDE_CMD_FLUSH48);
    else
        outb(idedrv->io + IDE_REG_CMD, IDE_CMD_FLUSH28);
    /* NOTE(review): this poll has no timeout; a wedged drive hangs here. */
    uint8_t status;
    do {
        status = inb(idedrv->io + IDE_REG_STATUS);
    } while (status & IDE_BSY);
    if (status & (IDE_ERR | IDE_DF))
        return -ST_XDRV_WRITE_ERROR;
    return ST_OK;
}
/* Device op: report this driver's device-type tag through *a1. */
DEFINE_DEVICE_OP(idedrv_get_device_type) {
    (void)proc, (void)rctx, (void)device, (void)a2, (void)a3, (void)a4;
    if (a1 == NULL)
        return -ST_BAD_ADDRESS_SPACE;
    *(int*)a1 = XDRV_TYPE_IDEDRV;
    return ST_OK;
}
/* Device op: report the drive's logical sector size in bytes through *a1. */
DEFINE_DEVICE_OP(idedrv_get_sector_size) {
    (void)proc, (void)rctx, (void)a2, (void)a3, (void)a4;
    if (a1 == NULL)
        return -ST_BAD_ADDRESS_SPACE;
    struct idedrv* idedrv = device->udata;
    *(size_t*)a1 = idedrv->sector_size;
    return ST_OK;
}
/* Device op: report the drive's total capacity in bytes
 * (sector size times sector count) through *a1. */
DEFINE_DEVICE_OP(idedrv_get_size) {
    (void)proc, (void)rctx, (void)a2, (void)a3, (void)a4;
    if (a1 == NULL)
        return -ST_BAD_ADDRESS_SPACE;
    struct idedrv* idedrv = device->udata;
    *(size_t*)a1 = idedrv->sector_size * idedrv->sector_count;
    return ST_OK;
}
/* Device op: delete every existing partition subdevice of this drive and
 * re-probe the partition table, recreating them. */
DEFINE_DEVICE_OP(idedrv_partition_rescan) {
struct list_node_link *subdevice_link, *tmp_subdevice_link;
/* The tmp link makes removal during iteration safe. */
list_foreach(device->subdevices, subdevice_link, tmp_subdevice_link) {
struct device* subdevice = list_entry(subdevice_link, struct device, subdevices_link);
list_remove(device->subdevices, &subdevice->subdevices_link);
device_delete(subdevice->key, proc, rctx);
}
int r = device_probe_partitions(proc, rctx, device);
return r;
}