
Implement a workaround for the data corruption problem on ServerWorks HT1000 chipsets.

The HT1000 DMA engine does not always like 64K transfers and sometimes barfs data all over memory, leading to an instant crash and burn.
Also fix 48-bit addressing issues; apparently newer chips need 16-bit writes rather than the usual FIFO approach.

HW donated by: Travis Mikalson at TerraNovaNet
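
As a rough illustration of the first fix: the diff below clamps ch->dma->max_iosize to 126 * DEV_BSIZE (64512 bytes) so a single DMA transfer never reaches 64K. The standalone sketch that follows is not driver code; the MAX_IOSIZE macro name, the 1MB request size and the simple chunking loop are assumptions used only to show how a capped maximum I/O size splits a larger request.

#include <stdint.h>
#include <stdio.h>

#define DEV_BSIZE   512                     /* assumed sector size */
#define MAX_IOSIZE  (126 * DEV_BSIZE)       /* 64512 bytes, just under 64K */

int
main(void)
{
    uint64_t total = 1024 * 1024;           /* hypothetical 1MB request */
    uint64_t off = 0;

    while (off < total) {
        uint64_t chunk = total - off;

        /* never hand the controller a transfer of 64K or more */
        if (chunk > MAX_IOSIZE)
            chunk = MAX_IOSIZE;
        printf("DMA transfer of %ju bytes at offset %ju\n",
            (uintmax_t)chunk, (uintmax_t)off);
        off += chunk;
    }
    return (0);
}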
Søren Schmidt 2007-12-13 11:47:36 +00:00
parent 4adb5266e8
commit 4c088dcd6c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=174576
4 changed files with 234 additions and 94 deletions
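
The second fix switches the affected taskfile writes from 8-bit to 16-bit I/O: in the new ata_serverworks_tf_write() (and the reworked ata_intel_31244_tf_write()) added below, each 48-bit LBA word carries the current LBA byte in its low half and the corresponding HOB byte in its high half. The program below is a hypothetical userland sketch, not driver code; it only evaluates the same packing expressions for an arbitrary example LBA so the byte layout is easy to verify.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t lba = 0x0000123456789abcULL;   /* arbitrary 48-bit example LBA */
    uint16_t sector, cyl_lsb, cyl_msb;

    /* low byte = current LBA byte, high byte = HOB byte, as in the driver */
    sector  = (uint16_t)(((lba >> 16) & 0xff00) | (lba & 0x00ff));          /* LBA 31:24 | 7:0   */
    cyl_lsb = (uint16_t)(((lba >> 24) & 0xff00) | ((lba >> 8) & 0x00ff));   /* LBA 39:32 | 15:8  */
    cyl_msb = (uint16_t)(((lba >> 32) & 0xff00) | ((lba >> 16) & 0x00ff));  /* LBA 47:40 | 23:16 */

    printf("SECTOR=0x%04x CYL_LSB=0x%04x CYL_MSB=0x%04x\n",
        sector, cyl_lsb, cyl_msb);
    return (0);
}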

sys/dev/ata/ata-all.h

@@ -464,6 +464,8 @@ struct ata_lowlevel {
int (*begin_transaction)(struct ata_request *request);
int (*end_transaction)(struct ata_request *request);
int (*command)(struct ata_request *request);
void (*tf_read)(struct ata_request *request);
void (*tf_write)(struct ata_request *request);
};
/* structure holding resources for an ATA channel */

sys/dev/ata/ata-chipset.c

@@ -99,7 +99,7 @@ static void ata_intel_new_setmode(device_t dev, int mode);
static void ata_intel_sata_setmode(device_t dev, int mode);
static int ata_intel_31244_allocate(device_t dev);
static int ata_intel_31244_status(device_t dev);
static int ata_intel_31244_command(struct ata_request *request);
static void ata_intel_31244_tf_write(struct ata_request *request);
static void ata_intel_31244_reset(device_t dev);
static int ata_ite_chipinit(device_t dev);
static void ata_ite_setmode(device_t dev, int mode);
@@ -152,6 +152,8 @@ static void ata_promise_queue_hpkt(struct ata_pci_controller *ctlr, u_int32_t hp
static void ata_promise_next_hpkt(struct ata_pci_controller *ctlr);
static int ata_serverworks_chipinit(device_t dev);
static int ata_serverworks_allocate(device_t dev);
static void ata_serverworks_tf_read(struct ata_request *request);
static void ata_serverworks_tf_write(struct ata_request *request);
static void ata_serverworks_setmode(device_t dev, int mode);
static int ata_sii_chipinit(device_t dev);
static int ata_cmd_allocate(device_t dev);
@@ -2093,7 +2095,7 @@ ata_intel_31244_allocate(device_t dev)
ch->flags |= ATA_NO_SLAVE;
ata_pci_hw(dev);
ch->hw.status = ata_intel_31244_status;
ch->hw.command = ata_intel_31244_command;
ch->hw.tf_write = ata_intel_31244_tf_write;
/* enable PHY state change interrupt */
ATA_OUTL(ctlr->r_res2, 0x4,
@@ -2111,32 +2113,55 @@ ata_intel_31244_status(device_t dev)
return ata_pci_status(dev);
}
static int
ata_intel_31244_command(struct ata_request *request)
static void
ata_intel_31244_tf_write(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
u_int64_t lba;
if (!(atadev->flags & ATA_D_48BIT_ACTIVE))
return (ata_generic_command(request));
lba = request->u.ata.lba;
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | atadev->unit);
/* enable interrupt */
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
ATA_IDX_OUTW(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTW(ch, ATA_COUNT, request->u.ata.count);
ATA_IDX_OUTW(ch, ATA_SECTOR, ((lba >> 16) & 0xff00) | (lba & 0x00ff));
ATA_IDX_OUTW(ch, ATA_CYL_LSB, ((lba >> 24) & 0xff00) |
((lba >> 8) & 0x00ff));
ATA_IDX_OUTW(ch, ATA_CYL_MSB, ((lba >> 32) & 0xff00) |
((lba >> 16) & 0x00ff));
/* issue command to controller */
ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command);
return 0;
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTW(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTW(ch, ATA_COUNT, request->u.ata.count);
ATA_IDX_OUTW(ch, ATA_SECTOR, ((request->u.ata.lba >> 16) & 0xff00) |
(request->u.ata.lba & 0x00ff));
ATA_IDX_OUTW(ch, ATA_CYL_LSB, ((request->u.ata.lba >> 24) & 0xff00) |
((request->u.ata.lba >> 8) & 0x00ff));
ATA_IDX_OUTW(ch, ATA_CYL_MSB, ((request->u.ata.lba >> 32) & 0xff00) |
((request->u.ata.lba >> 16) & 0x00ff));
ATA_IDX_OUTW(ch, ATA_DRIVE, ATA_D_LBA | atadev->unit);
}
else {
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
if (atadev->flags & ATA_D_USE_CHS) {
int heads, sectors;
if (atadev->param.atavalid & ATA_FLAG_54_58) {
heads = atadev->param.current_heads;
sectors = atadev->param.current_sectors;
}
else {
heads = atadev->param.heads;
sectors = atadev->param.sectors;
}
ATA_IDX_OUTB(ch, ATA_SECTOR, (request->u.ata.lba % sectors)+1);
ATA_IDX_OUTB(ch, ATA_CYL_LSB,
(request->u.ata.lba / (sectors * heads)));
ATA_IDX_OUTB(ch, ATA_CYL_MSB,
(request->u.ata.lba / (sectors * heads)) >> 8);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit |
(((request->u.ata.lba% (sectors * heads)) /
sectors) & 0xf));
}
else {
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE,
ATA_D_IBM | ATA_D_LBA | atadev->unit |
((request->u.ata.lba >> 24) & 0x0f));
}
}
}
static void
@@ -2849,8 +2874,12 @@ ata_marvell_edma_dmainit(device_t dev)
/* note start and stop are not used here */
ch->dma->setprd = ata_marvell_edma_dmasetprd;
/* if 64bit support present adjust max address used */
if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
ch->dma->max_address = BUS_SPACE_MAXADDR;
/* chip does not reliably do 64K DMA transfers */
ch->dma->max_iosize = 126 * DEV_BSIZE;
}
}
@@ -4248,9 +4277,96 @@ ata_serverworks_allocate(device_t dev)
ch->flags |= ATA_NO_SLAVE;
ata_pci_hw(dev);
ch->hw.tf_read = ata_serverworks_tf_read;
ch->hw.tf_write = ata_serverworks_tf_write;
/* chip does not reliably do 64K DMA transfers */
if (ch->dma)
ch->dma->max_iosize = 126 * DEV_BSIZE;
return 0;
}
static void
ata_serverworks_tf_read(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
u_int16_t temp;
request->u.ata.count = ATA_IDX_INW(ch, ATA_COUNT);
temp = ATA_IDX_INW(ch, ATA_SECTOR);
request->u.ata.lba = (u_int64_t)(temp & 0x00ff) |
((u_int64_t)(temp & 0xff00) << 24);
temp = ATA_IDX_INW(ch, ATA_CYL_LSB);
request->u.ata.lba |= ((u_int64_t)(temp & 0x00ff) << 8) |
((u_int64_t)(temp & 0xff00) << 32);
temp = ATA_IDX_INW(ch, ATA_CYL_MSB);
request->u.ata.lba |= ((u_int64_t)(temp & 0x00ff) << 16) |
((u_int64_t)(temp & 0xff00) << 40);
}
else {
request->u.ata.count = ATA_IDX_INW(ch, ATA_COUNT) & 0x00ff;
request->u.ata.lba = (ATA_IDX_INW(ch, ATA_SECTOR) & 0x00ff) |
((ATA_IDX_INW(ch, ATA_CYL_LSB) & 0x00ff) << 8) |
((ATA_IDX_INW(ch, ATA_CYL_MSB) & 0x00ff) << 16) |
((ATA_IDX_INW(ch, ATA_DRIVE) & 0xf) << 24);
}
}
static void
ata_serverworks_tf_write(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTW(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTW(ch, ATA_COUNT, request->u.ata.count);
ATA_IDX_OUTW(ch, ATA_SECTOR, ((request->u.ata.lba >> 16) & 0xff00) |
(request->u.ata.lba & 0x00ff));
ATA_IDX_OUTW(ch, ATA_CYL_LSB, ((request->u.ata.lba >> 24) & 0xff00) |
((request->u.ata.lba >> 8) & 0x00ff));
ATA_IDX_OUTW(ch, ATA_CYL_MSB, ((request->u.ata.lba >> 32) & 0xff00) |
((request->u.ata.lba >> 16) & 0x00ff));
ATA_IDX_OUTW(ch, ATA_DRIVE, ATA_D_LBA | atadev->unit);
}
else {
ATA_IDX_OUTW(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTW(ch, ATA_COUNT, request->u.ata.count);
if (atadev->flags & ATA_D_USE_CHS) {
int heads, sectors;
if (atadev->param.atavalid & ATA_FLAG_54_58) {
heads = atadev->param.current_heads;
sectors = atadev->param.current_sectors;
}
else {
heads = atadev->param.heads;
sectors = atadev->param.sectors;
}
ATA_IDX_OUTW(ch, ATA_SECTOR, (request->u.ata.lba % sectors)+1);
ATA_IDX_OUTW(ch, ATA_CYL_LSB,
(request->u.ata.lba / (sectors * heads)));
ATA_IDX_OUTW(ch, ATA_CYL_MSB,
(request->u.ata.lba / (sectors * heads)) >> 8);
ATA_IDX_OUTW(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit |
(((request->u.ata.lba% (sectors * heads)) /
sectors) & 0xf));
}
else {
ATA_IDX_OUTW(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTW(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTW(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTW(ch, ATA_DRIVE,
ATA_D_IBM | ATA_D_LBA | atadev->unit |
((request->u.ata.lba >> 24) & 0x0f));
}
}
}
static void
ata_serverworks_setmode(device_t dev, int mode)
{
@@ -4562,7 +4678,7 @@ ata_sii_allocate(device_t dev)
if ((ctlr->chip->cfg2 & SIIBUG) && ch->dma) {
/* work around errata in early chips */
ch->dma->boundary = 16 * DEV_BSIZE;
ch->dma->boundary = 8192;
ch->dma->segsize = 15 * DEV_BSIZE;
}
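
For drives that still need CHS addressing, the tf_write routines above fall back to the classic geometry arithmetic: sector = lba % sectors + 1, cylinder = lba / (sectors * heads), head = (lba % (sectors * heads)) / sectors. The standalone sketch below uses an assumed 16-head/63-sector geometry and an arbitrary LBA to show the conversion and check that it round-trips; it is illustrative only and not part of the driver.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t lba = 123456;          /* arbitrary example LBA */
    int heads = 16, sectors = 63;   /* assumed drive geometry */
    int cylinder, head, sector;
    uint32_t back;

    sector   = lba % sectors + 1;   /* sector numbers start at 1 */
    cylinder = lba / (sectors * heads);
    head     = (lba % (sectors * heads)) / sectors;

    /* round-trip check: recompute the LBA from C/H/S */
    back = ((uint32_t)cylinder * heads + head) * sectors + sector - 1;

    printf("LBA %u -> C/H/S %d/%d/%d -> LBA %u\n",
        (unsigned)lba, cylinder, head, sector, (unsigned)back);
    return (0);
}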

sys/dev/ata/ata-dma.c

@@ -75,7 +75,7 @@ ata_dmainit(device_t dev)
ch->dma->load = ata_dmaload;
ch->dma->unload = ata_dmaunload;
ch->dma->alignment = 2;
ch->dma->boundary = 128 * DEV_BSIZE;
ch->dma->boundary = 65536;
ch->dma->segsize = 128 * DEV_BSIZE;
ch->dma->max_iosize = 128 * DEV_BSIZE;
ch->dma->max_address = BUS_SPACE_MAXADDR_32BIT;

sys/dev/ata/ata-lowlevel.c

@@ -50,6 +50,8 @@ static int ata_generic_status(device_t dev);
static int ata_wait(struct ata_channel *ch, struct ata_device *, u_int8_t);
static void ata_pio_read(struct ata_request *, int);
static void ata_pio_write(struct ata_request *, int);
static void ata_tf_read(struct ata_request *);
static void ata_tf_write(struct ata_request *);
/*
* low level ATA functions
@@ -63,6 +65,8 @@ ata_generic_hw(device_t dev)
ch->hw.end_transaction = ata_end_transaction;
ch->hw.status = ata_generic_status;
ch->hw.command = ata_generic_command;
ch->hw.tf_read = ata_tf_read;
ch->hw.tf_write = ata_tf_write;
}
/* must be called with ATA channel locked and state_mtx held */
@@ -244,28 +248,7 @@ ata_end_transaction(struct ata_request *request)
/* on control commands read back registers to the request struct */
if (request->flags & ATA_R_CONTROL) {
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT | ATA_A_HOB);
request->u.ata.count = (ATA_IDX_INB(ch, ATA_COUNT) << 8);
request->u.ata.lba =
((u_int64_t)(ATA_IDX_INB(ch, ATA_SECTOR)) << 24) |
((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_LSB)) << 32) |
((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_MSB)) << 40);
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
request->u.ata.count |= ATA_IDX_INB(ch, ATA_COUNT);
request->u.ata.lba |=
(ATA_IDX_INB(ch, ATA_SECTOR) |
(ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
(ATA_IDX_INB(ch, ATA_CYL_MSB) << 16));
}
else {
request->u.ata.count = ATA_IDX_INB(ch, ATA_COUNT);
request->u.ata.lba = ATA_IDX_INB(ch, ATA_SECTOR) |
(ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
(ATA_IDX_INB(ch, ATA_CYL_MSB) << 16) |
((ATA_IDX_INB(ch, ATA_DRIVE) & 0xf) << 24);
}
ch->hw.tf_read(request);
}
/* if we got an error we are done with the HW */
@@ -734,59 +717,98 @@ ata_generic_command(struct ata_request *request)
ATA_PROTO_ATAPI_12 ? 6 : 8);
}
else {
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature >> 8);
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count >> 8);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba >> 24);
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 32);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 40);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | atadev->unit);
}
else {
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
if (atadev->flags & ATA_D_USE_CHS) {
int heads, sectors;
if (atadev->param.atavalid & ATA_FLAG_54_58) {
heads = atadev->param.current_heads;
sectors = atadev->param.current_sectors;
}
else {
heads = atadev->param.heads;
sectors = atadev->param.sectors;
}
ATA_IDX_OUTB(ch, ATA_SECTOR, (request->u.ata.lba % sectors)+1);
ATA_IDX_OUTB(ch, ATA_CYL_LSB,
(request->u.ata.lba / (sectors * heads)));
ATA_IDX_OUTB(ch, ATA_CYL_MSB,
(request->u.ata.lba / (sectors * heads)) >> 8);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit |
(((request->u.ata.lba% (sectors * heads)) /
sectors) & 0xf));
}
else {
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE,
ATA_D_IBM | ATA_D_LBA | atadev->unit |
((request->u.ata.lba >> 24) & 0x0f));
}
}
ch->hw.tf_write(request);
/* issue command to controller */
ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command);
}
return 0;
}
static void
ata_tf_read(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT | ATA_A_HOB);
request->u.ata.count = (ATA_IDX_INB(ch, ATA_COUNT) << 8);
request->u.ata.lba =
((u_int64_t)(ATA_IDX_INB(ch, ATA_SECTOR)) << 24) |
((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_LSB)) << 32) |
((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_MSB)) << 40);
ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
request->u.ata.count |= ATA_IDX_INB(ch, ATA_COUNT);
request->u.ata.lba |=
(ATA_IDX_INB(ch, ATA_SECTOR) |
(ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
(ATA_IDX_INB(ch, ATA_CYL_MSB) << 16));
}
else {
request->u.ata.count = ATA_IDX_INB(ch, ATA_COUNT);
request->u.ata.lba = ATA_IDX_INB(ch, ATA_SECTOR) |
(ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
(ATA_IDX_INB(ch, ATA_CYL_MSB) << 16) |
((ATA_IDX_INB(ch, ATA_DRIVE) & 0xf) << 24);
}
}
static void
ata_tf_write(struct ata_request *request)
{
struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
struct ata_device *atadev = device_get_softc(request->dev);
if (atadev->flags & ATA_D_48BIT_ACTIVE) {
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature >> 8);
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count >> 8);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba >> 24);
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 32);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 40);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | atadev->unit);
}
else {
ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
if (atadev->flags & ATA_D_USE_CHS) {
int heads, sectors;
if (atadev->param.atavalid & ATA_FLAG_54_58) {
heads = atadev->param.current_heads;
sectors = atadev->param.current_sectors;
}
else {
heads = atadev->param.heads;
sectors = atadev->param.sectors;
}
ATA_IDX_OUTB(ch, ATA_SECTOR, (request->u.ata.lba % sectors)+1);
ATA_IDX_OUTB(ch, ATA_CYL_LSB,
(request->u.ata.lba / (sectors * heads)));
ATA_IDX_OUTB(ch, ATA_CYL_MSB,
(request->u.ata.lba / (sectors * heads)) >> 8);
ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit |
(((request->u.ata.lba% (sectors * heads)) /
sectors) & 0xf));
}
else {
ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
ATA_IDX_OUTB(ch, ATA_DRIVE,
ATA_D_IBM | ATA_D_LBA | atadev->unit |
((request->u.ata.lba >> 24) & 0x0f));
}
}
}
static void
ata_pio_read(struct ata_request *request, int length)
{