* May be copied or modified under the terms of the GNU General Public License
*
* Special Thanks to Mark for his Six years of work.
- */
-
-/*
+*
* This module provides support for the bus-master IDE DMA functions
* of various PCI chipsets, including the Intel PIIX (i82371FB for
* the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and
__ide_end_request(drive, 1, rq->nr_sectors);
return ide_stopped;
}
- printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
+ printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat);
}
return ide_error(drive, "dma_intr", stat);
}
-static int ide_build_sglist(struct ata_channel *hwif, struct request *rq)
-{
- request_queue_t *q = &hwif->drives[DEVICE_NR(rq->rq_dev) & 1].queue;
- struct scatterlist *sg = hwif->sg_table;
- int nents;
-
- nents = blk_rq_map_sg(q, rq, hwif->sg_table);
-
- if (rq->q && nents > rq->nr_phys_segments)
- printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
-
- if (rq_data_dir(rq) == READ)
- hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
- else
- hwif->sg_dma_direction = PCI_DMA_TODEVICE;
-
- return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
-}
-
/*
* FIXME: taskfiles should be a map of pages, not a long virt address... /jens
* FIXME: I agree with Jens --mdcki!
*/
-static int raw_build_sglist(struct ata_channel *ch, struct request *rq)
+static int build_sglist(struct ata_channel *ch, struct request *rq)
{
struct scatterlist *sg = ch->sg_table;
int nents = 0;
- struct ata_taskfile *args = rq->special;
+
+ if (rq->flags & REQ_DRIVE_ACB) {
+ struct ata_taskfile *args = rq->special;
#if 1
- unsigned char *virt_addr = rq->buffer;
- int sector_count = rq->nr_sectors;
+ unsigned char *virt_addr = rq->buffer;
+ int sector_count = rq->nr_sectors;
#else
- nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
+ nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
- if (nents > rq->nr_segments)
- printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
+ if (nents > rq->nr_segments)
+ printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
#endif
- if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
- ch->sg_dma_direction = PCI_DMA_TODEVICE;
- else
- ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
+ if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
+ ch->sg_dma_direction = PCI_DMA_TODEVICE;
+ else
+ ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
- if (sector_count > 128) {
+ /*
+ * FIXME: This depends upon a hard coded page size!
+ */
+ if (sector_count > 128) {
+ memset(&sg[nents], 0, sizeof(*sg));
+
+ sg[nents].page = virt_to_page(virt_addr);
+ sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
+ sg[nents].length = 128 * SECTOR_SIZE;
+ ++nents;
+ virt_addr = virt_addr + (128 * SECTOR_SIZE);
+ sector_count -= 128;
+ }
memset(&sg[nents], 0, sizeof(*sg));
-
sg[nents].page = virt_to_page(virt_addr);
sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
- sg[nents].length = 128 * SECTOR_SIZE;
- nents++;
- virt_addr = virt_addr + (128 * SECTOR_SIZE);
- sector_count -= 128;
- }
- memset(&sg[nents], 0, sizeof(*sg));
- sg[nents].page = virt_to_page(virt_addr);
- sg[nents].offset = (unsigned long) virt_addr & ~PAGE_MASK;
- sg[nents].length = sector_count * SECTOR_SIZE;
- nents++;
+ sg[nents].length = sector_count * SECTOR_SIZE;
+ ++nents;
+ } else {
+ nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
+ if (rq->q && nents > rq->nr_phys_segments)
+ printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
+
+ if (rq_data_dir(rq) == READ)
+ ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
+ else
+ ch->sg_dma_direction = PCI_DMA_TODEVICE;
+
+ }
return pci_map_sg(ch->pci_dev, sg, nents, ch->sg_dma_direction);
}
int i;
struct scatterlist *sg;
- if (HWGROUP(drive)->rq->flags & REQ_DRIVE_ACB) {
- hwif->sg_nents = i = raw_build_sglist(hwif, HWGROUP(drive)->rq);
- } else {
- hwif->sg_nents = i = ide_build_sglist(hwif, HWGROUP(drive)->rq);
- }
+ hwif->sg_nents = i = build_sglist(hwif, HWGROUP(drive)->rq);
if (!i)
return 0;
if (is_trm290_chipset)
xcount = ((xcount >> 2) - 1) << 16;
if (xcount == 0x0000) {
- /*
+ /*
* Most chipsets correctly interpret a length of
* 0x0000 as 64KB, but at least one (e.g. CS5530)
* misinterprets it as zero (!). So here we break
return 0; /* no, it is not a flash memory card */
}
-int __ide_end_request(ide_drive_t *drive, int uptodate, int nr_secs)
+int __ide_end_request(struct ata_device *drive, int uptodate, int nr_secs)
{
struct request *rq;
unsigned long flags;
nr_secs = rq->hard_cur_sectors;
/*
- * decide whether to reenable DMA -- 3 is a random magic for now,
- * if we DMA timeout more than 3 times, just stay in PIO
+ * Decide whether to reenable DMA -- 3 is a random magic for now,
+ * if we DMA timeout more than 3 times, just stay in PIO.
*/
+
if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
drive->state = 0;
- HWGROUP(drive)->hwif->dmaproc(ide_dma_on, drive);
+ drive->channel->dmaproc(ide_dma_on, drive);
}
if (!end_that_request_first(rq, uptodate, nr_secs)) {
}
/*
- * do_reset1() attempts to recover a confused drive by resetting it.
- * Unfortunately, resetting a disk drive actually resets all devices on
- * the same interface, so it can really be thought of as resetting the
- * interface rather than resetting the drive.
+ * Attempt to recover a confused drive by resetting it. Unfortunately,
+ * resetting a disk drive actually resets all devices on the same interface, so
+ * it can really be thought of as resetting the interface rather than resetting
+ * the drive.
*
* ATAPI devices have their own reset mechanism which allows them to be
* individually reset without clobbering other devices on the same interface.
*
- * Unfortunately, the IDE interface does not generate an interrupt to let
- * us know when the reset operation has finished, so we must poll for this.
- * Equally poor, though, is the fact that this may a very long time to complete,
- * (up to 30 seconds worst case). So, instead of busy-waiting here for it,
- * we set a timer to poll at 50ms intervals.
+ * Unfortunately, the IDE interface does not generate an interrupt to let us
+ * know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time to
+ * complete (up to 30 seconds worst case). So, instead of busy-waiting here
+ * for it, we set a timer to poll at 50ms intervals.
*/
+
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
unsigned int unit;
#endif
__restore_flags (flags); /* local CPU only */
+
return ide_started;
}
/*
* Select the next drive which will be serviced.
*/
-static inline ide_drive_t *choose_drive(ide_hwgroup_t *hwgroup)
+static struct ata_device *choose_drive(struct ata_device *cur)
{
- ide_drive_t *drive, *best;
+ struct ata_device *drive = cur;
+ struct ata_device *best = NULL;
- best = NULL;
- drive = hwgroup->drive;
do {
if (!list_empty(&drive->queue.queue_head)
&& (!drive->PADAM_sleep || time_after_eq(drive->PADAM_sleep, jiffies))) {
best = drive;
}
}
- } while ((drive = drive->next) != hwgroup->drive);
+ drive = drive->next;
+ } while (drive != cur);
return best;
}
/*
- * Issue a new request to a drive from hwgroup
+ * Issue a new request to a drive from hwgroup.
* Caller must have already done spin_lock_irqsave(&ide_lock, ...)
*
* A hwgroup is a serialized group of IDE interfaces. Usually there is
__cli(); /* necessary paranoia: ensure IRQs are masked on local CPU */
while (!test_and_set_bit(IDE_BUSY, &hwgroup->flags)) {
- drive = choose_drive(hwgroup);
+ drive = choose_drive(hwgroup->drive);
+
if (drive == NULL) {
unsigned long sleep = 0;
hwgroup->rq = NULL;
+
drive = hwgroup->drive;
do {
if (drive->PADAM_sleep && (!sleep || time_after(sleep, drive->PADAM_sleep)))
sleep = drive->PADAM_sleep;
- } while ((drive = drive->next) != hwgroup->drive);
+
+ drive = drive->next;
+ } while (drive!= hwgroup->drive);
+
if (sleep) {
/*
* Take a short snooze, and then wake up this
return; /* no more work for this hwgroup (for now) */
}
ch = drive->channel;
- if (hwgroup->hwif->sharing_irq && ch != hwgroup->hwif && ch->io_ports[IDE_CONTROL_OFFSET]) {
+
+ if (hwgroup->drive->channel->sharing_irq && ch != hwgroup->drive->channel && ch->io_ports[IDE_CONTROL_OFFSET]) {
/* set nIEN for previous channel */
if (ch->intrproc)
else
OUT_BYTE((drive)->ctl|2, ch->io_ports[IDE_CONTROL_OFFSET]);
}
- hwgroup->hwif = ch;
hwgroup->drive = drive;
drive->PADAM_sleep = 0;
drive->PADAM_service_start = jiffies;
unsigned long wait;
/*
- * a global lock protects timers etc -- shouldn't get contention
- * worth mentioning
+ * A global lock protects timers etc -- shouldn't get contention
+ * worth mentioning.
*/
+
spin_lock_irqsave(&ide_lock, flags);
del_timer(&hwgroup->timer);
if ((handler = hwgroup->handler) == NULL) {
+
/*
- * Either a marginal timeout occurred
- * (got the interrupt just as timer expired),
- * or we were "sleeping" to give other devices a chance.
- * Either way, we don't really want to complain about anything.
+ * Either a marginal timeout occurred (got the interrupt just
+ * as timer expired), or we were "sleeping" to give other
+ * devices a chance. Either way, we don't really want to
+ * complain about anything.
*/
+
if (test_and_clear_bit(IDE_SLEEP, &hwgroup->flags))
clear_bit(IDE_BUSY, &hwgroup->flags);
} else {
- ide_drive_t *drive = hwgroup->drive;
+ struct ata_device *drive = hwgroup->drive;
if (!drive) {
printk("ide_timer_expiry: hwgroup->drive was NULL\n");
hwgroup->handler = NULL;
} else {
- struct ata_channel *hwif;
+ struct ata_channel *ch;
ide_startstop_t startstop;
+
/* paranoia */
if (!test_and_set_bit(IDE_BUSY, &hwgroup->flags))
printk("%s: ide_timer_expiry: hwgroup was not busy??\n", drive->name);
* mask the specific IRQ:
*/
spin_unlock(&ide_lock);
- hwif = drive->channel;
+ ch = drive->channel;
#if DISABLE_IRQ_NOSYNC
- disable_irq_nosync(hwif->irq);
+ disable_irq_nosync(ch->irq);
#else
- disable_irq(hwif->irq); /* disable_irq_nosync ?? */
+ disable_irq(ch->irq); /* disable_irq_nosync ?? */
#endif
__cli(); /* local CPU only, as if we were handling an interrupt */
if (hwgroup->poll_timeout != 0) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
if (drive->waiting_for_dma)
- hwgroup->hwif->dmaproc(ide_dma_lostirq, drive);
- (void) ide_ack_intr(hwif);
+ ch->dmaproc(ide_dma_lostirq, drive);
+ (void) ide_ack_intr(ch);
printk("%s: lost interrupt\n", drive->name);
startstop = handler(drive);
} else {
} else
startstop = ide_error(drive, "irq timeout", GET_STAT());
}
- set_recovery_timer(hwif);
+ set_recovery_timer(ch);
drive->PADAM_service_time = jiffies - drive->PADAM_service_start;
- enable_irq(hwif->irq);
+ enable_irq(ch->irq);
spin_lock_irq(&ide_lock);
if (startstop == ide_stopped)
clear_bit(IDE_BUSY, &hwgroup->flags);
}
}
- ide_do_request(hwgroup->hwif, 0);
+ ide_do_request(hwgroup->drive->channel, 0);
spin_unlock_irqrestore(&ide_lock, flags);
}
* accidentally invoked as a result of any valid command completion interrupt.
*
*/
-static void unexpected_intr(int irq, ide_hwgroup_t *hwgroup)
+static void unexpected_irq(int irq)
{
- u8 stat;
- struct ata_channel *hwif = hwgroup->hwif;
+ int i;
- /*
- * handle the unexpected interrupt
- */
- do {
- if (hwif->irq != irq)
+ for (i = 0; i < MAX_HWIFS; ++i) {
+ u8 stat;
+ struct ata_channel *tmp = &ide_hwifs[i];
+
+ if (!tmp->present)
+ continue;
+
+ if (tmp->irq != irq)
continue;
- stat = IN_BYTE(hwif->io_ports[IDE_STATUS_OFFSET]);
+ stat = IN_BYTE(tmp->io_ports[IDE_STATUS_OFFSET]);
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
static unsigned long last_msgtime;
static int count;
+
++count;
if (time_after(jiffies, last_msgtime + HZ)) {
last_msgtime = jiffies;
- printk("%s%s: unexpected interrupt, status=0x%02x, count=%d\n",
- hwif->name, (hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
+ printk("%s: unexpected interrupt, status=0x%02x, count=%d\n",
+ tmp->name, stat, count);
}
}
- hwif = hwif->next;
- } while (hwif != hwgroup->hwif);
+ }
}
/*
ide_startstop_t startstop;
spin_lock_irqsave(&ide_lock, flags);
- ch = hwgroup->hwif;
if (!ide_ack_intr(ch))
goto out_lock;
if (ch->pci_dev && !ch->pci_dev->vendor)
#endif
{
- /*
- * Probably not a shared PCI interrupt,
- * so we can safely try to do something about it:
+ /* Probably not a shared PCI interrupt, so we can
+ * safely try to do something about it:
*/
- unexpected_intr(irq, hwgroup);
+ unexpected_irq(irq);
#ifdef CONFIG_BLK_DEV_IDEPCI
} else {
/*
* Note that we only release the standard ports, and do not even try to handle
* any extra ports allocated for weird IDE interface chipsets.
*/
-static void hwif_unregister(struct ata_channel *hwif)
+static void hwif_unregister(struct ata_channel *ch)
{
- if (hwif->straight8) {
- release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
+ int i;
+ ide_hwgroup_t *hwgroup = ch->hwgroup;
+
+ /*
+ * Free the irq if we were the only channel using it.
+ */
+ int n = 0;
+
+ for (i = 0; i < MAX_HWIFS; ++i) {
+ struct ata_channel *tmp = &ide_hwifs[i];
+
+ if (!tmp->present)
+ continue;
+
+ if (tmp->irq == ch->irq)
+ ++n;
+ }
+ if (n == 1)
+ free_irq(ch->irq, hwgroup);
+
+
+ if (ch->straight8) {
+ release_region(ch->io_ports[IDE_DATA_OFFSET], 8);
} else {
- if (hwif->io_ports[IDE_DATA_OFFSET])
- release_region(hwif->io_ports[IDE_DATA_OFFSET], 1);
- if (hwif->io_ports[IDE_ERROR_OFFSET])
- release_region(hwif->io_ports[IDE_ERROR_OFFSET], 1);
- if (hwif->io_ports[IDE_NSECTOR_OFFSET])
- release_region(hwif->io_ports[IDE_NSECTOR_OFFSET], 1);
- if (hwif->io_ports[IDE_SECTOR_OFFSET])
- release_region(hwif->io_ports[IDE_SECTOR_OFFSET], 1);
- if (hwif->io_ports[IDE_LCYL_OFFSET])
- release_region(hwif->io_ports[IDE_LCYL_OFFSET], 1);
- if (hwif->io_ports[IDE_HCYL_OFFSET])
- release_region(hwif->io_ports[IDE_HCYL_OFFSET], 1);
- if (hwif->io_ports[IDE_SELECT_OFFSET])
- release_region(hwif->io_ports[IDE_SELECT_OFFSET], 1);
- if (hwif->io_ports[IDE_STATUS_OFFSET])
- release_region(hwif->io_ports[IDE_STATUS_OFFSET], 1);
- }
- if (hwif->io_ports[IDE_CONTROL_OFFSET])
- release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
+ if (ch->io_ports[IDE_DATA_OFFSET])
+ release_region(ch->io_ports[IDE_DATA_OFFSET], 1);
+ if (ch->io_ports[IDE_ERROR_OFFSET])
+ release_region(ch->io_ports[IDE_ERROR_OFFSET], 1);
+ if (ch->io_ports[IDE_NSECTOR_OFFSET])
+ release_region(ch->io_ports[IDE_NSECTOR_OFFSET], 1);
+ if (ch->io_ports[IDE_SECTOR_OFFSET])
+ release_region(ch->io_ports[IDE_SECTOR_OFFSET], 1);
+ if (ch->io_ports[IDE_LCYL_OFFSET])
+ release_region(ch->io_ports[IDE_LCYL_OFFSET], 1);
+ if (ch->io_ports[IDE_HCYL_OFFSET])
+ release_region(ch->io_ports[IDE_HCYL_OFFSET], 1);
+ if (ch->io_ports[IDE_SELECT_OFFSET])
+ release_region(ch->io_ports[IDE_SELECT_OFFSET], 1);
+ if (ch->io_ports[IDE_STATUS_OFFSET])
+ release_region(ch->io_ports[IDE_STATUS_OFFSET], 1);
+ }
+ if (ch->io_ports[IDE_CONTROL_OFFSET])
+ release_region(ch->io_ports[IDE_CONTROL_OFFSET], 1);
#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
- if (hwif->io_ports[IDE_IRQ_OFFSET])
- release_region(hwif->io_ports[IDE_IRQ_OFFSET], 1);
+ if (ch->io_ports[IDE_IRQ_OFFSET])
+ release_region(ch->io_ports[IDE_IRQ_OFFSET], 1);
#endif
}
-void ide_unregister(struct ata_channel *channel)
+void ide_unregister(struct ata_channel *ch)
{
struct gendisk *gd;
ide_drive_t *drive, *d;
unsigned long flags;
unsigned int p, minor;
struct ata_channel old_hwif;
+ int n = 0;
spin_lock_irqsave(&ide_lock, flags);
- if (!channel->present)
+ if (!ch->present)
goto abort;
- put_device(&channel->dev);
+ put_device(&ch->dev);
for (unit = 0; unit < MAX_DRIVES; ++unit) {
- drive = &channel->drives[unit];
+ drive = &ch->drives[unit];
if (!drive->present)
continue;
if (drive->busy || drive->usage)
ide_unregister_subdriver(drive);
}
}
- channel->present = 0;
+ ch->present = 0;
/*
* All clear? Then blow away the buffer cache
*/
spin_unlock_irqrestore(&ide_lock, flags);
for (unit = 0; unit < MAX_DRIVES; ++unit) {
- drive = &channel->drives[unit];
+ drive = &ch->drives[unit];
if (!drive->present)
continue;
minor = drive->select.b.unit << PARTN_BITS;
for (p = 0; p < (1<<PARTN_BITS); ++p) {
if (drive->part[p].nr_sects > 0) {
- kdev_t devp = mk_kdev(channel->major, minor+p);
+ kdev_t devp = mk_kdev(ch->major, minor+p);
invalidate_device(devp, 0);
}
}
}
#ifdef CONFIG_PROC_FS
- destroy_proc_ide_drives(channel);
+ destroy_proc_ide_drives(ch);
#endif
spin_lock_irqsave(&ide_lock, flags);
- hwgroup = channel->hwgroup;
-
- /*
- * free the irq if we were the only hwif using it
- */
- {
- struct ata_channel *g;
- int irq_count = 0;
- g = hwgroup->hwif;
- do {
- if (g->irq == channel->irq)
- ++irq_count;
- g = g->next;
- } while (g != hwgroup->hwif);
- if (irq_count == 1)
- free_irq(channel->irq, hwgroup);
- }
- hwif_unregister(channel);
+ hwif_unregister(ch);
/*
- * Remove us from the hwgroup, and free
- * the hwgroup if we were the only member
+ * Remove us from the hwgroup, and free the hwgroup if we were the only
+ * member.
*/
+
+ hwgroup = ch->hwgroup;
d = hwgroup->drive;
for (i = 0; i < MAX_DRIVES; ++i) {
- drive = &channel->drives[i];
+ drive = &ch->drives[i];
if (drive->de) {
devfs_unregister (drive->de);
drive->de = NULL;
}
if (d->present)
hwgroup->drive = d;
- while (hwgroup->hwif->next != channel)
- hwgroup->hwif = hwgroup->hwif->next;
- hwgroup->hwif->next = channel->next;
- if (hwgroup->hwif == channel)
- kfree(hwgroup);
- else
- hwgroup->hwif = hwgroup->drive->channel;
+
+ n = 0;
+ for (i = 0; i < MAX_HWIFS; ++i) {
+ struct ata_channel *tmp = &ide_hwifs[i];
+
+ if (!tmp->present)
+ continue;
+
+ if (tmp->hwgroup == ch->hwgroup)
+ ++n;
+ }
+ if (n == 1)
+ kfree(ch->hwgroup);
#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
- ide_release_dma(channel);
+ ide_release_dma(ch);
#endif
/*
* Remove us from the kernel's knowledge.
*/
- unregister_blkdev(channel->major, channel->name);
- kfree(blksize_size[channel->major]);
- blk_dev[channel->major].data = NULL;
- blk_dev[channel->major].queue = NULL;
- blk_clear(channel->major);
- gd = channel->gd;
+ unregister_blkdev(ch->major, ch->name);
+ kfree(blksize_size[ch->major]);
+ blk_dev[ch->major].data = NULL;
+ blk_dev[ch->major].queue = NULL;
+ blk_clear(ch->major);
+ gd = ch->gd;
if (gd) {
del_gendisk(gd);
kfree(gd->sizes);
if (gd->flags)
kfree (gd->flags);
kfree(gd);
- channel->gd = NULL;
+ ch->gd = NULL;
}
/*
* it.
*/
- old_hwif = *channel;
- init_hwif_data(channel, channel->index);
- channel->hwgroup = old_hwif.hwgroup;
- channel->tuneproc = old_hwif.tuneproc;
- channel->speedproc = old_hwif.speedproc;
- channel->selectproc = old_hwif.selectproc;
- channel->resetproc = old_hwif.resetproc;
- channel->intrproc = old_hwif.intrproc;
- channel->maskproc = old_hwif.maskproc;
- channel->quirkproc = old_hwif.quirkproc;
- channel->rwproc = old_hwif.rwproc;
- channel->ata_read = old_hwif.ata_read;
- channel->ata_write = old_hwif.ata_write;
- channel->atapi_read = old_hwif.atapi_read;
- channel->atapi_write = old_hwif.atapi_write;
- channel->dmaproc = old_hwif.dmaproc;
- channel->busproc = old_hwif.busproc;
- channel->bus_state = old_hwif.bus_state;
- channel->dma_base = old_hwif.dma_base;
- channel->dma_extra = old_hwif.dma_extra;
- channel->config_data = old_hwif.config_data;
- channel->select_data = old_hwif.select_data;
- channel->proc = old_hwif.proc;
+ old_hwif = *ch;
+ init_hwif_data(ch, ch->index);
+ ch->hwgroup = old_hwif.hwgroup;
+ ch->tuneproc = old_hwif.tuneproc;
+ ch->speedproc = old_hwif.speedproc;
+ ch->selectproc = old_hwif.selectproc;
+ ch->resetproc = old_hwif.resetproc;
+ ch->intrproc = old_hwif.intrproc;
+ ch->maskproc = old_hwif.maskproc;
+ ch->quirkproc = old_hwif.quirkproc;
+ ch->rwproc = old_hwif.rwproc;
+ ch->ata_read = old_hwif.ata_read;
+ ch->ata_write = old_hwif.ata_write;
+ ch->atapi_read = old_hwif.atapi_read;
+ ch->atapi_write = old_hwif.atapi_write;
+ ch->dmaproc = old_hwif.dmaproc;
+ ch->busproc = old_hwif.busproc;
+ ch->bus_state = old_hwif.bus_state;
+ ch->dma_base = old_hwif.dma_base;
+ ch->dma_extra = old_hwif.dma_extra;
+ ch->config_data = old_hwif.config_data;
+ ch->select_data = old_hwif.select_data;
+ ch->proc = old_hwif.proc;
#ifndef CONFIG_BLK_DEV_IDECS
- channel->irq = old_hwif.irq;
+ ch->irq = old_hwif.irq;
#endif
- channel->major = old_hwif.major;
- channel->chipset = old_hwif.chipset;
- channel->autodma = old_hwif.autodma;
- channel->udma_four = old_hwif.udma_four;
+ ch->major = old_hwif.major;
+ ch->chipset = old_hwif.chipset;
+ ch->autodma = old_hwif.autodma;
+ ch->udma_four = old_hwif.udma_four;
#ifdef CONFIG_BLK_DEV_IDEPCI
- channel->pci_dev = old_hwif.pci_dev;
-#else
- channel->pci_dev = NULL;
+ ch->pci_dev = old_hwif.pci_dev;
#endif
- channel->straight8 = old_hwif.straight8;
+ ch->straight8 = old_hwif.straight8;
+
abort:
spin_unlock_irqrestore(&ide_lock, flags);
}