* the bus may be broken enough to walk on our toes at this
* point.
*/
+ ide_hwif_t *hwif = drive->hwif;
int rc;
#ifdef DEBUG_PM
printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
- rc = ide_wait_not_busy(HWIF(drive), 35000);
+ rc = ide_wait_not_busy(hwif, 35000);
if (rc)
printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
SELECT_DRIVE(drive);
- ide_set_irq(drive, 1);
- rc = ide_wait_not_busy(HWIF(drive), 100000);
+ hwif->set_irq(hwif, 1);
+ rc = ide_wait_not_busy(hwif, 100000);
if (rc)
printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
}
* quirk_list may not like intr setups/cleanups
*/
if (drive->quirk_list != 1)
- ide_set_irq(drive, 0);
+ hwif->set_irq(hwif, 0);
}
hwgroup->hwif = hwif;
hwgroup->drive = drive;
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
+ ide_hwif_t *hwif = drive->hwif;
ide_task_t task;
memset(&task, 0, sizeof(task));
task.tf.lbah = (bcount >> 8) & 0xff;
ide_tf_dump(drive->name, &task.tf);
- ide_set_irq(drive, 1);
+ hwif->set_irq(hwif, 1);
SELECT_MASK(drive, 0);
- drive->hwif->tf_load(drive, &task);
+ hwif->tf_load(drive, &task);
}
EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
return inb(hwif->dma_base + ATA_DMA_STATUS);
}
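+/*
+ * Default ->set_irq implementation: program the ATA device control
+ * register.  ATA_DEVCTL_OBS keeps the obsolete always-set bit, bit 1
+ * (0x02) is nIEN (masks the drive IRQ when set) and bit 2 (0x04) is
+ * SRST.  Passing on == 4 asserts SRST together with nIEN; do_reset1()
+ * relies on this hack until a ->softreset method is added (see the
+ * TODO there).
+ */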
+static void ide_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ outb(ctl, hwif->io_ports.ctl_addr);
+}
+
static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
hwif->read_altstatus = ide_read_altstatus;
hwif->read_sff_dma_status = ide_read_sff_dma_status;
+ hwif->set_irq = ide_set_irq;
+
hwif->tf_load = ide_tf_load;
hwif->tf_read = ide_tf_read;
*/
SELECT_MASK(drive, 1);
- ide_set_irq(drive, 0);
+ hwif->set_irq(hwif, 0);
msleep(50);
hwif->exec_command(hwif, WIN_IDENTIFY);
timeout = jiffies + WAIT_WORSTCASE;
SELECT_DRIVE(drive);
SELECT_MASK(drive, 0);
udelay(1);
- ide_set_irq(drive, 0);
+ hwif->set_irq(hwif, 0);
hwif->OUTB(speed, io_ports->nsect_addr);
hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
hwif->exec_command(hwif, WIN_SETFEATURES);
if (drive->quirk_list == 2)
- ide_set_irq(drive, 1);
+ hwif->set_irq(hwif, 1);
error = __ide_wait_stat(drive, drive->ready_stat,
BUSY_STAT|DRQ_STAT|ERR_STAT,
ide_hwgroup_t *hwgroup;
struct ide_io_ports *io_ports;
const struct ide_port_ops *port_ops;
- u8 ctl;
spin_lock_irqsave(&ide_lock, flags);
hwif = HWIF(drive);
* immediate interrupt due to the edge transition it produces.
* This single interrupt gives us a "fast poll" for drives that
* recover from reset very quickly, saving us the first 50ms wait time.
+ *
+ * TODO: add ->softreset method and stop abusing ->set_irq
*/
/* set SRST and nIEN */
- hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr);
+ hwif->set_irq(hwif, 4);
/* more than enough time */
udelay(10);
- if (drive->quirk_list == 2)
- ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */
- else
- ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */
- hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
+ /* clear SRST; clear nIEN too only for quirk_list == 2 drives */
+ hwif->set_irq(hwif, drive->quirk_list == 2);
/* more than enough time */
udelay(10);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
autoprobe = 1;
cookie = probe_irq_on();
}
- ide_set_irq(drive, autoprobe);
+ hwif->set_irq(hwif, autoprobe);
}
retval = actual_try_to_identify(drive, cmd);
if (autoprobe) {
int irq;
- ide_set_irq(drive, 0);
+ hwif->set_irq(hwif, 0);
/* clear drive IRQ */
(void)hwif->read_status(hwif);
udelay(5);
/* Ignore disks that we will not probe for later. */
if (!drive->noprobe || drive->present) {
SELECT_DRIVE(drive);
- ide_set_irq(drive, 1);
+ hwif->set_irq(hwif, 1);
mdelay(2);
rc = ide_wait_not_busy(hwif, 35000);
if (rc)
sa = IRQF_SHARED;
if (io_ports->ctl_addr)
- /* clear nIEN */
- hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr);
+ hwif->set_irq(hwif, 1);
if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
goto out_unlink;
if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
ide_tf_dump(drive->name, tf);
- ide_set_irq(drive, 1);
+ hwif->set_irq(hwif, 1);
SELECT_MASK(drive, 0);
hwif->tf_load(drive, task);
}
return (u8)in_be32((void *)(hwif->dma_base + 4));
}
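+/*
+ * SCC variant of ->set_irq: same device control register encoding as
+ * the default implementation, followed by the eieio barriers and the
+ * posting read this driver already uses to flush MMIO writes to the
+ * controller.
+ */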
+static void scc_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ out_be32((void *)hwif->io_ports.ctl_addr, ctl);
+ eieio();
+ in_be32((void *)(hwif->dma_base + 0x01c));
+ eieio();
+}
+
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
u16 *ptr = (u16 *)addr;
hwif->read_altstatus = scc_read_altstatus;
hwif->read_sff_dma_status = scc_read_sff_dma_status;
+ hwif->set_irq = scc_set_irq;
+
hwif->tf_load = scc_tf_load;
hwif->tf_read = scc_tf_read;
+ IDE_TIMING_CONFIG));
}
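+/*
+ * pmac variant of ->set_irq: same encoding as the default, with a
+ * readback of IDE_TIMING_CONFIG to flush the MMIO write, mirroring
+ * pmac_exec_command() above.
+ */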
+static void pmac_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ (void)readl((void __iomem *)(hwif->io_ports.data_addr
+ + IDE_TIMING_CONFIG));
+}
+
/*
* Old tuning functions (called on hdparm -p), sets up drive PIO timings
*/
return -ENOENT;
hwif->exec_command = pmac_exec_command;
+ hwif->set_irq = pmac_set_irq;
/* Setup MMIO ops */
default_hwif_mmiops(hwif);
u8 (*read_altstatus)(struct hwif_s *);
u8 (*read_sff_dma_status)(struct hwif_s *);
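+ /* toggle nIEN in the device control reg (also abused for SRST) */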
+ void (*set_irq)(struct hwif_s *, int);
+
void (*tf_load)(ide_drive_t *, struct ide_task_s *);
void (*tf_read)(ide_drive_t *, struct ide_task_s *);
return &hwif->drives[(drive->dn ^ 1) & 1];
}
-static inline void ide_set_irq(ide_drive_t *drive, int on)
-{
- ide_hwif_t *hwif = drive->hwif;
-
- hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
- hwif->io_ports.ctl_addr);
-}
-
static inline u8 ide_read_error(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;