/*
 *	IDE I/O functions
 *
 *	Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>
int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
               unsigned int nr_bytes)
{
        /*
         * decide whether to reenable DMA -- 3 is a random magic for now,
         * if we DMA timeout more than 3 times, just stay in PIO
         */
        if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
            drive->retry_pio <= 3) {
                drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
                ide_dma_on(drive);
        }

        if (!blk_update_request(rq, error, nr_bytes)) {
                if (rq == drive->sense_rq) {
                        drive->sense_rq = NULL;
                        drive->sense_rq_active = false;
                }

                __blk_mq_end_request(rq, error);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL_GPL(ide_end_rq);
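
/*
 * Context for the retry accounting above: the DMA timeout path is what
 * sets IDE_DFLAG_DMA_PIO_RETRY and bumps the counter. A minimal sketch
 * of that counterpart, modelled on ide_dma_timeout_retry() in ide-dma.c
 * (details of the real function may differ):
 *
 *	drive->retry_pio++;
 *	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
 *	ide_dma_off_quietly(drive);	// retry the request in PIO mode
 *
 * ide_end_rq() then re-enables DMA on completion, unless the drive has
 * already timed out more than three times.
 */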
void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
        const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
        struct ide_taskfile *tf = &cmd->tf;
        struct request *rq = cmd->rq;
        u8 tf_cmd = tf->command;

        tf->error = err;
        tf->status = stat;

        if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
                u8 data[2];

                tp_ops->input_data(drive, cmd, data, 2);

                cmd->tf.data  = data[0];
                cmd->hob.data = data[1];
        }

        ide_tf_readback(drive, cmd);

        if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
            tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
                if (tf->lbal != 0xc4) {
                        printk(KERN_ERR "%s: head unload failed!\n",
                               drive->name);
                        ide_tf_dump(drive->name, cmd);
                } else
                        drive->dev_flags |= IDE_DFLAG_PARKED;
        }

        if (rq && ata_taskfile_request(rq)) {
                struct ide_cmd *orig_cmd = ide_req(rq)->special;

                if (cmd->tf_flags & IDE_TFLAG_DYN)
                        kfree(orig_cmd);
                else if (cmd != orig_cmd)
                        memcpy(orig_cmd, cmd, sizeof(*cmd));
        }
}
int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->rq;
        int rc;

        /*
         * if failfast is set on a request, override number of sectors
         * and complete the whole request right now
         */
        if (blk_noretry_request(rq) && error)
                nr_bytes = blk_rq_sectors(rq) << 9;

        rc = ide_end_rq(drive, rq, error, nr_bytes);
        if (rc == 0)
                hwif->rq = NULL;

        return rc;
}
EXPORT_SYMBOL(ide_complete_rq);
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
        u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
        u8 media = drive->media;

        drive->failed_pc = NULL;

        if ((media == ide_floppy || media == ide_tape) && drv_req) {
                scsi_req(rq)->result = 0;
        } else {
                if (media == ide_tape)
                        scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
                else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
                        scsi_req(rq)->result = -EIO;
        }

        ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
        tf->nsect   = drive->sect;
        tf->lbal    = drive->sect;
        tf->lbam    = drive->cyl;
        tf->lbah    = drive->cyl >> 8;
        tf->device  = (drive->head - 1) | drive->select;
        tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
        tf->nsect   = drive->sect;
        tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
        tf->nsect   = drive->mult_req;
        tf->command = ATA_CMD_SET_MULTI;
}
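
/*
 * Worked example (illustrative values, not from a real device): for a
 * legacy CHS geometry of 1024 cylinders, 16 heads and 63 sectors,
 * ide_tf_set_specify_cmd() above produces:
 *
 *	tf->nsect   = 63;			// sectors per track
 *	tf->lbal    = 63;
 *	tf->lbam    = 0x00;			// 1024 & 0xff
 *	tf->lbah    = 0x04;			// 1024 >> 8
 *	tf->device  = (16 - 1) | drive->select;	// max head index
 *	tf->command = ATA_CMD_INIT_DEV_PARAMS;
 */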
/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 */
static ide_startstop_t do_special(ide_drive_t *drive)
{
        struct ide_cmd cmd;

#ifdef DEBUG
        printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
                drive->special_flags);
#endif
        if (drive->media != ide_disk) {
                drive->special_flags = 0;
                drive->mult_req = 0;
                return ide_stopped;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.protocol = ATA_PROT_NODATA;

        if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
                drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
                ide_tf_set_specify_cmd(drive, &cmd.tf);
        } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
                drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
                ide_tf_set_restore_cmd(drive, &cmd.tf);
        } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
                drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
                ide_tf_set_setmult_cmd(drive, &cmd.tf);
        } else
                BUG();

        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

        do_rw_taskfile(drive, &cmd);

        return ide_started;
}
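
/*
 * The special_flags bits consumed above are seeded by the error/reset
 * paths. A simplified sketch of the disk case (cf. pre_reset() in
 * ide-eh.c; the exact code there may differ):
 *
 *	if (drive->media == ide_disk)
 *		drive->special_flags =
 *			IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE;
 *
 * start_request() keeps calling do_special() until special_flags is 0,
 * issuing one NODATA taskfile per invocation.
 */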
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
        struct request *rq = cmd->rq;

        cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
        if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
                last_sg->length +=
                        (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
}
EXPORT_SYMBOL_GPL(ide_map_sg);
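
/*
 * Worked padding example: with q->dma_pad_mask == 3 (transfers must be
 * a multiple of 4 bytes) and a 510-byte request, the test above fires
 * since 510 & 3 == 2, and the last scatterlist entry grows by
 * (3 & ~510) + 1 == 2 bytes. The controller thus sees a 512-byte
 * transfer while the block layer still completes only 510 bytes.
 */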
void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
        cmd->nbytes = cmd->nleft = nr_bytes;
        cmd->cursg_ofs = 0;
        cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do that, as it is due to change.
 */
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
                struct request *rq)
{
        struct ide_cmd *cmd = ide_req(rq)->special;

        if (cmd) {
                if (cmd->protocol == ATA_PROT_PIO) {
                        ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
                        ide_map_sg(drive, cmd);
                }

                return do_rw_taskfile(drive, cmd);
        }

        /*
         * NULL is actually a valid way of waiting for
         * all current requests to be flushed from the queue.
         */
#ifdef DEBUG
        printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
        scsi_req(rq)->result = 0;
        ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));

        return ide_stopped;
}
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
        u8 cmd = scsi_req(rq)->cmd[0];

        switch (cmd) {
        case REQ_PARK_HEADS:
        case REQ_UNPARK_HEADS:
                return ide_do_park_unpark(drive, rq);
        case REQ_DEVSET_EXEC:
                return ide_do_devset(drive, rq);
        case REQ_DRIVE_RESET:
                return ide_do_reset(drive);
        default:
                BUG();
        }
}
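
/*
 * The opcodes handled above arrive in scsi_req()->cmd[0] of an
 * ATA_PRIV_MISC request. A simplified sketch of how such a request is
 * built, modelled on ide_devset_execute() in ide-devsets.c (the real
 * function carries more setup; treat this as an outline):
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	ide_req(rq)->type = ATA_PRIV_MISC;
 *	scsi_req(rq)->cmd_len = 5;
 *	scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
 *	*(int *)&scsi_req(rq)->cmd[1] = arg;
 *	blk_execute_rq(q, NULL, rq, 0);
 */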
/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 *
 *	FIXME: this function needs a rename
 */
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
        ide_startstop_t startstop;

#ifdef DEBUG
        printk("%s: start_request: current=0x%08lx\n",
                drive->hwif->name, (unsigned long) rq);
#endif

        /* bail early if we've exceeded max_failures */
        if (drive->max_failures && (drive->failures > drive->max_failures)) {
                rq->rq_flags |= RQF_FAILED;
                goto kill_rq;
        }

        if (drive->prep_rq && !drive->prep_rq(drive, rq))
                return ide_stopped;

        if (ata_pm_request(rq))
                ide_check_pm_state(drive, rq);

        drive->hwif->tp_ops->dev_select(drive);
        if (ide_wait_stat(&startstop, drive, drive->ready_stat,
                          ATA_BUSY | ATA_DRQ, WAIT_READY)) {
                printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
                return startstop;
        }

        if (drive->special_flags == 0) {
                struct ide_driver *drv;

                /*
                 * We reset the drive so we need to issue a SETFEATURES.
                 * Do it _after_ do_special() restored device parameters.
                 */
                if (drive->current_speed == 0xff)
                        ide_config_drive_speed(drive, drive->desired_speed);

                if (ata_taskfile_request(rq))
                        return execute_drive_cmd(drive, rq);
                else if (ata_pm_request(rq)) {
                        struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
                        printk("%s: start_power_step(step: %d)\n",
                                drive->name, pm->pm_step);
#endif
                        startstop = ide_start_power_step(drive, rq);
                        if (startstop == ide_stopped &&
                            pm->pm_step == IDE_PM_COMPLETED)
                                ide_complete_pm_rq(drive, rq);
                        return startstop;
                } else if (!rq->rq_disk && ata_misc_request(rq))
                        /*
                         * TODO: Once all ULDs have been modified to
                         * check for specific op codes rather than
                         * blindly accepting any special request, the
                         * check for ->rq_disk above may be replaced
                         * by a more suitable mechanism or even
                         * dropped entirely.
                         */
                        return ide_special_rq(drive, rq);

                drv = *(struct ide_driver **)rq->rq_disk->private_data;

                return drv->do_request(drive, rq, blk_rq_pos(rq));
        }
        return do_special(drive);
kill_rq:
        ide_kill_rq(drive, rq);

        return ide_stopped;
}
/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */
void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
        if (timeout > WAIT_WORSTCASE)
                timeout = WAIT_WORSTCASE;
        drive->sleep = timeout + jiffies;
        drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
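
/*
 * Usage sketch (hypothetical caller): a media driver that knows its
 * device needs time to settle can yield the port instead of polling:
 *
 *	ide_stall_queue(drive, HZ / 10);	// back off ~100ms
 *	return ide_stopped;
 *
 * ide_issue_rq() below checks IDE_DFLAG_SLEEPING and requeues incoming
 * requests until drive->sleep has passed.
 */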
static inline int ide_lock_port(ide_hwif_t *hwif)
{
        if (hwif->busy)
                return 1;

        hwif->busy = 1;

        return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
        hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
        int rc = 0;

        if (host->host_flags & IDE_HFLAG_SERIALIZE) {
                rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
                if (rc == 0) {
                        if (host->get_lock)
                                host->get_lock(ide_intr, hwif);
                }
        }
        return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
        if (host->host_flags & IDE_HFLAG_SERIALIZE) {
                if (host->release_lock)
                        host->release_lock();
                clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
        }
}
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
        struct request_queue *q = drive->queue;

        /* Use 3ms as that was the old plug delay */
        if (rq) {
                blk_mq_requeue_request(rq, false);
                blk_mq_delay_kick_requeue_list(q, 3);
        } else
                blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}
blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
                          bool local_requeue)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_host *host = hwif->host;
        ide_startstop_t startstop;

        if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
                rq->rq_flags |= RQF_DONTPREP;
                ide_req(rq)->special = NULL;
        }

        /* HLD do_request() callback might sleep, make sure it's okay */
        might_sleep();

        if (ide_lock_host(host, hwif))
                return BLK_STS_DEV_RESOURCE;

        spin_lock_irq(&hwif->lock);

        if (!ide_lock_port(hwif)) {
                ide_hwif_t *prev_port;

                WARN_ON_ONCE(hwif->rq);
repeat:
                prev_port = hwif->host->cur_port;
                if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
                    time_after(drive->sleep, jiffies)) {
                        ide_unlock_port(hwif);
                        goto plug_device;
                }

                if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
                    hwif != prev_port) {
                        ide_drive_t *cur_dev =
                                prev_port ? prev_port->cur_dev : NULL;

                        /*
                         * set nIEN for previous port, drives in the
                         * quirk list may not like intr setups/cleanups
                         */
                        if (cur_dev &&
                            (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
                                prev_port->tp_ops->write_devctl(prev_port,
                                                                ATA_NIEN |
                                                                ATA_DEVCTL_OBS);

                        hwif->host->cur_port = hwif;
                }
                hwif->cur_dev = drive;
                drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

                /*
                 * Sanity: don't accept a request that isn't a PM request
                 * if we are currently power managed. This is very important as
                 * blk_stop_queue() doesn't prevent the blk_fetch_request()
                 * above from returning us whatever is in the queue. Since we
                 * call ide_do_request() ourselves, we end up taking requests
                 * while the queue is blocked...
                 *
                 * We let requests forced at head of queue with ide-preempt
                 * through. I hope that doesn't happen too much, hopefully not
                 * unless the subdriver triggers such a thing in its own PM
                 * state machine.
                 */
                if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
                    ata_pm_request(rq) == 0 &&
                    (rq->rq_flags & RQF_PREEMPT) == 0) {
                        /* there should be no pending command at this point */
                        ide_unlock_port(hwif);
                        goto plug_device;
                }

                scsi_req(rq)->resid_len = blk_rq_bytes(rq);
                hwif->rq = rq;

                spin_unlock_irq(&hwif->lock);
                startstop = start_request(drive, rq);
                spin_lock_irq(&hwif->lock);

                if (startstop == ide_stopped) {
                        rq = hwif->rq;
                        hwif->rq = NULL;
                        if (rq)
                                goto repeat;
                        ide_unlock_port(hwif);
                        goto out;
                }
        } else {
plug_device:
                if (local_requeue)
                        list_add(&rq->queuelist, &drive->rq_list);
                spin_unlock_irq(&hwif->lock);
                ide_unlock_host(host);

                if (!local_requeue)
                        ide_requeue_and_plug(drive, rq);
                return BLK_STS_OK;
        }

out:
        spin_unlock_irq(&hwif->lock);
        if (rq == NULL)
                ide_unlock_host(host);

        return BLK_STS_OK;
}
/*
 * Issue a new request to a device.
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        ide_drive_t *drive = hctx->queue->queuedata;
        ide_hwif_t *hwif = drive->hwif;

        spin_lock_irq(&hwif->lock);
        if (drive->sense_rq_active) {
                spin_unlock_irq(&hwif->lock);
                return BLK_STS_DEV_RESOURCE;
        }
        spin_unlock_irq(&hwif->lock);

        blk_mq_start_request(bd->rq);
        return ide_issue_rq(drive, bd->rq, false);
}
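
/*
 * ide_queue_rq() is the port's blk-mq ->queue_rq() hook. A sketch of
 * how it is wired up (the real registration lives in ide-probe.c; the
 * ops initializer below is illustrative, not copied from there):
 *
 *	static const struct blk_mq_ops ide_mq_ops = {
 *		.queue_rq	  = ide_queue_rq,
 *		.initialize_rq_fn = ide_initialize_rq,
 *	};
 *
 * Returning BLK_STS_DEV_RESOURCE above makes blk-mq hold the request
 * and re-dispatch it later instead of failing it.
 */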
static int drive_is_ready(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 stat = 0;

        if (drive->waiting_for_dma)
                return hwif->dma_ops->dma_test_irq(drive);

        if (hwif->io_ports.ctl_addr &&
            (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
                stat = hwif->tp_ops->read_altstatus(hwif);
        else
                /* Note: this may clear a pending IRQ!! */
                stat = hwif->tp_ops->read_status(hwif);

        if (stat & ATA_BUSY)
                /* drive busy: definitely not interrupting */
                return 0;

        /* drive ready: *might* be interrupting */
        return 1;
}
/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@t: timer handle; the owning port (hwif) is derived from it
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */
void ide_timer_expiry (struct timer_list *t)
{
        ide_hwif_t *hwif = from_timer(hwif, t, timer);
        ide_drive_t *drive;
        ide_handler_t *handler;
        unsigned long flags;
        int wait = -1;
        int plug_device = 0;
        struct request *rq_in_flight;

        spin_lock_irqsave(&hwif->lock, flags);

        handler = hwif->handler;

        if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
                /*
                 * Either a marginal timeout occurred
                 * (got the interrupt just as timer expired),
                 * or we were "sleeping" to give other devices a chance.
                 * Either way, we don't really want to complain about anything.
                 */
        } else {
                ide_expiry_t *expiry = hwif->expiry;
                ide_startstop_t startstop = ide_stopped;

                drive = hwif->cur_dev;

                if (expiry) {
                        wait = expiry(drive);
                        if (wait > 0) { /* continue */
                                /* reset timer */
                                hwif->timer.expires = jiffies + wait;
                                hwif->req_gen_timer = hwif->req_gen;
                                add_timer(&hwif->timer);
                                spin_unlock_irqrestore(&hwif->lock, flags);
                                return;
                        }
                }
                hwif->handler = NULL;
                hwif->expiry = NULL;
                /*
                 * We need to simulate a real interrupt when invoking
                 * the handler() function, which means we need to
                 * globally mask the specific IRQ:
                 */
                spin_unlock(&hwif->lock);
                /* disable_irq_nosync ?? */
                disable_irq(hwif->irq);

                if (hwif->polling) {
                        startstop = handler(drive);
                } else if (drive_is_ready(drive)) {
                        if (drive->waiting_for_dma)
                                hwif->dma_ops->dma_lost_irq(drive);
                        if (hwif->port_ops && hwif->port_ops->clear_irq)
                                hwif->port_ops->clear_irq(drive);

                        printk(KERN_WARNING "%s: lost interrupt\n",
                                drive->name);
                        startstop = handler(drive);
                } else {
                        if (drive->waiting_for_dma)
                                startstop = ide_dma_timeout_retry(drive, wait);
                        else
                                startstop = ide_error(drive, "irq timeout",
                                        hwif->tp_ops->read_status(hwif));
                }
                /* Disable interrupts again, `handler' might have enabled them */
                spin_lock_irq(&hwif->lock);
                enable_irq(hwif->irq);
                if (startstop == ide_stopped && hwif->polling == 0) {
                        rq_in_flight = hwif->rq;
                        hwif->rq = NULL;
                        ide_unlock_port(hwif);
                        plug_device = 1;
                }
        }
        spin_unlock_irqrestore(&hwif->lock, flags);

        if (plug_device) {
                ide_unlock_host(hwif->host);
                ide_requeue_and_plug(drive, rq_in_flight);
        }
}
/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue. The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us. And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 */
static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
        u8 stat = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                /* Try to not flood the console with msgs */
                static unsigned long last_msgtime, count;
                ++count;

                if (time_after(jiffies, last_msgtime + HZ)) {
                        last_msgtime = jiffies;
                        printk(KERN_ERR "%s: unexpected interrupt, "
                                "status=0x%02x, count=%ld\n",
                                hwif->name, stat, count);
                }
        }
}
/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif pointer passed at request_irq() time
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do be aware it is subtle in
 *	places.
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the port and the process begins again.
 */
irqreturn_t ide_intr (int irq, void *dev_id)
{
        ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
        struct ide_host *host = hwif->host;
        ide_drive_t *drive;
        ide_handler_t *handler;
        unsigned long flags;
        ide_startstop_t startstop;
        irqreturn_t irq_ret = IRQ_NONE;
        int plug_device = 0;
        struct request *rq_in_flight;

        if (host->host_flags & IDE_HFLAG_SERIALIZE) {
                if (hwif != host->cur_port)
                        goto out_early;
        }

        spin_lock_irqsave(&hwif->lock, flags);

        if (hwif->port_ops && hwif->port_ops->test_irq &&
            hwif->port_ops->test_irq(hwif) == 0)
                goto out;

        handler = hwif->handler;

        if (handler == NULL || hwif->polling) {
                /*
                 * Not expecting an interrupt from this drive.
                 * That means this could be:
                 *	(1) an interrupt from another PCI device
                 *	sharing the same PCI INT# as us.
                 * or	(2) a drive just entered sleep or standby mode,
                 *	and is interrupting to let us know.
                 * or	(3) a spurious interrupt of unknown origin.
                 *
                 * For PCI, we cannot tell the difference,
                 * so in that case we just ignore it and hope it goes away.
                 */
                if ((host->irq_flags & IRQF_SHARED) == 0) {
                        /*
                         * Probably not a shared PCI interrupt,
                         * so we can safely try to do something about it:
                         */
                        unexpected_intr(irq, hwif);
                } else {
                        /*
                         * Whack the status register, just in case
                         * we have a leftover pending IRQ.
                         */
                        (void)hwif->tp_ops->read_status(hwif);
                }
                goto out;
        }

        drive = hwif->cur_dev;

        if (!drive_is_ready(drive))
                /*
                 * This happens regularly when we share a PCI IRQ with
                 * another device. Unfortunately, it can also happen
                 * with some buggy drives that trigger the IRQ before
                 * their status register is up to date. Hopefully we have
                 * enough advance overhead that the latter isn't a problem.
                 */
                goto out;

        hwif->handler = NULL;
        hwif->expiry = NULL;
        hwif->req_gen++;
        del_timer(&hwif->timer);
        spin_unlock(&hwif->lock);

        if (hwif->port_ops && hwif->port_ops->clear_irq)
                hwif->port_ops->clear_irq(drive);

        if (drive->dev_flags & IDE_DFLAG_UNMASK)
                local_irq_enable_in_hardirq();

        /* service this interrupt, may set handler for next interrupt */
        startstop = handler(drive);

        spin_lock_irq(&hwif->lock);
        /*
         * Note that handler() may have set things up for another
         * interrupt to occur soon, but it cannot happen until
         * we exit from this routine, because it will be the
         * same irq as is currently being serviced here, and Linux
         * won't allow another of the same (on any CPU) until we return.
         */
        if (startstop == ide_stopped && hwif->polling == 0) {
                BUG_ON(hwif->handler);
                rq_in_flight = hwif->rq;
                hwif->rq = NULL;
                ide_unlock_port(hwif);
                plug_device = 1;
        }
        irq_ret = IRQ_HANDLED;
out:
        spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
        if (plug_device) {
                ide_unlock_host(hwif->host);
                ide_requeue_and_plug(drive, rq_in_flight);
        }

        return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
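
/*
 * How the handler chain serviced above is built: each phase of a
 * multi-step command arms hwif->handler and the poll timer before the
 * port is released. An abridged sketch of a PIO step (modelled on
 * task_pio_intr() in ide-taskfile.c; the real handler does much more):
 *
 *	static ide_startstop_t task_pio_intr(ide_drive_t *drive)
 *	{
 *		// ...transfer the next chunk of data...
 *		ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
 *		return ide_started;
 *	}
 *
 * When the final interrupt arrives the handler returns ide_stopped,
 * ide_intr() unlocks the port and the next request is dispatched.
 */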
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 buf[4] = { 0 };

        while (len > 0) {
                if (write)
                        hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
                else
                        hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
                len -= 4;
        }
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
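
/*
 * Padding example: if the drive insists on 512-byte multiples but a
 * command moved only 510 bytes of useful data, the remaining two bytes
 * can be thrown away with:
 *
 *	ide_pad_transfer(drive, 0, 2);	// read and discard 2 bytes
 *
 * The loop above clips each chunk with min(4, len), so short tails
 * never overrun the 4-byte bounce buffer.
 */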
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
        drive->sense_rq_active = true;
        list_add_tail(&rq->queuelist, &drive->rq_list);
        kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);
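
/*
 * The list appended to here is drained from process context: scheduling
 * drive->rq_work runs a worker (see ide-probe.c) that pulls requests
 * off drive->rq_list and re-issues them. A simplified sketch of that
 * consumer (the real worker also quiesces the queue around the loop):
 *
 *	spin_lock_irq(&hwif->lock);
 *	while (!list_empty(&drive->rq_list)) {
 *		rq = list_first_entry(&drive->rq_list,
 *				      struct request, queuelist);
 *		list_del_init(&rq->queuelist);
 *		spin_unlock_irq(&hwif->lock);
 *		ide_issue_rq(drive, rq, true);
 *		spin_lock_irq(&hwif->lock);
 *	}
 *	spin_unlock_irq(&hwif->lock);
 */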