mtip32xx: add module.h include to avoid conflict with moduleh tree
drivers/block/mtip32xx/mtip32xx.c
1 /*
2  * Driver for the Micron P320 SSD
3  *   Copyright (C) 2011 Micron Technology, Inc.
4  *
5  * Portions of this code were derived from works subjected to the
6  * following copyright:
7  *    Copyright (C) 2009 Integrated Device Technology, Inc.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  */
20
21 #include <linux/pci.h>
22 #include <linux/interrupt.h>
23 #include <linux/ata.h>
24 #include <linux/delay.h>
25 #include <linux/hdreg.h>
26 #include <linux/uaccess.h>
27 #include <linux/random.h>
28 #include <linux/smp.h>
29 #include <linux/compat.h>
30 #include <linux/fs.h>
31 #include <linux/module.h>
32 #include <linux/genhd.h>
33 #include <linux/blkdev.h>
34 #include <linux/bio.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/idr.h>
37 #include <../drivers/ata/ahci.h>
38 #include "mtip32xx.h"
39
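/*
 * Sizes of the per-port DMA region: a 32-byte command header per slot
 * (the command list), one command table per slot (a fixed header plus a
 * 16-byte scatter/gather entry per segment), and the receive-FIS area.
 */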
40 #define HW_CMD_SLOT_SZ          (MTIP_MAX_COMMAND_SLOTS * 32)
41 #define HW_CMD_TBL_SZ           (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
42 #define HW_CMD_TBL_AR_SZ        (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
43 #define HW_PORT_PRIV_DMA_SZ \
44                 (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
45
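/*
 * Vendor-specific host register (HSORG) and its fields; the bit meanings
 * below are inferred from the field names: hardware revision, style,
 * number of command slot groups, and controls to disable per-slot-group
 * interrupts and PxIS reporting.
 */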
46 #define HOST_HSORG              0xFC
47 #define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
48 #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
49 #define HSORG_HWREV             0xFF00
50 #define HSORG_STYLE             0x8
51 #define HSORG_SLOTGROUPS        0x7
52
53 #define PORT_COMMAND_ISSUE      0x38
54 #define PORT_SDBV               0x7C
55
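/*
 * Standard AHCI port register layout: the first port's registers begin
 * 0x100 bytes into the HBA register space and each port occupies 0x80
 * bytes.
 */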
56 #define PORT_OFFSET             0x100
57 #define PORT_MEM_SIZE           0x80
58
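/*
 * Groupings of port interrupt status bits: error conditions, legacy
 * PIO-setup/D2H completions, every status bit this driver handles, and
 * the default set programmed into PORT_IRQ_MASK when a port is
 * initialized.
 */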
59 #define PORT_IRQ_ERR \
60         (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
61          PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
62          PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
63          PORT_IRQ_OVERFLOW)
64 #define PORT_IRQ_LEGACY \
65         (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
66 #define PORT_IRQ_HANDLED \
67         (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
68          PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
69          PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
70 #define DEF_PORT_IRQ \
71         (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
72
73 /* product numbers */
74 #define MTIP_PRODUCT_UNKNOWN    0x00
75 #define MTIP_PRODUCT_ASICFPGA   0x11
76
77 /* Device instance number, incremented each time a device is probed. */
78 static int instance;
79
80 /*
81  * Global variable used to hold the major block device number
82  * allocated in mtip_init().
83  */
84 static int mtip_major;
85
86 static DEFINE_SPINLOCK(rssd_index_lock);
87 static DEFINE_IDA(rssd_index_ida);
88
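/*
 * Layout of ide_task_request_s as seen by 32-bit user space;
 * compat_ulong_t keeps the field sizes consistent on a 64-bit kernel.
 */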
89 #ifdef CONFIG_COMPAT
90 struct mtip_compat_ide_task_request_s {
91         __u8            io_ports[8];
92         __u8            hob_ports[8];
93         ide_reg_valid_t out_flags;
94         ide_reg_valid_t in_flags;
95         int             data_phase;
96         int             req_cmd;
97         compat_ulong_t  out_size;
98         compat_ulong_t  in_size;
99 };
100 #endif
101
102 static int mtip_exec_internal_command(struct mtip_port *port,
103                                 void *fis,
104                                 int fisLen,
105                                 dma_addr_t buffer,
106                                 int bufLen,
107                                 u32 opts,
108                                 gfp_t atomic,
109                                 unsigned long timeout);
110
111 /*
112  * Check for a surprise removal of the device.  Called when the card
113  * may have been removed from the system; reads the vendor ID from the
114  * PCI configuration space and treats an all-ones value as removed.
115  *
116  * @pdev Pointer to the pci_dev structure.
117  *
118  * return value
119  *       true if device removed, else false
120  */
121 static bool mtip_check_surprise_removal(struct pci_dev *pdev)
122 {
123         u16 vendor_id = 0;
124
125         /* Read the vendor ID from the configuration space */
126         pci_read_config_word(pdev, 0x00, &vendor_id);
127         if (vendor_id == 0xFFFF)
128                 return true; /* device removed */
129
130         return false; /* device present */
131 }
132
133 /*
134  * Called to clean up any commands still pending in the command slots
135  * after a surprise removal of the device, returning an error to the
136  * upper layer for each outstanding command.
137  *
138  * @dd Pointer to the DRIVER_DATA structure.
139  *
140  * return value
141  *      None
142  */
143 static void mtip_command_cleanup(struct driver_data *dd)
144 {
145         int group = 0, commandslot = 0, commandindex = 0;
146         struct mtip_cmd *command;
147         struct mtip_port *port = dd->port;
148
149         for (group = 0; group < 4; group++) {
150                 for (commandslot = 0; commandslot < 32; commandslot++) {
151                         if (!(port->allocated[group] & (1 << commandslot)))
152                                 continue;
153
154                         commandindex = group << 5 | commandslot;
155                         command = &port->commands[commandindex];
156
157                         if (atomic_read(&command->active)
158                             && (command->async_callback)) {
159                                 command->async_callback(command->async_data,
160                                         -ENODEV);
161                                 command->async_callback = NULL;
162                                 command->async_data = NULL;
163                         }
164
165                         dma_unmap_sg(&port->dd->pdev->dev,
166                                 command->sg,
167                                 command->scatter_ents,
168                                 command->direction);
169                 }
170         }
171
172         up(&port->cmd_slot);
173
174         atomic_set(&dd->drv_cleanup_done, true);
175 }
176
177 /*
178  * Obtain an empty command slot.
179  *
180  * This function needs to be reentrant since it could be called
181  * at the same time on multiple CPUs. The allocation of the
182  * command slot must be atomic.
183  *
184  * @port Pointer to the port data structure.
185  *
186  * return value
187  *      >= 0    Index of command slot obtained.
188  *      -1      No command slots available.
189  */
190 static int get_slot(struct mtip_port *port)
191 {
192         int slot, i;
193         unsigned int num_command_slots = port->dd->slot_groups * 32;
194
195         /*
196          * Try 10 times, because there is a small race here.
197          * That's OK, because it's still cheaper than a lock.
198          *
199          * Race: since this section is not protected by a lock, the same
200          * bit could be chosen by different process contexts running on
201          * different processors.  Instead of a costly lock, we retry in
202          * a loop.
203          */
204         for (i = 0; i < 10; i++) {
205                 slot = find_next_zero_bit(port->allocated,
206                                          num_command_slots, 1);
207                 if ((slot < num_command_slots) &&
208                     (!test_and_set_bit(slot, port->allocated)))
209                         return slot;
210         }
211         dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
212
213         if (mtip_check_surprise_removal(port->dd->pdev)) {
214                 /* Device not present, clean outstanding commands */
215                 mtip_command_cleanup(port->dd);
216         }
217         return -1;
218 }
219
220 /*
221  * Release a command slot.
222  *
223  * @port Pointer to the port data structure.
224  * @tag  Tag of command to release
225  *
226  * return value
227  *      None
228  */
229 static inline void release_slot(struct mtip_port *port, int tag)
230 {
231         smp_mb__before_clear_bit();
232         clear_bit(tag, port->allocated);
233         smp_mb__after_clear_bit();
234 }
235
236 /*
237  * Reset the HBA (without sleeping)
238  *
239  * Just like mtip_hba_reset(), except it does not sleep, so it can
240  * be run from interrupt/tasklet context.
241  *
242  * @dd Pointer to the driver data structure.
243  *
244  * return value
245  *      0       The reset was successful.
246  *      -1      The HBA Reset bit did not clear.
247  */
248 static int hba_reset_nosleep(struct driver_data *dd)
249 {
250         unsigned long timeout;
251
252         /* Chip quirk: quiesce any chip function */
253         mdelay(10);
254
255         /* Set the reset bit */
256         writel(HOST_RESET, dd->mmio + HOST_CTL);
257
258         /* Flush */
259         readl(dd->mmio + HOST_CTL);
260
261         /*
262          * Wait 10ms then spin for up to 1 second
263          * waiting for reset acknowledgement
264          */
265         timeout = jiffies + msecs_to_jiffies(1000);
266         mdelay(10);
267         while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
268                  && time_before(jiffies, timeout))
269                 mdelay(1);
270
271         if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
272                 return -1;
273
274         return 0;
275 }
276
277 /*
278  * Issue a command to the hardware.
279  *
280  * Set the appropriate bit in the s_active and Command Issue hardware
281  * registers, causing hardware command processing to begin.
282  *
283  * @port Pointer to the port structure.
284  * @tag  The tag of the command to be issued.
285  *
286  * return value
287  *      None
288  */
289 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
290 {
291         unsigned long flags = 0;
292
293         atomic_set(&port->commands[tag].active, 1);
294
295         spin_lock_irqsave(&port->cmd_issue_lock, flags);
296
297         writel((1 << MTIP_TAG_BIT(tag)),
298                         port->s_active[MTIP_TAG_INDEX(tag)]);
299         writel((1 << MTIP_TAG_BIT(tag)),
300                         port->cmd_issue[MTIP_TAG_INDEX(tag)]);
301
302         spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
303 }
304
305 /*
306  * Enable/disable the reception of FIS
307  *
308  * @port   Pointer to the port data structure
309  * @enable 1 to enable, 0 to disable
310  *
311  * return value
312  *      Previous state: 1 enabled, 0 disabled
313  */
314 static int mtip_enable_fis(struct mtip_port *port, int enable)
315 {
316         u32 tmp;
317
318         /* Enable or disable FIS reception */
319         tmp = readl(port->mmio + PORT_CMD);
320         if (enable)
321                 writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
322         else
323                 writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
324
325         /* Flush */
326         readl(port->mmio + PORT_CMD);
327
328         return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
329 }
330
331 /*
332  * Enable/disable the DMA engine
333  *
334  * @port   Pointer to the port data structure
335  * @enable 1 to enable, 0 to disable
336  *
337  * return value
338  *      Previous state: 1 enabled, 0 disabled.
339  */
340 static int mtip_enable_engine(struct mtip_port *port, int enable)
341 {
342         u32 tmp;
343
344         /* Enable or disable the DMA engine */
345         tmp = readl(port->mmio + PORT_CMD);
346         if (enable)
347                 writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
348         else
349                 writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
350
351         readl(port->mmio + PORT_CMD);
352         return (((tmp & PORT_CMD_START) == PORT_CMD_START));
353 }
354
355 /*
356  * Enables the port DMA engine and FIS reception.
357  *
358  * return value
359  *      None
360  */
361 static inline void mtip_start_port(struct mtip_port *port)
362 {
363         /* Enable FIS reception */
364         mtip_enable_fis(port, 1);
365
366         /* Enable the DMA engine */
367         mtip_enable_engine(port, 1);
368 }
369
370 /*
371  * Deinitialize a port by disabling port interrupts, the DMA engine,
372  * and FIS reception.
373  *
374  * @port Pointer to the port structure
375  *
376  * return value
377  *      None
378  */
379 static inline void mtip_deinit_port(struct mtip_port *port)
380 {
381         /* Disable interrupts on this port */
382         writel(0, port->mmio + PORT_IRQ_MASK);
383
384         /* Disable the DMA engine */
385         mtip_enable_engine(port, 0);
386
387         /* Disable FIS reception */
388         mtip_enable_fis(port, 0);
389 }
390
391 /*
392  * Initialize a port.
393  *
394  * This function deinitializes the port by calling mtip_deinit_port() and
395  * then initializes it by setting the command header and RX FIS addresses,
396  * clearing the SError register and any pending port interrupts before
397  * re-enabling the default set of port interrupts.
398  *
399  * @port Pointer to the port structure.
400  *
401  * return value
402  *      None
403  */
404 static void mtip_init_port(struct mtip_port *port)
405 {
406         int i;
407         mtip_deinit_port(port);
408
409         /* Program the command list base and FIS base addresses */
410         if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
411                 writel((port->command_list_dma >> 16) >> 16,
412                          port->mmio + PORT_LST_ADDR_HI);
413                 writel((port->rxfis_dma >> 16) >> 16,
414                          port->mmio + PORT_FIS_ADDR_HI);
415         }
416
417         writel(port->command_list_dma & 0xffffffff,
418                         port->mmio + PORT_LST_ADDR);
419         writel(port->rxfis_dma & 0xffffffff, port->mmio + PORT_FIS_ADDR);
420
421         /* Clear SError */
422         writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
423
424         /* Reset the completed registers. */
425         for (i = 0; i < port->dd->slot_groups; i++)
426                 writel(0xFFFFFFFF, port->completed[i]);
427
428         /* Clear any pending interrupts for this port */
429         writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
430
431         /* Enable port interrupts */
432         writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
433 }
434
435 /*
436  * Restart a port
437  *
438  * @port Pointer to the port data structure.
439  *
440  * return value
441  *      None
442  */
443 static void mtip_restart_port(struct mtip_port *port)
444 {
445         unsigned long timeout;
446
447         /* Disable the DMA engine */
448         mtip_enable_engine(port, 0);
449
450         /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
451         timeout = jiffies + msecs_to_jiffies(500);
452         while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
453                  && time_before(jiffies, timeout))
454                 ;
455
456         /*
457          * Chip quirk: escalate to hba reset if
458          * PxCMD.CR not clear after 500 ms
459          */
460         if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
461                 dev_warn(&port->dd->pdev->dev,
462                         "PxCMD.CR not clear, escalating reset\n");
463
464                 if (hba_reset_nosleep(port->dd))
465                         dev_err(&port->dd->pdev->dev,
466                                 "HBA reset escalation failed.\n");
467
468                 /* 30 ms delay before com reset to quiesce chip */
469                 mdelay(30);
470         }
471
472         dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
473
474         /* Set PxSCTL.DET */
475         writel(readl(port->mmio + PORT_SCR_CTL) |
476                          1, port->mmio + PORT_SCR_CTL);
477         readl(port->mmio + PORT_SCR_CTL);
478
479         /* Wait 1 ms to quiesce chip function */
480         timeout = jiffies + msecs_to_jiffies(1);
481         while (time_before(jiffies, timeout))
482                 ;
483
484         /* Clear PxSCTL.DET */
485         writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
486                          port->mmio + PORT_SCR_CTL);
487         readl(port->mmio + PORT_SCR_CTL);
488
489         /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
490         timeout = jiffies + msecs_to_jiffies(500);
491         while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
492                          && time_before(jiffies, timeout))
493                 ;
494
495         if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
496                 dev_warn(&port->dd->pdev->dev,
497                         "COM reset failed\n");
498
499         /* Clear SError; the COM reset will have set PxSERR.DIAG.x, so clear it */
500         writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
501
502         /* Enable the DMA engine */
503         mtip_enable_engine(port, 1);
504 }
505
506 /*
507  * Called periodically to see if any read/write commands are
508  * taking too long to complete.
509  *
510  * @data Pointer to the PORT data structure.
511  *
512  * return value
513  *      None
514  */
515 static void mtip_timeout_function(unsigned long int data)
516 {
517         struct mtip_port *port = (struct mtip_port *) data;
518         struct host_to_dev_fis *fis;
519         struct mtip_cmd *command;
520         int tag, cmdto_cnt = 0;
521         unsigned int bit, group;
522         unsigned int num_command_slots;
523
524         if (unlikely(!port))
525                 return;
526
            /* Only dereference port after the NULL check above. */
            num_command_slots = port->dd->slot_groups * 32;

527         if (atomic_read(&port->dd->resumeflag) == true) {
528                 mod_timer(&port->cmd_timer,
529                         jiffies + msecs_to_jiffies(30000));
530                 return;
531         }
532
533         for (tag = 0; tag < num_command_slots; tag++) {
534                 /*
535                  * Skip internal command slot as it has
536                  * its own timeout mechanism
537                  */
538                 if (tag == MTIP_TAG_INTERNAL)
539                         continue;
540
541                 if (atomic_read(&port->commands[tag].active) &&
542                    (time_after(jiffies, port->commands[tag].comp_time))) {
543                         group = tag >> 5;
544                         bit = tag & 0x1f;
545
546                         command = &port->commands[tag];
547                         fis = (struct host_to_dev_fis *) command->command;
548
549                         dev_warn(&port->dd->pdev->dev,
550                                 "Timeout for command tag %d\n", tag);
551
552                         cmdto_cnt++;
553                         if (cmdto_cnt == 1)
554                                 atomic_inc(&port->dd->eh_active);
555
556                         /*
557                          * Clear the completed bit. This should prevent
558                          *  any interrupt handlers from trying to retire
559                          *  the command.
560                          */
561                         writel(1 << bit, port->completed[group]);
562
563                         /* Call the async completion callback. */
564                         if (likely(command->async_callback))
565                                 command->async_callback(command->async_data,
566                                                          -EIO);
567                         command->async_callback = NULL;
568                         command->comp_func = NULL;
569
570                         /* Unmap the DMA scatter list entries */
571                         dma_unmap_sg(&port->dd->pdev->dev,
572                                         command->sg,
573                                         command->scatter_ents,
574                                         command->direction);
575
576                         /*
577                          * Clear the allocated bit and active tag for the
578                          * command.
579                          */
580                         atomic_set(&port->commands[tag].active, 0);
581                         release_slot(port, tag);
582
583                         up(&port->cmd_slot);
584                 }
585         }
586
587         if (cmdto_cnt) {
588                 dev_warn(&port->dd->pdev->dev,
589                         "%d commands timed out: restarting port",
590                         cmdto_cnt);
591                 mtip_restart_port(port);
592                 atomic_dec(&port->dd->eh_active);
593         }
594
595         /* Restart the timer */
596         mod_timer(&port->cmd_timer,
597                 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
598 }
599
600 /*
601  * IO completion function.
602  *
603  * This completion function is called by the driver ISR when a
604  * command that was issued by the kernel completes. It first calls the
605  * asynchronous completion function which normally calls back into the block
606  * layer passing the asynchronous callback data, then unmaps the
607  * scatter list associated with the completed command, and finally
608  * clears the allocated bit associated with the completed command.
609  *
610  * @port   Pointer to the port data structure.
611  * @tag    Tag of the command.
612  * @data   Pointer to driver_data.
613  * @status Completion status.
614  *
615  * return value
616  *      None
617  */
618 static void mtip_async_complete(struct mtip_port *port,
619                                 int tag,
620                                 void *data,
621                                 int status)
622 {
623         struct mtip_cmd *command;
624         struct driver_data *dd = data;
625         int cb_status = status ? -EIO : 0;
626
627         if (unlikely(!dd) || unlikely(!port))
628                 return;
629
630         command = &port->commands[tag];
631
632         if (unlikely(status == PORT_IRQ_TF_ERR)) {
633                 dev_warn(&port->dd->pdev->dev,
634                         "Command tag %d failed due to TFE\n", tag);
635         }
636
637         /* Upper layer callback */
638         if (likely(command->async_callback))
639                 command->async_callback(command->async_data, cb_status);
640
641         command->async_callback = NULL;
642         command->comp_func = NULL;
643
644         /* Unmap the DMA scatter list entries */
645         dma_unmap_sg(&dd->pdev->dev,
646                 command->sg,
647                 command->scatter_ents,
648                 command->direction);
649
650         /* Clear the allocated and active bits for the command */
651         atomic_set(&port->commands[tag].active, 0);
652         release_slot(port, tag);
653
654         up(&port->cmd_slot);
655 }
656
657 /*
658  * Internal command completion callback function.
659  *
660  * This function is normally called by the driver ISR when an internal
661  * command completes.  It signals the command completion by
662  * calling complete().
663  *
664  * @port   Pointer to the port data structure.
665  * @tag    Tag of the command that has completed.
666  * @data   Pointer to a completion structure.
667  * @status Completion status.
668  *
669  * return value
670  *      None
671  */
672 static void mtip_completion(struct mtip_port *port,
673                             int tag,
674                             void *data,
675                             int status)
676 {
677         struct mtip_cmd *command = &port->commands[tag];
678         struct completion *waiting = data;
679         if (unlikely(status == PORT_IRQ_TF_ERR))
680                 dev_warn(&port->dd->pdev->dev,
681                         "Internal command %d completed with TFE\n", tag);
682
683         command->async_callback = NULL;
684         command->comp_func = NULL;
685
686         complete(waiting);
687 }
688
689 /*
690  * Helper function for tag logging
691  */
692 static void print_tags(struct driver_data *dd,
693                         char *msg,
694                         unsigned long *tagbits)
695 {
696         unsigned int tag, count = 0;
697
698         for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
699                 if (test_bit(tag, tagbits))
700                         count++;
701         }
702         if (count)
703                 dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
704 }
705
706 /*
707  * Handle a taskfile error (TFE).
708  *
709  * @dd Pointer to the DRIVER_DATA structure.
710  *
711  * return value
712  *      None
713  */
714 static void mtip_handle_tfe(struct driver_data *dd)
715 {
716         int group, tag, bit, reissue;
717         struct mtip_port *port;
718         struct mtip_cmd  *command;
719         u32 completed;
720         struct host_to_dev_fis *fis;
721         unsigned long tagaccum[SLOTBITS_IN_LONGS];
722
723         dev_warn(&dd->pdev->dev, "Taskfile error\n");
724
725         port = dd->port;
726
727         /* Stop the timer to prevent command timeouts. */
728         del_timer(&port->cmd_timer);
729
730         /* Set eh_active */
731         atomic_inc(&dd->eh_active);
732
733         /* Loop through all the groups */
734         for (group = 0; group < dd->slot_groups; group++) {
735                 completed = readl(port->completed[group]);
736
737                 /* clear completed status register in the hardware.*/
738                 writel(completed, port->completed[group]);
739
740                 /* clear the tag accumulator */
741                 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
742
743                 /* Process successfully completed commands */
744                 for (bit = 0; bit < 32 && completed; bit++) {
745                         if (!(completed & (1<<bit)))
746                                 continue;
747                         tag = (group << 5) + bit;
748
749                         /* Skip the internal command slot */
750                         if (tag == MTIP_TAG_INTERNAL)
751                                 continue;
752
753                         command = &port->commands[tag];
754                         if (likely(command->comp_func)) {
755                                 set_bit(tag, tagaccum);
756                                 atomic_set(&port->commands[tag].active, 0);
757                                 command->comp_func(port,
758                                          tag,
759                                          command->comp_data,
760                                          0);
761                         } else {
762                                 dev_err(&port->dd->pdev->dev,
763                                         "Missing completion func for tag %d",
764                                         tag);
765                                 if (mtip_check_surprise_removal(dd->pdev)) {
766                                         mtip_command_cleanup(dd);
767                                         /* don't proceed further */
768                                         return;
769                                 }
770                         }
771                 }
772         }
773         print_tags(dd, "TFE tags completed:", tagaccum);
774
775         /* Restart the port */
776         mdelay(20);
777         mtip_restart_port(port);
778
779         /* clear the tag accumulator */
780         memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
781
782         /* Loop through all the groups */
783         for (group = 0; group < dd->slot_groups; group++) {
784                 for (bit = 0; bit < 32; bit++) {
785                         reissue = 1;
786                         tag = (group << 5) + bit;
787
788                         /* If the active bit is set re-issue the command */
789                         if (atomic_read(&port->commands[tag].active) == 0)
790                                 continue;
791
792                         fis = (struct host_to_dev_fis *)
793                                 port->commands[tag].command;
794
795                         /* Should re-issue? */
796                         if (tag == MTIP_TAG_INTERNAL ||
797                             fis->command == ATA_CMD_SET_FEATURES)
798                                 reissue = 0;
799
800                         /*
801                          * First check if this command has
802                          *  exceeded its retries.
803                          */
804                         if (reissue &&
805                             (port->commands[tag].retries-- > 0)) {
806
807                                 set_bit(tag, tagaccum);
808
809                                 /* Update the timeout value. */
810                                 port->commands[tag].comp_time =
811                                         jiffies + msecs_to_jiffies(
812                                         MTIP_NCQ_COMMAND_TIMEOUT_MS);
813                                 /* Re-issue the command. */
814                                 mtip_issue_ncq_command(port, tag);
815
816                                 continue;
817                         }
818
819                         /* Retire a command that will not be reissued */
820                         dev_warn(&port->dd->pdev->dev,
821                                 "retiring tag %d\n", tag);
822                         atomic_set(&port->commands[tag].active, 0);
823
824                         if (port->commands[tag].comp_func)
825                                 port->commands[tag].comp_func(
826                                         port,
827                                         tag,
828                                         port->commands[tag].comp_data,
829                                         PORT_IRQ_TF_ERR);
830                         else
831                                 dev_warn(&port->dd->pdev->dev,
832                                         "Bad completion for tag %d\n",
833                                         tag);
834                 }
835         }
836         print_tags(dd, "TFE tags reissued:", tagaccum);
837
838         /* Decrement eh_active */
839         atomic_dec(&dd->eh_active);
840
841         mod_timer(&port->cmd_timer,
842                  jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
843 }
844
845 /*
846  * Handle a set device bits interrupt
847  */
848 static inline void mtip_process_sdbf(struct driver_data *dd)
849 {
850         struct mtip_port  *port = dd->port;
851         int group, tag, bit;
852         u32 completed;
853         struct mtip_cmd *command;
854
855         /* walk all bits in all slot groups */
856         for (group = 0; group < dd->slot_groups; group++) {
857                 completed = readl(port->completed[group]);
858
859                 /* clear completed status register in the hardware.*/
860                 writel(completed, port->completed[group]);
861
862                 /* Process completed commands. */
863                 for (bit = 0;
864                      (bit < 32) && completed;
865                      bit++, completed >>= 1) {
866                         if (completed & 0x01) {
867                                 tag = (group << 5) | bit;
868
869                                 /* skip internal command slot. */
870                                 if (unlikely(tag == MTIP_TAG_INTERNAL))
871                                         continue;
872
873                                 command = &port->commands[tag];
874
875                                 /* make internal callback */
876                                 if (likely(command->comp_func)) {
877                                         command->comp_func(
878                                                 port,
879                                                 tag,
880                                                 command->comp_data,
881                                                 0);
882                                 } else {
883                                         dev_warn(&dd->pdev->dev,
884                                                 "Null completion "
885                                                 "for tag %d",
886                                                 tag);
887
888                                         if (mtip_check_surprise_removal(
889                                                 dd->pdev)) {
890                                                 mtip_command_cleanup(dd);
891                                                 return;
892                                         }
893                                 }
894                         }
895                 }
896         }
897 }
898
899 /*
900  * Process legacy PIO setup and D2H register FIS interrupts
901  */
902 static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
903 {
904         struct mtip_port *port = dd->port;
905         struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
906
907         if (port->internal_cmd_in_progress &&
908             cmd != NULL &&
909             !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
910                 & (1 << MTIP_TAG_INTERNAL))) {
911                 if (cmd->comp_func) {
912                         cmd->comp_func(port,
913                                 MTIP_TAG_INTERNAL,
914                                 cmd->comp_data,
915                                 0);
916                         return;
917                 }
918         }
919
920         dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
921
922         return;
923 }
924
925 /*
926  * Demux and handle errors
927  */
928 static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
929 {
930         if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
931                 mtip_handle_tfe(dd);
932
933         if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
934                 dev_warn(&dd->pdev->dev,
935                         "Clearing PxSERR.DIAG.x\n");
936                 writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
937         }
938
939         if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
940                 dev_warn(&dd->pdev->dev,
941                         "Clearing PxSERR.DIAG.n\n");
942                 writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
943         }
944
945         if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
946                 dev_warn(&dd->pdev->dev,
947                         "Port stat errors %x unhandled\n",
948                         (port_stat & ~PORT_IRQ_HANDLED));
949         }
950 }
951
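/*
 * Service an HBA interrupt.
 *
 * Reads the HBA interrupt status and, if set, acknowledges the port
 * interrupt status and dispatches it: SDB FIS completions, error
 * handling (including surprise-removal cleanup), and legacy PIO/D2H
 * completions.
 *
 * @data Pointer to the driver data structure.
 *
 * return value
 *      IRQ_HANDLED     An interrupt was pending and was processed.
 *      IRQ_NONE        No interrupt was pending.
 */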
952 static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
953 {
954         struct driver_data *dd = (struct driver_data *) data;
955         struct mtip_port *port = dd->port;
956         u32 hba_stat, port_stat;
957         int rv = IRQ_NONE;
958
959         hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
960         if (hba_stat) {
961                 rv = IRQ_HANDLED;
962
963                 /* Acknowledge the interrupt status on the port.*/
964                 port_stat = readl(port->mmio + PORT_IRQ_STAT);
965                 writel(port_stat, port->mmio + PORT_IRQ_STAT);
966
967                 /* Demux port status */
968                 if (likely(port_stat & PORT_IRQ_SDB_FIS))
969                         mtip_process_sdbf(dd);
970
971                 if (unlikely(port_stat & PORT_IRQ_ERR)) {
972                         if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
973                                 mtip_command_cleanup(dd);
974                                 /* don't proceed further */
975                                 return IRQ_HANDLED;
976                         }
977
978                         mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
979                 }
980
981                 if (unlikely(port_stat & PORT_IRQ_LEGACY))
982                         mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
983         }
984
985         /* acknowledge interrupt */
986         writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
987
988         return rv;
989 }
990
991 /*
992  * Wrapper for mtip_handle_irq
993  * (ignores return code)
994  */
995 static void mtip_tasklet(unsigned long data)
996 {
997         mtip_handle_irq((struct driver_data *) data);
998 }
999
1000 /*
1001  * HBA interrupt subroutine.
1002  *
1003  * @irq         IRQ number.
1004  * @instance    Pointer to the driver data structure.
1005  *
1006  * return value
1007  *      IRQ_HANDLED     An HBA interrupt was pending and handled.
1008  *      IRQ_NONE        This interrupt was not for the HBA.
1009  */
1010 static irqreturn_t mtip_irq_handler(int irq, void *instance)
1011 {
1012         struct driver_data *dd = instance;
1013         tasklet_schedule(&dd->tasklet);
1014         return IRQ_HANDLED;
1015 }
1016
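/*
 * Issue a non-NCQ command to the hardware.
 *
 * Marks the command active and sets its bit in the Command Issue
 * register; unlike mtip_issue_ncq_command(), SActive is not written
 * and no lock is taken.
 *
 * @port Pointer to the port data structure.
 * @tag  Tag of the command to be issued.
 *
 * return value
 *      None
 */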
1017 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
1018 {
1019         atomic_set(&port->commands[tag].active, 1);
1020         writel(1 << MTIP_TAG_BIT(tag),
1021                 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
1022 }
1023
1024 /*
1025  * Wait for port to quiesce
1026  *
1027  * @port    Pointer to port data structure
1028  * @timeout Max duration to wait (ms)
1029  *
1030  * return value
1031  *      0       Success
1032  *      -EBUSY  Commands still active
1033  */
1034 static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1035 {
1036         unsigned long to;
1037         unsigned int n, active;
1038
1039         to = jiffies + msecs_to_jiffies(timeout);
1040         do {
1041                 /*
1042                  * Ignore s_active bit 0 of array element 0.
1043                  * This bit will always be set
1044                  */
1045                 active = readl(port->s_active[0]) & 0xfffffffe;
1046                 for (n = 1; n < port->dd->slot_groups; n++)
1047                         active |= readl(port->s_active[n]);
1048
1049                 if (!active)
1050                         break;
1051
1052                 msleep(20);
1053         } while (time_before(jiffies, to));
1054
1055         return active ? -EBUSY : 0;
1056 }
1057
1058 /*
1059  * Execute an internal command and wait for the completion.
1060  *
1061  * @port    Pointer to the port data structure.
1062  * @fis     Pointer to the FIS that describes the command.
1063  * @fisLen  Length in WORDS of the FIS.
1064  * @buffer  DMA address of the buffer for command data.
1065  * @bufLen  Length, in bytes, of the data buffer.
1066  * @opts    Command header options, excluding the FIS length
1067  *             and the number of PRD entries.
1068  * @timeout Time in ms to wait for the command to complete.
1069  *
1070  * return value
1071  *      0        Command completed successfully.
1072  *      -EFAULT  The buffer address is not correctly aligned.
1073  *      -EBUSY   Internal command or other IO in progress.
1074  *      -EAGAIN  Time out waiting for command to complete.
1075  */
1076 static int mtip_exec_internal_command(struct mtip_port *port,
1077                                         void *fis,
1078                                         int fisLen,
1079                                         dma_addr_t buffer,
1080                                         int bufLen,
1081                                         u32 opts,
1082                                         gfp_t atomic,
1083                                         unsigned long timeout)
1084 {
1085         struct mtip_cmd_sg *command_sg;
1086         DECLARE_COMPLETION_ONSTACK(wait);
1087         int rv = 0;
1088         struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
1089
1090         /* Make sure the buffer is 8-byte aligned.  This is ASIC specific. */
1091         if (buffer & 0x00000007) {
1092                 dev_err(&port->dd->pdev->dev,
1093                         "SG buffer is not 8 byte aligned\n");
1094                 return -EFAULT;
1095         }
1096
1097         /* Only one internal command should be running at a time */
1098         if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
1099                 dev_warn(&port->dd->pdev->dev,
1100                         "Internal command already active\n");
1101                 return -EBUSY;
1102         }
1103         port->internal_cmd_in_progress = 1;
1104
1105         if (atomic == GFP_KERNEL) {
1106                 /* wait for io to complete if non atomic */
1107                 if (mtip_quiesce_io(port, 5000) < 0) {
1108                         dev_warn(&port->dd->pdev->dev,
1109                                 "Failed to quiesce IO\n");
1110                         release_slot(port, MTIP_TAG_INTERNAL);
1111                         port->internal_cmd_in_progress = 0;
1112                         return -EBUSY;
1113                 }
1114
1115                 /* Set the completion function and data for the command. */
1116                 int_cmd->comp_data = &wait;
1117                 int_cmd->comp_func = mtip_completion;
1118
1119         } else {
1120                 /* Clear completion - we're going to poll */
1121                 int_cmd->comp_data = NULL;
1122                 int_cmd->comp_func = NULL;
1123         }
1124
1125         /* Copy the command to the command table */
1126         memcpy(int_cmd->command, fis, fisLen*4);
1127
1128         /* Populate the SG list */
1129         int_cmd->command_header->opts =
1130                  cpu_to_le32(opts | fisLen);
1131         if (bufLen) {
1132                 command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
1133
1134                 command_sg->info = cpu_to_le32((bufLen-1) & 0x3fffff);
1135                 command_sg->dba = cpu_to_le32(buffer & 0xffffffff);
1136                 command_sg->dba_upper = cpu_to_le32((buffer >> 16) >> 16);
1137
1138                 int_cmd->command_header->opts |= cpu_to_le32((1 << 16));
1139         }
1140
1141         /* Populate the command header */
1142         int_cmd->command_header->byte_count = 0;
1143
1144         /* Issue the command to the hardware */
1145         mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
1146
1147         /* Poll if atomic, wait_for_completion otherwise */
1148         if (atomic == GFP_KERNEL) {
1149                 /* Wait for the command to complete or timeout. */
1150                 if (wait_for_completion_timeout(
1151                                 &wait,
1152                                 msecs_to_jiffies(timeout)) == 0) {
1153                         dev_err(&port->dd->pdev->dev,
1154                                 "Internal command did not complete [%d]\n",
1155                                 atomic);
1156                         rv = -EAGAIN;
1157                 }
1158
1159                 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1160                         & (1 << MTIP_TAG_INTERNAL)) {
1161                         dev_warn(&port->dd->pdev->dev,
1162                                 "Retiring internal command but CI is 1.\n");
1163                 }
1164
1165         } else {
1166                 /* Spin for <timeout> checking if command still outstanding */
1167                 timeout = jiffies + msecs_to_jiffies(timeout);
1168
1169                 while ((readl(
1170                         port->cmd_issue[MTIP_TAG_INTERNAL])
1171                         & (1 << MTIP_TAG_INTERNAL))
1172                         && time_before(jiffies, timeout))
1173                         ;
1174
1175                 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1176                         & (1 << MTIP_TAG_INTERNAL)) {
1177                         dev_err(&port->dd->pdev->dev,
1178                                 "Internal command did not complete [%d]\n",
1179                                 atomic);
1180                         rv = -EAGAIN;
1181                 }
1182         }
1183
1184         /* Clear the allocated and active bits for the internal command. */
1185         atomic_set(&int_cmd->active, 0);
1186         release_slot(port, MTIP_TAG_INTERNAL);
1187         port->internal_cmd_in_progress = 0;
1188
1189         return rv;
1190 }
1191
1192 /*
1193  * Byte-swap ATA ID strings.
1194  *
1195  * ATA identify data contains strings in byte-swapped 16-bit words.
1196  * They must be swapped (on all architectures) to be usable as C strings.
1197  * This function swaps bytes in-place.
1198  *
1199  * @buf The buffer location of the string
1200  * @len The number of bytes to swap
1201  *
1202  * return value
1203  *      None
1204  */
1205 static inline void ata_swap_string(u16 *buf, unsigned int len)
1206 {
1207         int i;
1208         for (i = 0; i < (len/2); i++)
1209                 be16_to_cpus(&buf[i]);
1210 }
1211
1212 /*
1213  * Request the device identity information.
1214  *
1215  * If a user space buffer is not specified, i.e. is NULL, the
1216  * identify information is still read from the drive and placed
1217  * into the identify data buffer (@e port->identify) in the
1218  * port data structure.
1219  * When the identify buffer contains valid identify information @e
1220  * port->identify_valid is non-zero.
1221  *
1222  * @port         Pointer to the port structure.
1223  * @user_buffer  A user space buffer where the identify data should be
1224  *                    copied.
1225  *
1226  * return value
1227  *      0       Command completed successfully.
1228  *      -EFAULT An error occurred while copying data to the user buffer.
1229  *      -1      Command failed.
1230  */
1231 static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
1232 {
1233         int rv = 0;
1234         struct host_to_dev_fis fis;
1235
1236         down_write(&port->dd->internal_sem);
1237
1238         /* Build the FIS. */
1239         memset(&fis, 0, sizeof(struct host_to_dev_fis));
1240         fis.type        = 0x27;
1241         fis.opts        = 1 << 7;
1242         fis.command     = ATA_CMD_ID_ATA;
1243
1244         /* Set the identify information as invalid. */
1245         port->identify_valid = 0;
1246
1247         /* Clear the identify information. */
1248         memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
1249
1250         /* Execute the command. */
1251         if (mtip_exec_internal_command(port,
1252                                 &fis,
1253                                 5,
1254                                 port->identify_dma,
1255                                 sizeof(u16) * ATA_ID_WORDS,
1256                                 0,
1257                                 GFP_KERNEL,
1258                                 MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
1259                                 < 0) {
1260                 rv = -1;
1261                 goto out;
1262         }
1263
1264         /*
1265          * Perform any necessary byte-swapping.  Yes, the kernel does in fact
1266          * perform field-sensitive swapping on the string fields.
1267          * See the kernel use of ata_id_string() for proof of this.
1268          */
1269 #ifdef __LITTLE_ENDIAN
1270         ata_swap_string(port->identify + 27, 40);  /* model string*/
1271         ata_swap_string(port->identify + 23, 8);   /* firmware string*/
1272         ata_swap_string(port->identify + 10, 20);  /* serial# string*/
1273 #else
1274         {
1275                 int i;
1276                 for (i = 0; i < ATA_ID_WORDS; i++)
1277                         port->identify[i] = le16_to_cpu(port->identify[i]);
1278         }
1279 #endif
1280
1281         /* Set the identify buffer as valid. */
1282         port->identify_valid = 1;
1283
1284         if (user_buffer) {
1285                 if (copy_to_user(
1286                         user_buffer,
1287                         port->identify,
1288                         ATA_ID_WORDS * sizeof(u16))) {
1289                         rv = -EFAULT;
1290                         goto out;
1291                 }
1292         }
1293
1294 out:
1295         up_write(&port->dd->internal_sem);
1296         return rv;
1297 }
1298
1299 /*
1300  * Issue a standby immediate command to the device.
1301  *
1302  * @port Pointer to the port structure.
1303  *
1304  * return value
1305  *      0       Command was executed successfully.
1306  *      -1      An error occurred while executing the command.
1307  */
1308 static int mtip_standby_immediate(struct mtip_port *port)
1309 {
1310         int rv;
1311         struct host_to_dev_fis  fis;
1312
1313         down_write(&port->dd->internal_sem);
1314
1315         /* Build the FIS. */
1316         memset(&fis, 0, sizeof(struct host_to_dev_fis));
1317         fis.type        = 0x27;
1318         fis.opts        = 1 << 7;
1319         fis.command     = ATA_CMD_STANDBYNOW1;
1320
1321         /* Execute the command.  Use a 15-second timeout for large drives. */
1322         rv = mtip_exec_internal_command(port,
1323                                         &fis,
1324                                         5,
1325                                         0,
1326                                         0,
1327                                         0,
1328                                         GFP_KERNEL,
1329                                         15000);
1330
1331         up_write(&port->dd->internal_sem);
1332
1333         return rv;
1334 }
1335
1336 /*
1337  * Get the drive capacity.
1338  *
1339  * @dd      Pointer to the device data structure.
1340  * @sectors Pointer to the variable that will receive the sector count.
1341  *
1342  * return value
1343  *      1 Capacity was returned successfully.
1344  *      0 The identify information is invalid.
1345  */
1346 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
1347 {
1348         struct mtip_port *port = dd->port;
1349         u64 total, raw0, raw1, raw2, raw3;
1350         raw0 = port->identify[100];
1351         raw1 = port->identify[101];
1352         raw2 = port->identify[102];
1353         raw3 = port->identify[103];
1354         total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
1355         *sectors = total;
1356         return (bool) !!port->identify_valid;
1357 }
1358
1359 /*
1360  * Reset the HBA.
1361  *
1362  * Resets the HBA by setting the HBA Reset bit in the Global
1363  * HBA Control register. After setting the HBA Reset bit the
1364  * function waits for 1 second before reading the HBA Reset
1365  * bit to make sure it has cleared. If HBA Reset is not clear
1366  * an error is returned.  Must not be used in a context that
1367  * cannot sleep.
1368  *
1369  * @dd Pointer to the driver data structure.
1370  *
1371  * return value
1372  *      0  The reset was successful.
1373  *      -1 The HBA Reset bit did not clear.
1374  */
1375 static int mtip_hba_reset(struct driver_data *dd)
1376 {
1377         mtip_deinit_port(dd->port);
1378
1379         /* Set the reset bit */
1380         writel(HOST_RESET, dd->mmio + HOST_CTL);
1381
1382         /* Flush */
1383         readl(dd->mmio + HOST_CTL);
1384
1385         /* Wait for reset to clear */
1386         ssleep(1);
1387
1388         /* Check the bit has cleared */
1389         if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
1390                 dev_err(&dd->pdev->dev,
1391                         "Reset bit did not clear.\n");
1392                 return -1;
1393         }
1394
1395         return 0;
1396 }
1397
1398 /*
1399  * Display the identify command data.
1400  *
1401  * @port Pointer to the port data structure.
1402  *
1403  * return value
1404  *      None
1405  */
1406 static void mtip_dump_identify(struct mtip_port *port)
1407 {
1408         sector_t sectors;
1409         unsigned short revid;
1410         char cbuf[42];
1411
1412         if (!port->identify_valid)
1413                 return;
1414
1415         strlcpy(cbuf, (char *)(port->identify+10), 21);
1416         dev_info(&port->dd->pdev->dev,
1417                 "Serial No.: %s\n", cbuf);
1418
1419         strlcpy(cbuf, (char *)(port->identify+23), 9);
1420         dev_info(&port->dd->pdev->dev,
1421                 "Firmware Ver.: %s\n", cbuf);
1422
1423         strlcpy(cbuf, (char *)(port->identify+27), 41);
1424         dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
1425
1426         if (mtip_hw_get_capacity(port->dd, &sectors))
1427                 dev_info(&port->dd->pdev->dev,
1428                         "Capacity: %llu sectors (%llu MB)\n",
1429                          (u64)sectors,
1430                          ((u64)sectors) * ATA_SECT_SIZE >> 20);
1431
1432         pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
1433         switch (revid & 0xff) {
1434         case 0x1:
1435                 strlcpy(cbuf, "A0", 3);
1436                 break;
1437         case 0x3:
1438                 strlcpy(cbuf, "A2", 3);
1439                 break;
1440         default:
1441                 strlcpy(cbuf, "?", 2);
1442                 break;
1443         }
1444         dev_info(&port->dd->pdev->dev,
1445                 "Card Type: %s\n", cbuf);
1446 }
1447
1448 /*
1449  * Map the command's scatter list into the command table.
1450  *
1451  * @command Pointer to the command.
1452  * @nents Number of scatter list entries.
1453  *
1454  * return value
1455  *      None
1456  */
1457 static inline void fill_command_sg(struct driver_data *dd,
1458                                 struct mtip_cmd *command,
1459                                 int nents)
1460 {
1461         int n;
1462         unsigned int dma_len;
1463         struct mtip_cmd_sg *command_sg;
1464         struct scatterlist *sg = command->sg;
1465
1466         command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
1467
1468         for (n = 0; n < nents; n++) {
1469                 dma_len = sg_dma_len(sg);
1470                 if (dma_len > 0x400000)
1471                         dev_err(&dd->pdev->dev,
1472                                 "DMA segment length truncated\n");
1473                 command_sg->info = cpu_to_le32((dma_len-1) & 0x3fffff);
1474 #if (BITS_PER_LONG == 64)
1475                 *((unsigned long *) &command_sg->dba) =
1476                          cpu_to_le64(sg_dma_address(sg));
1477 #else
1478                 command_sg->dba = cpu_to_le32(sg_dma_address(sg));
1479                 command_sg->dba_upper   =
1480                          cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
1481 #endif
1482                 command_sg++;
1483                 sg++;
1484         }
1485 }
1486
1487 /*
1488  * @brief Execute a drive command.
1489  *
1490  * return value 0 The command completed successfully.
1491  * return value -1 An error occurred while executing the command.
1492  */
1493 static int exec_drive_task(struct mtip_port *port, u8 *command)
1494 {
1495         struct host_to_dev_fis  fis;
1496         struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
1497
1498         /* Lock the internal command semaphore. */
1499         down_write(&port->dd->internal_sem);
1500
1501         /* Build the FIS. */
1502         memset(&fis, 0, sizeof(struct host_to_dev_fis));
1503         fis.type        = 0x27;
1504         fis.opts        = 1 << 7;
1505         fis.command     = command[0];
1506         fis.features    = command[1];
1507         fis.sect_count  = command[2];
1508         fis.sector      = command[3];
1509         fis.cyl_low     = command[4];
1510         fis.cyl_hi      = command[5];
1511         fis.device      = command[6] & ~0x10; /* Clear the dev bit*/
1512
1513
1514         dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
1515                 "nsect %x, sect %x, lcyl %x, "
1516                 "hcyl %x, sel %x\n",
1517                 __func__,
1518                 command[0],
1519                 command[1],
1520                 command[2],
1521                 command[3],
1522                 command[4],
1523                 command[5],
1524                 command[6]);
1525
1526         /* Execute the command. */
1527         if (mtip_exec_internal_command(port,
1528                                  &fis,
1529                                  5,
1530                                  0,
1531                                  0,
1532                                  0,
1533                                  GFP_KERNEL,
1534                                  MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
1535                 up_write(&port->dd->internal_sem);
1536                 return -1;
1537         }
1538
1539         command[0] = reply->command; /* Status*/
1540         command[1] = reply->features; /* Error*/
1541         command[4] = reply->cyl_low;
1542         command[5] = reply->cyl_hi;
1543
1544         dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
1545                 "err %x , cyl_lo %x cyl_hi %x\n",
1546                 __func__,
1547                 command[0],
1548                 command[1],
1549                 command[4],
1550                 command[5]);
1551
1552         up_write(&port->dd->internal_sem);
1553         return 0;
1554 }
1555
1556 /*
1557  * @brief Execute a drive command (HDIO_DRIVE_CMD).
1558  *
1559  * @param port Pointer to the port data structure.
1560  * @param command Pointer to the user specified command parameters.
1561  * @param user_buffer Pointer to the user space buffer where read sector
1562  *                   data should be copied.
1563  *
1564  * return value 0 The command completed successfully.
1565  * return value -EFAULT An error occurred while copying the completion
1566  *                 data to the user space buffer.
1567  * return value -1 An error occurred while executing the command.
1568  */
1569 static int exec_drive_command(struct mtip_port *port, u8 *command,
1570                                 void __user *user_buffer)
1571 {
1572         struct host_to_dev_fis  fis;
1573         struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
1574
1575         /* Lock the internal command semaphore. */
1576         down_write(&port->dd->internal_sem);
1577
1578         /* Build the FIS. */
1579         memset(&fis, 0, sizeof(struct host_to_dev_fis));
1580         fis.type                = 0x27;
1581         fis.opts                = 1 << 7;
1582         fis.command             = command[0];
1583         fis.features    = command[2];
1584         fis.sect_count  = command[3];
1585         if (fis.command == ATA_CMD_SMART) {
1586                 fis.sector      = command[1];
1587                 fis.cyl_low     = 0x4f;
1588                 fis.cyl_hi      = 0xc2;
1589         }
1590
1591         dbg_printk(MTIP_DRV_NAME
1592                 "%s: User Command: cmd %x, sect %x, "
1593                 "feat %x, sectcnt %x\n",
1594                 __func__,
1595                 command[0],
1596                 command[1],
1597                 command[2],
1598                 command[3]);
1599
1600         memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
1601
1602         /* Execute the command. */
1603         if (mtip_exec_internal_command(port,
1604                                 &fis,
1605                                  5,
1606                                  port->sector_buffer_dma,
1607                                  (command[3] != 0) ? ATA_SECT_SIZE : 0,
1608                                  0,
1609                                  GFP_KERNEL,
1610                                  MTIP_IOCTL_COMMAND_TIMEOUT_MS)
1611                                  < 0) {
1612                 up_write(&port->dd->internal_sem);
1613                 return -1;
1614         }
1615
1616         /* Collect the completion status. */
1617         command[0] = reply->command; /* Status*/
1618         command[1] = reply->features; /* Error*/
1619         command[2] = command[3];
1620
1621         dbg_printk(MTIP_DRV_NAME
1622                 "%s: Completion Status: stat %x, "
1623                 "err %x, cmd %x\n",
1624                 __func__,
1625                 command[0],
1626                 command[1],
1627                 command[2]);
1628
1629         if (user_buffer && command[3]) {
1630                 if (copy_to_user(user_buffer,
1631                                  port->sector_buffer,
1632                                  ATA_SECT_SIZE * command[3])) {
1633                         up_write(&port->dd->internal_sem);
1634                         return -EFAULT;
1635                 }
1636         }
1637
1638         up_write(&port->dd->internal_sem);
1639         return 0;
1640 }
1641
1642 /*
1643  *  Indicates whether a command has a single sector payload.
1644  *
1645  *  @command Command opcode passed to the device.
1646  *  @features Features register value passed with the command.
1647  *
1648  *  return value
1649  *      1       command is one that always has a single sector payload,
1650  *              regardless of the value in the Sector Count field.
1651  *      0       otherwise
1652  *
1653  */
1654 static unsigned int implicit_sector(unsigned char command,
1655                                     unsigned char features)
1656 {
1657         unsigned int rv = 0;
1658
1659         /* list of commands that have an implicit sector count of 1 */
1660         switch (command) {
1661         case 0xF1:
1662         case 0xF2:
1663         case 0xF3:
1664         case 0xF4:
1665         case 0xF5:
1666         case 0xF6:
1667         case 0xE4:
1668         case 0xE8:
1669                 rv = 1;
1670                 break;
1671         case 0xF9:
1672                 if (features == 0x03)
1673                         rv = 1;
1674                 break;
1675         case 0xB0:
1676                 if ((features == 0xD0) || (features == 0xD1))
1677                         rv = 1;
1678                 break;
1679         case 0xB1:
1680                 if ((features == 0xC2) || (features == 0xC3))
1681                         rv = 1;
1682                 break;
1683         }
1684         return rv;
1685 }
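/*
 * Example (illustration only): SMART READ DATA is command 0xB0 with
 * feature 0xD0 and always transfers exactly one 512-byte sector, so
 * implicit_sector(0xB0, 0xD0) returns 1 and exec_drive_taskfile()
 * below forces a single-sector transfer even when the caller leaves
 * the Sector Count field at zero. A plain READ DMA (0xC8) is not in
 * the list, so implicit_sector(0xC8, 0) returns 0 and the Sector
 * Count field governs the transfer size.
 */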
1686
1687 /*
1688  * Executes a taskfile
1689  * See ide_taskfile_ioctl() for derivation
1690  */
1691 static int exec_drive_taskfile(struct driver_data *dd,
1692                                void __user *buf,
1693                                ide_task_request_t *req_task,
1694                                int outtotal)
1695 {
1696         struct host_to_dev_fis  fis;
1697         struct host_to_dev_fis *reply;
1698         u8 *outbuf = NULL;
1699         u8 *inbuf = NULL;
1700         dma_addr_t outbuf_dma = 0;
1701         dma_addr_t inbuf_dma = 0;
1702         dma_addr_t dma_buffer = 0;
1703         int err = 0;
1704         unsigned int taskin = 0;
1705         unsigned int taskout = 0;
1706         u8 nsect = 0;
1707         unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1708         unsigned int force_single_sector;
1709         unsigned int transfer_size;
1710         unsigned long task_file_data;
1711         int intotal = outtotal + req_task->out_size;
1712
1713         taskout = req_task->out_size;
1714         taskin = req_task->in_size;
1715         /* 130560 = 512 * 0xFF*/
1716         if (taskin > 130560 || taskout > 130560) {
1717                 err = -EINVAL;
1718                 goto abort;
1719         }
1720
1721         if (taskout) {
1722                 outbuf = kzalloc(taskout, GFP_KERNEL);
1723                 if (outbuf == NULL) {
1724                         err = -ENOMEM;
1725                         goto abort;
1726                 }
1727                 if (copy_from_user(outbuf, buf + outtotal, taskout)) {
1728                         err = -EFAULT;
1729                         goto abort;
1730                 }
1731                 outbuf_dma = pci_map_single(dd->pdev,
1732                                          outbuf,
1733                                          taskout,
1734                                          DMA_TO_DEVICE);
1735                 if (outbuf_dma == 0) {
1736                         err = -ENOMEM;
1737                         goto abort;
1738                 }
1739                 dma_buffer = outbuf_dma;
1740         }
1741
1742         if (taskin) {
1743                 inbuf = kzalloc(taskin, GFP_KERNEL);
1744                 if (inbuf == NULL) {
1745                         err = -ENOMEM;
1746                         goto abort;
1747                 }
1748
1749                 if (copy_from_user(inbuf, buf + intotal, taskin)) {
1750                         err = -EFAULT;
1751                         goto abort;
1752                 }
1753                 inbuf_dma = pci_map_single(dd->pdev,
1754                                          inbuf,
1755                                          taskin, DMA_FROM_DEVICE);
1756                 if (inbuf_dma == 0) {
1757                         err = -ENOMEM;
1758                         goto abort;
1759                 }
1760                 dma_buffer = inbuf_dma;
1761         }
1762
1763         /* Only PIO and non-data commands are supported by this ioctl. */
1764         switch (req_task->data_phase) {
1765         case TASKFILE_OUT:
1766                 nsect = taskout / ATA_SECT_SIZE;
1767                 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
1768                 break;
1769         case TASKFILE_IN:
1770                 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
1771                 break;
1772         case TASKFILE_NO_DATA:
1773                 reply = (dd->port->rxfis + RX_FIS_D2H_REG);
1774                 break;
1775         default:
1776                 err = -EINVAL;
1777                 goto abort;
1778         }
1779
1780         /* Lock the internal command semaphore. */
1781         down_write(&dd->internal_sem);
1782
1783         /* Build the FIS. */
1784         memset(&fis, 0, sizeof(struct host_to_dev_fis));
1785
1786         fis.type        = 0x27;
1787         fis.opts        = 1 << 7;
1788         fis.command     = req_task->io_ports[7];
1789         fis.features    = req_task->io_ports[1];
1790         fis.sect_count  = req_task->io_ports[2];
1791         fis.lba_low     = req_task->io_ports[3];
1792         fis.lba_mid     = req_task->io_ports[4];
1793         fis.lba_hi      = req_task->io_ports[5];
1794          /* Clear the dev bit*/
1795         fis.device      = req_task->io_ports[6] & ~0x10;
1796
1797         if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
1798                 req_task->in_flags.all  =
1799                         IDE_TASKFILE_STD_IN_FLAGS |
1800                         (IDE_HOB_STD_IN_FLAGS << 8);
1801                 fis.lba_low_ex          = req_task->hob_ports[3];
1802                 fis.lba_mid_ex          = req_task->hob_ports[4];
1803                 fis.lba_hi_ex           = req_task->hob_ports[5];
1804                 fis.features_ex         = req_task->hob_ports[1];
1805                 fis.sect_cnt_ex         = req_task->hob_ports[2];
1806
1807         } else {
1808                 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
1809         }
1810
1811         force_single_sector = implicit_sector(fis.command, fis.features);
1812
1813         if ((taskin || taskout) && (!fis.sect_count)) {
1814                 if (nsect)
1815                         fis.sect_count = nsect;
1816                 else {
1817                         if (!force_single_sector) {
1818                                 dev_warn(&dd->pdev->dev,
1819                                         "data movement but "
1820                                         "sect_count is 0\n");
1821                                         up_write(&dd->internal_sem);
1822                                         err = -EINVAL;
1823                                         goto abort;
1824                         }
1825                 }
1826         }
1827
1828         dbg_printk(MTIP_DRV_NAME
1829                 "taskfile: cmd %x, feat %x, nsect %x,"
1830                 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
1831                 " head/dev %x\n",
1832                 fis.command,
1833                 fis.features,
1834                 fis.sect_count,
1835                 fis.lba_low,
1836                 fis.lba_mid,
1837                 fis.lba_hi,
1838                 fis.device);
1839
1840         switch (fis.command) {
1841         case 0x92: /* Change timeout for Download Microcode to 60 seconds.*/
1842                 timeout = 60000;
1843                 break;
1844         case 0xf4: /* Change timeout for Security Erase Unit to 4 minutes.*/
1845                 timeout = 240000;
1846                 break;
1847         case 0xe0: /* Change timeout for standby immediate to 10 seconds.*/
1848                 timeout = 10000;
1849                 break;
1850         case 0xf7: /* Change timeout for vendor unique command to 10 secs */
1851                 timeout = 10000;
1852                 break;
1853         case 0xfa: /* Change timeout for vendor unique command to 10 secs */
1854                 timeout = 10000;
1855                 break;
1856         default:
1857                 timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
1858                 break;
1859         }
1860
1861         /* Determine the correct transfer size.*/
1862         if (force_single_sector)
1863                 transfer_size = ATA_SECT_SIZE;
1864         else
1865                 transfer_size = ATA_SECT_SIZE * fis.sect_count;
1866
1867         /* Execute the command.*/
1868         if (mtip_exec_internal_command(dd->port,
1869                                  &fis,
1870                                  5,
1871                                  dma_buffer,
1872                                  transfer_size,
1873                                  0,
1874                                  GFP_KERNEL,
1875                                  timeout) < 0) {
1876                 up_write(&dd->internal_sem);
1877                 err = -EIO;
1878                 goto abort;
1879         }
1880
1881         task_file_data = readl(dd->port->mmio+PORT_TFDATA);
1882
1883         if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
1884                 reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
1885                 req_task->io_ports[7] = reply->control;
1886         } else {
1887                 reply = dd->port->rxfis + RX_FIS_D2H_REG;
1888                 req_task->io_ports[7] = reply->command;
1889         }
1890
1891         /* reclaim the DMA buffers.*/
1892         if (inbuf_dma)
1893                 pci_unmap_single(dd->pdev, inbuf_dma,
1894                         taskin, DMA_FROM_DEVICE);
1895         if (outbuf_dma)
1896                 pci_unmap_single(dd->pdev, outbuf_dma,
1897                         taskout, DMA_TO_DEVICE);
1898         inbuf_dma  = 0;
1899         outbuf_dma = 0;
1900
1901         /* return the ATA registers to the caller.*/
1902         req_task->io_ports[1] = reply->features;
1903         req_task->io_ports[2] = reply->sect_count;
1904         req_task->io_ports[3] = reply->lba_low;
1905         req_task->io_ports[4] = reply->lba_mid;
1906         req_task->io_ports[5] = reply->lba_hi;
1907         req_task->io_ports[6] = reply->device;
1908
1909         if (req_task->out_flags.all & 1)  {
1910
1911                 req_task->hob_ports[3] = reply->lba_low_ex;
1912                 req_task->hob_ports[4] = reply->lba_mid_ex;
1913                 req_task->hob_ports[5] = reply->lba_hi_ex;
1914                 req_task->hob_ports[1] = reply->features_ex;
1915                 req_task->hob_ports[2] = reply->sect_cnt_ex;
1916         }
1917
1918         /* COM reset after secure erase or low-level format */
1919         if (((fis.command == 0xF4) ||
1920                 ((fis.command == 0xFC) &&
1921                         (fis.features == 0x27 || fis.features == 0x72 ||
1922                          fis.features == 0x62 || fis.features == 0x26))) &&
1923                          !(reply->command & 1)) {
1924                 mtip_restart_port(dd->port);
1925         }
1926
1927         dbg_printk(MTIP_DRV_NAME
1928                 "%s: Completion: stat %x,"
1929                 "err %x, sect_cnt %x, lbalo %x,"
1930                 "lbamid %x, lbahi %x, dev %x\n",
1931                 __func__,
1932                 req_task->io_ports[7],
1933                 req_task->io_ports[1],
1934                 req_task->io_ports[2],
1935                 req_task->io_ports[3],
1936                 req_task->io_ports[4],
1937                 req_task->io_ports[5],
1938                 req_task->io_ports[6]);
1939
1940         up_write(&dd->internal_sem);
1941
1942         if (taskout) {
1943                 if (copy_to_user(buf + outtotal, outbuf, taskout)) {
1944                         err = -EFAULT;
1945                         goto abort;
1946                 }
1947         }
1948         if (taskin) {
1949                 if (copy_to_user(buf + intotal, inbuf, taskin)) {
1950                         err = -EFAULT;
1951                         goto abort;
1952                 }
1953         }
1954 abort:
1955         if (inbuf_dma)
1956                 pci_unmap_single(dd->pdev, inbuf_dma,
1957                                         taskin, DMA_FROM_DEVICE);
1958         if (outbuf_dma)
1959                 pci_unmap_single(dd->pdev, outbuf_dma,
1960                                         taskout, DMA_TO_DEVICE);
1961         kfree(outbuf);
1962         kfree(inbuf);
1963
1964         return err;
1965 }
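/*
 * User-space sketch (illustration only, not part of the driver): the
 * buffer layout exec_drive_taskfile() expects for HDIO_DRIVE_TASKFILE
 * is the ide_task_request_t followed immediately by the out (write)
 * data and then the in (read) data, all in one contiguous allocation:
 *
 *      struct {
 *              ide_task_request_t req;
 *              unsigned char      in_data[512];
 *      } args = { 0 };
 *
 *      args.req.data_phase  = TASKFILE_IN;       PIO data-in command
 *      args.req.in_size     = sizeof(args.in_data);
 *      args.req.io_ports[7] = ...;               command register, etc.
 *      ioctl(fd, HDIO_DRIVE_TASKFILE, &args);
 *
 * out_size and in_size are each limited to 512 * 0xFF bytes above.
 */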
1966
1967 /*
1968  * Handle IOCTL calls from the Block Layer.
1969  *
1970  * This function is called by the Block Layer when it receives an IOCTL
1971  * command that it does not understand. If the IOCTL command is not supported
1972  * this function returns -ENOTTY.
1973  *
1974  * @dd  Pointer to the driver data structure.
1975  * @cmd IOCTL command passed from the Block Layer.
1976  * @arg IOCTL argument passed from the Block Layer.
1977  *
1978  * return value
1979  *      0       The IOCTL completed successfully.
1980  *      -EINVAL The specified command is not supported.
1981  *      -EFAULT An error occurred copying data to a user space buffer.
1982  *      -EIO    An error occurred while executing the command.
1983  */
1984 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
1985                          unsigned long arg)
1986 {
1987         switch (cmd) {
1988         case HDIO_GET_IDENTITY:
1989                 if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
1990                         dev_warn(&dd->pdev->dev,
1991                                 "Unable to read identity\n");
1992                         return -EIO;
1993                 }
1994
1995                 break;
1996         case HDIO_DRIVE_CMD:
1997         {
1998                 u8 drive_command[4];
1999
2000                 /* Copy the user command info to our buffer. */
2001                 if (copy_from_user(drive_command,
2002                                          (void __user *) arg,
2003                                          sizeof(drive_command)))
2004                         return -EFAULT;
2005
2006                 /* Execute the drive command. */
2007                 if (exec_drive_command(dd->port,
2008                                          drive_command,
2009                                          (void __user *) (arg+4)))
2010                         return -EIO;
2011
2012                 /* Copy the status back to the user's buffer. */
2013                 if (copy_to_user((void __user *) arg,
2014                                          drive_command,
2015                                          sizeof(drive_command)))
2016                         return -EFAULT;
2017
2018                 break;
2019         }
2020         case HDIO_DRIVE_TASK:
2021         {
2022                 u8 drive_command[7];
2023
2024                 /* Copy the user command info to our buffer. */
2025                 if (copy_from_user(drive_command,
2026                                          (void __user *) arg,
2027                                          sizeof(drive_command)))
2028                         return -EFAULT;
2029
2030                 /* Execute the drive command. */
2031                 if (exec_drive_task(dd->port, drive_command))
2032                         return -EIO;
2033
2034                 /* Copy the status back to the user's buffer. */
2035                 if (copy_to_user((void __user *) arg,
2036                                          drive_command,
2037                                          sizeof(drive_command)))
2038                         return -EFAULT;
2039
2040                 break;
2041         }
2042         case HDIO_DRIVE_TASKFILE: {
2043                 ide_task_request_t req_task;
2044                 int ret, outtotal;
2045
2046                 if (copy_from_user(&req_task, (void __user *) arg,
2047                                         sizeof(req_task)))
2048                         return -EFAULT;
2049
2050                 outtotal = sizeof(req_task);
2051
2052                 ret = exec_drive_taskfile(dd, (void __user *) arg,
2053                                                 &req_task, outtotal);
2054
2055                 if (copy_to_user((void __user *) arg, &req_task, sizeof(req_task)))
2056                         return -EFAULT;
2057
2058                 return ret;
2059         }
2060
2061         default:
2062                 return -EINVAL;
2063         }
2064         return 0;
2065 }
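/*
 * User-space sketch (illustration only): reading one SMART data page
 * through HDIO_DRIVE_CMD as handled above. The four command bytes are
 * followed by the returned sector data, which exec_drive_command()
 * copies to arg + 4:
 *
 *      unsigned char args[4 + 512] = { 0 };
 *
 *      args[0] = 0xB0;                 command: SMART
 *      args[2] = 0xD0;                 feature: SMART READ DATA
 *      args[3] = 1;                    one 512-byte sector expected
 *      ioctl(fd, HDIO_DRIVE_CMD, args);
 *
 * On return args[0] and args[1] hold the status and error registers
 * and the data page starts at args[4].
 */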
2066
2067 /*
2068  * Submit an I/O to the hardware.
2069  *
2070  * This function is called by the block layer to issue an I/O
2071  * to the device. Upon completion, the callback function will
2072  * be called with the data parameter passed as the callback data.
2073  *
2074  * @dd       Pointer to the driver data structure.
2075  * @start    First sector to transfer.
2076  * @nsect    Number of sectors to transfer.
2077  * @nents    Number of entries in the scatter list for this command.
2078  * @tag      The tag of this command.
2079  * @callback Pointer to the function that should be called
2080  *           when the command completes.
2081  * @data     Callback data passed to the callback function
2082  *           when the command completes.
2083  * @barrier  If non-zero, the command is issued with the FUA
2084  *           (Force Unit Access) bit set.
2085  * @dir      Direction (READ or WRITE).
2086  *
2087  * return value
2088  *      None
2089  */
2090 static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
2091                               int nsect, int nents, int tag, void *callback,
2092                               void *data, int barrier, int dir)
2093 {
2094         struct host_to_dev_fis  *fis;
2095         struct mtip_port *port = dd->port;
2096         struct mtip_cmd *command = &port->commands[tag];
2097
2098         /* Map the scatter list for DMA access */
2099         if (dir == READ)
2100                 nents = dma_map_sg(&dd->pdev->dev, command->sg,
2101                                         nents, DMA_FROM_DEVICE);
2102         else
2103                 nents = dma_map_sg(&dd->pdev->dev, command->sg,
2104                                         nents, DMA_TO_DEVICE);
2105
2106         command->scatter_ents = nents;
2107
2108         /*
2109          * The number of retries for this command before it is
2110          * reported as a failure to the upper layers.
2111          */
2112         command->retries = MTIP_MAX_RETRIES;
2113
2114         /* Fill out fis */
2115         fis = command->command;
2116         fis->type        = 0x27;
2117         fis->opts        = 1 << 7;
2118         fis->command     =
2119                 (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
2120         *((unsigned int *) &fis->lba_low) = (start & 0xffffff);
2121         *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xffffff);
2122         fis->device      = 1 << 6;
2123         if (barrier)
2124                 fis->device |= FUA_BIT;
2125         fis->features    = nsect & 0xff;
2126         fis->features_ex = (nsect >> 8) & 0xff;
2127         fis->sect_count  = ((tag << 3) | (tag >> 5));
2128         fis->sect_cnt_ex = 0;
2129         fis->control     = 0;
2130         fis->res2        = 0;
2131         fis->res3        = 0;
2132         fill_command_sg(dd, command, nents);
2133
2134         /* Populate the command header */
2135         command->command_header->opts = cpu_to_le32(
2136                         (nents << 16) | 5 | AHCI_CMD_PREFETCH);
2137         command->command_header->byte_count = 0;
2138
2139         /*
2140          * Set the completion function and data for the command
2141          * within this layer.
2142          */
2143         command->comp_data = dd;
2144         command->comp_func = mtip_async_complete;
2145         command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
2146
2147         /*
2148          * Set the completion function and data for the command passed
2149          * from the upper layer.
2150          */
2151         command->async_data = data;
2152         command->async_callback = callback;
2153
2154         /*
2155          * Lock used to prevent this command from being issued
2156          * if an internal command is in progress.
2157          */
2158         down_read(&port->dd->internal_sem);
2159
2160         /* Issue the command to the hardware */
2161         mtip_issue_ncq_command(port, tag);
2162
2163         /* Set the command's timeout value.*/
2164         port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
2165                                         MTIP_NCQ_COMMAND_TIMEOUT_MS);
2166
2167         up_read(&port->dd->internal_sem);
2168 }
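/*
 * Worked example (illustration only) of the FPDMA FIS built above,
 * assuming tag = 5, start = 0x12345678 and nsect = 8:
 *
 *      lba_low/lba_mid/lba_hi          = 0x78 / 0x56 / 0x34
 *      lba_low_ex/lba_mid_ex/lba_hi_ex = 0x12 / 0x00 / 0x00
 *      features/features_ex            = 0x08 / 0x00
 *      sect_count                      = (5 << 3) | (5 >> 5) = 0x28
 *
 * For FPDMA READ/WRITE the transfer length travels in the features
 * fields while the 5-bit NCQ tag occupies bits 7:3 of the count
 * field, which is why sect_count carries the tag rather than the
 * sector count here.
 */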
2169
2170 /*
2171  * Release a command slot.
2172  *
2173  * @dd  Pointer to the driver data structure.
2174  * @tag Slot tag
2175  *
2176  * return value
2177  *      None
2178  */
2179 static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
2180 {
2181         release_slot(dd->port, tag);
2182 }
2183
2184 /*
2185  * Obtain a command slot and return its associated scatter list.
2186  *
2187  * @dd  Pointer to the driver data structure.
2188  * @tag Pointer to an int that will receive the allocated command
2189  *            slot tag.
2190  *
2191  * return value
2192  *      Pointer to the scatter list for the allocated command slot
2193  *      or NULL if no command slots are available.
2194  */
2195 static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
2196                                                    int *tag)
2197 {
2198         /*
2199          * It is possible that, even with this semaphore, a thread
2200          * may think that no command slots are available. Therefore, we
2201          * need to make an attempt to get_slot().
2202          */
2203         down(&dd->port->cmd_slot);
2204         *tag = get_slot(dd->port);
2205
2206         if (unlikely(*tag < 0))
2207                 return NULL;
2208
2209         return dd->port->commands[*tag].sg;
2210 }
2211
2212 /*
2213  * Sysfs register/status dump.
2214  *
2215  * @dev  Pointer to the device structure, passed by the kernel.
2216  * @attr Pointer to the device_attribute structure passed by the kernel.
2217  * @buf  Pointer to the char buffer that will receive the stats info.
2218  *
2219  * return value
2220  *      The size, in bytes, of the data copied into buf.
2221  */
2222 static ssize_t hw_show_registers(struct device *dev,
2223                                 struct device_attribute *attr,
2224                                 char *buf)
2225 {
2226         u32 group_allocated;
2227         struct driver_data *dd = dev_to_disk(dev)->private_data;
2228         int size = 0;
2229         int n;
2230
2231         size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
2232
2233         for (n = 0; n < dd->slot_groups; n++)
2234                 size += sprintf(&buf[size], "0x%08x\n",
2235                                          readl(dd->port->s_active[n]));
2236
2237         size += sprintf(&buf[size], "Command Issue:\n");
2238
2239         for (n = 0; n < dd->slot_groups; n++)
2240                 size += sprintf(&buf[size], "0x%08x\n",
2241                                         readl(dd->port->cmd_issue[n]));
2242
2243         size += sprintf(&buf[size], "Allocated:\n");
2244
2245         for (n = 0; n < dd->slot_groups; n++) {
2246                 if (sizeof(long) > sizeof(u32))
2247                         group_allocated =
2248                                 dd->port->allocated[n/2] >> (32*(n&1));
2249                 else
2250                         group_allocated = dd->port->allocated[n];
2251                 size += sprintf(&buf[size], "0x%08x\n",
2252                                  group_allocated);
2253         }
2254
2255         size += sprintf(&buf[size], "completed:\n");
2256
2257         for (n = 0; n < dd->slot_groups; n++)
2258                 size += sprintf(&buf[size], "0x%08x\n",
2259                                 readl(dd->port->completed[n]));
2260
2261         size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
2262                                 readl(dd->port->mmio + PORT_IRQ_STAT));
2263         size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
2264                                 readl(dd->mmio + HOST_IRQ_STAT));
2265
2266         return size;
2267 }
2268 static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
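/*
 * With mtip_hw_sysfs_init() below attaching this attribute to the
 * block device's kobject, the dump is expected to show up as
 * /sys/block/rssd<x>/registers; reading it prints the s_active,
 * command-issue, allocated and completed bitmaps for every slot
 * group, followed by the port and host interrupt status registers.
 */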
2269
2270 /*
2271  * Create the sysfs related attributes.
2272  *
2273  * @dd   Pointer to the driver data structure.
2274  * @kobj Pointer to the kobj for the block device.
2275  *
2276  * return value
2277  *      0       Operation completed successfully.
2278  *      -EINVAL Invalid parameter.
2279  */
2280 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2281 {
2282         if (!kobj || !dd)
2283                 return -EINVAL;
2284
2285         if (sysfs_create_file(kobj, &dev_attr_registers.attr))
2286                 dev_warn(&dd->pdev->dev,
2287                         "Error creating registers sysfs entry\n");
2288         return 0;
2289 }
2290
2291 /*
2292  * Remove the sysfs related attributes.
2293  *
2294  * @dd   Pointer to the driver data structure.
2295  * @kobj Pointer to the kobj for the block device.
2296  *
2297  * return value
2298  *      0       Operation completed successfully.
2299  *      -EINVAL Invalid parameter.
2300  */
2301 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2302 {
2303         if (!kobj || !dd)
2304                 return -EINVAL;
2305
2306         sysfs_remove_file(kobj, &dev_attr_registers.attr);
2307
2308         return 0;
2309 }
2310
2311 /*
2312  * Perform any init/resume time hardware setup
2313  *
2314  * @dd Pointer to the driver data structure.
2315  *
2316  * return value
2317  *      None
2318  */
2319 static inline void hba_setup(struct driver_data *dd)
2320 {
2321         u32 hwdata;
2322         hwdata = readl(dd->mmio + HOST_HSORG);
2323
2324         /* interrupt bug workaround: use only 1 IS bit.*/
2325         writel(hwdata |
2326                 HSORG_DISABLE_SLOTGRP_INTR |
2327                 HSORG_DISABLE_SLOTGRP_PXIS,
2328                 dd->mmio + HOST_HSORG);
2329 }
2330
2331 /*
2332  * Detect the details of the product, and store anything needed
2333  * into the driver data structure.  This includes product type and
2334  * version and number of slot groups.
2335  *
2336  * @dd Pointer to the driver data structure.
2337  *
2338  * return value
2339  *      None
2340  */
2341 static void mtip_detect_product(struct driver_data *dd)
2342 {
2343         u32 hwdata;
2344         unsigned int rev, slotgroups;
2345
2346         /*
2347          * HBA base + 0xFC [15:0] - vendor-specific hardware interface
2348          * info register:
2349          * [15:8] hardware/software interface rev#
2350          * [   3] asic-style interface
2351          * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
2352          */
2353         hwdata = readl(dd->mmio + HOST_HSORG);
2354
2355         dd->product_type = MTIP_PRODUCT_UNKNOWN;
2356         dd->slot_groups = 1;
2357
2358         if (hwdata & 0x8) {
2359                 dd->product_type = MTIP_PRODUCT_ASICFPGA;
2360                 rev = (hwdata & HSORG_HWREV) >> 8;
2361                 slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
2362                 dev_info(&dd->pdev->dev,
2363                         "ASIC-FPGA design, HS rev 0x%x, "
2364                         "%i slot groups [%i slots]\n",
2365                          rev,
2366                          slotgroups,
2367                          slotgroups * 32);
2368
2369                 if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
2370                         dev_warn(&dd->pdev->dev,
2371                                 "Warning: driver only supports "
2372                                 "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
2373                         slotgroups = MTIP_MAX_SLOT_GROUPS;
2374                 }
2375                 dd->slot_groups = slotgroups;
2376                 return;
2377         }
2378
2379         dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
2380 }
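/*
 * Example decode (hypothetical register value): if HOST_HSORG reads
 * back 0x0000a10b, bit 3 (HSORG_STYLE) is set so this is the
 * ASIC/FPGA design, the interface revision is (0xa10b & HSORG_HWREV)
 * >> 8 = 0xa1, and the device reports (0xa10b & HSORG_SLOTGROUPS) + 1
 * = 4 slot groups, i.e. 128 command slots.
 */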
2381
2382 /*
2383  * Blocking wait for FTL rebuild to complete
2384  *
2385  * @dd Pointer to the DRIVER_DATA structure.
2386  *
2387  * return value
2388  *      0       FTL rebuild completed successfully
2389  *      -EFAULT FTL rebuild error/timeout/interruption
2390  */
2391 static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2392 {
2393         unsigned long timeout, cnt = 0, start;
2394
2395         dev_warn(&dd->pdev->dev,
2396                 "FTL rebuild in progress. Polling for completion.\n");
2397
2398         start = jiffies;
2399         dd->ftlrebuildflag = 1;
2400         timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
2401
2402         do {
2403 #ifdef CONFIG_HOTPLUG
2404                 if (mtip_check_surprise_removal(dd->pdev))
2405                         return -EFAULT;
2406 #endif
2407                 if (mtip_get_identify(dd->port, NULL) < 0)
2408                         return -EFAULT;
2409
2410                 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2411                         MTIP_FTL_REBUILD_MAGIC) {
2412                         ssleep(1);
2413                         /* Print a progress message periodically */
2414                         if (cnt++ >= 180) {
2415                                 dev_warn(&dd->pdev->dev,
2416                                 "FTL rebuild in progress (%d secs).\n",
2417                                 jiffies_to_msecs(jiffies - start) / 1000);
2418                                 cnt = 0;
2419                         }
2420                 } else {
2421                         dev_warn(&dd->pdev->dev,
2422                                 "FTL rebuild complete (%d secs).\n",
2423                         jiffies_to_msecs(jiffies - start) / 1000);
2424                         dd->ftlrebuildflag = 0;
2425                         break;
2426                 }
2427                 ssleep(10);
2428         } while (time_before(jiffies, timeout));
2429
2430         /* Check for timeout */
2431         if (dd->ftlrebuildflag) {
2432                 dev_err(&dd->pdev->dev,
2433                 "Timed out waiting for FTL rebuild to complete (%d secs).\n",
2434                 jiffies_to_msecs(jiffies - start) / 1000);
2435                 return -EFAULT;
2436         }
2437
2438         return 0;
2439 }
2440
2441 /*
2442  * Called once for each card.
2443  *
2444  * @dd Pointer to the driver data structure.
2445  *
2446  * return value
2447  *      0 on success, else an error code.
2448  */
2449 static int mtip_hw_init(struct driver_data *dd)
2450 {
2451         int i;
2452         int rv;
2453         unsigned int num_command_slots;
2454
2455         dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
2456
2457         mtip_detect_product(dd);
2458         if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
2459                 rv = -EIO;
2460                 goto out1;
2461         }
2462         num_command_slots = dd->slot_groups * 32;
2463
2464         hba_setup(dd);
2465
2466         /*
2467          * Initialize the internal semaphore.
2468          * Use a rw semaphore to enable prioritization of
2469          * management ioctl traffic during heavy I/O load.
2470          */
2471         init_rwsem(&dd->internal_sem);
2472
2473         tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
2474
2475         dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
2476         if (!dd->port) {
2477                 dev_err(&dd->pdev->dev,
2478                         "Memory allocation: port structure\n");
2479                 return -ENOMEM;
2480         }
2481
2482         /* Counting semaphore to track command slot usage */
2483         sema_init(&dd->port->cmd_slot, num_command_slots - 1);
2484
2485         /* Spinlock to prevent concurrent issue */
2486         spin_lock_init(&dd->port->cmd_issue_lock);
2487
2488         /* Set the port mmio base address. */
2489         dd->port->mmio  = dd->mmio + PORT_OFFSET;
2490         dd->port->dd    = dd;
2491
2492         /* Allocate memory for the command list. */
2493         dd->port->command_list =
2494                 dmam_alloc_coherent(&dd->pdev->dev,
2495                         HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
2496                         &dd->port->command_list_dma,
2497                         GFP_KERNEL);
2498         if (!dd->port->command_list) {
2499                 dev_err(&dd->pdev->dev,
2500                         "Memory allocation: command list\n");
2501                 rv = -ENOMEM;
2502                 goto out1;
2503         }
2504
2505         /* Clear the memory we have allocated. */
2506         memset(dd->port->command_list,
2507                 0,
2508                 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
2509
2510         /* Setup the address of the RX FIS. */
2511         dd->port->rxfis     = dd->port->command_list + HW_CMD_SLOT_SZ;
2512         dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
2513
2514         /* Setup the address of the command tables. */
2515         dd->port->command_table   = dd->port->rxfis + AHCI_RX_FIS_SZ;
2516         dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
2517
2518         /* Setup the address of the identify data. */
2519         dd->port->identify     = dd->port->command_table +
2520                                         HW_CMD_TBL_AR_SZ;
2521         dd->port->identify_dma = dd->port->command_tbl_dma +
2522                                         HW_CMD_TBL_AR_SZ;
2523
2524         /* Setup the address of the sector buffer. */
2525         dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
2526         dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
2527
2528         /* Point the command headers at the command tables. */
2529         for (i = 0; i < num_command_slots; i++) {
2530                 dd->port->commands[i].command_header =
2531                                         dd->port->command_list +
2532                                         (sizeof(struct mtip_cmd_hdr) * i);
2533                 dd->port->commands[i].command_header_dma =
2534                                         dd->port->command_list_dma +
2535                                         (sizeof(struct mtip_cmd_hdr) * i);
2536
2537                 dd->port->commands[i].command =
2538                         dd->port->command_table + (HW_CMD_TBL_SZ * i);
2539                 dd->port->commands[i].command_dma =
2540                         dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
2541
2542                 if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
2543                         dd->port->commands[i].command_header->ctbau =
2544                         cpu_to_le32(
2545                         (dd->port->commands[i].command_dma >> 16) >> 16);
2546                 dd->port->commands[i].command_header->ctba = cpu_to_le32(
2547                         dd->port->commands[i].command_dma & 0xffffffff);
2548
2549                 /*
2550                  * If this is not done, a bug is reported by the stock
2551                  * FC11 i386 kernel because it has lots of kernel
2552                  * debugging enabled.
2553                  */
2554                 sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
2555
2556                 /* Mark all commands as currently inactive.*/
2557                 atomic_set(&dd->port->commands[i].active, 0);
2558         }
2559
2560         /* Setup the pointers to the extended s_active and CI registers. */
2561         for (i = 0; i < dd->slot_groups; i++) {
2562                 dd->port->s_active[i] =
2563                         dd->port->mmio + i*0x80 + PORT_SCR_ACT;
2564                 dd->port->cmd_issue[i] =
2565                         dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
2566                 dd->port->completed[i] =
2567                         dd->port->mmio + i*0x80 + PORT_SDBV;
2568         }
2569
2570         /* Reset the HBA. */
2571         if (mtip_hba_reset(dd) < 0) {
2572                 dev_err(&dd->pdev->dev,
2573                         "Card did not reset within timeout\n");
2574                 rv = -EIO;
2575                 goto out2;
2576         }
2577
2578         mtip_init_port(dd->port);
2579         mtip_start_port(dd->port);
2580
2581         /* Setup the ISR and enable interrupts. */
2582         rv = devm_request_irq(&dd->pdev->dev,
2583                                 dd->pdev->irq,
2584                                 mtip_irq_handler,
2585                                 IRQF_SHARED,
2586                                 dev_driver_string(&dd->pdev->dev),
2587                                 dd);
2588
2589         if (rv) {
2590                 dev_err(&dd->pdev->dev,
2591                         "Unable to allocate IRQ %d\n", dd->pdev->irq);
2592                 goto out2;
2593         }
2594
2595         /* Enable interrupts on the HBA. */
2596         writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
2597                                         dd->mmio + HOST_CTL);
2598
2599         init_timer(&dd->port->cmd_timer);
2600         dd->port->cmd_timer.data = (unsigned long int) dd->port;
2601         dd->port->cmd_timer.function = mtip_timeout_function;
2602         mod_timer(&dd->port->cmd_timer,
2603                 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
2604
2605         if (mtip_get_identify(dd->port, NULL) < 0) {
2606                 rv = -EFAULT;
2607                 goto out3;
2608         }
2609         mtip_dump_identify(dd->port);
2610
2611         if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2612                 MTIP_FTL_REBUILD_MAGIC) {
2613                 return mtip_ftl_rebuild_poll(dd);
2614         }
2615         return rv;
2616
2617 out3:
2618         del_timer_sync(&dd->port->cmd_timer);
2619
2620         /* Disable interrupts on the HBA. */
2621         writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
2622                         dd->mmio + HOST_CTL);
2623
2624         /* Release the IRQ. */
2625         devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
2626
2627 out2:
2628         mtip_deinit_port(dd->port);
2629
2630         /* Free the command/command header memory. */
2631         dmam_free_coherent(&dd->pdev->dev,
2632                                 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
2633                                 dd->port->command_list,
2634                                 dd->port->command_list_dma);
2635 out1:
2636         /* Free the memory allocated for the port structure. */
2637         kfree(dd->port);
2638
2639         return rv;
2640 }
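/*
 * For reference, the single coherent allocation made above is laid
 * out, in order, as: the command headers (HW_CMD_SLOT_SZ bytes), the
 * received FIS area (AHCI_RX_FIS_SZ bytes), the per-slot command
 * tables (HW_CMD_TBL_AR_SZ bytes), one sector of identify data and
 * one scratch sector buffer (ATA_SECT_SIZE bytes each), matching the
 * rxfis, command_table, identify and sector_buffer pointers set up in
 * mtip_hw_init().
 */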
2641
2642 /*
2643  * Called to deinitialize an interface.
2644  *
2645  * @dd Pointer to the driver data structure.
2646  *
2647  * return value
2648  *      0
2649  */
2650 static int mtip_hw_exit(struct driver_data *dd)
2651 {
2652         /*
2653          * Send standby immediate (E0h) to the drive so that it
2654          * saves its state.
2655          */
2656         if (atomic_read(&dd->drv_cleanup_done) != true) {
2657
2658                 mtip_standby_immediate(dd->port);
2659
2660                 /* de-initialize the port. */
2661                 mtip_deinit_port(dd->port);
2662
2663                 /* Disable interrupts on the HBA. */
2664                 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
2665                                 dd->mmio + HOST_CTL);
2666         }
2667
2668         del_timer_sync(&dd->port->cmd_timer);
2669
2670         /* Stop the bottom half tasklet. */
2671         tasklet_kill(&dd->tasklet);
2672
2673         /* Release the IRQ. */
2674         devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
2675
2676         /* Free the command/command header memory. */
2677         dmam_free_coherent(&dd->pdev->dev,
2678                         HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
2679                         dd->port->command_list,
2680                         dd->port->command_list_dma);
2681         /* Free the memory allocated for the port structure. */
2682         kfree(dd->port);
2683
2684         return 0;
2685 }
2686
2687 /*
2688  * Issue a Standby Immediate command to the device.
2689  *
2690  * This function is called by the Block Layer just before the
2691  * system powers off during a shutdown.
2692  *
2693  * @dd Pointer to the driver data structure.
2694  *
2695  * return value
2696  *      0
2697  */
2698 static int mtip_hw_shutdown(struct driver_data *dd)
2699 {
2700         /*
2701          * Send standby immediate (E0h) to the drive so that it
2702          * saves its state.
2703          */
2704         mtip_standby_immediate(dd->port);
2705
2706         return 0;
2707 }
2708
2709 /*
2710  * Suspend function
2711  *
2712  * This function is called by the Block Layer just before the
2713  * system hibernates.
2714  *
2715  * @dd Pointer to the driver data structure.
2716  *
2717  * return value
2718  *      0       Suspend was successful
2719  *      -EFAULT Suspend was not successful
2720  */
2721 static int mtip_hw_suspend(struct driver_data *dd)
2722 {
2723         /*
2724          * Send standby immediate (E0h) to the drive
2725          * so that it saves its state.
2726          */
2727         if (mtip_standby_immediate(dd->port) != 0) {
2728                 dev_err(&dd->pdev->dev,
2729                         "Failed standby-immediate command\n");
2730                 return -EFAULT;
2731         }
2732
2733         /* Disable interrupts on the HBA.*/
2734         writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
2735                         dd->mmio + HOST_CTL);
2736         mtip_deinit_port(dd->port);
2737
2738         return 0;
2739 }
2740
2741 /*
2742  * Resume function
2743  *
2744  * This function is called by the Block Layer as the
2745  * system resumes.
2746  *
2747  * @dd Pointer to the driver data structure.
2748  *
2749  * return value
2750  *      0       Resume was successful
2751  *      -EFAULT Resume was not successful
2752  */
2753 static int mtip_hw_resume(struct driver_data *dd)
2754 {
2755         /* Perform any needed hardware setup steps */
2756         hba_setup(dd);
2757
2758         /* Reset the HBA */
2759         if (mtip_hba_reset(dd) != 0) {
2760                 dev_err(&dd->pdev->dev,
2761                         "Unable to reset the HBA\n");
2762                 return -EFAULT;
2763         }
2764
2765         /*
2766          * Enable the port, DMA engine, and FIS reception specific
2767          * h/w in controller.
2768          */
2769         mtip_init_port(dd->port);
2770         mtip_start_port(dd->port);
2771
2772         /* Enable interrupts on the HBA.*/
2773         writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
2774                         dd->mmio + HOST_CTL);
2775
2776         return 0;
2777 }
2778
2779 /*
2780  * Helper function for reusing disk name
2781  * upon hot insertion.
2782  */
2783 static int rssd_disk_name_format(char *prefix,
2784                                  int index,
2785                                  char *buf,
2786                                  int buflen)
2787 {
2788         const int base = 'z' - 'a' + 1;
2789         char *begin = buf + strlen(prefix);
2790         char *end = buf + buflen;
2791         char *p;
2792         int unit;
2793
2794         p = end - 1;
2795         *p = '\0';
2796         unit = base;
2797         do {
2798                 if (p == begin)
2799                         return -EINVAL;
2800                 *--p = 'a' + (index % unit);
2801                 index = (index / unit) - 1;
2802         } while (index >= 0);
2803
2804         memmove(begin, p, end - p);
2805         memcpy(buf, prefix, strlen(prefix));
2806
2807         return 0;
2808 }
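/*
 * Worked example (illustration only), mirroring the sd.c scheme this
 * helper is based on:
 *
 *      rssd_disk_name_format("rssd",  0, buf, DISK_NAME_LEN) -> "rssda"
 *      rssd_disk_name_format("rssd", 25, buf, DISK_NAME_LEN) -> "rssdz"
 *      rssd_disk_name_format("rssd", 26, buf, DISK_NAME_LEN) -> "rssdaa"
 */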
2809
2810 /*
2811  * Block layer IOCTL handler.
2812  *
2813  * @dev Pointer to the block_device structure.
2814  * @mode ignored
2815  * @cmd IOCTL command passed from the user application.
2816  * @arg Argument passed from the user application.
2817  *
2818  * return value
2819  *      0        IOCTL completed successfully.
2820  *      -ENOTTY  IOCTL not supported or invalid driver data
2821  *                 structure pointer.
2822  */
2823 static int mtip_block_ioctl(struct block_device *dev,
2824                             fmode_t mode,
2825                             unsigned cmd,
2826                             unsigned long arg)
2827 {
2828         struct driver_data *dd = dev->bd_disk->private_data;
2829
2830         if (!capable(CAP_SYS_ADMIN))
2831                 return -EACCES;
2832
2833         if (!dd)
2834                 return -ENOTTY;
2835
2836         switch (cmd) {
2837         case BLKFLSBUF:
2838                 return 0;
2839         default:
2840                 return mtip_hw_ioctl(dd, cmd, arg);
2841         }
2842 }
2843
2844 #ifdef CONFIG_COMPAT
2845 /*
2846  * Block layer compat IOCTL handler.
2847  *
2848  * @dev Pointer to the block_device structure.
2849  * @mode ignored
2850  * @cmd IOCTL command passed from the user application.
2851  * @arg Argument passed from the user application.
2852  *
2853  * return value
2854  *      0        IOCTL completed successfully.
2855  *      -ENOTTY  IOCTL not supported or invalid driver data
2856  *                 structure pointer.
2857  */
2858 static int mtip_block_compat_ioctl(struct block_device *dev,
2859                             fmode_t mode,
2860                             unsigned cmd,
2861                             unsigned long arg)
2862 {
2863         struct driver_data *dd = dev->bd_disk->private_data;
2864
2865         if (!capable(CAP_SYS_ADMIN))
2866                 return -EACCES;
2867
2868         if (!dd)
2869                 return -ENOTTY;
2870
2871         switch (cmd) {
2872         case BLKFLSBUF:
2873                 return 0;
2874         case HDIO_DRIVE_TASKFILE: {
2875                 struct mtip_compat_ide_task_request_s *compat_req_task;
2876                 ide_task_request_t req_task;
2877                 int compat_tasksize, outtotal, ret;
2878
2879                 compat_tasksize = sizeof(struct mtip_compat_ide_task_request_s);
2880
2881                 compat_req_task =
2882                         (struct mtip_compat_ide_task_request_s __user *) arg;
2883
2884                 if (copy_from_user(&req_task, (void __user *) arg,
2885                                 compat_tasksize - (2 * sizeof(compat_long_t))))
2886                         return -EFAULT;
2887
2888                 if (get_user(req_task.out_size, &compat_req_task->out_size))
2889                         return -EFAULT;
2890
2891                 if (get_user(req_task.in_size, &compat_req_task->in_size))
2892                         return -EFAULT;
2893
2894                 outtotal = sizeof(struct mtip_compat_ide_task_request_s);
2895
2896                 ret = exec_drive_taskfile(dd, (void __user *) arg,
2897                                                 &req_task, outtotal);
2898
2899                 if (copy_to_user((void __user *) arg, &req_task,
2900                                 compat_tasksize -
2901                                 (2 * sizeof(compat_long_t))))
2902                         return -EFAULT;
2903
2904                 if (put_user(req_task.out_size, &compat_req_task->out_size))
2905                         return -EFAULT;
2906
2907                 if (put_user(req_task.in_size, &compat_req_task->in_size))
2908                         return -EFAULT;
2909
2910                 return ret;
2911         }
2912         default:
2913                 return mtip_hw_ioctl(dd, cmd, arg);
2914         }
2915 }
2916 #endif
2917
2918 /*
2919  * Obtain the geometry of the device.
2920  *
2921  * You may think that this function is obsolete, but some applications,
2922  * fdisk for example, still use CHS values. This function describes the
2923  * device as having 224 heads and 56 sectors per cylinder. These values are
2924  * chosen so that each cylinder is aligned on a 4KB boundary. Since a
2925  * partition is described in terms of a start and end cylinder, this means
2926  * that each partition is also 4KB aligned. Non-aligned partitions adversely
2927  * affect performance.
2928  *
2929  * @dev Pointer to the block_device structure.
2930  * @geo Pointer to a hd_geometry structure.
2931  *
2932  * return value
2933  *      0       Operation completed successfully.
2934  *      -ENOTTY An error occurred while reading the drive capacity.
2935  */
2936 static int mtip_block_getgeo(struct block_device *dev,
2937                                 struct hd_geometry *geo)
2938 {
2939         struct driver_data *dd = dev->bd_disk->private_data;
2940         sector_t capacity;
2941
2942         if (!dd)
2943                 return -ENOTTY;
2944
2945         if (!(mtip_hw_get_capacity(dd, &capacity))) {
2946                 dev_warn(&dd->pdev->dev,
2947                         "Could not get drive capacity.\n");
2948                 return -ENOTTY;
2949         }
2950
2951         geo->heads = 224;
2952         geo->sectors = 56;
2953 #if BITS_PER_LONG == 64
2954         geo->cylinders = capacity / (geo->heads * geo->sectors);
2955 #else
2956         do_div(capacity, (geo->heads * geo->sectors));
2957         geo->cylinders = capacity;
2958 #endif
2959         return 0;
2960 }
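/*
 * Arithmetic behind the 4KB-alignment claim above (for reference):
 * 224 heads * 56 sectors = 12544 sectors per cylinder, and
 * 12544 * 512 bytes = 6422528 bytes = 1568 * 4096 bytes, so every
 * cylinder boundary, and therefore every cylinder-aligned partition,
 * starts on a 4KB boundary.
 */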
2961
2962 /*
2963  * Block device operation function.
2964  *
2965  * This structure contains pointers to the functions required by the block
2966  * layer.
2967  */
2968 static const struct block_device_operations mtip_block_ops = {
2969         .ioctl          = mtip_block_ioctl,
2970 #ifdef CONFIG_COMPAT
2971         .compat_ioctl   = mtip_block_compat_ioctl,
2972 #endif
2973         .getgeo         = mtip_block_getgeo,
2974         .owner          = THIS_MODULE
2975 };
2976
2977 /*
2978  * Block layer make request function.
2979  *
2980  * This function is called by the kernel to process a BIO for
2981  * the P320 device.
2982  *
2983  * @queue Pointer to the request queue. Unused other than to obtain
2984  *              the driver data structure.
2985  * @bio   Pointer to the BIO.
2986  *
2987  * return value
2988  *      0
2989  */
2990 static int mtip_make_request(struct request_queue *queue, struct bio *bio)
2991 {
2992         struct driver_data *dd = queue->queuedata;
2993         struct scatterlist *sg;
2994         struct bio_vec *bvec;
2995         int nents = 0;
2996         int tag = 0;
2997
2998         if (unlikely(!bio_has_data(bio))) {
2999                 blk_queue_flush(queue, 0);
3000                 bio_endio(bio, 0);
3001                 return 0;
3002         }
3003
3004         if (unlikely(atomic_read(&dd->eh_active))) {
3005                 bio_endio(bio, -EBUSY);
3006                 return 0;
3007         }
3008
3009         sg = mtip_hw_get_scatterlist(dd, &tag);
3010         if (likely(sg != NULL)) {
3011                 blk_queue_bounce(queue, &bio);
3012
3013                 if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
3014                         dev_warn(&dd->pdev->dev,
3015                                 "Maximum number of SGL entries exceeded");
3016                         bio_io_error(bio);
3017                         mtip_hw_release_scatterlist(dd, tag);
3018                         return 0;
3019                 }
3020
3021                 /* Create the scatter list for this bio. */
3022                 bio_for_each_segment(bvec, bio, nents) {
3023                         sg_set_page(&sg[nents],
3024                                         bvec->bv_page,
3025                                         bvec->bv_len,
3026                                         bvec->bv_offset);
3027                 }
3028
3029                 /* Issue the read/write. */
3030                 mtip_hw_submit_io(dd,
3031                                 bio->bi_sector,
3032                                 bio_sectors(bio),
3033                                 nents,
3034                                 tag,
3035                                 bio_endio,
3036                                 bio,
3037                                 bio->bi_rw & REQ_FLUSH,
3038                                 bio_data_dir(bio));
3039         } else {
3040                 bio_io_error(bio);
3041         }
3042
3043         return 0;
3044 }
3045
3046 /*
3047  * Block layer initialization function.
3048  *
3049  * This function is called once by the PCI layer for each P320
3050  * device that is connected to the system.
3051  *
3052  * @dd Pointer to the driver data structure.
3053  *
3054  * return value
3055  *      0 on success else an error code.
3056  */
3057 static int mtip_block_initialize(struct driver_data *dd)
3058 {
3059         int rv = 0;
3060         sector_t capacity;
3061         unsigned int index = 0;
3062         struct kobject *kobj;
3063
3064         /* Initialize the protocol layer. */
3065         rv = mtip_hw_init(dd);
3066         if (rv < 0) {
3067                 dev_err(&dd->pdev->dev,
3068                         "Protocol layer initialization failed\n");
3069                 rv = -EINVAL;
3070                 goto protocol_init_error;
3071         }
3072
3073         /* Allocate the request queue. */
3074         dd->queue = blk_alloc_queue(GFP_KERNEL);
3075         if (dd->queue == NULL) {
3076                 dev_err(&dd->pdev->dev,
3077                         "Unable to allocate request queue\n");
3078                 rv = -ENOMEM;
3079                 goto block_queue_alloc_init_error;
3080         }
3081
3082         /* Attach our request function to the request queue. */
3083         blk_queue_make_request(dd->queue, mtip_make_request);
3084
3085         /* Set device limits. */
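             /*
              * The logical sector size stays at the 512-byte default; the
              * 4KB physical block size and minimum I/O size are hints that
              * help partitioning tools and filesystems align requests.
              */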
3086         set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
3087         blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
3088         blk_queue_physical_block_size(dd->queue, 4096);
3089         blk_queue_io_min(dd->queue, 4096);
3090
3091         dd->disk = alloc_disk(MTIP_MAX_MINORS);
3092         if (dd->disk == NULL) {
3093                 dev_err(&dd->pdev->dev,
3094                         "Unable to allocate gendisk structure\n");
3095                 rv = -ENOMEM;
3096                 goto alloc_disk_error;
3097         }
3098
3099         /* Generate the disk name, implemented the same way as in sd.c */
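             /*
              * ida_pre_get() only preallocates memory; ida_get_new() can
              * still return -EAGAIN if that preallocation was consumed by
              * another caller, so retry until an index is assigned.
              */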
3100         do {
3101                 rv = -ENOMEM;
3102                 if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
3103                         goto ida_get_error;
3104                 spin_lock(&rssd_index_lock);
3105                 rv = ida_get_new(&rssd_index_ida, &index);
3106                 spin_unlock(&rssd_index_lock);
3107         } while (rv == -EAGAIN);
3108
3109         if (rv)
3110                 goto ida_get_error;
3111
3112         rv = rssd_disk_name_format("rssd",
3113                                 index,
3114                                 dd->disk->disk_name,
3115                                 DISK_NAME_LEN);
3116         if (rv)
3117                 goto disk_index_error;
3118
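             /* Each device claims MTIP_MAX_MINORS minor numbers, leaving room for partition device nodes. */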
3119         dd->disk->driverfs_dev  = &dd->pdev->dev;
3120         dd->disk->major         = dd->major;
3121         dd->disk->first_minor   = dd->instance * MTIP_MAX_MINORS;
3122         dd->disk->fops          = &mtip_block_ops;
3123         dd->disk->queue         = dd->queue;
3124         dd->disk->private_data  = dd;
3125         dd->queue->queuedata    = dd;
3126         dd->index               = index;
3127
3128         /* Set the capacity of the device in 512 byte sectors. */
3129         if (!(mtip_hw_get_capacity(dd, &capacity))) {
3130                 dev_warn(&dd->pdev->dev,
3131                         "Could not read drive capacity\n");
3132                 rv = -EIO;
3133                 goto read_capacity_error;
3134         }
3135         set_capacity(dd->disk, capacity);
3136
3137         /* Enable the block device and add it to /dev */
3138         add_disk(dd->disk);
3139
3140         /*
3141          * Now that the disk is active, initialize any sysfs attributes
3142          * managed by the protocol layer.
3143          */
3144         kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3145         if (kobj) {
3146                 mtip_hw_sysfs_init(dd, kobj);
3147                 kobject_put(kobj);
3148         }
3149
3150         return rv;
3151
3152 read_capacity_error:
3153         /*
3154          * Delete our gendisk structure. This also removes the device
3155          * from /dev
3156          */
3157         del_gendisk(dd->disk);
3158
3159 disk_index_error:
3160         spin_lock(&rssd_index_lock);
3161         ida_remove(&rssd_index_ida, index);
3162         spin_unlock(&rssd_index_lock);
3163
3164 ida_get_error:
3165         put_disk(dd->disk);
3166
3167 alloc_disk_error:
3168         blk_cleanup_queue(dd->queue);
3169
3170 block_queue_alloc_init_error:
3171         /* De-initialize the protocol layer. */
3172         mtip_hw_exit(dd);
3173
3174 protocol_init_error:
3175         return rv;
3176 }
3177
3178 /*
3179  * Block layer deinitialization function.
3180  *
3181  * Called by the PCI layer as each P320 device is removed.
3182  *
3183  * @dd Pointer to the driver data structure.
3184  *
3185  * return value
3186  *      0
3187  */
3188 static int mtip_block_remove(struct driver_data *dd)
3189 {
3190         struct kobject *kobj;
3191         /* Clean up the sysfs attributes managed by the protocol layer. */
3192         kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3193         if (kobj) {
3194                 mtip_hw_sysfs_exit(dd, kobj);
3195                 kobject_put(kobj);
3196         }
3197
3198         /*
3199          * Delete our gendisk structure. This also removes the device
3200          * from /dev
3201          */
3202         del_gendisk(dd->disk);
3203         blk_cleanup_queue(dd->queue);
3204         dd->disk  = NULL;
3205         dd->queue = NULL;
3206
3207         /* De-initialize the protocol layer. */
3208         mtip_hw_exit(dd);
3209
3210         return 0;
3211 }
3212
3213 /*
3214  * Function called by the PCI layer just before the
3215  * machine shuts down.
3216  *
3217  * If a protocol layer shutdown function is present it will be called
3218  * by this function.
3219  *
3220  * @dd Pointer to the driver data structure.
3221  *
3222  * return value
3223  *      0
3224  */
3225 static int mtip_block_shutdown(struct driver_data *dd)
3226 {
3227         dev_info(&dd->pdev->dev,
3228                 "Shutting down %s ...\n", dd->disk->disk_name);
3229
3230         /* Delete our gendisk structure, and cleanup the blk queue. */
3231         del_gendisk(dd->disk);
3232         blk_cleanup_queue(dd->queue);
3233         dd->disk  = NULL;
3234         dd->queue = NULL;
3235
3236         mtip_hw_shutdown(dd);
3237         return 0;
3238 }
3239
3240 static int mtip_block_suspend(struct driver_data *dd)
3241 {
3242         dev_info(&dd->pdev->dev,
3243                 "Suspending %s ...\n", dd->disk->disk_name);
3244         mtip_hw_suspend(dd);
3245         return 0;
3246 }
3247
3248 static int mtip_block_resume(struct driver_data *dd)
3249 {
3250         dev_info(&dd->pdev->dev, "Resuming %s ...\n",
3251                 dd->disk->disk_name);
3252         mtip_hw_resume(dd);
3253         return 0;
3254 }
3255
3256 /*
3257  * Called for each supported PCI device detected.
3258  *
3259  * This function allocates the private data structure, enables the
3260  * PCI device and then calls the block layer initialization function.
3261  *
3262  * return value
3263  *      0 on success else an error code.
3264  */
3265 static int mtip_pci_probe(struct pci_dev *pdev,
3266                         const struct pci_device_id *ent)
3267 {
3268         int rv = 0;
3269         struct driver_data *dd = NULL;
3270
3271         /* Allocate memory for this device's private data. */
3272         dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
3273         if (dd == NULL) {
3274                 dev_err(&pdev->dev,
3275                         "Unable to allocate memory for driver data\n");
3276                 return -ENOMEM;
3277         }
3278
3279         /* Set the atomic variable to 1 in case of SRSI (surprise removal/insertion) */
3280         atomic_set(&dd->drv_cleanup_done, true);
3281
3282         atomic_set(&dd->resumeflag, false);
3283         atomic_set(&dd->eh_active, 0);
3284
3285         /* Attach the private data to this PCI device.  */
3286         pci_set_drvdata(pdev, dd);
3287
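             /* pcim_*() managed helpers tie their resources to the device and release them automatically on driver detach. */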
3288         rv = pcim_enable_device(pdev);
3289         if (rv < 0) {
3290                 dev_err(&pdev->dev, "Unable to enable device\n");
3291                 goto iomap_err;
3292         }
3293
3294         /* Map BAR5 to memory. */
3295         rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
3296         if (rv < 0) {
3297                 dev_err(&pdev->dev, "Unable to map regions\n");
3298                 goto iomap_err;
3299         }
3300
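             /*
              * Request 64-bit DMA addressing; if a 64-bit coherent mask
              * cannot be set, retry with a 32-bit coherent mask before
              * giving up.
              */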
3301         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3302                 rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3303
3304                 if (rv) {
3305                         rv = pci_set_consistent_dma_mask(pdev,
3306                                                 DMA_BIT_MASK(32));
3307                         if (rv) {
3308                                 dev_warn(&pdev->dev,
3309                                         "64-bit DMA enable failed\n");
3310                                 goto setmask_err;
3311                         }
3312                 }
3313         }
3314
3315         pci_set_master(pdev);
3316
3317         if (pci_enable_msi(pdev)) {
3318                 dev_warn(&pdev->dev,
3319                         "Unable to enable MSI interrupt.\n");
3320                 goto block_initialize_err;
3321         }
3322
3323         /* Copy the info we may need later into the private data structure. */
3324         dd->major       = mtip_major;
3325         dd->protocol    = ent->driver_data;
3326         dd->instance    = instance;
3327         dd->pdev        = pdev;
3328
3329         /* Initialize the block layer. */
3330         rv = mtip_block_initialize(dd);
3331         if (rv < 0) {
3332                 dev_err(&pdev->dev,
3333                         "Unable to initialize block layer\n");
3334                 goto block_initialize_err;
3335         }
3336
3337         /*
3338          * Increment the instance count so that each device has a unique
3339          * instance number.
3340          */
3341         instance++;
3342
3343         goto done;
3344
3345 block_initialize_err:
3346         pci_disable_msi(pdev);
3347
3348 setmask_err:
3349         pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
3350
3351 iomap_err:
3352         kfree(dd);
3353         pci_set_drvdata(pdev, NULL);
3354         return rv;
3355 done:
3356         /* Set the atomic variable to 0 in case of SRSI */
3357         atomic_set(&dd->drv_cleanup_done, false);
3358
3359         return rv;
3360 }
3361
3362 /*
3363  * Called for each probed device when the device is removed or the
3364  * driver is unloaded.
3365  *
3366  * return value
3367  *      None
3368  */
3369 static void mtip_pci_remove(struct pci_dev *pdev)
3370 {
3371         struct driver_data *dd = pci_get_drvdata(pdev);
3372         int counter = 0;
3373
3374         if (mtip_check_surprise_removal(pdev)) {
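                     /*
                      * On surprise removal, give any in-flight cleanup up to
                      * ~200 ms (10 x 20 ms) to finish before forcing command
                      * cleanup ourselves.
                      */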
3375                 while (atomic_read(&dd->drv_cleanup_done) == false) {
3376                         counter++;
3377                         msleep(20);
3378                         if (counter == 10) {
3379                                 /* Cleanup the outstanding commands */
3380                                 mtip_command_cleanup(dd);
3381                                 break;
3382                         }
3383                 }
3384         }
3385         /* Set the atomic variable to 1 in case of SRSI */
3386         atomic_set(&dd->drv_cleanup_done, true);
3387
3388         /* Clean up the block layer. */
3389         mtip_block_remove(dd);
3390
3391         pci_disable_msi(pdev);
3392
3393         kfree(dd);
3394         pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
3395 }
3396
3397 /*
3398  * Called for each probed device when the device is suspended.
3399  *
3400  * return value
3401  *      0  Success
3402  *      <0 Error
3403  */
3404 static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
3405 {
3406         int rv = 0;
3407         struct driver_data *dd = pci_get_drvdata(pdev);
3408
3409         if (!dd) {
3410                 dev_err(&pdev->dev,
3411                         "Driver private data structure is NULL\n");
3412                 return -EFAULT;
3413         }
3414
3415         atomic_set(&dd->resumeflag, true);
3416
3417         /* Disable ports & interrupts then send standby immediate */
3418         rv = mtip_block_suspend(dd);
3419         if (rv < 0) {
3420                 dev_err(&pdev->dev,
3421                         "Failed to suspend controller\n");
3422                 return rv;
3423         }
3424
3425         /*
3426          * Save the PCI config space to the pdev structure and
3427          * disable the device.
3428          */
3429         pci_save_state(pdev);
3430         pci_disable_device(pdev);
3431
3432         /* Move to low power state */
3433         pci_set_power_state(pdev, PCI_D3hot);
3434
3435         return rv;
3436 }
3437
3438 /*
3439  * Called for each probed device when the device is resumed.
3440  *
3441  * return value
3442  *      0  Success
3443  *      <0 Error
3444  */
3445 static int mtip_pci_resume(struct pci_dev *pdev)
3446 {
3447         int rv = 0;
3448         struct driver_data *dd;
3449
3450         dd = pci_get_drvdata(pdev);
3451         if (!dd) {
3452                 dev_err(&pdev->dev,
3453                         "Driver private data structure is NULL\n");
3454                 return -EFAULT;
3455         }
3456
3457         /* Move the device to the active state */
3458         pci_set_power_state(pdev, PCI_D0);
3459
3460         /* Restore PCI configuration space */
3461         pci_restore_state(pdev);
3462
3463         /* Enable the PCI device */
3464         rv = pcim_enable_device(pdev);
3465         if (rv < 0) {
3466                 dev_err(&pdev->dev,
3467                         "Failed to enable card during resume\n");
3468                 goto err;
3469         }
3470         pci_set_master(pdev);
3471
3472         /*
3473          * Calls the hbaReset, initPort, and startPort functions,
3474          * then enables interrupts.
3475          */
3476         rv = mtip_block_resume(dd);
3477         if (rv < 0)
3478                 dev_err(&pdev->dev, "Unable to resume\n");
3479
3480 err:
3481         atomic_set(&dd->resumeflag, false);
3482
3483         return rv;
3484 }
3485
3486 /*
3487  * Shutdown routine
3488  *
3489  * return value
3490  *      None
3491  */
3492 static void mtip_pci_shutdown(struct pci_dev *pdev)
3493 {
3494         struct driver_data *dd = pci_get_drvdata(pdev);
3495         if (dd)
3496                 mtip_block_shutdown(dd);
3497 }
3498
3499 /* Table of device ids supported by this driver. */
3500 static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
3501         {  PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },
3502         { 0 }
3503 };
3504
3505 /* Structure that describes the PCI driver functions. */
3506 static struct pci_driver mtip_pci_driver = {
3507         .name                   = MTIP_DRV_NAME,
3508         .id_table               = mtip_pci_tbl,
3509         .probe                  = mtip_pci_probe,
3510         .remove                 = mtip_pci_remove,
3511         .suspend                = mtip_pci_suspend,
3512         .resume                 = mtip_pci_resume,
3513         .shutdown               = mtip_pci_shutdown,
3514 };
3515
3516 MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
3517
3518 /*
3519  * Module initialization function.
3520  *
3521  * Called once when the module is loaded. This function allocates a major
3522  * block device number to the Cyclone devices and registers the PCI layer
3523  * of the driver.
3524  *
3525  * Return value
3526  *      0 on success else error code.
3527  */
3528 static int __init mtip_init(void)
3529 {
3530         printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
3531
3532         /* Allocate a major block device number to use with this driver. */
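             /* Passing 0 asks the block layer to pick an unused major number dynamically. */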
3533         mtip_major = register_blkdev(0, MTIP_DRV_NAME);
3534         if (mtip_major < 0) {
3535                 printk(KERN_ERR "Unable to register block device (%d)\n",
3536                        mtip_major);
3537                 return -EBUSY;
3538         }
3539
3540         /* Register our PCI operations. */
3541         return pci_register_driver(&mtip_pci_driver);
3542 }
3543
3544 /*
3545  * Module de-initialization function.
3546  *
3547  * Called once when the module is unloaded. This function deallocates
3548  * the major block device number allocated by mtip_init() and
3549  * unregisters the PCI layer of the driver.
3550  *
3551  * Return value
3552  *      none
3553  */
3554 static void __exit mtip_exit(void)
3555 {
3556         /* Release the allocated major block device number. */
3557         unregister_blkdev(mtip_major, MTIP_DRV_NAME);
3558
3559         /* Unregister the PCI driver. */
3560         pci_unregister_driver(&mtip_pci_driver);
3561 }
3562
3563 MODULE_AUTHOR("Micron Technology, Inc");
3564 MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
3565 MODULE_LICENSE("GPL");
3566 MODULE_VERSION(MTIP_DRV_VERSION);
3567
3568 module_init(mtip_init);
3569 module_exit(mtip_exit);