Merge remote-tracking branch 'stable/linux-4.19.y' into rpi-4.19.y
[platform/kernel/linux-rpi.git] / drivers / mmc / host / sdhci.c
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15
16 #include <linux/delay.h>
17 #include <linux/ktime.h>
18 #include <linux/highmem.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <linux/sizes.h>
25 #include <linux/swiotlb.h>
26 #include <linux/regulator/consumer.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/of.h>
29
30 #include <linux/leds.h>
31
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/card.h>
35 #include <linux/mmc/sdio.h>
36 #include <linux/mmc/slot-gpio.h>
37
38 #include "sdhci.h"
39
40 #define DRIVER_NAME "sdhci"
41
42 #define DBG(f, x...) \
43         pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44
45 #define SDHCI_DUMP(f, x...) \
46         pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
47
48 #define MAX_TUNING_LOOP 40
49
50 static unsigned int debug_quirks = 0;
51 static unsigned int debug_quirks2;
52
53 static void sdhci_finish_data(struct sdhci_host *);
54
55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
56
/*
 * Dump every SDHCI register to the kernel log at error level.  Debug aid
 * used when a command or reset fails; safe to call from interrupt context
 * (only performs MMIO reads and pr_err()).
 */
void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		/* 64-bit ADMA keeps the high address word in a second register */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
119
120 /*****************************************************************************\
121  *                                                                           *
122  * Low level functions                                                       *
123  *                                                                           *
124 \*****************************************************************************/
125
126 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
127 {
128         return cmd->data || cmd->flags & MMC_RSP_BUSY;
129 }
130
131 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
132 {
133         u32 present;
134
135         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
136             !mmc_card_is_removable(host->mmc))
137                 return;
138
139         if (enable) {
140                 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
141                                       SDHCI_CARD_PRESENT;
142
143                 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
144                                        SDHCI_INT_CARD_INSERT;
145         } else {
146                 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
147         }
148
149         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
150         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
151 }
152
/* Arm the insert/remove interrupt matching the current card state. */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}
157
/* Disarm both card insert and remove interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
162
163 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
164 {
165         if (host->bus_on)
166                 return;
167         host->bus_on = true;
168         pm_runtime_get_noresume(host->mmc->parent);
169 }
170
171 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
172 {
173         if (!host->bus_on)
174                 return;
175         host->bus_on = false;
176         pm_runtime_put_noidle(host->mmc->parent);
177 }
178
/*
 * sdhci_reset - issue a software reset and poll for completion
 * @host: SDHCI host
 * @mask: SDHCI_RESET_* bits to write to the SOFTWARE_RESET register
 *
 * Waits up to 100 ms for the controller to clear the reset bit(s),
 * dumping the registers on timeout.  SDHCI_RESET_ALL also turns off SD
 * bus power, so the cached clock is zeroed and, for quirky controllers,
 * the runtime-PM bus reference is dropped.
 */
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		/*
		 * Sample the deadline *before* reading the register so that
		 * one final status read still happens even if we were
		 * preempted past the deadline in between.
		 */
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
211
212 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
213 {
214         if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
215                 struct mmc_host *mmc = host->mmc;
216
217                 if (!mmc->ops->get_cd(mmc))
218                         return;
219         }
220
221         host->ops->reset(host, mask);
222
223         if (mask & SDHCI_RESET_ALL) {
224                 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
225                         if (host->ops->enable_dma)
226                                 host->ops->enable_dma(host);
227                 }
228
229                 /* Resetting the controller clears many */
230                 host->preset_enabled = false;
231         }
232 }
233
/*
 * Program the baseline interrupt set: command/data completion plus all
 * command and data error conditions.  Re-tune is additionally enabled
 * for hosts using tuning modes 2 or 3.
 */
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
249
250 static void sdhci_init(struct sdhci_host *host, int soft)
251 {
252         struct mmc_host *mmc = host->mmc;
253
254         if (soft)
255                 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
256         else
257                 sdhci_do_reset(host, SDHCI_RESET_ALL);
258
259         sdhci_set_default_irqs(host);
260
261         host->cqe_on = false;
262
263         if (soft) {
264                 /* force clock reconfiguration */
265                 host->clock = 0;
266                 mmc->ops->set_ios(mmc, &mmc->ios);
267         }
268 }
269
/* Hard re-initialization: full reset followed by re-arming card detect. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
275
276 static void __sdhci_led_activate(struct sdhci_host *host)
277 {
278         u8 ctrl;
279
280         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
281         ctrl |= SDHCI_CTRL_LED;
282         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
283 }
284
285 static void __sdhci_led_deactivate(struct sdhci_host *host)
286 {
287         u8 ctrl;
288
289         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
290         ctrl &= ~SDHCI_CTRL_LED;
291         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
292 }
293
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
/*
 * LED class glue: the host LED is exposed as an led_classdev whose
 * brightness callback drives the controller's LED bit.  The callback can
 * run in any context (hence the irqsave lock) and must not touch the
 * hardware while the host is runtime suspended.
 */
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

/* Register the classdev, using the mmc host name as the default trigger. */
static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

/*
 * With LEDS_CLASS the LED is driven through the trigger/classdev path,
 * so the direct per-request activate/deactivate hooks are no-ops.
 */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

/* No LED class: nothing to register ... */
static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

/* ... and the LED bit is toggled directly around each request instead. */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
364
365 /*****************************************************************************\
366  *                                                                           *
367  * Core functions                                                            *
368  *                                                                           *
369 \*****************************************************************************/
370
371 static void sdhci_read_block_pio(struct sdhci_host *host)
372 {
373         unsigned long flags;
374         size_t blksize, len, chunk;
375         u32 uninitialized_var(scratch);
376         u8 *buf;
377
378         DBG("PIO reading\n");
379
380         blksize = host->data->blksz;
381         chunk = 0;
382
383         local_irq_save(flags);
384
385         while (blksize) {
386                 BUG_ON(!sg_miter_next(&host->sg_miter));
387
388                 len = min(host->sg_miter.length, blksize);
389
390                 blksize -= len;
391                 host->sg_miter.consumed = len;
392
393                 buf = host->sg_miter.addr;
394
395                 while (len) {
396                         if (chunk == 0) {
397                                 scratch = sdhci_readl(host, SDHCI_BUFFER);
398                                 chunk = 4;
399                         }
400
401                         *buf = scratch & 0xFF;
402
403                         buf++;
404                         scratch >>= 8;
405                         chunk--;
406                         len--;
407                 }
408         }
409
410         sg_miter_stop(&host->sg_miter);
411
412         local_irq_restore(flags);
413 }
414
/*
 * Write one block to the controller using PIO.
 *
 * Gathers bytes from the request's sg list into a 32-bit scratch word,
 * flushing it to the SDHCI_BUFFER register every 4 bytes or when the
 * block ends.  Runs with local interrupts disabled for the duration of
 * the block.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush on a full word, or a partial one at block end */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
459
/*
 * Service PIO transfers: move blocks between the sg list and the
 * controller for as long as the DATA/SPACE AVAILABLE present-state bit
 * stays set, stopping once host->blocks reaches zero.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
497
/*
 * Map @data for DMA (or reuse an earlier pre-mapping).
 *
 * Returns the number of mapped sg entries on success, -EIO if the
 * transfer does not fit the bounce buffer, or -ENOSPC if dma_map_sg()
 * mapped nothing.  On success the count and @cookie are cached in
 * @data->sg_count / @data->host_cookie so a later call with
 * COOKIE_PRE_MAPPED returns immediately.
 */
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
548
/*
 * Atomically map the page of @sg, disabling local interrupts; the
 * returned pointer accounts for sg->offset.  Pair with
 * sdhci_kunmap_atomic().
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}
554
/* Undo sdhci_kmap_atomic(): unmap @buffer and restore interrupt state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
560
/*
 * Fill in one ADMA2 descriptor (attributes @cmd, length @len, buffer
 * @addr), converting fields to little-endian.  The high address word is
 * written only for 64-bit ADMA descriptors.
 */
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
574
/* Set the ADMA2 END attribute on @desc, terminating the descriptor chain. */
static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
582
/*
 * Build the ADMA2 descriptor table for @data from its DMA-mapped sg list.
 *
 * ADMA buffer addresses must be 32-bit aligned; for an sg entry whose
 * start is misaligned, the leading (up to 3) bytes are routed through a
 * slot of the pre-allocated align buffer via an extra descriptor.  For
 * writes those bytes are copied into the align buffer here; for reads
 * they are copied back out in sdhci_adma_table_post().  The chain is
 * terminated either by flagging the last transfer descriptor or by
 * appending a nop-end descriptor, depending on
 * SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC.
 */
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			/* Advance to the next align-buffer slot */
			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* len may be 0 if the whole entry went through the align buffer */
		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
666
/*
 * Undo sdhci_adma_table_pre() after the transfer: for reads, copy any
 * bytes that were bounced through the align buffer back into the
 * misaligned heads of the corresponding sg entries.  Writes need no
 * post-processing.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			/* Align-buffer slots were consumed in sg order by _pre() */
			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
707
708 static u32 sdhci_sdma_address(struct sdhci_host *host)
709 {
710         if (host->bounce_buffer)
711                 return host->bounce_addr;
712         else
713                 return sg_dma_address(host->data->sg);
714 }
715
/*
 * Compute the requested timeout for @cmd in microseconds.
 *
 * Without data this is the command's busy timeout; with data it is
 * timeout_ns converted to us plus timeout_clks converted via the
 * current host clock (rounded up).
 */
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		/*
		 * NOTE(review): busy_timeout is in ms, so the * 1000 can
		 * wrap 32 bits for timeouts above ~71 minutes - presumably
		 * the core caps busy_timeout; verify against callers.
		 */
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
744
/*
 * Compute a software data timeout (in ns) for @cmd and store it in
 * host->data_timeout.  The estimate is the per-block target timeout
 * times the block count, plus the raw bus transfer time doubled as a
 * safety margin, plus a fixed command-transfer allowance.
 */
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		/* Prefer the measured clock; fall back to the programmed one */
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
777
/*
 * Compute the 4-bit value for the TIMEOUT_CONTROL register: the number
 * of doublings of the minimum hardware timeout needed to cover the
 * target timeout.  *too_big is cleared only when the target fits; the
 * quirk and unspecified-timeout early returns leave it set (with the
 * maximum usable count 0xE), so callers treat those conservatively.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		/* Only worth reporting when the HW timeout will actually be used */
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
833
834 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
835 {
836         u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
837         u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
838
839         if (host->flags & SDHCI_REQ_USE_DMA)
840                 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
841         else
842                 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
843
844         if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
845                 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
846         else
847                 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
848
849         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
850         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
851 }
852
853 static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
854 {
855         if (enable)
856                 host->ier |= SDHCI_INT_DATA_TIMEOUT;
857         else
858                 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
859         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
860         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
861 }
862
/*
 * Program the data timeout for @cmd, via the host's ->set_timeout hook
 * when provided, otherwise by computing the TIMEOUT_CONTROL value.  If
 * the requested timeout exceeds the hardware counter and the host has
 * SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, fall back to a software timeout and
 * mask the hardware data-timeout interrupt.
 */
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			/* Re-enable the HW timeout IRQ if it was masked earlier */
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
885
/*
 * Prepare the controller for the data phase of @cmd: program the timeout,
 * decide between SDMA, ADMA and PIO (honouring alignment/length quirks),
 * map the scatterlist for DMA, select the DMA mode in the host control
 * register, arm the transfer interrupts, and finally program block size
 * and block count.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
        u8 ctrl;
        struct mmc_data *data = cmd->data;

        host->data_timeout = 0;

        if (sdhci_data_line_cmd(cmd))
                sdhci_set_timeout(host, cmd);

        /* Commands with a busy phase but no data block end here. */
        if (!data)
                return;

        WARN_ON(host->data);

        /* Sanity checks */
        BUG_ON(data->blksz * data->blocks > 524288);
        BUG_ON(data->blksz > host->mmc->max_blk_size);
        BUG_ON(data->blocks > 65535);

        host->data = data;
        host->data_early = 0;
        host->data->bytes_xfered = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                struct scatterlist *sg;
                unsigned int length_mask, offset_mask;
                int i;

                /* Assume DMA; may be reverted below if a segment is unusable. */
                host->flags |= SDHCI_REQ_USE_DMA;

                /*
                 * FIXME: This doesn't account for merging when mapping the
                 * scatterlist.
                 *
                 * The assumption here being that alignment and lengths are
                 * the same after DMA mapping to device address space.
                 */
                length_mask = 0;
                offset_mask = 0;
                if (host->flags & SDHCI_USE_ADMA) {
                        if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
                                length_mask = 3;
                                /*
                                 * As we use up to 3 byte chunks to work
                                 * around alignment problems, we need to
                                 * check the offset as well.
                                 */
                                offset_mask = 3;
                        }
                } else {
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
                                length_mask = 3;
                        if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
                                offset_mask = 3;
                }

                if (unlikely(length_mask | offset_mask)) {
                        /* Any misaligned segment forces the whole request to PIO. */
                        for_each_sg(data->sg, sg, data->sg_len, i) {
                                if (sg->length & length_mask) {
                                        DBG("Reverting to PIO because of transfer size (%d)\n",
                                            sg->length);
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                                if (sg->offset & offset_mask) {
                                        DBG("Reverting to PIO because of bad alignment\n");
                                        host->flags &= ~SDHCI_REQ_USE_DMA;
                                        break;
                                }
                        }
                }
        }

        if (host->flags & SDHCI_REQ_USE_DMA) {
                int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

                if (sg_cnt <= 0) {
                        /*
                         * This only happens when someone fed
                         * us an invalid request.
                         */
                        WARN_ON(1);
                        host->flags &= ~SDHCI_REQ_USE_DMA;
                } else if (host->flags & SDHCI_USE_ADMA) {
                        /* Build the descriptor table and point hardware at it. */
                        sdhci_adma_table_pre(host, data, sg_cnt);

                        sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                sdhci_writel(host,
                                             (u64)host->adma_addr >> 32,
                                             SDHCI_ADMA_ADDRESS_HI);
                } else {
                        /* SDMA can only handle a single contiguous segment. */
                        WARN_ON(sg_cnt != 1);
                        sdhci_writel(host, sdhci_sdma_address(host),
                                     SDHCI_DMA_ADDRESS);
                }
        }

        /*
         * Always adjust the DMA selection as some controllers
         * (e.g. JMicron) can't do PIO properly when the selection
         * is ADMA.
         */
        if (host->version >= SDHCI_SPEC_200) {
                ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
                ctrl &= ~SDHCI_CTRL_DMA_MASK;
                if ((host->flags & SDHCI_REQ_USE_DMA) &&
                        (host->flags & SDHCI_USE_ADMA)) {
                        if (host->flags & SDHCI_USE_64_BIT_DMA)
                                ctrl |= SDHCI_CTRL_ADMA64;
                        else
                                ctrl |= SDHCI_CTRL_ADMA32;
                } else {
                        ctrl |= SDHCI_CTRL_SDMA;
                }
                sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
        }

        if (!(host->flags & SDHCI_REQ_USE_DMA)) {
                int flags;

                /* Set up the sg iterator for PIO done from interrupt context. */
                flags = SG_MITER_ATOMIC;
                if (host->data->flags & MMC_DATA_READ)
                        flags |= SG_MITER_TO_SG;
                else
                        flags |= SG_MITER_FROM_SG;
                sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
                host->blocks = data->blocks;
        }

        sdhci_set_transfer_irqs(host);

        /* Set the DMA boundary value and block size */
        sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
                     SDHCI_BLOCK_SIZE);
        sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
1024
1025 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1026                                     struct mmc_request *mrq)
1027 {
1028         return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1029                !mrq->cap_cmd_during_tfr;
1030 }
1031
/*
 * Program the SDHCI transfer mode register for @cmd: block-count enable,
 * multi-block, read direction, DMA enable and Auto-CMD12/23 selection.
 * For non-data commands the register is cleared (or its Auto-CMD bits
 * stripped), since stale Auto-CMD settings would corrupt the next command.
 */
static void sdhci_set_transfer_mode(struct sdhci_host *host,
        struct mmc_command *cmd)
{
        u16 mode = 0;
        struct mmc_data *data = cmd->data;

        if (data == NULL) {
                if (host->quirks2 &
                        SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
                        /* must not clear SDHCI_TRANSFER_MODE when tuning */
                        if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
                                sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
                } else {
                /* clear Auto CMD settings for no data CMDs */
                        mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
                        sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
                                SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
                }
                return;
        }

        WARN_ON(!host->data);

        /* Some hosts can only do single-block transfers (no block count). */
        if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
                mode = SDHCI_TRNS_BLK_CNT_EN;

        if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
                mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
                /*
                 * If we are sending CMD23, CMD12 never gets sent
                 * on successful completion (so no Auto-CMD12).
                 */
                if (sdhci_auto_cmd12(host, cmd->mrq) &&
                    (cmd->opcode != SD_IO_RW_EXTENDED))
                        mode |= SDHCI_TRNS_AUTO_CMD12;
                else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
                        /* Auto-CMD23 needs the block count in ARGUMENT2. */
                        mode |= SDHCI_TRNS_AUTO_CMD23;
                        sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
                }
        }

        if (data->flags & MMC_DATA_READ)
                mode |= SDHCI_TRNS_READ;
        if (host->flags & SDHCI_REQ_USE_DMA)
                mode |= SDHCI_TRNS_DMA;

        sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
1080
1081 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1082 {
1083         return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1084                 ((mrq->cmd && mrq->cmd->error) ||
1085                  (mrq->sbc && mrq->sbc->error) ||
1086                  (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1087                  (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1088 }
1089
1090 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1091 {
1092         int i;
1093
1094         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1095                 if (host->mrqs_done[i] == mrq) {
1096                         WARN_ON(1);
1097                         return;
1098                 }
1099         }
1100
1101         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1102                 if (!host->mrqs_done[i]) {
1103                         host->mrqs_done[i] = mrq;
1104                         break;
1105                 }
1106         }
1107
1108         WARN_ON(i >= SDHCI_MAX_MRQS);
1109
1110         tasklet_schedule(&host->finish_tasklet);
1111 }
1112
1113 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1114 {
1115         if (host->cmd && host->cmd->mrq == mrq)
1116                 host->cmd = NULL;
1117
1118         if (host->data_cmd && host->data_cmd->mrq == mrq)
1119                 host->data_cmd = NULL;
1120
1121         if (host->data && host->data->mrq == mrq)
1122                 host->data = NULL;
1123
1124         if (sdhci_needs_reset(host, mrq))
1125                 host->pending_reset = true;
1126
1127         __sdhci_finish_mrq(host, mrq);
1128 }
1129
/*
 * Complete the data phase of the current request: reset the controller's
 * state machines on error, unmap ADMA resources, account transferred
 * bytes, and either issue the stop command (CMD12) or finish the request.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
        struct mmc_command *data_cmd = host->data_cmd;
        struct mmc_data *data = host->data;

        host->data = NULL;
        host->data_cmd = NULL;

        /*
         * The controller needs a reset of internal state machines upon error
         * conditions.
         */
        if (data->error) {
                /* Only reset CMD if no other command is still in flight. */
                if (!host->cmd || host->cmd == data_cmd)
                        sdhci_do_reset(host, SDHCI_RESET_CMD);
                sdhci_do_reset(host, SDHCI_RESET_DATA);
        }

        if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
            (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
                sdhci_adma_table_post(host, data);

        /*
         * The specification states that the block count register must
         * be updated, but it does not specify at what point in the
         * data flow. That makes the register entirely useless to read
         * back so we have to assume that nothing made it to the card
         * in the event of an error.
         */
        if (data->error)
                data->bytes_xfered = 0;
        else
                data->bytes_xfered = data->blksz * data->blocks;

        /*
         * Need to send CMD12 if -
         * a) open-ended multiblock transfer (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
            (data->error ||
             !data->mrq->sbc)) {
                /*
                 * 'cap_cmd_during_tfr' request must not use the command line
                 * after mmc_command_done() has been called. It is upper layer's
                 * responsibility to send the stop command if required.
                 */
                if (data->mrq->cap_cmd_during_tfr) {
                        sdhci_finish_mrq(host, data->mrq);
                } else {
                        /* Avoid triggering warning in sdhci_send_command() */
                        host->cmd = NULL;
                        sdhci_send_command(host, data->stop);
                }
        } else {
                sdhci_finish_mrq(host, data->mrq);
        }
}
1188
1189 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1190                             unsigned long timeout)
1191 {
1192         if (sdhci_data_line_cmd(mrq->cmd))
1193                 mod_timer(&host->data_timer, timeout);
1194         else
1195                 mod_timer(&host->timer, timeout);
1196 }
1197
1198 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1199 {
1200         if (sdhci_data_line_cmd(mrq->cmd))
1201                 del_timer(&host->data_timer);
1202         else
1203                 del_timer(&host->timer);
1204 }
1205
/*
 * Issue @cmd to the controller: wait for the inhibit bits to clear,
 * prepare any data phase, program argument and transfer mode, encode the
 * response flags, arm the software timeout, and finally write the command
 * register (which starts execution).
 *
 * Must be called with host->lock held.
 */
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
        int flags;
        u32 mask;
        unsigned long timeout;

        WARN_ON(host->cmd);

        /* Initially, a command has no error */
        cmd->error = 0;

        if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
            cmd->opcode == MMC_STOP_TRANSMISSION)
                cmd->flags |= MMC_RSP_BUSY;

        /* Wait max 10 ms */
        timeout = 10;

        mask = SDHCI_CMD_INHIBIT;
        if (sdhci_data_line_cmd(cmd))
                mask |= SDHCI_DATA_INHIBIT;

        /* We shouldn't wait for data inhibit for stop commands, even
           though they might use busy signaling */
        if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
                mask &= ~SDHCI_DATA_INHIBIT;

        while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
                if (timeout == 0) {
                        pr_err("%s: Controller never released inhibit bit(s).\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        cmd->error = -EIO;
                        sdhci_finish_mrq(host, cmd->mrq);
                        return;
                }
                timeout--;
                mdelay(1);
        }

        host->cmd = cmd;
        if (sdhci_data_line_cmd(cmd)) {
                WARN_ON(host->data_cmd);
                host->data_cmd = cmd;
        }

        sdhci_prepare_data(host, cmd);

        sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

        sdhci_set_transfer_mode(host, cmd);

        /* SDHCI cannot express a long (136-bit) response with busy. */
        if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
                pr_err("%s: Unsupported response type!\n",
                        mmc_hostname(host->mmc));
                cmd->error = -EINVAL;
                sdhci_finish_mrq(host, cmd->mrq);
                return;
        }

        /* Translate MMC response flags into the SDHCI encoding. */
        if (!(cmd->flags & MMC_RSP_PRESENT))
                flags = SDHCI_CMD_RESP_NONE;
        else if (cmd->flags & MMC_RSP_136)
                flags = SDHCI_CMD_RESP_LONG;
        else if (cmd->flags & MMC_RSP_BUSY)
                flags = SDHCI_CMD_RESP_SHORT_BUSY;
        else
                flags = SDHCI_CMD_RESP_SHORT;

        if (cmd->flags & MMC_RSP_CRC)
                flags |= SDHCI_CMD_CRC;
        if (cmd->flags & MMC_RSP_OPCODE)
                flags |= SDHCI_CMD_INDEX;

        /* CMD19 is special in that the Data Present Select should be set */
        if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
            cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
                flags |= SDHCI_CMD_DATA;

        /* Software watchdog: data timeout, busy timeout, or 10s default. */
        timeout = jiffies;
        if (host->data_timeout)
                timeout += nsecs_to_jiffies(host->data_timeout);
        else if (!cmd->data && cmd->busy_timeout > 9000)
                timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
        else
                timeout += 10 * HZ;
        sdhci_mod_timer(host, cmd->mrq, timeout);

        sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
1297
1298 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1299 {
1300         int i, reg;
1301
1302         for (i = 0; i < 4; i++) {
1303                 reg = SDHCI_RESPONSE + (3 - i) * 4;
1304                 cmd->resp[i] = sdhci_readl(host, reg);
1305         }
1306
1307         if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1308                 return;
1309
1310         /* CRC is stripped so we need to do some shifting */
1311         for (i = 0; i < 4; i++) {
1312                 cmd->resp[i] <<= 8;
1313                 if (i != 3)
1314                         cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1315         }
1316 }
1317
/*
 * Handle command-complete: capture the response, notify the upper layer
 * for cap_cmd_during_tfr requests, wait for busy to end if needed, then
 * either chain the real command after CMD23 or finish the request.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
        struct mmc_command *cmd = host->cmd;

        host->cmd = NULL;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        sdhci_read_rsp_136(host, cmd);
                } else {
                        cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
                }
        }

        if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
                mmc_command_done(host->mmc, cmd->mrq);

        /*
         * The host can send an interrupt when the busy state has
         * ended, allowing us to wait without wasting CPU cycles.
         * The busy signal uses DAT0 so this is similar to waiting
         * for data to complete.
         *
         * Note: The 1.0 specification is a bit ambiguous about this
         *       feature so there might be some problems with older
         *       controllers.
         */
        if (cmd->flags & MMC_RSP_BUSY) {
                if (cmd->data) {
                        DBG("Cannot wait for busy signal when also doing a data transfer");
                } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
                           cmd == host->data_cmd) {
                        /* Command complete before busy is ended */
                        return;
                }
        }

        /* Finished CMD23, now send actual command. */
        if (cmd == cmd->mrq->sbc) {
                sdhci_send_command(host, cmd->mrq->cmd);
        } else {

                /* Processed actual command. */
                if (host->data && host->data_early)
                        sdhci_finish_data(host);

                if (!cmd->data)
                        sdhci_finish_mrq(host, cmd->mrq);
        }
}
1368
1369 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1370 {
1371         u16 preset = 0;
1372
1373         switch (host->timing) {
1374         case MMC_TIMING_UHS_SDR12:
1375                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1376                 break;
1377         case MMC_TIMING_UHS_SDR25:
1378                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1379                 break;
1380         case MMC_TIMING_UHS_SDR50:
1381                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1382                 break;
1383         case MMC_TIMING_UHS_SDR104:
1384         case MMC_TIMING_MMC_HS200:
1385                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1386                 break;
1387         case MMC_TIMING_UHS_DDR50:
1388         case MMC_TIMING_MMC_DDR52:
1389                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1390                 break;
1391         case MMC_TIMING_MMC_HS400:
1392                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1393                 break;
1394         default:
1395                 pr_warn("%s: Invalid UHS-I mode selected\n",
1396                         mmc_hostname(host->mmc));
1397                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1398                 break;
1399         }
1400         return preset;
1401 }
1402
/*
 * Compute the SDHCI clock control register value that gets closest to
 * (without exceeding) @clock, handling preset values, programmable clock
 * mode (SPEC 3.00+) and the different divisor rules of SPEC 2.00 vs 3.00.
 * The resulting frequency is written to *actual_clock.
 */
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
                   unsigned int *actual_clock)
{
        int div = 0; /* Initialized for compiler warning */
        int real_div = div, clk_mul = 1;
        u16 clk = 0;
        bool switch_base_clk = false;

        if (host->version >= SDHCI_SPEC_300) {
                if (host->preset_enabled) {
                        u16 pre_val;

                        /* Divisor comes straight from the preset register. */
                        clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                        pre_val = sdhci_get_preset_value(host);
                        div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
                                >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
                        if (host->clk_mul &&
                                (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div + 1;
                                clk_mul = host->clk_mul;
                        } else {
                                real_div = max_t(int, 1, div << 1);
                        }
                        goto clock_set;
                }

                /*
                 * Check if the Host Controller supports Programmable Clock
                 * Mode.
                 */
                if (host->clk_mul) {
                        for (div = 1; div <= 1024; div++) {
                                if ((host->max_clk * host->clk_mul / div)
                                        <= clock)
                                        break;
                        }
                        if ((host->max_clk * host->clk_mul / div) <= clock) {
                                /*
                                 * Set Programmable Clock Mode in the Clock
                                 * Control register.
                                 */
                                clk = SDHCI_PROG_CLOCK_MODE;
                                real_div = div;
                                clk_mul = host->clk_mul;
                                /* Hardware encodes divisor N as N-1. */
                                div--;
                        } else {
                                /*
                                 * Divisor can be too small to reach clock
                                 * speed requirement. Then use the base clock.
                                 */
                                switch_base_clk = true;
                        }
                }

                if (!host->clk_mul || switch_base_clk) {
                        /* Version 3.00 divisors must be a multiple of 2. */
                        if (host->max_clk <= clock)
                                div = 1;
                        else {
                                for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
                                     div += 2) {
                                        if ((host->max_clk / div) <= clock)
                                                break;
                                }
                        }
                        real_div = div;
                        /* Register encodes divisor/2. */
                        div >>= 1;
                        if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
                                && !div && host->max_clk <= 25000000)
                                div = 1;
                }
        } else {
                /* Version 2.00 divisors must be a power of 2. */
                for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
                        if ((host->max_clk / div) <= clock)
                                break;
                }
                real_div = div;
                div >>= 1;
        }

clock_set:
        if (real_div)
                *actual_clock = (host->max_clk * clk_mul) / real_div;
        /* Split the divisor across the low and high register fields. */
        clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
        clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
                << SDHCI_DIVIDER_HI_SHIFT;

        return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1495
/*
 * Enable the internal clock with the pre-computed divider @clk, poll for
 * it to stabilise (max 20 ms), then gate it through to the card.
 */
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
        ktime_t timeout;

        clk |= SDHCI_CLOCK_INT_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

        /* Wait max 20 ms */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (1) {
                bool timedout = ktime_after(ktime_get(), timeout);

                /*
                 * Sample the timeout before reading the register so that a
                 * stable clock seen on the final read still wins over the
                 * deadline having passed.
                 */
                clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
                if (clk & SDHCI_CLOCK_INT_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
                        return;
                }
                udelay(10);
        }

        clk |= SDHCI_CLOCK_CARD_EN;
        sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1524
1525 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1526 {
1527         u16 clk;
1528
1529         host->mmc->actual_clock = 0;
1530
1531         sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1532
1533         if (clock == 0)
1534                 return;
1535
1536         clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1537         sdhci_enable_clk(host, clk);
1538 }
1539 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1540
1541 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1542                                 unsigned short vdd)
1543 {
1544         struct mmc_host *mmc = host->mmc;
1545
1546         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1547
1548         if (mode != MMC_POWER_OFF)
1549                 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1550         else
1551                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1552 }
1553
/*
 * Power handling without an external regulator: translate the requested
 * VDD bit into an SDHCI voltage select, then sequence the power register
 * writes carefully around the various controller quirks.
 */
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
                           unsigned short vdd)
{
        u8 pwr = 0;

        if (mode != MMC_POWER_OFF) {
                switch (1 << vdd) {
                case MMC_VDD_165_195:
                /*
                 * Without a regulator, SDHCI does not support 2.0v
                 * so we only get here if the driver deliberately
                 * added the 2.0v range to ocr_avail. Map it to 1.8v
                 * for the purpose of turning on the power.
                 */
                case MMC_VDD_20_21:
                        pwr = SDHCI_POWER_180;
                        break;
                case MMC_VDD_29_30:
                case MMC_VDD_30_31:
                        pwr = SDHCI_POWER_300;
                        break;
                case MMC_VDD_32_33:
                case MMC_VDD_33_34:
                        pwr = SDHCI_POWER_330;
                        break;
                default:
                        WARN(1, "%s: Invalid vdd %#x\n",
                             mmc_hostname(host->mmc), vdd);
                        break;
                }
        }

        if (host->pwr == pwr)
                return;

        host->pwr = pwr;

        if (pwr == 0) {
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_off(host);
        } else {
                /*
                 * Spec says that we should clear the power reg before setting
                 * a new value. Some controllers don't seem to like this though.
                 */
                if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
                        sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

                /*
                 * At least the Marvell CaFe chip gets confused if we set the
                 * voltage and set turn on power at the same time, so set the
                 * voltage first.
                 */
                if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
                        sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

                pwr |= SDHCI_POWER_ON;

                sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

                if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
                        sdhci_runtime_pm_bus_on(host);

                /*
                 * Some controllers need an extra 10ms delay before they
                 * can apply clock after applying power
                 */
                if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
                        mdelay(10);
        }
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1627
1628 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1629                      unsigned short vdd)
1630 {
1631         if (IS_ERR(host->mmc->supply.vmmc))
1632                 sdhci_set_power_noreg(host, mode, vdd);
1633         else
1634                 sdhci_set_power_reg(host, mode, vdd);
1635 }
1636 EXPORT_SYMBOL_GPL(sdhci_set_power);
1637
1638 /*****************************************************************************\
1639  *                                                                           *
1640  * MMC callbacks                                                             *
1641  *                                                                           *
1642 \*****************************************************************************/
1643
/*
 * mmc_host_ops.request callback: check card presence, strip the stop
 * command when Auto-CMD12 will issue it, and start the request (CMD23
 * first when it must be sent manually).
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sdhci_host *host;
        int present;
        unsigned long flags;

        host = mmc_priv(mmc);

        /* Firstly check card presence */
        present = mmc->ops->get_cd(mmc);

        spin_lock_irqsave(&host->lock, flags);

        sdhci_led_activate(host);

        /*
         * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
         * requests if Auto-CMD12 is enabled.
         */
        if (sdhci_auto_cmd12(host, mrq)) {
                if (mrq->stop) {
                        mrq->data->stop = NULL;
                        mrq->stop = NULL;
                }
        }

        if (!present || host->flags & SDHCI_DEVICE_DEAD) {
                mrq->cmd->error = -ENOMEDIUM;
                sdhci_finish_mrq(host, mrq);
        } else {
                /* Send CMD23 by hand unless the controller auto-issues it. */
                if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
                        sdhci_send_command(host, mrq->sbc);
                else
                        sdhci_send_command(host, mrq->cmd);
        }

        /* Order MMIO writes before dropping the spinlock. */
        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);
}
1683
1684 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1685 {
1686         u8 ctrl;
1687
1688         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1689         if (width == MMC_BUS_WIDTH_8) {
1690                 ctrl &= ~SDHCI_CTRL_4BITBUS;
1691                 ctrl |= SDHCI_CTRL_8BITBUS;
1692         } else {
1693                 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1694                         ctrl &= ~SDHCI_CTRL_8BITBUS;
1695                 if (width == MMC_BUS_WIDTH_4)
1696                         ctrl |= SDHCI_CTRL_4BITBUS;
1697                 else
1698                         ctrl &= ~SDHCI_CTRL_4BITBUS;
1699         }
1700         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1701 }
1702 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1703
1704 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1705 {
1706         u16 ctrl_2;
1707
1708         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1709         /* Select Bus Speed Mode for host */
1710         ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1711         if ((timing == MMC_TIMING_MMC_HS200) ||
1712             (timing == MMC_TIMING_UHS_SDR104))
1713                 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1714         else if (timing == MMC_TIMING_UHS_SDR12)
1715                 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1716         else if (timing == MMC_TIMING_UHS_SDR25)
1717                 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1718         else if (timing == MMC_TIMING_UHS_SDR50)
1719                 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1720         else if ((timing == MMC_TIMING_UHS_DDR50) ||
1721                  (timing == MMC_TIMING_MMC_DDR52))
1722                 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1723         else if (timing == MMC_TIMING_MMC_HS400)
1724                 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1725         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1726 }
1727 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1728
/*
 * mmc_host_ops ->set_ios: apply the clock, power, bus width, timing,
 * driver strength and preset-value settings requested by the MMC core.
 * The order of register writes below follows the SDHC spec and various
 * controller quirks; do not reorder casually.
 */
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		/* Dead host: only honour a power-off via the regulator */
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	/* Presets are re-enabled later once a UHS timing is selected */
	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		/* Some controllers derive the data timeout from SDCLK */
		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		/* Set HISPD for every high-speed-or-better timing */
		if (ios->timing == MMC_TIMING_SD_HS ||
		     ios->timing == MMC_TIMING_MMC_HS ||
		     ios->timing == MMC_TIMING_MMC_HS400 ||
		     ios->timing == MMC_TIMING_MMC_HS200 ||
		     ios->timing == MMC_TIMING_MMC_DDR52 ||
		     ios->timing == MMC_TIMING_UHS_SDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR104 ||
		     ios->timing == MMC_TIMING_UHS_DDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable (again, before changing UHS mode) */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			/* Let the controller supply clock/driver presets */
			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
1886
1887 static int sdhci_get_cd(struct mmc_host *mmc)
1888 {
1889         struct sdhci_host *host = mmc_priv(mmc);
1890         int gpio_cd = mmc_gpio_get_cd(mmc);
1891
1892         if (host->flags & SDHCI_DEVICE_DEAD)
1893                 return 0;
1894
1895         /* If nonremovable, assume that the card is always present. */
1896         if (!mmc_card_is_removable(host->mmc))
1897                 return 1;
1898
1899         /*
1900          * Try slot gpio detect, if defined it take precedence
1901          * over build in controller functionality
1902          */
1903         if (gpio_cd >= 0)
1904                 return !!gpio_cd;
1905
1906         /* If polling, assume that the card is always present. */
1907         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1908                 return 1;
1909
1910         /* Host native card detect */
1911         return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1912 }
1913
1914 static int sdhci_check_ro(struct sdhci_host *host)
1915 {
1916         unsigned long flags;
1917         int is_readonly;
1918
1919         spin_lock_irqsave(&host->lock, flags);
1920
1921         if (host->flags & SDHCI_DEVICE_DEAD)
1922                 is_readonly = 0;
1923         else if (host->ops->get_ro)
1924                 is_readonly = host->ops->get_ro(host);
1925         else
1926                 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1927                                 & SDHCI_WRITE_PROTECT);
1928
1929         spin_unlock_irqrestore(&host->lock, flags);
1930
1931         /* This quirk needs to be replaced by a callback-function later */
1932         return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1933                 !is_readonly : is_readonly;
1934 }
1935
1936 #define SAMPLE_COUNT    5
1937
1938 static int sdhci_get_ro(struct mmc_host *mmc)
1939 {
1940         struct sdhci_host *host = mmc_priv(mmc);
1941         int i, ro_count;
1942
1943         if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1944                 return sdhci_check_ro(host);
1945
1946         ro_count = 0;
1947         for (i = 0; i < SAMPLE_COUNT; i++) {
1948                 if (sdhci_check_ro(host)) {
1949                         if (++ro_count > SAMPLE_COUNT / 2)
1950                                 return 1;
1951                 }
1952                 msleep(30);
1953         }
1954         return 0;
1955 }
1956
1957 static void sdhci_hw_reset(struct mmc_host *mmc)
1958 {
1959         struct sdhci_host *host = mmc_priv(mmc);
1960
1961         if (host->ops && host->ops->hw_reset)
1962                 host->ops->hw_reset(host);
1963 }
1964
1965 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1966 {
1967         if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1968                 if (enable)
1969                         host->ier |= SDHCI_INT_CARD_INT;
1970                 else
1971                         host->ier &= ~SDHCI_INT_CARD_INT;
1972
1973                 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1974                 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1975                 mmiowb();
1976         }
1977 }
1978
/*
 * mmc_host_ops ->enable_sdio_irq: enable/disable the SDIO card interrupt.
 * A runtime-PM reference is held for as long as the interrupt is enabled
 * so the controller cannot be suspended while the card may raise it.
 */
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* Take the PM reference before enabling the interrupt */
	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	/* Drop the PM reference only after the interrupt is disabled */
	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2000
/*
 * mmc_host_ops ->start_signal_voltage_switch: switch the I/O signalling
 * voltage (3.3V / 1.8V / 1.2V) via the vqmmc regulator and the 1.8V
 * enable bit in Host Control 2. Returns 0 on success, -EINVAL if the
 * host does not support the requested level, -EIO if the regulator
 * switch fails, or -EAGAIN if the controller did not latch the change.
 */
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		/* Controller still reports 1.8V: the switch did not stick */
		pr_warn("%s: 3.3V regulator output did not became stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controller need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not became stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		/* 1.2V has no HC2 bit; only the regulator is switched */
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2095
2096 static int sdhci_card_busy(struct mmc_host *mmc)
2097 {
2098         struct sdhci_host *host = mmc_priv(mmc);
2099         u32 present_state;
2100
2101         /* Check whether DAT[0] is 0 */
2102         present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2103
2104         return !(present_state & SDHCI_DATA_0_LVL_MASK);
2105 }
2106
2107 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2108 {
2109         struct sdhci_host *host = mmc_priv(mmc);
2110         unsigned long flags;
2111
2112         spin_lock_irqsave(&host->lock, flags);
2113         host->flags |= SDHCI_HS400_TUNING;
2114         spin_unlock_irqrestore(&host->lock, flags);
2115
2116         return 0;
2117 }
2118
2119 void sdhci_start_tuning(struct sdhci_host *host)
2120 {
2121         u16 ctrl;
2122
2123         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2124         ctrl |= SDHCI_CTRL_EXEC_TUNING;
2125         if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2126                 ctrl |= SDHCI_CTRL_TUNED_CLK;
2127         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2128
2129         /*
2130          * As per the Host Controller spec v3.00, tuning command
2131          * generates Buffer Read Ready interrupt, so enable that.
2132          *
2133          * Note: The spec clearly says that when tuning sequence
2134          * is being performed, the controller does not generate
2135          * interrupts other than Buffer Read Ready interrupt. But
2136          * to make sure we don't hit a controller bug, we _only_
2137          * enable Buffer Read Ready interrupt here.
2138          */
2139         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2140         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2141 }
2142 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2143
/*
 * Restore the normal interrupt enable/signal masks that
 * sdhci_start_tuning() temporarily replaced with Buffer Read Ready only.
 */
void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2150
2151 void sdhci_reset_tuning(struct sdhci_host *host)
2152 {
2153         u16 ctrl;
2154
2155         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2156         ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2157         ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2158         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2159 }
2160 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2161
/*
 * Bail out of a failed tuning sequence: drop the tuned-clock state,
 * reset the CMD and DATA circuits, restore the normal interrupt masks
 * and have the core send the matching abort command for @opcode.
 */
static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}
2173
/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;
	u32 b = host->sdma_boundary;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here. (HS200 over an 8-bit bus uses a 128-byte
	 * tuning block instead.)
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	/* Completion is via buf_ready_int below, not the normal IRQ path */
	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));

}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2232
/*
 * Run the tuning loop: issue the tuning command repeatedly until the
 * controller clears Execute Tuning (success sets Tuned Clock), a
 * timeout aborts, or MAX_TUNING_LOOP iterations give up and fall back
 * to the fixed sampling clock.
 */
static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		/* tuning_done stays 0 if the Buffer Read Ready IRQ timed out */
		if (!host->tuning_done) {
			pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
				 mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return; /* Success! */
			/* Execute Tuning cleared without a tuned clock: failed */
			break;
		}

		/* Spec does not require a delay between tuning cycles */
		if (host->tuning_delay > 0)
			mdelay(host->tuning_delay);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
}
2269
/*
 * mmc_host_ops ->execute_tuning: decide whether the current timing
 * needs tuning, set up periodic re-tuning, and run the tuning sequence
 * (or delegate it to a platform hook). Returns 0 on success or when no
 * tuning is required.
 */
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	/* Set by ->prepare_hs400_tuning when HS400 is the end goal */
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	/* A platform hook replaces the whole generic sequence */
	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	host->mmc->retune_period = tuning_count;

	/* Negative delay means "unset": default to 1ms for CMD19 only */
	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	__sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2338
2339 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2340 {
2341         /* Host Controller v3.00 defines preset value registers */
2342         if (host->version < SDHCI_SPEC_300)
2343                 return;
2344
2345         /*
2346          * We only enable or disable Preset Value if they are not already
2347          * enabled or disabled respectively. Otherwise, we bail out.
2348          */
2349         if (host->preset_enabled != enable) {
2350                 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2351
2352                 if (enable)
2353                         ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2354                 else
2355                         ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2356
2357                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2358
2359                 if (enable)
2360                         host->flags |= SDHCI_PV_ENABLED;
2361                 else
2362                         host->flags &= ~SDHCI_PV_ENABLED;
2363
2364                 host->preset_enabled = enable;
2365         }
2366 }
2367
2368 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2369                                 int err)
2370 {
2371         struct sdhci_host *host = mmc_priv(mmc);
2372         struct mmc_data *data = mrq->data;
2373
2374         if (data->host_cookie != COOKIE_UNMAPPED)
2375                 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2376                              mmc_get_dma_dir(data));
2377
2378         data->host_cookie = COOKIE_UNMAPPED;
2379 }
2380
2381 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2382 {
2383         struct sdhci_host *host = mmc_priv(mmc);
2384
2385         mrq->data->host_cookie = COOKIE_UNMAPPED;
2386
2387         /*
2388          * No pre-mapping in the pre hook if we're using the bounce buffer,
2389          * for that we would need two bounce buffers since one buffer is
2390          * in flight when this is getting called.
2391          */
2392         if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2393                 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2394 }
2395
2396 static inline bool sdhci_has_requests(struct sdhci_host *host)
2397 {
2398         return host->cmd || host->data_cmd;
2399 }
2400
/*
 * Fail every request currently in flight with @err. Caller must hold
 * host->lock. The data command is finished first in case it shares an
 * mrq with the pending command.
 */
static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}
2413
/*
 * mmc_host_ops ->card_event: handle a card insert/remove notification.
 * If the card disappeared while requests were in flight, reset the
 * controller and fail those requests with -ENOMEDIUM.
 */
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	/* get_cd() may sleep, so call it before taking the lock */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
2443
/* mmc_host_ops callbacks implemented by the generic SDHCI driver. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
2459
2460 /*****************************************************************************\
2461  *                                                                           *
2462  * Tasklets                                                                  *
2463  *                                                                           *
2464 \*****************************************************************************/
2465
/*
 * Retire one completed request from host->mrqs_done[].
 *
 * Returns true when there is nothing (more) to do: either no completed
 * request was found, or retirement must be deferred until the command
 * and data lines become free so the controller can be reset.  Returns
 * false after handing one mrq back to the core, in which case the caller
 * should call again to drain any further completed requests.
 */
static bool sdhci_request_done(struct sdhci_host *host)
{
        unsigned long flags;
        struct mmc_request *mrq;
        int i;

        spin_lock_irqsave(&host->lock, flags);

        /* Find the first occupied completion slot. */
        for (i = 0; i < SDHCI_MAX_MRQS; i++) {
                mrq = host->mrqs_done[i];
                if (mrq)
                        break;
        }

        if (!mrq) {
                spin_unlock_irqrestore(&host->lock, flags);
                return true;
        }

        /* The request completed, so its timeout is no longer needed. */
        sdhci_del_timer(host, mrq);

        /*
         * Always unmap the data buffers if they were mapped by
         * sdhci_prepare_data() whenever we finish with a request.
         * This avoids leaking DMA mappings on error.
         */
        if (host->flags & SDHCI_REQ_USE_DMA) {
                struct mmc_data *data = mrq->data;

                if (data && data->host_cookie == COOKIE_MAPPED) {
                        if (host->bounce_buffer) {
                                /*
                                 * On reads, copy the bounced data into the
                                 * sglist
                                 */
                                if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
                                        unsigned int length = data->bytes_xfered;

                                        if (length > host->bounce_buffer_size) {
                                                pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
                                                       mmc_hostname(host->mmc),
                                                       host->bounce_buffer_size,
                                                       data->bytes_xfered);
                                                /* Cap it down and continue */
                                                length = host->bounce_buffer_size;
                                        }
                                        dma_sync_single_for_cpu(
                                                host->mmc->parent,
                                                host->bounce_addr,
                                                host->bounce_buffer_size,
                                                DMA_FROM_DEVICE);
                                        sg_copy_from_buffer(data->sg,
                                                data->sg_len,
                                                host->bounce_buffer,
                                                length);
                                } else {
                                        /* No copying, just switch ownership */
                                        dma_sync_single_for_cpu(
                                                host->mmc->parent,
                                                host->bounce_addr,
                                                host->bounce_buffer_size,
                                                mmc_get_dma_dir(data));
                                }
                        } else {
                                /* Unmap the raw data */
                                dma_unmap_sg(mmc_dev(host->mmc), data->sg,
                                             data->sg_len,
                                             mmc_get_dma_dir(data));
                        }
                        data->host_cookie = COOKIE_UNMAPPED;
                }
        }

        /*
         * The controller needs a reset of internal state machines
         * upon error conditions.
         */
        if (sdhci_needs_reset(host, mrq)) {
                /*
                 * Do not finish until command and data lines are available for
                 * reset. Note there can only be one other mrq, so it cannot
                 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
                 * would both be null.
                 */
                if (host->cmd || host->data_cmd) {
                        spin_unlock_irqrestore(&host->lock, flags);
                        return true;
                }

                /* Some controllers need this kick or reset won't work here */
                if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
                        /* This is to force an update */
                        host->ops->set_clock(host, host->clock);

                /* Spec says we should do both at the same time, but Ricoh
                   controllers do not like that. */
                sdhci_do_reset(host, SDHCI_RESET_CMD);
                sdhci_do_reset(host, SDHCI_RESET_DATA);

                host->pending_reset = false;
        }

        /* Turn the activity LED off once the host is fully idle. */
        if (!sdhci_has_requests(host))
                sdhci_led_deactivate(host);

        /* Release the completion slot before dropping the lock. */
        host->mrqs_done[i] = NULL;

        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);

        /* Hand the finished request back to the mmc core (may re-enter). */
        mmc_request_done(host->mmc, mrq);

        return false;
}
2580
/*
 * Completion tasklet: drain every finished request.  sdhci_request_done()
 * retires one mrq per call and returns true once nothing is left to do.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
        struct sdhci_host *host = (struct sdhci_host *)param;

        for (;;) {
                if (sdhci_request_done(host))
                        break;
        }
}
2588
/*
 * Software timeout for commands that do not use the data lines.  If the
 * hardware never raised a command-complete interrupt, fail the command
 * with -ETIMEDOUT and finish its request.
 */
static void sdhci_timeout_timer(struct timer_list *t)
{
        struct sdhci_host *host;
        unsigned long flags;

        host = from_timer(host, t, timer);

        spin_lock_irqsave(&host->lock, flags);

        /* Only non-data commands; data commands use the data timer below. */
        if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
                pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
                       mmc_hostname(host->mmc));
                sdhci_dumpregs(host);

                host->cmd->error = -ETIMEDOUT;
                sdhci_finish_mrq(host, host->cmd->mrq);
        }

        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);
}
2610
/*
 * Software timeout for data transfers and data-line commands.  Whichever
 * stage was still outstanding (data transfer, data command, or a command
 * using the data lines) is failed with -ETIMEDOUT.
 */
static void sdhci_timeout_data_timer(struct timer_list *t)
{
        struct sdhci_host *host;
        unsigned long flags;

        host = from_timer(host, t, data_timer);

        spin_lock_irqsave(&host->lock, flags);

        if (host->data || host->data_cmd ||
            (host->cmd && sdhci_data_line_cmd(host->cmd))) {
                pr_err("%s: Timeout waiting for hardware interrupt.\n",
                       mmc_hostname(host->mmc));
                sdhci_dumpregs(host);

                /* Fail whichever of the three is actually pending. */
                if (host->data) {
                        host->data->error = -ETIMEDOUT;
                        sdhci_finish_data(host);
                } else if (host->data_cmd) {
                        host->data_cmd->error = -ETIMEDOUT;
                        sdhci_finish_mrq(host, host->data_cmd->mrq);
                } else {
                        host->cmd->error = -ETIMEDOUT;
                        sdhci_finish_mrq(host, host->cmd->mrq);
                }
        }

        mmiowb();
        spin_unlock_irqrestore(&host->lock, flags);
}
2641
2642 /*****************************************************************************\
2643  *                                                                           *
2644  * Interrupt handling                                                        *
2645  *                                                                           *
2646 \*****************************************************************************/
2647
/*
 * Handle command-related interrupt bits in @intmask.  Some conditions are
 * reclassified as data errors by OR-ing bits into *@intmask_p so that the
 * subsequent sdhci_data_irq() call processes them.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
{
        /* Handle auto-CMD12 error */
        if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
                struct mmc_request *mrq = host->data_cmd->mrq;
                u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
                int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
                                   SDHCI_INT_DATA_TIMEOUT :
                                   SDHCI_INT_DATA_CRC;

                /* Treat auto-CMD12 error the same as data error */
                if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
                        *intmask_p |= data_err_bit;
                        return;
                }
        }

        if (!host->cmd) {
                /*
                 * SDHCI recovers from errors by resetting the cmd and data
                 * circuits.  Until that is done, there very well might be more
                 * interrupts, so ignore them in that case.
                 */
                if (host->pending_reset)
                        return;
                pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
                       mmc_hostname(host->mmc), (unsigned)intmask);
                sdhci_dumpregs(host);
                return;
        }

        if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
                       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
                if (intmask & SDHCI_INT_TIMEOUT)
                        host->cmd->error = -ETIMEDOUT;
                else
                        host->cmd->error = -EILSEQ;

                /* Treat data command CRC error the same as data CRC error */
                if (host->cmd->data &&
                    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
                     SDHCI_INT_CRC) {
                        host->cmd = NULL;
                        *intmask_p |= SDHCI_INT_DATA_CRC;
                        return;
                }

                sdhci_finish_mrq(host, host->cmd->mrq);
                return;
        }

        /* Handle auto-CMD23 error */
        if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
                struct mmc_request *mrq = host->cmd->mrq;
                u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
                int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
                          -ETIMEDOUT :
                          -EILSEQ;

                /* Auto-CMD23 errors are reported on the sbc command. */
                if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
                        mrq->sbc->error = err;
                        sdhci_finish_mrq(host, mrq);
                        return;
                }
        }

        /* Normal path: the command response has arrived. */
        if (intmask & SDHCI_INT_RESPONSE)
                sdhci_finish_command(host);
}
2717
/*
 * Dump the registers and the entire ADMA descriptor table after an ADMA
 * error, walking from the start of the table until the descriptor with
 * the END attribute.
 */
static void sdhci_adma_show_error(struct sdhci_host *host)
{
        void *desc = host->adma_table;
        dma_addr_t dma = host->adma_addr;

        sdhci_dumpregs(host);

        while (true) {
                struct sdhci_adma2_64_desc *dma_desc = desc;

                /* 64-bit descriptors carry an extra high address word. */
                if (host->flags & SDHCI_USE_64_BIT_DMA)
                        SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
                            (unsigned long long)dma,
                            le32_to_cpu(dma_desc->addr_hi),
                            le32_to_cpu(dma_desc->addr_lo),
                            le16_to_cpu(dma_desc->len),
                            le16_to_cpu(dma_desc->cmd));
                else
                        SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
                            (unsigned long long)dma,
                            le32_to_cpu(dma_desc->addr_lo),
                            le16_to_cpu(dma_desc->len),
                            le16_to_cpu(dma_desc->cmd));

                desc += host->desc_sz;
                dma += host->desc_sz;

                /* dma_desc still points at the entry just printed. */
                if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
                        break;
        }
}
2749
/*
 * Handle data-related interrupt bits in @intmask: tuning completion,
 * busy-end signalling, data errors, PIO transfers, SDMA boundary
 * restarts and transfer completion.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
        u32 command;

        /* CMD19 generates _only_ Buffer Read Ready interrupt */
        if (intmask & SDHCI_INT_DATA_AVAIL) {
                command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
                if (command == MMC_SEND_TUNING_BLOCK ||
                    command == MMC_SEND_TUNING_BLOCK_HS200) {
                        host->tuning_done = 1;
                        wake_up(&host->buf_ready_int);
                        return;
                }
        }

        if (!host->data) {
                struct mmc_command *data_cmd = host->data_cmd;

                /*
                 * The "data complete" interrupt is also used to
                 * indicate that a busy state has ended. See comment
                 * above in sdhci_cmd_irq().
                 */
                if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
                        if (intmask & SDHCI_INT_DATA_TIMEOUT) {
                                host->data_cmd = NULL;
                                data_cmd->error = -ETIMEDOUT;
                                sdhci_finish_mrq(host, data_cmd->mrq);
                                return;
                        }
                        if (intmask & SDHCI_INT_DATA_END) {
                                host->data_cmd = NULL;
                                /*
                                 * Some cards handle busy-end interrupt
                                 * before the command completed, so make
                                 * sure we do things in the proper order.
                                 */
                                if (host->cmd == data_cmd)
                                        return;

                                sdhci_finish_mrq(host, data_cmd->mrq);
                                return;
                        }
                }

                /*
                 * SDHCI recovers from errors by resetting the cmd and data
                 * circuits. Until that is done, there very well might be more
                 * interrupts, so ignore them in that case.
                 */
                if (host->pending_reset)
                        return;

                pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
                       mmc_hostname(host->mmc), (unsigned)intmask);
                sdhci_dumpregs(host);

                return;
        }

        /* Classify data errors; CMD14 (bus test read) tolerates CRC errors. */
        if (intmask & SDHCI_INT_DATA_TIMEOUT)
                host->data->error = -ETIMEDOUT;
        else if (intmask & SDHCI_INT_DATA_END_BIT)
                host->data->error = -EILSEQ;
        else if ((intmask & SDHCI_INT_DATA_CRC) &&
                SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
                        != MMC_BUS_TEST_R)
                host->data->error = -EILSEQ;
        else if (intmask & SDHCI_INT_ADMA_ERROR) {
                pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
                       intmask);
                sdhci_adma_show_error(host);
                host->data->error = -EIO;
                if (host->ops->adma_workaround)
                        host->ops->adma_workaround(host, intmask);
        }

        if (host->data->error)
                sdhci_finish_data(host);
        else {
                if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
                        sdhci_transfer_pio(host);

                /*
                 * We currently don't do anything fancy with DMA
                 * boundaries, but as we can't disable the feature
                 * we need to at least restart the transfer.
                 *
                 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
                 * should return a valid address to continue from, but as
                 * some controllers are faulty, don't trust them.
                 */
                if (intmask & SDHCI_INT_DMA_END) {
                        u32 dmastart, dmanow;

                        dmastart = sdhci_sdma_address(host);
                        dmanow = dmastart + host->data->bytes_xfered;
                        /*
                         * Force update to the next DMA block boundary.
                         */
                        dmanow = (dmanow &
                                ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
                                SDHCI_DEFAULT_BOUNDARY_SIZE;
                        host->data->bytes_xfered = dmanow - dmastart;
                        DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
                            dmastart, host->data->bytes_xfered, dmanow);
                        sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
                }

                if (intmask & SDHCI_INT_DATA_END) {
                        if (host->cmd == host->data_cmd) {
                                /*
                                 * Data managed to finish before the
                                 * command completed. Make sure we do
                                 * things in the proper order.
                                 */
                                host->data_early = 1;
                        } else {
                                sdhci_finish_data(host);
                        }
                }
        }
}
2873
/*
 * Hard interrupt handler.  Loops (bounded by max_loops) re-reading the
 * interrupt status, dispatching command/data/card-detect/SDIO events.
 * Card-detect and SDIO card interrupts are deferred to the threaded
 * handler via host->thread_isr and IRQ_WAKE_THREAD.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
        irqreturn_t result = IRQ_NONE;
        struct sdhci_host *host = dev_id;
        u32 intmask, mask, unexpected = 0;
        int max_loops = 16;

        spin_lock(&host->lock);

        /* While runtime suspended, only a shared SDIO irq may be serviced. */
        if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
                spin_unlock(&host->lock);
                return IRQ_NONE;
        }

        /* 0xffffffff indicates the controller has gone away (e.g. hotplug). */
        intmask = sdhci_readl(host, SDHCI_INT_STATUS);
        if (!intmask || intmask == 0xffffffff) {
                result = IRQ_NONE;
                goto out;
        }

        do {
                DBG("IRQ status 0x%08x\n", intmask);

                /* Give the platform driver first chance to filter the bits. */
                if (host->ops->irq) {
                        intmask = host->ops->irq(host, intmask);
                        if (!intmask)
                                goto cont;
                }

                /* Clear selected interrupts. */
                mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
                                  SDHCI_INT_BUS_POWER);
                sdhci_writel(host, mask, SDHCI_INT_STATUS);

                if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
                        u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
                                      SDHCI_CARD_PRESENT;

                        /*
                         * There is a observation on i.mx esdhc.  INSERT
                         * bit will be immediately set again when it gets
                         * cleared, if a card is inserted.  We have to mask
                         * the irq to prevent interrupt storm which will
                         * freeze the system.  And the REMOVE gets the
                         * same situation.
                         *
                         * More testing are needed here to ensure it works
                         * for other platforms though.
                         */
                        host->ier &= ~(SDHCI_INT_CARD_INSERT |
                                       SDHCI_INT_CARD_REMOVE);
                        host->ier |= present ? SDHCI_INT_CARD_REMOVE :
                                               SDHCI_INT_CARD_INSERT;
                        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
                        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

                        sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
                                     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

                        /* Hand the detect event to the threaded handler. */
                        host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
                                                       SDHCI_INT_CARD_REMOVE);
                        result = IRQ_WAKE_THREAD;
                }

                /* Stop commands can complete data before the command itself;
                 * ignore a stray data-end in that case. */
                if ((intmask & SDHCI_INT_DATA_END) && !host->data &&
                    host->cmd && (host->cmd == host->cmd->mrq->stop))
                        intmask &= ~SDHCI_INT_DATA_END;

                if (intmask & SDHCI_INT_CMD_MASK)
                        sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);

                if (intmask & SDHCI_INT_DATA_MASK)
                        sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

                if (intmask & SDHCI_INT_BUS_POWER)
                        pr_err("%s: Card is consuming too much power!\n",
                                mmc_hostname(host->mmc));

                if (intmask & SDHCI_INT_RETUNE)
                        mmc_retune_needed(host->mmc);

                if ((intmask & SDHCI_INT_CARD_INT) &&
                    (host->ier & SDHCI_INT_CARD_INT)) {
                        /* Mask the SDIO irq until the thread re-enables it. */
                        sdhci_enable_sdio_irq_nolock(host, false);
                        host->thread_isr |= SDHCI_INT_CARD_INT;
                        result = IRQ_WAKE_THREAD;
                }

                intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
                             SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
                             SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
                             SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

                /* Anything left over is unexpected; clear and report later. */
                if (intmask) {
                        unexpected |= intmask;
                        sdhci_writel(host, intmask, SDHCI_INT_STATUS);
                }
cont:
                if (result == IRQ_NONE)
                        result = IRQ_HANDLED;

                intmask = sdhci_readl(host, SDHCI_INT_STATUS);
        } while (intmask && --max_loops);
out:
        spin_unlock(&host->lock);

        if (unexpected) {
                pr_err("%s: Unexpected interrupt 0x%08x.\n",
                           mmc_hostname(host->mmc), unexpected);
                sdhci_dumpregs(host);
        }

        return result;
}
2988
/*
 * Threaded interrupt handler: processes the events the hard handler
 * deferred in host->thread_isr (card detect changes and SDIO card
 * interrupts), running in sleepable context.
 */
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
        struct sdhci_host *host = dev_id;
        unsigned long flags;
        u32 isr;

        /* Atomically take ownership of the pending deferred events. */
        spin_lock_irqsave(&host->lock, flags);
        isr = host->thread_isr;
        host->thread_isr = 0;
        spin_unlock_irqrestore(&host->lock, flags);

        if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
                struct mmc_host *mmc = host->mmc;

                mmc->ops->card_event(mmc);
                /* Debounce: rescan the slot after 200ms. */
                mmc_detect_change(mmc, msecs_to_jiffies(200));
        }

        if (isr & SDHCI_INT_CARD_INT) {
                sdio_run_irqs(host->mmc);

                /* Re-enable the SDIO irq masked by the hard handler. */
                spin_lock_irqsave(&host->lock, flags);
                if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
                        sdhci_enable_sdio_irq_nolock(host, true);
                spin_unlock_irqrestore(&host->lock, flags);
        }

        return isr ? IRQ_HANDLED : IRQ_NONE;
}
3018
3019 /*****************************************************************************\
3020  *                                                                           *
3021  * Suspend/resume                                                            *
3022  *                                                                           *
3023 \*****************************************************************************/
3024
3025 #ifdef CONFIG_PM
3026
3027 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3028 {
3029         return mmc_card_is_removable(host->mmc) &&
3030                !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3031                !mmc_can_gpio_cd(host->mmc);
3032 }
3033
3034 /*
3035  * To enable wakeup events, the corresponding events have to be enabled in
3036  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3037  * Table' in the SD Host Controller Standard Specification.
3038  * It is useless to restore SDHCI_INT_ENABLE state in
3039  * sdhci_disable_irq_wakeups() since it will be set by
3040  * sdhci_enable_card_detection() or sdhci_init().
3041  */
/*
 * Program the wakeup-control register and interrupt-status enables for
 * the wakeup sources that apply (card detect and/or SDIO), then arm the
 * irq for wakeup.  Returns true if wakeup was successfully enabled.
 */
static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
        u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
                  SDHCI_WAKE_ON_INT;
        u32 irq_val = 0;
        u8 wake_val = 0;
        u8 val;

        if (sdhci_cd_irq_can_wakeup(host)) {
                wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
                irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
        }

        if (mmc_card_wake_sdio_irq(host->mmc)) {
                wake_val |= SDHCI_WAKE_ON_INT;
                irq_val |= SDHCI_INT_CARD_INT;
        }

        /* No usable wakeup source - tell the caller to fall back. */
        if (!irq_val)
                return false;

        /* Read-modify-write: replace only the wakeup bits. */
        val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
        val &= ~mask;
        val |= wake_val;
        sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);

        sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);

        host->irq_wake_enabled = !enable_irq_wake(host->irq);

        return host->irq_wake_enabled;
}
3074
3075 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3076 {
3077         u8 val;
3078         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3079                         | SDHCI_WAKE_ON_INT;
3080
3081         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3082         val &= ~mask;
3083         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3084
3085         disable_irq_wake(host->irq);
3086
3087         host->irq_wake_enabled = false;
3088 }
3089
/*
 * System-suspend entry point.  If wakeup cannot be (or need not be)
 * armed, all interrupts are disabled and the irq is released; otherwise
 * the controller keeps its wakeup sources enabled.  Always returns 0.
 */
int sdhci_suspend_host(struct sdhci_host *host)
{
        sdhci_disable_card_detection(host);

        mmc_retune_timer_stop(host->mmc);

        /* Only quiesce fully if wakeup is unwanted or could not be armed. */
        if (!device_may_wakeup(mmc_dev(host->mmc)) ||
            !sdhci_enable_irq_wakeups(host)) {
                host->ier = 0;
                sdhci_writel(host, 0, SDHCI_INT_ENABLE);
                sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
                free_irq(host->irq, host);
        }

        return 0;
}

EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3108
/*
 * System-resume entry point: re-enable DMA, reinitialize the controller
 * (fully, or partially if the card kept power), reacquire or disarm the
 * irq depending on how suspend left it, and restart card detection.
 * Returns 0 on success or a negative errno from request_threaded_irq().
 */
int sdhci_resume_host(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        int ret = 0;

        if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->enable_dma)
                        host->ops->enable_dma(host);
        }

        if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
            (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
                /* Card keeps power but host controller does not */
                sdhci_init(host, 0);
                host->pwr = 0;
                host->clock = 0;
                /* Force clock/power reprogramming via set_ios. */
                mmc->ops->set_ios(mmc, &mmc->ios);
        } else {
                sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
                mmiowb();
        }

        /* Suspend either armed wakeup or freed the irq - undo whichever. */
        if (host->irq_wake_enabled) {
                sdhci_disable_irq_wakeups(host);
        } else {
                ret = request_threaded_irq(host->irq, sdhci_irq,
                                           sdhci_thread_irq, IRQF_SHARED,
                                           mmc_hostname(host->mmc), host);
                if (ret)
                        return ret;
        }

        sdhci_enable_card_detection(host);

        return ret;
}

EXPORT_SYMBOL_GPL(sdhci_resume_host);
3147
/*
 * Runtime-suspend: mask all interrupts except the SDIO card interrupt,
 * wait for any in-flight hard irq to finish, then mark the host runtime
 * suspended.  Always returns 0.
 */
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
        unsigned long flags;

        mmc_retune_timer_stop(host->mmc);

        spin_lock_irqsave(&host->lock, flags);
        /* Keep only the SDIO card interrupt enabled, if it was. */
        host->ier &= SDHCI_INT_CARD_INT;
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        spin_unlock_irqrestore(&host->lock, flags);

        /* Ensure no hard irq handler is still running before suspending. */
        synchronize_hardirq(host->irq);

        spin_lock_irqsave(&host->lock, flags);
        host->runtime_suspended = true;
        spin_unlock_irqrestore(&host->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3169
/*
 * Runtime-resume: re-enable DMA, reinitialize the controller, restore
 * clock/power/voltage and preset-value/enhanced-strobe settings if the
 * bus was powered, then re-enable SDIO irq and card detection.
 * Always returns 0.
 */
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        unsigned long flags;
        int host_flags = host->flags;

        if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
                if (host->ops->enable_dma)
                        host->ops->enable_dma(host);
        }

        sdhci_init(host, 0);

        if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
            mmc->ios.power_mode != MMC_POWER_OFF) {
                /* Force clock and power re-program */
                host->pwr = 0;
                host->clock = 0;
                mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
                mmc->ops->set_ios(mmc, &mmc->ios);

                /* Restore preset values if they were in use before suspend. */
                if ((host_flags & SDHCI_PV_ENABLED) &&
                    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
                        spin_lock_irqsave(&host->lock, flags);
                        sdhci_enable_preset_value(host, true);
                        spin_unlock_irqrestore(&host->lock, flags);
                }

                if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
                    mmc->ops->hs400_enhanced_strobe)
                        mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
        }

        spin_lock_irqsave(&host->lock, flags);

        host->runtime_suspended = false;

        /* Enable SDIO IRQ */
        if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
                sdhci_enable_sdio_irq_nolock(host, true);

        /* Enable Card Detection */
        sdhci_enable_card_detection(host);

        spin_unlock_irqrestore(&host->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3219
3220 #endif /* CONFIG_PM */
3221
3222 /*****************************************************************************\
3223  *                                                                           *
3224  * Command Queue Engine (CQE) helpers                                        *
3225  *                                                                           *
3226 \*****************************************************************************/
3227
/*
 * Switch the controller into Command Queue Engine (CQE) mode: select the
 * ADMA DMA mode the CQE will use, program block size and timeout, and
 * install the CQE interrupt mask.
 */
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	/* CQE requires ADMA; pick 64- or 32-bit descriptors to match host */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* CQE transfers use a fixed 512-byte block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	/* Replace the normal interrupt mask with the CQE one */
	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3265
/*
 * Take the controller out of CQE mode: restore the default interrupt
 * mask and, when called for error recovery, reset the CMD and DATA lines.
 */
void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/* Restore the non-CQE interrupt enables */
	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	/* In the recovery path, clean up the CMD and DATA circuitry */
	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3290
3291 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3292                    int *data_error)
3293 {
3294         u32 mask;
3295
3296         if (!host->cqe_on)
3297                 return false;
3298
3299         if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3300                 *cmd_error = -EILSEQ;
3301         else if (intmask & SDHCI_INT_TIMEOUT)
3302                 *cmd_error = -ETIMEDOUT;
3303         else
3304                 *cmd_error = 0;
3305
3306         if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3307                 *data_error = -EILSEQ;
3308         else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3309                 *data_error = -ETIMEDOUT;
3310         else if (intmask & SDHCI_INT_ADMA_ERROR)
3311                 *data_error = -EIO;
3312         else
3313                 *data_error = 0;
3314
3315         /* Clear selected interrupts. */
3316         mask = intmask & host->cqe_ier;
3317         sdhci_writel(host, mask, SDHCI_INT_STATUS);
3318
3319         if (intmask & SDHCI_INT_BUS_POWER)
3320                 pr_err("%s: Card is consuming too much power!\n",
3321                        mmc_hostname(host->mmc));
3322
3323         intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3324         if (intmask) {
3325                 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3326                 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3327                        mmc_hostname(host->mmc), intmask);
3328                 sdhci_dumpregs(host);
3329         }
3330
3331         return true;
3332 }
3333 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3334
3335 /*****************************************************************************\
3336  *                                                                           *
3337  * Device allocation/registration                                            *
3338  *                                                                           *
3339 \*****************************************************************************/
3340
3341 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3342         size_t priv_size)
3343 {
3344         struct mmc_host *mmc;
3345         struct sdhci_host *host;
3346
3347         WARN_ON(dev == NULL);
3348
3349         mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3350         if (!mmc)
3351                 return ERR_PTR(-ENOMEM);
3352
3353         host = mmc_priv(mmc);
3354         host->mmc = mmc;
3355         host->mmc_host_ops = sdhci_ops;
3356         mmc->ops = &host->mmc_host_ops;
3357
3358         host->flags = SDHCI_SIGNALING_330;
3359
3360         host->cqe_ier     = SDHCI_CQE_INT_MASK;
3361         host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3362
3363         host->tuning_delay = -1;
3364
3365         host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3366
3367         return host;
3368 }
3369
3370 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3371
3372 static int sdhci_set_dma_mask(struct sdhci_host *host)
3373 {
3374         struct mmc_host *mmc = host->mmc;
3375         struct device *dev = mmc_dev(mmc);
3376         int ret = -EINVAL;
3377
3378         if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3379                 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3380
3381         /* Try 64-bit mask if hardware is capable  of it */
3382         if (host->flags & SDHCI_USE_64_BIT_DMA) {
3383                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3384                 if (ret) {
3385                         pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3386                                 mmc_hostname(mmc));
3387                         host->flags &= ~SDHCI_USE_64_BIT_DMA;
3388                 }
3389         }
3390
3391         /* 32-bit mask as default & fallback */
3392         if (ret) {
3393                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3394                 if (ret)
3395                         pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3396                                 mmc_hostname(mmc));
3397         }
3398
3399         return ret;
3400 }
3401
/*
 * Read (once) the controller version and capability registers into the
 * host structure. Callers may override the hardware values via @ver,
 * @caps and @caps1, and DT may mask/set capability bits via the
 * "sdhci-caps-mask" / "sdhci-caps" properties.
 */
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	/* Only ever read the caps once */
	if (host->read_caps)
		return;

	host->read_caps = true;

	/* Module-parameter quirk overrides take effect before the reset */
	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	/* Reset first so the registers reflect power-on defaults */
	sdhci_do_reset(host, SDHCI_RESET_ALL);

	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	/* With this quirk the driver supplies caps some other way */
	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		/* Apply DT overrides: clear masked bits, then set new ones */
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	/* CAPABILITIES_1 only exists from spec v3.00 onwards */
	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3452
3453 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3454 {
3455         struct mmc_host *mmc = host->mmc;
3456         unsigned int max_blocks;
3457         unsigned int bounce_size;
3458         int ret;
3459
3460         /*
3461          * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3462          * has diminishing returns, this is probably because SD/MMC
3463          * cards are usually optimized to handle this size of requests.
3464          */
3465         bounce_size = SZ_64K;
3466         /*
3467          * Adjust downwards to maximum request size if this is less
3468          * than our segment size, else hammer down the maximum
3469          * request size to the maximum buffer size.
3470          */
3471         if (mmc->max_req_size < bounce_size)
3472                 bounce_size = mmc->max_req_size;
3473         max_blocks = bounce_size / 512;
3474
3475         /*
3476          * When we just support one segment, we can get significant
3477          * speedups by the help of a bounce buffer to group scattered
3478          * reads/writes together.
3479          */
3480         host->bounce_buffer = devm_kmalloc(mmc->parent,
3481                                            bounce_size,
3482                                            GFP_KERNEL);
3483         if (!host->bounce_buffer) {
3484                 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3485                        mmc_hostname(mmc),
3486                        bounce_size);
3487                 /*
3488                  * Exiting with zero here makes sure we proceed with
3489                  * mmc->max_segs == 1.
3490                  */
3491                 return 0;
3492         }
3493
3494         host->bounce_addr = dma_map_single(mmc->parent,
3495                                            host->bounce_buffer,
3496                                            bounce_size,
3497                                            DMA_BIDIRECTIONAL);
3498         ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3499         if (ret)
3500                 /* Again fall back to max_segs == 1 */
3501                 return 0;
3502         host->bounce_buffer_size = bounce_size;
3503
3504         /* Lie about this since we're bouncing */
3505         mmc->max_segs = max_blocks;
3506         mmc->max_seg_size = bounce_size;
3507         mmc->max_req_size = bounce_size;
3508
3509         pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3510                 mmc_hostname(mmc), max_blocks, bounce_size);
3511
3512         return 0;
3513 }
3514
3515 int sdhci_setup_host(struct sdhci_host *host)
3516 {
3517         struct mmc_host *mmc;
3518         u32 max_current_caps;
3519         unsigned int ocr_avail;
3520         unsigned int override_timeout_clk;
3521         u32 max_clk;
3522         int ret;
3523
3524         WARN_ON(host == NULL);
3525         if (host == NULL)
3526                 return -EINVAL;
3527
3528         mmc = host->mmc;
3529
3530         /*
3531          * If there are external regulators, get them. Note this must be done
3532          * early before resetting the host and reading the capabilities so that
3533          * the host can take the appropriate action if regulators are not
3534          * available.
3535          */
3536         ret = mmc_regulator_get_supply(mmc);
3537         if (ret)
3538                 return ret;
3539
3540         DBG("Version:   0x%08x | Present:  0x%08x\n",
3541             sdhci_readw(host, SDHCI_HOST_VERSION),
3542             sdhci_readl(host, SDHCI_PRESENT_STATE));
3543         DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3544             sdhci_readl(host, SDHCI_CAPABILITIES),
3545             sdhci_readl(host, SDHCI_CAPABILITIES_1));
3546
3547         sdhci_read_caps(host);
3548
3549         override_timeout_clk = host->timeout_clk;
3550
3551         if (host->version > SDHCI_SPEC_300) {
3552                 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3553                        mmc_hostname(mmc), host->version);
3554         }
3555
3556         if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3557                 mmc->caps2 &= ~MMC_CAP2_CQE;
3558
3559         if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3560                 host->flags |= SDHCI_USE_SDMA;
3561         else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3562                 DBG("Controller doesn't have SDMA capability\n");
3563         else
3564                 host->flags |= SDHCI_USE_SDMA;
3565
3566         if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3567                 (host->flags & SDHCI_USE_SDMA)) {
3568                 DBG("Disabling DMA as it is marked broken\n");
3569                 host->flags &= ~SDHCI_USE_SDMA;
3570         }
3571
3572         if ((host->version >= SDHCI_SPEC_200) &&
3573                 (host->caps & SDHCI_CAN_DO_ADMA2))
3574                 host->flags |= SDHCI_USE_ADMA;
3575
3576         if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3577                 (host->flags & SDHCI_USE_ADMA)) {
3578                 DBG("Disabling ADMA as it is marked broken\n");
3579                 host->flags &= ~SDHCI_USE_ADMA;
3580         }
3581
3582         /*
3583          * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3584          * and *must* do 64-bit DMA.  A driver has the opportunity to change
3585          * that during the first call to ->enable_dma().  Similarly
3586          * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3587          * implement.
3588          */
3589         if (host->caps & SDHCI_CAN_64BIT)
3590                 host->flags |= SDHCI_USE_64_BIT_DMA;
3591
3592         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3593                 ret = sdhci_set_dma_mask(host);
3594
3595                 if (!ret && host->ops->enable_dma)
3596                         ret = host->ops->enable_dma(host);
3597
3598                 if (ret) {
3599                         pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3600                                 mmc_hostname(mmc));
3601                         host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3602
3603                         ret = 0;
3604                 }
3605         }
3606
3607         /* SDMA does not support 64-bit DMA */
3608         if (host->flags & SDHCI_USE_64_BIT_DMA)
3609                 host->flags &= ~SDHCI_USE_SDMA;
3610
3611         if (host->flags & SDHCI_USE_ADMA) {
3612                 dma_addr_t dma;
3613                 void *buf;
3614
3615                 /*
3616                  * The DMA descriptor table size is calculated as the maximum
3617                  * number of segments times 2, to allow for an alignment
3618                  * descriptor for each segment, plus 1 for a nop end descriptor,
3619                  * all multipled by the descriptor size.
3620                  */
3621                 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3622                         host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3623                                               SDHCI_ADMA2_64_DESC_SZ;
3624                         host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3625                 } else {
3626                         host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3627                                               SDHCI_ADMA2_32_DESC_SZ;
3628                         host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3629                 }
3630
3631                 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3632                 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3633                                          host->adma_table_sz, &dma, GFP_KERNEL);
3634                 if (!buf) {
3635                         pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3636                                 mmc_hostname(mmc));
3637                         host->flags &= ~SDHCI_USE_ADMA;
3638                 } else if ((dma + host->align_buffer_sz) &
3639                            (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3640                         pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3641                                 mmc_hostname(mmc));
3642                         host->flags &= ~SDHCI_USE_ADMA;
3643                         dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3644                                           host->adma_table_sz, buf, dma);
3645                 } else {
3646                         host->align_buffer = buf;
3647                         host->align_addr = dma;
3648
3649                         host->adma_table = buf + host->align_buffer_sz;
3650                         host->adma_addr = dma + host->align_buffer_sz;
3651                 }
3652         }
3653
3654         /*
3655          * If we use DMA, then it's up to the caller to set the DMA
3656          * mask, but PIO does not need the hw shim so we set a new
3657          * mask here in that case.
3658          */
3659         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3660                 host->dma_mask = DMA_BIT_MASK(64);
3661                 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3662         }
3663
3664         if (host->version >= SDHCI_SPEC_300)
3665                 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3666                         >> SDHCI_CLOCK_BASE_SHIFT;
3667         else
3668                 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3669                         >> SDHCI_CLOCK_BASE_SHIFT;
3670
3671         host->max_clk *= 1000000;
3672         if (host->max_clk == 0 || host->quirks &
3673                         SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3674                 if (!host->ops->get_max_clock) {
3675                         pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3676                                mmc_hostname(mmc));
3677                         ret = -ENODEV;
3678                         goto undma;
3679                 }
3680                 host->max_clk = host->ops->get_max_clock(host);
3681         }
3682
3683         /*
3684          * In case of Host Controller v3.00, find out whether clock
3685          * multiplier is supported.
3686          */
3687         host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3688                         SDHCI_CLOCK_MUL_SHIFT;
3689
3690         /*
3691          * In case the value in Clock Multiplier is 0, then programmable
3692          * clock mode is not supported, otherwise the actual clock
3693          * multiplier is one more than the value of Clock Multiplier
3694          * in the Capabilities Register.
3695          */
3696         if (host->clk_mul)
3697                 host->clk_mul += 1;
3698
3699         /*
3700          * Set host parameters.
3701          */
3702         max_clk = host->max_clk;
3703
3704         if (host->ops->get_min_clock)
3705                 mmc->f_min = host->ops->get_min_clock(host);
3706         else if (host->version >= SDHCI_SPEC_300) {
3707                 if (host->clk_mul) {
3708                         mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3709                         max_clk = host->max_clk * host->clk_mul;
3710                 } else
3711                         mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3712         } else
3713                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3714
3715         if (!mmc->f_max || mmc->f_max > max_clk)
3716                 mmc->f_max = max_clk;
3717
3718         if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3719                 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3720                                         SDHCI_TIMEOUT_CLK_SHIFT;
3721
3722                 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3723                         host->timeout_clk *= 1000;
3724
3725                 if (host->timeout_clk == 0) {
3726                         if (!host->ops->get_timeout_clock) {
3727                                 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3728                                         mmc_hostname(mmc));
3729                                 ret = -ENODEV;
3730                                 goto undma;
3731                         }
3732
3733                         host->timeout_clk =
3734                                 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3735                                              1000);
3736                 }
3737
3738                 if (override_timeout_clk)
3739                         host->timeout_clk = override_timeout_clk;
3740
3741                 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3742                         host->ops->get_max_timeout_count(host) : 1 << 27;
3743                 mmc->max_busy_timeout /= host->timeout_clk;
3744         }
3745
3746         if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3747             !host->ops->get_max_timeout_count)
3748                 mmc->max_busy_timeout = 0;
3749
3750         mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3751         mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3752
3753         if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3754                 host->flags |= SDHCI_AUTO_CMD12;
3755
3756         /* Auto-CMD23 stuff only works in ADMA or PIO. */
3757         if ((host->version >= SDHCI_SPEC_300) &&
3758             ((host->flags & SDHCI_USE_ADMA) ||
3759              !(host->flags & SDHCI_USE_SDMA)) &&
3760              !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3761                 host->flags |= SDHCI_AUTO_CMD23;
3762                 DBG("Auto-CMD23 available\n");
3763         } else {
3764                 DBG("Auto-CMD23 unavailable\n");
3765         }
3766
3767         /*
3768          * A controller may support 8-bit width, but the board itself
3769          * might not have the pins brought out.  Boards that support
3770          * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3771          * their platform code before calling sdhci_add_host(), and we
3772          * won't assume 8-bit width for hosts without that CAP.
3773          */
3774         if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3775                 mmc->caps |= MMC_CAP_4_BIT_DATA;
3776
3777         if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3778                 mmc->caps &= ~MMC_CAP_CMD23;
3779
3780         if (host->caps & SDHCI_CAN_DO_HISPD)
3781                 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3782
3783         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3784             mmc_card_is_removable(mmc) &&
3785             mmc_gpio_get_cd(host->mmc) < 0)
3786                 mmc->caps |= MMC_CAP_NEEDS_POLL;
3787
3788         if (!IS_ERR(mmc->supply.vqmmc)) {
3789                 ret = regulator_enable(mmc->supply.vqmmc);
3790
3791                 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
3792                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3793                                                     1950000))
3794                         host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3795                                          SDHCI_SUPPORT_SDR50 |
3796                                          SDHCI_SUPPORT_DDR50);
3797
3798                 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
3799                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
3800                                                     3600000))
3801                         host->flags &= ~SDHCI_SIGNALING_330;
3802
3803                 if (ret) {
3804                         pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3805                                 mmc_hostname(mmc), ret);
3806                         mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3807                 }
3808         }
3809
3810         if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3811                 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3812                                  SDHCI_SUPPORT_DDR50);
3813                 /*
3814                  * The SDHCI controller in a SoC might support HS200/HS400
3815                  * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
3816                  * but if the board is modeled such that the IO lines are not
3817                  * connected to 1.8v then HS200/HS400 cannot be supported.
3818                  * Disable HS200/HS400 if the board does not have 1.8v connected
3819                  * to the IO lines. (Applicable for other modes in 1.8v)
3820                  */
3821                 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
3822                 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
3823         }
3824
3825         /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3826         if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3827                            SDHCI_SUPPORT_DDR50))
3828                 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3829
3830         /* SDR104 supports also implies SDR50 support */
3831         if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3832                 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3833                 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3834                  * field can be promoted to support HS200.
3835                  */
3836                 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3837                         mmc->caps2 |= MMC_CAP2_HS200;
3838         } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3839                 mmc->caps |= MMC_CAP_UHS_SDR50;
3840         }
3841
3842         if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3843             (host->caps1 & SDHCI_SUPPORT_HS400))
3844                 mmc->caps2 |= MMC_CAP2_HS400;
3845
3846         if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3847             (IS_ERR(mmc->supply.vqmmc) ||
3848              !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3849                                              1300000)))
3850                 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3851
3852         if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3853             !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3854                 mmc->caps |= MMC_CAP_UHS_DDR50;
3855
3856         /* Does the host need tuning for SDR50? */
3857         if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3858                 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3859
3860         /* Driver Type(s) (A, C, D) supported by the host */
3861         if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3862                 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3863         if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3864                 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3865         if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3866                 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3867
3868         /* Initial value for re-tuning timer count */
3869         host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3870                              SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3871
3872         /*
3873          * In case Re-tuning Timer is not disabled, the actual value of
3874          * re-tuning timer will be 2 ^ (n - 1).
3875          */
3876         if (host->tuning_count)
3877                 host->tuning_count = 1 << (host->tuning_count - 1);
3878
3879         /* Re-tuning mode supported by the Host Controller */
3880         host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3881                              SDHCI_RETUNING_MODE_SHIFT;
3882
3883         ocr_avail = 0;
3884
3885         /*
3886          * According to SD Host Controller spec v3.00, if the Host System
3887          * can afford more than 150mA, Host Driver should set XPC to 1. Also
3888          * the value is meaningful only if Voltage Support in the Capabilities
3889          * register is set. The actual current value is 4 times the register
3890          * value.
3891          */
3892         max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3893         if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3894                 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3895                 if (curr > 0) {
3896
3897                         /* convert to SDHCI_MAX_CURRENT format */
3898                         curr = curr/1000;  /* convert to mA */
3899                         curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3900
3901                         curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3902                         max_current_caps =
3903                                 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3904                                 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3905                                 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3906                 }
3907         }
3908
3909         if (host->caps & SDHCI_CAN_VDD_330) {
3910                 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3911
3912                 mmc->max_current_330 = ((max_current_caps &
3913                                    SDHCI_MAX_CURRENT_330_MASK) >>
3914                                    SDHCI_MAX_CURRENT_330_SHIFT) *
3915                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3916         }
3917         if (host->caps & SDHCI_CAN_VDD_300) {
3918                 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3919
3920                 mmc->max_current_300 = ((max_current_caps &
3921                                    SDHCI_MAX_CURRENT_300_MASK) >>
3922                                    SDHCI_MAX_CURRENT_300_SHIFT) *
3923                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3924         }
3925         if (host->caps & SDHCI_CAN_VDD_180) {
3926                 ocr_avail |= MMC_VDD_165_195;
3927
3928                 mmc->max_current_180 = ((max_current_caps &
3929                                    SDHCI_MAX_CURRENT_180_MASK) >>
3930                                    SDHCI_MAX_CURRENT_180_SHIFT) *
3931                                    SDHCI_MAX_CURRENT_MULTIPLIER;
3932         }
3933
3934         /* If OCR set by host, use it instead. */
3935         if (host->ocr_mask)
3936                 ocr_avail = host->ocr_mask;
3937
3938         /* If OCR set by external regulators, give it highest prio. */
3939         if (mmc->ocr_avail)
3940                 ocr_avail = mmc->ocr_avail;
3941
3942         mmc->ocr_avail = ocr_avail;
3943         mmc->ocr_avail_sdio = ocr_avail;
3944         if (host->ocr_avail_sdio)
3945                 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3946         mmc->ocr_avail_sd = ocr_avail;
3947         if (host->ocr_avail_sd)
3948                 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3949         else /* normal SD controllers don't support 1.8V */
3950                 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3951         mmc->ocr_avail_mmc = ocr_avail;
3952         if (host->ocr_avail_mmc)
3953                 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3954
3955         if (mmc->ocr_avail == 0) {
3956                 pr_err("%s: Hardware doesn't report any support voltages.\n",
3957                        mmc_hostname(mmc));
3958                 ret = -ENODEV;
3959                 goto unreg;
3960         }
3961
3962         if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3963                           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3964                           MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3965             (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3966                 host->flags |= SDHCI_SIGNALING_180;
3967
3968         if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3969                 host->flags |= SDHCI_SIGNALING_120;
3970
3971         spin_lock_init(&host->lock);
3972
3973         /*
3974          * Maximum number of sectors in one transfer. Limited by SDMA boundary
3975          * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
3976          * is less anyway.
3977          */
3978         mmc->max_req_size = 524288;
3979
3980         /*
3981          * Maximum number of segments. Depends on if the hardware
3982          * can do scatter/gather or not.
3983          */
3984         if (host->flags & SDHCI_USE_ADMA) {
3985                 mmc->max_segs = SDHCI_MAX_SEGS;
3986         } else if (host->flags & SDHCI_USE_SDMA) {
3987                 mmc->max_segs = 1;
3988                 if (swiotlb_max_segment()) {
3989                         unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3990                                                 IO_TLB_SEGSIZE;
3991                         mmc->max_req_size = min(mmc->max_req_size,
3992                                                 max_req_size);
3993                 }
3994         } else { /* PIO */
3995                 mmc->max_segs = SDHCI_MAX_SEGS;
3996         }
3997
3998         /*
3999          * Maximum segment size. Could be one segment with the maximum number
4000          * of bytes. When doing hardware scatter/gather, each entry cannot
4001          * be larger than 64 KiB though.
4002          */
4003         if (host->flags & SDHCI_USE_ADMA) {
4004                 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4005                         mmc->max_seg_size = 65535;
4006                 else
4007                         mmc->max_seg_size = 65536;
4008         } else {
4009                 mmc->max_seg_size = mmc->max_req_size;
4010         }
4011
4012         /*
4013          * Maximum block size. This varies from controller to controller and
4014          * is specified in the capabilities register.
4015          */
4016         if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4017                 mmc->max_blk_size = 2;
4018         } else {
4019                 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4020                                 SDHCI_MAX_BLOCK_SHIFT;
4021                 if (mmc->max_blk_size >= 3) {
4022                         pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4023                                 mmc_hostname(mmc));
4024                         mmc->max_blk_size = 0;
4025                 }
4026         }
4027
4028         mmc->max_blk_size = 512 << mmc->max_blk_size;
4029
4030         /*
4031          * Maximum block count.
4032          */
4033         mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4034
4035         if (mmc->max_segs == 1) {
4036                 /* This may alter mmc->*_blk_* parameters */
4037                 ret = sdhci_allocate_bounce_buffer(host);
4038                 if (ret)
4039                         return ret;
4040         }
4041
4042         return 0;
4043
4044 unreg:
4045         if (!IS_ERR(mmc->supply.vqmmc))
4046                 regulator_disable(mmc->supply.vqmmc);
4047 undma:
4048         if (host->align_buffer)
4049                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4050                                   host->adma_table_sz, host->align_buffer,
4051                                   host->align_addr);
4052         host->adma_table = NULL;
4053         host->align_buffer = NULL;
4054
4055         return ret;
4056 }
4057 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4058
4059 void sdhci_cleanup_host(struct sdhci_host *host)
4060 {
4061         struct mmc_host *mmc = host->mmc;
4062
4063         if (!IS_ERR(mmc->supply.vqmmc))
4064                 regulator_disable(mmc->supply.vqmmc);
4065
4066         if (host->align_buffer)
4067                 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4068                                   host->adma_table_sz, host->align_buffer,
4069                                   host->align_addr);
4070         host->adma_table = NULL;
4071         host->align_buffer = NULL;
4072 }
4073 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4074
/*
 * __sdhci_add_host - bring up and register a host already prepared by
 * sdhci_setup_host().
 *
 * On success the mmc_host is registered and card detection is enabled.
 * On failure the tasklet/IRQ/LED state created here is unwound, but the
 * resources acquired by sdhci_setup_host() are NOT released; the caller
 * must invoke sdhci_cleanup_host() for that (as sdhci_add_host() does).
 *
 * Returns 0 on success or a negative errno.
 */
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	/* Separate timers: one for overall requests, one for data transfers. */
	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	/* Threaded IRQ: hard handler plus thread fn; line may be shared. */
	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	/*
	 * NOTE(review): legacy barrier, presumably to flush posted MMIO
	 * writes before the host goes live - confirm it is still needed.
	 */
	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	/* Quiesce the controller and mask interrupts before freeing the IRQ. */
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
4137
/*
 * sdhci_add_host - set up and register a controller
 * @host: the sdhci host
 *
 * Convenience wrapper: runs sdhci_setup_host() followed by
 * __sdhci_add_host(), undoing the setup step if registration fails.
 *
 * Returns 0 on success or a negative errno.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	int ret = sdhci_setup_host(host);

	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret) {
		/* Registration failed: release what setup acquired. */
		sdhci_cleanup_host(host);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
4158
/*
 * sdhci_remove_host - unregister and shut down a controller
 * @host: the sdhci host
 * @dead: non-zero when the controller hardware is no longer accessible;
 *        the hardware reset is then skipped and any in-flight requests
 *        are completed with -ENOMEDIUM.
 *
 * Reverses both __sdhci_add_host() and sdhci_setup_host().
 */
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		/* Make the IRQ/request paths treat the device as gone. */
		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during "
				" transfer!\n", mmc_hostname(mmc));
			/* Fail all outstanding requests so waiters unblock. */
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	/* Only touch the hardware if it is still reachable. */
	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	/* IRQ released above; now the timers and tasklet can be stopped. */
	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	/* Free the combined ADMA descriptor + alignment DMA buffer. */
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
4209
4210 void sdhci_free_host(struct sdhci_host *host)
4211 {
4212         mmc_free_host(host->mmc);
4213 }
4214
4215 EXPORT_SYMBOL_GPL(sdhci_free_host);
4216
4217 /*****************************************************************************\
4218  *                                                                           *
4219  * Driver init/exit                                                          *
4220  *                                                                           *
4221 \*****************************************************************************/
4222
4223 static int __init sdhci_drv_init(void)
4224 {
4225         pr_info(DRIVER_NAME
4226                 ": Secure Digital Host Controller Interface driver\n");
4227         pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4228
4229         return 0;
4230 }
4231
/*
 * Module exit: intentionally empty - there is no module-wide state to
 * release; per-host teardown is handled by sdhci_remove_host().
 */
static void __exit sdhci_drv_exit(void)
{
}
4235
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Quirk-override masks, exposed read-only (0444) via sysfs. */
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");