1 /*
2  *  linux/drivers/mmc/host/mshci.c - Mobile Storage Host Controller Interface driver
3  *
4  *  Copyright (C) 2011 Samsung Electronics, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  */
12 #include <linux/delay.h>
13 #include <linux/highmem.h>
14 #include <linux/io.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/slab.h>
17 #include <linux/scatterlist.h>
18 #include <linux/regulator/consumer.h>
19
20 #include <linux/leds.h>
21
22 #include <linux/mmc/host.h>
23
24 #include "mshci.h"
25
26 #define DRIVER_NAME "mshci"
27
28 #define DBG(f, x...) \
29         pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
30
31 static unsigned int debug_quirks = 0;
32
33 static void mshci_prepare_data(struct mshci_host *, struct mmc_data *);
34 static void mshci_finish_data(struct mshci_host *);
35
36 static void mshci_send_command(struct mshci_host *, struct mmc_command *);
37 static void mshci_finish_command(struct mshci_host *);
38
39
40 static void mshci_dumpregs(struct mshci_host *host)
41 {
42         /* ToDo */
43 }
44
45
46 /*****************************************************************************\
47  *                                                                           *
48  * Low level functions                                                       *
49  *                                                                           *
50 \*****************************************************************************/
51
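/*
 * Update the interrupt mask register: clear the bits in 'clear', then
 * set the bits in 'set', in MSHCI_INTMSK.
 */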
52 static void mshci_clear_set_irqs(struct mshci_host *host, u32 clear, u32 set)
53 {
54         u32 ier;
55
56         ier = mshci_readl(host, MSHCI_INTMSK);
57         ier &= ~clear;
58         ier |= set;
59         mshci_writel(host, ier, MSHCI_INTMSK);
60 }
61
62 static void mshci_unmask_irqs(struct mshci_host *host, u32 irqs)
63 {
64         mshci_clear_set_irqs(host, 0, irqs);
65 }
66
67 static void mshci_mask_irqs(struct mshci_host *host, u32 irqs)
68 {
69         mshci_clear_set_irqs(host, irqs, 0);
70 }
71
72 static void mshci_set_card_detection(struct mshci_host *host, bool enable)
73 {
74         u32 irqs = INTMSK_CDETECT;
75
76         if (enable)
77                 mshci_unmask_irqs(host, irqs);
78         else
79                 mshci_mask_irqs(host, irqs);
80 }
81
82 static void mshci_enable_card_detection(struct mshci_host *host)
83 {
84         mshci_set_card_detection(host, true);
85 }
86
87 static void mshci_disable_card_detection(struct mshci_host *host)
88 {
89         mshci_set_card_detection(host, false);
90 }
91
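/*
 * Request a reset by setting the bits in 'mask' (e.g. FIFO_RESET,
 * DMA_RESET, RESET_ALL) in MSHCI_CTRL, then poll for up to ~100 ms
 * until the DMA_RESET bit reads back as cleared.
 */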
92 static void mshci_reset(struct mshci_host *host, u8 mask)
93 {
94         u32 timeout = 100;
95         u32 ier;
96
97         ier = mshci_readl(host, MSHCI_CTRL);
98         ier |= mask;
99
100         mshci_writel(host, ier, MSHCI_CTRL);
101         while (mshci_readl(host, MSHCI_CTRL) & DMA_RESET) {
102                 if (timeout == 0) {
103                         printk(KERN_ERR "%s: Reset never completed.\n",
104                                 mmc_hostname(host->mmc));
105                         mshci_dumpregs(host);
106                         return;
107                 }
108                 timeout--;
109                 mdelay(1);
110         }
111 }
112
113 static void mshci_init(struct mshci_host *host)
114 {
115         mshci_reset(host, RESET_ALL);
116
117         /* clear interrupt status */
118         mshci_writel(host, INTMSK_ALL, MSHCI_RINTSTS);
119
120         mshci_clear_set_irqs(host, INTMSK_ALL,
121                 INTMSK_CDETECT | INTMSK_RE |
122                 INTMSK_CDONE | INTMSK_DTO | INTMSK_TXDR | INTMSK_RXDR |
123                 INTMSK_RCRC | INTMSK_DCRC | INTMSK_RTO | INTMSK_DRTO |
124                 INTMSK_HTO | INTMSK_FRUN | INTMSK_HLE | INTMSK_SBE |
125                 INTMSK_EBE);
126 }
127
128 static void mshci_reinit(struct mshci_host *host)
129 {
130         mshci_init(host);
131         mshci_enable_card_detection(host);
132 }
133
134 /*****************************************************************************\
135  *                                                                           *
136  * Core functions                                                            *
137  *                                                                           *
138 \*****************************************************************************/
139
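/*
 * PIO read path: drain the data FIFO into the current scatter-gather
 * buffer, one 32-bit word at a time, with local interrupts disabled.
 */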
140 static void mshci_read_block_pio(struct mshci_host *host)
141 {
142         unsigned long flags;
143         size_t fifo_cnt, len;
144         u32 uninitialized_var(scratch);
145         u8 *buf;
146
147         DBG("PIO reading\n");
148
149         fifo_cnt = (mshci_readl(host, MSHCI_STATUS) & FIFO_COUNT) >> 17;
150         fifo_cnt *= FIFO_WIDTH;
151
152         if (fifo_cnt == 128)
153                 fifo_cnt = 512;
154
155         local_irq_save(flags);
156
157         while (fifo_cnt) {
158                 if (!sg_miter_next(&host->sg_miter))
159                         BUG();
160
161                 len = min(host->sg_miter.length, fifo_cnt);
162
163                 fifo_cnt -= len;
164                 host->sg_miter.consumed = len;
165
166                 buf = host->sg_miter.addr;
167
168                 while (len) {
169                         scratch = mshci_readl(host, MSHCI_FIFODAT);
170
171                         /* copy one full 32-bit FIFO word */
172
173                         *((u32 *)buf) = scratch;
174                         buf += 4;
175                         len -= 4;
176                 }
177         }
178
179         sg_miter_stop(&host->sg_miter);
180
181         local_irq_restore(flags);
182 }
183
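/*
 * PIO write path: refill the data FIFO from the scatter-gather buffers,
 * packing bytes into 32-bit words before writing them to MSHCI_FIFODAT.
 */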
184 static void mshci_write_block_pio(struct mshci_host *host)
185 {
186         unsigned long flags;
187         size_t fifo_cnt, len, chunk;
188         u32 scratch;
189         u8 *buf;
190
191         DBG("PIO writing\n");
192
193         fifo_cnt = 64;
194
195         fifo_cnt *= FIFO_WIDTH;
196         chunk = 0;
197         scratch = 0;
198
199         local_irq_save(flags);
200
201         while (fifo_cnt) {
202                 if (!sg_miter_next(&host->sg_miter)) {
203
204                         /* Even though the transfer is complete,
205                          * a TXDR interrupt can occur again.
206                          * So check whether there really is no next
207                          * sg buffer, or whether the DTO interrupt
208                          * simply has not occurred yet.
209                          */
210
211                         if ((host->data->blocks * host->data->blksz) ==
212                                         host->data_transfered)
213                                 break; /* transfer done but DTO not yet */
214                         BUG();
215                 }
216                 len = min(host->sg_miter.length, fifo_cnt);
217
218                 fifo_cnt -= len;
219                 host->sg_miter.consumed = len;
220                 host->data_transfered += len;
221
222                 buf = (host->sg_miter.addr);
223
224                 while (len) {
225                         scratch |= (u32)*buf << (chunk * 8);
226
227                         buf++;
228                         chunk++;
229                         len--;
230
231                         if ((chunk == 4) || ((len == 0) && (fifo_cnt == 0))) {
232                                 mshci_writel(host, scratch, MSHCI_FIFODAT);
233                                 chunk = 0;
234                                 scratch = 0;
235                         }
236                 }
237         }
238
239         sg_miter_stop(&host->sg_miter);
240
241         local_irq_restore(flags);
242 }
243
244 static void mshci_transfer_pio(struct mshci_host *host)
245 {
246         BUG_ON(!host->data);
247
248         if (host->blocks == 0)
249                 return;
250
251         if (host->data->flags & MMC_DATA_READ)
252                 mshci_read_block_pio(host);
253         else
254                 mshci_write_block_pio(host);
255
256         DBG("PIO transfer complete.\n");
257 }
258
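/*
 * Fill in one IDMAC descriptor: des0 carries the control flags, des1
 * the buffer length, des2 the buffer address and des3 the physical
 * address of the next descriptor in the chain.
 */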
259 static void mshci_set_mdma_desc(u8 *desc_vir, u8 *desc_phy,
260                                 u32 des0, u32 des1, u32 des2)
261 {
262         ((struct mshci_idmac *)(desc_vir))->des[0] = des0;
263         ((struct mshci_idmac *)(desc_vir))->des[1] = des1;
264         ((struct mshci_idmac *)(desc_vir))->des[2] = des2;
265         ((struct mshci_idmac *)(desc_vir))->des[3] = (u32)desc_phy +
266                                         sizeof(struct mshci_idmac);
267 }
268
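/*
 * Build the IDMAC descriptor chain for a request: map the scatter-gather
 * list for DMA, write one descriptor per sg entry, flag the first and
 * last descriptors, and map the descriptor table so the controller can
 * fetch it from memory.
 */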
269 static int mshci_mdma_table_pre(struct mshci_host *host,
270         struct mmc_data *data)
271 {
272         int direction;
273
274         u8 *desc_vir, *desc_phy;
275         dma_addr_t addr;
276         int len;
277
278         struct scatterlist *sg;
279         int i;
280         u32 des_flag;
281         u32 size_idmac = sizeof(struct mshci_idmac);
282
283         if (data->flags & MMC_DATA_READ)
284                 direction = DMA_FROM_DEVICE;
285         else
286                 direction = DMA_TO_DEVICE;
287
288         host->sg_count = dma_map_sg(mmc_dev(host->mmc),
289                 data->sg, data->sg_len, direction);
290         if (host->sg_count == 0)
291                 goto fail;
292
293         desc_vir = host->idma_desc;
294
295         /* map the descriptor table to obtain its physical (DMA) address */
296         host->idma_addr = dma_map_single(mmc_dev(host->mmc),
297                 host->idma_desc, 128 * size_idmac, DMA_TO_DEVICE);
298         if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
299                 goto unmap_entries;
300         BUG_ON(host->idma_addr & 0x3);
301
302         desc_phy = (u8 *)host->idma_addr;
303
304         for_each_sg(data->sg, sg, host->sg_count, i) {
305                 addr = sg_dma_address(sg);
306                 len = sg_dma_len(sg);
307
308                 /* tran, valid */
309                 des_flag = (MSHCI_IDMAC_OWN|MSHCI_IDMAC_CH);
310                 des_flag |= (i==0) ? MSHCI_IDMAC_FS:0;
311
312                 mshci_set_mdma_desc(desc_vir, desc_phy, des_flag, len, addr);
313                 desc_vir += size_idmac;
314                 desc_phy += size_idmac;
315
316                 /*
317                  * If this triggers then we have a calculation bug
318                  * somewhere. :/
319                  */
320                 WARN_ON((desc_vir - host->idma_desc) > 128 * size_idmac);
321         }
322
323         /*
324          * Add a terminating flag.
325          */
326         ((struct mshci_idmac *)(desc_vir-size_idmac))->des[0] |= MSHCI_IDMAC_LD;
327
328         /* map again so the updated descriptors are flushed to memory */
329         host->idma_addr = dma_map_single(mmc_dev(host->mmc),
330                 host->idma_desc, 128 * size_idmac, DMA_TO_DEVICE);
331         if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
332                 goto unmap_entries;
333         BUG_ON(host->idma_addr & 0x3);
334
335         return 0;
336
337 unmap_entries:
338         dma_unmap_sg(mmc_dev(host->mmc), data->sg,
339                 data->sg_len, direction);
340 fail:
341         return -EINVAL;
342 }
343
344 static void mshci_idma_table_post(struct mshci_host *host,
345         struct mmc_data *data)
346 {
347         int direction;
348
349         if (data->flags & MMC_DATA_READ)
350                 direction = DMA_FROM_DEVICE;
351         else
352                 direction = DMA_TO_DEVICE;
353
354         dma_unmap_single(mmc_dev(host->mmc), host->idma_addr,
355                 (128 * 2 + 1) * 4, DMA_TO_DEVICE);
356
357         dma_unmap_sg(mmc_dev(host->mmc), data->sg,
358                 data->sg_len, direction);
359 }
360
361 static u32 mshci_calc_timeout(struct mshci_host *host, struct mmc_data *data)
362 {
363         return 0xffffffff; /* this value SHOULD be optimized */
364 }
365
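/*
 * Adjust the interrupt mask for the upcoming transfer depending on
 * whether it will be handled by the IDMAC or by PIO.
 */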
366 static void mshci_set_transfer_irqs(struct mshci_host *host)
367 {
368         u32 dma_irqs = INTMSK_DMA;
369         u32 pio_irqs = INTMSK_TXDR | INTMSK_RXDR;
370
371         if (host->flags & MSHCI_REQ_USE_DMA) {
372                 /* Workaround for DDR mode: also mask the DCRC interrupt */
373                 if (mshci_readl(host, MSHCI_UHS_REG) & (1 << 16))
374                         dma_irqs |= INTMSK_DCRC;
375                 /* clear interrupts for PIO */
376                 mshci_clear_set_irqs(host, dma_irqs, 0);
377         } else {
378                 /* Workaround for DDR mode: keep the DCRC interrupt masked */
379                 if (mshci_readl(host, MSHCI_UHS_REG) & (1 << 16))
380                         mshci_clear_set_irqs(host, INTMSK_DCRC, pio_irqs);
381                 else
382                         mshci_clear_set_irqs(host, 0, pio_irqs);
383         }
384 }
385
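/*
 * Prepare the controller for a data transfer: program the timeout,
 * reset the FIFO, choose between IDMAC and PIO (falling back to PIO
 * for buffers the IDMAC cannot handle), and program the block size
 * and total byte count.
 */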
386 static void mshci_prepare_data(struct mshci_host *host, struct mmc_data *data)
387 {
388         u32 count;
389         u32 ret;
390
391         WARN_ON(host->data);
392
393         if (data == NULL)
394                 return;
395
396         BUG_ON(data->blksz * data->blocks > (host->mmc->max_req_size *
397                                         host->mmc->max_hw_segs));
398         BUG_ON(data->blksz > host->mmc->max_blk_size);
399         BUG_ON(data->blocks > 400000);
400
401         host->data = data;
402         host->data_early = 0;
403
404         count = mshci_calc_timeout(host, data);
405         mshci_writel(host, count, MSHCI_TMOUT);
406
407         mshci_reset(host, FIFO_RESET);
408
409         if (host->flags & (MSHCI_USE_IDMA))
410                 host->flags |= MSHCI_REQ_USE_DMA;
411
412         /*
413          * FIXME: This doesn't account for merging when mapping the
414          * scatterlist.
415          */
416         if (host->flags & MSHCI_REQ_USE_DMA) {
417                 /* The MSHC IDMAC can't transfer buffers that are not
418                  * 4-byte aligned or whose length is not a multiple of 4. */
419                 int i;
420                 struct scatterlist *sg;
421
422                 for_each_sg(data->sg, sg, data->sg_len, i) {
423                         if (sg->length & 0x3) {
424                                 DBG("Reverting to PIO because of "
425                                         "transfer size (%d)\n",
426                                         sg->length);
427                                 host->flags &= ~MSHCI_REQ_USE_DMA;
428                                 break;
429                         } else if (sg->offset & 0x3) {
430                                 DBG("Reverting to PIO because of "
431                                         "bad alignment\n");
432                                 host->flags &= ~MSHCI_REQ_USE_DMA;
433                                 break;
434                         }
435                 }
436         }
437
438         if (host->flags & MSHCI_REQ_USE_DMA) {
439                 ret = mshci_mdma_table_pre(host, data);
440                 if (ret) {
441                         /*
442                          * This only happens when someone fed
443                          * us an invalid request.
444                          */
445                         WARN_ON(1);
446                         host->flags &= ~MSHCI_REQ_USE_DMA;
447                 } else {
448                         mshci_writel(host, host->idma_addr,
449                                 MSHCI_DBADDR);
450                 }
451         }
452
453         if (host->flags & MSHCI_REQ_USE_DMA) {
454                 /* enable DMA, IDMA interrupts and IDMAC */
455                 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) |
456                                         ENABLE_IDMAC|DMA_ENABLE),MSHCI_CTRL);
457                 mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
458                                         (BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)),
459                                         MSHCI_BMOD);
460                 mshci_writel(host, INTMSK_IDMAC_ERROR, MSHCI_IDINTEN);
461         }
462
463         if (!(host->flags & MSHCI_REQ_USE_DMA)) {
464                 int flags;
465
466                 flags = SG_MITER_ATOMIC;
467                 if (host->data->flags & MMC_DATA_READ)
468                         flags |= SG_MITER_TO_SG;
469                 else
470                         flags |= SG_MITER_FROM_SG;
471
472                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
473                 host->blocks = data->blocks;
474
475                 DBG("starting transfer in PIO mode\n");
476         }
477         /* reset the transferred byte count; it is only used for PIO writes */
478         host->data_transfered = 0;
479         mshci_set_transfer_irqs(host);
480
481         mshci_writel(host, data->blksz, MSHCI_BLKSIZ);
482         mshci_writel(host, (data->blocks * data->blksz), MSHCI_BYTCNT);
483 }
484
485 static u32 mshci_set_transfer_mode(struct mshci_host *host,
486         struct mmc_data *data)
487 {
488         u32 ret=0;
489
490         if (data == NULL) {
491                 return ret;
492         }
493
494         WARN_ON(!host->data);
495
496         /* this cmd has data to transmit */
497         ret |= CMD_DATA_EXP_BIT;
498
499         if (data->flags & MMC_DATA_WRITE)
500                 ret |= CMD_RW_BIT;
501         if (data->flags & MMC_DATA_STREAM)
502                 ret |= CMD_TRANSMODE_BIT;
503
504         return ret;
505 }
506
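/*
 * Complete a data transfer: tear down the IDMAC state, account the
 * transferred bytes (or zero them on error), then either send the stop
 * command or schedule the finish tasklet.
 */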
507 static void mshci_finish_data(struct mshci_host *host)
508 {
509         struct mmc_data *data;
510
511         BUG_ON(!host->data);
512
513         data = host->data;
514         host->data = NULL;
515
516         if (host->flags & MSHCI_REQ_USE_DMA) {
517                 mshci_idma_table_post(host, data);
518                 /* disable IDMAC and DMA interrupt */
519                 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) &
520                                 ~(DMA_ENABLE|ENABLE_IDMAC)), MSHCI_CTRL);
521                 /* mask all interrupt source of IDMAC */
522                 mshci_writel(host, 0x0, MSHCI_IDINTEN);
523         }
524
525         if (data->error) {
526                 mshci_reset(host, DMA_RESET);
527                 data->bytes_xfered = 0;
528         }
529         else
530                 data->bytes_xfered = data->blksz * data->blocks;
531
532         if (data->stop)
533                 mshci_send_command(host, data->stop);
534         else
535                 tasklet_schedule(&host->finish_tasklet);
536 }
537
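/*
 * Gate or ungate the card clock: write MSHCI_CLKENA, then latch the
 * change with a "clock only" command and busy-wait until the start bit
 * clears.
 */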
538 static void mshci_clock_onoff(struct mshci_host *host, bool val)
539 {
540         volatile u32 loop_count = 0x100000;
541
542         if (val)
543                 mshci_writel(host, CLK_ENABLE, MSHCI_CLKENA);
544         else
545                 mshci_writel(host, CLK_DISABLE, MSHCI_CLKENA);
546
547         mshci_writel(host, 0, MSHCI_CMD);
548         mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
549
550         do {
551                 if (!(mshci_readl(host, MSHCI_CMD) & CMD_STRT_BIT))
552                         break;
553                 loop_count--;
554         } while (loop_count);
555
556         if (loop_count == 0) {
557                 printk(KERN_ERR "%s: failed to turn clock %s.\n",
558                         mmc_hostname(host->mmc), val ? "ON" : "OFF");
559         }
560 }
561
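/*
 * Issue a command to the card: prepare any attached data transfer,
 * program the argument register, translate the MMC response flags into
 * command register bits and write the command with the start bit set.
 */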
562 static void mshci_send_command(struct mshci_host *host, struct mmc_command *cmd)
563 {
564         int flags,ret;
565
566         WARN_ON(host->cmd);
567
568         /* disable interrupt before issuing cmd to the card. */
569         mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) & ~INT_ENABLE),
570                                         MSHCI_CTRL);
571
572         mod_timer(&host->timer, jiffies + 10 * HZ);
573
574         host->cmd = cmd;
575
576         mshci_prepare_data(host, cmd->data);
577
578         mshci_writel(host, cmd->arg, MSHCI_CMDARG);
579
580         flags = mshci_set_transfer_mode(host, cmd->data);
581
582         if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
583                 printk(KERN_ERR "%s: Unsupported response type!\n",
584                         mmc_hostname(host->mmc));
585                 cmd->error = -EINVAL;
586                 tasklet_schedule(&host->finish_tasklet);
587                 return;
588         }
589
590         if (cmd->flags & MMC_RSP_PRESENT) {
591                 flags |= CMD_RESP_EXP_BIT;
592                 if (cmd->flags & MMC_RSP_136)
593                         flags |= CMD_RESP_LENGTH_BIT;
594         }
595         if (cmd->flags & MMC_RSP_CRC)
596                 flags |= CMD_CHECK_CRC_BIT;
597         flags |= (cmd->opcode | CMD_STRT_BIT | CMD_WAIT_PRV_DAT_BIT);
598
599         ret = mshci_readl(host, MSHCI_CMD);
600         if (ret & CMD_STRT_BIT)
601                 printk(KERN_ERR "CMD busy. current cmd %d. last cmd reg 0x%x\n",
602                         cmd->opcode, ret);
603
604         mshci_writel(host, flags, MSHCI_CMD);
605
606         /* re-enable interrupts now that the command has been issued. */
607         mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
608                                         MSHCI_CTRL);
609 }
610
611 static void mshci_finish_command(struct mshci_host *host)
612 {
613         int i;
614
615         BUG_ON(host->cmd == NULL);
616
617         if (host->cmd->flags & MMC_RSP_PRESENT) {
618                 if (host->cmd->flags & MMC_RSP_136) {
619                         /* The long (136-bit) response is returned in
620                          * reverse word order, so read RESP3..RESP0.
621                          */
622                         for (i = 0;i < 4;i++) {
623                                 host->cmd->resp[i] = mshci_readl(host,
624                                                 MSHCI_RESP0 + (3-i)*4);
625                         }
626                 } else {
627                         host->cmd->resp[0] = mshci_readl(host, MSHCI_RESP0);
628                 }
629         }
630
631         host->cmd->error = 0;
632
633         /* if data interrupt occurs earlier than command interrupt */
634         if (host->data && host->data_early)
635                 mshci_finish_data(host);
636
637         if (!host->cmd->data)
638                 tasklet_schedule(&host->finish_tasklet);
639
640         host->cmd = NULL;
641 }
642
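/*
 * Change the card clock rate: gate the clock, pick the smallest divider
 * for which max_clk / (2 * div) does not exceed the requested rate,
 * latch it with a "clock only" command and ungate the clock again.
 */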
643 static void mshci_set_clock(struct mshci_host *host, unsigned int clock)
644 {
645         int div;
646         volatile u32 loop_count;
647
648         if (clock == host->clock)
649                 return;
650
651         /* before changing the clock rate, the clock must be gated off */
652         mshci_clock_onoff(host, CLK_DISABLE);
653
654         if (clock == 0)
655                 goto out;
656
657         if (clock >= host->max_clk) {
658                 div = 0;
659         } else {
660                 for (div = 1;div < 255;div++) {
661                         if ((host->max_clk / (div<<1)) <= clock)
662                                 break;
663                 }
664         }
665
666         mshci_writel(host, div, MSHCI_CLKDIV);
667
668         mshci_writel(host, 0, MSHCI_CMD);
669         mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
670         loop_count = 0x10000;
671
672         do {
673                 if (!(mshci_readl(host, MSHCI_CMD) & CMD_STRT_BIT))
674                         break;
675                 loop_count--;
676         } while(loop_count);
677
678         if (loop_count == 0) {
679                 printk(KERN_ERR "%s: failed to change clock.\n",
680                         mmc_hostname(host->mmc));
681         }
682         mshci_writel(host, mshci_readl(host, MSHCI_CMD)&(~CMD_SEND_CLK_ONLY),
683                                         MSHCI_CMD);
684
685         mshci_clock_onoff(host, CLK_ENABLE);
686
687 out:
688         host->clock = clock;
689 }
690
691 static void mshci_set_power(struct mshci_host *host, unsigned short power)
692 {
693         u8 pwr;
694         u8 pwr;
695
696         /* any valid voltage means "on"; (unsigned short)-1 means "off" */
697         pwr = (power == (unsigned short)-1) ? 0 : 1;
698         if (host->pwr == pwr)
699                 return;
700
701         host->pwr = pwr;
702
703         if (pwr == 0)
704                 mshci_writel(host, 0x0, MSHCI_PWREN);
705         else
706                 mshci_writel(host, 0x1, MSHCI_PWREN);
707 }
708
709 /*****************************************************************************\
710  *                                                                           *
711  * MMC callbacks                                                             *
712  *                                                                           *
713 \*****************************************************************************/
714
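/*
 * mmc_host_ops request callback: start a new request, failing it with
 * -ENOMEDIUM when no card is present (unless the broken-card-detection
 * quirk is set), otherwise send its first command.
 */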
715 static void mshci_request(struct mmc_host *mmc, struct mmc_request *mrq)
716 {
717         struct mshci_host *host;
718         bool present;
719         unsigned long flags;
720
721         host = mmc_priv(mmc);
722
723         spin_lock_irqsave(&host->lock, flags);
724
725         WARN_ON(host->mrq != NULL);
726
727         host->mrq = mrq;
728
729         present = !(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT);
730
731         if (host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION)
732                 present = true;
733
734         if (!present || host->flags & MSHCI_DEVICE_DEAD) {
735                 host->mrq->cmd->error = -ENOMEDIUM;
736                 tasklet_schedule(&host->finish_tasklet);
737         } else {
738                 mshci_send_command(host, mrq->cmd);
739         }
740
741         mmiowb();
742         spin_unlock_irqrestore(&host->lock, flags);
743 }
744
745 static void mshci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
746 {
747         struct mshci_host *host;
748         unsigned long flags;
749
750         host = mmc_priv(mmc);
751
752         spin_lock_irqsave(&host->lock, flags);
753
754         if (host->flags & MSHCI_DEVICE_DEAD)
755                 goto out;
756
757         if (ios->power_mode == MMC_POWER_OFF)
758                 mshci_reinit(host);
759
760         if (host->ops->set_ios)
761                 host->ops->set_ios(host, ios);
762
763         mshci_set_clock(host, ios->clock);
764
765         if (ios->power_mode == MMC_POWER_OFF)
766                 mshci_set_power(host, -1);
767         else
768                 mshci_set_power(host, ios->vdd);
769
770         if (ios->bus_width == MMC_BUS_WIDTH_8)
771                 mshci_writel(host, (0x1<<16), MSHCI_CTYPE);
772         else if (ios->bus_width == MMC_BUS_WIDTH_4)
773                 mshci_writel(host, (0x1<<0), MSHCI_CTYPE);
774         else if (ios->bus_width == MMC_BUS_WIDTH_8_DDR) {
775                 mshci_writel(host, (0x1<<16), MSHCI_CTYPE);
776                 mshci_writel(host, (0x1<<16), MSHCI_UHS_REG);
777         } else if (ios->bus_width == MMC_BUS_WIDTH_4_DDR) {
778                 mshci_writel(host, (0x1<<0), MSHCI_CTYPE);
779                 mshci_writel(host, (0x1<<16), MSHCI_UHS_REG);
780         } else
781                 mshci_writel(host, (0x0<<0), MSHCI_CTYPE);
782 out:
783         mmiowb();
784         spin_unlock_irqrestore(&host->lock, flags);
785 }
786
787 static int mshci_get_ro(struct mmc_host *mmc)
788 {
789         struct mshci_host *host;
790         unsigned long flags;
791         int wrtprt;
792
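        /* Write-protect detection is currently short-circuited: the early
         * return below always reports the card as writable. */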
793         return 0;
794
795         host = mmc_priv(mmc);
796
797         spin_lock_irqsave(&host->lock, flags);
798
799         if (host->quirks & MSHCI_QUIRK_ALWAYS_WRITABLE)
800                 wrtprt = 0;
801         else if (host->quirks & MSHCI_QUIRK_NO_WP_BIT)
802                 wrtprt = host->ops->get_ro(mmc) ? 0:WRTPRT_ON;
803         else if (host->flags & MSHCI_DEVICE_DEAD)
804                 wrtprt = 0;
805         else
806                 wrtprt = mshci_readl(host, MSHCI_WRTPRT);
807
808         spin_unlock_irqrestore(&host->lock, flags);
809
810         return (wrtprt & WRTPRT_ON);
811 }
812
813 static void mshci_enable_sdio_irq(struct mmc_host *mmc, int enable)
814 {
815         struct mshci_host *host;
816         unsigned long flags;
817
818         host = mmc_priv(mmc);
819
820         spin_lock_irqsave(&host->lock, flags);
821
822         if (host->flags & MSHCI_DEVICE_DEAD)
823                 goto out;
824
825         if (enable)
826                 mshci_unmask_irqs(host, SDIO_INT_ENABLE);
827         else
828                 mshci_mask_irqs(host, SDIO_INT_ENABLE);
829 out:
830         mmiowb();
831
832         spin_unlock_irqrestore(&host->lock, flags);
833 }
834
835 static struct mmc_host_ops mshci_ops = {
836         .request        = mshci_request,
837         .set_ios        = mshci_set_ios,
838         .get_ro         = mshci_get_ro,
839         .enable_sdio_irq = mshci_enable_sdio_irq,
840 };
841
842 /*****************************************************************************\
843  *                                                                           *
844  * Tasklets                                                                  *
845  *                                                                           *
846 \*****************************************************************************/
847
848 static void mshci_tasklet_card(unsigned long param)
849 {
850         struct mshci_host *host;
851         unsigned long flags;
852
853         host = (struct mshci_host*)param;
854
855         spin_lock_irqsave(&host->lock, flags);
856
857         if (mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT) {
858                 if (host->mrq) {
859                         printk(KERN_ERR "%s: Card removed during transfer!\n",
860                                 mmc_hostname(host->mmc));
861                         printk(KERN_ERR "%s: Resetting controller.\n",
862                                 mmc_hostname(host->mmc));
863
864                         host->mrq->cmd->error = -ENOMEDIUM;
865                         tasklet_schedule(&host->finish_tasklet);
866                 }
867         }
868
869         spin_unlock_irqrestore(&host->lock, flags);
870
871         mmc_detect_change(host->mmc, msecs_to_jiffies(100));
872 }
873
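/*
 * Finish tasklet: runs once a request has completed or failed, resets
 * the FIFO after errors, clears the per-request state and reports the
 * result to the MMC core.
 */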
874 static void mshci_tasklet_finish(unsigned long param)
875 {
876         struct mshci_host *host;
877         unsigned long flags;
878         struct mmc_request *mrq;
879
880         host = (struct mshci_host*)param;
881
882         spin_lock_irqsave(&host->lock, flags);
883
884         del_timer(&host->timer);
885
886         mrq = host->mrq;
887
888         /*
889          * The controller needs a reset of internal state machines
890          * upon error conditions.
891          */
892         if (!(host->flags & MSHCI_DEVICE_DEAD) &&
893                 (mrq->cmd->error ||
894                  (mrq->data && (mrq->data->error ||
895                   (mrq->data->stop && mrq->data->stop->error))))) {
896
897                 /* Reset the FIFO so that the next request starts
898                  * from a clean controller state. */
899                 mshci_reset(host, FIFO_RESET);
900         }
901
902         host->mrq = NULL;
903         host->cmd = NULL;
904         host->data = NULL;
905
906         mmiowb();
907         spin_unlock_irqrestore(&host->lock, flags);
908
909         mmc_request_done(host->mmc, mrq);
910 }
911
912 static void mshci_timeout_timer(unsigned long data)
913 {
914         struct mshci_host *host;
915         unsigned long flags;
916
917         host = (struct mshci_host*)data;
918
919         spin_lock_irqsave(&host->lock, flags);
920
921         if (host->mrq) {
922                 printk(KERN_ERR "%s: Timeout waiting for hardware "
923                         "interrupt.\n", mmc_hostname(host->mmc));
924                 mshci_dumpregs(host);
925
926                 if (host->data) {
927                         host->data->error = -ETIMEDOUT;
928                         mshci_finish_data(host);
929                 } else {
930                         if (host->cmd)
931                                 host->cmd->error = -ETIMEDOUT;
932                         else
933                                 host->mrq->cmd->error = -ETIMEDOUT;
934
935                         tasklet_schedule(&host->finish_tasklet);
936                 }
937         }
938
939         mmiowb();
940         spin_unlock_irqrestore(&host->lock, flags);
941 }
942
943 /*****************************************************************************\
944  *                                                                           *
945  * Interrupt handling                                                        *
946  *                                                                           *
947 \*****************************************************************************/
948
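/*
 * Handle command-related interrupt bits: map response timeout and CRC
 * errors to errno values and finish the command on command done.
 */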
949 static void mshci_cmd_irq(struct mshci_host *host, u32 intmask)
950 {
951         BUG_ON(intmask == 0);
952
953         if (!host->cmd) {
954                 printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
955                         "though no command operation was in progress.\n",
956                         mmc_hostname(host->mmc), (unsigned)intmask);
957                 mshci_dumpregs(host);
958                 return;
959         }
960
961         if (intmask & INTMSK_RTO)
962                 host->cmd->error = -ETIMEDOUT;
963         else if (intmask & (INTMSK_RCRC | INTMSK_RE))
964                 host->cmd->error = -EILSEQ;
965
966         if (host->cmd->error) {
967                 tasklet_schedule(&host->finish_tasklet);
968                 return;
969         }
970
971         if (intmask & INTMSK_CDONE)
972                 mshci_finish_command(host);
973 }
974
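/*
 * Handle data-related interrupt bits from either the main interrupt
 * status (MINT) or the IDMAC status register: map error bits to errno
 * values, drive the PIO state machine and finish the transfer on DTO.
 */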
975 static void mshci_data_irq(struct mshci_host *host, u32 intmask, u8 intr_src)
976 {
977         BUG_ON(intmask == 0);
978
979         if (!host->data) {
980                 /*
981                  * The "data complete" interrupt is also used to
982                  * indicate that a busy state has ended. See comment
983                  * above in mshci_cmd_irq().
984                  */
985                 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
986                         if (intmask & INTMSK_DTO) {
987                                 mshci_finish_command(host);
988                                 return;
989                         }
990                 }
991
992                 printk(KERN_ERR "%s: Got data interrupt 0x%08x from %s "
993                         "even though no data operation was in progress.\n",
994                         mmc_hostname(host->mmc), (unsigned)intmask,
995                         intr_src ? "MINT":"IDMAC");
996                 mshci_dumpregs(host);
997
998                 return;
999         }
1000         if (intr_src == INT_SRC_MINT) {
1001                 if (intmask & INTMSK_DATA_TIMEOUT)
1002                         host->data->error = -ETIMEDOUT;
1003                 else if (intmask & INTMSK_DATA_CRC)
1004                         host->data->error = -EILSEQ;
1005                 else if (intmask & INTMSK_FRUN) {
1006                         printk(KERN_ERR "%s: FIFO underrun/overrun error\n",
1007                                         mmc_hostname(host->mmc));
1008                         host->data->error = -EIO;
1009                 }
1010         } else {
1011                 if (intmask & (IDSTS_FBE | IDSTS_CES | IDSTS_DU)) {
1012                         printk(KERN_ERR "%s: Fatal Bus error on DMA\n",
1013                                         mmc_hostname(host->mmc));
1014                         host->data->error = -EIO;
1015                 }
1016         }
1017
1018         if (host->data->error)
1019                 mshci_finish_data(host);
1020         else {
1021                 if (!(host->flags & MSHCI_REQ_USE_DMA) &&
1022                                 (((host->data->flags & MMC_DATA_READ)&&
1023                                 (intmask & (INTMSK_RXDR | INTMSK_DTO))) ||
1024                                 ((host->data->flags & MMC_DATA_WRITE)&&
1025                                         (intmask & (INTMSK_TXDR)))))
1026                         mshci_transfer_pio(host);
1027
1028                 if (intmask & INTMSK_DTO) {
1029                         if (host->cmd) {
1030                                 /*
1031                                  * Data managed to finish before the
1032                                  * command completed. Make sure we do
1033                                  * things in the proper order.
1034                                  */
1035                                 host->data_early = 1;
1036                         } else {
1037                                 mshci_finish_data(host);
1038                         }
1039                 }
1040         }
1041 }
1042
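/*
 * Top-level interrupt handler: read and acknowledge the masked
 * interrupt status, then dispatch card-detect, command, data, SDIO and
 * IDMAC interrupt sources.
 */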
1043 static irqreturn_t mshci_irq(int irq, void *dev_id)
1044 {
1045         irqreturn_t result;
1046         struct mshci_host* host = dev_id;
1047         u32 intmask;
1048         int cardint = 0;
1049         int timeout = 100;
1050
1051         spin_lock(&host->lock);
1052
1053         intmask = mshci_readl(host, MSHCI_MINTSTS);
1054
1055         if (!intmask || intmask == 0xffffffff) {
1056                 /* check if there is an interrupt from the IDMAC */
1057                 intmask = mshci_readl(host, MSHCI_IDSTS);
1058                 if (intmask) {
1059                         mshci_writel(host, intmask,MSHCI_IDSTS);
1060                         mshci_data_irq(host, intmask, INT_SRC_IDMAC);
1061                         result = IRQ_HANDLED;
1062                         goto out;
1063                 }
1064                 result = IRQ_NONE;
1065                 goto out;
1066         }
1067         DBG("*** %s got interrupt: 0x%08x\n",
1068                 mmc_hostname(host->mmc), intmask);
1069
1070         mshci_writel(host, intmask, MSHCI_RINTSTS);
1071
1072         if (intmask & (INTMSK_CDETECT))
1073                 tasklet_schedule(&host->card_tasklet);
1074
1075         intmask &= ~INTMSK_CDETECT;
1076
1077         if (intmask & CMD_STATUS) {
1078                 if (!(intmask & INTMSK_CDONE) && (intmask & INTMSK_RTO)) {
1079                         /*
1080                          * On a command timeout error the command done
1081                          * interrupt also fires, but later than the
1082                          * error interrupt, so wait for it here.
1083                          */
1084                         while (--timeout &&
1085                                 !(mshci_readl(host, MSHCI_MINTSTS)
1086                                   & INTMSK_CDONE))
1087                                 ; /* busy-wait for CDONE */
1088                         if (!timeout)
1089                                 printk(KERN_ERR "*** %s time out for "
1090                                         "CDONE intr\n",
1091                                         mmc_hostname(host->mmc));
1092                         else
1093                                 mshci_writel(host, INTMSK_CDONE,
1094                                         MSHCI_RINTSTS);
1095                         mshci_cmd_irq(host, intmask & CMD_STATUS);
1096                 } else {
1097                         mshci_cmd_irq(host, intmask & CMD_STATUS);
1098                 }
1099         }
1100
1101         if (intmask & DATA_STATUS) {
1102                 if (!(intmask & INTMSK_DTO) && (intmask & INTMSK_DRTO)) {
1103                         /*
1104                          * On a data timeout error the DTO interrupt
1105                          * also fires, but later than the error
1106                          * interrupt, so wait for it here.
1107                          */
1108                         while (--timeout &&
1109                                 !(mshci_readl(host, MSHCI_MINTSTS)
1110                                   & INTMSK_DTO))
1111                                 ; /* busy-wait for DTO */
1112                         if (!timeout)
1113                                 printk(KERN_ERR "*** %s time out for "
1114                                         "DTO intr\n",
1115                                         mmc_hostname(host->mmc));
1116                         else
1117                                 mshci_writel(host, INTMSK_DTO,
1118                                         MSHCI_RINTSTS);
1119                         mshci_data_irq(host, intmask & DATA_STATUS, INT_SRC_MINT);
1120                 } else {
1121                         mshci_data_irq(host, intmask & DATA_STATUS, INT_SRC_MINT);
1122                 }
1123         }
1124
1125         intmask &= ~(CMD_STATUS | DATA_STATUS);
1126
1127         if (intmask & SDIO_INT_ENABLE)
1128                 cardint = 1;
1129
1130         intmask &= ~SDIO_INT_ENABLE;
1131
1132         if (intmask) {
1133                 printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
1134                         mmc_hostname(host->mmc), intmask);
1135                 mshci_dumpregs(host);
1136         }
1137
1138         result = IRQ_HANDLED;
1139
1140         mmiowb();
1141 out:
1142         spin_unlock(&host->lock);
1143
1144         /*
1145          * We have to delay this as it calls back into the driver.
1146          */
1147         if (cardint)
1148                 mmc_signal_sdio_irq(host->mmc);
1149
1150         return result;
1151 }
1152
1153 /*****************************************************************************\
1154  *                                                                           *
1155  * Suspend/resume                                                            *
1156  *                                                                           *
1157 \*****************************************************************************/
1158
1159 #ifdef CONFIG_PM
1160
1161 int mshci_suspend_host(struct mshci_host *host, pm_message_t state)
1162 {
1163         int ret;
1164
1165         mshci_disable_card_detection(host);
1166
1167         ret = mmc_suspend_host(host->mmc);
1168         if (ret)
1169                 return ret;
1170
1171         free_irq(host->irq, host);
1172
1173         if (host->vmmc)
1174                 ret = regulator_disable(host->vmmc);
1175
1176         return ret;
1177 }
1178
1179 EXPORT_SYMBOL_GPL(mshci_suspend_host);
1180
1181 int mshci_resume_host(struct mshci_host *host)
1182 {
1183         int ret;
1184
1185         if (host->vmmc) {
1186                 ret = regulator_enable(host->vmmc);
1187                 if (ret)
1188                         return ret;
1189         }
1190
1191         if (host->flags & (MSHCI_USE_IDMA)) {
1192                 if (host->ops->enable_dma)
1193                         host->ops->enable_dma(host);
1194         }
1195
1196         ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
1197                           mmc_hostname(host->mmc), host);
1198         if (ret)
1199                 return ret;
1200
1201         mshci_init(host);
1202         mmiowb();
1203
1204         ret = mmc_resume_host(host->mmc);
1205         mshci_enable_card_detection(host);
1206
1207         return ret;
1208 }
1209
1210 EXPORT_SYMBOL_GPL(mshci_resume_host);
1211
1212 #endif /* CONFIG_PM */
1213
1214 /*****************************************************************************\
1215  *                                                                           *
1216  * Device allocation/registration                                            *
1217  *                                                                           *
1218 \*****************************************************************************/
1219
1220 struct mshci_host *mshci_alloc_host(struct device *dev,
1221         size_t priv_size)
1222 {
1223         struct mmc_host *mmc;
1224         struct mshci_host *host;
1225
1226         WARN_ON(dev == NULL);
1227
1228         mmc = mmc_alloc_host(sizeof(struct mshci_host) + priv_size, dev);
1229         if (!mmc)
1230                 return ERR_PTR(-ENOMEM);
1231
1232         host = mmc_priv(mmc);
1233         host->mmc = mmc;
1234
1235         return host;
1236 }
1237
1238 EXPORT_SYMBOL_GPL(mshci_alloc_host);
1239
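/*
 * Program the FIFO watermarks: derive the FIFO depth from the reset
 * value of the watermark register and set the RX/TX thresholds to half
 * of it, with a burst size of 8.
 */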
1240 static void mshci_fifo_init(struct mshci_host *host)
1241 {
1242         int fifo_val, fifo_depth, fifo_threshold;
1243
1244         fifo_val = mshci_readl(host, MSHCI_FIFOTH);
1245         fifo_depth = ((fifo_val & RX_WMARK) >> 16) + 1;
1246         fifo_threshold = fifo_depth / 2;
1247         host->fifo_threshold = fifo_threshold;
1248         host->fifo_depth = fifo_threshold * 2;
1249
1250         printk(KERN_INFO "%s: FIFO WMARK FOR RX 0x%x TX 0x%x.\n",
1251                 mmc_hostname(host->mmc), fifo_depth, ((fifo_val & TX_WMARK)>>16)+1);
1252
1253         fifo_val &= ~(RX_WMARK | TX_WMARK | MSIZE_MASK);
1254
1255         fifo_val |= (fifo_threshold | (fifo_threshold<<16));
1256         fifo_val |= MSIZE_8;
1257
1258         mshci_writel(host, fifo_val, MSHCI_FIFOTH);
1259 }
1260
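/*
 * Register the host with the MMC core: set capabilities, clock and
 * transfer limits, tasklets, the timeout timer and the interrupt
 * handler, then initialise the controller and enable card detection.
 */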
1261 int mshci_add_host(struct mshci_host *host)
1262 {
1263         struct mmc_host *mmc;
1264         int ret,count;
1265
1266         WARN_ON(host == NULL);
1267         if (host == NULL)
1268                 return -EINVAL;
1269
1270         mmc = host->mmc;
1271
1272         if (debug_quirks)
1273                 host->quirks = debug_quirks;
1274
1275         mshci_reset(host, RESET_ALL);
1276
1277         host->version = mshci_readl(host, MSHCI_VERID);
1278
1279         /* there are no reasons not to use DMA */
1280         host->flags |= MSHCI_USE_IDMA;
1281
1282         if (host->flags & MSHCI_USE_IDMA) {
1283                 /* Allocate the IDMAC descriptor table (one
1284                  * descriptor per scatter-gather entry). */
1285                 host->idma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
1286                 if (!host->idma_desc) {
1287                         printk(KERN_WARNING
1288                                 "%s: Unable to allocate IDMA "
1289                                 "descriptors. Falling back to PIO.\n",
1290                                 mmc_hostname(mmc));
1291                         host->flags &= ~MSHCI_USE_IDMA;
1292                 }
1293         }
1294
1295         /*
1296          * If we use IDMA, it's up to the caller to set the DMA
1297          * mask; PIO does not use DMA at all, so set a default
1298          * mask here in that case.
1299          */
1300         if (!(host->flags & (MSHCI_USE_IDMA))) {
1301                 host->dma_mask = DMA_BIT_MASK(64);
1302                 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1303         }
1304
1305         printk(KERN_INFO "%s: Version ID 0x%x.\n",
1306                 mmc_hostname(host->mmc), host->version);
1307
1308         host->max_clk = 0;
1309
1310         if (host->max_clk == 0) {
1311                 if (!host->ops->get_max_clock) {
1312                         printk(KERN_ERR
1313                                "%s: Hardware doesn't specify base clock "
1314                                "frequency.\n", mmc_hostname(mmc));
1315                         return -ENODEV;
1316                 }
1317                 host->max_clk = host->ops->get_max_clock(host);
1318         }
1319
1320         /*
1321          * Set host parameters.
1322          */
1323         if(host->ops->get_ro)
1324                 mshci_ops.get_ro = host->ops->get_ro;
1325
1326         mmc->ops = &mshci_ops;
1327         mmc->f_min = host->max_clk / 512;
1328         mmc->f_max = host->max_clk;
1329
1330         mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_4_BIT_DATA;
1331
1332         mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34 |
1333                 MMC_VDD_29_30 | MMC_VDD_30_31;
1334
1335         spin_lock_init(&host->lock);
1336
1337         mmc->max_hw_segs = 128;
1338         mmc->max_phys_segs = 128;
1339
1340         /* Maximum request size in bytes (512 KiB) */
1341         mmc->max_req_size = 524288;
1342
1343         /*
1344          * Maximum segment size. Could be one segment with the maximum number
1345          * of bytes. When doing hardware scatter/gather, each entry cannot
1346          * be larger than 4 KiB though.
1347          */
1348         if (host->flags & MSHCI_USE_IDMA)
1349                 mmc->max_seg_size = 0x1000;
1350         else
1351                 mmc->max_seg_size = mmc->max_req_size;
1352
1353         /*
1354          * from SD spec 2.0 and MMC spec 4.2, block size has been
1355          * fixed to 512 byte
1356          */
1357         mmc->max_blk_size = 512;
1358
1359         /*
1360          * Maximum block count.
1361          */
1362         mmc->max_blk_count = 65535;
1363
1364         /*
1365          * Init tasklets.
1366          */
1367         tasklet_init(&host->card_tasklet,
1368                 mshci_tasklet_card, (unsigned long)host);
1369         tasklet_init(&host->finish_tasklet,
1370                 mshci_tasklet_finish, (unsigned long)host);
1371
1372         setup_timer(&host->timer, mshci_timeout_timer, (unsigned long)host);
1373
1374         ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
1375                 mmc_hostname(mmc), host);
1376         if (ret)
1377                 goto untasklet;
1378
1379         host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
1380         if (IS_ERR(host->vmmc)) {
1381                 printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
1382                 host->vmmc = NULL;
1383         } else {
1384                 regulator_enable(host->vmmc);
1385         }
1386
1387         mshci_init(host);
1388         mshci_fifo_init(host);
1389
1390         mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
1391                         MSHCI_CTRL);
1392
1393         /* set debounce filter value*/
1394         mshci_writel(host, 0xffffff, MSHCI_DEBNCE);
1395
1396         /* clear card type. set 1bit mode */
1397         mshci_writel(host, 0x0, MSHCI_CTYPE);
1398
1399         /* set bus mode register for IDMAC */
1400         if (host->flags & MSHCI_USE_IDMA) {
1401                 mshci_writel(host, BMOD_IDMAC_RESET, MSHCI_BMOD);
1402                 count = 100;
1403                 while ((mshci_readl(host, MSHCI_BMOD) & BMOD_IDMAC_RESET)
1404                         && --count) ; /* wait for the IDMAC reset to clear */
1405
1406                 mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
1407                                 (BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)), MSHCI_BMOD);
1408         }
1409 #ifdef CONFIG_MMC_DEBUG
1410         mshci_dumpregs(host);
1411 #endif
1412
1413         mmiowb();
1414
1415         mmc_add_host(mmc);
1416
1417         printk(KERN_INFO "%s: MSHCI controller on %s [%s] using %s\n",
1418                 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
1419                 (host->flags & MSHCI_USE_IDMA) ? "IDMA" : "PIO");
1420
1421         mshci_enable_card_detection(host);
1422
1423         return 0;
1424
1425 untasklet:
1426         tasklet_kill(&host->card_tasklet);
1427         tasklet_kill(&host->finish_tasklet);
1428
1429         return ret;
1430 }
1431
1432 EXPORT_SYMBOL_GPL(mshci_add_host);
1433
1434 void mshci_remove_host(struct mshci_host *host, int dead)
1435 {
1436         unsigned long flags;
1437
1438         if (dead) {
1439                 spin_lock_irqsave(&host->lock, flags);
1440
1441                 host->flags |= MSHCI_DEVICE_DEAD;
1442
1443                 if (host->mrq) {
1444                         printk(KERN_ERR "%s: Controller removed during "
1445                                 "transfer!\n", mmc_hostname(host->mmc));
1446
1447                         host->mrq->cmd->error = -ENOMEDIUM;
1448                         tasklet_schedule(&host->finish_tasklet);
1449                 }
1450
1451                 spin_unlock_irqrestore(&host->lock, flags);
1452         }
1453
1454         mshci_disable_card_detection(host);
1455
1456         mmc_remove_host(host->mmc);
1457
1458         if (!dead)
1459                 mshci_reset(host, RESET_ALL);
1460
1461         free_irq(host->irq, host);
1462
1463         del_timer_sync(&host->timer);
1464
1465         tasklet_kill(&host->card_tasklet);
1466         tasklet_kill(&host->finish_tasklet);
1467
1468         kfree(host->idma_desc);
1469
1470         host->idma_desc = NULL;
1471         host->align_buffer = NULL;
1472 }
1473
1474 EXPORT_SYMBOL_GPL(mshci_remove_host);
1475
1476 void mshci_free_host(struct mshci_host *host)
1477 {
1478         mmc_free_host(host->mmc);
1479 }
1480
1481 EXPORT_SYMBOL_GPL(mshci_free_host);
1482
1483 /*****************************************************************************\
1484  *                                                                           *
1485  * Driver init/exit                                                          *
1486  *                                                                           *
1487 \*****************************************************************************/
1488
1489 static int __init mshci_drv_init(void)
1490 {
1491         printk(KERN_INFO DRIVER_NAME
1492                 ": Mobile Storage Host Controller Interface driver\n");
1493
1494         return 0;
1495 }
1496
1497 static void __exit mshci_drv_exit(void)
1498 {
1499 }
1500
1501 module_init(mshci_drv_init);
1502 module_exit(mshci_drv_exit);
1503
1504 module_param(debug_quirks, uint, 0444);
1505
1506 MODULE_AUTHOR("Jaehoon Chung <jh80.chung@samsung.com>");
1507 MODULE_DESCRIPTION("Mobile Storage Host Controller Interface core driver");
1508 MODULE_LICENSE("GPL");
1509
1510 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");