2 * linux/drivers/mmc/core/core.c
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/suspend.h>
27 #include <linux/fault-inject.h>
28 #include <linux/random.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/sd.h>
44 static struct workqueue_struct *workqueue;
45 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
48 * Enabling software CRCs on the data blocks can impose a significant (30%)
49 * performance cost, and for other reasons may not always be desired.
50 * So we allow it to be disabled.
53 module_param(use_spi_crc, bool, 0);
56 * We normally treat cards as removed during suspend if they are not
57 * known to be on a non-removable bus, to avoid the risk of writing
58 * back data to a different card after resume. Allow this to be
59 * overridden if necessary.
61 #ifdef CONFIG_MMC_UNSAFE_RESUME
62 bool mmc_assume_removable;
64 bool mmc_assume_removable = 1;
66 EXPORT_SYMBOL(mmc_assume_removable);
67 module_param_named(removable, mmc_assume_removable, bool, 0644);
70 "MMC/SD cards are removable and may be removed during suspend");
73 * Internal function. Schedule delayed work in the MMC work queue.
75 static int mmc_schedule_delayed_work(struct delayed_work *work,
78 return queue_delayed_work(workqueue, work, delay);
82 * Internal function. Flush all scheduled work from the MMC work queue.
84 static void mmc_flush_scheduled_work(void)
86 flush_workqueue(workqueue);
89 #ifdef CONFIG_FAIL_MMC_REQUEST
92 * Internal function. Inject random data errors.
93 * If mmc_data is NULL no errors are injected.
95 static void mmc_should_fail_request(struct mmc_host *host,
96 struct mmc_request *mrq)
98 struct mmc_command *cmd = mrq->cmd;
99 struct mmc_data *data = mrq->data;
100 static const int data_errors[] = {
109 if (cmd->error || data->error ||
110 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
113 data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
114 data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
117 #else /* CONFIG_FAIL_MMC_REQUEST */
119 static inline void mmc_should_fail_request(struct mmc_host *host,
120 struct mmc_request *mrq)
124 #endif /* CONFIG_FAIL_MMC_REQUEST */
127 * mmc_request_done - finish processing an MMC request
128 * @host: MMC host which completed request
129 * @mrq: MMC request which completed
131 * MMC drivers should call this function when they have completed
132 * their processing of a request.
134 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
136 struct mmc_command *cmd = mrq->cmd;
137 int err = cmd->error;
139 if (err && cmd->retries && mmc_host_is_spi(host)) {
140 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
144 if (err && cmd->retries && !mmc_card_removed(host->card)) {
146 * Request starter must handle retries - see
147 * mmc_wait_for_req_done().
152 mmc_should_fail_request(host, mrq);
154 led_trigger_event(host->led, LED_OFF);
156 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
157 mmc_hostname(host), cmd->opcode, err,
158 cmd->resp[0], cmd->resp[1],
159 cmd->resp[2], cmd->resp[3]);
162 pr_debug("%s: %d bytes transferred: %d\n",
164 mrq->data->bytes_xfered, mrq->data->error);
168 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
169 mmc_hostname(host), mrq->stop->opcode,
171 mrq->stop->resp[0], mrq->stop->resp[1],
172 mrq->stop->resp[2], mrq->stop->resp[3]);
178 mmc_host_clk_release(host);
182 EXPORT_SYMBOL(mmc_request_done);
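/*
 * Illustrative sketch (not part of this file): how a host controller
 * driver typically hands a finished request back to the core from its
 * interrupt handler.  "struct my_ctlr" and its fields are hypothetical;
 * only mmc_request_done() is the real entry point.
 */
struct my_ctlr {
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;	/* request currently in flight */
};

static irqreturn_t my_ctlr_irq(int irq, void *dev_id)
{
	struct my_ctlr *host = dev_id;
	struct mmc_request *mrq = host->mrq;

	if (!mrq)
		return IRQ_NONE;

	/*
	 * Read the controller registers and fill in mrq->cmd->resp[],
	 * mrq->cmd->error, mrq->data->bytes_xfered, mrq->data->error, ...
	 */

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
	return IRQ_HANDLED;
}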
185 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
187 #ifdef CONFIG_MMC_DEBUG
189 struct scatterlist *sg;
193 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
194 mmc_hostname(host), mrq->sbc->opcode,
195 mrq->sbc->arg, mrq->sbc->flags);
198 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
199 mmc_hostname(host), mrq->cmd->opcode,
200 mrq->cmd->arg, mrq->cmd->flags);
203 pr_debug("%s: blksz %d blocks %d flags %08x "
204 "tsac %d ms nsac %d\n",
205 mmc_hostname(host), mrq->data->blksz,
206 mrq->data->blocks, mrq->data->flags,
207 mrq->data->timeout_ns / 1000000,
208 mrq->data->timeout_clks);
212 pr_debug("%s: CMD%u arg %08x flags %08x\n",
213 mmc_hostname(host), mrq->stop->opcode,
214 mrq->stop->arg, mrq->stop->flags);
217 WARN_ON(!host->claimed);
222 BUG_ON(mrq->data->blksz > host->max_blk_size);
223 BUG_ON(mrq->data->blocks > host->max_blk_count);
224 BUG_ON(mrq->data->blocks * mrq->data->blksz >
227 #ifdef CONFIG_MMC_DEBUG
229 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
231 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
234 mrq->cmd->data = mrq->data;
235 mrq->data->error = 0;
236 mrq->data->mrq = mrq;
238 mrq->data->stop = mrq->stop;
239 mrq->stop->error = 0;
240 mrq->stop->mrq = mrq;
243 mmc_host_clk_hold(host);
244 led_trigger_event(host->led, LED_FULL);
245 host->ops->request(host, mrq);
248 static void mmc_wait_done(struct mmc_request *mrq)
250 complete(&mrq->completion);
253 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
255 init_completion(&mrq->completion);
256 mrq->done = mmc_wait_done;
257 if (mmc_card_removed(host->card)) {
258 mrq->cmd->error = -ENOMEDIUM;
259 complete(&mrq->completion);
262 mmc_start_request(host, mrq);
266 static void mmc_wait_for_req_done(struct mmc_host *host,
267 struct mmc_request *mrq)
269 struct mmc_command *cmd;
272 wait_for_completion(&mrq->completion);
275 if (!cmd->error || !cmd->retries ||
276 mmc_card_removed(host->card))
279 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
280 mmc_hostname(host), cmd->opcode, cmd->error);
283 host->ops->request(host, mrq);
288 * mmc_pre_req - Prepare for a new request
289 * @host: MMC host to prepare command
290 * @mrq: MMC request to prepare for
291 * @is_first_req: true if there is no previous started request
292 * that may run in parallel to this call, otherwise false
294 * mmc_pre_req() is called prior to mmc_start_req() to let the
295 * host prepare for the new request. Preparation of a request may be
296 * performed while another request is running on the host.
298 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
301 if (host->ops->pre_req) {
302 mmc_host_clk_hold(host);
303 host->ops->pre_req(host, mrq, is_first_req);
304 mmc_host_clk_release(host);
309 * mmc_post_req - Post process a completed request
310 * @host: MMC host to post process command
311 * @mrq: MMC request to post process for
312 * @err: Error, if non-zero, clean up any resources made in pre_req
314 * Let the host post process a completed request. Post processing of
315 * a request may be performed while another request is running.
317 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
320 if (host->ops->post_req) {
321 mmc_host_clk_hold(host);
322 host->ops->post_req(host, mrq, err);
323 mmc_host_clk_release(host);
328 * mmc_start_req - start a non-blocking request
329 * @host: MMC host to start command
330 * @areq: async request to start
331 * @error: out parameter, returns 0 for success, otherwise non-zero
333 * Start a new MMC custom command request for a host.
334 * If there is an ongoing async request, wait for completion
335 * of that request, then start the new one and return.
336 * Does not wait for the new request to complete.
338 * Returns the completed request, NULL in case of none completed.
339 * Wait for an ongoing request (previously started) to complete and
340 * return the completed request. If there is no ongoing request, NULL
341 * is returned without waiting. NULL is not an error condition.
343 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
344 struct mmc_async_req *areq, int *error)
348 struct mmc_async_req *data = host->areq;
350 /* Prepare a new request */
352 mmc_pre_req(host, areq->mrq, !host->areq);
355 mmc_wait_for_req_done(host, host->areq->mrq);
356 err = host->areq->err_check(host->card, host->areq);
360 start_err = __mmc_start_req(host, areq->mrq);
363 mmc_post_req(host, host->areq->mrq, 0);
365 /* Cancel a prepared request if it was not started. */
366 if ((err || start_err) && areq)
367 mmc_post_req(host, areq->mrq, -EINVAL);
378 EXPORT_SYMBOL(mmc_start_req);
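/*
 * Illustrative sketch (not part of this file): the double-buffering
 * pattern mmc_start_req() enables, roughly as used by the block driver.
 * my_prepare_next() and my_finish() are hypothetical helpers; only the
 * calls into the core are real.
 */
static struct mmc_async_req *my_prepare_next(void);
static void my_finish(struct mmc_async_req *areq, int err);

static void my_issue_loop(struct mmc_host *host)
{
	struct mmc_async_req *next, *done;
	int err;

	while ((next = my_prepare_next()) != NULL) {
		/*
		 * Starts 'next' and hands back the previously started
		 * request once it has completed (NULL on the first pass).
		 */
		done = mmc_start_req(host, next, &err);
		if (done)
			my_finish(done, err);
	}

	/* Reap the last outstanding request, if any. */
	done = mmc_start_req(host, NULL, &err);
	if (done)
		my_finish(done, err);
}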
381 * mmc_wait_for_req - start a request and wait for completion
382 * @host: MMC host to start command
383 * @mrq: MMC request to start
385 * Start a new MMC custom command request for a host, and wait
386 * for the command to complete. Does not attempt to parse the response.
389 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
391 __mmc_start_req(host, mrq);
392 mmc_wait_for_req_done(host, mrq);
394 EXPORT_SYMBOL(mmc_wait_for_req);
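/*
 * Illustrative sketch (not part of this file): issuing a synchronous
 * single-block read with mmc_wait_for_req().  "my_read_one_block" is a
 * hypothetical helper; the buffer must be DMA-able and at least 512 bytes.
 */
static int my_read_one_block(struct mmc_card *card, u32 addr, void *buf)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = addr;		/* block or byte address, depending on the card */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, buf, 512);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_set_data_timeout(&data, card);

	mmc_claim_host(card->host);
	mmc_wait_for_req(card->host, &mrq);
	mmc_release_host(card->host);

	if (cmd.error)
		return cmd.error;
	return data.error;
}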
397 * mmc_interrupt_hpi - issue a High Priority Interrupt
398 * @card: the MMC card associated with the HPI transfer
400 * Issue a High Priority Interrupt, then poll the card status
401 * until it is out of the programming state.
403 int mmc_interrupt_hpi(struct mmc_card *card)
407 unsigned long prg_wait;
411 if (!card->ext_csd.hpi_en) {
412 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
416 mmc_claim_host(card->host);
417 err = mmc_send_status(card, &status);
419 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
423 switch (R1_CURRENT_STATE(status)) {
428 * In idle states, HPI is not needed and the caller
429 * can issue the next intended command immediately
435 /* In all other states, it's illegal to issue HPI */
436 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
437 mmc_hostname(card->host), R1_CURRENT_STATE(status));
442 err = mmc_send_hpi_cmd(card, &status);
446 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
448 err = mmc_send_status(card, &status);
450 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
452 if (time_after(jiffies, prg_wait))
457 mmc_release_host(card->host);
460 EXPORT_SYMBOL(mmc_interrupt_hpi);
463 * mmc_wait_for_cmd - start a command and wait for completion
464 * @host: MMC host to start command
465 * @cmd: MMC command to start
466 * @retries: maximum number of retries
468 * Start a new MMC command for a host, and wait for the command
469 * to complete. Return any error that occurred while the command
470 * was executing. Do not attempt to parse the response.
472 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
474 struct mmc_request mrq = {NULL};
476 WARN_ON(!host->claimed);
478 memset(cmd->resp, 0, sizeof(cmd->resp));
479 cmd->retries = retries;
484 mmc_wait_for_req(host, &mrq);
489 EXPORT_SYMBOL(mmc_wait_for_cmd);
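/*
 * Illustrative sketch (not part of this file): issuing a bare command
 * (CMD13, SEND_STATUS) with mmc_wait_for_cmd() and reading the R1
 * response.  The host must already be claimed by the caller.
 */
static int my_get_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
	if (err)
		return err;

	*status = cmd.resp[0];
	return 0;
}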
492 * mmc_set_data_timeout - set the timeout for a data command
493 * @data: data phase for command
494 * @card: the MMC card associated with the data transfer
496 * Computes the data timeout parameters according to the
497 * correct algorithm given the card type.
499 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
504 * SDIO cards only define an upper 1 s limit on access.
506 if (mmc_card_sdio(card)) {
507 data->timeout_ns = 1000000000;
508 data->timeout_clks = 0;
513 * SD cards use a 100 multiplier rather than 10
515 mult = mmc_card_sd(card) ? 100 : 10;
518 * Scale up the multiplier (and therefore the timeout) by
519 * the r2w factor for writes.
521 if (data->flags & MMC_DATA_WRITE)
522 mult <<= card->csd.r2w_factor;
524 data->timeout_ns = card->csd.tacc_ns * mult;
525 data->timeout_clks = card->csd.tacc_clks * mult;
528 * SD cards also have an upper limit on the timeout.
530 if (mmc_card_sd(card)) {
531 unsigned int timeout_us, limit_us;
533 timeout_us = data->timeout_ns / 1000;
534 if (mmc_host_clk_rate(card->host))
535 timeout_us += data->timeout_clks * 1000 /
536 (mmc_host_clk_rate(card->host) / 1000);
538 if (data->flags & MMC_DATA_WRITE)
540 * The MMC spec says: "It is strongly recommended
541 * for hosts to implement more than 500ms
542 * timeout value even if the card indicates
543 * the 250ms maximum busy length." Even the
544 * previous value of 300ms is known to be
545 * insufficient for some cards.
552 * SDHC cards always use these fixed values.
554 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
555 data->timeout_ns = limit_us * 1000;
556 data->timeout_clks = 0;
561 * Some cards require longer data read timeout than indicated in CSD.
562 * Address this by setting the read timeout to a "reasonably high"
563 * value. For the cards tested, 300ms has proven enough. If necessary,
564 * this value can be increased if other problematic cards require this.
566 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
567 data->timeout_ns = 300000000;
568 data->timeout_clks = 0;
572 * Some cards need very high timeouts if driven in SPI mode.
573 * The worst observed timeout was 900ms after writing a
574 * continuous stream of data until the internal logic overflowed its buffers.
577 if (mmc_host_is_spi(card->host)) {
578 if (data->flags & MMC_DATA_WRITE) {
579 if (data->timeout_ns < 1000000000)
580 data->timeout_ns = 1000000000; /* 1s */
582 if (data->timeout_ns < 100000000)
583 data->timeout_ns = 100000000; /* 100ms */
587 EXPORT_SYMBOL(mmc_set_data_timeout);
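/*
 * Worked example (illustrative, not from the original file): for an SD
 * card with tacc_ns = 1500000 (1.5 ms), tacc_clks = 0 and r2w_factor = 2,
 * a write transfer gets mult = 100 << 2 = 400, so timeout_ns becomes
 * 1500000 * 400 = 600000000 ns (600 ms), which the SD limit_us clamp in
 * mmc_set_data_timeout() may then reduce.
 */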
590 * mmc_align_data_size - pads a transfer size to a more optimal value
591 * @card: the MMC card associated with the data transfer
592 * @sz: original transfer size
594 * Pads the original data size with a number of extra bytes in
595 * order to avoid controller bugs and/or performance hits
596 * (e.g. some controllers revert to PIO for certain sizes).
598 * Returns the improved size, which might be unmodified.
600 * Note that this function is only relevant when issuing a
601 * single scatter gather entry.
603 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
606 * FIXME: We don't have a system for the controller to tell
607 * the core about its problems yet, so for now we just 32-bit align the size.
610 sz = ((sz + 3) / 4) * 4;
614 EXPORT_SYMBOL(mmc_align_data_size);
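/*
 * Illustrative example (not from the original file): a 13-byte transfer
 * is padded up to the next 32-bit boundary, so
 *
 *	mmc_align_data_size(card, 13) == 16
 *
 * while already-aligned sizes are returned unchanged.
 */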
617 * __mmc_claim_host - exclusively claim a host
618 * @host: mmc host to claim
619 * @abort: whether or not the operation should be aborted
621 * Claim a host for a set of operations. If @abort is non null and
622 * dereferences to a non-zero value then this will return prematurely with
623 * that non-zero value without acquiring the lock. Returns zero
624 * with the lock held otherwise.
626 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
628 DECLARE_WAITQUEUE(wait, current);
634 add_wait_queue(&host->wq, &wait);
635 spin_lock_irqsave(&host->lock, flags);
637 set_current_state(TASK_UNINTERRUPTIBLE);
638 stop = abort ? atomic_read(abort) : 0;
639 if (stop || !host->claimed || host->claimer == current)
641 spin_unlock_irqrestore(&host->lock, flags);
643 spin_lock_irqsave(&host->lock, flags);
645 set_current_state(TASK_RUNNING);
648 host->claimer = current;
649 host->claim_cnt += 1;
652 spin_unlock_irqrestore(&host->lock, flags);
653 remove_wait_queue(&host->wq, &wait);
654 if (host->ops->enable && !stop && host->claim_cnt == 1)
655 host->ops->enable(host);
659 EXPORT_SYMBOL(__mmc_claim_host);
662 * mmc_try_claim_host - try exclusively to claim a host
663 * @host: mmc host to claim
665 * Returns %1 if the host is claimed, %0 otherwise.
667 int mmc_try_claim_host(struct mmc_host *host)
669 int claimed_host = 0;
672 spin_lock_irqsave(&host->lock, flags);
673 if (!host->claimed || host->claimer == current) {
675 host->claimer = current;
676 host->claim_cnt += 1;
679 spin_unlock_irqrestore(&host->lock, flags);
680 if (host->ops->enable && claimed_host && host->claim_cnt == 1)
681 host->ops->enable(host);
684 EXPORT_SYMBOL(mmc_try_claim_host);
687 * mmc_release_host - release a host
688 * @host: mmc host to release
690 * Release an MMC host, allowing others to claim the host
691 * for their operations.
693 void mmc_release_host(struct mmc_host *host)
697 WARN_ON(!host->claimed);
699 if (host->ops->disable && host->claim_cnt == 1)
700 host->ops->disable(host);
702 spin_lock_irqsave(&host->lock, flags);
703 if (--host->claim_cnt) {
704 /* Release for nested claim */
705 spin_unlock_irqrestore(&host->lock, flags);
708 host->claimer = NULL;
709 spin_unlock_irqrestore(&host->lock, flags);
713 EXPORT_SYMBOL(mmc_release_host);
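/*
 * Illustrative sketch (not part of this file): the usual claim/release
 * bracket around a sequence of commands.  mmc_claim_host() is the
 * non-abortable wrapper around __mmc_claim_host(); "my_do_transaction"
 * is a hypothetical helper.
 */
static int my_do_transaction(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);	/* any command sequence */
	mmc_release_host(card->host);

	return err;
}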
716 * Internal function that does the actual ios call to the host driver,
717 * optionally printing some debug output.
719 static inline void mmc_set_ios(struct mmc_host *host)
721 struct mmc_ios *ios = &host->ios;
723 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
724 "width %u timing %u\n",
725 mmc_hostname(host), ios->clock, ios->bus_mode,
726 ios->power_mode, ios->chip_select, ios->vdd,
727 ios->bus_width, ios->timing);
730 mmc_set_ungated(host);
731 host->ops->set_ios(host, ios);
735 * Control chip select pin on a host.
737 void mmc_set_chip_select(struct mmc_host *host, int mode)
739 mmc_host_clk_hold(host);
740 host->ios.chip_select = mode;
742 mmc_host_clk_release(host);
746 * Sets the host clock to the highest possible frequency that
749 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
751 WARN_ON(hz < host->f_min);
753 if (hz > host->f_max)
756 host->ios.clock = hz;
760 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
762 mmc_host_clk_hold(host);
763 __mmc_set_clock(host, hz);
764 mmc_host_clk_release(host);
767 #ifdef CONFIG_MMC_CLKGATE
769 * This gates the clock by setting it to 0 Hz.
771 void mmc_gate_clock(struct mmc_host *host)
775 spin_lock_irqsave(&host->clk_lock, flags);
776 host->clk_old = host->ios.clock;
778 host->clk_gated = true;
779 spin_unlock_irqrestore(&host->clk_lock, flags);
784 * This restores the clock from gating by using the cached
787 void mmc_ungate_clock(struct mmc_host *host)
790 * We should previously have gated the clock, so the clock shall
791 * be 0 here! The clock may however be 0 during initialization,
792 * when some request operations are performed before setting
793 * the frequency. When ungate is requested in that situation
794 * we just ignore the call.
797 BUG_ON(host->ios.clock);
798 /* This call will also set host->clk_gated to false */
799 __mmc_set_clock(host, host->clk_old);
803 void mmc_set_ungated(struct mmc_host *host)
808 * We've been given a new frequency while the clock is gated,
809 * so make sure we regard this as ungating it.
811 spin_lock_irqsave(&host->clk_lock, flags);
812 host->clk_gated = false;
813 spin_unlock_irqrestore(&host->clk_lock, flags);
817 void mmc_set_ungated(struct mmc_host *host)
823 * Change the bus mode (open drain/push-pull) of a host.
825 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
827 mmc_host_clk_hold(host);
828 host->ios.bus_mode = mode;
830 mmc_host_clk_release(host);
834 * Change data bus width of a host.
836 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
838 mmc_host_clk_hold(host);
839 host->ios.bus_width = width;
841 mmc_host_clk_release(host);
845 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
847 * @low_bits: prefer low bits in boundary cases
849 * This function returns the OCR bit number according to the provided @vdd
850 * value. If conversion is not possible, a negative errno value is returned.
852 * Depending on the @low_bits flag the function prefers low or high OCR bits
853 * on boundary voltages. For example,
854 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
855 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
857 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
859 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
861 const int max_bit = ilog2(MMC_VDD_35_36);
864 if (vdd < 1650 || vdd > 3600)
867 if (vdd >= 1650 && vdd <= 1950)
868 return ilog2(MMC_VDD_165_195);
873 /* Base 2000 mV, step 100 mV, bit's base 8. */
874 bit = (vdd - 2000) / 100 + 8;
881 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
882 * @vdd_min: minimum voltage value (mV)
883 * @vdd_max: maximum voltage value (mV)
885 * This function returns the OCR mask bits according to the provided @vdd_min
886 * and @vdd_max values. If conversion is not possible the function returns 0.
888 * Notes wrt boundary cases:
889 * This function sets the OCR bits for all boundary voltages, for example
890 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
891 * MMC_VDD_34_35 mask.
893 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
897 if (vdd_max < vdd_min)
900 /* Prefer high bits for the boundary vdd_max values. */
901 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
905 /* Prefer low bits for the boundary vdd_min values. */
906 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
910 /* Fill the mask, from max bit to min bit. */
911 while (vdd_max >= vdd_min)
912 mask |= 1 << vdd_max--;
916 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
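/*
 * Illustrative example (not from the original file): a board with a fixed
 * 3.3 V supply can describe it as
 *
 *	ocr = mmc_vddrange_to_ocrmask(3300, 3400);
 *
 * which, per the boundary-case note above, yields
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */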
918 #ifdef CONFIG_REGULATOR
921 * mmc_regulator_get_ocrmask - return mask of supported voltages
922 * @supply: regulator to use
924 * This returns either a negative errno, or a mask of voltages that
925 * can be provided to MMC/SD/SDIO devices using the specified voltage
926 * regulator. This would normally be called before registering the MMC host.
929 int mmc_regulator_get_ocrmask(struct regulator *supply)
935 count = regulator_count_voltages(supply);
939 for (i = 0; i < count; i++) {
943 vdd_uV = regulator_list_voltage(supply, i);
947 vdd_mV = vdd_uV / 1000;
948 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
953 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
956 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
957 * @mmc: the host to regulate
958 * @supply: regulator to use
959 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
961 * Returns zero on success, else negative errno.
963 * MMC host drivers may use this to enable or disable a regulator using
964 * a particular supply voltage. This would normally be called from the set_ios() method.
967 int mmc_regulator_set_ocr(struct mmc_host *mmc,
968 struct regulator *supply,
969 unsigned short vdd_bit)
978 /* REVISIT mmc_vddrange_to_ocrmask() may have set some
979 * bits this regulator doesn't quite support ... don't
980 * be too picky, most cards and regulators are OK with
981 * a 0.1V range goof (it's a small error percentage).
983 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
985 min_uV = 1650 * 1000;
986 max_uV = 1950 * 1000;
988 min_uV = 1900 * 1000 + tmp * 100 * 1000;
989 max_uV = min_uV + 100 * 1000;
992 /* avoid needless changes to this voltage; the regulator
993 * might not allow this operation
995 voltage = regulator_get_voltage(supply);
997 if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
998 min_uV = max_uV = voltage;
1002 else if (voltage < min_uV || voltage > max_uV)
1003 result = regulator_set_voltage(supply, min_uV, max_uV);
1007 if (result == 0 && !mmc->regulator_enabled) {
1008 result = regulator_enable(supply);
1010 mmc->regulator_enabled = true;
1012 } else if (mmc->regulator_enabled) {
1013 result = regulator_disable(supply);
1015 mmc->regulator_enabled = false;
1019 dev_err(mmc_dev(mmc),
1020 "could not set regulator OCR (%d)\n", result);
1023 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
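/*
 * Illustrative sketch (not part of this file): a host driver switching its
 * supply from its ->set_ios() callback.  "struct my_reg_host" and its "vcc"
 * regulator pointer are hypothetical; only mmc_regulator_set_ocr() is real.
 */
struct my_reg_host {
	struct regulator *vcc;
};

static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct my_reg_host *host = mmc_priv(mmc);

	/* ios->vdd is 0 on power off, otherwise an OCR bit number. */
	if (ios->power_mode == MMC_POWER_OFF)
		mmc_regulator_set_ocr(mmc, host->vcc, 0);
	else
		mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);

	/* ... program clock, bus width and timing as usual ... */
}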
1025 #endif /* CONFIG_REGULATOR */
1028 * Mask off any voltages we don't support and select
1029 * the lowest voltage
1031 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1035 ocr &= host->ocr_avail;
1043 mmc_host_clk_hold(host);
1044 host->ios.vdd = bit;
1046 mmc_host_clk_release(host);
1048 pr_warning("%s: host doesn't support card's voltages\n",
1049 mmc_hostname(host));
1056 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
1058 struct mmc_command cmd = {0};
1064 * Send CMD11 only if the request is to switch the card to 1.8V signalling.
1067 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
1068 cmd.opcode = SD_SWITCH_VOLTAGE;
1070 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1072 err = mmc_wait_for_cmd(host, &cmd, 0);
1076 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1080 host->ios.signal_voltage = signal_voltage;
1082 if (host->ops->start_signal_voltage_switch) {
1083 mmc_host_clk_hold(host);
1084 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1085 mmc_host_clk_release(host);
1092 * Select timing parameters for host.
1094 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1096 mmc_host_clk_hold(host);
1097 host->ios.timing = timing;
1099 mmc_host_clk_release(host);
1103 * Select appropriate driver type for host.
1105 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1107 mmc_host_clk_hold(host);
1108 host->ios.drv_type = drv_type;
1110 mmc_host_clk_release(host);
1113 static void mmc_poweroff_notify(struct mmc_host *host)
1115 struct mmc_card *card;
1116 unsigned int timeout;
1117 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1121 mmc_claim_host(host);
1124 * Send power notify command only if card
1125 * is mmc and notify state is powered ON
1127 if (card && mmc_card_mmc(card) &&
1128 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1130 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1131 notify_type = EXT_CSD_POWER_OFF_SHORT;
1132 timeout = card->ext_csd.generic_cmd6_time;
1133 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1135 notify_type = EXT_CSD_POWER_OFF_LONG;
1136 timeout = card->ext_csd.power_off_longtime;
1137 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1140 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1141 EXT_CSD_POWER_OFF_NOTIFICATION,
1142 notify_type, timeout);
1144 if (err && err != -EBADMSG)
1145 pr_err("Device failed to respond within %d poweroff "
1146 "time. Forcefully powering down the device\n",
1149 /* Set the card state to no notification after the poweroff */
1150 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1152 mmc_release_host(host);
1156 * Apply power to the MMC stack. This is a two-stage process.
1157 * First, we enable power to the card without the clock running.
1158 * We then wait a bit for the power to stabilise. Finally,
1159 * enable the bus drivers and clock to the card.
1161 * We must _NOT_ enable the clock prior to power stabilising.
1163 * If a host does all the power sequencing itself, ignore the
1164 * initial MMC_POWER_UP stage.
1166 static void mmc_power_up(struct mmc_host *host)
1170 if (host->ios.power_mode == MMC_POWER_ON)
1173 mmc_host_clk_hold(host);
1175 /* If ocr is set, we use it */
1177 bit = ffs(host->ocr) - 1;
1179 bit = fls(host->ocr_avail) - 1;
1181 host->ios.vdd = bit;
1182 if (mmc_host_is_spi(host))
1183 host->ios.chip_select = MMC_CS_HIGH;
1185 host->ios.chip_select = MMC_CS_DONTCARE;
1186 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1187 host->ios.power_mode = MMC_POWER_UP;
1188 host->ios.bus_width = MMC_BUS_WIDTH_1;
1189 host->ios.timing = MMC_TIMING_LEGACY;
1193 * This delay should be sufficient to allow the power supply
1194 * to reach the minimum voltage.
1198 host->ios.clock = host->f_init;
1200 host->ios.power_mode = MMC_POWER_ON;
1204 * This delay must be at least 74 clock cycles, or 1 ms, or the
1205 * time required to reach a stable voltage.
1209 mmc_host_clk_release(host);
1212 void mmc_power_off(struct mmc_host *host)
1216 if (host->ios.power_mode == MMC_POWER_OFF)
1219 mmc_host_clk_hold(host);
1221 host->ios.clock = 0;
1225 * For an eMMC 4.5 device, send the AWAKE command before the
1226 * POWER_OFF_NOTIFY command, because in the sleep state
1227 * eMMC 4.5 devices respond only to the RESET and AWAKE commands.
1229 if (host->card && mmc_card_is_sleep(host->card) &&
1230 host->bus_ops->resume) {
1231 err = host->bus_ops->resume(host);
1234 mmc_poweroff_notify(host);
1236 pr_warning("%s: error %d during resume "
1237 "(continue with poweroff sequence)\n",
1238 mmc_hostname(host), err);
1242 * Reset ocr mask to be the highest possible voltage supported for
1243 * this mmc host. This value will be used at next power up.
1245 host->ocr = 1 << (fls(host->ocr_avail) - 1);
1247 if (!mmc_host_is_spi(host)) {
1248 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1249 host->ios.chip_select = MMC_CS_DONTCARE;
1251 host->ios.power_mode = MMC_POWER_OFF;
1252 host->ios.bus_width = MMC_BUS_WIDTH_1;
1253 host->ios.timing = MMC_TIMING_LEGACY;
1257 * Some configurations, such as the 802.11 SDIO card in the OLPC
1258 * XO-1.5, require a short delay after poweroff before the card
1259 * can be successfully turned on again.
1263 mmc_host_clk_release(host);
1267 * Cleanup when the last reference to the bus operator is dropped.
1269 static void __mmc_release_bus(struct mmc_host *host)
1272 BUG_ON(host->bus_refs);
1273 BUG_ON(!host->bus_dead);
1275 host->bus_ops = NULL;
1279 * Increase reference count of bus operator
1281 static inline void mmc_bus_get(struct mmc_host *host)
1283 unsigned long flags;
1285 spin_lock_irqsave(&host->lock, flags);
1287 spin_unlock_irqrestore(&host->lock, flags);
1291 * Decrease reference count of bus operator and free it if
1292 * it is the last reference.
1294 static inline void mmc_bus_put(struct mmc_host *host)
1296 unsigned long flags;
1298 spin_lock_irqsave(&host->lock, flags);
1300 if ((host->bus_refs == 0) && host->bus_ops)
1301 __mmc_release_bus(host);
1302 spin_unlock_irqrestore(&host->lock, flags);
1306 * Assign a mmc bus handler to a host. Only one bus handler may control a
1307 * host at any given time.
1309 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1311 unsigned long flags;
1316 WARN_ON(!host->claimed);
1318 spin_lock_irqsave(&host->lock, flags);
1320 BUG_ON(host->bus_ops);
1321 BUG_ON(host->bus_refs);
1323 host->bus_ops = ops;
1327 spin_unlock_irqrestore(&host->lock, flags);
1331 * Remove the current bus handler from a host.
1333 void mmc_detach_bus(struct mmc_host *host)
1335 unsigned long flags;
1339 WARN_ON(!host->claimed);
1340 WARN_ON(!host->bus_ops);
1342 spin_lock_irqsave(&host->lock, flags);
1346 spin_unlock_irqrestore(&host->lock, flags);
1352 * mmc_detect_change - process change of state on a MMC socket
1353 * @host: host which changed state.
1354 * @delay: optional delay to wait before detection (jiffies)
1356 * MMC drivers should call this when they detect a card has been
1357 * inserted or removed. The MMC layer will confirm that any
1358 * present card is still functional, and initialize any newly inserted cards.
1361 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1363 #ifdef CONFIG_MMC_DEBUG
1364 unsigned long flags;
1365 spin_lock_irqsave(&host->lock, flags);
1366 WARN_ON(host->removed);
1367 spin_unlock_irqrestore(&host->lock, flags);
1369 host->detect_change = 1;
1370 mmc_schedule_delayed_work(&host->detect, delay);
1373 EXPORT_SYMBOL(mmc_detect_change);
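/*
 * Illustrative sketch (not part of this file): a host driver reporting a
 * card-detect pin change.  "my_cd_irq" is hypothetical and the 200 ms
 * debounce is a typical choice, not a requirement of the core.
 */
static irqreturn_t my_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	mmc_detect_change(mmc, msecs_to_jiffies(200));
	return IRQ_HANDLED;
}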
1375 void mmc_init_erase(struct mmc_card *card)
1379 if (is_power_of_2(card->erase_size))
1380 card->erase_shift = ffs(card->erase_size) - 1;
1382 card->erase_shift = 0;
1385 * It is possible to erase an arbitrarily large area of an SD or MMC
1386 * card. That is not desirable because it can take a long time
1387 * (minutes) potentially delaying more important I/O, and also the
1388 * timeout calculations become increasingly over-estimated.
1389 * Consequently, 'pref_erase' is defined as a guide to limit erases
1390 * to that size and alignment.
1392 * For SD cards that define Allocation Unit size, limit erases to one
1393 * Allocation Unit at a time. For MMC cards that define High Capacity
1394 * Erase Size, whether it is switched on or not, limit to that size.
1395 * Otherwise just have a stab at a good value. For modern cards it
1396 * will end up being 4MiB. Note that if the value is too small, it
1397 * can end up taking longer to erase.
1399 if (mmc_card_sd(card) && card->ssr.au) {
1400 card->pref_erase = card->ssr.au;
1401 card->erase_shift = ffs(card->ssr.au) - 1;
1402 } else if (card->ext_csd.hc_erase_size) {
1403 card->pref_erase = card->ext_csd.hc_erase_size;
1405 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1407 card->pref_erase = 512 * 1024 / 512;
1409 card->pref_erase = 1024 * 1024 / 512;
1411 card->pref_erase = 2 * 1024 * 1024 / 512;
1413 card->pref_erase = 4 * 1024 * 1024 / 512;
1414 if (card->pref_erase < card->erase_size)
1415 card->pref_erase = card->erase_size;
1417 sz = card->pref_erase % card->erase_size;
1419 card->pref_erase += card->erase_size - sz;
1424 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1425 unsigned int arg, unsigned int qty)
1427 unsigned int erase_timeout;
1429 if (arg == MMC_DISCARD_ARG ||
1430 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1431 erase_timeout = card->ext_csd.trim_timeout;
1432 } else if (card->ext_csd.erase_group_def & 1) {
1433 /* High Capacity Erase Group Size uses HC timeouts */
1434 if (arg == MMC_TRIM_ARG)
1435 erase_timeout = card->ext_csd.trim_timeout;
1437 erase_timeout = card->ext_csd.hc_erase_timeout;
1439 /* CSD Erase Group Size uses write timeout */
1440 unsigned int mult = (10 << card->csd.r2w_factor);
1441 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1442 unsigned int timeout_us;
1444 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1445 if (card->csd.tacc_ns < 1000000)
1446 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1448 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1451 * ios.clock is only a target. The real clock rate might be
1452 * less but not that much less, so fudge it by multiplying by 2.
1455 timeout_us += (timeout_clks * 1000) /
1456 (mmc_host_clk_rate(card->host) / 1000);
1458 erase_timeout = timeout_us / 1000;
1461 * Theoretically, the calculation could underflow so round up
1462 * to 1ms in that case.
1468 /* Multiplier for secure operations */
1469 if (arg & MMC_SECURE_ARGS) {
1470 if (arg == MMC_SECURE_ERASE_ARG)
1471 erase_timeout *= card->ext_csd.sec_erase_mult;
1473 erase_timeout *= card->ext_csd.sec_trim_mult;
1476 erase_timeout *= qty;
1479 * Ensure at least a 1 second timeout for SPI as per
1480 * 'mmc_set_data_timeout()'
1482 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1483 erase_timeout = 1000;
1485 return erase_timeout;
1488 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1492 unsigned int erase_timeout;
1494 if (card->ssr.erase_timeout) {
1495 /* Erase timeout specified in SD Status Register (SSR) */
1496 erase_timeout = card->ssr.erase_timeout * qty +
1497 card->ssr.erase_offset;
1500 * Erase timeout not specified in SD Status Register (SSR) so
1501 * use 250ms per write block.
1503 erase_timeout = 250 * qty;
1506 /* Must not be less than 1 second */
1507 if (erase_timeout < 1000)
1508 erase_timeout = 1000;
1510 return erase_timeout;
1513 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1517 if (mmc_card_sd(card))
1518 return mmc_sd_erase_timeout(card, arg, qty);
1520 return mmc_mmc_erase_timeout(card, arg, qty);
1523 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1524 unsigned int to, unsigned int arg)
1526 struct mmc_command cmd = {0};
1527 unsigned int qty = 0;
1531 * qty is used to calculate the erase timeout which depends on how many
1532 * erase groups (or allocation units in SD terminology) are affected.
1533 * We count erasing part of an erase group as one erase group.
1534 * For SD, the allocation units are always a power of 2. For MMC, the
1535 * erase group size is almost certainly also a power of 2, but the JEDEC
1536 * standard does not seem to insist on that, so we fall back to
1537 * division in that case. SD may not specify an allocation unit size,
1538 * in which case the timeout is based on the number of write blocks.
1540 * Note that the timeout for secure trim 2 will only be correct if the
1541 * number of erase groups specified is the same as the total of all
1542 * preceding secure trim 1 commands. Since the power may have been
1543 * lost since the secure trim 1 commands occurred, it is generally
1544 * impossible to calculate the secure trim 2 timeout correctly.
1546 if (card->erase_shift)
1547 qty += ((to >> card->erase_shift) -
1548 (from >> card->erase_shift)) + 1;
1549 else if (mmc_card_sd(card))
1550 qty += to - from + 1;
1552 qty += ((to / card->erase_size) -
1553 (from / card->erase_size)) + 1;
1555 if (!mmc_card_blockaddr(card)) {
1560 if (mmc_card_sd(card))
1561 cmd.opcode = SD_ERASE_WR_BLK_START;
1563 cmd.opcode = MMC_ERASE_GROUP_START;
1565 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1566 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1568 pr_err("mmc_erase: group start error %d, "
1569 "status %#x\n", err, cmd.resp[0]);
1574 memset(&cmd, 0, sizeof(struct mmc_command));
1575 if (mmc_card_sd(card))
1576 cmd.opcode = SD_ERASE_WR_BLK_END;
1578 cmd.opcode = MMC_ERASE_GROUP_END;
1580 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1581 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1583 pr_err("mmc_erase: group end error %d, status %#x\n",
1589 memset(&cmd, 0, sizeof(struct mmc_command));
1590 cmd.opcode = MMC_ERASE;
1592 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1593 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1594 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1596 pr_err("mmc_erase: erase error %d, status %#x\n",
1602 if (mmc_host_is_spi(card->host))
1606 memset(&cmd, 0, sizeof(struct mmc_command));
1607 cmd.opcode = MMC_SEND_STATUS;
1608 cmd.arg = card->rca << 16;
1609 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1610 /* Do not retry else we can't see errors */
1611 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1612 if (err || (cmd.resp[0] & 0xFDF92000)) {
1613 pr_err("error %d requesting status %#x\n",
1618 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1619 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1625 * mmc_erase - erase sectors.
1626 * @card: card to erase
1627 * @from: first sector to erase
1628 * @nr: number of sectors to erase
1629 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1631 * Caller must claim host before calling this function.
1633 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1636 unsigned int rem, to = from + nr;
1638 if (!(card->host->caps & MMC_CAP_ERASE) ||
1639 !(card->csd.cmdclass & CCC_ERASE))
1642 if (!card->erase_size)
1645 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1648 if ((arg & MMC_SECURE_ARGS) &&
1649 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1652 if ((arg & MMC_TRIM_ARGS) &&
1653 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1656 if (arg == MMC_SECURE_ERASE_ARG) {
1657 if (from % card->erase_size || nr % card->erase_size)
1661 if (arg == MMC_ERASE_ARG) {
1662 rem = from % card->erase_size;
1664 rem = card->erase_size - rem;
1671 rem = nr % card->erase_size;
1684 /* 'from' and 'to' are inclusive */
1687 return mmc_do_erase(card, from, to, arg);
1689 EXPORT_SYMBOL(mmc_erase);
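/*
 * Illustrative sketch (not part of this file): discarding a sector range,
 * preferring a trim where the card supports it, roughly as the block
 * driver does.  "my_discard" is a hypothetical helper.
 */
static int my_discard(struct mmc_card *card, unsigned int from, unsigned int nr)
{
	unsigned int arg;
	int err;

	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else if (mmc_can_erase(card))
		arg = MMC_ERASE_ARG;
	else
		return -EOPNOTSUPP;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, arg);
	mmc_release_host(card->host);

	return err;
}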
1691 int mmc_can_erase(struct mmc_card *card)
1693 if ((card->host->caps & MMC_CAP_ERASE) &&
1694 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1698 EXPORT_SYMBOL(mmc_can_erase);
1700 int mmc_can_trim(struct mmc_card *card)
1702 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1706 EXPORT_SYMBOL(mmc_can_trim);
1708 int mmc_can_discard(struct mmc_card *card)
1711 * As there's no way to detect the discard support bit at v4.5
1712 * use the s/w feature support field.
1714 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1718 EXPORT_SYMBOL(mmc_can_discard);
1720 int mmc_can_sanitize(struct mmc_card *card)
1722 if (!mmc_can_trim(card) && !mmc_can_erase(card))
1724 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1728 EXPORT_SYMBOL(mmc_can_sanitize);
1730 int mmc_can_secure_erase_trim(struct mmc_card *card)
1732 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1736 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1738 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1741 if (!card->erase_size)
1743 if (from % card->erase_size || nr % card->erase_size)
1747 EXPORT_SYMBOL(mmc_erase_group_aligned);
1749 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1752 struct mmc_host *host = card->host;
1753 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1754 unsigned int last_timeout = 0;
1756 if (card->erase_shift)
1757 max_qty = UINT_MAX >> card->erase_shift;
1758 else if (mmc_card_sd(card))
1761 max_qty = UINT_MAX / card->erase_size;
1763 /* Find the largest qty with an OK timeout */
1766 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1767 timeout = mmc_erase_timeout(card, arg, qty + x);
1768 if (timeout > host->max_discard_to)
1770 if (timeout < last_timeout)
1772 last_timeout = timeout;
1784 /* Convert qty to sectors */
1785 if (card->erase_shift)
1786 max_discard = --qty << card->erase_shift;
1787 else if (mmc_card_sd(card))
1790 max_discard = --qty * card->erase_size;
1795 unsigned int mmc_calc_max_discard(struct mmc_card *card)
1797 struct mmc_host *host = card->host;
1798 unsigned int max_discard, max_trim;
1800 if (!host->max_discard_to)
1804 * Without erase_group_def set, MMC erase timeout depends on clock
1805 * frequency, which can change. In that case, the best choice is
1806 * just the preferred erase size.
1808 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1809 return card->pref_erase;
1811 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1812 if (mmc_can_trim(card)) {
1813 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1814 if (max_trim < max_discard)
1815 max_discard = max_trim;
1816 } else if (max_discard < card->erase_size) {
1819 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1820 mmc_hostname(host), max_discard, host->max_discard_to);
1823 EXPORT_SYMBOL(mmc_calc_max_discard);
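/*
 * Illustrative usage (not from the original file): the block driver
 * typically feeds this value into the request queue's discard limit,
 * e.g.
 *
 *	blk_queue_max_discard_sectors(q, mmc_calc_max_discard(card));
 *
 * so that no single discard request exceeds host->max_discard_to.
 */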
1825 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1827 struct mmc_command cmd = {0};
1829 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1832 cmd.opcode = MMC_SET_BLOCKLEN;
1834 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1835 return mmc_wait_for_cmd(card->host, &cmd, 5);
1837 EXPORT_SYMBOL(mmc_set_blocklen);
1839 static void mmc_hw_reset_for_init(struct mmc_host *host)
1841 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1843 mmc_host_clk_hold(host);
1844 host->ops->hw_reset(host);
1845 mmc_host_clk_release(host);
1848 int mmc_can_reset(struct mmc_card *card)
1852 if (!mmc_card_mmc(card))
1854 rst_n_function = card->ext_csd.rst_n_function;
1855 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1859 EXPORT_SYMBOL(mmc_can_reset);
1861 static int mmc_do_hw_reset(struct mmc_host *host, int check)
1863 struct mmc_card *card = host->card;
1865 if (!host->bus_ops->power_restore)
1868 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1874 if (!mmc_can_reset(card))
1877 mmc_host_clk_hold(host);
1878 mmc_set_clock(host, host->f_init);
1880 host->ops->hw_reset(host);
1882 /* If the reset has happened, then a status command will fail */
1884 struct mmc_command cmd = {0};
1887 cmd.opcode = MMC_SEND_STATUS;
1888 if (!mmc_host_is_spi(card->host))
1889 cmd.arg = card->rca << 16;
1890 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1891 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1893 mmc_host_clk_release(host);
1898 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
1899 if (mmc_host_is_spi(host)) {
1900 host->ios.chip_select = MMC_CS_HIGH;
1901 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1903 host->ios.chip_select = MMC_CS_DONTCARE;
1904 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1906 host->ios.bus_width = MMC_BUS_WIDTH_1;
1907 host->ios.timing = MMC_TIMING_LEGACY;
1910 mmc_host_clk_release(host);
1912 return host->bus_ops->power_restore(host);
1915 int mmc_hw_reset(struct mmc_host *host)
1917 return mmc_do_hw_reset(host, 0);
1919 EXPORT_SYMBOL(mmc_hw_reset);
1921 int mmc_hw_reset_check(struct mmc_host *host)
1923 return mmc_do_hw_reset(host, 1);
1925 EXPORT_SYMBOL(mmc_hw_reset_check);
1927 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1929 host->f_init = freq;
1931 #ifdef CONFIG_MMC_DEBUG
1932 pr_info("%s: %s: trying to init card at %u Hz\n",
1933 mmc_hostname(host), __func__, host->f_init);
1938 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
1939 * do a hardware reset if possible.
1941 mmc_hw_reset_for_init(host);
1943 /* Initialization should be done at 3.3 V I/O voltage. */
1944 mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
1947 * sdio_reset sends CMD52 to reset card. Since we do not know
1948 * if the card is being re-initialized, just send it. CMD52
1949 * should be ignored by SD/eMMC cards.
1954 mmc_send_if_cond(host, host->ocr_avail);
1956 /* Order's important: probe SDIO, then SD, then MMC */
1957 if (!mmc_attach_sdio(host))
1959 if (!mmc_attach_sd(host))
1961 if (!mmc_attach_mmc(host))
1964 mmc_power_off(host);
1968 int _mmc_detect_card_removed(struct mmc_host *host)
1972 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
1975 if (!host->card || mmc_card_removed(host->card))
1978 ret = host->bus_ops->alive(host);
1980 mmc_card_set_removed(host->card);
1981 pr_debug("%s: card remove detected\n", mmc_hostname(host));
1987 int mmc_detect_card_removed(struct mmc_host *host)
1989 struct mmc_card *card = host->card;
1992 WARN_ON(!host->claimed);
1997 ret = mmc_card_removed(card);
1999 * The card will be considered unchanged unless we have been asked to
2000 * detect a change or host requires polling to provide card detection.
2002 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
2003 !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
2006 host->detect_change = 0;
2008 ret = _mmc_detect_card_removed(host);
2009 if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
2011 * Schedule a detect work as soon as possible to let a
2012 * rescan handle the card removal.
2014 cancel_delayed_work(&host->detect);
2015 mmc_detect_change(host, 0);
2021 EXPORT_SYMBOL(mmc_detect_card_removed);
2023 void mmc_rescan(struct work_struct *work)
2025 struct mmc_host *host =
2026 container_of(work, struct mmc_host, detect.work);
2029 if (host->rescan_disable)
2035 * if there is a _removable_ card registered, check whether it is still present.
2038 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
2039 && !(host->caps & MMC_CAP_NONREMOVABLE))
2040 host->bus_ops->detect(host);
2042 host->detect_change = 0;
2045 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2046 * the card is no longer present.
2051 /* if there still is a card present, stop here */
2052 if (host->bus_ops != NULL) {
2058 * Only we can add a new handler, so it's safe to
2059 * release the lock here.
2063 if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
2064 mmc_claim_host(host);
2065 mmc_power_off(host);
2066 mmc_release_host(host);
2070 mmc_claim_host(host);
2071 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2072 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2074 if (freqs[i] <= host->f_min)
2077 mmc_release_host(host);
2080 if (host->caps & MMC_CAP_NEEDS_POLL)
2081 mmc_schedule_delayed_work(&host->detect, HZ);
2084 void mmc_start_host(struct mmc_host *host)
2086 host->f_init = max(freqs[0], host->f_min);
2088 mmc_detect_change(host, 0);
2091 void mmc_stop_host(struct mmc_host *host)
2093 #ifdef CONFIG_MMC_DEBUG
2094 unsigned long flags;
2095 spin_lock_irqsave(&host->lock, flags);
2097 spin_unlock_irqrestore(&host->lock, flags);
2100 cancel_delayed_work_sync(&host->detect);
2101 mmc_flush_scheduled_work();
2103 /* clear pm flags now and let card drivers set them as needed */
2107 if (host->bus_ops && !host->bus_dead) {
2108 /* Calling bus_ops->remove() with a claimed host can deadlock */
2109 if (host->bus_ops->remove)
2110 host->bus_ops->remove(host);
2112 mmc_claim_host(host);
2113 mmc_detach_bus(host);
2114 mmc_power_off(host);
2115 mmc_release_host(host);
2123 mmc_power_off(host);
2126 int mmc_power_save_host(struct mmc_host *host)
2130 #ifdef CONFIG_MMC_DEBUG
2131 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2136 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2141 if (host->bus_ops->power_save)
2142 ret = host->bus_ops->power_save(host);
2146 mmc_power_off(host);
2150 EXPORT_SYMBOL(mmc_power_save_host);
2152 int mmc_power_restore_host(struct mmc_host *host)
2156 #ifdef CONFIG_MMC_DEBUG
2157 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2162 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2168 ret = host->bus_ops->power_restore(host);
2174 EXPORT_SYMBOL(mmc_power_restore_host);
2176 int mmc_card_awake(struct mmc_host *host)
2180 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2185 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
2186 err = host->bus_ops->awake(host);
2192 EXPORT_SYMBOL(mmc_card_awake);
2194 int mmc_card_sleep(struct mmc_host *host)
2198 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2203 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2204 err = host->bus_ops->sleep(host);
2210 EXPORT_SYMBOL(mmc_card_sleep);
2212 int mmc_card_can_sleep(struct mmc_host *host)
2214 struct mmc_card *card = host->card;
2216 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
2220 EXPORT_SYMBOL(mmc_card_can_sleep);
2223 * Flush the cache to the non-volatile storage.
2225 int mmc_flush_cache(struct mmc_card *card)
2227 struct mmc_host *host = card->host;
2230 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
2233 if (mmc_card_mmc(card) &&
2234 (card->ext_csd.cache_size > 0) &&
2235 (card->ext_csd.cache_ctrl & 1)) {
2236 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2237 EXT_CSD_FLUSH_CACHE, 1, 0);
2239 pr_err("%s: cache flush error %d\n",
2240 mmc_hostname(card->host), err);
2245 EXPORT_SYMBOL(mmc_flush_cache);
2248 * Turn the cache ON/OFF.
2249 * Turning the cache OFF shall trigger flushing of the data
2250 * to the non-volatile storage.
2252 int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2254 struct mmc_card *card = host->card;
2255 unsigned int timeout;
2258 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
2259 mmc_card_is_removable(host))
2262 mmc_claim_host(host);
2263 if (card && mmc_card_mmc(card) &&
2264 (card->ext_csd.cache_size > 0)) {
2267 if (card->ext_csd.cache_ctrl ^ enable) {
2268 timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
2269 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2270 EXT_CSD_CACHE_CTRL, enable, timeout);
2272 pr_err("%s: cache %s error %d\n",
2273 mmc_hostname(card->host),
2274 enable ? "on" : "off",
2277 card->ext_csd.cache_ctrl = enable;
2280 mmc_release_host(host);
2284 EXPORT_SYMBOL(mmc_cache_ctrl);
2289 * mmc_suspend_host - suspend a host
2292 int mmc_suspend_host(struct mmc_host *host)
2296 cancel_delayed_work(&host->detect);
2297 mmc_flush_scheduled_work();
2299 err = mmc_cache_ctrl(host, 0);
2304 if (host->bus_ops && !host->bus_dead) {
2306 if (host->bus_ops->suspend)
2307 err = host->bus_ops->suspend(host);
2309 if (err == -ENOSYS || !host->bus_ops->resume) {
2311 * We simply "remove" the card in this case.
2312 * It will be redetected on resume. (Calling
2313 * bus_ops->remove() with a claimed host can deadlock.)
2316 if (host->bus_ops->remove)
2317 host->bus_ops->remove(host);
2318 mmc_claim_host(host);
2319 mmc_detach_bus(host);
2320 mmc_power_off(host);
2321 mmc_release_host(host);
2328 if (!err && !mmc_card_keep_power(host))
2329 mmc_power_off(host);
2335 EXPORT_SYMBOL(mmc_suspend_host);
2338 * mmc_resume_host - resume a previously suspended host
2341 int mmc_resume_host(struct mmc_host *host)
2346 if (host->bus_ops && !host->bus_dead) {
2347 if (!mmc_card_keep_power(host)) {
2349 mmc_select_voltage(host, host->ocr);
2351 * Tell runtime PM core we just powered up the card,
2352 * since it still believes the card is powered off.
2353 * Note that currently runtime PM is only enabled
2354 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
2356 if (mmc_card_sdio(host->card) &&
2357 (host->caps & MMC_CAP_POWER_OFF_CARD)) {
2358 pm_runtime_disable(&host->card->dev);
2359 pm_runtime_set_active(&host->card->dev);
2360 pm_runtime_enable(&host->card->dev);
2363 BUG_ON(!host->bus_ops->resume);
2364 err = host->bus_ops->resume(host);
2366 pr_warning("%s: error %d during resume "
2367 "(card was removed?)\n",
2368 mmc_hostname(host), err);
2372 host->pm_flags &= ~MMC_PM_KEEP_POWER;
2377 EXPORT_SYMBOL(mmc_resume_host);
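/*
 * Illustrative sketch (not part of this file): how a platform host driver
 * might forward system sleep transitions to the helpers above.  The
 * "my_driver_*" names are hypothetical; only mmc_suspend_host() and
 * mmc_resume_host() are real.
 */
static int my_driver_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	return mmc ? mmc_suspend_host(mmc) : 0;
}

static int my_driver_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	return mmc ? mmc_resume_host(mmc) : 0;
}

static SIMPLE_DEV_PM_OPS(my_driver_pm_ops, my_driver_suspend, my_driver_resume);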
2379 /* Do the card removal on suspend if the card is assumed removable.
2380 * Do that in the pm notifier while userspace isn't yet frozen, so we will be able to sync the card.
2383 int mmc_pm_notify(struct notifier_block *notify_block,
2384 unsigned long mode, void *unused)
2386 struct mmc_host *host = container_of(
2387 notify_block, struct mmc_host, pm_notify);
2388 unsigned long flags;
2392 case PM_HIBERNATION_PREPARE:
2393 case PM_SUSPEND_PREPARE:
2395 spin_lock_irqsave(&host->lock, flags);
2396 host->rescan_disable = 1;
2397 host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2398 spin_unlock_irqrestore(&host->lock, flags);
2399 cancel_delayed_work_sync(&host->detect);
2401 if (!host->bus_ops || host->bus_ops->suspend)
2404 /* Calling bus_ops->remove() with a claimed host can deadlock */
2405 if (host->bus_ops->remove)
2406 host->bus_ops->remove(host);
2408 mmc_claim_host(host);
2409 mmc_detach_bus(host);
2410 mmc_power_off(host);
2411 mmc_release_host(host);
2415 case PM_POST_SUSPEND:
2416 case PM_POST_HIBERNATION:
2417 case PM_POST_RESTORE:
2419 spin_lock_irqsave(&host->lock, flags);
2420 host->rescan_disable = 0;
2421 host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
2422 spin_unlock_irqrestore(&host->lock, flags);
2423 mmc_detect_change(host, 0);
2431 static int __init mmc_init(void)
2435 workqueue = alloc_ordered_workqueue("kmmcd", 0);
2439 ret = mmc_register_bus();
2441 goto destroy_workqueue;
2443 ret = mmc_register_host_class();
2445 goto unregister_bus;
2447 ret = sdio_register_bus();
2449 goto unregister_host_class;
2453 unregister_host_class:
2454 mmc_unregister_host_class();
2456 mmc_unregister_bus();
2458 destroy_workqueue(workqueue);
2463 static void __exit mmc_exit(void)
2465 sdio_unregister_bus();
2466 mmc_unregister_host_class();
2467 mmc_unregister_bus();
2468 destroy_workqueue(workqueue);
2471 subsys_initcall(mmc_init);
2472 module_exit(mmc_exit);
2474 MODULE_LICENSE("GPL");