/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */

#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
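
/*
 * With this prefix the parameters below appear as "mmcblk.<name>"; a
 * hypothetical boot command line could, for instance, pass
 * "mmcblk.perdev_minors=16" to override the config default.
 */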

#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
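
/*
 * In a CMD6 (MMC_SWITCH) argument, bits [23:16] carry the EXT_CSD byte
 * index; the macro above extracts that index so the ioctl path can
 * recognize, e.g., a SANITIZE_START request.
 */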

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
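
/*
 * Look up the mmc_blk_data behind a gendisk and take a reference on it.
 * Returns NULL once the device has started going away (usage == 0).
 */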
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;

	return devidx;
}
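
/*
 * Drop a reference; on the last put, tear down the request queue and
 * return the device index to the IDA.
 */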
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);

		blk_cleanup_queue(md->queue.queue);
		ida_simple_remove(&mmc_blk_ida, devidx);
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}
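
/*
 * Writing "1" here locks the boot area read-only until the next power
 * cycle.  The request is dispatched through the block layer as a driver
 * operation so it is ordered against normal I/O; e.g. (hypothetical
 * device name):
 *
 *	echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 */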
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;
	card = md->queue.card;

	/* Dispatch locking to the block layer */
	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_put_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);

	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);

	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
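
/*
 * Report a fabricated CHS geometry (4 heads, 16 sectors per track) for
 * tools that still ask for one; cylinders are derived from the capacity.
 */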
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}
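
/*
 * Poll CMD13 until the card has left the programming state or the retry
 * budget is exhausted; used to make sure an RPMB write has really finished.
 */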
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = __mmc_send_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	int err;
	bool is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_blk_data *md;
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->idata = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_put_request(req);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
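
/*
 * Userspace reaches the handler above through MMC_IOC_CMD.  A minimal,
 * hypothetical sketch (error handling and the card's real RCA elided),
 * issuing CMD13 (SEND_STATUS) against the whole-device node:
 *
 *	struct mmc_ioc_cmd ic = {};
 *
 *	ic.opcode = 13;			// MMC_SEND_STATUS
 *	ic.arg = rca << 16;		// RCA in bits [31:16]
 *	ic.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	ioctl(fd, MMC_IOC_CMD, &ic);	// fd = open("/dev/mmcblk0", O_RDWR)
 *	// on success, ic.response[0] holds the R1 card status
 *
 * The MMC_RSP_x and MMC_CMD_x encodings match the kernel's; mmc-utils
 * carries the same definitions for userspace.
 */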

static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	struct mmc_queue *mq;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	struct request *req;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->idata = idata;
	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_put_request(req);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}
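
/*
 * Select the eMMC hardware partition (main, boot, RPMB or GP) backing @md,
 * updating EXT_CSD_PART_CONFIG only when the target differs from the
 * currently selected partition.
 */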
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, md->part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, md->part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = md->part_type;
	return ret;
}

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return err;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return -EIO;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}
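
/*
 * Poll CMD13 until the card reports READY_FOR_DATA and has left the
 * programming state, or until @timeout_ms expires.
 */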
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, bool *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = __mmc_send_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = true;
		}

		/* We may rely on the host hw to handle busy detection. */
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
		    hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, bool *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = true;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = __mmc_send_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = true;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
			(brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = true;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = true;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	brq->stop.resp[0] = stop_status;
	brq->stop.error = 0;

	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->blkdata;

	/*
	 * If this is a RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

/*
 * The non-block commands come back from the block layer after it has queued
 * and processed them with all other requests, and then they get issued in
 * this function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);
	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		for (i = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
			mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_end_request_all(req, ret);
}
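
/*
 * Translate a REQ_OP_DISCARD into the best matching eMMC/SD erase class
 * (DISCARD, TRIM or plain ERASE) and issue it, retrying after a host/card
 * reset on -EIO.
 */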
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 0);
		}
		if (!err)
			err = mmc_erase(card, from, nr, arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (!err)
		mmc_blk_reset_success(md, type);
fail:
	blk_end_request(req, err, blk_rq_bytes(req));
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
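
/*
 * Classify the outcome of a completed asynchronous transfer into an
 * mmc_blk_status so the issue loop can decide whether to complete, retry
 * (possibly single-block), or abort the request.
 */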
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
					     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    areq);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mmc_queue_req_to_req(mq_mrq);
	int need_retune = card->host->need_retune;
	bool ecc_err = false;
	bool gen_err = false;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = true;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
					&gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int disable_multi, bool *do_rel_wr,
			      bool *do_data_tag)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	*do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		     rq_data_dir(req) == WRITE &&
		     (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	brq->mrq.data = &brq->data;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (*do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	*do_data_tag = card->ext_csd.data_tag_unit_size &&
		       (req->cmd_flags & REQ_META) &&
		       (rq_data_dir(req) == WRITE) &&
		       ((brq->data.blocks * brq->data.blksz) >=
			card->ext_csd.data_tag_unit_size);

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->areq.mrq = &brq->mrq;

	mmc_queue_bounce_pre(mqrq);
}
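
/*
 * Fill in the command portion of the request: read/write opcode (single
 * vs. multiple block), the optional CMD23 SET_BLOCK_COUNT prefix, and the
 * error-check callback run when the transfer completes.
 */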
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mqrq->areq.err_check = mmc_blk_err_check;
}

static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			       struct mmc_blk_request *brq, struct request *req,
			       bool old_req_pending)
{
	bool req_pending;

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;
		int err;

		err = mmc_sd_num_wr_blocks(card, &blocks);
		if (err)
			req_pending = old_req_pending;
		else
			req_pending = blk_end_request(req, 0, blocks << 9);
	} else {
		req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return req_pending;
}

static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
				 struct request *req,
				 struct mmc_queue_req *mqrq)
{
	if (mmc_card_removed(card))
		req->rq_flags |= RQF_QUIET;
	while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
	mq->qcnt--;
}

/**
 * mmc_blk_rw_try_restart() - tries to restart the current async request
 * @mq: the queue with the card and host to restart
 * @req: a new request that wants to be started after the current one
 */
static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
				   struct mmc_queue_req *mqrq)
{
	if (!req)
		return;

	/*
	 * If the card was removed, just cancel everything and return.
	 */
	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		blk_end_request_all(req, -EIO);
		mq->qcnt--; /* FIXME: just set to 0? */
		return;
	}
	/* Else proceed and try to restart the current async request */
	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}

static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq;
	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mqrq_cur = NULL;
	struct mmc_queue_req *mq_rq;
	struct request *old_req;
	struct mmc_async_req *new_areq;
	struct mmc_async_req *old_areq;
	bool req_pending = true;

	if (new_req) {
		mqrq_cur = req_to_mmc_queue_req(new_req);
		mq->qcnt++;
	}

	if (!mq->qcnt)
		return;

	do {
		if (new_req) {
			/*
			 * When a 4KB native sector size is enabled, only
			 * reads and writes of a multiple of 8 blocks (4KB)
			 * are allowed.
			 */
			if (mmc_large_sector(card) &&
			    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					new_req->rq_disk->disk_name);
				mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
				return;
			}

			mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
			new_areq = &mqrq_cur->areq;
		} else
			new_areq = NULL;

		old_areq = mmc_start_areq(card->host, new_areq, &status);
		if (!old_areq) {
			/*
			 * We have just put the first request into the pipeline
			 * and there is nothing more to do until it is
			 * complete.
			 */
			return;
		}

		/*
		 * An asynchronous request has been completed and we proceed
		 * to handle the result of it.
		 */
		mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
		brq = &mq_rq->brq;
		old_req = mmc_queue_req_to_req(mq_rq);
		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			req_pending = blk_end_request(old_req, 0,
						      brq->data.bytes_xfered);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && req_pending) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(old_req),
				       brq->data.bytes_xfered);
				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				return;
			}
			break;
		case MMC_BLK_CMD_ERR:
			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
			if (mmc_blk_reset(md, card->host, type)) {
				if (req_pending)
					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				else
					mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			if (!req_pending) {
				mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV) {
				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					old_req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			req_pending = blk_end_request(old_req, -EIO,
						      brq->data.blksz);
			if (!req_pending) {
				mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			break;
		case MMC_BLK_NOMEDIUM:
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       old_req->rq_disk->disk_name, status);
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		}

		if (req_pending) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card,
					   disable_multi, mq);
			mmc_start_areq(card->host,
				       &mq_rq->areq, NULL);
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (req_pending);

	mq->qcnt--;
}

void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;

	if (req && !mq->qcnt)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req)
			blk_end_request_all(req, -EIO);
		goto out;
	}

	if (req) {
		switch (req_op(req)) {
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			/*
			 * Complete ongoing async transfer before issuing
			 * ioctl()s
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_drv_op(mq, req);
			break;
		case REQ_OP_DISCARD:
			/*
			 * Complete ongoing async transfer before issuing
			 * discard.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
			/*
			 * Complete ongoing async transfer before issuing
			 * secure erase.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_secdiscard_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			/*
			 * Complete ongoing async transfer before issuing
			 * flush.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_flush(mq, req);
			break;
		default:
			/* Normal request, just issue it */
			mmc_blk_issue_rw_rq(mq, req);
			card->host->context_info.is_waiting_last_req = false;
			break;
		}
	} else {
		/* No request, flushing the pipeline with NULL */
		mmc_blk_issue_rw_rq(mq, NULL);
		card->host->context_info.is_waiting_last_req = false;
	}

out:
	if (!mq->qcnt)
		mmc_put_card(card);
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
	if (devidx < 0)
		return ERR_PTR(devidx);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.blkdata = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->parent = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if ((mmc_card_mmc(card) &&
		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_write_cache(md->queue.queue, true, true);
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	ida_simple_remove(&mmc_blk_ida, devidx);
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		/*
		 * Flush remaining requests and free queues. It
		 * is freeing the queue that stops new requests
		 * from being accepted.
		 */
		card = md->queue.card;
		mmc_cleanup_queue(&md->queue);
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
					card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);

			del_gendisk(md->disk);
		}
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	device_add_disk(md->parent, md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, mmc_blk_fixups);

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	dev_set_drvdata(&card->dev, md);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(&card->dev, NULL);
}

static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");