/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
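
/*
 * Reliable writes are supported on eMMC devices that either advertise
 * the enhanced reliable write mode in the WR_REL_PARAM field of the
 * EXT_CSD or report a legacy reliable write sector count.
 */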
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
	 ((card)->ext_csd.rel_sectors)))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;
/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
};
static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
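
/*
 * Look up the mmc_blk_data behind a gendisk and take a reference on
 * it.  Callers drop the reference with mmc_blk_put(); the usage count
 * is protected by open_lock.
 */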
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
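
/*
 * Copy the ioctl command description and, when a data payload is
 * present, the user data buffer into a kernel-side mmc_blk_ioc_data.
 * The payload size is bounded by MMC_IOC_MAX_BYTES before anything is
 * allocated.
 */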
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
	struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {0};
	struct scatterlist sg;
	int err;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = idata->ic.blksz;
	data.blocks = idata->ic.blocks;

	sg_init_one(data.sg, idata->buf, idata->buf_bytes);

	if (idata->ic.write_flag)
		data.flags = MMC_DATA_WRITE;
	else
		data.flags = MMC_DATA_READ;

	mrq.cmd = &cmd;
	mrq.data = &data;

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_done;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_claim_host(card->host);

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			goto cmd_rel_host;
	}

	/* data.flags must already be set before doing this. */
	mmc_set_data_timeout(&data, card);
	/* Allow overriding the timeout_ns for empirical tuning. */
	if (idata->ic.data_timeout_ns)
		data.timeout_ns = idata->ic.data_timeout_ns;

	if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
		/*
		 * Pretend this is a data transfer and rely on the host driver
		 * to compute timeout.  When all host drivers support
		 * cmd.cmd_timeout for R1B, this can be changed to:
		 *
		 *	mrq.data = NULL;
		 *	cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
		 */
		data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		err = cmd.error;
		goto cmd_rel_host;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		err = data.error;
		goto cmd_rel_host;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
		err = -EFAULT;
		goto cmd_rel_host;
	}

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
						idata->buf, idata->buf_bytes)) {
			err = -EFAULT;
			goto cmd_rel_host;
		}
	}

cmd_rel_host:
	mmc_release_host(card->host);

cmd_done:
	mmc_blk_put(md);
	kfree(idata->buf);
	kfree(idata);
	return err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	int ret = -EINVAL;
	if (cmd == MMC_IOC_CMD)
		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
	return ret;
}
#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
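
/*
 * Illustrative userspace sketch (not part of this driver): issuing
 * CMD13 (SEND_STATUS) through MMC_IOC_CMD on the whole device node.
 * The device path and RCA value here are assumptions for the example;
 * the caller needs CAP_SYS_RAWIO.
 *
 *	struct mmc_ioc_cmd ic = {0};
 *	int fd = open("/dev/mmcblk0", O_RDWR);
 *
 *	ic.opcode = MMC_SEND_STATUS;	// CMD13
 *	ic.arg = 1 << 16;		// RCA in the upper 16 bits
 *	ic.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *		printf("card status %#x\n", ic.response[0]);
 */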
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};
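
/*
 * Switch the card to the hardware partition (main area or one of the
 * boot areas) backing this mmc_blk_data by updating the partition
 * access bits of the EXT_CSD PARTITION_CONFIG byte.  A no-op when the
 * requested partition is already selected.
 */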
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		card->ext_csd.part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
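
/*
 * Ask an SD card how many blocks of the previous write completed
 * successfully (ACMD22, SEND_NUM_WR_BLKS), so that known-good sectors
 * can still be acknowledged to the block layer after a write error.
 * Returns (u32)-1 on failure.
 */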
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	u32 *blocks;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
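
/* Fetch the card's R1 status register via CMD13 (SEND_STATUS). */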
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		printk(KERN_ERR "%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_secure_erase_trim(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
	}
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;

	/*
	 * No-op, only service this because we need REQ_FUA for reliable
	 * writes.
	 */
	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, 0);
	spin_unlock_irq(&md->lock);

	return 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
				   struct mmc_card *card,
				   struct request *req)
{
	int err;
	struct mmc_command set_count = {0};

	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}

	set_count.opcode = MMC_SET_BLOCK_COUNT;
	set_count.arg = brq->data.blocks | (1 << 31);
	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &set_count, 0);
	if (err)
		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
		       req->rq_disk->disk_name, err);
	return err;
}
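
/*
 * Issue a read or write request: build an mmc_blk_request for up to
 * blk_rq_sectors() blocks, fire it, and on error fall back to
 * single-block transfers so the exact failing sector can be reported.
 * Writes additionally poll CMD13 until the card leaves programming
 * state.
 */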
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
		(rq_data_dir(req) == WRITE) &&
		REL_WRITES_SUPPORTED(card);

	do {
		struct mmc_command cmd = {0};
		u32 readcmd, writecmd, status = 0;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = blk_rq_pos(req);
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = blk_rq_sectors(req);

		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;

		if (brq.data.blocks > 1 || do_rel_wr) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request. Reliable
			 * writes use SET_BLOCK_COUNT and do not use a
			 * STOP_TRANSMISSION request either.
			 */
			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
			    rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}
		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
			goto cmd_err;

		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks != blk_rq_sectors(req)) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);

		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		}

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}

		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}

		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				(R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
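
/*
 * Top-level issue function: select the right hardware partition for
 * this gendisk, then dispatch to the discard, secure discard, flush
 * or read/write path.
 */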
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	mmc_claim_host(card->host);
	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		ret = 0;
		goto out;
	}

	if (req->cmd_flags & REQ_DISCARD) {
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req->cmd_flags & REQ_FLUSH) {
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	mmc_release_host(card->host);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
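
/*
 * Allocate an mmc_blk_data together with its gendisk and request
 * queue.  Used both for the main user area (subname == NULL) and for
 * extra physical partitions such as the eMMC boot areas.
 */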
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	}
	else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (REL_WRITES_SUPPORTED(card))
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	blk_queue_logical_block_size(md->queue.queue, 512);
	set_capacity(md->disk, size);
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
	return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
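
/*
 * Create block devices for the extra hardware partitions of an eMMC
 * device.  Currently only the two boot areas are exposed, read-only
 * by default, under the "boot0" and "boot1" subnames.
 */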
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	if (card->ext_csd.boot_size) {
		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
					 card->ext_csd.boot_size >> 9,
					 true,
					 "boot0");
		if (ret)
			return ret;
		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
					 card->ext_csd.boot_size >> 9,
					 true,
					 "boot1");
		if (ret)
			return ret;
	}

	return ret;
}
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
		       md->disk->disk_name, err);
		return -EINVAL;
	}

	return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	if (md) {
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		mmc_blk_put(md);
	}
}
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		del_gendisk(md->disk);

	return ret;
}
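
/*
 * Some SanDisk iNAND eMMC parts take the CMD38 (erase/trim) argument
 * through EXT_CSD byte 113 instead; the fixups below tag those
 * devices with MMC_QUIRK_INAND_CMD38.
 */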
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	int err;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return err;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);

		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");