/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/version.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#define MMC_QUEUE_BOUNCESZ	131072
#define MMC_QUEUE_SD_BOUNCESZ	524288
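/*
 * For SD cards the 512 KiB bounce buffers below are carved out of static
 * storage instead of being kmalloc()ed at queue-init time; see the
 * mmc_card_sd() checks in mmc_init_queue() and mmc_cleanup_queue().
 */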
static char mmc_queue_cur_bounce_buf[MMC_QUEUE_SD_BOUNCESZ] ____cacheline_aligned;
static char mmc_queue_prev_bounce_buf[MMC_QUEUE_SD_BOUNCESZ] ____cacheline_aligned;
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
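/*
 * mmcqd worker thread.  Requests are fetched from the block layer and handed
 * to issue_fn() using two mmc_queue_req slots: while the "current" slot is
 * being prepared, the host may still be completing the "previous" one, which
 * is what allows asynchronous (double-buffered) request handling.
 */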
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * New MMC request arrived when MMC thread may be
		 * blocked on the previous request to be complete
		 * with no current request fetched
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}
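/* Allocate and initialise a scatterlist with sg_len entries. */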
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
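	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer; data is copied to/from it so the block
	 * layer can still submit multi-segment requests.
	 */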
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		if (!mmc_card_sd(card))
			bouncesz = MMC_QUEUE_BOUNCESZ;
		else
			bouncesz = MMC_QUEUE_SD_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			if (!mmc_card_sd(card))
				mqrq_cur->bounce_buf = kmalloc(bouncesz,
							       GFP_KERNEL);
			else
				mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf;
			if (!mqrq_cur->bounce_buf)
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));

			if (!mmc_card_sd(card))
				mqrq_prev->bounce_buf = kmalloc(bouncesz,
								GFP_KERNEL);
			else
				mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf;
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}
	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	if (!mmc_card_sd(card))	/* SD bounce buffers are static */
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	if (!mmc_card_sd(card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	if (!mmc_card_sd(mq->card))	/* SD bounce buffers are static */
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	if (!mmc_card_sd(mq->card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
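/*
 * eMMC packed command support: several read/write requests can be combined
 * into one command preceded by a header describing all of them.  The
 * mmc_packed structures allocated below hold that header and the list of
 * packed requests for each mmc_queue_req slot.
 */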
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 * @wait: when set, force the queue to drain (failing outstanding requests
 *	  if the issuing thread is busy) rather than simply blocking until
 *	  the thread is idle
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
int mmc_queue_suspend(struct mmc_queue *mq, int wait)
{
	int rc = 0;
	struct request_queue *q = mq->queue;
	struct request *req;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		if (!wait) {
			/* Plain suspend: wait for the thread to go idle. */
			down(&mq->thread_sem);
		} else {
			rc = down_trylock(&mq->thread_sem);
			if (rc) {
				/*
				 * The issuing thread is still busy.  Restart
				 * the queue, mark it dying and fail the
				 * outstanding requests so the thread can
				 * drain and go idle.
				 */
				mq->flags &= ~MMC_QUEUE_SUSPENDED;
				spin_lock_irqsave(q->queue_lock, flags);
				blk_start_queue(q);
				spin_unlock_irqrestore(q->queue_lock, flags);

				printk("%s: mq->flags: %u, q->queue_flags: 0x%lx, q->in_flight (%d, %d)\n",
				       mmc_hostname(mq->card->host), mq->flags,
				       q->queue_flags, q->in_flight[0],
				       q->in_flight[1]);

				mutex_lock(&q->sysfs_lock);
				if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) {
					queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
					spin_lock_irqsave(q->queue_lock, flags);
					queue_flag_set(QUEUE_FLAG_DYING, q);
				} else if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) {
					queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
					spin_lock_irqsave(q->queue_lock, flags);
					queue_flag_set(QUEUE_FLAG_DEAD, q);
				}

				while ((req = blk_fetch_request(q)) != NULL) {
					req->cmd_flags |= REQ_QUIET;
					__blk_end_request_all(req, -EIO);
				}

				spin_unlock_irqrestore(q->queue_lock, flags);
				mutex_unlock(&q->sysfs_lock);

				down(&mq->thread_sem);
				rc = 0;
			}
		}
	}
	return rc;
}
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
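/*
 * Map a packed request: the packed command header occupies the first
 * scatterlist entries, followed by the data of every request on the packed
 * list.  Returns the total number of sg entries used.
 */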
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct request *req;
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));

	return sg_len;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return sg_len;
}
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}