// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

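/*
 * When the DMA API can merge scatterlist segments (see mmc_merge_capable()
 * and dma_get_merge_boundary()), the queue may use more segments than
 * host->max_segs; this is the upper bound used in that case.
 */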
#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

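/*
 * Classify a request so the issuing path knows whether it can be dispatched
 * asynchronously, must complete synchronously, or should be sent as a CQE
 * direct command (DCMD).
 */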
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

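/*
 * Notifier invoked when a CQE request needs recovery. It only marks the queue
 * and schedules recovery_work; the actual recovery runs later from
 * mmc_mq_recovery_handler().
 */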
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

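/*
 * blk-mq timeout handler. For CQE, defer to mmc_cqe_timed_out() unless
 * recovery is already in progress; otherwise the mmc core owns timeout
 * handling, so just ask the block layer to restart the timer.
 */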
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

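/* Work item scheduled by __mmc_cqe_recovery_notifier() to recover the queue */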
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (host->cqe_enabled && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_max_secure_erase_sectors(q, max_discard);
	if (mmc_can_trim(card) && card->erased_byte == 0)
		blk_queue_max_write_zeroes_sectors(q, max_discard);
}

static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

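/*
 * Called by blk-mq for each preallocated request when the tag set is
 * allocated: preallocate the scatterlist used to map the request's data.
 */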
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = set->driver_data;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

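/*
 * Dispatch entry point for blk-mq. Returns BLK_STS_RESOURCE to have the
 * request re-queued whenever the queue is busy or recovering, otherwise
 * hands the request to mmc_blk_mq_issue_rq().
 */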
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For MMC host software queue, we only allow 2 requests in
		 * flight to avoid a long latency.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (host->cqe_enabled) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

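/* Apply card and host limits (segments, sector sizes, discard) to the queue */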
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeded above, it already
	 * set the segment boundary via blk_queue_virt_boundary(), so
	 * blk_queue_max_segment_size() must not be called as well.
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise a MMC card request queue.
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	struct gendisk *disk;
	int ret;

	mq->card = card;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (host->cqe_enabled && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, so
	 * host->can_dma_map_merge must be set first for
	 * mmc_get_max_segments() to return the right number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ERR_PTR(ret);

	disk = blk_mq_alloc_disk(&mq->tag_set, mq);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&mq->tag_set);
		return disk;
	}
	mq->queue = disk->queue;

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return disk;
}

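/* Quiesce the queue so no new requests are dispatched and none are outstanding */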
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}