From: Jens Axboe
Date: Mon, 19 May 2014 17:52:35 +0000 (-0600)
Subject: Merge branch 'for-3.16/blk-mq-tagging' into for-3.16/core
X-Git-Tag: v4.9.8~5950^2~52^2~36
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=39a9f97e5ea99e048c4980c23cf197f6e77995cb;p=platform%2Fkernel%2Flinux-rpi3.git

Merge branch 'for-3.16/blk-mq-tagging' into for-3.16/core

Signed-off-by: Jens Axboe

Conflicts:
	block/blk-mq-tag.c
---

39a9f97e5ea99e048c4980c23cf197f6e77995cb
diff --cc block/blk-mq-tag.c
index 03ce6a11,c80086c..e6b3fba
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@@ -40,7 -39,85 +39,85 @@@ bool blk_mq_has_free_tags(struct blk_mq
  	return bt_has_free_tags(&tags->bitmap_tags);
  }
  
+ static inline void bt_index_inc(unsigned int *index)
+ {
+ 	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+ }
+ 
+ /*
+  * If a previously inactive queue goes active, bump the active user count.
+  */
+ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+ 	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+ 	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ 		atomic_inc(&hctx->tags->active_queues);
+ 
+ 	return true;
+ }
+ 
+ /*
+  * If a previously busy queue goes inactive, potential waiters could now
+  * be allowed to queue. Wake them up and check.
+  */
+ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+ {
+ 	struct blk_mq_tags *tags = hctx->tags;
+ 	struct blk_mq_bitmap_tags *bt;
+ 	int i, wake_index;
+ 
+ 	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ 		return;
+ 
+ 	atomic_dec(&tags->active_queues);
+ 
+ 	/*
+ 	 * Will only throttle depth on non-reserved tags
+ 	 */
+ 	bt = &tags->bitmap_tags;
+ 	wake_index = bt->wake_index;
+ 	for (i = 0; i < BT_WAIT_QUEUES; i++) {
+ 		struct bt_wait_state *bs = &bt->bs[wake_index];
+ 
+ 		if (waitqueue_active(&bs->wait))
+ 			wake_up(&bs->wait);
+ 
+ 		bt_index_inc(&wake_index);
+ 	}
+ }
+ 
+ /*
+  * For shared tag users, we track the number of currently active users
+  * and attempt to provide a fair share of the tag depth for each of them.
+  */
+ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ 				  struct blk_mq_bitmap_tags *bt)
+ {
+ 	unsigned int depth, users;
+ 
+ 	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+ 		return true;
+ 	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ 		return true;
+ 
+ 	/*
+ 	 * Don't try dividing an ant
+ 	 */
+ 	if (bt->depth == 1)
+ 		return true;
+ 
+ 	users = atomic_read(&hctx->tags->active_queues);
+ 	if (!users)
+ 		return true;
+ 
+ 	/*
+ 	 * Allow at least some tags
+ 	 */
+ 	depth = max((bt->depth + users - 1) / users, 4U);
+ 	return atomic_read(&hctx->nr_active) < depth;
+ }
+ 
 -static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
 +static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
  {
  	int tag, org_last_tag, end;

diff --cc block/blk-mq.c
index e862c44,3c4f1fc..0fbef7e
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@@ -1605,7 -1611,10 +1673,9 @@@ void blk_mq_free_queue(struct request_q
  	struct blk_mq_hw_ctx *hctx;
  	int i;
  
+ 	blk_mq_del_queue_tag_set(q);
+ 
  	queue_for_each_hw_ctx(q, hctx, i) {
 -		kfree(hctx->ctx_map);
  		kfree(hctx->ctxs);
  		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
  		if (q->mq_ops->exit_hctx)
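
A note on bt_index_inc(): the AND with (BT_WAIT_QUEUES - 1) only wraps the
index correctly because BT_WAIT_QUEUES is a power of two. A minimal
user-space sketch of the same wrap-around, with BT_WAIT_QUEUES hard-coded to
8 purely for illustration (the real constant is defined in the kernel's
blk-mq tag headers, not here):

#include <stdio.h>

#define BT_WAIT_QUEUES	8	/* must be a power of two for the mask trick */

static inline void bt_index_inc(unsigned int *index)
{
	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}

int main(void)
{
	unsigned int idx = 6;
	int i;

	/* prints "6 7 0 1": the index wraps to 0 without a modulo */
	for (i = 0; i < 4; i++) {
		printf("%u ", idx);
		bt_index_inc(&idx);
	}
	printf("\n");
	return 0;
}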
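__blk_mq_tag_busy() checks test_bit() before test_and_set_bit() so that the
common already-active case stays a plain read and avoids dirtying the cache
line with an atomic read-modify-write. A user-space sketch of the same
check-before-RMW pattern, using C11 atomics in place of the kernel bitops
(mark_tag_busy and the flag/counter names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TAG_ACTIVE	1u

static atomic_uint state;
static atomic_int active_queues;

/* Bump the active count only on the inactive -> active transition. */
static bool mark_tag_busy(void)
{
	/* cheap load first; do the RMW only if the bit looks clear */
	if (!(atomic_load(&state) & TAG_ACTIVE) &&
	    !(atomic_fetch_or(&state, TAG_ACTIVE) & TAG_ACTIVE))
		atomic_fetch_add(&active_queues, 1);

	return true;
}

int main(void)
{
	mark_tag_busy();
	mark_tag_busy();	/* second call sees the bit set; no increment */
	printf("active_queues = %d\n", atomic_load(&active_queues));
	return 0;
}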
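The fair-share throttle in hctx_may_queue() caps each active queue at the
total tag depth divided across the active users, rounding up, but never below
4 tags so a queue cannot be starved outright. A small standalone sketch of
just that arithmetic (fair_share_depth and max_uint are made-up helper names,
not from the patch):

#include <stdio.h>

static unsigned int max_uint(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Mirrors the patch's: depth = max((bt->depth + users - 1) / users, 4U); */
static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
{
	if (!users)
		return depth;	/* no active sharers, no throttling */
	return max_uint((depth + users - 1) / users, 4U);
}

int main(void)
{
	/* 64 tags shared by 3 active queues -> 22 tags each (rounded up) */
	printf("%u\n", fair_share_depth(64, 3));
	/* 64 tags over 32 queues would be 2 each; the floor keeps it at 4 */
	printf("%u\n", fair_share_depth(64, 32));
	return 0;
}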