#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

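/*
 * Allocate and free per-hardware-queue scheduler data: @size bytes are
 * allocated for each hctx, with @init/@exit invoked as each hctx is set
 * up or torn down.
 */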
int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
				int (*init)(struct blk_mq_hw_ctx *),
				void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

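/* Request allocation and freeing that goes through the I/O scheduler. */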
struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio, unsigned int op,
					 struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

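/* Merge handling and queue-restart hooks used by the blk-mq core. */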
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);

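/* Insert a single request, or a list of requests from a software queue. */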
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

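/* Dispatch-side helpers: drain scheduler-held requests towards the driver. */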
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
				   struct list_head *rq_list,
				   struct request *(*get_rq)(struct blk_mq_hw_ctx *));

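/* Scheduler setup, teardown and default initialization for a request queue. */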
int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);

int blk_mq_sched_init(struct request_queue *q);

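/*
 * Try to merge @bio into an existing request.  Bail out early if the
 * queue has no elevator, has merging disabled, or the bio itself is
 * not mergeable.
 */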
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio);
}

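/* Give the elevator a chance to attach private data to @rq. */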
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
					   struct request *rq,
					   struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.get_rq_priv)
		return e->type->ops.mq.get_rq_priv(q, rq, bio);

	return 0;
}

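/* Release any elevator-private data attached to @rq. */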
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
					    struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.put_rq_priv)
		e->type->ops.mq.put_rq_priv(q, rq);
}

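/*
 * Ask the elevator whether @bio may be merged into @rq.  Merging is
 * allowed by default if no elevator (or no hook) is present.
 */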
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);

	return true;
}

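/*
 * Completion hook: notify the elevator that @rq completed, then return
 * the request's internal scheduler tag.
 */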
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.completed_request)
		e->type->ops.mq.completed_request(hctx, rq);

	BUG_ON(rq->internal_tag == -1);

	blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}

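/* Notify the elevator that @rq is being started. */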
static inline void blk_mq_sched_started_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.started_request)
		e->type->ops.mq.started_request(rq);
}

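/* Notify the elevator that @rq is being requeued. */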
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.requeue_request)
		e->type->ops.mq.requeue_request(rq);
}

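/* True if the elevator has requests pending for this hardware queue. */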
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}

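/*
 * Mark the hardware queue for a restart once resources free up.  With a
 * shared tag set the whole request queue is flagged as well, not just
 * this hctx.
 */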
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
		if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
			struct request_queue *q = hctx->queue;

			if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
				set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
		}
	}
}

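/* True if a restart has been marked for this hardware queue. */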
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif