block/bsg-lib.c
/*
 *  BSG helper library
 *
 *  Copyright (C) 2008   James Smart, Emulex Corporation
 *  Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
 *  Copyright (C) 2011   Mike Christie
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>

/**
 * bsg_teardown_job - routine to tear down a bsg job
 * @kref: kref embedded in the bsg_job that is being released
 */
static void bsg_teardown_job(struct kref *kref)
{
        struct bsg_job *job = container_of(kref, struct bsg_job, kref);
        struct request *rq = job->req;

        put_device(job->dev);   /* release reference for the request */

        kfree(job->request_payload.sg_list);
        kfree(job->reply_payload.sg_list);

        blk_end_request_all(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
        kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
        return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

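/*
 * Illustrative sketch, not taken from an in-tree driver: an LLD that
 * completes jobs asynchronously can pin a job across the hardware round
 * trip with bsg_job_get()/bsg_job_put(); the completion path reports the
 * outcome and then drops the extra reference.  The my_lld_* names below
 * are hypothetical.
 *
 *	static int my_lld_submit(struct bsg_job *job)
 *	{
 *		if (!bsg_job_get(job))
 *			return -ENXIO;
 *		my_lld_start_io(job);
 *		return 0;
 *	}
 *
 *	static void my_lld_io_complete(struct bsg_job *job, int result,
 *				       unsigned int rcv_len)
 *	{
 *		bsg_job_done(job, result, rcv_len);
 *		bsg_job_put(job);
 *	}
 */
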
/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
                  unsigned int reply_payload_rcv_len)
{
        struct request *req = job->req;
        struct request *rsp = req->next_rq;
        struct scsi_request *rq = scsi_req(req);
        int err;

        err = scsi_req(job->req)->result = result;
        if (err < 0)
                /* we're only returning the result field in the reply */
                rq->sense_len = sizeof(u32);
        else
                rq->sense_len = job->reply_len;
        /* we assume all request payload was transferred, residual == 0 */
        rq->resid_len = 0;

        if (rsp) {
                WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);

                /* set reply (bidi) residual */
                scsi_req(rsp)->resid_len -=
                        min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
        }
        blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);

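/*
 * Illustrative completion sketch (hypothetical my_reply/rcv_len names):
 * before calling bsg_job_done() the LLD writes its reply into job->reply
 * and sets job->reply_len; a negative @result is treated as an errno and
 * only the result word is passed back to user space.
 *
 *	memcpy(job->reply, &my_reply, sizeof(my_reply));
 *	job->reply_len = sizeof(my_reply);
 *	bsg_job_done(job, 0, rcv_len);
 */
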
/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_softirq_done(struct request *rq)
{
        struct bsg_job *job = blk_mq_rq_to_pdu(rq);

        bsg_job_put(job);
}

/* Map a request's bio data into a scatterlist the LLD can hand to hardware. */
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
        size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

        BUG_ON(!req->nr_phys_segments);

        buf->sg_list = kzalloc(sz, GFP_KERNEL);
        if (!buf->sg_list)
                return -ENOMEM;
        sg_init_table(buf->sg_list, req->nr_phys_segments);
        scsi_req(req)->resid_len = blk_rq_bytes(req);
        buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
        buf->payload_len = blk_rq_bytes(req);
        return 0;
}

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_prepare_job(struct device *dev, struct request *req)
{
        struct request *rsp = req->next_rq;
        struct scsi_request *rq = scsi_req(req);
        struct bsg_job *job = blk_mq_rq_to_pdu(req);
        int ret;

        job->request = rq->cmd;
        job->request_len = rq->cmd_len;

        if (req->bio) {
                ret = bsg_map_buffer(&job->request_payload, req);
                if (ret)
                        goto failjob_rls_job;
        }
        if (rsp && rsp->bio) {
                ret = bsg_map_buffer(&job->reply_payload, rsp);
                if (ret)
                        goto failjob_rls_rqst_payload;
        }
        job->dev = dev;
        /* take a reference for the request */
        get_device(job->dev);
        kref_init(&job->kref);
        return 0;

failjob_rls_rqst_payload:
        kfree(job->request_payload.sg_list);
failjob_rls_job:
        return -ENOMEM;
}

/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error, bsg_prepare_job() returns a -Exyz error value that is stored
 * in the request's ->result before the request is completed.
 *
 * bsg_setup_queue() installs this as the queue's request_fn; drivers do
 * not need to set it themselves.
 */
static void bsg_request_fn(struct request_queue *q)
        __releases(q->queue_lock)
        __acquires(q->queue_lock)
{
        struct device *dev = q->queuedata;
        struct request *req;
        int ret;

        if (!get_device(dev))
                return;

        while (1) {
                req = blk_fetch_request(q);
                if (!req)
                        break;
                spin_unlock_irq(q->queue_lock);

                ret = bsg_prepare_job(dev, req);
                if (ret) {
                        scsi_req(req)->result = ret;
                        blk_end_request_all(req, BLK_STS_OK);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
                spin_lock_irq(q->queue_lock);
                if (ret)
                        break;
        }

        spin_unlock_irq(q->queue_lock);
        put_device(dev);
        spin_lock_irq(q->queue_lock);
}

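/*
 * Illustrative sketch of a driver-provided bsg_job_fn (hypothetical
 * my_lld_* names): the handler picks up the command and the mapped
 * payloads, kicks off the transfer and returns 0; the job is completed
 * later via bsg_job_done().
 *
 *	static int my_lld_bsg_request(struct bsg_job *job)
 *	{
 *		struct my_lld *lld = dev_get_drvdata(job->dev);
 *
 *		return my_lld_send(lld, job->request, job->request_len,
 *				   &job->request_payload,
 *				   &job->reply_payload);
 *	}
 */
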
static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
{
        struct bsg_job *job = blk_mq_rq_to_pdu(req);
        struct scsi_request *sreq = &job->sreq;

        /* called right after the request is allocated for the request_queue */

        sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
        if (!sreq->sense)
                return -ENOMEM;

        return 0;
}

static void bsg_initialize_rq(struct request *req)
{
        struct bsg_job *job = blk_mq_rq_to_pdu(req);
        struct scsi_request *sreq = &job->sreq;
        void *sense = sreq->sense;

        /* called right before the request is given to the request_queue user */

        memset(job, 0, sizeof(*job));

        scsi_req_init(sreq);

        sreq->sense = sense;
        sreq->sense_len = SCSI_SENSE_BUFFERSIZE;

        job->req = req;
        job->reply = sense;
        job->reply_len = sreq->sense_len;
        job->dd_data = job + 1;
}

static void bsg_exit_rq(struct request_queue *q, struct request *req)
{
        struct bsg_job *job = blk_mq_rq_to_pdu(req);
        struct scsi_request *sreq = &job->sreq;

        kfree(sreq->sense);
}

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
 * @release: release callback for the bsg device, passed to bsg_register_queue()
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
                bsg_job_fn *job_fn, int dd_job_size,
                void (*release)(struct device *))
{
        struct request_queue *q;
        int ret;

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);
        q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
        q->init_rq_fn = bsg_init_rq;
        q->exit_rq_fn = bsg_exit_rq;
        q->initialize_rq_fn = bsg_initialize_rq;
        q->request_fn = bsg_request_fn;

        ret = blk_init_allocated_queue(q);
        if (ret)
                goto out_cleanup_queue;

        q->queuedata = dev;
        q->bsg_job_fn = job_fn;
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
        queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        blk_queue_softirq_done(q, bsg_softirq_done);
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

        ret = bsg_register_queue(q, dev, name, release);
        if (ret) {
                printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
                       dev->kobj.name);
                goto out_cleanup_queue;
        }

        return q;
out_cleanup_queue:
        blk_cleanup_queue(q);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
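/*
 * Illustrative setup sketch (hypothetical my_lld_* names): a driver or
 * transport class typically creates its bsg queue once per device during
 * probe/attach and keeps the returned queue for later teardown with
 * bsg_unregister_queue()/blk_cleanup_queue().
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), my_lld_bsg_request, 0, NULL);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	my_lld->bsg_queue = q;
 */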