
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);

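/*
 * Debug lock assertions, compiled in while the #if below reads 1.
 * rwsem_is_wrlocked() infers "write-locked" from down_read_trylock()
 * failing: a reader can always get in unless a writer holds (or is
 * queued on) the semaphore, so this is a best-effort check rather
 * than a hard guarantee.
 */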
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
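 *
 * e.g. for a simple layout (stripe_count == 1, stripe_unit ==
 * object_size == 4M): off=6M, *plen=4M maps to objnum=1, objoff=2M,
 * objlen=2M, and *plen is shortened to 2M at the object boundary.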
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        int r;

        /* object extent? */
        r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, objlen);
        if (r < 0)
                return r;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

        return 0;
}

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                        struct bio *bio, size_t bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio = bio;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

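/*
 * Return a pointer to the ceph_osd_data field @fld inside member @typ
 * (extent, cls, ...) of op @whch of request @oreq, with a bounds check
 * on the op index, e.g.:
 *
 *      osd_req_op_data(req, 0, extent, osd_data)
 */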
#define osd_req_op_data(oreq, whch, typ, fld)                           \
({                                                                      \
        struct ceph_osd_request *__oreq = (oreq);                       \
        unsigned int __whch = (whch);                                   \
        BUG_ON(__whch >= __oreq->r_num_ops);                            \
        &__oreq->r_ops[__whch].typ.fld;                                 \
})

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                        unsigned int which, struct bio *bio, size_t bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_unsafe_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             atomic_read(&req->r_kref.refcount));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     atomic_read(&req->r_kref.refcount));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_unsafe_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
        request_release_checks(req);

        WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
        WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
                              gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

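/* pool (8) + preferred (4) + key len (4) + namespace len (4) + namespace */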
static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
        msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 1 + 8 + 4 + 4; /* pgid */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4; /* retry_attempt */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

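/*
 * __CEPH_FORALL_OSD_OPS() expands GENERATE_CASE once per known opcode,
 * producing a "case CEPH_OSD_OP_<op>: return true;" for each; anything
 * not in that list falls through to the default and is rejected.
 */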
static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        u16 opcode, const char *class, const char *method)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        BUG_ON(opcode != CEPH_OSD_OP_CALL);

        pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);
        ceph_pagelist_init(pagelist);

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        ceph_pagelist_init(pagelist);

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

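/*
 * Convert a host-order op description into its little-endian wire
 * format.  Returns the number of request data bytes this op carries,
 * which encode_request() sums into the message header's data_len.
 */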
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        if (WARN_ON(!osd_req_opcode_valid(src->op))) {
                pr_err("unrecognized osd opcode %d\n", src->op);

                return 0;
        }

        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_STARTSYNC:
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
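/*
 * DEFINE_RB_FUNCS(x, type, keyfld, nodefld) (see linux/ceph/libceph.h)
 * generates lookup_x(), insert_x() and erase_x() helpers keyed on
 * @keyfld and threaded through @nodefld.
 */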
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        atomic_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

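        /* __GFP_NOFAIL: blocks until the allocation succeeds, never NULL */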
        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

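/*
 * Take a reference only if the refcount hasn't already hit zero, so a
 * racing final put_osd() can never be undone.  Returns NULL on failure.
 */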
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
                        struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);
                        req->r_stamp = jiffies;
                }

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}

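/*
 * Find the session for osd @o, creating one if it doesn't exist.  A
 * new session can only be created with osdc->lock held for write, so
 * the read-locked path returns -EAGAIN and the caller retries with
 * the write lock.
 */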
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
                                          bool wrlocked)
{
        struct ceph_osd *osd;

        if (wrlocked)
                verify_osdc_wrlocked(osdc);
        else
                verify_osdc_locked(osdc);

        if (o != CEPH_HOMELESS_OSD)
                osd = lookup_osd(&osdc->osds, o);
        else
                osd = &osdc->homeless_osd;
        if (!osd) {
                if (!wrlocked)
                        return ERR_PTR(-EAGAIN);

                osd = create_osd(osdc, o);
                insert_osd(&osdc->osds, osd);
                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                              &osdc->osdmap->osd_addr[osd->o_osd]);
        }

        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
        return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(!req->r_tid || req->r_osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        if (!osd_homeless(osd))
                __remove_osd_from_lru(osd);
        else
                atomic_inc(&osd->o_osdc->num_homeless);

        get_osd(osd);
        insert_request(&osd->o_requests, req);
        req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(req->r_osd != osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        req->r_osd = NULL;
        erase_request(&osd->o_requests, req);
        put_osd(osd);

        if (!osd_homeless(osd))
                maybe_move_osd_to_lru(osd);
        else
                atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                if (__pool_full(pi))
                        return true;
        }

        return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->base_oloc.pool);
        return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
               (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
}

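/*
 * Result of mapping a request target onto the current osdmap:
 * NO_ACTION   - mapping unchanged, nothing to do
 * NEED_RESEND - PG mapping or primary OSD changed, resend the request
 * POOL_DNE    - the pool does not exist in this osdmap
 */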
enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
                                           struct ceph_osd_request_target *t,
                                           u32 *last_force_resend,
                                           bool any_change)
{
        struct ceph_pg_pool_info *pi;
        struct ceph_pg pgid, last_pgid;
        struct ceph_osds up, acting;
        bool force_resend = false;
        bool need_check_tiering = false;
        bool need_resend = false;
        bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        enum calc_target_result ct_res;
        int ret;

        pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
        if (!pi) {
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }

        if (osdc->osdmap->epoch == pi->last_force_request_resend) {
                if (last_force_resend &&
                    *last_force_resend < pi->last_force_request_resend) {
                        *last_force_resend = pi->last_force_request_resend;
                        force_resend = true;
                } else if (!last_force_resend) {
                        force_resend = true;
                }
        }
        if (ceph_oid_empty(&t->target_oid) || force_resend) {
                ceph_oid_copy(&t->target_oid, &t->base_oid);
                need_check_tiering = true;
        }
        if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
                ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
                need_check_tiering = true;
        }

        if (need_check_tiering &&
            (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
                if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
                        t->target_oloc.pool = pi->read_tier;
                if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
                        t->target_oloc.pool = pi->write_tier;
        }

        ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
                                        &t->target_oloc, &pgid);
        if (ret) {
                WARN_ON(ret != -ENOENT);
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }
        last_pgid.pool = pgid.pool;
        last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

        ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
        if (any_change &&
            ceph_is_new_interval(&t->acting,
                                 &acting,
                                 &t->up,
                                 &up,
                                 t->size,
                                 pi->size,
                                 t->min_size,
                                 pi->min_size,
                                 t->pg_num,
                                 pi->pg_num,
                                 t->sort_bitwise,
                                 sort_bitwise,
                                 &last_pgid))
                force_resend = true;

        if (t->paused && !target_should_be_paused(osdc, t, pi)) {
                t->paused = false;
                need_resend = true;
        }

        if (ceph_pg_compare(&t->pgid, &pgid) ||
            ceph_osds_changed(&t->acting, &acting, any_change) ||
            force_resend) {
                t->pgid = pgid; /* struct */
                ceph_osds_copy(&t->acting, &acting);
                ceph_osds_copy(&t->up, &up);
                t->size = pi->size;
                t->min_size = pi->min_size;
                t->pg_num = pi->pg_num;
                t->pg_num_mask = pi->pg_num_mask;
                t->sort_bitwise = sort_bitwise;

                t->osd = acting.primary;
                need_resend = true;
        }

        ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
out:
        dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
        return ct_res;
}

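/*
 * Attach each op's data items to the outgoing request message or, for
 * ops that return data, to the preallocated reply message.  Idempotent:
 * a message that already has data attached is left alone.
 */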
1408 static void setup_request_data(struct ceph_osd_request *req,
1409                                struct ceph_msg *msg)
1410 {
1411         u32 data_len = 0;
1412         int i;
1413
1414         if (!list_empty(&msg->data))
1415                 return;
1416
1417         WARN_ON(msg->data_length);
1418         for (i = 0; i < req->r_num_ops; i++) {
1419                 struct ceph_osd_req_op *op = &req->r_ops[i];
1420
1421                 switch (op->op) {
1422                 /* request */
1423                 case CEPH_OSD_OP_WRITE:
1424                 case CEPH_OSD_OP_WRITEFULL:
1425                         WARN_ON(op->indata_len != op->extent.length);
1426                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1427                         break;
1428                 case CEPH_OSD_OP_SETXATTR:
1429                 case CEPH_OSD_OP_CMPXATTR:
1430                         WARN_ON(op->indata_len != op->xattr.name_len +
1431                                                   op->xattr.value_len);
1432                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1433                         break;
1434                 case CEPH_OSD_OP_NOTIFY_ACK:
1435                         ceph_osdc_msg_data_add(msg,
1436                                                &op->notify_ack.request_data);
1437                         break;
1438
1439                 /* reply */
1440                 case CEPH_OSD_OP_STAT:
1441                         ceph_osdc_msg_data_add(req->r_reply,
1442                                                &op->raw_data_in);
1443                         break;
1444                 case CEPH_OSD_OP_READ:
1445                         ceph_osdc_msg_data_add(req->r_reply,
1446                                                &op->extent.osd_data);
1447                         break;
1448
1449                 /* both */
1450                 case CEPH_OSD_OP_CALL:
1451                         WARN_ON(op->indata_len != op->cls.class_len +
1452                                                   op->cls.method_len +
1453                                                   op->cls.indata_len);
1454                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1455                         /* optional, can be NONE */
1456                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1457                         /* optional, can be NONE */
1458                         ceph_osdc_msg_data_add(req->r_reply,
1459                                                &op->cls.response_data);
1460                         break;
1461                 case CEPH_OSD_OP_NOTIFY:
1462                         ceph_osdc_msg_data_add(msg,
1463                                                &op->notify.request_data);
1464                         ceph_osdc_msg_data_add(req->r_reply,
1465                                                &op->notify.response_data);
1466                         break;
1467                 }
1468
1469                 data_len += op->indata_len;
1470         }
1471
1472         WARN_ON(data_len != msg->data_length);
1473 }
1474
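     /*
      * Encode the request into the front of @msg as MOSDOp v4:
      * client_inc, osdmap epoch, flags, mtime, reassert_version,
      * oloc (pool, preferred, key, namespace), pgid, oid, ops[],
      * snapid, snap context and retry_attempt.  Data payloads were
      * attached by setup_request_data().
      */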
1475 static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1476 {
1477         void *p = msg->front.iov_base;
1478         void *const end = p + msg->front_alloc_len;
1479         u32 data_len = 0;
1480         int i;
1481
1482         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1483                 /* snapshots aren't writeable */
1484                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1485         } else {
1486                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1487                         req->r_data_offset || req->r_snapc);
1488         }
1489
1490         setup_request_data(req, msg);
1491
1492         ceph_encode_32(&p, 1); /* client_inc, always 1 */
1493         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1494         ceph_encode_32(&p, req->r_flags);
1495         ceph_encode_timespec(p, &req->r_mtime);
1496         p += sizeof(struct ceph_timespec);
1497         /* aka reassert_version */
1498         memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
1499         p += sizeof(req->r_replay_version);
1500
1501         /* oloc */
1502         ceph_start_encoding(&p, 5, 4,
1503                             ceph_oloc_encoding_size(&req->r_t.target_oloc));
1504         ceph_encode_64(&p, req->r_t.target_oloc.pool);
1505         ceph_encode_32(&p, -1); /* preferred */
1506         ceph_encode_32(&p, 0); /* key len */
1507         if (req->r_t.target_oloc.pool_ns)
1508                 ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
1509                                    req->r_t.target_oloc.pool_ns->len);
1510         else
1511                 ceph_encode_32(&p, 0);
1512
1513         /* pgid */
1514         ceph_encode_8(&p, 1);
1515         ceph_encode_64(&p, req->r_t.pgid.pool);
1516         ceph_encode_32(&p, req->r_t.pgid.seed);
1517         ceph_encode_32(&p, -1); /* preferred */
1518
1519         /* oid */
1520         ceph_encode_32(&p, req->r_t.target_oid.name_len);
1521         memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1522         p += req->r_t.target_oid.name_len;
1523
1524         /* ops, can imply data */
1525         ceph_encode_16(&p, req->r_num_ops);
1526         for (i = 0; i < req->r_num_ops; i++) {
1527                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1528                 p += sizeof(struct ceph_osd_op);
1529         }
1530
1531         ceph_encode_64(&p, req->r_snapid); /* snapid */
1532         if (req->r_snapc) {
1533                 ceph_encode_64(&p, req->r_snapc->seq);
1534                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1535                 for (i = 0; i < req->r_snapc->num_snaps; i++)
1536                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
1537         } else {
1538                 ceph_encode_64(&p, 0); /* snap_seq */
1539                 ceph_encode_32(&p, 0); /* snaps len */
1540         }
1541
1542         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1543
1544         BUG_ON(p > end);
1545         msg->front.iov_len = p - msg->front.iov_base;
1546         msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
1547         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1548         msg->hdr.data_len = cpu_to_le32(data_len);
1549         /*
1550          * The header "data_off" is a hint to the receiver allowing it
1551          * to align received data into its buffers such that there's no
1552          * need to re-copy it before writing it to disk (direct I/O).
1553          */
1554         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1555
1556         dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
1557              req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
1558              msg->front.iov_len, data_len);
1559 }
1560
1561 /*
1562  * @req has to be assigned a tid and registered.
1563  */
1564 static void send_request(struct ceph_osd_request *req)
1565 {
1566         struct ceph_osd *osd = req->r_osd;
1567
1568         verify_osd_locked(osd);
1569         WARN_ON(osd->o_osd != req->r_t.osd);
1570
1571         /*
1572          * We may have a previously queued request message hanging
1573          * around.  Cancel it to avoid corrupting the msgr.
1574          */
1575         if (req->r_sent)
1576                 ceph_msg_revoke(req->r_request);
1577
1578         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
1579         if (req->r_attempts)
1580                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1581         else
1582                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
1583
1584         encode_request(req, req->r_request);
1585
1586         dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
1587              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
1588              req->r_t.osd, req->r_flags, req->r_attempts);
1589
1590         req->r_t.paused = false;
1591         req->r_stamp = jiffies;
1592         req->r_attempts++;
1593
1594         req->r_sent = osd->o_incarnation;
1595         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
1596         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
1597 }
1598
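     /*
      * Subscribe to osdmap updates from the monitor: continuously
      * while the FULL, PAUSERD or PAUSEWR flags are set, one-shot for
      * the next epoch otherwise.
      */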
1599 static void maybe_request_map(struct ceph_osd_client *osdc)
1600 {
1601         bool continuous = false;
1602
1603         verify_osdc_locked(osdc);
1604         WARN_ON(!osdc->osdmap->epoch);
1605
1606         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1607             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
1608             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1609                 dout("%s osdc %p continuous\n", __func__, osdc);
1610                 continuous = true;
1611         } else {
1612                 dout("%s osdc %p onetime\n", __func__, osdc);
1613         }
1614
1615         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
1616                                osdc->osdmap->epoch + 1, continuous))
1617                 ceph_monc_renew_subs(&osdc->client->monc);
1618 }
1619
1620 static void send_map_check(struct ceph_osd_request *req);
1621
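     /*
      * Map the request to a PG/OSD and send it if possible.  If a
      * pause or full condition is in effect, the request stays linked
      * to the session with t.paused set and a new osdmap is requested
      * instead.  If the pool doesn't exist or an OSD session needs to
      * be created, the osdc lock is promoted from read to write and
      * the target is recalculated.
      */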
1622 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
1623 {
1624         struct ceph_osd_client *osdc = req->r_osdc;
1625         struct ceph_osd *osd;
1626         enum calc_target_result ct_res;
1627         bool need_send = false;
1628         bool promoted = false;
1629
1630         WARN_ON(req->r_tid || req->r_got_reply);
1631         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
1632
1633 again:
1634         ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
1635         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
1636                 goto promote;
1637
1638         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
1639         if (IS_ERR(osd)) {
1640                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
1641                 goto promote;
1642         }
1643
1644         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1645             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1646                 dout("req %p pausewr\n", req);
1647                 req->r_t.paused = true;
1648                 maybe_request_map(osdc);
1649         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
1650                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
1651                 dout("req %p pauserd\n", req);
1652                 req->r_t.paused = true;
1653                 maybe_request_map(osdc);
1654         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1655                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
1656                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
1657                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1658                     pool_full(osdc, req->r_t.base_oloc.pool))) {
1659                 dout("req %p full/pool_full\n", req);
1660                 pr_warn_ratelimited("FULL or reached pool quota\n");
1661                 req->r_t.paused = true;
1662                 maybe_request_map(osdc);
1663         } else if (!osd_homeless(osd)) {
1664                 need_send = true;
1665         } else {
1666                 maybe_request_map(osdc);
1667         }
1668
1669         mutex_lock(&osd->lock);
1670         /*
1671          * Assign the tid atomically with send_request() to protect
1672          * multiple writes to the same object from racing with each
1673          * other, resulting in out of order ops on the OSDs.
1674          */
1675         req->r_tid = atomic64_inc_return(&osdc->last_tid);
1676         link_request(osd, req);
1677         if (need_send)
1678                 send_request(req);
1679         mutex_unlock(&osd->lock);
1680
1681         if (ct_res == CALC_TARGET_POOL_DNE)
1682                 send_map_check(req);
1683
1684         if (promoted)
1685                 downgrade_write(&osdc->lock);
1686         return;
1687
1688 promote:
1689         up_read(&osdc->lock);
1690         down_write(&osdc->lock);
1691         wrlocked = true;
1692         promoted = true;
1693         goto again;
1694 }
1695
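     /*
      * Sanity-check the ACK/ONDISK flags: reads must not carry them
      * and are implicitly acked, writes must ask for an ack and/or an
      * ondisk commit.  Then count the request as in-flight.
      */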
1696 static void account_request(struct ceph_osd_request *req)
1697 {
1698         unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
1699
1700         if (req->r_flags & CEPH_OSD_FLAG_READ) {
1701                 WARN_ON(req->r_flags & mask);
1702                 req->r_flags |= CEPH_OSD_FLAG_ACK;
1703         } else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
1704                 WARN_ON(!(req->r_flags & mask));
1705         else
1706                 WARN_ON(1);
1707
1708         WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
1709         atomic_inc(&req->r_osdc->num_requests);
1710 }
1711
1712 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
1713 {
1714         ceph_osdc_get_request(req);
1715         account_request(req);
1716         __submit_request(req, wrlocked);
1717 }
1718
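     /*
      * Unlink the request from its OSD session, drop it from the
      * in-flight count and revoke both the request message and any
      * incoming reply from the messenger.
      */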
1719 static void __finish_request(struct ceph_osd_request *req)
1720 {
1721         struct ceph_osd_client *osdc = req->r_osdc;
1722         struct ceph_osd *osd = req->r_osd;
1723
1724         verify_osd_locked(osd);
1725         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1726
1727         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
1728         unlink_request(osd, req);
1729         atomic_dec(&osdc->num_requests);
1730
1731         /*
1732          * If an OSD has gone down and come back, and a request was sent
1733          * twice, it's possible to get a reply and end up here while the
1734          * request message is still queued for delivery.  We will ignore
1735          * the reply, so not a big deal, but better to try to catch it.
1736          */
1737         ceph_msg_revoke(req->r_request);
1738         ceph_msg_revoke_incoming(req->r_reply);
1739 }
1740
1741 static void finish_request(struct ceph_osd_request *req)
1742 {
1743         __finish_request(req);
1744         ceph_osdc_put_request(req);
1745 }
1746
1747 static void __complete_request(struct ceph_osd_request *req)
1748 {
1749         if (req->r_callback)
1750                 req->r_callback(req);
1751         else
1752                 complete_all(&req->r_completion);
1753 }
1754
1755 /*
1756  * Note that this is open-coded in handle_reply(), which has to deal
1757  * with ack vs commit, dup acks, etc.
1758  */
1759 static void complete_request(struct ceph_osd_request *req, int err)
1760 {
1761         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1762
1763         req->r_result = err;
1764         __finish_request(req);
1765         __complete_request(req);
1766         complete_all(&req->r_safe_completion);
1767         ceph_osdc_put_request(req);
1768 }
1769
1770 static void cancel_map_check(struct ceph_osd_request *req)
1771 {
1772         struct ceph_osd_client *osdc = req->r_osdc;
1773         struct ceph_osd_request *lookup_req;
1774
1775         verify_osdc_wrlocked(osdc);
1776
1777         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1778         if (!lookup_req)
1779                 return;
1780
1781         WARN_ON(lookup_req != req);
1782         erase_request_mc(&osdc->map_checks, req);
1783         ceph_osdc_put_request(req);
1784 }
1785
1786 static void cancel_request(struct ceph_osd_request *req)
1787 {
1788         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1789
1790         cancel_map_check(req);
1791         finish_request(req);
1792 }
1793
1794 static void check_pool_dne(struct ceph_osd_request *req)
1795 {
1796         struct ceph_osd_client *osdc = req->r_osdc;
1797         struct ceph_osdmap *map = osdc->osdmap;
1798
1799         verify_osdc_wrlocked(osdc);
1800         WARN_ON(!map->epoch);
1801
1802         if (req->r_attempts) {
1803                 /*
1804                  * We sent a request earlier, which means that
1805                  * previously the pool existed, and now it does not
1806                  * (i.e., it was deleted).
1807                  */
1808                 req->r_map_dne_bound = map->epoch;
1809                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
1810                      req->r_tid);
1811         } else {
1812                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
1813                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
1814         }
1815
1816         if (req->r_map_dne_bound) {
1817                 if (map->epoch >= req->r_map_dne_bound) {
1818                         /* we had a new enough map */
1819                         pr_info_ratelimited("tid %llu pool does not exist\n",
1820                                             req->r_tid);
1821                         complete_request(req, -ENOENT);
1822                 }
1823         } else {
1824                 send_map_check(req);
1825         }
1826 }
1827
1828 static void map_check_cb(struct ceph_mon_generic_request *greq)
1829 {
1830         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
1831         struct ceph_osd_request *req;
1832         u64 tid = greq->private_data;
1833
1834         WARN_ON(greq->result || !greq->u.newest);
1835
1836         down_write(&osdc->lock);
1837         req = lookup_request_mc(&osdc->map_checks, tid);
1838         if (!req) {
1839                 dout("%s tid %llu dne\n", __func__, tid);
1840                 goto out_unlock;
1841         }
1842
1843         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
1844              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
1845         if (!req->r_map_dne_bound)
1846                 req->r_map_dne_bound = greq->u.newest;
1847         erase_request_mc(&osdc->map_checks, req);
1848         check_pool_dne(req);
1849
1850         ceph_osdc_put_request(req);
1851 out_unlock:
1852         up_write(&osdc->lock);
1853 }
1854
1855 static void send_map_check(struct ceph_osd_request *req)
1856 {
1857         struct ceph_osd_client *osdc = req->r_osdc;
1858         struct ceph_osd_request *lookup_req;
1859         int ret;
1860
1861         verify_osdc_wrlocked(osdc);
1862
1863         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1864         if (lookup_req) {
1865                 WARN_ON(lookup_req != req);
1866                 return;
1867         }
1868
1869         ceph_osdc_get_request(req);
1870         insert_request_mc(&osdc->map_checks, req);
1871         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
1872                                           map_check_cb, req->r_tid);
1873         WARN_ON(ret);
1874 }
1875
1876 /*
1877  * lingering requests, watch/notify v2 infrastructure
1878  */
1879 static void linger_release(struct kref *kref)
1880 {
1881         struct ceph_osd_linger_request *lreq =
1882             container_of(kref, struct ceph_osd_linger_request, kref);
1883
1884         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
1885              lreq->reg_req, lreq->ping_req);
1886         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
1887         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
1888         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
1889         WARN_ON(!list_empty(&lreq->scan_item));
1890         WARN_ON(!list_empty(&lreq->pending_lworks));
1891         WARN_ON(lreq->osd);
1892
1893         if (lreq->reg_req)
1894                 ceph_osdc_put_request(lreq->reg_req);
1895         if (lreq->ping_req)
1896                 ceph_osdc_put_request(lreq->ping_req);
1897         target_destroy(&lreq->t);
1898         kfree(lreq);
1899 }
1900
1901 static void linger_put(struct ceph_osd_linger_request *lreq)
1902 {
1903         if (lreq)
1904                 kref_put(&lreq->kref, linger_release);
1905 }
1906
1907 static struct ceph_osd_linger_request *
1908 linger_get(struct ceph_osd_linger_request *lreq)
1909 {
1910         kref_get(&lreq->kref);
1911         return lreq;
1912 }
1913
1914 static struct ceph_osd_linger_request *
1915 linger_alloc(struct ceph_osd_client *osdc)
1916 {
1917         struct ceph_osd_linger_request *lreq;
1918
1919         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
1920         if (!lreq)
1921                 return NULL;
1922
1923         kref_init(&lreq->kref);
1924         mutex_init(&lreq->lock);
1925         RB_CLEAR_NODE(&lreq->node);
1926         RB_CLEAR_NODE(&lreq->osdc_node);
1927         RB_CLEAR_NODE(&lreq->mc_node);
1928         INIT_LIST_HEAD(&lreq->scan_item);
1929         INIT_LIST_HEAD(&lreq->pending_lworks);
1930         init_completion(&lreq->reg_commit_wait);
1931         init_completion(&lreq->notify_finish_wait);
1932
1933         lreq->osdc = osdc;
1934         target_init(&lreq->t);
1935
1936         dout("%s lreq %p\n", __func__, lreq);
1937         return lreq;
1938 }
1939
1940 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
1941 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
1942 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
1943
1944 /*
1945  * Create linger request <-> OSD session relation.
1946  *
1947  * @lreq has to be registered, @osd may be homeless.
1948  */
1949 static void link_linger(struct ceph_osd *osd,
1950                         struct ceph_osd_linger_request *lreq)
1951 {
1952         verify_osd_locked(osd);
1953         WARN_ON(!lreq->linger_id || lreq->osd);
1954         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
1955              osd->o_osd, lreq, lreq->linger_id);
1956
1957         if (!osd_homeless(osd))
1958                 __remove_osd_from_lru(osd);
1959         else
1960                 atomic_inc(&osd->o_osdc->num_homeless);
1961
1962         get_osd(osd);
1963         insert_linger(&osd->o_linger_requests, lreq);
1964         lreq->osd = osd;
1965 }
1966
1967 static void unlink_linger(struct ceph_osd *osd,
1968                           struct ceph_osd_linger_request *lreq)
1969 {
1970         verify_osd_locked(osd);
1971         WARN_ON(lreq->osd != osd);
1972         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
1973              osd->o_osd, lreq, lreq->linger_id);
1974
1975         lreq->osd = NULL;
1976         erase_linger(&osd->o_linger_requests, lreq);
1977         put_osd(osd);
1978
1979         if (!osd_homeless(osd))
1980                 maybe_move_osd_to_lru(osd);
1981         else
1982                 atomic_dec(&osd->o_osdc->num_homeless);
1983 }
1984
1985 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
1986 {
1987         verify_osdc_locked(lreq->osdc);
1988
1989         return !RB_EMPTY_NODE(&lreq->osdc_node);
1990 }
1991
1992 static bool linger_registered(struct ceph_osd_linger_request *lreq)
1993 {
1994         struct ceph_osd_client *osdc = lreq->osdc;
1995         bool registered;
1996
1997         down_read(&osdc->lock);
1998         registered = __linger_registered(lreq);
1999         up_read(&osdc->lock);
2000
2001         return registered;
2002 }
2003
2004 static void linger_register(struct ceph_osd_linger_request *lreq)
2005 {
2006         struct ceph_osd_client *osdc = lreq->osdc;
2007
2008         verify_osdc_wrlocked(osdc);
2009         WARN_ON(lreq->linger_id);
2010
2011         linger_get(lreq);
2012         lreq->linger_id = ++osdc->last_linger_id;
2013         insert_linger_osdc(&osdc->linger_requests, lreq);
2014 }
2015
2016 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2017 {
2018         struct ceph_osd_client *osdc = lreq->osdc;
2019
2020         verify_osdc_wrlocked(osdc);
2021
2022         erase_linger_osdc(&osdc->linger_requests, lreq);
2023         linger_put(lreq);
2024 }
2025
2026 static void cancel_linger_request(struct ceph_osd_request *req)
2027 {
2028         struct ceph_osd_linger_request *lreq = req->r_priv;
2029
2030         WARN_ON(!req->r_linger);
2031         cancel_request(req);
2032         linger_put(lreq);
2033 }
2034
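     /*
      * notify and watch error events are bounced to notify_wq via a
      * linger_work item so that user callbacks (wcb, errcb) are never
      * invoked in messenger dispatch context.
      */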
2035 struct linger_work {
2036         struct work_struct work;
2037         struct ceph_osd_linger_request *lreq;
2038         struct list_head pending_item;
2039         unsigned long queued_stamp;
2040
2041         union {
2042                 struct {
2043                         u64 notify_id;
2044                         u64 notifier_id;
2045                         void *payload; /* points into @msg front */
2046                         size_t payload_len;
2047
2048                         struct ceph_msg *msg; /* for ceph_msg_put() */
2049                 } notify;
2050                 struct {
2051                         int err;
2052                 } error;
2053         };
2054 };
2055
2056 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2057                                        work_func_t workfn)
2058 {
2059         struct linger_work *lwork;
2060
2061         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2062         if (!lwork)
2063                 return NULL;
2064
2065         INIT_WORK(&lwork->work, workfn);
2066         INIT_LIST_HEAD(&lwork->pending_item);
2067         lwork->lreq = linger_get(lreq);
2068
2069         return lwork;
2070 }
2071
2072 static void lwork_free(struct linger_work *lwork)
2073 {
2074         struct ceph_osd_linger_request *lreq = lwork->lreq;
2075
2076         mutex_lock(&lreq->lock);
2077         list_del(&lwork->pending_item);
2078         mutex_unlock(&lreq->lock);
2079
2080         linger_put(lreq);
2081         kfree(lwork);
2082 }
2083
2084 static void lwork_queue(struct linger_work *lwork)
2085 {
2086         struct ceph_osd_linger_request *lreq = lwork->lreq;
2087         struct ceph_osd_client *osdc = lreq->osdc;
2088
2089         verify_lreq_locked(lreq);
2090         WARN_ON(!list_empty(&lwork->pending_item));
2091
2092         lwork->queued_stamp = jiffies;
2093         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2094         queue_work(osdc->notify_wq, &lwork->work);
2095 }
2096
2097 static void do_watch_notify(struct work_struct *w)
2098 {
2099         struct linger_work *lwork = container_of(w, struct linger_work, work);
2100         struct ceph_osd_linger_request *lreq = lwork->lreq;
2101
2102         if (!linger_registered(lreq)) {
2103                 dout("%s lreq %p not registered\n", __func__, lreq);
2104                 goto out;
2105         }
2106
2107         WARN_ON(!lreq->is_watch);
2108         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2109              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2110              lwork->notify.payload_len);
2111         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2112                   lwork->notify.notifier_id, lwork->notify.payload,
2113                   lwork->notify.payload_len);
2114
2115 out:
2116         ceph_msg_put(lwork->notify.msg);
2117         lwork_free(lwork);
2118 }
2119
2120 static void do_watch_error(struct work_struct *w)
2121 {
2122         struct linger_work *lwork = container_of(w, struct linger_work, work);
2123         struct ceph_osd_linger_request *lreq = lwork->lreq;
2124
2125         if (!linger_registered(lreq)) {
2126                 dout("%s lreq %p not registered\n", __func__, lreq);
2127                 goto out;
2128         }
2129
2130         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2131         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2132
2133 out:
2134         lwork_free(lwork);
2135 }
2136
2137 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2138 {
2139         struct linger_work *lwork;
2140
2141         lwork = lwork_alloc(lreq, do_watch_error);
2142         if (!lwork) {
2143                 pr_err("failed to allocate error-lwork\n");
2144                 return;
2145         }
2146
2147         lwork->error.err = lreq->last_error;
2148         lwork_queue(lwork);
2149 }
2150
2151 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2152                                        int result)
2153 {
2154         if (!completion_done(&lreq->reg_commit_wait)) {
2155                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2156                 complete_all(&lreq->reg_commit_wait);
2157         }
2158 }
2159
2160 static void linger_commit_cb(struct ceph_osd_request *req)
2161 {
2162         struct ceph_osd_linger_request *lreq = req->r_priv;
2163
2164         mutex_lock(&lreq->lock);
2165         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2166              lreq->linger_id, req->r_result);
2167         WARN_ON(!__linger_registered(lreq));
2168         linger_reg_commit_complete(lreq, req->r_result);
2169         lreq->committed = true;
2170
2171         if (!lreq->is_watch) {
2172                 struct ceph_osd_data *osd_data =
2173                     osd_req_op_data(req, 0, notify, response_data);
2174                 void *p = page_address(osd_data->pages[0]);
2175
2176                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2177                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2178
2179                 /* make note of the notify_id */
2180                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2181                         lreq->notify_id = ceph_decode_64(&p);
2182                         dout("lreq %p notify_id %llu\n", lreq,
2183                              lreq->notify_id);
2184                 } else {
2185                         dout("lreq %p no notify_id\n", lreq);
2186                 }
2187         }
2188
2189         mutex_unlock(&lreq->lock);
2190         linger_put(lreq);
2191 }
2192
2193 static int normalize_watch_error(int err)
2194 {
2195         /*
2196          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2197          * notification and a failure to reconnect because we raced with
2198          * the delete appear the same to the user.
2199          */
2200         if (err == -ENOENT)
2201                 err = -ENOTCONN;
2202
2203         return err;
2204 }
2205
2206 static void linger_reconnect_cb(struct ceph_osd_request *req)
2207 {
2208         struct ceph_osd_linger_request *lreq = req->r_priv;
2209
2210         mutex_lock(&lreq->lock);
2211         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2212              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2213         if (req->r_result < 0) {
2214                 if (!lreq->last_error) {
2215                         lreq->last_error = normalize_watch_error(req->r_result);
2216                         queue_watch_error(lreq);
2217                 }
2218         }
2219
2220         mutex_unlock(&lreq->lock);
2221         linger_put(lreq);
2222 }
2223
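     /*
      * (Re)send the registration request for @lreq: a fresh watch or
      * notify registers, while an already committed watch reconnects
      * with a bumped register_gen so that stale pongs can be told
      * apart (see linger_ping_cb()).
      */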
2224 static void send_linger(struct ceph_osd_linger_request *lreq)
2225 {
2226         struct ceph_osd_request *req = lreq->reg_req;
2227         struct ceph_osd_req_op *op = &req->r_ops[0];
2228
2229         verify_osdc_wrlocked(req->r_osdc);
2230         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2231
2232         if (req->r_osd)
2233                 cancel_linger_request(req);
2234
2235         request_reinit(req);
2236         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2237         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2238         req->r_flags = lreq->t.flags;
2239         req->r_mtime = lreq->mtime;
2240
2241         mutex_lock(&lreq->lock);
2242         if (lreq->is_watch && lreq->committed) {
2243                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2244                         op->watch.cookie != lreq->linger_id);
2245                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2246                 op->watch.gen = ++lreq->register_gen;
2247                 dout("lreq %p reconnect register_gen %u\n", lreq,
2248                      op->watch.gen);
2249                 req->r_callback = linger_reconnect_cb;
2250         } else {
2251                 if (!lreq->is_watch)
2252                         lreq->notify_id = 0;
2253                 else
2254                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2255                 dout("lreq %p register\n", lreq);
2256                 req->r_callback = linger_commit_cb;
2257         }
2258         mutex_unlock(&lreq->lock);
2259
2260         req->r_priv = linger_get(lreq);
2261         req->r_linger = true;
2262
2263         submit_request(req, true);
2264 }
2265
2266 static void linger_ping_cb(struct ceph_osd_request *req)
2267 {
2268         struct ceph_osd_linger_request *lreq = req->r_priv;
2269
2270         mutex_lock(&lreq->lock);
2271         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2272              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2273              lreq->last_error);
2274         if (lreq->register_gen == req->r_ops[0].watch.gen) {
2275                 if (!req->r_result) {
2276                         lreq->watch_valid_thru = lreq->ping_sent;
2277                 } else if (!lreq->last_error) {
2278                         lreq->last_error = normalize_watch_error(req->r_result);
2279                         queue_watch_error(lreq);
2280                 }
2281         } else {
2282                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2283                      lreq->register_gen, req->r_ops[0].watch.gen);
2284         }
2285
2286         mutex_unlock(&lreq->lock);
2287         linger_put(lreq);
2288 }
2289
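     /*
      * Ping the watch to verify that its connection is still alive.
      * A successful pong advances watch_valid_thru; an error is
      * reported through the watch error callback (linger_ping_cb()).
      */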
2290 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2291 {
2292         struct ceph_osd_client *osdc = lreq->osdc;
2293         struct ceph_osd_request *req = lreq->ping_req;
2294         struct ceph_osd_req_op *op = &req->r_ops[0];
2295
2296         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2297                 dout("%s PAUSERD\n", __func__);
2298                 return;
2299         }
2300
2301         lreq->ping_sent = jiffies;
2302         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2303              __func__, lreq, lreq->linger_id, lreq->ping_sent,
2304              lreq->register_gen);
2305
2306         if (req->r_osd)
2307                 cancel_linger_request(req);
2308
2309         request_reinit(req);
2310         target_copy(&req->r_t, &lreq->t);
2311
2312         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2313                 op->watch.cookie != lreq->linger_id ||
2314                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2315         op->watch.gen = lreq->register_gen;
2316         req->r_callback = linger_ping_cb;
2317         req->r_priv = linger_get(lreq);
2318         req->r_linger = true;
2319
2320         ceph_osdc_get_request(req);
2321         account_request(req);
2322         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2323         link_request(lreq->osd, req);
2324         send_request(req);
2325 }
2326
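     /*
      * Pick an OSD for @lreq, link the two together and send the
      * initial registration request.  Called with osdc wrlocked.
      */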
2327 static void linger_submit(struct ceph_osd_linger_request *lreq)
2328 {
2329         struct ceph_osd_client *osdc = lreq->osdc;
2330         struct ceph_osd *osd;
2331
2332         calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
2333         osd = lookup_create_osd(osdc, lreq->t.osd, true);
2334         link_linger(osd, lreq);
2335
2336         send_linger(lreq);
2337 }
2338
2339 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2340 {
2341         struct ceph_osd_client *osdc = lreq->osdc;
2342         struct ceph_osd_linger_request *lookup_lreq;
2343
2344         verify_osdc_wrlocked(osdc);
2345
2346         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2347                                        lreq->linger_id);
2348         if (!lookup_lreq)
2349                 return;
2350
2351         WARN_ON(lookup_lreq != lreq);
2352         erase_linger_mc(&osdc->linger_map_checks, lreq);
2353         linger_put(lreq);
2354 }
2355
2356 /*
2357  * @lreq has to be both registered and linked.
2358  */
2359 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2360 {
2361         if (lreq->is_watch && lreq->ping_req->r_osd)
2362                 cancel_linger_request(lreq->ping_req);
2363         if (lreq->reg_req->r_osd)
2364                 cancel_linger_request(lreq->reg_req);
2365         cancel_linger_map_check(lreq);
2366         unlink_linger(lreq->osd, lreq);
2367         linger_unregister(lreq);
2368 }
2369
2370 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2371 {
2372         struct ceph_osd_client *osdc = lreq->osdc;
2373
2374         down_write(&osdc->lock);
2375         if (__linger_registered(lreq))
2376                 __linger_cancel(lreq);
2377         up_write(&osdc->lock);
2378 }
2379
2380 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2381
2382 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2383 {
2384         struct ceph_osd_client *osdc = lreq->osdc;
2385         struct ceph_osdmap *map = osdc->osdmap;
2386
2387         verify_osdc_wrlocked(osdc);
2388         WARN_ON(!map->epoch);
2389
2390         if (lreq->register_gen) {
2391                 lreq->map_dne_bound = map->epoch;
2392                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2393                      lreq, lreq->linger_id);
2394         } else {
2395                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2396                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2397                      map->epoch);
2398         }
2399
2400         if (lreq->map_dne_bound) {
2401                 if (map->epoch >= lreq->map_dne_bound) {
2402                         /* we had a new enough map */
2403                         pr_info("linger_id %llu pool does not exist\n",
2404                                 lreq->linger_id);
2405                         linger_reg_commit_complete(lreq, -ENOENT);
2406                         __linger_cancel(lreq);
2407                 }
2408         } else {
2409                 send_linger_map_check(lreq);
2410         }
2411 }
2412
2413 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2414 {
2415         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2416         struct ceph_osd_linger_request *lreq;
2417         u64 linger_id = greq->private_data;
2418
2419         WARN_ON(greq->result || !greq->u.newest);
2420
2421         down_write(&osdc->lock);
2422         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2423         if (!lreq) {
2424                 dout("%s linger_id %llu dne\n", __func__, linger_id);
2425                 goto out_unlock;
2426         }
2427
2428         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2429              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2430              greq->u.newest);
2431         if (!lreq->map_dne_bound)
2432                 lreq->map_dne_bound = greq->u.newest;
2433         erase_linger_mc(&osdc->linger_map_checks, lreq);
2434         check_linger_pool_dne(lreq);
2435
2436         linger_put(lreq);
2437 out_unlock:
2438         up_write(&osdc->lock);
2439 }
2440
2441 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2442 {
2443         struct ceph_osd_client *osdc = lreq->osdc;
2444         struct ceph_osd_linger_request *lookup_lreq;
2445         int ret;
2446
2447         verify_osdc_wrlocked(osdc);
2448
2449         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2450                                        lreq->linger_id);
2451         if (lookup_lreq) {
2452                 WARN_ON(lookup_lreq != lreq);
2453                 return;
2454         }
2455
2456         linger_get(lreq);
2457         insert_linger_mc(&osdc->linger_map_checks, lreq);
2458         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2459                                           linger_map_check_cb, lreq->linger_id);
2460         WARN_ON(ret);
2461 }
2462
2463 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2464 {
2465         int ret;
2466
2467         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2468         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2469         return ret ?: lreq->reg_commit_error;
2470 }
2471
2472 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2473 {
2474         int ret;
2475
2476         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2477         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2478         return ret ?: lreq->notify_finish_error;
2479 }
2480
2481 /*
2482  * Timeout callback, called every N seconds.  When one or more OSD
2483  * requests have been active for more than N seconds, we send a
2484  * keepalive (tag + timestamp) to their OSDs to ensure that any
2485  * communications channel reset is detected.
2486  */
2487 static void handle_timeout(struct work_struct *work)
2488 {
2489         struct ceph_osd_client *osdc =
2490                 container_of(work, struct ceph_osd_client, timeout_work.work);
2491         struct ceph_options *opts = osdc->client->options;
2492         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2493         LIST_HEAD(slow_osds);
2494         struct rb_node *n, *p;
2495
2496         dout("%s osdc %p\n", __func__, osdc);
2497         down_write(&osdc->lock);
2498
2499         /*
2500          * Ping OSDs that are a bit slow.  This ensures that if there
2501          * is a break in the TCP connection we will notice it and reopen
2502          * a connection with that OSD (from the fault callback).
2503          */
2504         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2505                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2506                 bool found = false;
2507
2508                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
2509                         struct ceph_osd_request *req =
2510                             rb_entry(p, struct ceph_osd_request, r_node);
2511
2512                         if (time_before(req->r_stamp, cutoff)) {
2513                                 dout(" req %p tid %llu on osd%d is laggy\n",
2514                                      req, req->r_tid, osd->o_osd);
2515                                 found = true;
2516                         }
2517                 }
2518                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2519                         struct ceph_osd_linger_request *lreq =
2520                             rb_entry(p, struct ceph_osd_linger_request, node);
2521
2522                         dout(" lreq %p linger_id %llu is served by osd%d\n",
2523                              lreq, lreq->linger_id, osd->o_osd);
2524                         found = true;
2525
2526                         mutex_lock(&lreq->lock);
2527                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
2528                                 send_linger_ping(lreq);
2529                         mutex_unlock(&lreq->lock);
2530                 }
2531
2532                 if (found)
2533                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
2534         }
2535
2536         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2537                 maybe_request_map(osdc);
2538
2539         while (!list_empty(&slow_osds)) {
2540                 struct ceph_osd *osd = list_first_entry(&slow_osds,
2541                                                         struct ceph_osd,
2542                                                         o_keepalive_item);
2543                 list_del_init(&osd->o_keepalive_item);
2544                 ceph_con_keepalive(&osd->o_con);
2545         }
2546
2547         up_write(&osdc->lock);
2548         schedule_delayed_work(&osdc->timeout_work,
2549                               osdc->client->options->osd_keepalive_timeout);
2550 }
2551
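     /*
      * Close out OSD sessions that have been idle (no regular or
      * linger requests) for longer than osd_idle_ttl.  Runs every
      * osd_idle_ttl / 4.
      */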
2552 static void handle_osds_timeout(struct work_struct *work)
2553 {
2554         struct ceph_osd_client *osdc =
2555                 container_of(work, struct ceph_osd_client,
2556                              osds_timeout_work.work);
2557         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2558         struct ceph_osd *osd, *nosd;
2559
2560         dout("%s osdc %p\n", __func__, osdc);
2561         down_write(&osdc->lock);
2562         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2563                 if (time_before(jiffies, osd->lru_ttl))
2564                         break;
2565
2566                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2567                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2568                 close_osd(osd);
2569         }
2570
2571         up_write(&osdc->lock);
2572         schedule_delayed_work(&osdc->osds_timeout_work,
2573                               round_jiffies_relative(delay));
2574 }
2575
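     /*
      * Decode a ceph_object_locator, rejecting encodings the kernel
      * client doesn't support: a locator key, a namespace change on
      * redirect and an explicit hash.
      */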
2576 static int ceph_oloc_decode(void **p, void *end,
2577                             struct ceph_object_locator *oloc)
2578 {
2579         u8 struct_v, struct_cv;
2580         u32 len;
2581         void *struct_end;
2582         int ret = 0;
2583
2584         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2585         struct_v = ceph_decode_8(p);
2586         struct_cv = ceph_decode_8(p);
2587         if (struct_v < 3) {
2588                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2589                         struct_v, struct_cv);
2590                 goto e_inval;
2591         }
2592         if (struct_cv > 6) {
2593                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2594                         struct_v, struct_cv);
2595                 goto e_inval;
2596         }
2597         len = ceph_decode_32(p);
2598         ceph_decode_need(p, end, len, e_inval);
2599         struct_end = *p + len;
2600
2601         oloc->pool = ceph_decode_64(p);
2602         *p += 4; /* skip preferred */
2603
2604         len = ceph_decode_32(p);
2605         if (len > 0) {
2606                 pr_warn("ceph_object_locator::key is set\n");
2607                 goto e_inval;
2608         }
2609
2610         if (struct_v >= 5) {
2611                 bool changed = false;
2612
2613                 len = ceph_decode_32(p);
2614                 if (len > 0) {
2615                         ceph_decode_need(p, end, len, e_inval);
2616                         if (!oloc->pool_ns ||
2617                             ceph_compare_string(oloc->pool_ns, *p, len))
2618                                 changed = true;
2619                         *p += len;
2620                 } else {
2621                         if (oloc->pool_ns)
2622                                 changed = true;
2623                 }
2624                 if (changed) {
2625                         /* redirect changes namespace */
2626                         pr_warn("ceph_object_locator::nspace is changed\n");
2627                         goto e_inval;
2628                 }
2629         }
2630
2631         if (struct_v >= 6) {
2632                 s64 hash = ceph_decode_64(p);
2633                 if (hash != -1) {
2634                         pr_warn("ceph_object_locator::hash is set\n");
2635                         goto e_inval;
2636                 }
2637         }
2638
2639         /* skip the rest */
2640         *p = struct_end;
2641 out:
2642         return ret;
2643
2644 e_inval:
2645         ret = -EINVAL;
2646         goto out;
2647 }
2648
2649 static int ceph_redirect_decode(void **p, void *end,
2650                                 struct ceph_request_redirect *redir)
2651 {
2652         u8 struct_v, struct_cv;
2653         u32 len;
2654         void *struct_end;
2655         int ret;
2656
2657         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2658         struct_v = ceph_decode_8(p);
2659         struct_cv = ceph_decode_8(p);
2660         if (struct_cv > 1) {
2661                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2662                         struct_v, struct_cv);
2663                 goto e_inval;
2664         }
2665         len = ceph_decode_32(p);
2666         ceph_decode_need(p, end, len, e_inval);
2667         struct_end = *p + len;
2668
2669         ret = ceph_oloc_decode(p, end, &redir->oloc);
2670         if (ret)
2671                 goto out;
2672
2673         len = ceph_decode_32(p);
2674         if (len > 0) {
2675                 pr_warn("ceph_request_redirect::object_name is set\n");
2676                 goto e_inval;
2677         }
2678
2679         len = ceph_decode_32(p);
2680         *p += len; /* skip osd_instructions */
2681
2682         /* skip the rest */
2683         *p = struct_end;
2684 out:
2685         return ret;
2686
2687 e_inval:
2688         ret = -EINVAL;
2689         goto out;
2690 }
2691
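     /*
      * Decoded form of an MOSDOpReply front.  decode_MOSDOpReply()
      * below copes with pre-v5 encodings (replay_version doubling as
      * user_version) as well as the v6/v7 redirect changes.
      */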
2692 struct MOSDOpReply {
2693         struct ceph_pg pgid;
2694         u64 flags;
2695         int result;
2696         u32 epoch;
2697         int num_ops;
2698         u32 outdata_len[CEPH_OSD_MAX_OPS];
2699         s32 rval[CEPH_OSD_MAX_OPS];
2700         int retry_attempt;
2701         struct ceph_eversion replay_version;
2702         u64 user_version;
2703         struct ceph_request_redirect redirect;
2704 };
2705
2706 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2707 {
2708         void *p = msg->front.iov_base;
2709         void *const end = p + msg->front.iov_len;
2710         u16 version = le16_to_cpu(msg->hdr.version);
2711         struct ceph_eversion bad_replay_version;
2712         u8 decode_redir;
2713         u32 len;
2714         int ret;
2715         int i;
2716
2717         ceph_decode_32_safe(&p, end, len, e_inval);
2718         ceph_decode_need(&p, end, len, e_inval);
2719         p += len; /* skip oid */
2720
2721         ret = ceph_decode_pgid(&p, end, &m->pgid);
2722         if (ret)
2723                 return ret;
2724
2725         ceph_decode_64_safe(&p, end, m->flags, e_inval);
2726         ceph_decode_32_safe(&p, end, m->result, e_inval);
2727         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2728         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2729         p += sizeof(bad_replay_version);
2730         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2731
2732         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2733         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2734                 goto e_inval;
2735
2736         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2737                          e_inval);
2738         for (i = 0; i < m->num_ops; i++) {
2739                 struct ceph_osd_op *op = p;
2740
2741                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2742                 p += sizeof(*op);
2743         }
2744
2745         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2746         for (i = 0; i < m->num_ops; i++)
2747                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2748
2749         if (version >= 5) {
2750                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2751                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2752                 p += sizeof(m->replay_version);
2753                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2754         } else {
2755                 m->replay_version = bad_replay_version; /* struct */
2756                 m->user_version = le64_to_cpu(m->replay_version.version);
2757         }
2758
2759         if (version >= 6) {
2760                 if (version >= 7)
2761                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2762                 else
2763                         decode_redir = 1;
2764         } else {
2765                 decode_redir = 0;
2766         }
2767
2768         if (decode_redir) {
2769                 ret = ceph_redirect_decode(&p, end, &m->redirect);
2770                 if (ret)
2771                         return ret;
2772         } else {
2773                 ceph_oloc_init(&m->redirect.oloc);
2774         }
2775
2776         return 0;
2777
2778 e_inval:
2779         return -EINVAL;
2780 }
2781
2782 /*
2783  * We are done with @req if
2784  *   - @m is a safe reply, or
2785  *   - @m is an unsafe reply and we didn't want a safe one
2786  */
2787 static bool done_request(const struct ceph_osd_request *req,
2788                          const struct MOSDOpReply *m)
2789 {
2790         return (m->result < 0 ||
2791                 (m->flags & CEPH_OSD_FLAG_ONDISK) ||
2792                 !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
2793 }
2794
2795 /*
2796  * handle osd op reply.  either call the callback if it is specified,
2797  * or do the completion to wake up the waiting thread.
2798  *
2799  * ->r_unsafe_callback is set?  yes                     no
2800  *
2801  * first reply is OK (needed    r_cb/r_completion,      r_cb/r_completion,
2802  * any or needed/got safe)      r_safe_completion       r_safe_completion
2803  *
2804  * first reply is unsafe        r_unsafe_cb(true)       (nothing)
2805  *
2806  * when we get the safe reply   r_unsafe_cb(false),     r_cb/r_completion,
2807  *                              r_safe_completion       r_safe_completion
2808  */
2809 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2810 {
2811         struct ceph_osd_client *osdc = osd->o_osdc;
2812         struct ceph_osd_request *req;
2813         struct MOSDOpReply m;
2814         u64 tid = le64_to_cpu(msg->hdr.tid);
2815         u32 data_len = 0;
2816         bool already_acked;
2817         int ret;
2818         int i;
2819
2820         dout("%s msg %p tid %llu\n", __func__, msg, tid);
2821
2822         down_read(&osdc->lock);
2823         if (!osd_registered(osd)) {
2824                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2825                 goto out_unlock_osdc;
2826         }
2827         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2828
2829         mutex_lock(&osd->lock);
2830         req = lookup_request(&osd->o_requests, tid);
2831         if (!req) {
2832                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2833                 goto out_unlock_session;
2834         }
2835
2836         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
2837         ret = decode_MOSDOpReply(msg, &m);
2838         m.redirect.oloc.pool_ns = NULL;
2839         if (ret) {
2840                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2841                        req->r_tid, ret);
2842                 ceph_msg_dump(msg);
2843                 goto fail_request;
2844         }
2845         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2846              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2847              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2848              le64_to_cpu(m.replay_version.version), m.user_version);
2849
2850         if (m.retry_attempt >= 0) {
2851                 if (m.retry_attempt != req->r_attempts - 1) {
2852                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2853                              req, req->r_tid, m.retry_attempt,
2854                              req->r_attempts - 1);
2855                         goto out_unlock_session;
2856                 }
2857         } else {
2858                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2859         }
2860
2861         if (!ceph_oloc_empty(&m.redirect.oloc)) {
2862                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2863                      m.redirect.oloc.pool);
2864                 unlink_request(osd, req);
2865                 mutex_unlock(&osd->lock);
2866
2867                 /*
2868                  * Not ceph_oloc_copy() - changing pool_ns is not
2869                  * supported.
2870                  */
2871                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
2872                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2873                 req->r_tid = 0;
2874                 __submit_request(req, false);
2875                 goto out_unlock_osdc;
2876         }
2877
2878         if (m.num_ops != req->r_num_ops) {
2879                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2880                        req->r_num_ops, req->r_tid);
2881                 goto fail_request;
2882         }
2883         for (i = 0; i < req->r_num_ops; i++) {
2884                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2885                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
2886                 req->r_ops[i].rval = m.rval[i];
2887                 req->r_ops[i].outdata_len = m.outdata_len[i];
2888                 data_len += m.outdata_len[i];
2889         }
2890         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
2891                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
2892                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
2893                 goto fail_request;
2894         }
2895         dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
2896              req, req->r_tid, req->r_got_reply, m.result, data_len);
2897
2898         already_acked = req->r_got_reply;
2899         if (!already_acked) {
2900                 req->r_result = m.result ?: data_len;
2901                 req->r_replay_version = m.replay_version; /* struct */
2902                 req->r_got_reply = true;
2903         } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
2904                 dout("req %p tid %llu dup ack\n", req, req->r_tid);
2905                 goto out_unlock_session;
2906         }
2907
2908         if (done_request(req, &m)) {
2909                 __finish_request(req);
2910                 if (req->r_linger) {
2911                         WARN_ON(req->r_unsafe_callback);
2912                         dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
2913                         __complete_request(req);
2914                 }
2915         }
2916
2917         mutex_unlock(&osd->lock);
2918         up_read(&osdc->lock);
2919
2920         if (done_request(req, &m)) {
2921                 if (already_acked && req->r_unsafe_callback) {
2922                         dout("req %p tid %llu safe-cb\n", req, req->r_tid);
2923                         req->r_unsafe_callback(req, false);
2924                 } else if (!req->r_linger) {
2925                         dout("req %p tid %llu cb\n", req, req->r_tid);
2926                         __complete_request(req);
2927                 }
2928                 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2929                         complete_all(&req->r_safe_completion);
2930                 ceph_osdc_put_request(req);
2931         } else {
2932                 if (req->r_unsafe_callback) {
2933                         dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
2934                         req->r_unsafe_callback(req, true);
2935                 } else {
2936                         WARN_ON(1);
2937                 }
2938         }
2939
2940         return;
2941
2942 fail_request:
2943         complete_request(req, -EIO);
2944 out_unlock_session:
2945         mutex_unlock(&osd->lock);
2946 out_unlock_osdc:
2947         up_read(&osdc->lock);
2948 }
2949
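     /*
      * Snapshot the per-pool full flags so that pool_cleared_full()
      * below can detect a full -> not full transition.
      */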
2950 static void set_pool_was_full(struct ceph_osd_client *osdc)
2951 {
2952         struct rb_node *n;
2953
2954         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
2955                 struct ceph_pg_pool_info *pi =
2956                     rb_entry(n, struct ceph_pg_pool_info, node);
2957
2958                 pi->was_full = __pool_full(pi);
2959         }
2960 }
2961
2962 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
2963 {
2964         struct ceph_pg_pool_info *pi;
2965
2966         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
2967         if (!pi)
2968                 return false;
2969
2970         return pi->was_full && !__pool_full(pi);
2971 }
2972
2973 static enum calc_target_result
2974 recalc_linger_target(struct ceph_osd_linger_request *lreq)
2975 {
2976         struct ceph_osd_client *osdc = lreq->osdc;
2977         enum calc_target_result ct_res;
2978
2979         ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
2980         if (ct_res == CALC_TARGET_NEED_RESEND) {
2981                 struct ceph_osd *osd;
2982
2983                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2984                 if (osd != lreq->osd) {
2985                         unlink_linger(lreq->osd, lreq);
2986                         link_linger(osd, lreq);
2987                 }
2988         }
2989
2990         return ct_res;
2991 }
2992
2993 /*
2994  * Requeue requests whose mapping to an OSD has changed.
2995  */
2996 static void scan_requests(struct ceph_osd *osd,
2997                           bool force_resend,
2998                           bool cleared_full,
2999                           bool check_pool_cleared_full,
3000                           struct rb_root *need_resend,
3001                           struct list_head *need_resend_linger)
3002 {
3003         struct ceph_osd_client *osdc = osd->o_osdc;
3004         struct rb_node *n;
3005         bool force_resend_writes;
3006
3007         for (n = rb_first(&osd->o_linger_requests); n; ) {
3008                 struct ceph_osd_linger_request *lreq =
3009                     rb_entry(n, struct ceph_osd_linger_request, node);
3010                 enum calc_target_result ct_res;
3011
3012                 n = rb_next(n); /* recalc_linger_target() */
3013
3014                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3015                      lreq->linger_id);
3016                 ct_res = recalc_linger_target(lreq);
3017                 switch (ct_res) {
3018                 case CALC_TARGET_NO_ACTION:
3019                         force_resend_writes = cleared_full ||
3020                             (check_pool_cleared_full &&
3021                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3022                         if (!force_resend && !force_resend_writes)
3023                                 break;
3024
3025                         /* fall through */
3026                 case CALC_TARGET_NEED_RESEND:
3027                         cancel_linger_map_check(lreq);
3028                         /*
3029                          * scan_requests() for the previous epoch(s)
3030                          * may have already added it to the list, since
3031                          * it's not unlinked here.
3032                          */
3033                         if (list_empty(&lreq->scan_item))
3034                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3035                         break;
3036                 case CALC_TARGET_POOL_DNE:
3037                         check_linger_pool_dne(lreq);
3038                         break;
3039                 }
3040         }
3041
3042         for (n = rb_first(&osd->o_requests); n; ) {
3043                 struct ceph_osd_request *req =
3044                     rb_entry(n, struct ceph_osd_request, r_node);
3045                 enum calc_target_result ct_res;
3046
3047                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3048
3049                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3050                 ct_res = calc_target(osdc, &req->r_t,
3051                                      &req->r_last_force_resend, false);
3052                 switch (ct_res) {
3053                 case CALC_TARGET_NO_ACTION:
3054                         force_resend_writes = cleared_full ||
3055                             (check_pool_cleared_full &&
3056                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3057                         if (!force_resend &&
3058                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3059                              !force_resend_writes))
3060                                 break;
3061
3062                         /* fall through */
3063                 case CALC_TARGET_NEED_RESEND:
3064                         cancel_map_check(req);
3065                         unlink_request(osd, req);
3066                         insert_request(need_resend, req);
3067                         break;
3068                 case CALC_TARGET_POOL_DNE:
3069                         check_pool_dne(req);
3070                         break;
3071                 }
3072         }
3073 }
3074
3075 static int handle_one_map(struct ceph_osd_client *osdc,
3076                           void *p, void *end, bool incremental,
3077                           struct rb_root *need_resend,
3078                           struct list_head *need_resend_linger)
3079 {
3080         struct ceph_osdmap *newmap;
3081         struct rb_node *n;
3082         bool skipped_map = false;
3083         bool was_full;
3084
3085         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3086         set_pool_was_full(osdc);
3087
3088         if (incremental)
3089                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3090         else
3091                 newmap = ceph_osdmap_decode(&p, end);
3092         if (IS_ERR(newmap))
3093                 return PTR_ERR(newmap);
3094
3095         if (newmap != osdc->osdmap) {
3096                 /*
3097                  * Preserve ->was_full before destroying the old map.
3098                  * For pools that weren't in the old map, ->was_full
3099                  * should be false.
3100                  */
3101                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3102                         struct ceph_pg_pool_info *pi =
3103                             rb_entry(n, struct ceph_pg_pool_info, node);
3104                         struct ceph_pg_pool_info *old_pi;
3105
3106                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3107                         if (old_pi)
3108                                 pi->was_full = old_pi->was_full;
3109                         else
3110                                 WARN_ON(pi->was_full);
3111                 }
3112
3113                 if (osdc->osdmap->epoch &&
3114                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3115                         WARN_ON(incremental);
3116                         skipped_map = true;
3117                 }
3118
3119                 ceph_osdmap_destroy(osdc->osdmap);
3120                 osdc->osdmap = newmap;
3121         }
3122
3123         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3124         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3125                       need_resend, need_resend_linger);
3126
3127         for (n = rb_first(&osdc->osds); n; ) {
3128                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3129
3130                 n = rb_next(n); /* close_osd() */
3131
3132                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3133                               need_resend_linger);
3134                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3135                     memcmp(&osd->o_con.peer_addr,
3136                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3137                            sizeof(struct ceph_entity_addr)))
3138                         close_osd(osd);
3139         }
3140
3141         return 0;
3142 }
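
/*
 * Notes on handle_one_map(): "was_full &= !FULL" computes cleared_full,
 * i.e. the map was full and no longer is.  A skipped epoch (a jump of
 * more than one between full maps) forces a resend of everything, since
 * any number of mapping changes may have been missed in between.
 */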
3143
3144 static void kick_requests(struct ceph_osd_client *osdc,
3145                           struct rb_root *need_resend,
3146                           struct list_head *need_resend_linger)
3147 {
3148         struct ceph_osd_linger_request *lreq, *nlreq;
3149         struct rb_node *n;
3150
3151         for (n = rb_first(need_resend); n; ) {
3152                 struct ceph_osd_request *req =
3153                     rb_entry(n, struct ceph_osd_request, r_node);
3154                 struct ceph_osd *osd;
3155
3156                 n = rb_next(n);
3157                 erase_request(need_resend, req); /* before link_request() */
3158
3159                 WARN_ON(req->r_osd);
3160                 calc_target(osdc, &req->r_t, NULL, false);
3161                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3162                 link_request(osd, req);
3163                 if (!req->r_linger) {
3164                         if (!osd_homeless(osd) && !req->r_t.paused)
3165                                 send_request(req);
3166                 } else {
3167                         cancel_linger_request(req);
3168                 }
3169         }
3170
3171         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3172                 if (!osd_homeless(lreq->osd))
3173                         send_linger(lreq);
3174
3175                 list_del_init(&lreq->scan_item);
3176         }
3177 }
3178
3179 /*
3180  * Process updated osd map.
3181  *
3182  * The message contains any number of incremental and full maps, normally
3183  * indicating some sort of topology change in the cluster.  Kick requests
3184  * off to different OSDs as needed.
3185  */
3186 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3187 {
3188         void *p = msg->front.iov_base;
3189         void *const end = p + msg->front.iov_len;
3190         u32 nr_maps, maplen;
3191         u32 epoch;
3192         struct ceph_fsid fsid;
3193         struct rb_root need_resend = RB_ROOT;
3194         LIST_HEAD(need_resend_linger);
3195         bool handled_incremental = false;
3196         bool was_pauserd, was_pausewr;
3197         bool pauserd, pausewr;
3198         int err;
3199
3200         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3201         down_write(&osdc->lock);
3202
3203         /* verify fsid */
3204         ceph_decode_need(&p, end, sizeof(fsid), bad);
3205         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3206         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3207                 goto bad;
3208
3209         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3210         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3211                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3212                       have_pool_full(osdc);
3213
3214         /* incremental maps */
3215         ceph_decode_32_safe(&p, end, nr_maps, bad);
3216         dout(" %d inc maps\n", nr_maps);
3217         while (nr_maps > 0) {
3218                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3219                 epoch = ceph_decode_32(&p);
3220                 maplen = ceph_decode_32(&p);
3221                 ceph_decode_need(&p, end, maplen, bad);
3222                 if (osdc->osdmap->epoch &&
3223                     osdc->osdmap->epoch + 1 == epoch) {
3224                         dout("applying incremental map %u len %d\n",
3225                              epoch, maplen);
3226                         err = handle_one_map(osdc, p, p + maplen, true,
3227                                              &need_resend, &need_resend_linger);
3228                         if (err)
3229                                 goto bad;
3230                         handled_incremental = true;
3231                 } else {
3232                         dout("ignoring incremental map %u len %d\n",
3233                              epoch, maplen);
3234                 }
3235                 p += maplen;
3236                 nr_maps--;
3237         }
3238         if (handled_incremental)
3239                 goto done;
3240
3241         /* full maps */
3242         ceph_decode_32_safe(&p, end, nr_maps, bad);
3243         dout(" %d full maps\n", nr_maps);
3244         while (nr_maps) {
3245                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3246                 epoch = ceph_decode_32(&p);
3247                 maplen = ceph_decode_32(&p);
3248                 ceph_decode_need(&p, end, maplen, bad);
3249                 if (nr_maps > 1) {
3250                         dout("skipping non-latest full map %u len %d\n",
3251                              epoch, maplen);
3252                 } else if (osdc->osdmap->epoch >= epoch) {
3253                         dout("skipping full map %u len %d, "
3254                              "older than our %u\n", epoch, maplen,
3255                              osdc->osdmap->epoch);
3256                 } else {
3257                         dout("taking full map %u len %d\n", epoch, maplen);
3258                         err = handle_one_map(osdc, p, p + maplen, false,
3259                                              &need_resend, &need_resend_linger);
3260                         if (err)
3261                                 goto bad;
3262                 }
3263                 p += maplen;
3264                 nr_maps--;
3265         }
3266
3267 done:
3268         /*
3269          * subscribe to subsequent osdmap updates if we are paused or
3270          * full, to ensure we find out when I/O can resume and stop
3271          * returning ENOSPC.
3272          */
3273         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3274         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3275                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3276                   have_pool_full(osdc);
3277         if (was_pauserd || was_pausewr || pauserd || pausewr)
3278                 maybe_request_map(osdc);
3279
3280         kick_requests(osdc, &need_resend, &need_resend_linger);
3281
3282         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3283                           osdc->osdmap->epoch);
3284         up_write(&osdc->lock);
3285         wake_up_all(&osdc->client->auth_wq);
3286         return;
3287
3288 bad:
3289         pr_err("osdc handle_map corrupt msg\n");
3290         ceph_msg_dump(msg);
3291         up_write(&osdc->lock);
3292 }
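
/*
 * Shape of the OSDMAP message front as decoded above (inferred from the
 * decoding code, not an authoritative wire spec):
 *
 *	ceph_fsid fsid;
 *	u32 nr_inc_maps;
 *	nr_inc_maps * { u32 epoch; u32 len; u8 data[len]; }
 *	u32 nr_full_maps;
 *	nr_full_maps * { u32 epoch; u32 len; u8 data[len]; }
 */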
3293
3294 /*
3295  * Resubmit requests pending on the given osd.
3296  */
3297 static void kick_osd_requests(struct ceph_osd *osd)
3298 {
3299         struct rb_node *n;
3300
3301         for (n = rb_first(&osd->o_requests); n; ) {
3302                 struct ceph_osd_request *req =
3303                     rb_entry(n, struct ceph_osd_request, r_node);
3304
3305                 n = rb_next(n); /* cancel_linger_request() */
3306
3307                 if (!req->r_linger) {
3308                         if (!req->r_t.paused)
3309                                 send_request(req);
3310                 } else {
3311                         cancel_linger_request(req);
3312                 }
3313         }
3314         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3315                 struct ceph_osd_linger_request *lreq =
3316                     rb_entry(n, struct ceph_osd_linger_request, node);
3317
3318                 send_linger(lreq);
3319         }
3320 }
3321
3322 /*
3323  * If the osd connection drops, we need to resubmit all requests.
3324  */
3325 static void osd_fault(struct ceph_connection *con)
3326 {
3327         struct ceph_osd *osd = con->private;
3328         struct ceph_osd_client *osdc = osd->o_osdc;
3329
3330         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3331
3332         down_write(&osdc->lock);
3333         if (!osd_registered(osd)) {
3334                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3335                 goto out_unlock;
3336         }
3337
3338         if (!reopen_osd(osd))
3339                 kick_osd_requests(osd);
3340         maybe_request_map(osdc);
3341
3342 out_unlock:
3343         up_write(&osdc->lock);
3344 }
3345
3346 /*
3347  * Process osd watch notifications
3348  */
3349 static void handle_watch_notify(struct ceph_osd_client *osdc,
3350                                 struct ceph_msg *msg)
3351 {
3352         void *p = msg->front.iov_base;
3353         void *const end = p + msg->front.iov_len;
3354         struct ceph_osd_linger_request *lreq;
3355         struct linger_work *lwork;
3356         u8 proto_ver, opcode;
3357         u64 cookie, notify_id;
3358         u64 notifier_id = 0;
3359         s32 return_code = 0;
3360         void *payload = NULL;
3361         u32 payload_len = 0;
3362
3363         ceph_decode_8_safe(&p, end, proto_ver, bad);
3364         ceph_decode_8_safe(&p, end, opcode, bad);
3365         ceph_decode_64_safe(&p, end, cookie, bad);
3366         p += 8; /* skip ver */
3367         ceph_decode_64_safe(&p, end, notify_id, bad);
3368
3369         if (proto_ver >= 1) {
3370                 ceph_decode_32_safe(&p, end, payload_len, bad);
3371                 ceph_decode_need(&p, end, payload_len, bad);
3372                 payload = p;
3373                 p += payload_len;
3374         }
3375
3376         if (le16_to_cpu(msg->hdr.version) >= 2)
3377                 ceph_decode_32_safe(&p, end, return_code, bad);
3378
3379         if (le16_to_cpu(msg->hdr.version) >= 3)
3380                 ceph_decode_64_safe(&p, end, notifier_id, bad);
3381
3382         down_read(&osdc->lock);
3383         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3384         if (!lreq) {
3385                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3386                      cookie);
3387                 goto out_unlock_osdc;
3388         }
3389
3390         mutex_lock(&lreq->lock);
3391         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3392              opcode, cookie, lreq, lreq->is_watch);
3393         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3394                 if (!lreq->last_error) {
3395                         lreq->last_error = -ENOTCONN;
3396                         queue_watch_error(lreq);
3397                 }
3398         } else if (!lreq->is_watch) {
3399                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3400                 if (lreq->notify_id && lreq->notify_id != notify_id) {
3401                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3402                              lreq->notify_id, notify_id);
3403                 } else if (!completion_done(&lreq->notify_finish_wait)) {
3404                         struct ceph_msg_data *data =
3405                             list_first_entry_or_null(&msg->data,
3406                                                      struct ceph_msg_data,
3407                                                      links);
3408
3409                         if (data) {
3410                                 if (lreq->preply_pages) {
3411                                         WARN_ON(data->type !=
3412                                                         CEPH_MSG_DATA_PAGES);
3413                                         *lreq->preply_pages = data->pages;
3414                                         *lreq->preply_len = data->length;
3415                                 } else {
3416                                         ceph_release_page_vector(data->pages,
3417                                                calc_pages_for(0, data->length));
3418                                 }
3419                         }
3420                         lreq->notify_finish_error = return_code;
3421                         complete_all(&lreq->notify_finish_wait);
3422                 }
3423         } else {
3424                 /* CEPH_WATCH_EVENT_NOTIFY */
3425                 lwork = lwork_alloc(lreq, do_watch_notify);
3426                 if (!lwork) {
3427                         pr_err("failed to allocate notify-lwork\n");
3428                         goto out_unlock_lreq;
3429                 }
3430
3431                 lwork->notify.notify_id = notify_id;
3432                 lwork->notify.notifier_id = notifier_id;
3433                 lwork->notify.payload = payload;
3434                 lwork->notify.payload_len = payload_len;
3435                 lwork->notify.msg = ceph_msg_get(msg);
3436                 lwork_queue(lwork);
3437         }
3438
3439 out_unlock_lreq:
3440         mutex_unlock(&lreq->lock);
3441 out_unlock_osdc:
3442         up_read(&osdc->lock);
3443         return;
3444
3445 bad:
3446         pr_err("osdc handle_watch_notify corrupt msg\n");
3447 }
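
/*
 * Note: in the CEPH_WATCH_EVENT_NOTIFY case above, lwork->notify.payload
 * points into the message front; the ceph_msg_get() reference is what
 * keeps it valid until do_watch_notify() runs and drops the message.
 */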
3448
3449 /*
3450  * Register request, send initial attempt.
3451  */
3452 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3453                             struct ceph_osd_request *req,
3454                             bool nofail)
3455 {
3456         down_read(&osdc->lock);
3457         submit_request(req, false);
3458         up_read(&osdc->lock);
3459
3460         return 0;
3461 }
3462 EXPORT_SYMBOL(ceph_osdc_start_request);
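
/*
 * A minimal submit-and-wait sketch (hypothetical caller, mirroring what
 * ceph_osdc_readpages() below does; request setup and error handling
 * elided):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, ...);
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */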
3463
3464 /*
3465  * Unregister a registered request.  The request is not completed (i.e.
3466  * no callbacks or wakeups) - higher layers are supposed to know what
3467  * they are canceling.
3468  */
3469 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3470 {
3471         struct ceph_osd_client *osdc = req->r_osdc;
3472
3473         down_write(&osdc->lock);
3474         if (req->r_osd)
3475                 cancel_request(req);
3476         up_write(&osdc->lock);
3477 }
3478 EXPORT_SYMBOL(ceph_osdc_cancel_request);
3479
3480 /*
3481  * @timeout: in jiffies, 0 means "wait forever"
3482  */
3483 static int wait_request_timeout(struct ceph_osd_request *req,
3484                                 unsigned long timeout)
3485 {
3486         long left;
3487
3488         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3489         left = wait_for_completion_killable_timeout(&req->r_completion,
3490                                                 ceph_timeout_jiffies(timeout));
3491         if (left <= 0) {
3492                 left = left ?: -ETIMEDOUT;
3493                 ceph_osdc_cancel_request(req);
3494
3495         /* kludge - need to wake ceph_osdc_sync() */
3496                 complete_all(&req->r_safe_completion);
3497         } else {
3498                 left = req->r_result; /* completed */
3499         }
3500
3501         return left;
3502 }
3503
3504 /*
3505  * wait for a request to complete
3506  */
3507 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3508                            struct ceph_osd_request *req)
3509 {
3510         return wait_request_timeout(req, 0);
3511 }
3512 EXPORT_SYMBOL(ceph_osdc_wait_request);
3513
3514 /*
3515  * sync - wait for all in-flight writes to commit.  last_tid avoids starvation.
3516  */
3517 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3518 {
3519         struct rb_node *n, *p;
3520         u64 last_tid = atomic64_read(&osdc->last_tid);
3521
3522 again:
3523         down_read(&osdc->lock);
3524         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3525                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3526
3527                 mutex_lock(&osd->lock);
3528                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3529                         struct ceph_osd_request *req =
3530                             rb_entry(p, struct ceph_osd_request, r_node);
3531
3532                         if (req->r_tid > last_tid)
3533                                 break;
3534
3535                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3536                                 continue;
3537
3538                         ceph_osdc_get_request(req);
3539                         mutex_unlock(&osd->lock);
3540                         up_read(&osdc->lock);
3541                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
3542                              __func__, req, req->r_tid, last_tid);
3543                         wait_for_completion(&req->r_safe_completion);
3544                         ceph_osdc_put_request(req);
3545                         goto again;
3546                 }
3547
3548                 mutex_unlock(&osd->lock);
3549         }
3550
3551         up_read(&osdc->lock);
3552         dout("%s done last_tid %llu\n", __func__, last_tid);
3553 }
3554 EXPORT_SYMBOL(ceph_osdc_sync);
3555
3556 static struct ceph_osd_request *
3557 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3558 {
3559         struct ceph_osd_request *req;
3560
3561         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3562         if (!req)
3563                 return NULL;
3564
3565         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3566         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3567
3568         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3569                 ceph_osdc_put_request(req);
3570                 return NULL;
3571         }
3572
3573         return req;
3574 }
3575
3576 /*
3577  * Returns a handle, caller owns a ref.
3578  */
3579 struct ceph_osd_linger_request *
3580 ceph_osdc_watch(struct ceph_osd_client *osdc,
3581                 struct ceph_object_id *oid,
3582                 struct ceph_object_locator *oloc,
3583                 rados_watchcb2_t wcb,
3584                 rados_watcherrcb_t errcb,
3585                 void *data)
3586 {
3587         struct ceph_osd_linger_request *lreq;
3588         int ret;
3589
3590         lreq = linger_alloc(osdc);
3591         if (!lreq)
3592                 return ERR_PTR(-ENOMEM);
3593
3594         lreq->is_watch = true;
3595         lreq->wcb = wcb;
3596         lreq->errcb = errcb;
3597         lreq->data = data;
3598         lreq->watch_valid_thru = jiffies;
3599
3600         ceph_oid_copy(&lreq->t.base_oid, oid);
3601         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3602         lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3603         lreq->mtime = CURRENT_TIME;
3604
3605         lreq->reg_req = alloc_linger_request(lreq);
3606         if (!lreq->reg_req) {
3607                 ret = -ENOMEM;
3608                 goto err_put_lreq;
3609         }
3610
3611         lreq->ping_req = alloc_linger_request(lreq);
3612         if (!lreq->ping_req) {
3613                 ret = -ENOMEM;
3614                 goto err_put_lreq;
3615         }
3616
3617         down_write(&osdc->lock);
3618         linger_register(lreq); /* before osd_req_op_* */
3619         osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3620                               CEPH_OSD_WATCH_OP_WATCH);
3621         osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3622                               CEPH_OSD_WATCH_OP_PING);
3623         linger_submit(lreq);
3624         up_write(&osdc->lock);
3625
3626         ret = linger_reg_commit_wait(lreq);
3627         if (ret) {
3628                 linger_cancel(lreq);
3629                 goto err_put_lreq;
3630         }
3631
3632         return lreq;
3633
3634 err_put_lreq:
3635         linger_put(lreq);
3636         return ERR_PTR(ret);
3637 }
3638 EXPORT_SYMBOL(ceph_osdc_watch);
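
/*
 * A minimal watch/unwatch sketch (hypothetical caller; callback
 * prototypes per the rados_watchcb2_t/rados_watcherrcb_t typedefs in
 * <linux/ceph/osd_client.h>):
 *
 *	static void my_wcb(void *arg, u64 notify_id, u64 cookie,
 *			   u64 notifier_id, void *data, size_t data_len)
 *	{
 *		... handle the notify, then ceph_osdc_notify_ack() ...
 *	}
 *
 *	static void my_errcb(void *arg, u64 cookie, int err)
 *	{
 *		... watch is broken - tear it down or re-establish ...
 *	}
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_wcb, my_errcb, arg);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);	(releases the ref)
 */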
3639
3640 /*
3641  * Releases a ref.
3642  *
3643  * Times out after mount_timeout to preserve rbd unmap behaviour
3644  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3645  * with mount_timeout").
3646  */
3647 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3648                       struct ceph_osd_linger_request *lreq)
3649 {
3650         struct ceph_options *opts = osdc->client->options;
3651         struct ceph_osd_request *req;
3652         int ret;
3653
3654         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3655         if (!req)
3656                 return -ENOMEM;
3657
3658         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3659         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3660         req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3661         req->r_mtime = CURRENT_TIME;
3662         osd_req_op_watch_init(req, 0, lreq->linger_id,
3663                               CEPH_OSD_WATCH_OP_UNWATCH);
3664
3665         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3666         if (ret)
3667                 goto out_put_req;
3668
3669         ceph_osdc_start_request(osdc, req, false);
3670         linger_cancel(lreq);
3671         linger_put(lreq);
3672         ret = wait_request_timeout(req, opts->mount_timeout);
3673
3674 out_put_req:
3675         ceph_osdc_put_request(req);
3676         return ret;
3677 }
3678 EXPORT_SYMBOL(ceph_osdc_unwatch);
3679
3680 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3681                                       u64 notify_id, u64 cookie, void *payload,
3682                                       size_t payload_len)
3683 {
3684         struct ceph_osd_req_op *op;
3685         struct ceph_pagelist *pl;
3686         int ret;
3687
3688         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3689
3690         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3691         if (!pl)
3692                 return -ENOMEM;
3693
3694         ceph_pagelist_init(pl);
3695         ret = ceph_pagelist_encode_64(pl, notify_id);
3696         ret |= ceph_pagelist_encode_64(pl, cookie);
3697         if (payload) {
3698                 ret |= ceph_pagelist_encode_32(pl, payload_len);
3699                 ret |= ceph_pagelist_append(pl, payload, payload_len);
3700         } else {
3701                 ret |= ceph_pagelist_encode_32(pl, 0);
3702         }
3703         if (ret) {
3704                 ceph_pagelist_release(pl);
3705                 return -ENOMEM;
3706         }
3707
3708         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3709         op->indata_len = pl->length;
3710         return 0;
3711 }
3712
3713 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3714                          struct ceph_object_id *oid,
3715                          struct ceph_object_locator *oloc,
3716                          u64 notify_id,
3717                          u64 cookie,
3718                          void *payload,
3719                          size_t payload_len)
3720 {
3721         struct ceph_osd_request *req;
3722         int ret;
3723
3724         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3725         if (!req)
3726                 return -ENOMEM;
3727
3728         ceph_oid_copy(&req->r_base_oid, oid);
3729         ceph_oloc_copy(&req->r_base_oloc, oloc);
3730         req->r_flags = CEPH_OSD_FLAG_READ;
3731
3732         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3733         if (ret)
3734                 goto out_put_req;
3735
3736         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3737                                          payload_len);
3738         if (ret)
3739                 goto out_put_req;
3740
3741         ceph_osdc_start_request(osdc, req, false);
3742         ret = ceph_osdc_wait_request(osdc, req);
3743
3744 out_put_req:
3745         ceph_osdc_put_request(req);
3746         return ret;
3747 }
3748 EXPORT_SYMBOL(ceph_osdc_notify_ack);
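
/*
 * Typically invoked from a watch callback: the watcher echoes back the
 * notify_id and cookie it was delivered with, letting the OSD complete
 * the notifier's ceph_osdc_notify().
 */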
3749
3750 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3751                                   u64 cookie, u32 prot_ver, u32 timeout,
3752                                   void *payload, size_t payload_len)
3753 {
3754         struct ceph_osd_req_op *op;
3755         struct ceph_pagelist *pl;
3756         int ret;
3757
3758         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3759         op->notify.cookie = cookie;
3760
3761         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3762         if (!pl)
3763                 return -ENOMEM;
3764
3765         ceph_pagelist_init(pl);
3766         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3767         ret |= ceph_pagelist_encode_32(pl, timeout);
3768         ret |= ceph_pagelist_encode_32(pl, payload_len);
3769         ret |= ceph_pagelist_append(pl, payload, payload_len);
3770         if (ret) {
3771                 ceph_pagelist_release(pl);
3772                 return -ENOMEM;
3773         }
3774
3775         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3776         op->indata_len = pl->length;
3777         return 0;
3778 }
3779
3780 /*
3781  * @timeout: in seconds
3782  *
3783  * @preply_{pages,len} are initialized both on success and error.
3784  * The caller is responsible for:
3785  *
3786  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3787  */
3788 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3789                      struct ceph_object_id *oid,
3790                      struct ceph_object_locator *oloc,
3791                      void *payload,
3792                      size_t payload_len,
3793                      u32 timeout,
3794                      struct page ***preply_pages,
3795                      size_t *preply_len)
3796 {
3797         struct ceph_osd_linger_request *lreq;
3798         struct page **pages;
3799         int ret;
3800
3801         WARN_ON(!timeout);
3802         if (preply_pages) {
3803                 *preply_pages = NULL;
3804                 *preply_len = 0;
3805         }
3806
3807         lreq = linger_alloc(osdc);
3808         if (!lreq)
3809                 return -ENOMEM;
3810
3811         lreq->preply_pages = preply_pages;
3812         lreq->preply_len = preply_len;
3813
3814         ceph_oid_copy(&lreq->t.base_oid, oid);
3815         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3816         lreq->t.flags = CEPH_OSD_FLAG_READ;
3817
3818         lreq->reg_req = alloc_linger_request(lreq);
3819         if (!lreq->reg_req) {
3820                 ret = -ENOMEM;
3821                 goto out_put_lreq;
3822         }
3823
3824         /* for notify_id */
3825         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3826         if (IS_ERR(pages)) {
3827                 ret = PTR_ERR(pages);
3828                 goto out_put_lreq;
3829         }
3830
3831         down_write(&osdc->lock);
3832         linger_register(lreq); /* before osd_req_op_* */
3833         ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3834                                      timeout, payload, payload_len);
3835         if (ret) {
3836                 linger_unregister(lreq);
3837                 up_write(&osdc->lock);
3838                 ceph_release_page_vector(pages, 1);
3839                 goto out_put_lreq;
3840         }
3841         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3842                                                  response_data),
3843                                  pages, PAGE_SIZE, 0, false, true);
3844         linger_submit(lreq);
3845         up_write(&osdc->lock);
3846
3847         ret = linger_reg_commit_wait(lreq);
3848         if (!ret)
3849                 ret = linger_notify_finish_wait(lreq);
3850         else
3851                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3852
3853         linger_cancel(lreq);
3854 out_put_lreq:
3855         linger_put(lreq);
3856         return ret;
3857 }
3858 EXPORT_SYMBOL(ceph_osdc_notify);
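
/*
 * A minimal notify sketch (hypothetical caller; as noted above, the
 * reply page vector must be released even on error):
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
 *			       10, &reply_pages, &reply_len);
 *	... on success, inspect reply_pages/reply_len ...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */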
3859
3860 /*
3861  * Return the number of milliseconds since the watch was last
3862  * confirmed, or an error.  If there is an error, the watch is no
3863  * longer valid, and should be destroyed with ceph_osdc_unwatch().
3864  */
3865 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3866                           struct ceph_osd_linger_request *lreq)
3867 {
3868         unsigned long stamp, age;
3869         int ret;
3870
3871         down_read(&osdc->lock);
3872         mutex_lock(&lreq->lock);
3873         stamp = lreq->watch_valid_thru;
3874         if (!list_empty(&lreq->pending_lworks)) {
3875                 struct linger_work *lwork =
3876                     list_first_entry(&lreq->pending_lworks,
3877                                      struct linger_work,
3878                                      pending_item);
3879
3880                 if (time_before(lwork->queued_stamp, stamp))
3881                         stamp = lwork->queued_stamp;
3882         }
3883         age = jiffies - stamp;
3884         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3885              lreq, lreq->linger_id, age, lreq->last_error);
3886         /* we are truncating to msecs, so return a safe upper bound */
3887         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3888
3889         mutex_unlock(&lreq->lock);
3890         up_read(&osdc->lock);
3891         return ret;
3892 }
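
/*
 * A hypothetical polling sketch built on the above (valid_window_ms is
 * a caller-chosen threshold):
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0) {
 *		... watch is dead: unwatch and re-establish ...
 *	} else if (ret > valid_window_ms) {
 *		... not confirmed recently enough - treat as suspect ...
 *	}
 */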
3893
3894 /*
3895  * Call all pending notify callbacks - for use after a watch is
3896  * unregistered, to make sure no more callbacks for it will be invoked.
3897  */
3898 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
3899 {
3900         flush_workqueue(osdc->notify_wq);
3901 }
3902 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
3903
3904 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
3905 {
3906         down_read(&osdc->lock);
3907         maybe_request_map(osdc);
3908         up_read(&osdc->lock);
3909 }
3910 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
3911
3912 /*
3913  * init, shutdown
3914  */
3915 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
3916 {
3917         int err;
3918
3919         dout("init\n");
3920         osdc->client = client;
3921         init_rwsem(&osdc->lock);
3922         osdc->osds = RB_ROOT;
3923         INIT_LIST_HEAD(&osdc->osd_lru);
3924         spin_lock_init(&osdc->osd_lru_lock);
3925         osd_init(&osdc->homeless_osd);
3926         osdc->homeless_osd.o_osdc = osdc;
3927         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
3928         osdc->linger_requests = RB_ROOT;
3929         osdc->map_checks = RB_ROOT;
3930         osdc->linger_map_checks = RB_ROOT;
3931         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
3932         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
3933
3934         err = -ENOMEM;
3935         osdc->osdmap = ceph_osdmap_alloc();
3936         if (!osdc->osdmap)
3937                 goto out;
3938
3939         osdc->req_mempool = mempool_create_slab_pool(10,
3940                                                      ceph_osd_request_cache);
3941         if (!osdc->req_mempool)
3942                 goto out_map;
3943
3944         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
3945                                 PAGE_SIZE, 10, true, "osd_op");
3946         if (err < 0)
3947                 goto out_mempool;
3948         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
3949                                 PAGE_SIZE, 10, true, "osd_op_reply");
3950         if (err < 0)
3951                 goto out_msgpool;
3952
3953         err = -ENOMEM;
3954         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
3955         if (!osdc->notify_wq)
3956                 goto out_msgpool_reply;
3957
3958         schedule_delayed_work(&osdc->timeout_work,
3959                               osdc->client->options->osd_keepalive_timeout);
3960         schedule_delayed_work(&osdc->osds_timeout_work,
3961             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
3962
3963         return 0;
3964
3965 out_msgpool_reply:
3966         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
3967 out_msgpool:
3968         ceph_msgpool_destroy(&osdc->msgpool_op);
3969 out_mempool:
3970         mempool_destroy(osdc->req_mempool);
3971 out_map:
3972         ceph_osdmap_destroy(osdc->osdmap);
3973 out:
3974         return err;
3975 }
3976
3977 void ceph_osdc_stop(struct ceph_osd_client *osdc)
3978 {
3979         flush_workqueue(osdc->notify_wq);
3980         destroy_workqueue(osdc->notify_wq);
3981         cancel_delayed_work_sync(&osdc->timeout_work);
3982         cancel_delayed_work_sync(&osdc->osds_timeout_work);
3983
3984         down_write(&osdc->lock);
3985         while (!RB_EMPTY_ROOT(&osdc->osds)) {
3986                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
3987                                                 struct ceph_osd, o_node);
3988                 close_osd(osd);
3989         }
3990         up_write(&osdc->lock);
3991         WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
3992         osd_cleanup(&osdc->homeless_osd);
3993
3994         WARN_ON(!list_empty(&osdc->osd_lru));
3995         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
3996         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
3997         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
3998         WARN_ON(atomic_read(&osdc->num_requests));
3999         WARN_ON(atomic_read(&osdc->num_homeless));
4000
4001         ceph_osdmap_destroy(osdc->osdmap);
4002         mempool_destroy(osdc->req_mempool);
4003         ceph_msgpool_destroy(&osdc->msgpool_op);
4004         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4005 }
4006
4007 /*
4008  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4009  * *plen.  Return number of bytes read, or error.
4010  */
4011 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4012                         struct ceph_vino vino, struct ceph_file_layout *layout,
4013                         u64 off, u64 *plen,
4014                         u32 truncate_seq, u64 truncate_size,
4015                         struct page **pages, int num_pages, int page_align)
4016 {
4017         struct ceph_osd_request *req;
4018         int rc = 0;
4019
4020         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4021              vino.snap, off, *plen);
4022         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
4023                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
4024                                     NULL, truncate_seq, truncate_size,
4025                                     false);
4026         if (IS_ERR(req))
4027                 return PTR_ERR(req);
4028
4029         /* it may be a short read due to an object boundary */
4030         osd_req_op_extent_osd_data_pages(req, 0,
4031                                 pages, *plen, page_align, false, false);
4032
4033         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
4034              off, *plen, *plen, page_align);
4035
4036         rc = ceph_osdc_start_request(osdc, req, false);
4037         if (!rc)
4038                 rc = ceph_osdc_wait_request(osdc, req);
4039
4040         ceph_osdc_put_request(req);
4041         dout("readpages result %d\n", rc);
4042         return rc;
4043 }
4044 EXPORT_SYMBOL(ceph_osdc_readpages);
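
/*
 * A minimal read sketch (hypothetical caller; the page vector is sized
 * for the unaligned span, as calc_pages_for() expects):
 *
 *	u64 len = bytes_wanted;
 *	int align = off & ~PAGE_MASK;
 *	int num_pages = calc_pages_for(align, len);
 *	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	ret = ceph_osdc_readpages(osdc, vino, layout, off, &len, 0, 0,
 *				  pages, num_pages, align);
 *	(*plen, here &len, may have been shortened to an object boundary)
 */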
4045
4046 /*
4047  * do a synchronous write on N pages
4048  */
4049 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4050                          struct ceph_file_layout *layout,
4051                          struct ceph_snap_context *snapc,
4052                          u64 off, u64 len,
4053                          u32 truncate_seq, u64 truncate_size,
4054                          struct timespec *mtime,
4055                          struct page **pages, int num_pages)
4056 {
4057         struct ceph_osd_request *req;
4058         int rc = 0;
4059         int page_align = off & ~PAGE_MASK;
4060
4061         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4062                                     CEPH_OSD_OP_WRITE,
4063                                     CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
4064                                     snapc, truncate_seq, truncate_size,
4065                                     true);
4066         if (IS_ERR(req))
4067                 return PTR_ERR(req);
4068
4069         /* it may be a short write due to an object boundary */
4070         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4071                                 false, false);
4072         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4073
4074         req->r_mtime = *mtime;
4075         rc = ceph_osdc_start_request(osdc, req, true);
4076         if (!rc)
4077                 rc = ceph_osdc_wait_request(osdc, req);
4078
4079         ceph_osdc_put_request(req);
4080         if (rc == 0)
4081                 rc = len;
4082         dout("writepages result %d\n", rc);
4083         return rc;
4084 }
4085 EXPORT_SYMBOL(ceph_osdc_writepages);
4086
4087 int ceph_osdc_setup(void)
4088 {
4089         size_t size = sizeof(struct ceph_osd_request) +
4090             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4091
4092         BUG_ON(ceph_osd_request_cache);
4093         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4094                                                    0, 0, NULL);
4095
4096         return ceph_osd_request_cache ? 0 : -ENOMEM;
4097 }
4098 EXPORT_SYMBOL(ceph_osdc_setup);
4099
4100 void ceph_osdc_cleanup(void)
4101 {
4102         BUG_ON(!ceph_osd_request_cache);
4103         kmem_cache_destroy(ceph_osd_request_cache);
4104         ceph_osd_request_cache = NULL;
4105 }
4106 EXPORT_SYMBOL(ceph_osdc_cleanup);
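
/*
 * Note: ceph_osdc_setup()/ceph_osdc_cleanup() are module-lifetime pairs
 * (the request slab is shared by all clients of this module), while
 * ceph_osdc_init()/ceph_osdc_stop() are per-client.
 */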
4107
4108 /*
4109  * handle incoming message
4110  */
4111 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4112 {
4113         struct ceph_osd *osd = con->private;
4114         struct ceph_osd_client *osdc = osd->o_osdc;
4115         int type = le16_to_cpu(msg->hdr.type);
4116
4117         switch (type) {
4118         case CEPH_MSG_OSD_MAP:
4119                 ceph_osdc_handle_map(osdc, msg);
4120                 break;
4121         case CEPH_MSG_OSD_OPREPLY:
4122                 handle_reply(osd, msg);
4123                 break;
4124         case CEPH_MSG_WATCH_NOTIFY:
4125                 handle_watch_notify(osdc, msg);
4126                 break;
4127
4128         default:
4129                 pr_err("received unknown message type %d %s\n", type,
4130                        ceph_msg_type_name(type));
4131         }
4132
4133         ceph_msg_put(msg);
4134 }
4135
4136 /*
4137  * Look up and return the message for an incoming reply.  Don't try to
4138  * do anything yet about a data portion larger than preallocated -
4139  * for now, just skip the message.
4140  */
4141 static struct ceph_msg *get_reply(struct ceph_connection *con,
4142                                   struct ceph_msg_header *hdr,
4143                                   int *skip)
4144 {
4145         struct ceph_osd *osd = con->private;
4146         struct ceph_osd_client *osdc = osd->o_osdc;
4147         struct ceph_msg *m = NULL;
4148         struct ceph_osd_request *req;
4149         int front_len = le32_to_cpu(hdr->front_len);
4150         int data_len = le32_to_cpu(hdr->data_len);
4151         u64 tid = le64_to_cpu(hdr->tid);
4152
4153         down_read(&osdc->lock);
4154         if (!osd_registered(osd)) {
4155                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4156                 *skip = 1;
4157                 goto out_unlock_osdc;
4158         }
4159         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4160
4161         mutex_lock(&osd->lock);
4162         req = lookup_request(&osd->o_requests, tid);
4163         if (!req) {
4164                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4165                      osd->o_osd, tid);
4166                 *skip = 1;
4167                 goto out_unlock_session;
4168         }
4169
4170         ceph_msg_revoke_incoming(req->r_reply);
4171
4172         if (front_len > req->r_reply->front_alloc_len) {
4173                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4174                         __func__, osd->o_osd, req->r_tid, front_len,
4175                         req->r_reply->front_alloc_len);
4176                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4177                                  false);
4178                 if (!m)
4179                         goto out_unlock_session;
4180                 ceph_msg_put(req->r_reply);
4181                 req->r_reply = m;
4182         }
4183
4184         if (data_len > req->r_reply->data_length) {
4185                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4186                         __func__, osd->o_osd, req->r_tid, data_len,
4187                         req->r_reply->data_length);
4188                 m = NULL;
4189                 *skip = 1;
4190                 goto out_unlock_session;
4191         }
4192
4193         m = ceph_msg_get(req->r_reply);
4194         dout("get_reply tid %lld %p\n", tid, m);
4195
4196 out_unlock_session:
4197         mutex_unlock(&osd->lock);
4198 out_unlock_osdc:
4199         up_read(&osdc->lock);
4200         return m;
4201 }
4202
4203 /*
4204  * TODO: switch to a msg-owned pagelist
4205  */
4206 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4207 {
4208         struct ceph_msg *m;
4209         int type = le16_to_cpu(hdr->type);
4210         u32 front_len = le32_to_cpu(hdr->front_len);
4211         u32 data_len = le32_to_cpu(hdr->data_len);
4212
4213         m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4214         if (!m)
4215                 return NULL;
4216
4217         if (data_len) {
4218                 struct page **pages;
4219                 struct ceph_osd_data osd_data;
4220
4221                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4222                                                GFP_NOIO);
4223                 if (IS_ERR(pages)) {
4224                         ceph_msg_put(m);
4225                         return NULL;
4226                 }
4227
4228                 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4229                                          false);
4230                 ceph_osdc_msg_data_add(m, &osd_data);
4231         }
4232
4233         return m;
4234 }
4235
4236 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4237                                   struct ceph_msg_header *hdr,
4238                                   int *skip)
4239 {
4240         struct ceph_osd *osd = con->private;
4241         int type = le16_to_cpu(hdr->type);
4242
4243         *skip = 0;
4244         switch (type) {
4245         case CEPH_MSG_OSD_MAP:
4246         case CEPH_MSG_WATCH_NOTIFY:
4247                 return alloc_msg_with_page_vector(hdr);
4248         case CEPH_MSG_OSD_OPREPLY:
4249                 return get_reply(con, hdr, skip);
4250         default:
4251                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4252                         osd->o_osd, type);
4253                 *skip = 1;
4254                 return NULL;
4255         }
4256 }
4257
4258 /*
4259  * Wrappers to refcount containing ceph_osd struct
4260  */
4261 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4262 {
4263         struct ceph_osd *osd = con->private;
4264         if (get_osd(osd))
4265                 return con;
4266         return NULL;
4267 }
4268
4269 static void put_osd_con(struct ceph_connection *con)
4270 {
4271         struct ceph_osd *osd = con->private;
4272         put_osd(osd);
4273 }
4274
4275 /*
4276  * authentication
4277  */
4278 /*
4279  * Note: returned pointer is the address of a structure that's
4280  * managed separately.  Caller must *not* attempt to free it.
4281  */
4282 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4283                                         int *proto, int force_new)
4284 {
4285         struct ceph_osd *o = con->private;
4286         struct ceph_osd_client *osdc = o->o_osdc;
4287         struct ceph_auth_client *ac = osdc->client->monc.auth;
4288         struct ceph_auth_handshake *auth = &o->o_auth;
4289
4290         if (force_new && auth->authorizer) {
4291                 ceph_auth_destroy_authorizer(auth->authorizer);
4292                 auth->authorizer = NULL;
4293         }
4294         if (!auth->authorizer) {
4295                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4296                                                       auth);
4297                 if (ret)
4298                         return ERR_PTR(ret);
4299         } else {
4300                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4301                                                      auth);
4302                 if (ret)
4303                         return ERR_PTR(ret);
4304         }
4305         *proto = ac->protocol;
4306
4307         return auth;
4308 }
4309
4310
4311 static int verify_authorizer_reply(struct ceph_connection *con, int len)
4312 {
4313         struct ceph_osd *o = con->private;
4314         struct ceph_osd_client *osdc = o->o_osdc;
4315         struct ceph_auth_client *ac = osdc->client->monc.auth;
4316
4317         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4318 }
4319
4320 static int invalidate_authorizer(struct ceph_connection *con)
4321 {
4322         struct ceph_osd *o = con->private;
4323         struct ceph_osd_client *osdc = o->o_osdc;
4324         struct ceph_auth_client *ac = osdc->client->monc.auth;
4325
4326         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4327         return ceph_monc_validate_auth(&osdc->client->monc);
4328 }
4329
4330 static int osd_sign_message(struct ceph_msg *msg)
4331 {
4332         struct ceph_osd *o = msg->con->private;
4333         struct ceph_auth_handshake *auth = &o->o_auth;
4334
4335         return ceph_auth_sign_message(auth, msg);
4336 }
4337
4338 static int osd_check_message_signature(struct ceph_msg *msg)
4339 {
4340         struct ceph_osd *o = msg->con->private;
4341         struct ceph_auth_handshake *auth = &o->o_auth;
4342
4343         return ceph_auth_check_message_signature(auth, msg);
4344 }
4345
4346 static const struct ceph_connection_operations osd_con_ops = {
4347         .get = get_osd_con,
4348         .put = put_osd_con,
4349         .dispatch = dispatch,
4350         .get_authorizer = get_authorizer,
4351         .verify_authorizer_reply = verify_authorizer_reply,
4352         .invalidate_authorizer = invalidate_authorizer,
4353         .alloc_msg = alloc_msg,
4354         .sign_message = osd_sign_message,
4355         .check_message_signature = osd_check_message_signature,
4356         .fault = osd_fault,
4357 };