ceph: add check_session_state() helper and make it global
[platform/kernel/linux-rpi.git] / fs / ceph / mds_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12 #include <linux/bits.h>
13 #include <linux/ktime.h>
14
15 #include "super.h"
16 #include "mds_client.h"
17
18 #include <linux/ceph/ceph_features.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/pagelist.h>
22 #include <linux/ceph/auth.h>
23 #include <linux/ceph/debugfs.h>
24
25 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
26
27 /*
28  * A cluster of MDS (metadata server) daemons is responsible for
29  * managing the file system namespace (the directory hierarchy and
30  * inodes) and for coordinating shared access to storage.  Metadata is
31  * partitioned hierarchically across a number of servers, and that
32  * partition varies over time as the cluster adjusts the distribution
33  * in order to balance load.
34  *
35  * The MDS client is primarily responsible for managing synchronous
36  * metadata requests for operations like open, unlink, and so forth.
37  * If there is an MDS failure, we find out about it when we (possibly
38  * request and) receive a new MDS map, and can resubmit affected
39  * requests.
40  *
41  * For the most part, though, we take advantage of a lossless
42  * communications channel to the MDS, and do not need to worry about
43  * timing out or resubmitting requests.
44  *
45  * We maintain a stateful "session" with each MDS we interact with.
46  * Within each session, we send periodic heartbeat messages to ensure
47  * any capabilities or leases we have been issued remain valid.  If
48  * the session times out and goes stale, our leases and capabilities
49  * are no longer valid.
50  */
51
52 struct ceph_reconnect_state {
53         struct ceph_mds_session *session;
54         int nr_caps, nr_realms;
55         struct ceph_pagelist *pagelist;
56         unsigned msg_version;
57         bool allow_multi;
58 };
59
60 static void __wake_requests(struct ceph_mds_client *mdsc,
61                             struct list_head *head);
62 static void ceph_cap_release_work(struct work_struct *work);
63 static void ceph_cap_reclaim_work(struct work_struct *work);
64
65 static const struct ceph_connection_operations mds_con_ops;
66
67
68 /*
69  * mds reply parsing
70  */
71
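/*
 * Decode the quota portion of an inode reply: a versioned, length-prefixed
 * blob carrying the max_bytes and max_files quota values.
 */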
72 static int parse_reply_info_quota(void **p, void *end,
73                                   struct ceph_mds_reply_info_in *info)
74 {
75         u8 struct_v, struct_compat;
76         u32 struct_len;
77
78         ceph_decode_8_safe(p, end, struct_v, bad);
79         ceph_decode_8_safe(p, end, struct_compat, bad);
80         /* struct_v is expected to be >= 1. we only
81          * understand encoding with struct_compat == 1. */
82         if (!struct_v || struct_compat != 1)
83                 goto bad;
84         ceph_decode_32_safe(p, end, struct_len, bad);
85         ceph_decode_need(p, end, struct_len, bad);
86         end = *p + struct_len;
87         ceph_decode_64_safe(p, end, info->max_bytes, bad);
88         ceph_decode_64_safe(p, end, info->max_files, bad);
89         *p = end;
90         return 0;
91 bad:
92         return -EIO;
93 }
94
95 /*
96  * parse individual inode info
97  */
98 static int parse_reply_info_in(void **p, void *end,
99                                struct ceph_mds_reply_info_in *info,
100                                u64 features)
101 {
102         int err = 0;
103         u8 struct_v = 0;
104
105         if (features == (u64)-1) {
106                 u32 struct_len;
107                 u8 struct_compat;
108                 ceph_decode_8_safe(p, end, struct_v, bad);
109                 ceph_decode_8_safe(p, end, struct_compat, bad);
110                 /* struct_v is expected to be >= 1. we only understand
111                  * encoding with struct_compat == 1. */
112                 if (!struct_v || struct_compat != 1)
113                         goto bad;
114                 ceph_decode_32_safe(p, end, struct_len, bad);
115                 ceph_decode_need(p, end, struct_len, bad);
116                 end = *p + struct_len;
117         }
118
119         ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
120         info->in = *p;
121         *p += sizeof(struct ceph_mds_reply_inode) +
122                 sizeof(*info->in->fragtree.splits) *
123                 le32_to_cpu(info->in->fragtree.nsplits);
124
125         ceph_decode_32_safe(p, end, info->symlink_len, bad);
126         ceph_decode_need(p, end, info->symlink_len, bad);
127         info->symlink = *p;
128         *p += info->symlink_len;
129
130         ceph_decode_copy_safe(p, end, &info->dir_layout,
131                               sizeof(info->dir_layout), bad);
132         ceph_decode_32_safe(p, end, info->xattr_len, bad);
133         ceph_decode_need(p, end, info->xattr_len, bad);
134         info->xattr_data = *p;
135         *p += info->xattr_len;
136
137         if (features == (u64)-1) {
138                 /* inline data */
139                 ceph_decode_64_safe(p, end, info->inline_version, bad);
140                 ceph_decode_32_safe(p, end, info->inline_len, bad);
141                 ceph_decode_need(p, end, info->inline_len, bad);
142                 info->inline_data = *p;
143                 *p += info->inline_len;
144                 /* quota */
145                 err = parse_reply_info_quota(p, end, info);
146                 if (err < 0)
147                         goto out_bad;
148                 /* pool namespace */
149                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
150                 if (info->pool_ns_len > 0) {
151                         ceph_decode_need(p, end, info->pool_ns_len, bad);
152                         info->pool_ns_data = *p;
153                         *p += info->pool_ns_len;
154                 }
155
156                 /* btime */
157                 ceph_decode_need(p, end, sizeof(info->btime), bad);
158                 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
159
160                 /* change attribute */
161                 ceph_decode_64_safe(p, end, info->change_attr, bad);
162
163                 /* dir pin */
164                 if (struct_v >= 2) {
165                         ceph_decode_32_safe(p, end, info->dir_pin, bad);
166                 } else {
167                         info->dir_pin = -ENODATA;
168                 }
169
170                 /* snapshot birth time, remains zero for v<=2 */
171                 if (struct_v >= 3) {
172                         ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
173                         ceph_decode_copy(p, &info->snap_btime,
174                                          sizeof(info->snap_btime));
175                 } else {
176                         memset(&info->snap_btime, 0, sizeof(info->snap_btime));
177                 }
178
179                 *p = end;
180         } else {
181                 if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
182                         ceph_decode_64_safe(p, end, info->inline_version, bad);
183                         ceph_decode_32_safe(p, end, info->inline_len, bad);
184                         ceph_decode_need(p, end, info->inline_len, bad);
185                         info->inline_data = *p;
186                         *p += info->inline_len;
187                 } else
188                         info->inline_version = CEPH_INLINE_NONE;
189
190                 if (features & CEPH_FEATURE_MDS_QUOTA) {
191                         err = parse_reply_info_quota(p, end, info);
192                         if (err < 0)
193                                 goto out_bad;
194                 } else {
195                         info->max_bytes = 0;
196                         info->max_files = 0;
197                 }
198
199                 info->pool_ns_len = 0;
200                 info->pool_ns_data = NULL;
201                 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
202                         ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
203                         if (info->pool_ns_len > 0) {
204                                 ceph_decode_need(p, end, info->pool_ns_len, bad);
205                                 info->pool_ns_data = *p;
206                                 *p += info->pool_ns_len;
207                         }
208                 }
209
210                 if (features & CEPH_FEATURE_FS_BTIME) {
211                         ceph_decode_need(p, end, sizeof(info->btime), bad);
212                         ceph_decode_copy(p, &info->btime, sizeof(info->btime));
213                         ceph_decode_64_safe(p, end, info->change_attr, bad);
214                 }
215
216                 info->dir_pin = -ENODATA;
217                 /* info->snap_btime remains zero */
218         }
219         return 0;
220 bad:
221         err = -EIO;
222 out_bad:
223         return err;
224 }
225
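/*
 * Decode a ceph_mds_reply_dirfrag (fragment info plus its list of replica
 * mds ranks).  The new (features == -1) encoding wraps it in a versioned,
 * length-prefixed envelope.
 */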
226 static int parse_reply_info_dir(void **p, void *end,
227                                 struct ceph_mds_reply_dirfrag **dirfrag,
228                                 u64 features)
229 {
230         if (features == (u64)-1) {
231                 u8 struct_v, struct_compat;
232                 u32 struct_len;
233                 ceph_decode_8_safe(p, end, struct_v, bad);
234                 ceph_decode_8_safe(p, end, struct_compat, bad);
235                 /* struct_v is expected to be >= 1. we only understand
236                  * encoding whose struct_compat == 1. */
237                 if (!struct_v || struct_compat != 1)
238                         goto bad;
239                 ceph_decode_32_safe(p, end, struct_len, bad);
240                 ceph_decode_need(p, end, struct_len, bad);
241                 end = *p + struct_len;
242         }
243
244         ceph_decode_need(p, end, sizeof(**dirfrag), bad);
245         *dirfrag = *p;
246         *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
247         if (unlikely(*p > end))
248                 goto bad;
249         if (features == (u64)-1)
250                 *p = end;
251         return 0;
252 bad:
253         return -EIO;
254 }
255
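/*
 * Decode a dentry lease.  As with the dirfrag above, the new encoding is
 * wrapped in a versioned, length-prefixed envelope.
 */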
256 static int parse_reply_info_lease(void **p, void *end,
257                                   struct ceph_mds_reply_lease **lease,
258                                   u64 features)
259 {
260         if (features == (u64)-1) {
261                 u8 struct_v, struct_compat;
262                 u32 struct_len;
263                 ceph_decode_8_safe(p, end, struct_v, bad);
264                 ceph_decode_8_safe(p, end, struct_compat, bad);
265                 /* struct_v is expected to be >= 1. we only understand
266                  * encoding whose struct_compat == 1. */
267                 if (!struct_v || struct_compat != 1)
268                         goto bad;
269                 ceph_decode_32_safe(p, end, struct_len, bad);
270                 ceph_decode_need(p, end, struct_len, bad);
271                 end = *p + struct_len;
272         }
273
274         ceph_decode_need(p, end, sizeof(**lease), bad);
275         *lease = *p;
276         *p += sizeof(**lease);
277         if (features == (u64)-1)
278                 *p = end;
279         return 0;
280 bad:
281         return -EIO;
282 }
283
284 /*
285  * parse a normal reply, which may contain a (dir+)dentry and/or a
286  * target inode.
287  */
288 static int parse_reply_info_trace(void **p, void *end,
289                                   struct ceph_mds_reply_info_parsed *info,
290                                   u64 features)
291 {
292         int err;
293
294         if (info->head->is_dentry) {
295                 err = parse_reply_info_in(p, end, &info->diri, features);
296                 if (err < 0)
297                         goto out_bad;
298
299                 err = parse_reply_info_dir(p, end, &info->dirfrag, features);
300                 if (err < 0)
301                         goto out_bad;
302
303                 ceph_decode_32_safe(p, end, info->dname_len, bad);
304                 ceph_decode_need(p, end, info->dname_len, bad);
305                 info->dname = *p;
306                 *p += info->dname_len;
307
308                 err = parse_reply_info_lease(p, end, &info->dlease, features);
309                 if (err < 0)
310                         goto out_bad;
311         }
312
313         if (info->head->is_target) {
314                 err = parse_reply_info_in(p, end, &info->targeti, features);
315                 if (err < 0)
316                         goto out_bad;
317         }
318
319         if (unlikely(*p != end))
320                 goto bad;
321         return 0;
322
323 bad:
324         err = -EIO;
325 out_bad:
326         pr_err("problem parsing mds trace %d\n", err);
327         return err;
328 }
329
330 /*
331  * parse readdir results
332  */
333 static int parse_reply_info_readdir(void **p, void *end,
334                                 struct ceph_mds_reply_info_parsed *info,
335                                 u64 features)
336 {
337         u32 num, i = 0;
338         int err;
339
340         err = parse_reply_info_dir(p, end, &info->dir_dir, features);
341         if (err < 0)
342                 goto out_bad;
343
344         ceph_decode_need(p, end, sizeof(num) + 2, bad);
345         num = ceph_decode_32(p);
346         {
347                 u16 flags = ceph_decode_16(p);
348                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
349                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
350                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
351                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
352         }
353         if (num == 0)
354                 goto done;
355
356         BUG_ON(!info->dir_entries);
357         if ((unsigned long)(info->dir_entries + num) >
358             (unsigned long)info->dir_entries + info->dir_buf_size) {
359                 pr_err("dir contents are larger than expected\n");
360                 WARN_ON(1);
361                 goto bad;
362         }
363
364         info->dir_nr = num;
365         while (num) {
366                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
367                 /* dentry */
368                 ceph_decode_32_safe(p, end, rde->name_len, bad);
369                 ceph_decode_need(p, end, rde->name_len, bad);
370                 rde->name = *p;
371                 *p += rde->name_len;
372                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
373
374                 /* dentry lease */
375                 err = parse_reply_info_lease(p, end, &rde->lease, features);
376                 if (err)
377                         goto out_bad;
378                 /* inode */
379                 err = parse_reply_info_in(p, end, &rde->inode, features);
380                 if (err < 0)
381                         goto out_bad;
382                 /* ceph_readdir_prepopulate() will update it */
383                 rde->offset = 0;
384                 i++;
385                 num--;
386         }
387
388 done:
389         /* Skip over any unrecognized fields */
390         *p = end;
391         return 0;
392
393 bad:
394         err = -EIO;
395 out_bad:
396         pr_err("problem parsing dir contents %d\n", err);
397         return err;
398 }
399
400 /*
401  * parse fcntl F_GETLK results
402  */
403 static int parse_reply_info_filelock(void **p, void *end,
404                                      struct ceph_mds_reply_info_parsed *info,
405                                      u64 features)
406 {
407         if (*p + sizeof(*info->filelock_reply) > end)
408                 goto bad;
409
410         info->filelock_reply = *p;
411
412         /* Skip over any unrecognized fields */
413         *p = end;
414         return 0;
415 bad:
416         return -EIO;
417 }
418
419
420 #if BITS_PER_LONG == 64
421
422 #define DELEGATED_INO_AVAILABLE         xa_mk_value(1)
423
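/*
 * Parse the (start, len) ranges of inode numbers the MDS has delegated to
 * this session and record each one as available in s_delegated_inos.
 */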
424 static int ceph_parse_deleg_inos(void **p, void *end,
425                                  struct ceph_mds_session *s)
426 {
427         u32 sets;
428
429         ceph_decode_32_safe(p, end, sets, bad);
430         dout("got %u sets of delegated inodes\n", sets);
431         while (sets--) {
432                 u64 start, len, ino;
433
434                 ceph_decode_64_safe(p, end, start, bad);
435                 ceph_decode_64_safe(p, end, len, bad);
436                 while (len--) {
437                         int err = xa_insert(&s->s_delegated_inos, ino = start++,
438                                             DELEGATED_INO_AVAILABLE,
439                                             GFP_KERNEL);
440                         if (!err) {
441                                 dout("added delegated inode 0x%llx\n",
442                                      start - 1);
443                         } else if (err == -EBUSY) {
444                                 pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
445                                         start - 1);
446                         } else {
447                                 return err;
448                         }
449                 }
450         }
451         return 0;
452 bad:
453         return -EIO;
454 }
455
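/*
 * Claim one available delegated inode number from the session, or return 0
 * if none remain.
 */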
456 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
457 {
458         unsigned long ino;
459         void *val;
460
461         xa_for_each(&s->s_delegated_inos, ino, val) {
462                 val = xa_erase(&s->s_delegated_inos, ino);
463                 if (val == DELEGATED_INO_AVAILABLE)
464                         return ino;
465         }
466         return 0;
467 }
468
469 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
470 {
471         return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
472                          GFP_KERNEL);
473 }
474 #else /* BITS_PER_LONG == 64 */
475 /*
476  * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
477  * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
478  * and bottom words?
479  */
480 static int ceph_parse_deleg_inos(void **p, void *end,
481                                  struct ceph_mds_session *s)
482 {
483         u32 sets;
484
485         ceph_decode_32_safe(p, end, sets, bad);
486         if (sets)
487                 ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
488         return 0;
489 bad:
490         return -EIO;
491 }
492
493 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
494 {
495         return 0;
496 }
497
498 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
499 {
500         return 0;
501 }
502 #endif /* BITS_PER_LONG == 64 */
503
504 /*
505  * parse create results
506  */
507 static int parse_reply_info_create(void **p, void *end,
508                                   struct ceph_mds_reply_info_parsed *info,
509                                   u64 features, struct ceph_mds_session *s)
510 {
511         int ret;
512
513         if (features == (u64)-1 ||
514             (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
515                 if (*p == end) {
516                         /* Malformed reply? */
517                         info->has_create_ino = false;
518                 } else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
519                         u8 struct_v, struct_compat;
520                         u32 len;
521
522                         info->has_create_ino = true;
523                         ceph_decode_8_safe(p, end, struct_v, bad);
524                         ceph_decode_8_safe(p, end, struct_compat, bad);
525                         ceph_decode_32_safe(p, end, len, bad);
526                         ceph_decode_64_safe(p, end, info->ino, bad);
527                         ret = ceph_parse_deleg_inos(p, end, s);
528                         if (ret)
529                                 return ret;
530                 } else {
531                         /* legacy */
532                         ceph_decode_64_safe(p, end, info->ino, bad);
533                         info->has_create_ino = true;
534                 }
535         } else {
536                 if (*p != end)
537                         goto bad;
538         }
539
540         /* Skip over any unrecognized fields */
541         *p = end;
542         return 0;
543 bad:
544         return -EIO;
545 }
546
547 /*
548  * parse extra results
549  */
550 static int parse_reply_info_extra(void **p, void *end,
551                                   struct ceph_mds_reply_info_parsed *info,
552                                   u64 features, struct ceph_mds_session *s)
553 {
554         u32 op = le32_to_cpu(info->head->op);
555
556         if (op == CEPH_MDS_OP_GETFILELOCK)
557                 return parse_reply_info_filelock(p, end, info, features);
558         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
559                 return parse_reply_info_readdir(p, end, info, features);
560         else if (op == CEPH_MDS_OP_CREATE)
561                 return parse_reply_info_create(p, end, info, features, s);
562         else
563                 return -EIO;
564 }
565
566 /*
567  * parse entire mds reply
568  */
569 static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
570                             struct ceph_mds_reply_info_parsed *info,
571                             u64 features)
572 {
573         void *p, *end;
574         u32 len;
575         int err;
576
577         info->head = msg->front.iov_base;
578         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
579         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
580
581         /* trace */
582         ceph_decode_32_safe(&p, end, len, bad);
583         if (len > 0) {
584                 ceph_decode_need(&p, end, len, bad);
585                 err = parse_reply_info_trace(&p, p+len, info, features);
586                 if (err < 0)
587                         goto out_bad;
588         }
589
590         /* extra */
591         ceph_decode_32_safe(&p, end, len, bad);
592         if (len > 0) {
593                 ceph_decode_need(&p, end, len, bad);
594                 err = parse_reply_info_extra(&p, p+len, info, features, s);
595                 if (err < 0)
596                         goto out_bad;
597         }
598
599         /* snap blob */
600         ceph_decode_32_safe(&p, end, len, bad);
601         info->snapblob_len = len;
602         info->snapblob = p;
603         p += len;
604
605         if (p != end)
606                 goto bad;
607         return 0;
608
609 bad:
610         err = -EIO;
611 out_bad:
612         pr_err("mds parse_reply err %d\n", err);
613         return err;
614 }
615
616 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
617 {
618         if (!info->dir_entries)
619                 return;
620         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
621 }
622
623
624 /*
625  * sessions
626  */
627 const char *ceph_session_state_name(int s)
628 {
629         switch (s) {
630         case CEPH_MDS_SESSION_NEW: return "new";
631         case CEPH_MDS_SESSION_OPENING: return "opening";
632         case CEPH_MDS_SESSION_OPEN: return "open";
633         case CEPH_MDS_SESSION_HUNG: return "hung";
634         case CEPH_MDS_SESSION_CLOSING: return "closing";
635         case CEPH_MDS_SESSION_CLOSED: return "closed";
636         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
637         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
638         case CEPH_MDS_SESSION_REJECTED: return "rejected";
639         default: return "???";
640         }
641 }
642
643 struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
644 {
645         if (refcount_inc_not_zero(&s->s_ref)) {
646                 dout("mdsc get_session %p %d -> %d\n", s,
647                      refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
648                 return s;
649         } else {
650                 dout("mdsc get_session %p 0 -- FAIL\n", s);
651                 return NULL;
652         }
653 }
654
655 void ceph_put_mds_session(struct ceph_mds_session *s)
656 {
657         dout("mdsc put_session %p %d -> %d\n", s,
658              refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
659         if (refcount_dec_and_test(&s->s_ref)) {
660                 if (s->s_auth.authorizer)
661                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
662                 WARN_ON(mutex_is_locked(&s->s_mutex));
663                 xa_destroy(&s->s_delegated_inos);
664                 kfree(s);
665         }
666 }
667
668 /*
669  * called under mdsc->mutex
670  */
671 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
672                                                    int mds)
673 {
674         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
675                 return NULL;
676         return ceph_get_mds_session(mdsc->sessions[mds]);
677 }
678
679 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
680 {
681         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
682                 return false;
683         else
684                 return true;
685 }
686
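/* check that the session is still registered in mdsc->sessions[] */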
687 static int __verify_registered_session(struct ceph_mds_client *mdsc,
688                                        struct ceph_mds_session *s)
689 {
690         if (s->s_mds >= mdsc->max_sessions ||
691             mdsc->sessions[s->s_mds] != s)
692                 return -ENOENT;
693         return 0;
694 }
695
696 /*
697  * create+register a new session for given mds.
698  * called under mdsc->mutex.
699  */
700 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
701                                                  int mds)
702 {
703         struct ceph_mds_session *s;
704
705         if (mds >= mdsc->mdsmap->possible_max_rank)
706                 return ERR_PTR(-EINVAL);
707
708         s = kzalloc(sizeof(*s), GFP_NOFS);
709         if (!s)
710                 return ERR_PTR(-ENOMEM);
711
712         if (mds >= mdsc->max_sessions) {
713                 int newmax = 1 << get_count_order(mds + 1);
714                 struct ceph_mds_session **sa;
715
716                 dout("%s: realloc to %d\n", __func__, newmax);
717                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
718                 if (!sa)
719                         goto fail_realloc;
720                 if (mdsc->sessions) {
721                         memcpy(sa, mdsc->sessions,
722                                mdsc->max_sessions * sizeof(void *));
723                         kfree(mdsc->sessions);
724                 }
725                 mdsc->sessions = sa;
726                 mdsc->max_sessions = newmax;
727         }
728
729         dout("%s: mds%d\n", __func__, mds);
730         s->s_mdsc = mdsc;
731         s->s_mds = mds;
732         s->s_state = CEPH_MDS_SESSION_NEW;
733         s->s_ttl = 0;
734         s->s_seq = 0;
735         mutex_init(&s->s_mutex);
736
737         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
738
739         spin_lock_init(&s->s_gen_ttl_lock);
740         s->s_cap_gen = 1;
741         s->s_cap_ttl = jiffies - 1;
742
743         spin_lock_init(&s->s_cap_lock);
744         s->s_renew_requested = 0;
745         s->s_renew_seq = 0;
746         INIT_LIST_HEAD(&s->s_caps);
747         s->s_nr_caps = 0;
748         refcount_set(&s->s_ref, 1);
749         INIT_LIST_HEAD(&s->s_waiting);
750         INIT_LIST_HEAD(&s->s_unsafe);
751         xa_init(&s->s_delegated_inos);
752         s->s_num_cap_releases = 0;
753         s->s_cap_reconnect = 0;
754         s->s_cap_iterator = NULL;
755         INIT_LIST_HEAD(&s->s_cap_releases);
756         INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
757
758         INIT_LIST_HEAD(&s->s_cap_dirty);
759         INIT_LIST_HEAD(&s->s_cap_flushing);
760
761         mdsc->sessions[mds] = s;
762         atomic_inc(&mdsc->num_sessions);
763         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
764
765         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
766                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
767
768         return s;
769
770 fail_realloc:
771         kfree(s);
772         return ERR_PTR(-ENOMEM);
773 }
774
775 /*
776  * called under mdsc->mutex
777  */
778 static void __unregister_session(struct ceph_mds_client *mdsc,
779                                struct ceph_mds_session *s)
780 {
781         dout("__unregister_session mds%d %p\n", s->s_mds, s);
782         BUG_ON(mdsc->sessions[s->s_mds] != s);
783         mdsc->sessions[s->s_mds] = NULL;
784         ceph_con_close(&s->s_con);
785         ceph_put_mds_session(s);
786         atomic_dec(&mdsc->num_sessions);
787 }
788
789 /*
790  * drop session refs in request.
791  *
792  * should be last request ref, or hold mdsc->mutex
793  */
794 static void put_request_session(struct ceph_mds_request *req)
795 {
796         if (req->r_session) {
797                 ceph_put_mds_session(req->r_session);
798                 req->r_session = NULL;
799         }
800 }
801
802 void ceph_mdsc_release_request(struct kref *kref)
803 {
804         struct ceph_mds_request *req = container_of(kref,
805                                                     struct ceph_mds_request,
806                                                     r_kref);
807         ceph_mdsc_release_dir_caps_no_check(req);
808         destroy_reply_info(&req->r_reply_info);
809         if (req->r_request)
810                 ceph_msg_put(req->r_request);
811         if (req->r_reply)
812                 ceph_msg_put(req->r_reply);
813         if (req->r_inode) {
814                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
815                 /* avoid calling iput_final() in mds dispatch threads */
816                 ceph_async_iput(req->r_inode);
817         }
818         if (req->r_parent) {
819                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
820                 ceph_async_iput(req->r_parent);
821         }
822         ceph_async_iput(req->r_target_inode);
823         if (req->r_dentry)
824                 dput(req->r_dentry);
825         if (req->r_old_dentry)
826                 dput(req->r_old_dentry);
827         if (req->r_old_dentry_dir) {
828                 /*
829                  * track (and drop pins for) r_old_dentry_dir
830                  * separately, since r_old_dentry's d_parent may have
831                  * changed between the dir mutex being dropped and
832                  * this request being freed.
833                  */
834                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
835                                   CEPH_CAP_PIN);
836                 ceph_async_iput(req->r_old_dentry_dir);
837         }
838         kfree(req->r_path1);
839         kfree(req->r_path2);
840         if (req->r_pagelist)
841                 ceph_pagelist_release(req->r_pagelist);
842         put_request_session(req);
843         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
844         WARN_ON_ONCE(!list_empty(&req->r_wait));
845         kmem_cache_free(ceph_mds_request_cachep, req);
846 }
847
848 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
849
850 /*
851  * lookup request, bump ref if found.
852  *
853  * called under mdsc->mutex.
854  */
855 static struct ceph_mds_request *
856 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
857 {
858         struct ceph_mds_request *req;
859
860         req = lookup_request(&mdsc->request_tree, tid);
861         if (req)
862                 ceph_mdsc_get_request(req);
863
864         return req;
865 }
866
867 /*
868  * Register an in-flight request, and assign a tid.  Link to the
869  * directory we are modifying (if any).
870  *
871  * Called under mdsc->mutex.
872  */
873 static void __register_request(struct ceph_mds_client *mdsc,
874                                struct ceph_mds_request *req,
875                                struct inode *dir)
876 {
877         int ret = 0;
878
879         req->r_tid = ++mdsc->last_tid;
880         if (req->r_num_caps) {
881                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
882                                         req->r_num_caps);
883                 if (ret < 0) {
884                         pr_err("__register_request %p "
885                                "failed to reserve caps: %d\n", req, ret);
886                         /* set req->r_err to fail early from __do_request */
887                         req->r_err = ret;
888                         return;
889                 }
890         }
891         dout("__register_request %p tid %lld\n", req, req->r_tid);
892         ceph_mdsc_get_request(req);
893         insert_request(&mdsc->request_tree, req);
894
895         req->r_uid = current_fsuid();
896         req->r_gid = current_fsgid();
897
898         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
899                 mdsc->oldest_tid = req->r_tid;
900
901         if (dir) {
902                 struct ceph_inode_info *ci = ceph_inode(dir);
903
904                 ihold(dir);
905                 req->r_unsafe_dir = dir;
906                 spin_lock(&ci->i_unsafe_lock);
907                 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
908                 spin_unlock(&ci->i_unsafe_lock);
909         }
910 }
911
912 static void __unregister_request(struct ceph_mds_client *mdsc,
913                                  struct ceph_mds_request *req)
914 {
915         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
916
917         /* Never leave an unregistered request on an unsafe list! */
918         list_del_init(&req->r_unsafe_item);
919
920         if (req->r_tid == mdsc->oldest_tid) {
921                 struct rb_node *p = rb_next(&req->r_node);
922                 mdsc->oldest_tid = 0;
923                 while (p) {
924                         struct ceph_mds_request *next_req =
925                                 rb_entry(p, struct ceph_mds_request, r_node);
926                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
927                                 mdsc->oldest_tid = next_req->r_tid;
928                                 break;
929                         }
930                         p = rb_next(p);
931                 }
932         }
933
934         erase_request(&mdsc->request_tree, req);
935
936         if (req->r_unsafe_dir) {
937                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
938                 spin_lock(&ci->i_unsafe_lock);
939                 list_del_init(&req->r_unsafe_dir_item);
940                 spin_unlock(&ci->i_unsafe_lock);
941         }
942         if (req->r_target_inode &&
943             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
944                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
945                 spin_lock(&ci->i_unsafe_lock);
946                 list_del_init(&req->r_unsafe_target_item);
947                 spin_unlock(&ci->i_unsafe_lock);
948         }
949
950         if (req->r_unsafe_dir) {
951                 /* avoid calling iput_final() in mds dispatch threads */
952                 ceph_async_iput(req->r_unsafe_dir);
953                 req->r_unsafe_dir = NULL;
954         }
955
956         complete_all(&req->r_safe_completion);
957
958         ceph_mdsc_put_request(req);
959 }
960
961 /*
962  * Walk back up the dentry tree until we hit a dentry representing a
963  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
964  * when calling this) to ensure that the objects won't disappear while we're
965  * working with them. Once we hit a candidate dentry, we attempt to take a
966  * reference to it, and return that as the result.
967  */
968 static struct inode *get_nonsnap_parent(struct dentry *dentry)
969 {
970         struct inode *inode = NULL;
971
972         while (dentry && !IS_ROOT(dentry)) {
973                 inode = d_inode_rcu(dentry);
974                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
975                         break;
976                 dentry = dentry->d_parent;
977         }
978         if (inode)
979                 inode = igrab(inode);
980         return inode;
981 }
982
983 /*
984  * Choose mds to send request to next.  If there is a hint set in the
985  * request (e.g., due to a prior forward hint from the mds), use that.
986  * Otherwise, consult frag tree and/or caps to identify the
987  * appropriate mds.  If all else fails, choose randomly.
988  *
989  * Called under mdsc->mutex.
990  */
991 static int __choose_mds(struct ceph_mds_client *mdsc,
992                         struct ceph_mds_request *req,
993                         bool *random)
994 {
995         struct inode *inode;
996         struct ceph_inode_info *ci;
997         struct ceph_cap *cap;
998         int mode = req->r_direct_mode;
999         int mds = -1;
1000         u32 hash = req->r_direct_hash;
1001         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1002
1003         if (random)
1004                 *random = false;
1005
1006         /*
1007          * is there a specific mds we should try?  ignore hint if we have
1008          * no session and the mds is not up (active or recovering).
1009          */
1010         if (req->r_resend_mds >= 0 &&
1011             (__have_session(mdsc, req->r_resend_mds) ||
1012              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1013                 dout("%s using resend_mds mds%d\n", __func__,
1014                      req->r_resend_mds);
1015                 return req->r_resend_mds;
1016         }
1017
1018         if (mode == USE_RANDOM_MDS)
1019                 goto random;
1020
1021         inode = NULL;
1022         if (req->r_inode) {
1023                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1024                         inode = req->r_inode;
1025                         ihold(inode);
1026                 } else {
1027                         /* req->r_dentry is non-null for LSSNAP request */
1028                         rcu_read_lock();
1029                         inode = get_nonsnap_parent(req->r_dentry);
1030                         rcu_read_unlock();
1031                         dout("%s using snapdir's parent %p\n", __func__, inode);
1032                 }
1033         } else if (req->r_dentry) {
1034                 /* ignore race with rename; old or new d_parent is okay */
1035                 struct dentry *parent;
1036                 struct inode *dir;
1037
1038                 rcu_read_lock();
1039                 parent = READ_ONCE(req->r_dentry->d_parent);
1040                 dir = req->r_parent ? : d_inode_rcu(parent);
1041
1042                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
1043                         /*  not this fs or parent went negative */
1044                         inode = d_inode(req->r_dentry);
1045                         if (inode)
1046                                 ihold(inode);
1047                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
1048                         /* direct snapped/virtual snapdir requests
1049                          * based on parent dir inode */
1050                         inode = get_nonsnap_parent(parent);
1051                         dout("%s using nonsnap parent %p\n", __func__, inode);
1052                 } else {
1053                         /* dentry target */
1054                         inode = d_inode(req->r_dentry);
1055                         if (!inode || mode == USE_AUTH_MDS) {
1056                                 /* dir + name */
1057                                 inode = igrab(dir);
1058                                 hash = ceph_dentry_hash(dir, req->r_dentry);
1059                                 is_hash = true;
1060                         } else {
1061                                 ihold(inode);
1062                         }
1063                 }
1064                 rcu_read_unlock();
1065         }
1066
1067         dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
1068              hash, mode);
1069         if (!inode)
1070                 goto random;
1071         ci = ceph_inode(inode);
1072
1073         if (is_hash && S_ISDIR(inode->i_mode)) {
1074                 struct ceph_inode_frag frag;
1075                 int found;
1076
1077                 ceph_choose_frag(ci, hash, &frag, &found);
1078                 if (found) {
1079                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
1080                                 u8 r;
1081
1082                                 /* choose a random replica */
1083                                 get_random_bytes(&r, 1);
1084                                 r %= frag.ndist;
1085                                 mds = frag.dist[r];
1086                                 dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
1087                                      __func__, inode, ceph_vinop(inode),
1088                                      frag.frag, mds, (int)r, frag.ndist);
1089                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1090                                     CEPH_MDS_STATE_ACTIVE &&
1091                                     !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
1092                                         goto out;
1093                         }
1094
1095                         /* since this file/dir wasn't known to be
1096                          * replicated, we want to look for the
1097                          * authoritative mds. */
1098                         if (frag.mds >= 0) {
1099                                 /* choose auth mds */
1100                                 mds = frag.mds;
1101                                 dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
1102                                      __func__, inode, ceph_vinop(inode),
1103                                      frag.frag, mds);
1104                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1105                                     CEPH_MDS_STATE_ACTIVE) {
1106                                         if (mode == USE_ANY_MDS &&
1107                                             !ceph_mdsmap_is_laggy(mdsc->mdsmap,
1108                                                                   mds))
1109                                                 goto out;
1110                                 }
1111                         }
1112                         mode = USE_AUTH_MDS;
1113                 }
1114         }
1115
1116         spin_lock(&ci->i_ceph_lock);
1117         cap = NULL;
1118         if (mode == USE_AUTH_MDS)
1119                 cap = ci->i_auth_cap;
1120         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
1121                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
1122         if (!cap) {
1123                 spin_unlock(&ci->i_ceph_lock);
1124                 ceph_async_iput(inode);
1125                 goto random;
1126         }
1127         mds = cap->session->s_mds;
1128         dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
1129              inode, ceph_vinop(inode), mds,
1130              cap == ci->i_auth_cap ? "auth " : "", cap);
1131         spin_unlock(&ci->i_ceph_lock);
1132 out:
1133         /* avoid calling iput_final() while holding mdsc->mutex or
1134          * in mds dispatch threads */
1135         ceph_async_iput(inode);
1136         return mds;
1137
1138 random:
1139         if (random)
1140                 *random = true;
1141
1142         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1143         dout("%s chose random mds%d\n", __func__, mds);
1144         return mds;
1145 }
1146
1147
1148 /*
1149  * session messages
1150  */
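/*
 * Allocate a bare session message (just a ceph_mds_session_head) carrying
 * the given op and sequence number.
 */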
1151 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
1152 {
1153         struct ceph_msg *msg;
1154         struct ceph_mds_session_head *h;
1155
1156         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
1157                            false);
1158         if (!msg) {
1159                 pr_err("create_session_msg ENOMEM creating msg\n");
1160                 return NULL;
1161         }
1162         h = msg->front.iov_base;
1163         h->op = cpu_to_le32(op);
1164         h->seq = cpu_to_le64(seq);
1165
1166         return msg;
1167 }
1168
1169 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
1170 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
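/*
 * Encode the client's supported feature bits as a length-prefixed bitmap,
 * sized in 8-byte chunks as computed by FEATURE_BYTES().
 */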
1171 static void encode_supported_features(void **p, void *end)
1172 {
1173         static const size_t count = ARRAY_SIZE(feature_bits);
1174
1175         if (count > 0) {
1176                 size_t i;
1177                 size_t size = FEATURE_BYTES(count);
1178
1179                 BUG_ON(*p + 4 + size > end);
1180                 ceph_encode_32(p, size);
1181                 memset(*p, 0, size);
1182                 for (i = 0; i < count; i++)
1183                         ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
1184                 *p += size;
1185         } else {
1186                 BUG_ON(*p + 4 > end);
1187                 ceph_encode_32(p, 0);
1188         }
1189 }
1190
1191 /*
1192  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
1193  * to include additional client metadata fields.
1194  */
1195 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
1196 {
1197         struct ceph_msg *msg;
1198         struct ceph_mds_session_head *h;
1199         int i = -1;
1200         int extra_bytes = 0;
1201         int metadata_key_count = 0;
1202         struct ceph_options *opt = mdsc->fsc->client->options;
1203         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1204         size_t size, count;
1205         void *p, *end;
1206
1207         const char* metadata[][2] = {
1208                 {"hostname", mdsc->nodename},
1209                 {"kernel_version", init_utsname()->release},
1210                 {"entity_id", opt->name ? : ""},
1211                 {"root", fsopt->server_path ? : "/"},
1212                 {NULL, NULL}
1213         };
1214
1215         /* Calculate serialized length of metadata */
1216         extra_bytes = 4;  /* map length */
1217         for (i = 0; metadata[i][0]; ++i) {
1218                 extra_bytes += 8 + strlen(metadata[i][0]) +
1219                         strlen(metadata[i][1]);
1220                 metadata_key_count++;
1221         }
1222
1223         /* supported feature */
1224         size = 0;
1225         count = ARRAY_SIZE(feature_bits);
1226         if (count > 0)
1227                 size = FEATURE_BYTES(count);
1228         extra_bytes += 4 + size;
1229
1230         /* Allocate the message */
1231         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
1232                            GFP_NOFS, false);
1233         if (!msg) {
1234                 pr_err("create_session_msg ENOMEM creating msg\n");
1235                 return NULL;
1236         }
1237         p = msg->front.iov_base;
1238         end = p + msg->front.iov_len;
1239
1240         h = p;
1241         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
1242         h->seq = cpu_to_le64(seq);
1243
1244         /*
1245          * Serialize client metadata into waiting buffer space, using
1246          * the format that userspace expects for map<string, string>
1247          *
1248          * ClientSession messages with metadata are v3
1249          */
1250         msg->hdr.version = cpu_to_le16(3);
1251         msg->hdr.compat_version = cpu_to_le16(1);
1252
1253         /* The write pointer, following the session_head structure */
1254         p += sizeof(*h);
1255
1256         /* Number of entries in the map */
1257         ceph_encode_32(&p, metadata_key_count);
1258
1259         /* Two length-prefixed strings for each entry in the map */
1260         for (i = 0; metadata[i][0]; ++i) {
1261                 size_t const key_len = strlen(metadata[i][0]);
1262                 size_t const val_len = strlen(metadata[i][1]);
1263
1264                 ceph_encode_32(&p, key_len);
1265                 memcpy(p, metadata[i][0], key_len);
1266                 p += key_len;
1267                 ceph_encode_32(&p, val_len);
1268                 memcpy(p, metadata[i][1], val_len);
1269                 p += val_len;
1270         }
1271
1272         encode_supported_features(&p, end);
1273         msg->front.iov_len = p - msg->front.iov_base;
1274         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1275
1276         return msg;
1277 }
1278
1279 /*
1280  * send session open request.
1281  *
1282  * called under mdsc->mutex
1283  */
1284 static int __open_session(struct ceph_mds_client *mdsc,
1285                           struct ceph_mds_session *session)
1286 {
1287         struct ceph_msg *msg;
1288         int mstate;
1289         int mds = session->s_mds;
1290
1291         /* wait for mds to go active? */
1292         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1293         dout("open_session to mds%d (%s)\n", mds,
1294              ceph_mds_state_name(mstate));
1295         session->s_state = CEPH_MDS_SESSION_OPENING;
1296         session->s_renew_requested = jiffies;
1297
1298         /* send connect message */
1299         msg = create_session_open_msg(mdsc, session->s_seq);
1300         if (!msg)
1301                 return -ENOMEM;
1302         ceph_con_send(&session->s_con, msg);
1303         return 0;
1304 }
1305
1306 /*
1307  * open sessions for any export targets for the given mds
1308  *
1309  * called under mdsc->mutex
1310  */
1311 static struct ceph_mds_session *
1312 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1313 {
1314         struct ceph_mds_session *session;
1315
1316         session = __ceph_lookup_mds_session(mdsc, target);
1317         if (!session) {
1318                 session = register_session(mdsc, target);
1319                 if (IS_ERR(session))
1320                         return session;
1321         }
1322         if (session->s_state == CEPH_MDS_SESSION_NEW ||
1323             session->s_state == CEPH_MDS_SESSION_CLOSING)
1324                 __open_session(mdsc, session);
1325
1326         return session;
1327 }
1328
1329 struct ceph_mds_session *
1330 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1331 {
1332         struct ceph_mds_session *session;
1333
1334         dout("open_export_target_session to mds%d\n", target);
1335
1336         mutex_lock(&mdsc->mutex);
1337         session = __open_export_target_session(mdsc, target);
1338         mutex_unlock(&mdsc->mutex);
1339
1340         return session;
1341 }
1342
1343 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1344                                           struct ceph_mds_session *session)
1345 {
1346         struct ceph_mds_info *mi;
1347         struct ceph_mds_session *ts;
1348         int i, mds = session->s_mds;
1349
1350         if (mds >= mdsc->mdsmap->possible_max_rank)
1351                 return;
1352
1353         mi = &mdsc->mdsmap->m_info[mds];
1354         dout("open_export_target_sessions for mds%d (%d targets)\n",
1355              session->s_mds, mi->num_export_targets);
1356
1357         for (i = 0; i < mi->num_export_targets; i++) {
1358                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1359                 if (!IS_ERR(ts))
1360                         ceph_put_mds_session(ts);
1361         }
1362 }
1363
1364 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1365                                            struct ceph_mds_session *session)
1366 {
1367         mutex_lock(&mdsc->mutex);
1368         __open_export_target_sessions(mdsc, session);
1369         mutex_unlock(&mdsc->mutex);
1370 }
1371
1372 /*
1373  * session caps
1374  */
1375
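/*
 * Move the session's pending cap releases onto a private list (caller must
 * hold s_cap_lock) so they can be disposed of without the lock held.
 */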
1376 static void detach_cap_releases(struct ceph_mds_session *session,
1377                                 struct list_head *target)
1378 {
1379         lockdep_assert_held(&session->s_cap_lock);
1380
1381         list_splice_init(&session->s_cap_releases, target);
1382         session->s_num_cap_releases = 0;
1383         dout("dispose_cap_releases mds%d\n", session->s_mds);
1384 }
1385
1386 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1387                                  struct list_head *dispose)
1388 {
1389         while (!list_empty(dispose)) {
1390                 struct ceph_cap *cap;
1391                 /* drop each cap that was detached from the session */
1392                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1393                 list_del(&cap->session_caps);
1394                 ceph_put_cap(mdsc, cap);
1395         }
1396 }
1397
1398 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1399                                      struct ceph_mds_session *session)
1400 {
1401         struct ceph_mds_request *req;
1402         struct rb_node *p;
1403         struct ceph_inode_info *ci;
1404
1405         dout("cleanup_session_requests mds%d\n", session->s_mds);
1406         mutex_lock(&mdsc->mutex);
1407         while (!list_empty(&session->s_unsafe)) {
1408                 req = list_first_entry(&session->s_unsafe,
1409                                        struct ceph_mds_request, r_unsafe_item);
1410                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1411                                     req->r_tid);
1412                 if (req->r_target_inode) {
1413                         /* dropping unsafe change of inode's attributes */
1414                         ci = ceph_inode(req->r_target_inode);
1415                         errseq_set(&ci->i_meta_err, -EIO);
1416                 }
1417                 if (req->r_unsafe_dir) {
1418                         /* dropping unsafe directory operation */
1419                         ci = ceph_inode(req->r_unsafe_dir);
1420                         errseq_set(&ci->i_meta_err, -EIO);
1421                 }
1422                 __unregister_request(mdsc, req);
1423         }
1424         /* zero r_attempts, so kick_requests() will re-send requests */
1425         p = rb_first(&mdsc->request_tree);
1426         while (p) {
1427                 req = rb_entry(p, struct ceph_mds_request, r_node);
1428                 p = rb_next(p);
1429                 if (req->r_session &&
1430                     req->r_session->s_mds == session->s_mds)
1431                         req->r_attempts = 0;
1432         }
1433         mutex_unlock(&mdsc->mutex);
1434 }
1435
1436 /*
1437  * Helper to safely iterate over all caps associated with a session, with
1438  * special care taken to handle a racing __ceph_remove_cap().
1439  *
1440  * Caller must hold session s_mutex.
1441  */
1442 int ceph_iterate_session_caps(struct ceph_mds_session *session,
1443                               int (*cb)(struct inode *, struct ceph_cap *,
1444                                         void *), void *arg)
1445 {
1446         struct list_head *p;
1447         struct ceph_cap *cap;
1448         struct inode *inode, *last_inode = NULL;
1449         struct ceph_cap *old_cap = NULL;
1450         int ret;
1451
1452         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1453         spin_lock(&session->s_cap_lock);
1454         p = session->s_caps.next;
1455         while (p != &session->s_caps) {
1456                 cap = list_entry(p, struct ceph_cap, session_caps);
1457                 inode = igrab(&cap->ci->vfs_inode);
1458                 if (!inode) {
1459                         p = p->next;
1460                         continue;
1461                 }
1462                 session->s_cap_iterator = cap;
1463                 spin_unlock(&session->s_cap_lock);
1464
1465                 if (last_inode) {
1466                         /* avoid calling iput_final() while holding
1467                          * s_mutex or in mds dispatch threads */
1468                         ceph_async_iput(last_inode);
1469                         last_inode = NULL;
1470                 }
1471                 if (old_cap) {
1472                         ceph_put_cap(session->s_mdsc, old_cap);
1473                         old_cap = NULL;
1474                 }
1475
1476                 ret = cb(inode, cap, arg);
1477                 last_inode = inode;
1478
1479                 spin_lock(&session->s_cap_lock);
1480                 p = p->next;
1481                 if (!cap->ci) {
1482                         dout("iterate_session_caps  finishing cap %p removal\n",
1483                              cap);
1484                         BUG_ON(cap->session != session);
1485                         cap->session = NULL;
1486                         list_del_init(&cap->session_caps);
1487                         session->s_nr_caps--;
1488                         if (cap->queue_release)
1489                                 __ceph_queue_cap_release(session, cap);
1490                         else
1491                                 old_cap = cap;  /* put_cap it w/o locks held */
1492                 }
1493                 if (ret < 0)
1494                         goto out;
1495         }
1496         ret = 0;
1497 out:
1498         session->s_cap_iterator = NULL;
1499         spin_unlock(&session->s_cap_lock);
1500
1501         ceph_async_iput(last_inode);
1502         if (old_cap)
1503                 ceph_put_cap(session->s_mdsc, old_cap);
1504
1505         return ret;
1506 }
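
/*
 * Editor's sketch (not part of the original file): a minimal callback for
 * ceph_iterate_session_caps().  The iterator pins each inode with igrab()
 * and drops s_cap_lock before invoking the callback, so the callback may
 * sleep; returning a negative value stops the walk.  The count_caps_cb()
 * and count_session_caps() names below are hypothetical.
 */
#if 0
static int count_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
        int *count = arg;

        (*count)++;
        return 0;       /* keep iterating; a negative return aborts the walk */
}

static void count_session_caps(struct ceph_mds_session *session)
{
        int count = 0;

        ceph_iterate_session_caps(session, count_caps_cb, &count);
        dout("mds%d session holds %d caps\n", session->s_mds, count);
}
#endif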
1507
1508 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1509                                   void *arg)
1510 {
1511         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1512         struct ceph_inode_info *ci = ceph_inode(inode);
1513         LIST_HEAD(to_remove);
1514         bool dirty_dropped = false;
1515         bool invalidate = false;
1516
1517         dout("removing cap %p, ci is %p, inode is %p\n",
1518              cap, ci, &ci->vfs_inode);
1519         spin_lock(&ci->i_ceph_lock);
1520         __ceph_remove_cap(cap, false);
1521         if (!ci->i_auth_cap) {
1522                 struct ceph_cap_flush *cf;
1523                 struct ceph_mds_client *mdsc = fsc->mdsc;
1524
1525                 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1526                         if (inode->i_data.nrpages > 0)
1527                                 invalidate = true;
1528                         if (ci->i_wrbuffer_ref > 0)
1529                                 mapping_set_error(&inode->i_data, -EIO);
1530                 }
1531
1532                 while (!list_empty(&ci->i_cap_flush_list)) {
1533                         cf = list_first_entry(&ci->i_cap_flush_list,
1534                                               struct ceph_cap_flush, i_list);
1535                         list_move(&cf->i_list, &to_remove);
1536                 }
1537
1538                 spin_lock(&mdsc->cap_dirty_lock);
1539
1540                 list_for_each_entry(cf, &to_remove, i_list)
1541                         list_del(&cf->g_list);
1542
1543                 if (!list_empty(&ci->i_dirty_item)) {
1544                         pr_warn_ratelimited(
1545                                 " dropping dirty %s state for %p %lld\n",
1546                                 ceph_cap_string(ci->i_dirty_caps),
1547                                 inode, ceph_ino(inode));
1548                         ci->i_dirty_caps = 0;
1549                         list_del_init(&ci->i_dirty_item);
1550                         dirty_dropped = true;
1551                 }
1552                 if (!list_empty(&ci->i_flushing_item)) {
1553                         pr_warn_ratelimited(
1554                                 " dropping dirty+flushing %s state for %p %lld\n",
1555                                 ceph_cap_string(ci->i_flushing_caps),
1556                                 inode, ceph_ino(inode));
1557                         ci->i_flushing_caps = 0;
1558                         list_del_init(&ci->i_flushing_item);
1559                         mdsc->num_cap_flushing--;
1560                         dirty_dropped = true;
1561                 }
1562                 spin_unlock(&mdsc->cap_dirty_lock);
1563
1564                 if (dirty_dropped) {
1565                         errseq_set(&ci->i_meta_err, -EIO);
1566
1567                         if (ci->i_wrbuffer_ref_head == 0 &&
1568                             ci->i_wr_ref == 0 &&
1569                             ci->i_dirty_caps == 0 &&
1570                             ci->i_flushing_caps == 0) {
1571                                 ceph_put_snap_context(ci->i_head_snapc);
1572                                 ci->i_head_snapc = NULL;
1573                         }
1574                 }
1575
1576                 if (atomic_read(&ci->i_filelock_ref) > 0) {
1577                         /* make further file lock syscall return -EIO */
1578                         ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
1579                         pr_warn_ratelimited(" dropping file locks for %p %lld\n",
1580                                             inode, ceph_ino(inode));
1581                 }
1582
1583                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1584                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1585                         ci->i_prealloc_cap_flush = NULL;
1586                 }
1587         }
1588         spin_unlock(&ci->i_ceph_lock);
1589         while (!list_empty(&to_remove)) {
1590                 struct ceph_cap_flush *cf;
1591                 cf = list_first_entry(&to_remove,
1592                                       struct ceph_cap_flush, i_list);
1593                 list_del(&cf->i_list);
1594                 ceph_free_cap_flush(cf);
1595         }
1596
1597         wake_up_all(&ci->i_cap_wq);
1598         if (invalidate)
1599                 ceph_queue_invalidate(inode);
1600         if (dirty_dropped)
1601                 iput(inode);
1602         return 0;
1603 }
1604
1605 /*
1606  * caller must hold session s_mutex
1607  */
1608 static void remove_session_caps(struct ceph_mds_session *session)
1609 {
1610         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1611         struct super_block *sb = fsc->sb;
1612         LIST_HEAD(dispose);
1613
1614         dout("remove_session_caps on %p\n", session);
1615         ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1616
1617         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1618
1619         spin_lock(&session->s_cap_lock);
1620         if (session->s_nr_caps > 0) {
1621                 struct inode *inode;
1622                 struct ceph_cap *cap, *prev = NULL;
1623                 struct ceph_vino vino;
1624                 /*
1625                  * iterate_session_caps() skips inodes that are being
1626                  * deleted; we need to wait until deletions are complete.
1627                  * __wait_on_freeing_inode() is designed for the job,
1628                  * but it is not exported, so use the inode lookup
1629                  * function to reach it indirectly.
1630                  */
1631                 while (!list_empty(&session->s_caps)) {
1632                         cap = list_entry(session->s_caps.next,
1633                                          struct ceph_cap, session_caps);
1634                         if (cap == prev)
1635                                 break;
1636                         prev = cap;
1637                         vino = cap->ci->i_vino;
1638                         spin_unlock(&session->s_cap_lock);
1639
1640                         inode = ceph_find_inode(sb, vino);
1641                          /* avoid calling iput_final() while holding s_mutex */
1642                         ceph_async_iput(inode);
1643
1644                         spin_lock(&session->s_cap_lock);
1645                 }
1646         }
1647
1648         // detach pending cap releases; s_cap_lock is dropped below
1649         detach_cap_releases(session, &dispose);
1650
1651         BUG_ON(session->s_nr_caps > 0);
1652         BUG_ON(!list_empty(&session->s_cap_flushing));
1653         spin_unlock(&session->s_cap_lock);
1654         dispose_cap_releases(session->s_mdsc, &dispose);
1655 }
1656
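/*
 * Events passed to wake_up_session_caps().  In wake_up_session_cb() below,
 * RECONNECT resets the per-inode wanted/requested max_size bookkeeping,
 * RENEWCAPS downgrades caps the MDS failed to re-issue to CEPH_CAP_PIN,
 * and FORCE_RO only wakes up waiters on i_cap_wq.
 */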
1657 enum {
1658         RECONNECT,
1659         RENEWCAPS,
1660         FORCE_RO,
1661 };
1662
1663 /*
1664  * wake up any threads waiting on this session's caps.  if the cap is
1665  * old (didn't get re-issued on the client reconnect), downgrade it to CEPH_CAP_PIN.
1666  *
1667  * caller must hold s_mutex.
1668  */
1669 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1670                               void *arg)
1671 {
1672         struct ceph_inode_info *ci = ceph_inode(inode);
1673         unsigned long ev = (unsigned long)arg;
1674
1675         if (ev == RECONNECT) {
1676                 spin_lock(&ci->i_ceph_lock);
1677                 ci->i_wanted_max_size = 0;
1678                 ci->i_requested_max_size = 0;
1679                 spin_unlock(&ci->i_ceph_lock);
1680         } else if (ev == RENEWCAPS) {
1681                 if (cap->cap_gen < cap->session->s_cap_gen) {
1682                         /* mds did not re-issue stale cap */
1683                         spin_lock(&ci->i_ceph_lock);
1684                         cap->issued = cap->implemented = CEPH_CAP_PIN;
1685                         spin_unlock(&ci->i_ceph_lock);
1686                 }
1687         } else if (ev == FORCE_RO) {
1688         }
1689         wake_up_all(&ci->i_cap_wq);
1690         return 0;
1691 }
1692
1693 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
1694 {
1695         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1696         ceph_iterate_session_caps(session, wake_up_session_cb,
1697                                   (void *)(unsigned long)ev);
1698 }
1699
1700 /*
1701  * Send periodic message to MDS renewing all currently held caps.  The
1702  * ack will reset the expiration for all caps from this session.
1703  *
1704  * caller holds s_mutex
1705  */
1706 static int send_renew_caps(struct ceph_mds_client *mdsc,
1707                            struct ceph_mds_session *session)
1708 {
1709         struct ceph_msg *msg;
1710         int state;
1711
1712         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1713             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1714                 pr_info("mds%d caps stale\n", session->s_mds);
1715         session->s_renew_requested = jiffies;
1716
1717         /* do not try to renew caps until a recovering mds has reconnected
1718          * with its clients. */
1719         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1720         if (state < CEPH_MDS_STATE_RECONNECT) {
1721                 dout("send_renew_caps ignoring mds%d (%s)\n",
1722                      session->s_mds, ceph_mds_state_name(state));
1723                 return 0;
1724         }
1725
1726         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1727                 ceph_mds_state_name(state));
1728         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1729                                  ++session->s_renew_seq);
1730         if (!msg)
1731                 return -ENOMEM;
1732         ceph_con_send(&session->s_con, msg);
1733         return 0;
1734 }
1735
1736 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1737                              struct ceph_mds_session *session, u64 seq)
1738 {
1739         struct ceph_msg *msg;
1740
1741         dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
1742              session->s_mds, ceph_session_state_name(session->s_state), seq);
1743         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1744         if (!msg)
1745                 return -ENOMEM;
1746         ceph_con_send(&session->s_con, msg);
1747         return 0;
1748 }
1749
1750
1751 /*
1752  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1753  *
1754  * Called under session->s_mutex
1755  */
1756 static void renewed_caps(struct ceph_mds_client *mdsc,
1757                          struct ceph_mds_session *session, int is_renew)
1758 {
1759         int was_stale;
1760         int wake = 0;
1761
1762         spin_lock(&session->s_cap_lock);
1763         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1764
1765         session->s_cap_ttl = session->s_renew_requested +
1766                 mdsc->mdsmap->m_session_timeout*HZ;
1767
1768         if (was_stale) {
1769                 if (time_before(jiffies, session->s_cap_ttl)) {
1770                         pr_info("mds%d caps renewed\n", session->s_mds);
1771                         wake = 1;
1772                 } else {
1773                         pr_info("mds%d caps still stale\n", session->s_mds);
1774                 }
1775         }
1776         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1777              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1778              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1779         spin_unlock(&session->s_cap_lock);
1780
1781         if (wake)
1782                 wake_up_session_caps(session, RENEWCAPS);
1783 }
1784
1785 /*
1786  * send a session close request
1787  */
1788 static int request_close_session(struct ceph_mds_session *session)
1789 {
1790         struct ceph_msg *msg;
1791
1792         dout("request_close_session mds%d state %s seq %lld\n",
1793              session->s_mds, ceph_session_state_name(session->s_state),
1794              session->s_seq);
1795         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1796         if (!msg)
1797                 return -ENOMEM;
1798         ceph_con_send(&session->s_con, msg);
1799         return 1;
1800 }
1801
1802 /*
1803  * Called with s_mutex held.
1804  */
1805 static int __close_session(struct ceph_mds_client *mdsc,
1806                          struct ceph_mds_session *session)
1807 {
1808         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1809                 return 0;
1810         session->s_state = CEPH_MDS_SESSION_CLOSING;
1811         return request_close_session(session);
1812 }
1813
1814 static bool drop_negative_children(struct dentry *dentry)
1815 {
1816         struct dentry *child;
1817         bool all_negative = true;
1818
1819         if (!d_is_dir(dentry))
1820                 goto out;
1821
1822         spin_lock(&dentry->d_lock);
1823         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1824                 if (d_really_is_positive(child)) {
1825                         all_negative = false;
1826                         break;
1827                 }
1828         }
1829         spin_unlock(&dentry->d_lock);
1830
1831         if (all_negative)
1832                 shrink_dcache_parent(dentry);
1833 out:
1834         return all_negative;
1835 }
1836
1837 /*
1838  * Trim old(er) caps.
1839  *
1840  * Because we can't cache an inode without one or more caps, we do
1841  * this indirectly: if a cap is unused, we prune its aliases, at which
1842  * point the inode will hopefully get dropped too.
1843  *
1844  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1845  * memory pressure from the MDS, though, so it needn't be perfect.
1846  */
1847 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1848 {
1849         int *remaining = arg;
1850         struct ceph_inode_info *ci = ceph_inode(inode);
1851         int used, wanted, oissued, mine;
1852
1853         if (*remaining <= 0)
1854                 return -1;
1855
1856         spin_lock(&ci->i_ceph_lock);
1857         mine = cap->issued | cap->implemented;
1858         used = __ceph_caps_used(ci);
1859         wanted = __ceph_caps_file_wanted(ci);
1860         oissued = __ceph_caps_issued_other(ci, cap);
1861
1862         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1863              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1864              ceph_cap_string(used), ceph_cap_string(wanted));
1865         if (cap == ci->i_auth_cap) {
1866                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1867                     !list_empty(&ci->i_cap_snaps))
1868                         goto out;
1869                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1870                         goto out;
1871                 /* Note: it's possible that i_filelock_ref becomes non-zero
1872                  * after dropping auth caps. It doesn't hurt because reply
1873                  * of lock mds request will re-add auth caps. */
1874                 if (atomic_read(&ci->i_filelock_ref) > 0)
1875                         goto out;
1876         }
1877         /* The inode has cached pages, but it's no longer used.
1878          * We can safely drop it. */
1879         if (S_ISREG(inode->i_mode) &&
1880             wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1881             !(oissued & CEPH_CAP_FILE_CACHE)) {
1882                 used = 0;
1883                 oissued = 0;
1884         }
1885         if ((used | wanted) & ~oissued & mine)
1886                 goto out;   /* we need these caps */
1887
1888         if (oissued) {
1889                 /* we aren't the only cap.. just remove us */
1890                 __ceph_remove_cap(cap, true);
1891                 (*remaining)--;
1892         } else {
1893                 struct dentry *dentry;
1894                 /* try dropping referring dentries */
1895                 spin_unlock(&ci->i_ceph_lock);
1896                 dentry = d_find_any_alias(inode);
1897                 if (dentry && drop_negative_children(dentry)) {
1898                         int count;
1899                         dput(dentry);
1900                         d_prune_aliases(inode);
1901                         count = atomic_read(&inode->i_count);
1902                         if (count == 1)
1903                                 (*remaining)--;
1904                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1905                              inode, cap, count);
1906                 } else {
1907                         dput(dentry);
1908                 }
1909                 return 0;
1910         }
1911
1912 out:
1913         spin_unlock(&ci->i_ceph_lock);
1914         return 0;
1915 }
1916
1917 /*
1918  * Trim session cap count down to some max number.
1919  */
1920 int ceph_trim_caps(struct ceph_mds_client *mdsc,
1921                    struct ceph_mds_session *session,
1922                    int max_caps)
1923 {
1924         int trim_caps = session->s_nr_caps - max_caps;
1925
1926         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1927              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1928         if (trim_caps > 0) {
1929                 int remaining = trim_caps;
1930
1931                 ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
1932                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1933                      session->s_mds, session->s_nr_caps, max_caps,
1934                         trim_caps - remaining);
1935         }
1936
1937         ceph_flush_cap_releases(mdsc, session);
1938         return 0;
1939 }
1940
1941 static int check_caps_flush(struct ceph_mds_client *mdsc,
1942                             u64 want_flush_tid)
1943 {
1944         int ret = 1;
1945
1946         spin_lock(&mdsc->cap_dirty_lock);
1947         if (!list_empty(&mdsc->cap_flush_list)) {
1948                 struct ceph_cap_flush *cf =
1949                         list_first_entry(&mdsc->cap_flush_list,
1950                                          struct ceph_cap_flush, g_list);
1951                 if (cf->tid <= want_flush_tid) {
1952                         dout("check_caps_flush still flushing tid "
1953                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1954                         ret = 0;
1955                 }
1956         }
1957         spin_unlock(&mdsc->cap_dirty_lock);
1958         return ret;
1959 }
1960
1961 /*
1962  * flush all dirty inode data to disk.
1963  *
1964  * returns once we've flushed through want_flush_tid
1965  */
1966 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1967                             u64 want_flush_tid)
1968 {
1969         dout("check_caps_flush want %llu\n", want_flush_tid);
1970
1971         wait_event(mdsc->cap_flushing_wq,
1972                    check_caps_flush(mdsc, want_flush_tid));
1973
1974         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1975 }
1976
1977 /*
1978  * called under s_mutex
1979  */
1980 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1981                                    struct ceph_mds_session *session)
1982 {
1983         struct ceph_msg *msg = NULL;
1984         struct ceph_mds_cap_release *head;
1985         struct ceph_mds_cap_item *item;
1986         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
1987         struct ceph_cap *cap;
1988         LIST_HEAD(tmp_list);
1989         int num_cap_releases;
1990         __le32  barrier, *cap_barrier;
1991
1992         down_read(&osdc->lock);
1993         barrier = cpu_to_le32(osdc->epoch_barrier);
1994         up_read(&osdc->lock);
1995
1996         spin_lock(&session->s_cap_lock);
1997 again:
1998         list_splice_init(&session->s_cap_releases, &tmp_list);
1999         num_cap_releases = session->s_num_cap_releases;
2000         session->s_num_cap_releases = 0;
2001         spin_unlock(&session->s_cap_lock);
2002
2003         while (!list_empty(&tmp_list)) {
2004                 if (!msg) {
2005                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2006                                         PAGE_SIZE, GFP_NOFS, false);
2007                         if (!msg)
2008                                 goto out_err;
2009                         head = msg->front.iov_base;
2010                         head->num = cpu_to_le32(0);
2011                         msg->front.iov_len = sizeof(*head);
2012
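                        /*
                         * Version 2 of CLIENT_CAPRELEASE appends an osd
                         * epoch barrier after the release items (written
                         * via cap_barrier below); compat_version 1 lets
                         * peers that only understand v1 still decode it.
                         */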
2013                         msg->hdr.version = cpu_to_le16(2);
2014                         msg->hdr.compat_version = cpu_to_le16(1);
2015                 }
2016
2017                 cap = list_first_entry(&tmp_list, struct ceph_cap,
2018                                         session_caps);
2019                 list_del(&cap->session_caps);
2020                 num_cap_releases--;
2021
2022                 head = msg->front.iov_base;
2023                 put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2024                                    &head->num);
2025                 item = msg->front.iov_base + msg->front.iov_len;
2026                 item->ino = cpu_to_le64(cap->cap_ino);
2027                 item->cap_id = cpu_to_le64(cap->cap_id);
2028                 item->migrate_seq = cpu_to_le32(cap->mseq);
2029                 item->seq = cpu_to_le32(cap->issue_seq);
2030                 msg->front.iov_len += sizeof(*item);
2031
2032                 ceph_put_cap(mdsc, cap);
2033
2034                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2035                         // Append cap_barrier field
2036                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
2037                         *cap_barrier = barrier;
2038                         msg->front.iov_len += sizeof(*cap_barrier);
2039
2040                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2041                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2042                         ceph_con_send(&session->s_con, msg);
2043                         msg = NULL;
2044                 }
2045         }
2046
2047         BUG_ON(num_cap_releases != 0);
2048
2049         spin_lock(&session->s_cap_lock);
2050         if (!list_empty(&session->s_cap_releases))
2051                 goto again;
2052         spin_unlock(&session->s_cap_lock);
2053
2054         if (msg) {
2055                 // Append cap_barrier field
2056                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
2057                 *cap_barrier = barrier;
2058                 msg->front.iov_len += sizeof(*cap_barrier);
2059
2060                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2061                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2062                 ceph_con_send(&session->s_con, msg);
2063         }
2064         return;
2065 out_err:
2066         pr_err("send_cap_releases mds%d, failed to allocate message\n",
2067                 session->s_mds);
2068         spin_lock(&session->s_cap_lock);
2069         list_splice(&tmp_list, &session->s_cap_releases);
2070         session->s_num_cap_releases += num_cap_releases;
2071         spin_unlock(&session->s_cap_lock);
2072 }
2073
2074 static void ceph_cap_release_work(struct work_struct *work)
2075 {
2076         struct ceph_mds_session *session =
2077                 container_of(work, struct ceph_mds_session, s_cap_release_work);
2078
2079         mutex_lock(&session->s_mutex);
2080         if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2081             session->s_state == CEPH_MDS_SESSION_HUNG)
2082                 ceph_send_cap_releases(session->s_mdsc, session);
2083         mutex_unlock(&session->s_mutex);
2084         ceph_put_mds_session(session);
2085 }
2086
2087 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2088                              struct ceph_mds_session *session)
2089 {
2090         if (mdsc->stopping)
2091                 return;
2092
2093         ceph_get_mds_session(session);
2094         if (queue_work(mdsc->fsc->cap_wq,
2095                        &session->s_cap_release_work)) {
2096                 dout("cap release work queued\n");
2097         } else {
2098                 ceph_put_mds_session(session);
2099                 dout("failed to queue cap release work\n");
2100         }
2101 }
2102
2103 /*
2104  * caller holds session->s_cap_lock
2105  */
2106 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2107                               struct ceph_cap *cap)
2108 {
2109         list_add_tail(&cap->session_caps, &session->s_cap_releases);
2110         session->s_num_cap_releases++;
2111
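        /*
         * Flush whenever the pending count reaches a multiple of
         * CEPH_CAPS_PER_RELEASE, so releases go out in message-sized
         * batches rather than one work item per cap.
         */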
2112         if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2113                 ceph_flush_cap_releases(session->s_mdsc, session);
2114 }
2115
2116 static void ceph_cap_reclaim_work(struct work_struct *work)
2117 {
2118         struct ceph_mds_client *mdsc =
2119                 container_of(work, struct ceph_mds_client, cap_reclaim_work);
2120         int ret = ceph_trim_dentries(mdsc);
2121         if (ret == -EAGAIN)
2122                 ceph_queue_cap_reclaim_work(mdsc);
2123 }
2124
2125 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2126 {
2127         if (mdsc->stopping)
2128                 return;
2129
2130         if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2131                 dout("caps reclaim work queued\n");
2132         } else {
2133                 dout("failed to queue caps release work\n");
2134         }
2135 }
2136
2137 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2138 {
2139         int val;
2140         if (!nr)
2141                 return;
2142         val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
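        /*
         * Queue the reclaim work roughly once per CEPH_CAPS_PER_RELEASE
         * reclaimed caps: the running counter just crossed a multiple of
         * the batch size iff its new remainder is smaller than nr.
         */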
2143         if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2144                 atomic_set(&mdsc->cap_reclaim_pending, 0);
2145                 ceph_queue_cap_reclaim_work(mdsc);
2146         }
2147 }
2148
2149 /*
2150  * requests
2151  */
2152
2153 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2154                                     struct inode *dir)
2155 {
2156         struct ceph_inode_info *ci = ceph_inode(dir);
2157         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2158         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2159         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2160         unsigned int num_entries;
2161         int order;
2162
2163         spin_lock(&ci->i_ceph_lock);
2164         num_entries = ci->i_files + ci->i_subdirs;
2165         spin_unlock(&ci->i_ceph_lock);
2166         num_entries = max(num_entries, 1U);
2167         num_entries = min(num_entries, opt->max_readdir);
2168
2169         order = get_order(size * num_entries);
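        /*
         * Fall back to progressively smaller allocations if a large
         * contiguous buffer is unavailable; num_entries is recomputed
         * below from whatever order actually succeeded.
         */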
2170         while (order >= 0) {
2171                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2172                                                              __GFP_NOWARN,
2173                                                              order);
2174                 if (rinfo->dir_entries)
2175                         break;
2176                 order--;
2177         }
2178         if (!rinfo->dir_entries)
2179                 return -ENOMEM;
2180
2181         num_entries = (PAGE_SIZE << order) / size;
2182         num_entries = min(num_entries, opt->max_readdir);
2183
2184         rinfo->dir_buf_size = PAGE_SIZE << order;
2185         req->r_num_caps = num_entries + 1;
2186         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2187         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2188         return 0;
2189 }
2190
2191 /*
2192  * Create an mds request.
2193  */
2194 struct ceph_mds_request *
2195 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2196 {
2197         struct ceph_mds_request *req;
2198
2199         req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2200         if (!req)
2201                 return ERR_PTR(-ENOMEM);
2202
2203         mutex_init(&req->r_fill_mutex);
2204         req->r_mdsc = mdsc;
2205         req->r_started = jiffies;
2206         req->r_start_latency = ktime_get();
2207         req->r_resend_mds = -1;
2208         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2209         INIT_LIST_HEAD(&req->r_unsafe_target_item);
2210         req->r_fmode = -1;
2211         kref_init(&req->r_kref);
2212         RB_CLEAR_NODE(&req->r_node);
2213         INIT_LIST_HEAD(&req->r_wait);
2214         init_completion(&req->r_completion);
2215         init_completion(&req->r_safe_completion);
2216         INIT_LIST_HEAD(&req->r_unsafe_item);
2217
2218         ktime_get_coarse_real_ts64(&req->r_stamp);
2219
2220         req->r_op = op;
2221         req->r_direct_mode = mode;
2222         return req;
2223 }
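
/*
 * The request returned above starts with a single reference (r_kref);
 * callers drop it with ceph_mdsc_put_request() once they are done with
 * the request.
 */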
2224
2225 /*
2226  * return the oldest (lowest tid) request in the request tree, or NULL if none.
2227  *
2228  * called under mdsc->mutex.
2229  */
2230 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2231 {
2232         if (RB_EMPTY_ROOT(&mdsc->request_tree))
2233                 return NULL;
2234         return rb_entry(rb_first(&mdsc->request_tree),
2235                         struct ceph_mds_request, r_node);
2236 }
2237
2238 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2239 {
2240         return mdsc->oldest_tid;
2241 }
2242
2243 /*
2244  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
2245  * on build_path_from_dentry in fs/cifs/dir.c.
2246  *
2247  * If @stop_on_nosnap, generate path relative to the first non-snapped
2248  * inode.
2249  *
2250  * Encode hidden .snap dirs as a double /, i.e.
2251  *   foo/.snap/bar -> foo//bar
2252  */
2253 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2254                            int stop_on_nosnap)
2255 {
2256         struct dentry *temp;
2257         char *path;
2258         int pos;
2259         unsigned seq;
2260         u64 base;
2261
2262         if (!dentry)
2263                 return ERR_PTR(-EINVAL);
2264
2265         path = __getname();
2266         if (!path)
2267                 return ERR_PTR(-ENOMEM);
2268 retry:
2269         pos = PATH_MAX - 1;
2270         path[pos] = '\0';
2271
2272         seq = read_seqbegin(&rename_lock);
2273         rcu_read_lock();
2274         temp = dentry;
2275         for (;;) {
2276                 struct inode *inode;
2277
2278                 spin_lock(&temp->d_lock);
2279                 inode = d_inode(temp);
2280                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2281                         dout("build_path path+%d: %p SNAPDIR\n",
2282                              pos, temp);
2283                 } else if (stop_on_nosnap && inode && dentry != temp &&
2284                            ceph_snap(inode) == CEPH_NOSNAP) {
2285                         spin_unlock(&temp->d_lock);
2286                         pos++; /* get rid of any prepended '/' */
2287                         break;
2288                 } else {
2289                         pos -= temp->d_name.len;
2290                         if (pos < 0) {
2291                                 spin_unlock(&temp->d_lock);
2292                                 break;
2293                         }
2294                         memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2295                 }
2296                 spin_unlock(&temp->d_lock);
2297                 temp = READ_ONCE(temp->d_parent);
2298
2299                 /* Are we at the root? */
2300                 if (IS_ROOT(temp))
2301                         break;
2302
2303                 /* Are we out of buffer? */
2304                 if (--pos < 0)
2305                         break;
2306
2307                 path[pos] = '/';
2308         }
2309         base = ceph_ino(d_inode(temp));
2310         rcu_read_unlock();
2311
2312         if (read_seqretry(&rename_lock, seq))
2313                 goto retry;
2314
2315         if (pos < 0) {
2316                 /*
2317                  * A rename didn't occur, but somehow we didn't end up where
2318                  * we thought we would. Throw a warning and try again.
2319                  */
2320                 pr_warn("build_path did not end path lookup where "
2321                         "expected, pos is %d\n", pos);
2322                 goto retry;
2323         }
2324
2325         *pbase = base;
2326         *plen = PATH_MAX - 1 - pos;
2327         dout("build_path on %p %d built %llx '%.*s'\n",
2328              dentry, d_count(dentry), base, *plen, path + pos);
2329         return path + pos;
2330 }
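
/*
 * Editor's sketch (not part of the original file): typical use of
 * ceph_mdsc_build_path().  The returned pointer points into a __getname()
 * buffer and must be released with ceph_mdsc_free_path(); pathbase is the
 * ino the path is relative to.  The example_build_path() name is
 * hypothetical.
 */
#if 0
static int example_build_path(struct dentry *dentry)
{
        char *path;
        int pathlen;
        u64 pathbase;

        path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 1);
        if (IS_ERR(path))
                return PTR_ERR(path);

        dout("path %llx/%.*s\n", pathbase, pathlen, path);
        ceph_mdsc_free_path(path, pathlen);
        return 0;
}
#endif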
2331
2332 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2333                              const char **ppath, int *ppathlen, u64 *pino,
2334                              bool *pfreepath, bool parent_locked)
2335 {
2336         char *path;
2337
2338         rcu_read_lock();
2339         if (!dir)
2340                 dir = d_inode_rcu(dentry->d_parent);
2341         if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2342                 *pino = ceph_ino(dir);
2343                 rcu_read_unlock();
2344                 *ppath = dentry->d_name.name;
2345                 *ppathlen = dentry->d_name.len;
2346                 return 0;
2347         }
2348         rcu_read_unlock();
2349         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2350         if (IS_ERR(path))
2351                 return PTR_ERR(path);
2352         *ppath = path;
2353         *pfreepath = true;
2354         return 0;
2355 }
2356
2357 static int build_inode_path(struct inode *inode,
2358                             const char **ppath, int *ppathlen, u64 *pino,
2359                             bool *pfreepath)
2360 {
2361         struct dentry *dentry;
2362         char *path;
2363
2364         if (ceph_snap(inode) == CEPH_NOSNAP) {
2365                 *pino = ceph_ino(inode);
2366                 *ppathlen = 0;
2367                 return 0;
2368         }
2369         dentry = d_find_alias(inode);
2370         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2371         dput(dentry);
2372         if (IS_ERR(path))
2373                 return PTR_ERR(path);
2374         *ppath = path;
2375         *pfreepath = true;
2376         return 0;
2377 }
2378
2379 /*
2380  * request arguments may be specified via an inode *, a dentry *, or
2381  * an explicit ino+path.
2382  */
2383 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2384                                   struct inode *rdiri, const char *rpath,
2385                                   u64 rino, const char **ppath, int *pathlen,
2386                                   u64 *ino, bool *freepath, bool parent_locked)
2387 {
2388         int r = 0;
2389
2390         if (rinode) {
2391                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2392                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2393                      ceph_snap(rinode));
2394         } else if (rdentry) {
2395                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2396                                         freepath, parent_locked);
2397                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2398                      *ppath);
2399         } else if (rpath || rino) {
2400                 *ino = rino;
2401                 *ppath = rpath;
2402                 *pathlen = rpath ? strlen(rpath) : 0;
2403                 dout(" path %.*s\n", *pathlen, rpath);
2404         }
2405
2406         return r;
2407 }
2408
2409 /*
2410  * called under mdsc->mutex
2411  */
2412 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
2413                                                struct ceph_mds_request *req,
2414                                                int mds, bool drop_cap_releases)
2415 {
2416         struct ceph_msg *msg;
2417         struct ceph_mds_request_head *head;
2418         const char *path1 = NULL;
2419         const char *path2 = NULL;
2420         u64 ino1 = 0, ino2 = 0;
2421         int pathlen1 = 0, pathlen2 = 0;
2422         bool freepath1 = false, freepath2 = false;
2423         int len;
2424         u16 releases;
2425         void *p, *end;
2426         int ret;
2427
2428         ret = set_request_path_attr(req->r_inode, req->r_dentry,
2429                               req->r_parent, req->r_path1, req->r_ino1.ino,
2430                               &path1, &pathlen1, &ino1, &freepath1,
2431                               test_bit(CEPH_MDS_R_PARENT_LOCKED,
2432                                         &req->r_req_flags));
2433         if (ret < 0) {
2434                 msg = ERR_PTR(ret);
2435                 goto out;
2436         }
2437
2438         /* If r_old_dentry is set, then assume that its parent is locked */
2439         ret = set_request_path_attr(NULL, req->r_old_dentry,
2440                               req->r_old_dentry_dir,
2441                               req->r_path2, req->r_ino2.ino,
2442                               &path2, &pathlen2, &ino2, &freepath2, true);
2443         if (ret < 0) {
2444                 msg = ERR_PTR(ret);
2445                 goto out_free1;
2446         }
2447
2448         len = sizeof(*head) +
2449                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2450                 sizeof(struct ceph_timespec);
2451
2452         /* calculate (max) length for cap releases */
2453         len += sizeof(struct ceph_mds_request_release) *
2454                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2455                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2456         if (req->r_dentry_drop)
2457                 len += pathlen1;
2458         if (req->r_old_dentry_drop)
2459                 len += pathlen2;
2460
2461         msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2462         if (!msg) {
2463                 msg = ERR_PTR(-ENOMEM);
2464                 goto out_free2;
2465         }
2466
2467         msg->hdr.version = cpu_to_le16(2);
2468         msg->hdr.tid = cpu_to_le64(req->r_tid);
2469
2470         head = msg->front.iov_base;
2471         p = msg->front.iov_base + sizeof(*head);
2472         end = msg->front.iov_base + msg->front.iov_len;
2473
2474         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2475         head->op = cpu_to_le32(req->r_op);
2476         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
2477         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
2478         head->ino = cpu_to_le64(req->r_deleg_ino);
2479         head->args = req->r_args;
2480
2481         ceph_encode_filepath(&p, end, ino1, path1);
2482         ceph_encode_filepath(&p, end, ino2, path2);
2483
2484         /* make note of release offset, in case we need to replay */
2485         req->r_request_release_offset = p - msg->front.iov_base;
2486
2487         /* cap releases */
2488         releases = 0;
2489         if (req->r_inode_drop)
2490                 releases += ceph_encode_inode_release(&p,
2491                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2492                       mds, req->r_inode_drop, req->r_inode_unless,
2493                       req->r_op == CEPH_MDS_OP_READDIR);
2494         if (req->r_dentry_drop)
2495                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2496                                 req->r_parent, mds, req->r_dentry_drop,
2497                                 req->r_dentry_unless);
2498         if (req->r_old_dentry_drop)
2499                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2500                                 req->r_old_dentry_dir, mds,
2501                                 req->r_old_dentry_drop,
2502                                 req->r_old_dentry_unless);
2503         if (req->r_old_inode_drop)
2504                 releases += ceph_encode_inode_release(&p,
2505                       d_inode(req->r_old_dentry),
2506                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2507
2508         if (drop_cap_releases) {
2509                 releases = 0;
2510                 p = msg->front.iov_base + req->r_request_release_offset;
2511         }
2512
2513         head->num_releases = cpu_to_le16(releases);
2514
2515         /* time stamp */
2516         {
2517                 struct ceph_timespec ts;
2518                 ceph_encode_timespec64(&ts, &req->r_stamp);
2519                 ceph_encode_copy(&p, &ts, sizeof(ts));
2520         }
2521
2522         BUG_ON(p > end);
2523         msg->front.iov_len = p - msg->front.iov_base;
2524         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2525
2526         if (req->r_pagelist) {
2527                 struct ceph_pagelist *pagelist = req->r_pagelist;
2528                 ceph_msg_data_add_pagelist(msg, pagelist);
2529                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2530         } else {
2531                 msg->hdr.data_len = 0;
2532         }
2533
2534         msg->hdr.data_off = cpu_to_le16(0);
2535
2536 out_free2:
2537         if (freepath2)
2538                 ceph_mdsc_free_path((char *)path2, pathlen2);
2539 out_free1:
2540         if (freepath1)
2541                 ceph_mdsc_free_path((char *)path1, pathlen1);
2542 out:
2543         return msg;
2544 }
2545
2546 /*
2547  * called under mdsc->mutex if error, under no mutex if
2548  * success.
2549  */
2550 static void complete_request(struct ceph_mds_client *mdsc,
2551                              struct ceph_mds_request *req)
2552 {
2553         req->r_end_latency = ktime_get();
2554
2555         if (req->r_callback)
2556                 req->r_callback(mdsc, req);
2557         complete_all(&req->r_completion);
2558 }
2559
2560 /*
2561  * called under mdsc->mutex
2562  */
2563 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2564                                   struct ceph_mds_request *req,
2565                                   int mds, bool drop_cap_releases)
2566 {
2567         struct ceph_mds_request_head *rhead;
2568         struct ceph_msg *msg;
2569         int flags = 0;
2570
2571         req->r_attempts++;
2572         if (req->r_inode) {
2573                 struct ceph_cap *cap =
2574                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2575
2576                 if (cap)
2577                         req->r_sent_on_mseq = cap->mseq;
2578                 else
2579                         req->r_sent_on_mseq = -1;
2580         }
2581         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2582              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2583
2584         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2585                 void *p;
2586                 /*
2587                  * Replay.  Do not regenerate message (and rebuild
2588                  * paths, etc.); just use the original message.
2589                  * Rebuilding paths will break for renames because
2590                  * d_move mangles the src name.
2591                  */
2592                 msg = req->r_request;
2593                 rhead = msg->front.iov_base;
2594
2595                 flags = le32_to_cpu(rhead->flags);
2596                 flags |= CEPH_MDS_FLAG_REPLAY;
2597                 rhead->flags = cpu_to_le32(flags);
2598
2599                 if (req->r_target_inode)
2600                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2601
2602                 rhead->num_retry = req->r_attempts - 1;
2603
2604                 /* remove cap/dentry releases from message */
2605                 rhead->num_releases = 0;
2606
2607                 /* time stamp */
2608                 p = msg->front.iov_base + req->r_request_release_offset;
2609                 {
2610                         struct ceph_timespec ts;
2611                         ceph_encode_timespec64(&ts, &req->r_stamp);
2612                         ceph_encode_copy(&p, &ts, sizeof(ts));
2613                 }
2614
2615                 msg->front.iov_len = p - msg->front.iov_base;
2616                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2617                 return 0;
2618         }
2619
2620         if (req->r_request) {
2621                 ceph_msg_put(req->r_request);
2622                 req->r_request = NULL;
2623         }
2624         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2625         if (IS_ERR(msg)) {
2626                 req->r_err = PTR_ERR(msg);
2627                 return PTR_ERR(msg);
2628         }
2629         req->r_request = msg;
2630
2631         rhead = msg->front.iov_base;
2632         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2633         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2634                 flags |= CEPH_MDS_FLAG_REPLAY;
2635         if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2636                 flags |= CEPH_MDS_FLAG_ASYNC;
2637         if (req->r_parent)
2638                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2639         rhead->flags = cpu_to_le32(flags);
2640         rhead->num_fwd = req->r_num_fwd;
2641         rhead->num_retry = req->r_attempts - 1;
2642
2643         dout(" r_parent = %p\n", req->r_parent);
2644         return 0;
2645 }
2646
2647 /*
2648  * called under mdsc->mutex
2649  */
2650 static int __send_request(struct ceph_mds_client *mdsc,
2651                           struct ceph_mds_session *session,
2652                           struct ceph_mds_request *req,
2653                           bool drop_cap_releases)
2654 {
2655         int err;
2656
2657         err = __prepare_send_request(mdsc, req, session->s_mds,
2658                                      drop_cap_releases);
2659         if (!err) {
2660                 ceph_msg_get(req->r_request);
2661                 ceph_con_send(&session->s_con, req->r_request);
2662         }
2663
2664         return err;
2665 }
2666
2667 /*
2668  * send request, or put it on the appropriate wait list.
2669  */
2670 static void __do_request(struct ceph_mds_client *mdsc,
2671                         struct ceph_mds_request *req)
2672 {
2673         struct ceph_mds_session *session = NULL;
2674         int mds = -1;
2675         int err = 0;
2676         bool random;
2677
2678         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2679                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2680                         __unregister_request(mdsc, req);
2681                 return;
2682         }
2683
2684         if (req->r_timeout &&
2685             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2686                 dout("do_request timed out\n");
2687                 err = -ETIMEDOUT;
2688                 goto finish;
2689         }
2690         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2691                 dout("do_request forced umount\n");
2692                 err = -EIO;
2693                 goto finish;
2694         }
2695         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2696                 if (mdsc->mdsmap_err) {
2697                         err = mdsc->mdsmap_err;
2698                         dout("do_request mdsmap err %d\n", err);
2699                         goto finish;
2700                 }
2701                 if (mdsc->mdsmap->m_epoch == 0) {
2702                         dout("do_request no mdsmap, waiting for map\n");
2703                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2704                         return;
2705                 }
2706                 if (!(mdsc->fsc->mount_options->flags &
2707                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2708                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2709                         err = -EHOSTUNREACH;
2710                         goto finish;
2711                 }
2712         }
2713
2714         put_request_session(req);
2715
2716         mds = __choose_mds(mdsc, req, &random);
2717         if (mds < 0 ||
2718             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2719                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2720                         err = -EJUKEBOX;
2721                         goto finish;
2722                 }
2723                 dout("do_request no mds or not active, waiting for map\n");
2724                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2725                 return;
2726         }
2727
2728         /* get, open session */
2729         session = __ceph_lookup_mds_session(mdsc, mds);
2730         if (!session) {
2731                 session = register_session(mdsc, mds);
2732                 if (IS_ERR(session)) {
2733                         err = PTR_ERR(session);
2734                         goto finish;
2735                 }
2736         }
2737         req->r_session = ceph_get_mds_session(session);
2738
2739         dout("do_request mds%d session %p state %s\n", mds, session,
2740              ceph_session_state_name(session->s_state));
2741         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2742             session->s_state != CEPH_MDS_SESSION_HUNG) {
2743                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2744                         err = -EACCES;
2745                         goto out_session;
2746                 }
2747                 /*
2748                  * We cannot queue async requests since the caps and delegated
2749                  * inodes are bound to the session. Just return -EJUKEBOX and
2750                  * let the caller retry a sync request in that case.
2751                  */
2752                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2753                         err = -EJUKEBOX;
2754                         goto out_session;
2755                 }
2756                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2757                     session->s_state == CEPH_MDS_SESSION_CLOSING) {
2758                         __open_session(mdsc, session);
2759                         /* retry the same mds later */
2760                         if (random)
2761                                 req->r_resend_mds = mds;
2762                 }
2763                 list_add(&req->r_wait, &session->s_waiting);
2764                 goto out_session;
2765         }
2766
2767         /* send request */
2768         req->r_resend_mds = -1;   /* forget any previous mds hint */
2769
2770         if (req->r_request_started == 0)   /* note request start time */
2771                 req->r_request_started = jiffies;
2772
2773         err = __send_request(mdsc, session, req, false);
2774
2775 out_session:
2776         ceph_put_mds_session(session);
2777 finish:
2778         if (err) {
2779                 dout("__do_request early error %d\n", err);
2780                 req->r_err = err;
2781                 complete_request(mdsc, req);
2782                 __unregister_request(mdsc, req);
2783         }
2784         return;
2785 }
2786
2787 /*
2788  * called under mdsc->mutex
2789  */
2790 static void __wake_requests(struct ceph_mds_client *mdsc,
2791                             struct list_head *head)
2792 {
2793         struct ceph_mds_request *req;
2794         LIST_HEAD(tmp_list);
2795
2796         list_splice_init(head, &tmp_list);
2797
2798         while (!list_empty(&tmp_list)) {
2799                 req = list_entry(tmp_list.next,
2800                                  struct ceph_mds_request, r_wait);
2801                 list_del_init(&req->r_wait);
2802                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2803                 __do_request(mdsc, req);
2804         }
2805 }
2806
2807 /*
2808  * Wake up threads with requests pending for @mds, so that they can
2809  * resubmit their requests to a possibly different mds.
2810  */
2811 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2812 {
2813         struct ceph_mds_request *req;
2814         struct rb_node *p = rb_first(&mdsc->request_tree);
2815
2816         dout("kick_requests mds%d\n", mds);
2817         while (p) {
2818                 req = rb_entry(p, struct ceph_mds_request, r_node);
2819                 p = rb_next(p);
2820                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2821                         continue;
2822                 if (req->r_attempts > 0)
2823                         continue; /* only new requests */
2824                 if (req->r_session &&
2825                     req->r_session->s_mds == mds) {
2826                         dout(" kicking tid %llu\n", req->r_tid);
2827                         list_del_init(&req->r_wait);
2828                         __do_request(mdsc, req);
2829                 }
2830         }
2831 }
2832
2833 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
2834                               struct ceph_mds_request *req)
2835 {
2836         int err = 0;
2837
2838         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2839         if (req->r_inode)
2840                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2841         if (req->r_parent) {
2842                 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
2843                 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
2844                             CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
2845                 spin_lock(&ci->i_ceph_lock);
2846                 ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
2847                 __ceph_touch_fmode(ci, mdsc, fmode);
2848                 spin_unlock(&ci->i_ceph_lock);
2849                 ihold(req->r_parent);
2850         }
2851         if (req->r_old_dentry_dir)
2852                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2853                                   CEPH_CAP_PIN);
2854
2855         if (req->r_inode) {
2856                 err = ceph_wait_on_async_create(req->r_inode);
2857                 if (err) {
2858                         dout("%s: wait for async create returned: %d\n",
2859                              __func__, err);
2860                         return err;
2861                 }
2862         }
2863
2864         if (!err && req->r_old_inode) {
2865                 err = ceph_wait_on_async_create(req->r_old_inode);
2866                 if (err) {
2867                         dout("%s: wait for async create returned: %d\n",
2868                              __func__, err);
2869                         return err;
2870                 }
2871         }
2872
2873         dout("submit_request on %p for inode %p\n", req, dir);
2874         mutex_lock(&mdsc->mutex);
2875         __register_request(mdsc, req, dir);
2876         __do_request(mdsc, req);
2877         err = req->r_err;
2878         mutex_unlock(&mdsc->mutex);
2879         return err;
2880 }
2881
2882 static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
2883                                   struct ceph_mds_request *req)
2884 {
2885         int err;
2886
2887         /* wait */
2888         dout("do_request waiting\n");
2889         if (!req->r_timeout && req->r_wait_for_completion) {
2890                 err = req->r_wait_for_completion(mdsc, req);
2891         } else {
2892                 long timeleft = wait_for_completion_killable_timeout(
2893                                         &req->r_completion,
2894                                         ceph_timeout_jiffies(req->r_timeout));
2895                 if (timeleft > 0)
2896                         err = 0;
2897                 else if (!timeleft)
2898                         err = -ETIMEDOUT;  /* timed out */
2899                 else
2900                         err = timeleft;  /* killed */
2901         }
2902         dout("do_request waited, got %d\n", err);
2903         mutex_lock(&mdsc->mutex);
2904
2905         /* only abort if we didn't race with a real reply */
2906         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2907                 err = le32_to_cpu(req->r_reply_info.head->result);
2908         } else if (err < 0) {
2909                 dout("aborted request %lld with %d\n", req->r_tid, err);
2910
2911                 /*
2912                  * ensure we aren't running concurrently with
2913                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2914                  * rely on locks (dir mutex) held by our caller.
2915                  */
2916                 mutex_lock(&req->r_fill_mutex);
2917                 req->r_err = err;
2918                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
2919                 mutex_unlock(&req->r_fill_mutex);
2920
2921                 if (req->r_parent &&
2922                     (req->r_op & CEPH_MDS_OP_WRITE))
2923                         ceph_invalidate_dir_request(req);
2924         } else {
2925                 err = req->r_err;
2926         }
2927
2928         mutex_unlock(&mdsc->mutex);
2929         return err;
2930 }
2931
2932 /*
2933  * Synchronously perform an mds request.  Take care of all of the
2934  * session setup, forwarding, retry details.
2935  */
2936 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2937                          struct inode *dir,
2938                          struct ceph_mds_request *req)
2939 {
2940         int err;
2941
2942         dout("do_request on %p\n", req);
2943
2944         /* issue */
2945         err = ceph_mdsc_submit_request(mdsc, dir, req);
2946         if (!err)
2947                 err = ceph_mdsc_wait_request(mdsc, req);
2948         dout("do_request %p done, result %d\n", req, err);
2949         return err;
2950 }
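
/*
 * Typical caller pattern (illustrative only, not taken from this file):
 * allocate a request with ceph_mdsc_create_request(), fill in r_inode,
 * r_parent, r_args, etc., call ceph_mdsc_do_request(), and finally drop
 * the reference with ceph_mdsc_put_request().
 */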
2951
2952 /*
2953  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2954  * namespace request.
2955  */
2956 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2957 {
2958         struct inode *dir = req->r_parent;
2959         struct inode *old_dir = req->r_old_dentry_dir;
2960
2961         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
2962
2963         ceph_dir_clear_complete(dir);
2964         if (old_dir)
2965                 ceph_dir_clear_complete(old_dir);
2966         if (req->r_dentry)
2967                 ceph_invalidate_dentry_lease(req->r_dentry);
2968         if (req->r_old_dentry)
2969                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2970 }
2971
2972 /*
2973  * Handle mds reply.
2974  *
2975  * We take the session mutex and parse and process the reply immediately.
2976  * This preserves the logical ordering of replies, capabilities, etc., sent
2977  * by the MDS as they are applied to our local cache.
2978  */
2979 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2980 {
2981         struct ceph_mds_client *mdsc = session->s_mdsc;
2982         struct ceph_mds_request *req;
2983         struct ceph_mds_reply_head *head = msg->front.iov_base;
2984         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2985         struct ceph_snap_realm *realm;
2986         u64 tid;
2987         int err, result;
2988         int mds = session->s_mds;
2989
2990         if (msg->front.iov_len < sizeof(*head)) {
2991                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2992                 ceph_msg_dump(msg);
2993                 return;
2994         }
2995
2996         /* get request, session */
2997         tid = le64_to_cpu(msg->hdr.tid);
2998         mutex_lock(&mdsc->mutex);
2999         req = lookup_get_request(mdsc, tid);
3000         if (!req) {
3001                 dout("handle_reply on unknown tid %llu\n", tid);
3002                 mutex_unlock(&mdsc->mutex);
3003                 return;
3004         }
3005         dout("handle_reply %p\n", req);
3006
3007         /* correct session? */
3008         if (req->r_session != session) {
3009                 pr_err("mdsc_handle_reply got %llu on session mds%d"
3010                        " not mds%d\n", tid, session->s_mds,
3011                        req->r_session ? req->r_session->s_mds : -1);
3012                 mutex_unlock(&mdsc->mutex);
3013                 goto out;
3014         }
3015
3016         /* dup? */
3017         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3018             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3019                 pr_warn("got a dup %s reply on %llu from mds%d\n",
3020                            head->safe ? "safe" : "unsafe", tid, mds);
3021                 mutex_unlock(&mdsc->mutex);
3022                 goto out;
3023         }
3024         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3025                 pr_warn("got unsafe after safe on %llu from mds%d\n",
3026                            tid, mds);
3027                 mutex_unlock(&mdsc->mutex);
3028                 goto out;
3029         }
3030
3031         result = le32_to_cpu(head->result);
3032
3033         /*
3034          * Handle an ESTALE:
3035          * if we're not talking to the authority, resend to the authority;
3036          * if the authority has changed while we weren't looking, resend
3037          * to the new authority.
3038          * Otherwise we just have to return an ESTALE to the caller.
3039          */
3040         if (result == -ESTALE) {
3041                 dout("got ESTALE on request %llu\n", req->r_tid);
3042                 req->r_resend_mds = -1;
3043                 if (req->r_direct_mode != USE_AUTH_MDS) {
3044                         dout("not using auth, setting for that now\n");
3045                         req->r_direct_mode = USE_AUTH_MDS;
3046                         __do_request(mdsc, req);
3047                         mutex_unlock(&mdsc->mutex);
3048                         goto out;
3049                 } else  {
3050                         int mds = __choose_mds(mdsc, req, NULL);
3051                         if (mds >= 0 && mds != req->r_session->s_mds) {
3052                                 dout("but auth changed, so resending\n");
3053                                 __do_request(mdsc, req);
3054                                 mutex_unlock(&mdsc->mutex);
3055                                 goto out;
3056                         }
3057                 }
3058                 dout("have to return ESTALE on request %llu\n", req->r_tid);
3059         }
3060
3061
3062         if (head->safe) {
3063                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3064                 __unregister_request(mdsc, req);
3065
3066                 /* last request during umount? */
3067                 if (mdsc->stopping && !__get_oldest_req(mdsc))
3068                         complete_all(&mdsc->safe_umount_waiters);
3069
3070                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3071                         /*
3072                          * We already handled the unsafe response, now do the
3073                          * cleanup.  No need to examine the response; the MDS
3074                          * doesn't include any result info in the safe
3075                          * response.  And even if it did, there is nothing
3076                          * useful we could do with a revised return value.
3077                          */
3078                         dout("got safe reply %llu, mds%d\n", tid, mds);
3079
3080                         mutex_unlock(&mdsc->mutex);
3081                         goto out;
3082                 }
3083         } else {
3084                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3085                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3086         }
3087
3088         dout("handle_reply tid %lld result %d\n", tid, result);
3089         rinfo = &req->r_reply_info;
3090         if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3091                 err = parse_reply_info(session, msg, rinfo, (u64)-1);
3092         else
3093                 err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3094         mutex_unlock(&mdsc->mutex);
3095
3096         mutex_lock(&session->s_mutex);
3097         if (err < 0) {
3098                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3099                 ceph_msg_dump(msg);
3100                 goto out_err;
3101         }
3102
3103         /* snap trace */
3104         realm = NULL;
3105         if (rinfo->snapblob_len) {
3106                 down_write(&mdsc->snap_rwsem);
3107                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
3108                                 rinfo->snapblob + rinfo->snapblob_len,
3109                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3110                                 &realm);
3111                 downgrade_write(&mdsc->snap_rwsem);
3112         } else {
3113                 down_read(&mdsc->snap_rwsem);
3114         }
3115
3116         /* insert trace into our cache */
3117         mutex_lock(&req->r_fill_mutex);
3118         current->journal_info = req;
3119         err = ceph_fill_trace(mdsc->fsc->sb, req);
3120         if (err == 0) {
3121                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3122                                     req->r_op == CEPH_MDS_OP_LSSNAP))
3123                         ceph_readdir_prepopulate(req, req->r_session);
3124         }
3125         current->journal_info = NULL;
3126         mutex_unlock(&req->r_fill_mutex);
3127
3128         up_read(&mdsc->snap_rwsem);
3129         if (realm)
3130                 ceph_put_snap_realm(mdsc, realm);
3131
3132         if (err == 0) {
3133                 if (req->r_target_inode &&
3134                     test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3135                         struct ceph_inode_info *ci =
3136                                 ceph_inode(req->r_target_inode);
3137                         spin_lock(&ci->i_unsafe_lock);
3138                         list_add_tail(&req->r_unsafe_target_item,
3139                                       &ci->i_unsafe_iops);
3140                         spin_unlock(&ci->i_unsafe_lock);
3141                 }
3142
3143                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3144         }
3145 out_err:
3146         mutex_lock(&mdsc->mutex);
3147         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3148                 if (err) {
3149                         req->r_err = err;
3150                 } else {
3151                         req->r_reply =  ceph_msg_get(msg);
3152                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3153                 }
3154         } else {
3155                 dout("reply arrived after request %lld was aborted\n", tid);
3156         }
3157         mutex_unlock(&mdsc->mutex);
3158
3159         mutex_unlock(&session->s_mutex);
3160
3161         /* kick calling process */
3162         complete_request(mdsc, req);
3163
3164         ceph_update_metadata_latency(&mdsc->metric, req->r_start_latency,
3165                                      req->r_end_latency, err);
3166 out:
3167         ceph_mdsc_put_request(req);
3168         return;
3169 }
3170
3171
3172
3173 /*
3174  * handle mds notification that our request has been forwarded.
3175  */
3176 static void handle_forward(struct ceph_mds_client *mdsc,
3177                            struct ceph_mds_session *session,
3178                            struct ceph_msg *msg)
3179 {
3180         struct ceph_mds_request *req;
3181         u64 tid = le64_to_cpu(msg->hdr.tid);
3182         u32 next_mds;
3183         u32 fwd_seq;
3184         int err = -EINVAL;
3185         void *p = msg->front.iov_base;
3186         void *end = p + msg->front.iov_len;
3187
3188         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3189         next_mds = ceph_decode_32(&p);
3190         fwd_seq = ceph_decode_32(&p);
3191
3192         mutex_lock(&mdsc->mutex);
3193         req = lookup_get_request(mdsc, tid);
3194         if (!req) {
3195                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3196                 goto out;  /* dup reply? */
3197         }
3198
3199         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3200                 dout("forward tid %llu aborted, unregistering\n", tid);
3201                 __unregister_request(mdsc, req);
3202         } else if (fwd_seq <= req->r_num_fwd) {
3203                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3204                      tid, next_mds, req->r_num_fwd, fwd_seq);
3205         } else {
3206                 /* resend. forward race not possible; mds would drop */
3207                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3208                 BUG_ON(req->r_err);
3209                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3210                 req->r_attempts = 0;
3211                 req->r_num_fwd = fwd_seq;
3212                 req->r_resend_mds = next_mds;
3213                 put_request_session(req);
3214                 __do_request(mdsc, req);
3215         }
3216         ceph_mdsc_put_request(req);
3217 out:
3218         mutex_unlock(&mdsc->mutex);
3219         return;
3220
3221 bad:
3222         pr_err("mdsc_handle_forward decode error err=%d\n", err);
3223 }
3224
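/*
 * Decode the session message's client metadata (a map<string,string>),
 * noting whether the MDS's "error_string" entry says we have been
 * blacklisted.
 */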
3225 static int __decode_session_metadata(void **p, void *end,
3226                                      bool *blacklisted)
3227 {
3228         /* map<string,string> */
3229         u32 n;
3230         bool err_str;
3231         ceph_decode_32_safe(p, end, n, bad);
3232         while (n-- > 0) {
3233                 u32 len;
3234                 ceph_decode_32_safe(p, end, len, bad);
3235                 ceph_decode_need(p, end, len, bad);
3236                 err_str = !strncmp(*p, "error_string", len);
3237                 *p += len;
3238                 ceph_decode_32_safe(p, end, len, bad);
3239                 ceph_decode_need(p, end, len, bad);
3240                 if (err_str && strnstr(*p, "blacklisted", len))
3241                         *blacklisted = true;
3242                 *p += len;
3243         }
3244         return 0;
3245 bad:
3246         return -1;
3247 }
3248
3249 /*
3250  * handle an mds session control message
3251  */
3252 static void handle_session(struct ceph_mds_session *session,
3253                            struct ceph_msg *msg)
3254 {
3255         struct ceph_mds_client *mdsc = session->s_mdsc;
3256         int mds = session->s_mds;
3257         int msg_version = le16_to_cpu(msg->hdr.version);
3258         void *p = msg->front.iov_base;
3259         void *end = p + msg->front.iov_len;
3260         struct ceph_mds_session_head *h;
3261         u32 op;
3262         u64 seq, features = 0;
3263         int wake = 0;
3264         bool blacklisted = false;
3265
3266         /* decode */
3267         ceph_decode_need(&p, end, sizeof(*h), bad);
3268         h = p;
3269         p += sizeof(*h);
3270
3271         op = le32_to_cpu(h->op);
3272         seq = le64_to_cpu(h->seq);
3273
3274         if (msg_version >= 3) {
3275                 u32 len;
3276                 /* version >= 2, metadata */
3277                 if (__decode_session_metadata(&p, end, &blacklisted) < 0)
3278                         goto bad;
3279                 /* version >= 3, feature bits */
3280                 ceph_decode_32_safe(&p, end, len, bad);
3281                 ceph_decode_64_safe(&p, end, features, bad);
3282                 p += len - sizeof(features);
3283         }
3284
3285         mutex_lock(&mdsc->mutex);
3286         if (op == CEPH_SESSION_CLOSE) {
3287                 ceph_get_mds_session(session);
3288                 __unregister_session(mdsc, session);
3289         }
3290         /* FIXME: this ttl calculation is generous */
3291         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3292         mutex_unlock(&mdsc->mutex);
3293
3294         mutex_lock(&session->s_mutex);
3295
3296         dout("handle_session mds%d %s %p state %s seq %llu\n",
3297              mds, ceph_session_op_name(op), session,
3298              ceph_session_state_name(session->s_state), seq);
3299
3300         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3301                 session->s_state = CEPH_MDS_SESSION_OPEN;
3302                 pr_info("mds%d came back\n", session->s_mds);
3303         }
3304
3305         switch (op) {
3306         case CEPH_SESSION_OPEN:
3307                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3308                         pr_info("mds%d reconnect success\n", session->s_mds);
3309                 session->s_state = CEPH_MDS_SESSION_OPEN;
3310                 session->s_features = features;
3311                 renewed_caps(mdsc, session, 0);
3312                 wake = 1;
3313                 if (mdsc->stopping)
3314                         __close_session(mdsc, session);
3315                 break;
3316
3317         case CEPH_SESSION_RENEWCAPS:
3318                 if (session->s_renew_seq == seq)
3319                         renewed_caps(mdsc, session, 1);
3320                 break;
3321
3322         case CEPH_SESSION_CLOSE:
3323                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3324                         pr_info("mds%d reconnect denied\n", session->s_mds);
3325                 session->s_state = CEPH_MDS_SESSION_CLOSED;
3326                 cleanup_session_requests(mdsc, session);
3327                 remove_session_caps(session);
3328                 wake = 2; /* for good measure */
3329                 wake_up_all(&mdsc->session_close_wq);
3330                 break;
3331
3332         case CEPH_SESSION_STALE:
3333                 pr_info("mds%d caps went stale, renewing\n",
3334                         session->s_mds);
3335                 spin_lock(&session->s_gen_ttl_lock);
3336                 session->s_cap_gen++;
3337                 session->s_cap_ttl = jiffies - 1;
3338                 spin_unlock(&session->s_gen_ttl_lock);
3339                 send_renew_caps(mdsc, session);
3340                 break;
3341
3342         case CEPH_SESSION_RECALL_STATE:
3343                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3344                 break;
3345
3346         case CEPH_SESSION_FLUSHMSG:
3347                 send_flushmsg_ack(mdsc, session, seq);
3348                 break;
3349
3350         case CEPH_SESSION_FORCE_RO:
3351                 dout("force_session_readonly %p\n", session);
3352                 spin_lock(&session->s_cap_lock);
3353                 session->s_readonly = true;
3354                 spin_unlock(&session->s_cap_lock);
3355                 wake_up_session_caps(session, FORCE_RO);
3356                 break;
3357
3358         case CEPH_SESSION_REJECT:
3359                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3360                 pr_info("mds%d rejected session\n", session->s_mds);
3361                 session->s_state = CEPH_MDS_SESSION_REJECTED;
3362                 cleanup_session_requests(mdsc, session);
3363                 remove_session_caps(session);
3364                 if (blacklisted)
3365                         mdsc->fsc->blacklisted = true;
3366                 wake = 2; /* for good measure */
3367                 break;
3368
3369         default:
3370                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3371                 WARN_ON(1);
3372         }
3373
3374         mutex_unlock(&session->s_mutex);
3375         if (wake) {
3376                 mutex_lock(&mdsc->mutex);
3377                 __wake_requests(mdsc, &session->s_waiting);
3378                 if (wake == 2)
3379                         kick_requests(mdsc, mds);
3380                 mutex_unlock(&mdsc->mutex);
3381         }
3382         if (op == CEPH_SESSION_CLOSE)
3383                 ceph_put_mds_session(session);
3384         return;
3385
3386 bad:
3387         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3388                (int)msg->front.iov_len);
3389         ceph_msg_dump(msg);
3390         return;
3391 }
3392
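/*
 * Put the cap references on r_parent that were recorded in r_dir_caps.
 * The _no_check variant below does the same but skips the cap check on
 * release; it is used when replaying requests during MDS reconnect.
 */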
3393 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3394 {
3395         int dcaps;
3396
3397         dcaps = xchg(&req->r_dir_caps, 0);
3398         if (dcaps) {
3399                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3400                 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3401         }
3402 }
3403
3404 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3405 {
3406         int dcaps;
3407
3408         dcaps = xchg(&req->r_dir_caps, 0);
3409         if (dcaps) {
3410                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3411                 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3412                                                 dcaps);
3413         }
3414 }
3415
3416 /*
3417  * called under session->s_mutex.
3418  */
3419 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3420                                    struct ceph_mds_session *session)
3421 {
3422         struct ceph_mds_request *req, *nreq;
3423         struct rb_node *p;
3424
3425         dout("replay_unsafe_requests mds%d\n", session->s_mds);
3426
3427         mutex_lock(&mdsc->mutex);
3428         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3429                 __send_request(mdsc, session, req, true);
3430
3431         /*
3432          * Also re-send old requests when the MDS enters the reconnect stage,
3433          * so that the MDS can process completed requests in its clientreplay stage.
3434          */
3435         p = rb_first(&mdsc->request_tree);
3436         while (p) {
3437                 req = rb_entry(p, struct ceph_mds_request, r_node);
3438                 p = rb_next(p);
3439                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3440                         continue;
3441                 if (req->r_attempts == 0)
3442                         continue; /* only old requests */
3443                 if (!req->r_session)
3444                         continue;
3445                 if (req->r_session->s_mds != session->s_mds)
3446                         continue;
3447
3448                 ceph_mdsc_release_dir_caps_no_check(req);
3449
3450                 __send_request(mdsc, session, req, true);
3451         }
3452         mutex_unlock(&mdsc->mutex);
3453 }
3454
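/*
 * The reconnect pagelist is approaching RECONNECT_MAX_SIZE: send what
 * has been encoded so far as its own CLIENT_RECONNECT message (patching
 * the nr_caps/nr_realms placeholder at the front and flagging that more
 * will follow), then switch recon_state over to a fresh pagelist.
 */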
3455 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3456 {
3457         struct ceph_msg *reply;
3458         struct ceph_pagelist *_pagelist;
3459         struct page *page;
3460         __le32 *addr;
3461         int err = -ENOMEM;
3462
3463         if (!recon_state->allow_multi)
3464                 return -ENOSPC;
3465
3466         /* can't handle a message that contains both caps and realms */
3467         BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3468
3469         /* pre-allocate new pagelist */
3470         _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3471         if (!_pagelist)
3472                 return -ENOMEM;
3473
3474         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3475         if (!reply)
3476                 goto fail_msg;
3477
3478         /* placeholder for nr_caps */
3479         err = ceph_pagelist_encode_32(_pagelist, 0);
3480         if (err < 0)
3481                 goto fail;
3482
3483         if (recon_state->nr_caps) {
3484                 /* currently encoding caps */
3485                 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3486                 if (err)
3487                         goto fail;
3488         } else {
3489                 /* placeholder for nr_realms (currently encoding realms) */
3490                 err = ceph_pagelist_encode_32(_pagelist, 0);
3491                 if (err < 0)
3492                         goto fail;
3493         }
3494
3495         err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3496         if (err)
3497                 goto fail;
3498
3499         page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3500         addr = kmap_atomic(page);
3501         if (recon_state->nr_caps) {
3502                 /* currently encoding caps */
3503                 *addr = cpu_to_le32(recon_state->nr_caps);
3504         } else {
3505                 /* currently encoding realms */
3506                 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3507         }
3508         kunmap_atomic(addr);
3509
3510         reply->hdr.version = cpu_to_le16(5);
3511         reply->hdr.compat_version = cpu_to_le16(4);
3512
3513         reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3514         ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3515
3516         ceph_con_send(&recon_state->session->s_con, reply);
3517         ceph_pagelist_release(recon_state->pagelist);
3518
3519         recon_state->pagelist = _pagelist;
3520         recon_state->nr_caps = 0;
3521         recon_state->nr_realms = 0;
3522         recon_state->msg_version = 5;
3523         return 0;
3524 fail:
3525         ceph_msg_put(reply);
3526 fail_msg:
3527         ceph_pagelist_release(_pagelist);
3528         return err;
3529 }
3530
3531 /*
3532  * Encode information about a cap for a reconnect with the MDS.
3533  */
3534 static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3535                           void *arg)
3536 {
3537         union {
3538                 struct ceph_mds_cap_reconnect v2;
3539                 struct ceph_mds_cap_reconnect_v1 v1;
3540         } rec;
3541         struct ceph_inode_info *ci = cap->ci;
3542         struct ceph_reconnect_state *recon_state = arg;
3543         struct ceph_pagelist *pagelist = recon_state->pagelist;
3544         int err;
3545         u64 snap_follows;
3546
3547         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3548              inode, ceph_vinop(inode), cap, cap->cap_id,
3549              ceph_cap_string(cap->issued));
3550
3551         spin_lock(&ci->i_ceph_lock);
3552         cap->seq = 0;        /* reset cap seq */
3553         cap->issue_seq = 0;  /* and issue_seq */
3554         cap->mseq = 0;       /* and migrate_seq */
3555         cap->cap_gen = cap->session->s_cap_gen;
3556
3557         /* These are lost when the session goes away */
3558         if (S_ISDIR(inode->i_mode)) {
3559                 if (cap->issued & CEPH_CAP_DIR_CREATE) {
3560                         ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3561                         memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3562                 }
3563                 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3564         }
3565
3566         if (recon_state->msg_version >= 2) {
3567                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3568                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3569                 rec.v2.issued = cpu_to_le32(cap->issued);
3570                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3571                 rec.v2.pathbase = 0;
3572                 rec.v2.flock_len = (__force __le32)
3573                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3574         } else {
3575                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3576                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3577                 rec.v1.issued = cpu_to_le32(cap->issued);
3578                 rec.v1.size = cpu_to_le64(inode->i_size);
3579                 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3580                 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3581                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3582                 rec.v1.pathbase = 0;
3583         }
3584
3585         if (list_empty(&ci->i_cap_snaps)) {
3586                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3587         } else {
3588                 struct ceph_cap_snap *capsnap =
3589                         list_first_entry(&ci->i_cap_snaps,
3590                                          struct ceph_cap_snap, ci_item);
3591                 snap_follows = capsnap->follows;
3592         }
3593         spin_unlock(&ci->i_ceph_lock);
3594
3595         if (recon_state->msg_version >= 2) {
3596                 int num_fcntl_locks, num_flock_locks;
3597                 struct ceph_filelock *flocks = NULL;
3598                 size_t struct_len, total_len = sizeof(u64);
3599                 u8 struct_v = 0;
3600
3601 encode_again:
3602                 if (rec.v2.flock_len) {
3603                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3604                 } else {
3605                         num_fcntl_locks = 0;
3606                         num_flock_locks = 0;
3607                 }
3608                 if (num_fcntl_locks + num_flock_locks > 0) {
3609                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3610                                                sizeof(struct ceph_filelock),
3611                                                GFP_NOFS);
3612                         if (!flocks) {
3613                                 err = -ENOMEM;
3614                                 goto out_err;
3615                         }
3616                         err = ceph_encode_locks_to_buffer(inode, flocks,
3617                                                           num_fcntl_locks,
3618                                                           num_flock_locks);
3619                         if (err) {
3620                                 kfree(flocks);
3621                                 flocks = NULL;
3622                                 if (err == -ENOSPC)
3623                                         goto encode_again;
3624                                 goto out_err;
3625                         }
3626                 } else {
3627                         kfree(flocks);
3628                         flocks = NULL;
3629                 }
3630
3631                 if (recon_state->msg_version >= 3) {
3632                         /* version, compat_version and struct_len */
3633                         total_len += 2 * sizeof(u8) + sizeof(u32);
3634                         struct_v = 2;
3635                 }
3636                 /*
3637                  * number of encoded locks is stable, so copy to pagelist
3638                  */
3639                 struct_len = 2 * sizeof(u32) +
3640                             (num_fcntl_locks + num_flock_locks) *
3641                             sizeof(struct ceph_filelock);
3642                 rec.v2.flock_len = cpu_to_le32(struct_len);
3643
3644                 struct_len += sizeof(u32) + sizeof(rec.v2);
3645
3646                 if (struct_v >= 2)
3647                         struct_len += sizeof(u64); /* snap_follows */
3648
3649                 total_len += struct_len;
3650
3651                 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3652                         err = send_reconnect_partial(recon_state);
3653                         if (err)
3654                                 goto out_freeflocks;
3655                         pagelist = recon_state->pagelist;
3656                 }
3657
3658                 err = ceph_pagelist_reserve(pagelist, total_len);
3659                 if (err)
3660                         goto out_freeflocks;
3661
3662                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3663                 if (recon_state->msg_version >= 3) {
3664                         ceph_pagelist_encode_8(pagelist, struct_v);
3665                         ceph_pagelist_encode_8(pagelist, 1);
3666                         ceph_pagelist_encode_32(pagelist, struct_len);
3667                 }
3668                 ceph_pagelist_encode_string(pagelist, NULL, 0);
3669                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3670                 ceph_locks_to_pagelist(flocks, pagelist,
3671                                        num_fcntl_locks, num_flock_locks);
3672                 if (struct_v >= 2)
3673                         ceph_pagelist_encode_64(pagelist, snap_follows);
3674 out_freeflocks:
3675                 kfree(flocks);
3676         } else {
3677                 u64 pathbase = 0;
3678                 int pathlen = 0;
3679                 char *path = NULL;
3680                 struct dentry *dentry;
3681
3682                 dentry = d_find_alias(inode);
3683                 if (dentry) {
3684                         path = ceph_mdsc_build_path(dentry,
3685                                                 &pathlen, &pathbase, 0);
3686                         dput(dentry);
3687                         if (IS_ERR(path)) {
3688                                 err = PTR_ERR(path);
3689                                 goto out_err;
3690                         }
3691                         rec.v1.pathbase = cpu_to_le64(pathbase);
3692                 }
3693
3694                 err = ceph_pagelist_reserve(pagelist,
3695                                             sizeof(u64) + sizeof(u32) +
3696                                             pathlen + sizeof(rec.v1));
3697                 if (err) {
3698                         goto out_freepath;
3699                 }
3700
3701                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3702                 ceph_pagelist_encode_string(pagelist, path, pathlen);
3703                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3704 out_freepath:
3705                 ceph_mdsc_free_path(path, pathlen);
3706         }
3707
3708 out_err:
3709         if (err >= 0)
3710                 recon_state->nr_caps++;
3711         return err;
3712 }
3713
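/*
 * Encode every snap realm we know about into the reconnect pagelist,
 * splitting into additional messages via send_reconnect_partial() when
 * the v4+ encoding would exceed RECONNECT_MAX_SIZE.
 */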
3714 static int encode_snap_realms(struct ceph_mds_client *mdsc,
3715                               struct ceph_reconnect_state *recon_state)
3716 {
3717         struct rb_node *p;
3718         struct ceph_pagelist *pagelist = recon_state->pagelist;
3719         int err = 0;
3720
3721         if (recon_state->msg_version >= 4) {
3722                 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3723                 if (err < 0)
3724                         goto fail;
3725         }
3726
3727         /*
3728          * Snap realms: we provide the mds with the ino, seq (version), and
3729          * parent for all of our realms.  If the mds has any newer info,
3730          * it will tell us.
3731          */
3732         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3733                 struct ceph_snap_realm *realm =
3734                        rb_entry(p, struct ceph_snap_realm, node);
3735                 struct ceph_mds_snaprealm_reconnect sr_rec;
3736
3737                 if (recon_state->msg_version >= 4) {
3738                         size_t need = sizeof(u8) * 2 + sizeof(u32) +
3739                                       sizeof(sr_rec);
3740
3741                         if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3742                                 err = send_reconnect_partial(recon_state);
3743                                 if (err)
3744                                         goto fail;
3745                                 pagelist = recon_state->pagelist;
3746                         }
3747
3748                         err = ceph_pagelist_reserve(pagelist, need);
3749                         if (err)
3750                                 goto fail;
3751
3752                         ceph_pagelist_encode_8(pagelist, 1);
3753                         ceph_pagelist_encode_8(pagelist, 1);
3754                         ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3755                 }
3756
3757                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3758                      realm->ino, realm->seq, realm->parent_ino);
3759                 sr_rec.ino = cpu_to_le64(realm->ino);
3760                 sr_rec.seq = cpu_to_le64(realm->seq);
3761                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3762
3763                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3764                 if (err)
3765                         goto fail;
3766
3767                 recon_state->nr_realms++;
3768         }
3769 fail:
3770         return err;
3771 }
3772
3773
3774 /*
3775  * If an MDS fails and recovers, clients need to reconnect in order to
3776  * reestablish shared state.  This includes all caps issued through
3777  * this session _and_ the snap_realm hierarchy.  Because it's not
3778  * clear which snap realms the mds cares about, we send everything we
3779  * know about; that ensures we'll then get any new info the
3780  * recovering MDS might have.
3781  *
3782  * This is a relatively heavyweight operation, but it's rare.
3783  */
3784 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3785                                struct ceph_mds_session *session)
3786 {
3787         struct ceph_msg *reply;
3788         int mds = session->s_mds;
3789         int err = -ENOMEM;
3790         struct ceph_reconnect_state recon_state = {
3791                 .session = session,
3792         };
3793         LIST_HEAD(dispose);
3794
3795         pr_info("mds%d reconnect start\n", mds);
3796
3797         recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
3798         if (!recon_state.pagelist)
3799                 goto fail_nopagelist;
3800
3801         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3802         if (!reply)
3803                 goto fail_nomsg;
3804
3805         xa_destroy(&session->s_delegated_inos);
3806
3807         mutex_lock(&session->s_mutex);
3808         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
3809         session->s_seq = 0;
3810
3811         dout("session %p state %s\n", session,
3812              ceph_session_state_name(session->s_state));
3813
3814         spin_lock(&session->s_gen_ttl_lock);
3815         session->s_cap_gen++;
3816         spin_unlock(&session->s_gen_ttl_lock);
3817
3818         spin_lock(&session->s_cap_lock);
3819         /* don't know if session is readonly */
3820         session->s_readonly = 0;
3821         /*
3822          * notify __ceph_remove_cap() that we are composing cap reconnect.
3823          * If a cap gets released before being added to the cap reconnect,
3824          * __ceph_remove_cap() should skip queuing cap release.
3825          */
3826         session->s_cap_reconnect = 1;
3827         /* drop old cap expires; we're about to reestablish that state */
3828         detach_cap_releases(session, &dispose);
3829         spin_unlock(&session->s_cap_lock);
3830         dispose_cap_releases(mdsc, &dispose);
3831
3832         /* trim unused caps to reduce MDS's cache rejoin time */
3833         if (mdsc->fsc->sb->s_root)
3834                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3835
3836         ceph_con_close(&session->s_con);
3837         ceph_con_open(&session->s_con,
3838                       CEPH_ENTITY_TYPE_MDS, mds,
3839                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3840
3841         /* replay unsafe requests */
3842         replay_unsafe_requests(mdsc, session);
3843
3844         ceph_early_kick_flushing_caps(mdsc, session);
3845
3846         down_read(&mdsc->snap_rwsem);
3847
3848         /* placeholder for nr_caps */
3849         err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
3850         if (err)
3851                 goto fail;
3852
3853         if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
3854                 recon_state.msg_version = 3;
3855                 recon_state.allow_multi = true;
3856         } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
3857                 recon_state.msg_version = 3;
3858         } else {
3859                 recon_state.msg_version = 2;
3860         }
3861         /* traverse this session's caps */
3862         err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
3863
3864         spin_lock(&session->s_cap_lock);
3865         session->s_cap_reconnect = 0;
3866         spin_unlock(&session->s_cap_lock);
3867
3868         if (err < 0)
3869                 goto fail;
3870
3871         /* check if all realms can be encoded into current message */
3872         if (mdsc->num_snap_realms) {
3873                 size_t total_len =
3874                         recon_state.pagelist->length +
3875                         mdsc->num_snap_realms *
3876                         sizeof(struct ceph_mds_snaprealm_reconnect);
3877                 if (recon_state.msg_version >= 4) {
3878                         /* number of realms */
3879                         total_len += sizeof(u32);
3880                         /* version, compat_version and struct_len */
3881                         total_len += mdsc->num_snap_realms *
3882                                      (2 * sizeof(u8) + sizeof(u32));
3883                 }
3884                 if (total_len > RECONNECT_MAX_SIZE) {
3885                         if (!recon_state.allow_multi) {
3886                                 err = -ENOSPC;
3887                                 goto fail;
3888                         }
3889                         if (recon_state.nr_caps) {
3890                                 err = send_reconnect_partial(&recon_state);
3891                                 if (err)
3892                                         goto fail;
3893                         }
3894                         recon_state.msg_version = 5;
3895                 }
3896         }
3897
3898         err = encode_snap_realms(mdsc, &recon_state);
3899         if (err < 0)
3900                 goto fail;
3901
3902         if (recon_state.msg_version >= 5) {
3903                 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
3904                 if (err < 0)
3905                         goto fail;
3906         }
3907
3908         if (recon_state.nr_caps || recon_state.nr_realms) {
3909                 struct page *page =
3910                         list_first_entry(&recon_state.pagelist->head,
3911                                         struct page, lru);
3912                 __le32 *addr = kmap_atomic(page);
3913                 if (recon_state.nr_caps) {
3914                         WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
3915                         *addr = cpu_to_le32(recon_state.nr_caps);
3916                 } else if (recon_state.msg_version >= 4) {
3917                         *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
3918                 }
3919                 kunmap_atomic(addr);
3920         }
3921
3922         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3923         if (recon_state.msg_version >= 4)
3924                 reply->hdr.compat_version = cpu_to_le16(4);
3925
3926         reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
3927         ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
3928
3929         ceph_con_send(&session->s_con, reply);
3930
3931         mutex_unlock(&session->s_mutex);
3932
3933         mutex_lock(&mdsc->mutex);
3934         __wake_requests(mdsc, &session->s_waiting);
3935         mutex_unlock(&mdsc->mutex);
3936
3937         up_read(&mdsc->snap_rwsem);
3938         ceph_pagelist_release(recon_state.pagelist);
3939         return;
3940
3941 fail:
3942         ceph_msg_put(reply);
3943         up_read(&mdsc->snap_rwsem);
3944         mutex_unlock(&session->s_mutex);
3945 fail_nomsg:
3946         ceph_pagelist_release(recon_state.pagelist);
3947 fail_nopagelist:
3948         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3949         return;
3950 }
3951
3952
3953 /*
3954  * compare old and new mdsmaps, kicking requests
3955  * and closing out old connections as necessary
3956  *
3957  * called under mdsc->mutex.
3958  */
3959 static void check_new_map(struct ceph_mds_client *mdsc,
3960                           struct ceph_mdsmap *newmap,
3961                           struct ceph_mdsmap *oldmap)
3962 {
3963         int i;
3964         int oldstate, newstate;
3965         struct ceph_mds_session *s;
3966
3967         dout("check_new_map new %u old %u\n",
3968              newmap->m_epoch, oldmap->m_epoch);
3969
3970         for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
3971                 if (!mdsc->sessions[i])
3972                         continue;
3973                 s = mdsc->sessions[i];
3974                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3975                 newstate = ceph_mdsmap_get_state(newmap, i);
3976
3977                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3978                      i, ceph_mds_state_name(oldstate),
3979                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3980                      ceph_mds_state_name(newstate),
3981                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3982                      ceph_session_state_name(s->s_state));
3983
3984                 if (i >= newmap->possible_max_rank) {
3985                         /* force close session for stopped mds */
3986                         ceph_get_mds_session(s);
3987                         __unregister_session(mdsc, s);
3988                         __wake_requests(mdsc, &s->s_waiting);
3989                         mutex_unlock(&mdsc->mutex);
3990
3991                         mutex_lock(&s->s_mutex);
3992                         cleanup_session_requests(mdsc, s);
3993                         remove_session_caps(s);
3994                         mutex_unlock(&s->s_mutex);
3995
3996                         ceph_put_mds_session(s);
3997
3998                         mutex_lock(&mdsc->mutex);
3999                         kick_requests(mdsc, i);
4000                         continue;
4001                 }
4002
4003                 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4004                            ceph_mdsmap_get_addr(newmap, i),
4005                            sizeof(struct ceph_entity_addr))) {
4006                         /* just close it */
4007                         mutex_unlock(&mdsc->mutex);
4008                         mutex_lock(&s->s_mutex);
4009                         mutex_lock(&mdsc->mutex);
4010                         ceph_con_close(&s->s_con);
4011                         mutex_unlock(&s->s_mutex);
4012                         s->s_state = CEPH_MDS_SESSION_RESTARTING;
4013                 } else if (oldstate == newstate) {
4014                         continue;  /* nothing new with this mds */
4015                 }
4016
4017                 /*
4018                  * send reconnect?
4019                  */
4020                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4021                     newstate >= CEPH_MDS_STATE_RECONNECT) {
4022                         mutex_unlock(&mdsc->mutex);
4023                         send_mds_reconnect(mdsc, s);
4024                         mutex_lock(&mdsc->mutex);
4025                 }
4026
4027                 /*
4028                  * kick requests on any mds that has gone active.
4029                  */
4030                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4031                     newstate >= CEPH_MDS_STATE_ACTIVE) {
4032                         if (oldstate != CEPH_MDS_STATE_CREATING &&
4033                             oldstate != CEPH_MDS_STATE_STARTING)
4034                                 pr_info("mds%d recovery completed\n", s->s_mds);
4035                         kick_requests(mdsc, i);
4036                         mutex_unlock(&mdsc->mutex);
4037                         mutex_lock(&s->s_mutex);
4038                         mutex_lock(&mdsc->mutex);
4039                         ceph_kick_flushing_caps(mdsc, s);
4040                         mutex_unlock(&s->s_mutex);
4041                         wake_up_session_caps(s, RECONNECT);
4042                 }
4043         }
4044
4045         for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4046                 s = mdsc->sessions[i];
4047                 if (!s)
4048                         continue;
4049                 if (!ceph_mdsmap_is_laggy(newmap, i))
4050                         continue;
4051                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4052                     s->s_state == CEPH_MDS_SESSION_HUNG ||
4053                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
4054                         dout(" connecting to export targets of laggy mds%d\n",
4055                              i);
4056                         __open_export_target_sessions(mdsc, s);
4057                 }
4058         }
4059 }
4060
4061
4062
4063 /*
4064  * leases
4065  */
4066
4067 /*
4068  * caller must hold session s_mutex, dentry->d_lock
4069  */
4070 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4071 {
4072         struct ceph_dentry_info *di = ceph_dentry(dentry);
4073
4074         ceph_put_mds_session(di->lease_session);
4075         di->lease_session = NULL;
4076 }
4077
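/*
 * Handle a dentry lease message from the MDS: drop the lease on a
 * REVOKE (acking back to the MDS by reusing the same message) or update
 * the lease timeouts on a RENEW.
 */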
4078 static void handle_lease(struct ceph_mds_client *mdsc,
4079                          struct ceph_mds_session *session,
4080                          struct ceph_msg *msg)
4081 {
4082         struct super_block *sb = mdsc->fsc->sb;
4083         struct inode *inode;
4084         struct dentry *parent, *dentry;
4085         struct ceph_dentry_info *di;
4086         int mds = session->s_mds;
4087         struct ceph_mds_lease *h = msg->front.iov_base;
4088         u32 seq;
4089         struct ceph_vino vino;
4090         struct qstr dname;
4091         int release = 0;
4092
4093         dout("handle_lease from mds%d\n", mds);
4094
4095         /* decode */
4096         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4097                 goto bad;
4098         vino.ino = le64_to_cpu(h->ino);
4099         vino.snap = CEPH_NOSNAP;
4100         seq = le32_to_cpu(h->seq);
4101         dname.len = get_unaligned_le32(h + 1);
4102         if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4103                 goto bad;
4104         dname.name = (void *)(h + 1) + sizeof(u32);
4105
4106         /* lookup inode */
4107         inode = ceph_find_inode(sb, vino);
4108         dout("handle_lease %s, ino %llx %p %.*s\n",
4109              ceph_lease_op_name(h->action), vino.ino, inode,
4110              dname.len, dname.name);
4111
4112         mutex_lock(&session->s_mutex);
4113         session->s_seq++;
4114
4115         if (!inode) {
4116                 dout("handle_lease no inode %llx\n", vino.ino);
4117                 goto release;
4118         }
4119
4120         /* dentry */
4121         parent = d_find_alias(inode);
4122         if (!parent) {
4123                 dout("no parent dentry on inode %p\n", inode);
4124                 WARN_ON(1);
4125                 goto release;  /* hrm... */
4126         }
4127         dname.hash = full_name_hash(parent, dname.name, dname.len);
4128         dentry = d_lookup(parent, &dname);
4129         dput(parent);
4130         if (!dentry)
4131                 goto release;
4132
4133         spin_lock(&dentry->d_lock);
4134         di = ceph_dentry(dentry);
4135         switch (h->action) {
4136         case CEPH_MDS_LEASE_REVOKE:
4137                 if (di->lease_session == session) {
4138                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4139                                 h->seq = cpu_to_le32(di->lease_seq);
4140                         __ceph_mdsc_drop_dentry_lease(dentry);
4141                 }
4142                 release = 1;
4143                 break;
4144
4145         case CEPH_MDS_LEASE_RENEW:
4146                 if (di->lease_session == session &&
4147                     di->lease_gen == session->s_cap_gen &&
4148                     di->lease_renew_from &&
4149                     di->lease_renew_after == 0) {
4150                         unsigned long duration =
4151                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4152
4153                         di->lease_seq = seq;
4154                         di->time = di->lease_renew_from + duration;
4155                         di->lease_renew_after = di->lease_renew_from +
4156                                 (duration >> 1);
4157                         di->lease_renew_from = 0;
4158                 }
4159                 break;
4160         }
4161         spin_unlock(&dentry->d_lock);
4162         dput(dentry);
4163
4164         if (!release)
4165                 goto out;
4166
4167 release:
4168         /* let's just reuse the same message */
4169         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4170         ceph_msg_get(msg);
4171         ceph_con_send(&session->s_con, msg);
4172
4173 out:
4174         mutex_unlock(&session->s_mutex);
4175         /* avoid calling iput_final() in mds dispatch threads */
4176         ceph_async_iput(inode);
4177         return;
4178
4179 bad:
4180         pr_err("corrupt lease message\n");
4181         ceph_msg_dump(msg);
4182 }
4183
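/*
 * Send a CEPH_MSG_CLIENT_LEASE message (e.g. a preemptive lease
 * release) for @dentry to the given session.
 */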
4184 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4185                               struct dentry *dentry, char action,
4186                               u32 seq)
4187 {
4188         struct ceph_msg *msg;
4189         struct ceph_mds_lease *lease;
4190         struct inode *dir;
4191         int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4192
4193         dout("lease_send_msg identry %p %s to mds%d\n",
4194              dentry, ceph_lease_op_name(action), session->s_mds);
4195
4196         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4197         if (!msg)
4198                 return;
4199         lease = msg->front.iov_base;
4200         lease->action = action;
4201         lease->seq = cpu_to_le32(seq);
4202
4203         spin_lock(&dentry->d_lock);
4204         dir = d_inode(dentry->d_parent);
4205         lease->ino = cpu_to_le64(ceph_ino(dir));
4206         lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4207
4208         put_unaligned_le32(dentry->d_name.len, lease + 1);
4209         memcpy((void *)(lease + 1) + 4,
4210                dentry->d_name.name, dentry->d_name.len);
4211         spin_unlock(&dentry->d_lock);
4212         /*
4213          * if this is a preemptive lease RELEASE, no need to
4214          * flush request stream, since the actual request will
4215          * soon follow.
4216          */
4217         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4218
4219         ceph_con_send(&session->s_con, msg);
4220 }
4221
4222 /*
4223  * lock/unlock all sessions, to wait for ongoing session activities to finish
4224  */
4225 static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
4226 {
4227         int i;
4228
4229         mutex_lock(&mdsc->mutex);
4230         for (i = 0; i < mdsc->max_sessions; i++) {
4231                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4232                 if (!s)
4233                         continue;
4234                 mutex_unlock(&mdsc->mutex);
4235                 mutex_lock(&s->s_mutex);
4236                 mutex_unlock(&s->s_mutex);
4237                 ceph_put_mds_session(s);
4238                 mutex_lock(&mdsc->mutex);
4239         }
4240         mutex_unlock(&mdsc->mutex);
4241 }
4242
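/*
 * If this client has been blacklisted and the CLEANRECOVER mount option
 * is in effect, force a reconnect of the mount, rate-limited to once
 * every 30 minutes.
 */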
4243 static void maybe_recover_session(struct ceph_mds_client *mdsc)
4244 {
4245         struct ceph_fs_client *fsc = mdsc->fsc;
4246
4247         if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4248                 return;
4249
4250         if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4251                 return;
4252
4253         if (!READ_ONCE(fsc->blacklisted))
4254                 return;
4255
4256         if (fsc->last_auto_reconnect &&
4257             time_before(jiffies, fsc->last_auto_reconnect + HZ * 60 * 30))
4258                 return;
4259
4260         pr_info("auto reconnect after blacklisted\n");
4261         fsc->last_auto_reconnect = jiffies;
4262         ceph_force_reconnect(fsc->sb);
4263 }
4264
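/*
 * Check whether a session is usable for further work: resend the close
 * request for a CLOSING session, mark an OPEN session HUNG once its ttl
 * has expired, and return false for NEW/RESTARTING/REJECTED sessions so
 * callers (e.g. delayed_work below) skip them and wait.  Made non-static
 * so code outside this file can reuse the same check.
 */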
4265 bool check_session_state(struct ceph_mds_session *s)
4266 {
4267         if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4268                 dout("resending session close request for mds%d\n",
4269                                 s->s_mds);
4270                 request_close_session(s);
4271                 return false;
4272         }
4273         if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4274                 if (s->s_state == CEPH_MDS_SESSION_OPEN) {
4275                         s->s_state = CEPH_MDS_SESSION_HUNG;
4276                         pr_info("mds%d hung\n", s->s_mds);
4277                 }
4278         }
4279         if (s->s_state == CEPH_MDS_SESSION_NEW ||
4280             s->s_state == CEPH_MDS_SESSION_RESTARTING ||
4281             s->s_state == CEPH_MDS_SESSION_REJECTED)
4282                 /* this mds has failed or is recovering, just wait */
4283                 return false;
4284
4285         return true;
4286 }
4287
4288 /*
4289  * delayed work -- periodically trim expired leases, renew caps with mds
4290  */
4291 static void schedule_delayed(struct ceph_mds_client *mdsc)
4292 {
4293         int delay = 5;
4294         unsigned hz = round_jiffies_relative(HZ * delay);
4295         schedule_delayed_work(&mdsc->delayed_work, hz);
4296 }
4297
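/*
 * Periodic housekeeping: roughly every session_timeout/4 renew caps
 * with each mds (otherwise just send a keepalive), send queued cap
 * releases for OPEN/HUNG sessions, flush delayed caps, queue cap
 * reclaim work, trim the snapid map, optionally attempt auto-recovery
 * after a blacklisting, and re-arm the timer.
 */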
4298 static void delayed_work(struct work_struct *work)
4299 {
4300         int i;
4301         struct ceph_mds_client *mdsc =
4302                 container_of(work, struct ceph_mds_client, delayed_work.work);
4303         int renew_interval;
4304         int renew_caps;
4305
4306         dout("mdsc delayed_work\n");
4307
4308         mutex_lock(&mdsc->mutex);
4309         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4310         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4311                                    mdsc->last_renew_caps);
4312         if (renew_caps)
4313                 mdsc->last_renew_caps = jiffies;
4314
4315         for (i = 0; i < mdsc->max_sessions; i++) {
4316                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4317                 if (!s)
4318                         continue;
4319
4320                 if (!check_session_state(s)) {
4321                         ceph_put_mds_session(s);
4322                         continue;
4323                 }
4324                 mutex_unlock(&mdsc->mutex);
4325
4326                 mutex_lock(&s->s_mutex);
4327                 if (renew_caps)
4328                         send_renew_caps(mdsc, s);
4329                 else
4330                         ceph_con_keepalive(&s->s_con);
4331                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4332                     s->s_state == CEPH_MDS_SESSION_HUNG)
4333                         ceph_send_cap_releases(mdsc, s);
4334                 mutex_unlock(&s->s_mutex);
4335                 ceph_put_mds_session(s);
4336
4337                 mutex_lock(&mdsc->mutex);
4338         }
4339         mutex_unlock(&mdsc->mutex);
4340
4341         ceph_check_delayed_caps(mdsc);
4342
4343         ceph_queue_cap_reclaim_work(mdsc);
4344
4345         ceph_trim_snapid_map(mdsc);
4346
4347         maybe_recover_session(mdsc);
4348
4349         schedule_delayed(mdsc);
4350 }
4351
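/*
 * Allocate and initialize the mds client state for this fs client.  An
 * empty mdsmap is allocated up front; the real map arrives later from
 * the monitors.  Presumably called from the superblock setup path while
 * the filesystem is being mounted.
 */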
4352 int ceph_mdsc_init(struct ceph_fs_client *fsc)
4353
4354 {
4355         struct ceph_mds_client *mdsc;
4356         int err;
4357
4358         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4359         if (!mdsc)
4360                 return -ENOMEM;
4361         mdsc->fsc = fsc;
4362         mutex_init(&mdsc->mutex);
4363         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4364         if (!mdsc->mdsmap) {
4365                 err = -ENOMEM;
4366                 goto err_mdsc;
4367         }
4368
4369         fsc->mdsc = mdsc;
4370         init_completion(&mdsc->safe_umount_waiters);
4371         init_waitqueue_head(&mdsc->session_close_wq);
4372         INIT_LIST_HEAD(&mdsc->waiting_for_map);
4373         mdsc->sessions = NULL;
4374         atomic_set(&mdsc->num_sessions, 0);
4375         mdsc->max_sessions = 0;
4376         mdsc->stopping = 0;
4377         atomic64_set(&mdsc->quotarealms_count, 0);
4378         mdsc->quotarealms_inodes = RB_ROOT;
4379         mutex_init(&mdsc->quotarealms_inodes_mutex);
4380         mdsc->last_snap_seq = 0;
4381         init_rwsem(&mdsc->snap_rwsem);
4382         mdsc->snap_realms = RB_ROOT;
4383         INIT_LIST_HEAD(&mdsc->snap_empty);
4384         mdsc->num_snap_realms = 0;
4385         spin_lock_init(&mdsc->snap_empty_lock);
4386         mdsc->last_tid = 0;
4387         mdsc->oldest_tid = 0;
4388         mdsc->request_tree = RB_ROOT;
4389         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4390         mdsc->last_renew_caps = jiffies;
4391         INIT_LIST_HEAD(&mdsc->cap_delay_list);
4392         INIT_LIST_HEAD(&mdsc->cap_wait_list);
4393         spin_lock_init(&mdsc->cap_delay_lock);
4394         INIT_LIST_HEAD(&mdsc->snap_flush_list);
4395         spin_lock_init(&mdsc->snap_flush_lock);
4396         mdsc->last_cap_flush_tid = 1;
4397         INIT_LIST_HEAD(&mdsc->cap_flush_list);
4398         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4399         mdsc->num_cap_flushing = 0;
4400         spin_lock_init(&mdsc->cap_dirty_lock);
4401         init_waitqueue_head(&mdsc->cap_flushing_wq);
4402         INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4403         atomic_set(&mdsc->cap_reclaim_pending, 0);
4404         err = ceph_metric_init(&mdsc->metric);
4405         if (err)
4406                 goto err_mdsmap;
4407
4408         spin_lock_init(&mdsc->dentry_list_lock);
4409         INIT_LIST_HEAD(&mdsc->dentry_leases);
4410         INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4411
4412         ceph_caps_init(mdsc);
4413         ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4414
4415         spin_lock_init(&mdsc->snapid_map_lock);
4416         mdsc->snapid_map_tree = RB_ROOT;
4417         INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4418
4419         init_rwsem(&mdsc->pool_perm_rwsem);
4420         mdsc->pool_perm_tree = RB_ROOT;
4421
4422         strscpy(mdsc->nodename, utsname()->nodename,
4423                 sizeof(mdsc->nodename));
4424         return 0;
4425
4426 err_mdsmap:
4427         kfree(mdsc->mdsmap);
4428 err_mdsc:
4429         kfree(mdsc);
4430         return err;
4431 }
4432
4433 /*
4434  * Wait for safe replies on open mds requests.  If we time out, drop
4435  * all requests from the tree to avoid dangling dentry refs.
4436  */
4437 static void wait_requests(struct ceph_mds_client *mdsc)
4438 {
4439         struct ceph_options *opts = mdsc->fsc->client->options;
4440         struct ceph_mds_request *req;
4441
4442         mutex_lock(&mdsc->mutex);
4443         if (__get_oldest_req(mdsc)) {
4444                 mutex_unlock(&mdsc->mutex);
4445
4446                 dout("wait_requests waiting for requests\n");
4447                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4448                                     ceph_timeout_jiffies(opts->mount_timeout));
4449
4450                 /* tear down remaining requests */
4451                 mutex_lock(&mdsc->mutex);
4452                 while ((req = __get_oldest_req(mdsc))) {
4453                         dout("wait_requests timed out on tid %llu\n",
4454                              req->r_tid);
4455                         list_del_init(&req->r_wait);
4456                         __unregister_request(mdsc, req);
4457                 }
4458         }
4459         mutex_unlock(&mdsc->mutex);
4460         dout("wait_requests done\n");
4461 }
4462
4463 /*
4464  * called before mount is ro, and before dentries are torn down.
4465  * (hmm, does this still race with new lookups?)
4466  */
4467 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4468 {
4469         dout("pre_umount\n");
4470         mdsc->stopping = 1;
4471
4472         lock_unlock_sessions(mdsc);
4473         ceph_flush_dirty_caps(mdsc);
4474         wait_requests(mdsc);
4475
4476         /*
4477          * wait for reply handlers to drop their request refs and
4478          * their inode/dcache refs
4479          */
4480         ceph_msgr_flush();
4481
4482         ceph_cleanup_quotarealms_inodes(mdsc);
4483 }
4484
4485 /*
4486  * wait for all write mds requests to flush.
4487  */
4488 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
4489 {
4490         struct ceph_mds_request *req = NULL, *nextreq;
4491         struct rb_node *n;
4492
4493         mutex_lock(&mdsc->mutex);
4494         dout("wait_unsafe_requests want %lld\n", want_tid);
4495 restart:
4496         req = __get_oldest_req(mdsc);
4497         while (req && req->r_tid <= want_tid) {
4498                 /* find next request */
4499                 n = rb_next(&req->r_node);
4500                 if (n)
4501                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4502                 else
4503                         nextreq = NULL;
4504                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4505                     (req->r_op & CEPH_MDS_OP_WRITE)) {
4506                         /* write op */
4507                         ceph_mdsc_get_request(req);
4508                         if (nextreq)
4509                                 ceph_mdsc_get_request(nextreq);
4510                         mutex_unlock(&mdsc->mutex);
4511                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
4512                              req->r_tid, want_tid);
4513                         wait_for_completion(&req->r_safe_completion);
4514                         mutex_lock(&mdsc->mutex);
4515                         ceph_mdsc_put_request(req);
4516                         if (!nextreq)
4517                                 break;  /* next dne before, so we're done! */
4518                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
4519                                 /* next request was removed from tree */
4520                                 ceph_mdsc_put_request(nextreq);
4521                                 goto restart;
4522                         }
4523                         ceph_mdsc_put_request(nextreq);  /* won't go away */
4524                 }
4525                 req = nextreq;
4526         }
4527         mutex_unlock(&mdsc->mutex);
4528         dout("wait_unsafe_requests done\n");
4529 }
4530
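/*
 * Flush dirty caps and wait until all write requests up to the current
 * last_tid, and all cap flushes up to last_cap_flush_tid, have been
 * acknowledged by the MDSes.  Presumably this backs the sync()/syncfs()
 * path for the mount.
 */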
4531 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4532 {
4533         u64 want_tid, want_flush;
4534
4535         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4536                 return;
4537
4538         dout("sync\n");
4539         mutex_lock(&mdsc->mutex);
4540         want_tid = mdsc->last_tid;
4541         mutex_unlock(&mdsc->mutex);
4542
4543         ceph_flush_dirty_caps(mdsc);
4544         spin_lock(&mdsc->cap_dirty_lock);
4545         want_flush = mdsc->last_cap_flush_tid;
4546         if (!list_empty(&mdsc->cap_flush_list)) {
4547                 struct ceph_cap_flush *cf =
4548                         list_last_entry(&mdsc->cap_flush_list,
4549                                         struct ceph_cap_flush, g_list);
4550                 cf->wake = true;
4551         }
4552         spin_unlock(&mdsc->cap_dirty_lock);
4553
4554         dout("sync want tid %lld flush_seq %lld\n",
4555              want_tid, want_flush);
4556
4557         wait_unsafe_requests(mdsc, want_tid);
4558         wait_caps_flush(mdsc, want_flush);
4559 }
4560
4561 /*
4562  * true if all sessions are closed, or we force unmount
4563  */
4564 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4565 {
4566         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4567                 return true;
4568         return atomic_read(&mdsc->num_sessions) <= skipped;
4569 }
4570
4571 /*
4572  * called after sb is ro.
4573  */
4574 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4575 {
4576         struct ceph_options *opts = mdsc->fsc->client->options;
4577         struct ceph_mds_session *session;
4578         int i;
4579         int skipped = 0;
4580
4581         dout("close_sessions\n");
4582
4583         /* close sessions */
4584         mutex_lock(&mdsc->mutex);
4585         for (i = 0; i < mdsc->max_sessions; i++) {
4586                 session = __ceph_lookup_mds_session(mdsc, i);
4587                 if (!session)
4588                         continue;
4589                 mutex_unlock(&mdsc->mutex);
4590                 mutex_lock(&session->s_mutex);
4591                 if (__close_session(mdsc, session) <= 0)
4592                         skipped++;
4593                 mutex_unlock(&session->s_mutex);
4594                 ceph_put_mds_session(session);
4595                 mutex_lock(&mdsc->mutex);
4596         }
4597         mutex_unlock(&mdsc->mutex);
4598
4599         dout("waiting for sessions to close\n");
4600         wait_event_timeout(mdsc->session_close_wq,
4601                            done_closing_sessions(mdsc, skipped),
4602                            ceph_timeout_jiffies(opts->mount_timeout));
4603
4604         /* tear down remaining sessions */
4605         mutex_lock(&mdsc->mutex);
4606         for (i = 0; i < mdsc->max_sessions; i++) {
4607                 if (mdsc->sessions[i]) {
4608                         session = ceph_get_mds_session(mdsc->sessions[i]);
4609                         __unregister_session(mdsc, session);
4610                         mutex_unlock(&mdsc->mutex);
4611                         mutex_lock(&session->s_mutex);
4612                         remove_session_caps(session);
4613                         mutex_unlock(&session->s_mutex);
4614                         ceph_put_mds_session(session);
4615                         mutex_lock(&mdsc->mutex);
4616                 }
4617         }
4618         WARN_ON(!list_empty(&mdsc->cap_delay_list));
4619         mutex_unlock(&mdsc->mutex);
4620
4621         ceph_cleanup_snapid_map(mdsc);
4622         ceph_cleanup_empty_realms(mdsc);
4623
4624         cancel_work_sync(&mdsc->cap_reclaim_work);
4625         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4626
4627         dout("stopped\n");
4628 }
4629
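/*
 * Forced unmount: close every session, drop requests and caps for any
 * session stuck in CLOSING, and wake all waiters so pending requests
 * can be kicked or errored out.
 */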
4630 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4631 {
4632         struct ceph_mds_session *session;
4633         int mds;
4634
4635         dout("force umount\n");
4636
4637         mutex_lock(&mdsc->mutex);
4638         for (mds = 0; mds < mdsc->max_sessions; mds++) {
4639                 session = __ceph_lookup_mds_session(mdsc, mds);
4640                 if (!session)
4641                         continue;
4642
4643                 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4644                         __unregister_session(mdsc, session);
4645                 __wake_requests(mdsc, &session->s_waiting);
4646                 mutex_unlock(&mdsc->mutex);
4647
4648                 mutex_lock(&session->s_mutex);
4649                 __close_session(mdsc, session);
4650                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4651                         cleanup_session_requests(mdsc, session);
4652                         remove_session_caps(session);
4653                 }
4654                 mutex_unlock(&session->s_mutex);
4655                 ceph_put_mds_session(session);
4656
4657                 mutex_lock(&mdsc->mutex);
4658                 kick_requests(mdsc, mds);
4659         }
4660         __wake_requests(mdsc, &mdsc->waiting_for_map);
4661         mutex_unlock(&mdsc->mutex);
4662 }
4663
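/*
 * Final teardown of the mds client: stop the delayed work timer and
 * release the mdsmap, the session array, and the cap and pool
 * permission state.
 */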
4664 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
4665 {
4666         dout("stop\n");
4667         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4668         if (mdsc->mdsmap)
4669                 ceph_mdsmap_destroy(mdsc->mdsmap);
4670         kfree(mdsc->sessions);
4671         ceph_caps_finalize(mdsc);
4672         ceph_pool_perm_destroy(mdsc);
4673 }
4674
4675 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
4676 {
4677         struct ceph_mds_client *mdsc = fsc->mdsc;
4678         dout("mdsc_destroy %p\n", mdsc);
4679
4680         if (!mdsc)
4681                 return;
4682
4683         /* flush out any connection work with references to us */
4684         ceph_msgr_flush();
4685
4686         ceph_mdsc_stop(mdsc);
4687
4688         ceph_metric_destroy(&mdsc->metric);
4689
4690         fsc->mdsc = NULL;
4691         kfree(mdsc);
4692         dout("mdsc_destroy %p done\n", mdsc);
4693 }
4694
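/*
 * Handle a CEPH_MSG_FS_MAP_USER message: scan the fsmap for the file
 * system whose name matches the mds_namespace mount option, remember
 * its fscid in the monitor client, and subscribe to the corresponding
 * mdsmap.  On a decode error, or if the namespace is not found, record
 * the error in mdsmap_err and wake anyone waiting for a map.
 */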
4695 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4696 {
4697         struct ceph_fs_client *fsc = mdsc->fsc;
4698         const char *mds_namespace = fsc->mount_options->mds_namespace;
4699         void *p = msg->front.iov_base;
4700         void *end = p + msg->front.iov_len;
4701         u32 epoch;
4702         u32 map_len;
4703         u32 num_fs;
4704         u32 mount_fscid = (u32)-1;
4705         u8 struct_v, struct_cv;
4706         int err = -EINVAL;
4707
4708         ceph_decode_need(&p, end, sizeof(u32), bad);
4709         epoch = ceph_decode_32(&p);
4710
4711         dout("handle_fsmap epoch %u\n", epoch);
4712
4713         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4714         struct_v = ceph_decode_8(&p);
4715         struct_cv = ceph_decode_8(&p);
4716         map_len = ceph_decode_32(&p);
4717
4718         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
4719         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
4720
4721         num_fs = ceph_decode_32(&p);
4722         while (num_fs-- > 0) {
4723                 void *info_p, *info_end;
4724                 u32 info_len;
4725                 u8 info_v, info_cv;
4726                 u32 fscid, namelen;
4727
4728                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4729                 info_v = ceph_decode_8(&p);
4730                 info_cv = ceph_decode_8(&p);
4731                 info_len = ceph_decode_32(&p);
4732                 ceph_decode_need(&p, end, info_len, bad);
4733                 info_p = p;
4734                 info_end = p + info_len;
4735                 p = info_end;
4736
4737                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
4738                 fscid = ceph_decode_32(&info_p);
4739                 namelen = ceph_decode_32(&info_p);
4740                 ceph_decode_need(&info_p, info_end, namelen, bad);
4741
4742                 if (mds_namespace &&
4743                     strlen(mds_namespace) == namelen &&
4744                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
4745                         mount_fscid = fscid;
4746                         break;
4747                 }
4748         }
4749
4750         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
4751         if (mount_fscid != (u32)-1) {
4752                 fsc->client->monc.fs_cluster_id = mount_fscid;
4753                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
4754                                    0, true);
4755                 ceph_monc_renew_subs(&fsc->client->monc);
4756         } else {
4757                 err = -ENOENT;
4758                 goto err_out;
4759         }
4760         return;
4761
4762 bad:
4763         pr_err("error decoding fsmap\n");
4764 err_out:
4765         mutex_lock(&mdsc->mutex);
4766         mdsc->mdsmap_err = err;
4767         __wake_requests(mdsc, &mdsc->waiting_for_map);
4768         mutex_unlock(&mdsc->mutex);
4769 }
4770
4771 /*
4772  * handle mds map update.
4773  */
4774 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4775 {
4776         u32 epoch;
4777         u32 maplen;
4778         void *p = msg->front.iov_base;
4779         void *end = p + msg->front.iov_len;
4780         struct ceph_mdsmap *newmap, *oldmap;
4781         struct ceph_fsid fsid;
4782         int err = -EINVAL;
4783
4784         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
4785         ceph_decode_copy(&p, &fsid, sizeof(fsid));
4786         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
4787                 return;
4788         epoch = ceph_decode_32(&p);
4789         maplen = ceph_decode_32(&p);
4790         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
4791
4792         /* do we need it? */
4793         mutex_lock(&mdsc->mutex);
4794         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
4795                 dout("handle_map epoch %u <= our %u\n",
4796                      epoch, mdsc->mdsmap->m_epoch);
4797                 mutex_unlock(&mdsc->mutex);
4798                 return;
4799         }
4800
4801         newmap = ceph_mdsmap_decode(&p, end);
4802         if (IS_ERR(newmap)) {
4803                 err = PTR_ERR(newmap);
4804                 goto bad_unlock;
4805         }
4806
4807         /* swap into place */
4808         if (mdsc->mdsmap) {
4809                 oldmap = mdsc->mdsmap;
4810                 mdsc->mdsmap = newmap;
4811                 check_new_map(mdsc, newmap, oldmap);
4812                 ceph_mdsmap_destroy(oldmap);
4813         } else {
4814                 mdsc->mdsmap = newmap;  /* first mds map */
4815         }
4816         mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
4817                                         MAX_LFS_FILESIZE);
4818
4819         __wake_requests(mdsc, &mdsc->waiting_for_map);
4820         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
4821                           mdsc->mdsmap->m_epoch);
4822
4823         mutex_unlock(&mdsc->mutex);
4824         schedule_delayed(mdsc);
4825         return;
4826
4827 bad_unlock:
4828         mutex_unlock(&mdsc->mutex);
4829 bad:
4830         pr_err("error decoding mdsmap %d\n", err);
4831         return;
4832 }
4833
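/*
 * Connection reference callbacks: the messenger pins the owning mds
 * session for as long as it holds a reference to the connection.
 */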
4834 static struct ceph_connection *con_get(struct ceph_connection *con)
4835 {
4836         struct ceph_mds_session *s = con->private;
4837
4838         if (ceph_get_mds_session(s))
4839                 return con;
4840         return NULL;
4841 }
4842
4843 static void con_put(struct ceph_connection *con)
4844 {
4845         struct ceph_mds_session *s = con->private;
4846
4847         ceph_put_mds_session(s);
4848 }
4849
4850 /*
4851  * if the client is unresponsive for long enough, the mds will kill
4852  * the session entirely.
4853  */
4854 static void peer_reset(struct ceph_connection *con)
4855 {
4856         struct ceph_mds_session *s = con->private;
4857         struct ceph_mds_client *mdsc = s->s_mdsc;
4858
4859         pr_warn("mds%d closed our session\n", s->s_mds);
4860         send_mds_reconnect(mdsc, s);
4861 }
4862
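/*
 * Incoming message dispatch.  Drop messages for sessions that are no
 * longer registered, otherwise demultiplex by message type; the
 * message reference is always released here.
 */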
4863 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4864 {
4865         struct ceph_mds_session *s = con->private;
4866         struct ceph_mds_client *mdsc = s->s_mdsc;
4867         int type = le16_to_cpu(msg->hdr.type);
4868
4869         mutex_lock(&mdsc->mutex);
4870         if (__verify_registered_session(mdsc, s) < 0) {
4871                 mutex_unlock(&mdsc->mutex);
4872                 goto out;
4873         }
4874         mutex_unlock(&mdsc->mutex);
4875
4876         switch (type) {
4877         case CEPH_MSG_MDS_MAP:
4878                 ceph_mdsc_handle_mdsmap(mdsc, msg);
4879                 break;
4880         case CEPH_MSG_FS_MAP_USER:
4881                 ceph_mdsc_handle_fsmap(mdsc, msg);
4882                 break;
4883         case CEPH_MSG_CLIENT_SESSION:
4884                 handle_session(s, msg);
4885                 break;
4886         case CEPH_MSG_CLIENT_REPLY:
4887                 handle_reply(s, msg);
4888                 break;
4889         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
4890                 handle_forward(mdsc, s, msg);
4891                 break;
4892         case CEPH_MSG_CLIENT_CAPS:
4893                 ceph_handle_caps(s, msg);
4894                 break;
4895         case CEPH_MSG_CLIENT_SNAP:
4896                 ceph_handle_snap(mdsc, s, msg);
4897                 break;
4898         case CEPH_MSG_CLIENT_LEASE:
4899                 handle_lease(mdsc, s, msg);
4900                 break;
4901         case CEPH_MSG_CLIENT_QUOTA:
4902                 ceph_handle_quota(mdsc, s, msg);
4903                 break;
4904
4905         default:
4906                 pr_err("received unknown message type %d %s\n", type,
4907                        ceph_msg_type_name(type));
4908         }
4909 out:
4910         ceph_msg_put(msg);
4911 }
4912
4913 /*
4914  * authentication
4915  */
4916
4917 /*
4918  * Note: returned pointer is the address of a structure that's
4919  * managed separately.  Caller must *not* attempt to free it.
4920  */
4921 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4922                                         int *proto, int force_new)
4923 {
4924         struct ceph_mds_session *s = con->private;
4925         struct ceph_mds_client *mdsc = s->s_mdsc;
4926         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4927         struct ceph_auth_handshake *auth = &s->s_auth;
4928
4929         if (force_new && auth->authorizer) {
4930                 ceph_auth_destroy_authorizer(auth->authorizer);
4931                 auth->authorizer = NULL;
4932         }
4933         if (!auth->authorizer) {
4934                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
4935                                                       auth);
4936                 if (ret)
4937                         return ERR_PTR(ret);
4938         } else {
4939                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
4940                                                       auth);
4941                 if (ret)
4942                         return ERR_PTR(ret);
4943         }
4944         *proto = ac->protocol;
4945
4946         return auth;
4947 }
4948
4949 static int add_authorizer_challenge(struct ceph_connection *con,
4950                                     void *challenge_buf, int challenge_buf_len)
4951 {
4952         struct ceph_mds_session *s = con->private;
4953         struct ceph_mds_client *mdsc = s->s_mdsc;
4954         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4955
4956         return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
4957                                             challenge_buf, challenge_buf_len);
4958 }
4959
4960 static int verify_authorizer_reply(struct ceph_connection *con)
4961 {
4962         struct ceph_mds_session *s = con->private;
4963         struct ceph_mds_client *mdsc = s->s_mdsc;
4964         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4965
4966         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
4967 }
4968
4969 static int invalidate_authorizer(struct ceph_connection *con)
4970 {
4971         struct ceph_mds_session *s = con->private;
4972         struct ceph_mds_client *mdsc = s->s_mdsc;
4973         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4974
4975         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
4976
4977         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
4978 }
4979
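/*
 * Allocate a message for incoming data.  If the messenger already has
 * an in-flight message for this connection, reuse it; otherwise
 * allocate a fresh message with a front buffer of the advertised
 * length.
 */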
4980 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
4981                                 struct ceph_msg_header *hdr, int *skip)
4982 {
4983         struct ceph_msg *msg;
4984         int type = (int) le16_to_cpu(hdr->type);
4985         int front_len = (int) le32_to_cpu(hdr->front_len);
4986
4987         if (con->in_msg)
4988                 return con->in_msg;
4989
4990         *skip = 0;
4991         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
4992         if (!msg) {
4993                 pr_err("unable to allocate msg type %d len %d\n",
4994                        type, front_len);
4995                 return NULL;
4996         }
4997
4998         return msg;
4999 }
5000
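/*
 * Sign outgoing messages and verify signatures on incoming ones using
 * the session's auth handshake.
 */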
5001 static int mds_sign_message(struct ceph_msg *msg)
5002 {
5003        struct ceph_mds_session *s = msg->con->private;
5004        struct ceph_auth_handshake *auth = &s->s_auth;
5005
5006        return ceph_auth_sign_message(auth, msg);
5007 }
5008
5009 static int mds_check_message_signature(struct ceph_msg *msg)
5010 {
5011        struct ceph_mds_session *s = msg->con->private;
5012        struct ceph_auth_handshake *auth = &s->s_auth;
5013
5014        return ceph_auth_check_message_signature(auth, msg);
5015 }
5016
5017 static const struct ceph_connection_operations mds_con_ops = {
5018         .get = con_get,
5019         .put = con_put,
5020         .dispatch = dispatch,
5021         .get_authorizer = get_authorizer,
5022         .add_authorizer_challenge = add_authorizer_challenge,
5023         .verify_authorizer_reply = verify_authorizer_reply,
5024         .invalidate_authorizer = invalidate_authorizer,
5025         .peer_reset = peer_reset,
5026         .alloc_msg = mds_alloc_msg,
5027         .sign_message = mds_sign_message,
5028         .check_message_signature = mds_check_message_signature,
5029 };
5030
5031 /* eof */