ceph: flush cap releases when the session is flushed
[platform/kernel/linux-starfive.git] fs/ceph/mds_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12 #include <linux/bits.h>
13 #include <linux/ktime.h>
14 #include <linux/bitmap.h>
15
16 #include "super.h"
17 #include "mds_client.h"
18
19 #include <linux/ceph/ceph_features.h>
20 #include <linux/ceph/messenger.h>
21 #include <linux/ceph/decode.h>
22 #include <linux/ceph/pagelist.h>
23 #include <linux/ceph/auth.h>
24 #include <linux/ceph/debugfs.h>
25
26 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
27
28 /*
29  * A cluster of MDS (metadata server) daemons is responsible for
30  * managing the file system namespace (the directory hierarchy and
31  * inodes) and for coordinating shared access to storage.  Metadata is
32  * partitioning hierarchically across a number of servers, and that
33  * partitioned hierarchically across a number of servers, and that
34  * in order to balance load.
35  *
36  * The MDS client is primarily responsible for managing synchronous
37  * metadata requests for operations like open, unlink, and so forth.
38  * If there is an MDS failure, we find out about it when we (possibly
39  * request and) receive a new MDS map, and can resubmit affected
40  * requests.
41  *
42  * For the most part, though, we take advantage of a lossless
43  * communications channel to the MDS, and do not need to worry about
44  * timing out or resubmitting requests.
45  *
46  * We maintain a stateful "session" with each MDS we interact with.
47  * Within each session, we send periodic heartbeat messages to ensure
48  * any capabilities or leases we have been issued remain valid.  If
49  * the session times out and goes stale, our leases and capabilities
50  * are no longer valid.
51  */
52
53 struct ceph_reconnect_state {
54         struct ceph_mds_session *session;
55         int nr_caps, nr_realms;
56         struct ceph_pagelist *pagelist;
57         unsigned msg_version;
58         bool allow_multi;
59 };
60
61 static void __wake_requests(struct ceph_mds_client *mdsc,
62                             struct list_head *head);
63 static void ceph_cap_release_work(struct work_struct *work);
64 static void ceph_cap_reclaim_work(struct work_struct *work);
65
66 static const struct ceph_connection_operations mds_con_ops;
67
68
69 /*
70  * mds reply parsing
71  */
72
73 static int parse_reply_info_quota(void **p, void *end,
74                                   struct ceph_mds_reply_info_in *info)
75 {
76         u8 struct_v, struct_compat;
77         u32 struct_len;
78
79         ceph_decode_8_safe(p, end, struct_v, bad);
80         ceph_decode_8_safe(p, end, struct_compat, bad);
81         /* struct_v is expected to be >= 1. we only
82          * understand encoding with struct_compat == 1. */
83         if (!struct_v || struct_compat != 1)
84                 goto bad;
85         ceph_decode_32_safe(p, end, struct_len, bad);
86         ceph_decode_need(p, end, struct_len, bad);
87         end = *p + struct_len;
88         ceph_decode_64_safe(p, end, info->max_bytes, bad);
89         ceph_decode_64_safe(p, end, info->max_files, bad);
90         *p = end;
91         return 0;
92 bad:
93         return -EIO;
94 }
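/*
 * Editor's sketch (not part of the original file): the framing decoded
 * above is the generic "versioned struct" layout used by newer MDS
 * encodings.  For the quota blob it works out to:
 *
 *     u8  struct_v       (expected >= 1)
 *     u8  struct_compat  (must be 1 for us to proceed)
 *     u32 struct_len     (length of the payload that follows)
 *     u64 max_bytes
 *     u64 max_files
 *     ... any newer fields are skipped by jumping to *p + struct_len
 *
 * Clamping 'end' to *p + struct_len is what lets an older client ignore
 * fields appended by a newer server.
 */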
95
96 /*
97  * parse individual inode info
98  */
99 static int parse_reply_info_in(void **p, void *end,
100                                struct ceph_mds_reply_info_in *info,
101                                u64 features)
102 {
103         int err = 0;
104         u8 struct_v = 0;
105
106         if (features == (u64)-1) {
107                 u32 struct_len;
108                 u8 struct_compat;
109                 ceph_decode_8_safe(p, end, struct_v, bad);
110                 ceph_decode_8_safe(p, end, struct_compat, bad);
111                 /* struct_v is expected to be >= 1. we only understand
112                  * encoding with struct_compat == 1. */
113                 if (!struct_v || struct_compat != 1)
114                         goto bad;
115                 ceph_decode_32_safe(p, end, struct_len, bad);
116                 ceph_decode_need(p, end, struct_len, bad);
117                 end = *p + struct_len;
118         }
119
120         ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
121         info->in = *p;
122         *p += sizeof(struct ceph_mds_reply_inode) +
123                 sizeof(*info->in->fragtree.splits) *
124                 le32_to_cpu(info->in->fragtree.nsplits);
125
126         ceph_decode_32_safe(p, end, info->symlink_len, bad);
127         ceph_decode_need(p, end, info->symlink_len, bad);
128         info->symlink = *p;
129         *p += info->symlink_len;
130
131         ceph_decode_copy_safe(p, end, &info->dir_layout,
132                               sizeof(info->dir_layout), bad);
133         ceph_decode_32_safe(p, end, info->xattr_len, bad);
134         ceph_decode_need(p, end, info->xattr_len, bad);
135         info->xattr_data = *p;
136         *p += info->xattr_len;
137
138         if (features == (u64)-1) {
139                 /* inline data */
140                 ceph_decode_64_safe(p, end, info->inline_version, bad);
141                 ceph_decode_32_safe(p, end, info->inline_len, bad);
142                 ceph_decode_need(p, end, info->inline_len, bad);
143                 info->inline_data = *p;
144                 *p += info->inline_len;
145                 /* quota */
146                 err = parse_reply_info_quota(p, end, info);
147                 if (err < 0)
148                         goto out_bad;
149                 /* pool namespace */
150                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
151                 if (info->pool_ns_len > 0) {
152                         ceph_decode_need(p, end, info->pool_ns_len, bad);
153                         info->pool_ns_data = *p;
154                         *p += info->pool_ns_len;
155                 }
156
157                 /* btime */
158                 ceph_decode_need(p, end, sizeof(info->btime), bad);
159                 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
160
161                 /* change attribute */
162                 ceph_decode_64_safe(p, end, info->change_attr, bad);
163
164                 /* dir pin */
165                 if (struct_v >= 2) {
166                         ceph_decode_32_safe(p, end, info->dir_pin, bad);
167                 } else {
168                         info->dir_pin = -ENODATA;
169                 }
170
171                 /* snapshot birth time, remains zero for v<=2 */
172                 if (struct_v >= 3) {
173                         ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
174                         ceph_decode_copy(p, &info->snap_btime,
175                                          sizeof(info->snap_btime));
176                 } else {
177                         memset(&info->snap_btime, 0, sizeof(info->snap_btime));
178                 }
179
180                 /* snapshot count, remains zero for v<=3 */
181                 if (struct_v >= 4) {
182                         ceph_decode_64_safe(p, end, info->rsnaps, bad);
183                 } else {
184                         info->rsnaps = 0;
185                 }
186
187                 *p = end;
188         } else {
189                 if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
190                         ceph_decode_64_safe(p, end, info->inline_version, bad);
191                         ceph_decode_32_safe(p, end, info->inline_len, bad);
192                         ceph_decode_need(p, end, info->inline_len, bad);
193                         info->inline_data = *p;
194                         *p += info->inline_len;
195                 } else
196                         info->inline_version = CEPH_INLINE_NONE;
197
198                 if (features & CEPH_FEATURE_MDS_QUOTA) {
199                         err = parse_reply_info_quota(p, end, info);
200                         if (err < 0)
201                                 goto out_bad;
202                 } else {
203                         info->max_bytes = 0;
204                         info->max_files = 0;
205                 }
206
207                 info->pool_ns_len = 0;
208                 info->pool_ns_data = NULL;
209                 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
210                         ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
211                         if (info->pool_ns_len > 0) {
212                                 ceph_decode_need(p, end, info->pool_ns_len, bad);
213                                 info->pool_ns_data = *p;
214                                 *p += info->pool_ns_len;
215                         }
216                 }
217
218                 if (features & CEPH_FEATURE_FS_BTIME) {
219                         ceph_decode_need(p, end, sizeof(info->btime), bad);
220                         ceph_decode_copy(p, &info->btime, sizeof(info->btime));
221                         ceph_decode_64_safe(p, end, info->change_attr, bad);
222                 }
223
224                 info->dir_pin = -ENODATA;
225                 /* info->snap_btime and info->rsnaps remain zero */
226         }
227         return 0;
228 bad:
229         err = -EIO;
230 out_bad:
231         return err;
232 }
233
234 static int parse_reply_info_dir(void **p, void *end,
235                                 struct ceph_mds_reply_dirfrag **dirfrag,
236                                 u64 features)
237 {
238         if (features == (u64)-1) {
239                 u8 struct_v, struct_compat;
240                 u32 struct_len;
241                 ceph_decode_8_safe(p, end, struct_v, bad);
242                 ceph_decode_8_safe(p, end, struct_compat, bad);
243                 /* struct_v is expected to be >= 1. we only understand
244          * encoding with struct_compat == 1. */
245                 if (!struct_v || struct_compat != 1)
246                         goto bad;
247                 ceph_decode_32_safe(p, end, struct_len, bad);
248                 ceph_decode_need(p, end, struct_len, bad);
249                 end = *p + struct_len;
250         }
251
252         ceph_decode_need(p, end, sizeof(**dirfrag), bad);
253         *dirfrag = *p;
254         *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
255         if (unlikely(*p > end))
256                 goto bad;
257         if (features == (u64)-1)
258                 *p = end;
259         return 0;
260 bad:
261         return -EIO;
262 }
263
264 static int parse_reply_info_lease(void **p, void *end,
265                                   struct ceph_mds_reply_lease **lease,
266                                   u64 features)
267 {
268         if (features == (u64)-1) {
269                 u8 struct_v, struct_compat;
270                 u32 struct_len;
271                 ceph_decode_8_safe(p, end, struct_v, bad);
272                 ceph_decode_8_safe(p, end, struct_compat, bad);
273                 /* struct_v is expected to be >= 1. we only understand
274          * encoding with struct_compat == 1. */
275                 if (!struct_v || struct_compat != 1)
276                         goto bad;
277                 ceph_decode_32_safe(p, end, struct_len, bad);
278                 ceph_decode_need(p, end, struct_len, bad);
279                 end = *p + struct_len;
280         }
281
282         ceph_decode_need(p, end, sizeof(**lease), bad);
283         *lease = *p;
284         *p += sizeof(**lease);
285         if (features == (u64)-1)
286                 *p = end;
287         return 0;
288 bad:
289         return -EIO;
290 }
291
292 /*
293  * parse a normal reply, which may contain a (dir+)dentry and/or a
294  * target inode.
295  */
296 static int parse_reply_info_trace(void **p, void *end,
297                                   struct ceph_mds_reply_info_parsed *info,
298                                   u64 features)
299 {
300         int err;
301
302         if (info->head->is_dentry) {
303                 err = parse_reply_info_in(p, end, &info->diri, features);
304                 if (err < 0)
305                         goto out_bad;
306
307                 err = parse_reply_info_dir(p, end, &info->dirfrag, features);
308                 if (err < 0)
309                         goto out_bad;
310
311                 ceph_decode_32_safe(p, end, info->dname_len, bad);
312                 ceph_decode_need(p, end, info->dname_len, bad);
313                 info->dname = *p;
314                 *p += info->dname_len;
315
316                 err = parse_reply_info_lease(p, end, &info->dlease, features);
317                 if (err < 0)
318                         goto out_bad;
319         }
320
321         if (info->head->is_target) {
322                 err = parse_reply_info_in(p, end, &info->targeti, features);
323                 if (err < 0)
324                         goto out_bad;
325         }
326
327         if (unlikely(*p != end))
328                 goto bad;
329         return 0;
330
331 bad:
332         err = -EIO;
333 out_bad:
334         pr_err("problem parsing mds trace %d\n", err);
335         return err;
336 }
337
338 /*
339  * parse readdir results
340  */
341 static int parse_reply_info_readdir(void **p, void *end,
342                                 struct ceph_mds_reply_info_parsed *info,
343                                 u64 features)
344 {
345         u32 num, i = 0;
346         int err;
347
348         err = parse_reply_info_dir(p, end, &info->dir_dir, features);
349         if (err < 0)
350                 goto out_bad;
351
352         ceph_decode_need(p, end, sizeof(num) + 2, bad);
353         num = ceph_decode_32(p);
354         {
355                 u16 flags = ceph_decode_16(p);
356                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
357                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
358                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
359                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
360         }
361         if (num == 0)
362                 goto done;
363
364         BUG_ON(!info->dir_entries);
365         if ((unsigned long)(info->dir_entries + num) >
366             (unsigned long)info->dir_entries + info->dir_buf_size) {
367                 pr_err("dir contents are larger than expected\n");
368                 WARN_ON(1);
369                 goto bad;
370         }
371
372         info->dir_nr = num;
373         while (num) {
374                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
375                 /* dentry */
376                 ceph_decode_32_safe(p, end, rde->name_len, bad);
377                 ceph_decode_need(p, end, rde->name_len, bad);
378                 rde->name = *p;
379                 *p += rde->name_len;
380                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
381
382                 /* dentry lease */
383                 err = parse_reply_info_lease(p, end, &rde->lease, features);
384                 if (err)
385                         goto out_bad;
386                 /* inode */
387                 err = parse_reply_info_in(p, end, &rde->inode, features);
388                 if (err < 0)
389                         goto out_bad;
390                 /* ceph_readdir_prepopulate() will update it */
391                 rde->offset = 0;
392                 i++;
393                 num--;
394         }
395
396 done:
397         /* Skip over any unrecognized fields */
398         *p = end;
399         return 0;
400
401 bad:
402         err = -EIO;
403 out_bad:
404         pr_err("problem parsing dir contents %d\n", err);
405         return err;
406 }
407
408 /*
409  * parse fcntl F_GETLK results
410  */
411 static int parse_reply_info_filelock(void **p, void *end,
412                                      struct ceph_mds_reply_info_parsed *info,
413                                      u64 features)
414 {
415         if (*p + sizeof(*info->filelock_reply) > end)
416                 goto bad;
417
418         info->filelock_reply = *p;
419
420         /* Skip over any unrecognized fields */
421         *p = end;
422         return 0;
423 bad:
424         return -EIO;
425 }
426
427
428 #if BITS_PER_LONG == 64
429
430 #define DELEGATED_INO_AVAILABLE         xa_mk_value(1)
431
432 static int ceph_parse_deleg_inos(void **p, void *end,
433                                  struct ceph_mds_session *s)
434 {
435         u32 sets;
436
437         ceph_decode_32_safe(p, end, sets, bad);
438         dout("got %u sets of delegated inodes\n", sets);
439         while (sets--) {
440                 u64 start, len;
441
442                 ceph_decode_64_safe(p, end, start, bad);
443                 ceph_decode_64_safe(p, end, len, bad);
444
445                 /* Don't accept a delegation of system inodes */
446                 if (start < CEPH_INO_SYSTEM_BASE) {
447                         pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
448                                         start, len);
449                         continue;
450                 }
451                 while (len--) {
452                         int err = xa_insert(&s->s_delegated_inos, start++,
453                                             DELEGATED_INO_AVAILABLE,
454                                             GFP_KERNEL);
455                         if (!err) {
456                                 dout("added delegated inode 0x%llx\n",
457                                      start - 1);
458                         } else if (err == -EBUSY) {
459                                 pr_warn("MDS delegated inode 0x%llx more than once.\n",
460                                         start - 1);
461                         } else {
462                                 return err;
463                         }
464                 }
465         }
466         return 0;
467 bad:
468         return -EIO;
469 }
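/*
 * Illustrative example (editor's addition): a delegation of (start, len=3)
 * from the MDS ends up as three xarray entries in s->s_delegated_inos:
 *
 *     start     -> DELEGATED_INO_AVAILABLE
 *     start + 1 -> DELEGATED_INO_AVAILABLE
 *     start + 2 -> DELEGATED_INO_AVAILABLE
 *
 * ceph_get_deleg_ino() below hands these out one at a time by erasing the
 * first available entry, and ceph_restore_deleg_ino() puts a number back
 * when a request that reserved it ends up not using it.
 */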
470
471 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
472 {
473         unsigned long ino;
474         void *val;
475
476         xa_for_each(&s->s_delegated_inos, ino, val) {
477                 val = xa_erase(&s->s_delegated_inos, ino);
478                 if (val == DELEGATED_INO_AVAILABLE)
479                         return ino;
480         }
481         return 0;
482 }
483
484 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
485 {
486         return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
487                          GFP_KERNEL);
488 }
489 #else /* BITS_PER_LONG == 64 */
490 /*
491  * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
492  * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
493  * and bottom words?
494  */
495 static int ceph_parse_deleg_inos(void **p, void *end,
496                                  struct ceph_mds_session *s)
497 {
498         u32 sets;
499
500         ceph_decode_32_safe(p, end, sets, bad);
501         if (sets)
502                 ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
503         return 0;
504 bad:
505         return -EIO;
506 }
507
508 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
509 {
510         return 0;
511 }
512
513 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
514 {
515         return 0;
516 }
517 #endif /* BITS_PER_LONG == 64 */
518
519 /*
520  * parse create results
521  */
522 static int parse_reply_info_create(void **p, void *end,
523                                   struct ceph_mds_reply_info_parsed *info,
524                                   u64 features, struct ceph_mds_session *s)
525 {
526         int ret;
527
528         if (features == (u64)-1 ||
529             (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
530                 if (*p == end) {
531                         /* Malformed reply? */
532                         info->has_create_ino = false;
533                 } else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
534                         info->has_create_ino = true;
535                         /* struct_v, struct_compat, and len */
536                         ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
537                         ceph_decode_64_safe(p, end, info->ino, bad);
538                         ret = ceph_parse_deleg_inos(p, end, s);
539                         if (ret)
540                                 return ret;
541                 } else {
542                         /* legacy */
543                         ceph_decode_64_safe(p, end, info->ino, bad);
544                         info->has_create_ino = true;
545                 }
546         } else {
547                 if (*p != end)
548                         goto bad;
549         }
550
551         /* Skip over any unrecognized fields */
552         *p = end;
553         return 0;
554 bad:
555         return -EIO;
556 }
557
558 static int parse_reply_info_getvxattr(void **p, void *end,
559                                       struct ceph_mds_reply_info_parsed *info,
560                                       u64 features)
561 {
562         u32 value_len;
563
564         ceph_decode_skip_8(p, end, bad); /* skip current version: 1 */
565         ceph_decode_skip_8(p, end, bad); /* skip first version: 1 */
566         ceph_decode_skip_32(p, end, bad); /* skip payload length */
567
568         ceph_decode_32_safe(p, end, value_len, bad);
569
570         if (value_len == end - *p) {
571                 info->xattr_info.xattr_value = *p;
572                 info->xattr_info.xattr_value_len = value_len;
573                 *p = end;
574                 return value_len;
575         }
576 bad:
577         return -EIO;
578 }
579
580 /*
581  * parse extra results
582  */
583 static int parse_reply_info_extra(void **p, void *end,
584                                   struct ceph_mds_reply_info_parsed *info,
585                                   u64 features, struct ceph_mds_session *s)
586 {
587         u32 op = le32_to_cpu(info->head->op);
588
589         if (op == CEPH_MDS_OP_GETFILELOCK)
590                 return parse_reply_info_filelock(p, end, info, features);
591         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
592                 return parse_reply_info_readdir(p, end, info, features);
593         else if (op == CEPH_MDS_OP_CREATE)
594                 return parse_reply_info_create(p, end, info, features, s);
595         else if (op == CEPH_MDS_OP_GETVXATTR)
596                 return parse_reply_info_getvxattr(p, end, info, features);
597         else
598                 return -EIO;
599 }
600
601 /*
602  * parse entire mds reply
603  */
604 static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
605                             struct ceph_mds_reply_info_parsed *info,
606                             u64 features)
607 {
608         void *p, *end;
609         u32 len;
610         int err;
611
612         info->head = msg->front.iov_base;
613         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
614         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
615
616         /* trace */
617         ceph_decode_32_safe(&p, end, len, bad);
618         if (len > 0) {
619                 ceph_decode_need(&p, end, len, bad);
620                 err = parse_reply_info_trace(&p, p+len, info, features);
621                 if (err < 0)
622                         goto out_bad;
623         }
624
625         /* extra */
626         ceph_decode_32_safe(&p, end, len, bad);
627         if (len > 0) {
628                 ceph_decode_need(&p, end, len, bad);
629                 err = parse_reply_info_extra(&p, p+len, info, features, s);
630                 if (err < 0)
631                         goto out_bad;
632         }
633
634         /* snap blob */
635         ceph_decode_32_safe(&p, end, len, bad);
636         info->snapblob_len = len;
637         info->snapblob = p;
638         p += len;
639
640         if (p != end)
641                 goto bad;
642         return 0;
643
644 bad:
645         err = -EIO;
646 out_bad:
647         pr_err("mds parse_reply err %d\n", err);
648         return err;
649 }
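/*
 * Editor's note on the overall layout parsed above (a sketch derived from
 * this function rather than from separate protocol documentation):
 *
 *     struct ceph_mds_reply_head
 *     u32 trace_len, trace blob  (dentry/inode trace, may be empty)
 *     u32 extra_len, extra blob  (op-specific: readdir, filelock, create, ...)
 *     u32 snap_len,  snap blob   (snap realm update, saved for later decode)
 *
 * Each blob is handed to its parser with 'end' set to the start of the blob
 * plus its declared length, so a malformed sub-blob cannot run past its own
 * bounds.
 */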
650
651 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
652 {
653         if (!info->dir_entries)
654                 return;
655         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
656 }
657
658 /*
659  * In the async unlink case the kclient won't wait for the first reply
660  * from the MDS; it just drops all the links, unhashes the dentry and
661  * then succeeds immediately.
662  *
663  * For any new create/link/rename, etc. requests that follow and use the
664  * same file names, we must wait for the first reply of the inflight
665  * unlink request, or the MDS may fail these following requests with
666  * -EEXIST if the inflight async unlink request was delayed for some
667  * reason.
668  *
669  * And the worst case is that for a non-async openc request it will
670  * successfully open the file if the CDentry hasn't been unlinked yet,
671  * but later the previous delayed async unlink request will remove the
672  * CDentry. That means the just-created file may be deleted later by
673  * accident.
674  *
675  * We need to wait for the inflight async unlink requests to finish
676  * when creating new files/directories with the same file names.
677  */
678 int ceph_wait_on_conflict_unlink(struct dentry *dentry)
679 {
680         struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
681         struct dentry *pdentry = dentry->d_parent;
682         struct dentry *udentry, *found = NULL;
683         struct ceph_dentry_info *di;
684         struct qstr dname;
685         u32 hash = dentry->d_name.hash;
686         int err;
687
688         dname.name = dentry->d_name.name;
689         dname.len = dentry->d_name.len;
690
691         rcu_read_lock();
692         hash_for_each_possible_rcu(fsc->async_unlink_conflict, di,
693                                    hnode, hash) {
694                 udentry = di->dentry;
695
696                 spin_lock(&udentry->d_lock);
697                 if (udentry->d_name.hash != hash)
698                         goto next;
699                 if (unlikely(udentry->d_parent != pdentry))
700                         goto next;
701                 if (!hash_hashed(&di->hnode))
702                         goto next;
703
704                 if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
705                         pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
706                                 __func__, dentry, dentry);
707
708                 if (!d_same_name(udentry, pdentry, &dname))
709                         goto next;
710
711                 spin_unlock(&udentry->d_lock);
712                 found = dget(udentry);
713                 break;
714 next:
715                 spin_unlock(&udentry->d_lock);
716         }
717         rcu_read_unlock();
718
719         if (likely(!found))
720                 return 0;
721
722         dout("%s dentry %p:%pd conflict with old %p:%pd\n", __func__,
723              dentry, dentry, found, found);
724
725         err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
726                           TASK_KILLABLE);
727         dput(found);
728         return err;
729 }
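/*
 * Hedged usage sketch (editor's addition, not a caller from this file):
 * code on the create paths is expected to do something along the lines of
 *
 *     err = ceph_wait_on_conflict_unlink(dentry);
 *     if (err)
 *             return err;
 *     ... build and send the create/mknod/link/rename request ...
 *
 * so that a new name is never submitted to the MDS while an async unlink
 * of the same name may still be in flight.
 */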
730
731
732 /*
733  * sessions
734  */
735 const char *ceph_session_state_name(int s)
736 {
737         switch (s) {
738         case CEPH_MDS_SESSION_NEW: return "new";
739         case CEPH_MDS_SESSION_OPENING: return "opening";
740         case CEPH_MDS_SESSION_OPEN: return "open";
741         case CEPH_MDS_SESSION_HUNG: return "hung";
742         case CEPH_MDS_SESSION_CLOSING: return "closing";
743         case CEPH_MDS_SESSION_CLOSED: return "closed";
744         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
745         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
746         case CEPH_MDS_SESSION_REJECTED: return "rejected";
747         default: return "???";
748         }
749 }
750
751 struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
752 {
753         if (refcount_inc_not_zero(&s->s_ref))
754                 return s;
755         return NULL;
756 }
757
758 void ceph_put_mds_session(struct ceph_mds_session *s)
759 {
760         if (IS_ERR_OR_NULL(s))
761                 return;
762
763         if (refcount_dec_and_test(&s->s_ref)) {
764                 if (s->s_auth.authorizer)
765                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
766                 WARN_ON(mutex_is_locked(&s->s_mutex));
767                 xa_destroy(&s->s_delegated_inos);
768                 kfree(s);
769         }
770 }
771
772 /*
773  * called under mdsc->mutex
774  */
775 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
776                                                    int mds)
777 {
778         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
779                 return NULL;
780         return ceph_get_mds_session(mdsc->sessions[mds]);
781 }
782
783 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
784 {
785         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
786                 return false;
787         else
788                 return true;
789 }
790
791 static int __verify_registered_session(struct ceph_mds_client *mdsc,
792                                        struct ceph_mds_session *s)
793 {
794         if (s->s_mds >= mdsc->max_sessions ||
795             mdsc->sessions[s->s_mds] != s)
796                 return -ENOENT;
797         return 0;
798 }
799
800 /*
801  * create+register a new session for given mds.
802  * called under mdsc->mutex.
803  */
804 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
805                                                  int mds)
806 {
807         struct ceph_mds_session *s;
808
809         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
810                 return ERR_PTR(-EIO);
811
812         if (mds >= mdsc->mdsmap->possible_max_rank)
813                 return ERR_PTR(-EINVAL);
814
815         s = kzalloc(sizeof(*s), GFP_NOFS);
816         if (!s)
817                 return ERR_PTR(-ENOMEM);
818
819         if (mds >= mdsc->max_sessions) {
820                 int newmax = 1 << get_count_order(mds + 1);
821                 struct ceph_mds_session **sa;
822
823                 dout("%s: realloc to %d\n", __func__, newmax);
824                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
825                 if (!sa)
826                         goto fail_realloc;
827                 if (mdsc->sessions) {
828                         memcpy(sa, mdsc->sessions,
829                                mdsc->max_sessions * sizeof(void *));
830                         kfree(mdsc->sessions);
831                 }
832                 mdsc->sessions = sa;
833                 mdsc->max_sessions = newmax;
834         }
835
836         dout("%s: mds%d\n", __func__, mds);
837         s->s_mdsc = mdsc;
838         s->s_mds = mds;
839         s->s_state = CEPH_MDS_SESSION_NEW;
840         mutex_init(&s->s_mutex);
841
842         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
843
844         atomic_set(&s->s_cap_gen, 1);
845         s->s_cap_ttl = jiffies - 1;
846
847         spin_lock_init(&s->s_cap_lock);
848         INIT_LIST_HEAD(&s->s_caps);
849         refcount_set(&s->s_ref, 1);
850         INIT_LIST_HEAD(&s->s_waiting);
851         INIT_LIST_HEAD(&s->s_unsafe);
852         xa_init(&s->s_delegated_inos);
853         INIT_LIST_HEAD(&s->s_cap_releases);
854         INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
855
856         INIT_LIST_HEAD(&s->s_cap_dirty);
857         INIT_LIST_HEAD(&s->s_cap_flushing);
858
859         mdsc->sessions[mds] = s;
860         atomic_inc(&mdsc->num_sessions);
861         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
862
863         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
864                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
865
866         return s;
867
868 fail_realloc:
869         kfree(s);
870         return ERR_PTR(-ENOMEM);
871 }
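/*
 * Editor's note: the sessions[] array grows in powers of two.  For example,
 * registering mds5 with max_sessions == 4 gives
 * newmax = 1 << get_count_order(5 + 1) = 8, so ranks 0-7 fit without
 * another reallocation until mds8 or higher shows up.
 */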
872
873 /*
874  * called under mdsc->mutex
875  */
876 static void __unregister_session(struct ceph_mds_client *mdsc,
877                                struct ceph_mds_session *s)
878 {
879         dout("__unregister_session mds%d %p\n", s->s_mds, s);
880         BUG_ON(mdsc->sessions[s->s_mds] != s);
881         mdsc->sessions[s->s_mds] = NULL;
882         ceph_con_close(&s->s_con);
883         ceph_put_mds_session(s);
884         atomic_dec(&mdsc->num_sessions);
885 }
886
887 /*
888  * drop session refs in request.
889  *
890  * should be last request ref, or hold mdsc->mutex
891  */
892 static void put_request_session(struct ceph_mds_request *req)
893 {
894         if (req->r_session) {
895                 ceph_put_mds_session(req->r_session);
896                 req->r_session = NULL;
897         }
898 }
899
900 void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
901                                 void (*cb)(struct ceph_mds_session *),
902                                 bool check_state)
903 {
904         int mds;
905
906         mutex_lock(&mdsc->mutex);
907         for (mds = 0; mds < mdsc->max_sessions; ++mds) {
908                 struct ceph_mds_session *s;
909
910                 s = __ceph_lookup_mds_session(mdsc, mds);
911                 if (!s)
912                         continue;
913
914                 if (check_state && !check_session_state(s)) {
915                         ceph_put_mds_session(s);
916                         continue;
917                 }
918
919                 mutex_unlock(&mdsc->mutex);
920                 cb(s);
921                 ceph_put_mds_session(s);
922                 mutex_lock(&mdsc->mutex);
923         }
924         mutex_unlock(&mdsc->mutex);
925 }
926
927 void ceph_mdsc_release_request(struct kref *kref)
928 {
929         struct ceph_mds_request *req = container_of(kref,
930                                                     struct ceph_mds_request,
931                                                     r_kref);
932         ceph_mdsc_release_dir_caps_no_check(req);
933         destroy_reply_info(&req->r_reply_info);
934         if (req->r_request)
935                 ceph_msg_put(req->r_request);
936         if (req->r_reply)
937                 ceph_msg_put(req->r_reply);
938         if (req->r_inode) {
939                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
940                 iput(req->r_inode);
941         }
942         if (req->r_parent) {
943                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
944                 iput(req->r_parent);
945         }
946         iput(req->r_target_inode);
947         if (req->r_dentry)
948                 dput(req->r_dentry);
949         if (req->r_old_dentry)
950                 dput(req->r_old_dentry);
951         if (req->r_old_dentry_dir) {
952                 /*
953                  * track (and drop pins for) r_old_dentry_dir
954                  * separately, since r_old_dentry's d_parent may have
955                  * changed between the dir mutex being dropped and
956                  * this request being freed.
957                  */
958                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
959                                   CEPH_CAP_PIN);
960                 iput(req->r_old_dentry_dir);
961         }
962         kfree(req->r_path1);
963         kfree(req->r_path2);
964         put_cred(req->r_cred);
965         if (req->r_pagelist)
966                 ceph_pagelist_release(req->r_pagelist);
967         put_request_session(req);
968         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
969         WARN_ON_ONCE(!list_empty(&req->r_wait));
970         kmem_cache_free(ceph_mds_request_cachep, req);
971 }
972
973 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
974
975 /*
976  * lookup request, bump ref if found.
977  *
978  * called under mdsc->mutex.
979  */
980 static struct ceph_mds_request *
981 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
982 {
983         struct ceph_mds_request *req;
984
985         req = lookup_request(&mdsc->request_tree, tid);
986         if (req)
987                 ceph_mdsc_get_request(req);
988
989         return req;
990 }
991
992 /*
993  * Register an in-flight request, and assign a tid.  Link to the
994  * directory we are modifying (if any).
995  *
996  * Called under mdsc->mutex.
997  */
998 static void __register_request(struct ceph_mds_client *mdsc,
999                                struct ceph_mds_request *req,
1000                                struct inode *dir)
1001 {
1002         int ret = 0;
1003
1004         req->r_tid = ++mdsc->last_tid;
1005         if (req->r_num_caps) {
1006                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
1007                                         req->r_num_caps);
1008                 if (ret < 0) {
1009                         pr_err("__register_request %p "
1010                                "failed to reserve caps: %d\n", req, ret);
1011                         /* set req->r_err to fail early from __do_request */
1012                         req->r_err = ret;
1013                         return;
1014                 }
1015         }
1016         dout("__register_request %p tid %lld\n", req, req->r_tid);
1017         ceph_mdsc_get_request(req);
1018         insert_request(&mdsc->request_tree, req);
1019
1020         req->r_cred = get_current_cred();
1021
1022         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
1023                 mdsc->oldest_tid = req->r_tid;
1024
1025         if (dir) {
1026                 struct ceph_inode_info *ci = ceph_inode(dir);
1027
1028                 ihold(dir);
1029                 req->r_unsafe_dir = dir;
1030                 spin_lock(&ci->i_unsafe_lock);
1031                 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
1032                 spin_unlock(&ci->i_unsafe_lock);
1033         }
1034 }
1035
1036 static void __unregister_request(struct ceph_mds_client *mdsc,
1037                                  struct ceph_mds_request *req)
1038 {
1039         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
1040
1041         /* Never leave an unregistered request on an unsafe list! */
1042         list_del_init(&req->r_unsafe_item);
1043
1044         if (req->r_tid == mdsc->oldest_tid) {
1045                 struct rb_node *p = rb_next(&req->r_node);
1046                 mdsc->oldest_tid = 0;
1047                 while (p) {
1048                         struct ceph_mds_request *next_req =
1049                                 rb_entry(p, struct ceph_mds_request, r_node);
1050                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
1051                                 mdsc->oldest_tid = next_req->r_tid;
1052                                 break;
1053                         }
1054                         p = rb_next(p);
1055                 }
1056         }
1057
1058         erase_request(&mdsc->request_tree, req);
1059
1060         if (req->r_unsafe_dir) {
1061                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
1062                 spin_lock(&ci->i_unsafe_lock);
1063                 list_del_init(&req->r_unsafe_dir_item);
1064                 spin_unlock(&ci->i_unsafe_lock);
1065         }
1066         if (req->r_target_inode &&
1067             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
1068                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
1069                 spin_lock(&ci->i_unsafe_lock);
1070                 list_del_init(&req->r_unsafe_target_item);
1071                 spin_unlock(&ci->i_unsafe_lock);
1072         }
1073
1074         if (req->r_unsafe_dir) {
1075                 iput(req->r_unsafe_dir);
1076                 req->r_unsafe_dir = NULL;
1077         }
1078
1079         complete_all(&req->r_safe_completion);
1080
1081         ceph_mdsc_put_request(req);
1082 }
1083
1084 /*
1085  * Walk back up the dentry tree until we hit a dentry representing a
1086  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
1087  * when calling this) to ensure that the objects won't disappear while we're
1088  * working with them. Once we hit a candidate dentry, we attempt to take a
1089  * reference to it, and return that as the result.
1090  */
1091 static struct inode *get_nonsnap_parent(struct dentry *dentry)
1092 {
1093         struct inode *inode = NULL;
1094
1095         while (dentry && !IS_ROOT(dentry)) {
1096                 inode = d_inode_rcu(dentry);
1097                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
1098                         break;
1099                 dentry = dentry->d_parent;
1100         }
1101         if (inode)
1102                 inode = igrab(inode);
1103         return inode;
1104 }
1105
1106 /*
1107  * Choose mds to send request to next.  If there is a hint set in the
1108  * request (e.g., due to a prior forward hint from the mds), use that.
1109  * Otherwise, consult frag tree and/or caps to identify the
1110  * appropriate mds.  If all else fails, choose randomly.
1111  *
1112  * Called under mdsc->mutex.
1113  */
1114 static int __choose_mds(struct ceph_mds_client *mdsc,
1115                         struct ceph_mds_request *req,
1116                         bool *random)
1117 {
1118         struct inode *inode;
1119         struct ceph_inode_info *ci;
1120         struct ceph_cap *cap;
1121         int mode = req->r_direct_mode;
1122         int mds = -1;
1123         u32 hash = req->r_direct_hash;
1124         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1125
1126         if (random)
1127                 *random = false;
1128
1129         /*
1130          * is there a specific mds we should try?  ignore hint if we have
1131          * no session and the mds is not up (active or recovering).
1132          */
1133         if (req->r_resend_mds >= 0 &&
1134             (__have_session(mdsc, req->r_resend_mds) ||
1135              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1136                 dout("%s using resend_mds mds%d\n", __func__,
1137                      req->r_resend_mds);
1138                 return req->r_resend_mds;
1139         }
1140
1141         if (mode == USE_RANDOM_MDS)
1142                 goto random;
1143
1144         inode = NULL;
1145         if (req->r_inode) {
1146                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1147                         inode = req->r_inode;
1148                         ihold(inode);
1149                 } else {
1150                         /* req->r_dentry is non-null for LSSNAP request */
1151                         rcu_read_lock();
1152                         inode = get_nonsnap_parent(req->r_dentry);
1153                         rcu_read_unlock();
1154                         dout("%s using snapdir's parent %p\n", __func__, inode);
1155                 }
1156         } else if (req->r_dentry) {
1157                 /* ignore race with rename; old or new d_parent is okay */
1158                 struct dentry *parent;
1159                 struct inode *dir;
1160
1161                 rcu_read_lock();
1162                 parent = READ_ONCE(req->r_dentry->d_parent);
1163                 dir = req->r_parent ? : d_inode_rcu(parent);
1164
1165                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
1166                         /*  not this fs or parent went negative */
1167                         inode = d_inode(req->r_dentry);
1168                         if (inode)
1169                                 ihold(inode);
1170                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
1171                         /* direct snapped/virtual snapdir requests
1172                          * based on parent dir inode */
1173                         inode = get_nonsnap_parent(parent);
1174                         dout("%s using nonsnap parent %p\n", __func__, inode);
1175                 } else {
1176                         /* dentry target */
1177                         inode = d_inode(req->r_dentry);
1178                         if (!inode || mode == USE_AUTH_MDS) {
1179                                 /* dir + name */
1180                                 inode = igrab(dir);
1181                                 hash = ceph_dentry_hash(dir, req->r_dentry);
1182                                 is_hash = true;
1183                         } else {
1184                                 ihold(inode);
1185                         }
1186                 }
1187                 rcu_read_unlock();
1188         }
1189
1190         dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
1191              hash, mode);
1192         if (!inode)
1193                 goto random;
1194         ci = ceph_inode(inode);
1195
1196         if (is_hash && S_ISDIR(inode->i_mode)) {
1197                 struct ceph_inode_frag frag;
1198                 int found;
1199
1200                 ceph_choose_frag(ci, hash, &frag, &found);
1201                 if (found) {
1202                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
1203                                 u8 r;
1204
1205                                 /* choose a random replica */
1206                                 get_random_bytes(&r, 1);
1207                                 r %= frag.ndist;
1208                                 mds = frag.dist[r];
1209                                 dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
1210                                      __func__, inode, ceph_vinop(inode),
1211                                      frag.frag, mds, (int)r, frag.ndist);
1212                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1213                                     CEPH_MDS_STATE_ACTIVE &&
1214                                     !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
1215                                         goto out;
1216                         }
1217
1218                         /* since this file/dir wasn't known to be
1219                          * replicated, we want to look for the
1220                          * authoritative mds. */
1221                         if (frag.mds >= 0) {
1222                                 /* choose auth mds */
1223                                 mds = frag.mds;
1224                                 dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
1225                                      __func__, inode, ceph_vinop(inode),
1226                                      frag.frag, mds);
1227                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1228                                     CEPH_MDS_STATE_ACTIVE) {
1229                                         if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
1230                                                                   mds))
1231                                                 goto out;
1232                                 }
1233                         }
1234                         mode = USE_AUTH_MDS;
1235                 }
1236         }
1237
1238         spin_lock(&ci->i_ceph_lock);
1239         cap = NULL;
1240         if (mode == USE_AUTH_MDS)
1241                 cap = ci->i_auth_cap;
1242         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
1243                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
1244         if (!cap) {
1245                 spin_unlock(&ci->i_ceph_lock);
1246                 iput(inode);
1247                 goto random;
1248         }
1249         mds = cap->session->s_mds;
1250         dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
1251              inode, ceph_vinop(inode), mds,
1252              cap == ci->i_auth_cap ? "auth " : "", cap);
1253         spin_unlock(&ci->i_ceph_lock);
1254 out:
1255         iput(inode);
1256         return mds;
1257
1258 random:
1259         if (random)
1260                 *random = true;
1261
1262         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1263         dout("%s chose random mds%d\n", __func__, mds);
1264         return mds;
1265 }
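/*
 * Editor's summary of the selection order implemented above: an explicit
 * resend hint wins; then, unless USE_RANDOM_MDS was requested, we derive an
 * inode (target, snapdir parent, or dentry parent), consult its frag tree
 * for a replicated or authoritative rank, fall back to whichever session
 * holds a cap (the auth cap for USE_AUTH_MDS), and only then pick a random
 * active mds.  For instance, an inode whose only cap was issued by mds3
 * normally resolves to 3 without ever reaching the random path.
 */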
1266
1267
1268 /*
1269  * session messages
1270  */
1271 struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
1272 {
1273         struct ceph_msg *msg;
1274         struct ceph_mds_session_head *h;
1275
1276         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
1277                            false);
1278         if (!msg) {
1279                 pr_err("ENOMEM creating session %s msg\n",
1280                        ceph_session_op_name(op));
1281                 return NULL;
1282         }
1283         h = msg->front.iov_base;
1284         h->op = cpu_to_le32(op);
1285         h->seq = cpu_to_le64(seq);
1286
1287         return msg;
1288 }
1289
1290 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
1291 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
1292 static int encode_supported_features(void **p, void *end)
1293 {
1294         static const size_t count = ARRAY_SIZE(feature_bits);
1295
1296         if (count > 0) {
1297                 size_t i;
1298                 size_t size = FEATURE_BYTES(count);
1299                 unsigned long bit;
1300
1301                 if (WARN_ON_ONCE(*p + 4 + size > end))
1302                         return -ERANGE;
1303
1304                 ceph_encode_32(p, size);
1305                 memset(*p, 0, size);
1306                 for (i = 0; i < count; i++) {
1307                         bit = feature_bits[i];
1308                         ((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
1309                 }
1310                 *p += size;
1311         } else {
1312                 if (WARN_ON_ONCE(*p + 4 > end))
1313                         return -ERANGE;
1314
1315                 ceph_encode_32(p, 0);
1316         }
1317
1318         return 0;
1319 }
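/*
 * Worked example (editor's addition): FEATURE_BYTES() sizes the bitmap from
 * the highest supported feature bit, rounded up to whole 64-bit words.  If
 * the largest value in feature_bits[] were, say, 57, then
 * DIV_ROUND_UP(57 + 1, 64) * 8 == 8, and the message carries a u32 length
 * of 8 followed by an 8-byte bitmap in which feature N sets bit N % 8 of
 * byte N / 8.
 */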
1320
1321 static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
1322 #define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
1323 static int encode_metric_spec(void **p, void *end)
1324 {
1325         static const size_t count = ARRAY_SIZE(metric_bits);
1326
1327         /* header */
1328         if (WARN_ON_ONCE(*p + 2 > end))
1329                 return -ERANGE;
1330
1331         ceph_encode_8(p, 1); /* version */
1332         ceph_encode_8(p, 1); /* compat */
1333
1334         if (count > 0) {
1335                 size_t i;
1336                 size_t size = METRIC_BYTES(count);
1337
1338                 if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
1339                         return -ERANGE;
1340
1341                 /* metric spec info length */
1342                 ceph_encode_32(p, 4 + size);
1343
1344                 /* metric spec */
1345                 ceph_encode_32(p, size);
1346                 memset(*p, 0, size);
1347                 for (i = 0; i < count; i++)
1348                         ((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
1349                 *p += size;
1350         } else {
1351                 if (WARN_ON_ONCE(*p + 4 + 4 > end))
1352                         return -ERANGE;
1353
1354                 /* metric spec info length */
1355                 ceph_encode_32(p, 4);
1356                 /* metric spec */
1357                 ceph_encode_32(p, 0);
1358         }
1359
1360         return 0;
1361 }
1362
1363 /*
1364  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
1365  * to include additional client metadata fields.
1366  */
1367 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
1368 {
1369         struct ceph_msg *msg;
1370         struct ceph_mds_session_head *h;
1371         int i;
1372         int extra_bytes = 0;
1373         int metadata_key_count = 0;
1374         struct ceph_options *opt = mdsc->fsc->client->options;
1375         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1376         size_t size, count;
1377         void *p, *end;
1378         int ret;
1379
1380         const char* metadata[][2] = {
1381                 {"hostname", mdsc->nodename},
1382                 {"kernel_version", init_utsname()->release},
1383                 {"entity_id", opt->name ? : ""},
1384                 {"root", fsopt->server_path ? : "/"},
1385                 {NULL, NULL}
1386         };
1387
1388         /* Calculate serialized length of metadata */
1389         extra_bytes = 4;  /* map length */
1390         for (i = 0; metadata[i][0]; ++i) {
1391                 extra_bytes += 8 + strlen(metadata[i][0]) +
1392                         strlen(metadata[i][1]);
1393                 metadata_key_count++;
1394         }
1395
1396         /* supported feature */
1397         size = 0;
1398         count = ARRAY_SIZE(feature_bits);
1399         if (count > 0)
1400                 size = FEATURE_BYTES(count);
1401         extra_bytes += 4 + size;
1402
1403         /* metric spec */
1404         size = 0;
1405         count = ARRAY_SIZE(metric_bits);
1406         if (count > 0)
1407                 size = METRIC_BYTES(count);
1408         extra_bytes += 2 + 4 + 4 + size;
1409
1410         /* Allocate the message */
1411         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
1412                            GFP_NOFS, false);
1413         if (!msg) {
1414                 pr_err("ENOMEM creating session open msg\n");
1415                 return ERR_PTR(-ENOMEM);
1416         }
1417         p = msg->front.iov_base;
1418         end = p + msg->front.iov_len;
1419
1420         h = p;
1421         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
1422         h->seq = cpu_to_le64(seq);
1423
1424         /*
1425          * Serialize client metadata into waiting buffer space, using
1426          * the format that userspace expects for map<string, string>
1427          *
1428          * ClientSession messages with metadata are v4
1429          */
1430         msg->hdr.version = cpu_to_le16(4);
1431         msg->hdr.compat_version = cpu_to_le16(1);
1432
1433         /* The write pointer, following the session_head structure */
1434         p += sizeof(*h);
1435
1436         /* Number of entries in the map */
1437         ceph_encode_32(&p, metadata_key_count);
1438
1439         /* Two length-prefixed strings for each entry in the map */
1440         for (i = 0; metadata[i][0]; ++i) {
1441                 size_t const key_len = strlen(metadata[i][0]);
1442                 size_t const val_len = strlen(metadata[i][1]);
1443
1444                 ceph_encode_32(&p, key_len);
1445                 memcpy(p, metadata[i][0], key_len);
1446                 p += key_len;
1447                 ceph_encode_32(&p, val_len);
1448                 memcpy(p, metadata[i][1], val_len);
1449                 p += val_len;
1450         }
1451
1452         ret = encode_supported_features(&p, end);
1453         if (ret) {
1454                 pr_err("encode_supported_features failed!\n");
1455                 ceph_msg_put(msg);
1456                 return ERR_PTR(ret);
1457         }
1458
1459         ret = encode_metric_spec(&p, end);
1460         if (ret) {
1461                 pr_err("encode_metric_spec failed!\n");
1462                 ceph_msg_put(msg);
1463                 return ERR_PTR(ret);
1464         }
1465
1466         msg->front.iov_len = p - msg->front.iov_base;
1467         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1468
1469         return msg;
1470 }
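/*
 * Editor's sketch of the payload built above (derived from this function,
 * not from separate protocol docs): after the session head the message
 * carries
 *
 *     u32 nr_entries
 *     { u32 key_len, key bytes, u32 val_len, val bytes } x nr_entries
 *     u32 bitmap_len, supported-feature bitmap
 *     u8 ver, u8 compat, u32 spec_len, u32 bitmap_len, metric bitmap
 *
 * which is the wire format userspace expects for a map<string,string>,
 * followed by the feature and metric spec blobs encoded by the helpers
 * above.
 */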
1471
1472 /*
1473  * send session open request.
1474  *
1475  * called under mdsc->mutex
1476  */
1477 static int __open_session(struct ceph_mds_client *mdsc,
1478                           struct ceph_mds_session *session)
1479 {
1480         struct ceph_msg *msg;
1481         int mstate;
1482         int mds = session->s_mds;
1483
1484         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
1485                 return -EIO;
1486
1487         /* wait for mds to go active? */
1488         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1489         dout("open_session to mds%d (%s)\n", mds,
1490              ceph_mds_state_name(mstate));
1491         session->s_state = CEPH_MDS_SESSION_OPENING;
1492         session->s_renew_requested = jiffies;
1493
1494         /* send connect message */
1495         msg = create_session_open_msg(mdsc, session->s_seq);
1496         if (IS_ERR(msg))
1497                 return PTR_ERR(msg);
1498         ceph_con_send(&session->s_con, msg);
1499         return 0;
1500 }
1501
1502 /*
1503  * look up or open a session to the given export target mds
1504  *
1505  * called under mdsc->mutex
1506  */
1507 static struct ceph_mds_session *
1508 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1509 {
1510         struct ceph_mds_session *session;
1511         int ret;
1512
1513         session = __ceph_lookup_mds_session(mdsc, target);
1514         if (!session) {
1515                 session = register_session(mdsc, target);
1516                 if (IS_ERR(session))
1517                         return session;
1518         }
1519         if (session->s_state == CEPH_MDS_SESSION_NEW ||
1520             session->s_state == CEPH_MDS_SESSION_CLOSING) {
1521                 ret = __open_session(mdsc, session);
1522                 if (ret)
1523                         return ERR_PTR(ret);
1524         }
1525
1526         return session;
1527 }
1528
1529 struct ceph_mds_session *
1530 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1531 {
1532         struct ceph_mds_session *session;
1533
1534         dout("open_export_target_session to mds%d\n", target);
1535
1536         mutex_lock(&mdsc->mutex);
1537         session = __open_export_target_session(mdsc, target);
1538         mutex_unlock(&mdsc->mutex);
1539
1540         return session;
1541 }
1542
1543 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1544                                           struct ceph_mds_session *session)
1545 {
1546         struct ceph_mds_info *mi;
1547         struct ceph_mds_session *ts;
1548         int i, mds = session->s_mds;
1549
1550         if (mds >= mdsc->mdsmap->possible_max_rank)
1551                 return;
1552
1553         mi = &mdsc->mdsmap->m_info[mds];
1554         dout("open_export_target_sessions for mds%d (%d targets)\n",
1555              session->s_mds, mi->num_export_targets);
1556
1557         for (i = 0; i < mi->num_export_targets; i++) {
1558                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1559                 ceph_put_mds_session(ts);
1560         }
1561 }
1562
1563 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1564                                            struct ceph_mds_session *session)
1565 {
1566         mutex_lock(&mdsc->mutex);
1567         __open_export_target_sessions(mdsc, session);
1568         mutex_unlock(&mdsc->mutex);
1569 }
1570
1571 /*
1572  * session caps
1573  */
1574
1575 static void detach_cap_releases(struct ceph_mds_session *session,
1576                                 struct list_head *target)
1577 {
1578         lockdep_assert_held(&session->s_cap_lock);
1579
1580         list_splice_init(&session->s_cap_releases, target);
1581         session->s_num_cap_releases = 0;
1582         dout("dispose_cap_releases mds%d\n", session->s_mds);
1583 }
1584
1585 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1586                                  struct list_head *dispose)
1587 {
1588         while (!list_empty(dispose)) {
1589                 struct ceph_cap *cap;
1590                 /* put each cap that was queued for release */
1591                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1592                 list_del(&cap->session_caps);
1593                 ceph_put_cap(mdsc, cap);
1594         }
1595 }
1596
1597 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1598                                      struct ceph_mds_session *session)
1599 {
1600         struct ceph_mds_request *req;
1601         struct rb_node *p;
1602
1603         dout("cleanup_session_requests mds%d\n", session->s_mds);
1604         mutex_lock(&mdsc->mutex);
1605         while (!list_empty(&session->s_unsafe)) {
1606                 req = list_first_entry(&session->s_unsafe,
1607                                        struct ceph_mds_request, r_unsafe_item);
1608                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1609                                     req->r_tid);
1610                 if (req->r_target_inode)
1611                         mapping_set_error(req->r_target_inode->i_mapping, -EIO);
1612                 if (req->r_unsafe_dir)
1613                         mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
1614                 __unregister_request(mdsc, req);
1615         }
1616         /* zero r_attempts, so kick_requests() will re-send requests */
1617         p = rb_first(&mdsc->request_tree);
1618         while (p) {
1619                 req = rb_entry(p, struct ceph_mds_request, r_node);
1620                 p = rb_next(p);
1621                 if (req->r_session &&
1622                     req->r_session->s_mds == session->s_mds)
1623                         req->r_attempts = 0;
1624         }
1625         mutex_unlock(&mdsc->mutex);
1626 }
1627
1628 /*
1629  * Helper to safely iterate over all caps associated with a session, with
1630  * special care taken to handle a racing __ceph_remove_cap().
1631  *
1632  * Caller must hold session s_mutex.
1633  */
1634 int ceph_iterate_session_caps(struct ceph_mds_session *session,
1635                               int (*cb)(struct inode *, struct ceph_cap *,
1636                                         void *), void *arg)
1637 {
1638         struct list_head *p;
1639         struct ceph_cap *cap;
1640         struct inode *inode, *last_inode = NULL;
1641         struct ceph_cap *old_cap = NULL;
1642         int ret;
1643
1644         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1645         spin_lock(&session->s_cap_lock);
1646         p = session->s_caps.next;
1647         while (p != &session->s_caps) {
1648                 cap = list_entry(p, struct ceph_cap, session_caps);
1649                 inode = igrab(&cap->ci->netfs.inode);
1650                 if (!inode) {
1651                         p = p->next;
1652                         continue;
1653                 }
1654                 session->s_cap_iterator = cap;
1655                 spin_unlock(&session->s_cap_lock);
1656
1657                 if (last_inode) {
1658                         iput(last_inode);
1659                         last_inode = NULL;
1660                 }
1661                 if (old_cap) {
1662                         ceph_put_cap(session->s_mdsc, old_cap);
1663                         old_cap = NULL;
1664                 }
1665
1666                 ret = cb(inode, cap, arg);
1667                 last_inode = inode;
1668
1669                 spin_lock(&session->s_cap_lock);
1670                 p = p->next;
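                     /*
                      * If the cap was removed by a racing __ceph_remove_cap()
                      * while we ran the callback without s_cap_lock, finish
                      * detaching it from the session here.
                      */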
1671                 if (!cap->ci) {
1672                         dout("iterate_session_caps  finishing cap %p removal\n",
1673                              cap);
1674                         BUG_ON(cap->session != session);
1675                         cap->session = NULL;
1676                         list_del_init(&cap->session_caps);
1677                         session->s_nr_caps--;
1678                         atomic64_dec(&session->s_mdsc->metric.total_caps);
1679                         if (cap->queue_release)
1680                                 __ceph_queue_cap_release(session, cap);
1681                         else
1682                                 old_cap = cap;  /* put_cap it w/o locks held */
1683                 }
1684                 if (ret < 0)
1685                         goto out;
1686         }
1687         ret = 0;
1688 out:
1689         session->s_cap_iterator = NULL;
1690         spin_unlock(&session->s_cap_lock);
1691
1692         iput(last_inode);
1693         if (old_cap)
1694                 ceph_put_cap(session->s_mdsc, old_cap);
1695
1696         return ret;
1697 }
1698
1699 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1700                                   void *arg)
1701 {
1702         struct ceph_inode_info *ci = ceph_inode(inode);
1703         bool invalidate = false;
1704         int iputs;
1705
1706         dout("removing cap %p, ci is %p, inode is %p\n",
1707              cap, ci, &ci->netfs.inode);
1708         spin_lock(&ci->i_ceph_lock);
1709         iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
1710         spin_unlock(&ci->i_ceph_lock);
1711
1712         wake_up_all(&ci->i_cap_wq);
1713         if (invalidate)
1714                 ceph_queue_invalidate(inode);
1715         while (iputs--)
1716                 iput(inode);
1717         return 0;
1718 }
1719
1720 /*
1721  * caller must hold session s_mutex
1722  */
1723 static void remove_session_caps(struct ceph_mds_session *session)
1724 {
1725         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1726         struct super_block *sb = fsc->sb;
1727         LIST_HEAD(dispose);
1728
1729         dout("remove_session_caps on %p\n", session);
1730         ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1731
1732         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1733
1734         spin_lock(&session->s_cap_lock);
1735         if (session->s_nr_caps > 0) {
1736                 struct inode *inode;
1737                 struct ceph_cap *cap, *prev = NULL;
1738                 struct ceph_vino vino;
1739                 /*
1740                  * iterate_session_caps() skips inodes that are being
1741                  * deleted; we need to wait until deletions are complete.
1742                  * __wait_on_freeing_inode() is designed for the job,
1743                  * but it is not exported, so use the inode lookup
1744                  * function to get at it.
1745                  */
1746                 while (!list_empty(&session->s_caps)) {
1747                         cap = list_entry(session->s_caps.next,
1748                                          struct ceph_cap, session_caps);
1749                         if (cap == prev)
1750                                 break;
1751                         prev = cap;
1752                         vino = cap->ci->i_vino;
1753                         spin_unlock(&session->s_cap_lock);
1754
1755                         inode = ceph_find_inode(sb, vino);
1756                         iput(inode);
1757
1758                         spin_lock(&session->s_cap_lock);
1759                 }
1760         }
1761
1762         // detach any queued cap releases (s_cap_lock is still held)
1763         detach_cap_releases(session, &dispose);
1764
1765         BUG_ON(session->s_nr_caps > 0);
1766         BUG_ON(!list_empty(&session->s_cap_flushing));
1767         spin_unlock(&session->s_cap_lock);
1768         dispose_cap_releases(session->s_mdsc, &dispose);
1769 }
1770
1771 enum {
1772         RECONNECT,
1773         RENEWCAPS,
1774         FORCE_RO,
1775 };
1776
1777 /*
1778  * wake up any threads waiting on this session's caps.  if the cap is
1779  * old (didn't get renewed on the client reconnect), remove it now.
1780  *
1781  * caller must hold s_mutex.
1782  */
1783 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1784                               void *arg)
1785 {
1786         struct ceph_inode_info *ci = ceph_inode(inode);
1787         unsigned long ev = (unsigned long)arg;
1788
1789         if (ev == RECONNECT) {
1790                 spin_lock(&ci->i_ceph_lock);
1791                 ci->i_wanted_max_size = 0;
1792                 ci->i_requested_max_size = 0;
1793                 spin_unlock(&ci->i_ceph_lock);
1794         } else if (ev == RENEWCAPS) {
1795                 if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
1796                         /* mds did not re-issue stale cap */
1797                         spin_lock(&ci->i_ceph_lock);
1798                         cap->issued = cap->implemented = CEPH_CAP_PIN;
1799                         spin_unlock(&ci->i_ceph_lock);
1800                 }
1801         } else if (ev == FORCE_RO) {
1802         }
1803         wake_up_all(&ci->i_cap_wq);
1804         return 0;
1805 }
1806
1807 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
1808 {
1809         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1810         ceph_iterate_session_caps(session, wake_up_session_cb,
1811                                   (void *)(unsigned long)ev);
1812 }
1813
1814 /*
1815  * Send periodic message to MDS renewing all currently held caps.  The
1816  * ack will reset the expiration for all caps from this session.
1817  *
1818  * caller holds s_mutex
1819  */
1820 static int send_renew_caps(struct ceph_mds_client *mdsc,
1821                            struct ceph_mds_session *session)
1822 {
1823         struct ceph_msg *msg;
1824         int state;
1825
1826         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1827             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1828                 pr_info("mds%d caps stale\n", session->s_mds);
1829         session->s_renew_requested = jiffies;
1830
1831         /* do not try to renew caps until a recovering mds has reconnected
1832          * with its clients. */
1833         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1834         if (state < CEPH_MDS_STATE_RECONNECT) {
1835                 dout("send_renew_caps ignoring mds%d (%s)\n",
1836                      session->s_mds, ceph_mds_state_name(state));
1837                 return 0;
1838         }
1839
1840         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1841                 ceph_mds_state_name(state));
1842         msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1843                                       ++session->s_renew_seq);
1844         if (!msg)
1845                 return -ENOMEM;
1846         ceph_con_send(&session->s_con, msg);
1847         return 0;
1848 }
1849
1850 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1851                              struct ceph_mds_session *session, u64 seq)
1852 {
1853         struct ceph_msg *msg;
1854
1855         dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
1856              session->s_mds, ceph_session_state_name(session->s_state), seq);
1857         msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1858         if (!msg)
1859                 return -ENOMEM;
1860         ceph_con_send(&session->s_con, msg);
1861         return 0;
1862 }
1863
1864
1865 /*
1866  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1867  *
1868  * Called under session->s_mutex
1869  */
1870 static void renewed_caps(struct ceph_mds_client *mdsc,
1871                          struct ceph_mds_session *session, int is_renew)
1872 {
1873         int was_stale;
1874         int wake = 0;
1875
1876         spin_lock(&session->s_cap_lock);
1877         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1878
1879         session->s_cap_ttl = session->s_renew_requested +
1880                 mdsc->mdsmap->m_session_timeout*HZ;
1881
1882         if (was_stale) {
1883                 if (time_before(jiffies, session->s_cap_ttl)) {
1884                         pr_info("mds%d caps renewed\n", session->s_mds);
1885                         wake = 1;
1886                 } else {
1887                         pr_info("mds%d caps still stale\n", session->s_mds);
1888                 }
1889         }
1890         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1891              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1892              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1893         spin_unlock(&session->s_cap_lock);
1894
1895         if (wake)
1896                 wake_up_session_caps(session, RENEWCAPS);
1897 }
1898
1899 /*
1900  * send a session close request
1901  */
1902 static int request_close_session(struct ceph_mds_session *session)
1903 {
1904         struct ceph_msg *msg;
1905
1906         dout("request_close_session mds%d state %s seq %lld\n",
1907              session->s_mds, ceph_session_state_name(session->s_state),
1908              session->s_seq);
1909         msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
1910                                       session->s_seq);
1911         if (!msg)
1912                 return -ENOMEM;
1913         ceph_con_send(&session->s_con, msg);
1914         return 1;
1915 }
1916
1917 /*
1918  * Called with s_mutex held.
1919  */
1920 static int __close_session(struct ceph_mds_client *mdsc,
1921                          struct ceph_mds_session *session)
1922 {
1923         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1924                 return 0;
1925         session->s_state = CEPH_MDS_SESSION_CLOSING;
1926         return request_close_session(session);
1927 }
1928
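     /*
      * For a directory dentry, return true if all of its children are
      * negative dentries, shrinking the subtree in that case so that the
      * parent itself can be pruned; non-directories trivially return true.
      */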
1929 static bool drop_negative_children(struct dentry *dentry)
1930 {
1931         struct dentry *child;
1932         bool all_negative = true;
1933
1934         if (!d_is_dir(dentry))
1935                 goto out;
1936
1937         spin_lock(&dentry->d_lock);
1938         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1939                 if (d_really_is_positive(child)) {
1940                         all_negative = false;
1941                         break;
1942                 }
1943         }
1944         spin_unlock(&dentry->d_lock);
1945
1946         if (all_negative)
1947                 shrink_dcache_parent(dentry);
1948 out:
1949         return all_negative;
1950 }
1951
1952 /*
1953  * Trim old(er) caps.
1954  *
1955  * Because we can't cache an inode without one or more caps, we do
1956  * this indirectly: if a cap is unused, we prune its aliases, at which
1957  * point the inode will hopefully get dropped too.
1958  *
1959  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1960  * memory pressure from the MDS, though, so it needn't be perfect.
1961  */
1962 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1963 {
1964         int *remaining = arg;
1965         struct ceph_inode_info *ci = ceph_inode(inode);
1966         int used, wanted, oissued, mine;
1967
1968         if (*remaining <= 0)
1969                 return -1;
1970
1971         spin_lock(&ci->i_ceph_lock);
1972         mine = cap->issued | cap->implemented;
1973         used = __ceph_caps_used(ci);
1974         wanted = __ceph_caps_file_wanted(ci);
1975         oissued = __ceph_caps_issued_other(ci, cap);
1976
1977         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1978              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1979              ceph_cap_string(used), ceph_cap_string(wanted));
1980         if (cap == ci->i_auth_cap) {
1981                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1982                     !list_empty(&ci->i_cap_snaps))
1983                         goto out;
1984                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1985                         goto out;
1986                 /* Note: it's possible that i_filelock_ref becomes non-zero
1987                  * after dropping auth caps. It doesn't hurt because reply
1988                  * of lock mds request will re-add auth caps. */
1989                 if (atomic_read(&ci->i_filelock_ref) > 0)
1990                         goto out;
1991         }
1992         /* The inode has cached pages, but it's no longer used.
1993          * we can safely drop it */
1994         if (S_ISREG(inode->i_mode) &&
1995             wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1996             !(oissued & CEPH_CAP_FILE_CACHE)) {
1997                 used = 0;
1998                 oissued = 0;
1999         }
2000         if ((used | wanted) & ~oissued & mine)
2001                 goto out;   /* we need these caps */
2002
2003         if (oissued) {
2004                 /* we aren't the only cap.. just remove us */
2005                 ceph_remove_cap(cap, true);
2006                 (*remaining)--;
2007         } else {
2008                 struct dentry *dentry;
2009                 /* try dropping referring dentries */
2010                 spin_unlock(&ci->i_ceph_lock);
2011                 dentry = d_find_any_alias(inode);
2012                 if (dentry && drop_negative_children(dentry)) {
2013                         int count;
2014                         dput(dentry);
2015                         d_prune_aliases(inode);
2016                         count = atomic_read(&inode->i_count);
2017                         if (count == 1)
2018                                 (*remaining)--;
2019                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
2020                              inode, cap, count);
2021                 } else {
2022                         dput(dentry);
2023                 }
2024                 return 0;
2025         }
2026
2027 out:
2028         spin_unlock(&ci->i_ceph_lock);
2029         return 0;
2030 }
2031
2032 /*
2033  * Trim session cap count down to some max number.
2034  */
2035 int ceph_trim_caps(struct ceph_mds_client *mdsc,
2036                    struct ceph_mds_session *session,
2037                    int max_caps)
2038 {
2039         int trim_caps = session->s_nr_caps - max_caps;
2040
2041         dout("trim_caps mds%d start: %d / %d, trim %d\n",
2042              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
2043         if (trim_caps > 0) {
2044                 int remaining = trim_caps;
2045
2046                 ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
2047                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
2048                      session->s_mds, session->s_nr_caps, max_caps,
2049                         trim_caps - remaining);
2050         }
2051
2052         ceph_flush_cap_releases(mdsc, session);
2053         return 0;
2054 }
2055
2056 static int check_caps_flush(struct ceph_mds_client *mdsc,
2057                             u64 want_flush_tid)
2058 {
2059         int ret = 1;
2060
2061         spin_lock(&mdsc->cap_dirty_lock);
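             /*
              * Cap flushes get monotonically increasing tids and are kept in
              * tid order on cap_flush_list, so checking the first entry is
              * enough to know whether anything at or below want_flush_tid is
              * still in flight.
              */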
2062         if (!list_empty(&mdsc->cap_flush_list)) {
2063                 struct ceph_cap_flush *cf =
2064                         list_first_entry(&mdsc->cap_flush_list,
2065                                          struct ceph_cap_flush, g_list);
2066                 if (cf->tid <= want_flush_tid) {
2067                         dout("check_caps_flush still flushing tid "
2068                              "%llu <= %llu\n", cf->tid, want_flush_tid);
2069                         ret = 0;
2070                 }
2071         }
2072         spin_unlock(&mdsc->cap_dirty_lock);
2073         return ret;
2074 }
2075
2076 /*
2077  * wait for all dirty inode data to be flushed to disk.
2078  *
2079  * returns once cap flushes up through want_flush_tid have completed.
2080  */
2081 static void wait_caps_flush(struct ceph_mds_client *mdsc,
2082                             u64 want_flush_tid)
2083 {
2084         dout("check_caps_flush want %llu\n", want_flush_tid);
2085
2086         wait_event(mdsc->cap_flushing_wq,
2087                    check_caps_flush(mdsc, want_flush_tid));
2088
2089         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
2090 }
2091
2092 /*
2093  * called under s_mutex
2094  */
2095 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
2096                                    struct ceph_mds_session *session)
2097 {
2098         struct ceph_msg *msg = NULL;
2099         struct ceph_mds_cap_release *head;
2100         struct ceph_mds_cap_item *item;
2101         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
2102         struct ceph_cap *cap;
2103         LIST_HEAD(tmp_list);
2104         int num_cap_releases;
2105         __le32  barrier, *cap_barrier;
2106
2107         down_read(&osdc->lock);
2108         barrier = cpu_to_le32(osdc->epoch_barrier);
2109         up_read(&osdc->lock);
2110
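             /*
              * Detach the queued releases while holding s_cap_lock, then
              * build CEPH_MSG_CLIENT_CAPRELEASE messages without the lock,
              * sending a message each time it reaches CEPH_CAPS_PER_RELEASE
              * items.  Loop back if more releases were queued in the meantime.
              */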
2111         spin_lock(&session->s_cap_lock);
2112 again:
2113         list_splice_init(&session->s_cap_releases, &tmp_list);
2114         num_cap_releases = session->s_num_cap_releases;
2115         session->s_num_cap_releases = 0;
2116         spin_unlock(&session->s_cap_lock);
2117
2118         while (!list_empty(&tmp_list)) {
2119                 if (!msg) {
2120                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2121                                         PAGE_SIZE, GFP_NOFS, false);
2122                         if (!msg)
2123                                 goto out_err;
2124                         head = msg->front.iov_base;
2125                         head->num = cpu_to_le32(0);
2126                         msg->front.iov_len = sizeof(*head);
2127
2128                         msg->hdr.version = cpu_to_le16(2);
2129                         msg->hdr.compat_version = cpu_to_le16(1);
2130                 }
2131
2132                 cap = list_first_entry(&tmp_list, struct ceph_cap,
2133                                         session_caps);
2134                 list_del(&cap->session_caps);
2135                 num_cap_releases--;
2136
2137                 head = msg->front.iov_base;
2138                 put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2139                                    &head->num);
2140                 item = msg->front.iov_base + msg->front.iov_len;
2141                 item->ino = cpu_to_le64(cap->cap_ino);
2142                 item->cap_id = cpu_to_le64(cap->cap_id);
2143                 item->migrate_seq = cpu_to_le32(cap->mseq);
2144                 item->seq = cpu_to_le32(cap->issue_seq);
2145                 msg->front.iov_len += sizeof(*item);
2146
2147                 ceph_put_cap(mdsc, cap);
2148
2149                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2150                         // Append cap_barrier field
2151                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
2152                         *cap_barrier = barrier;
2153                         msg->front.iov_len += sizeof(*cap_barrier);
2154
2155                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2156                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2157                         ceph_con_send(&session->s_con, msg);
2158                         msg = NULL;
2159                 }
2160         }
2161
2162         BUG_ON(num_cap_releases != 0);
2163
2164         spin_lock(&session->s_cap_lock);
2165         if (!list_empty(&session->s_cap_releases))
2166                 goto again;
2167         spin_unlock(&session->s_cap_lock);
2168
2169         if (msg) {
2170                 // Append cap_barrier field
2171                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
2172                 *cap_barrier = barrier;
2173                 msg->front.iov_len += sizeof(*cap_barrier);
2174
2175                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2176                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2177                 ceph_con_send(&session->s_con, msg);
2178         }
2179         return;
2180 out_err:
2181         pr_err("send_cap_releases mds%d, failed to allocate message\n",
2182                 session->s_mds);
2183         spin_lock(&session->s_cap_lock);
2184         list_splice(&tmp_list, &session->s_cap_releases);
2185         session->s_num_cap_releases += num_cap_releases;
2186         spin_unlock(&session->s_cap_lock);
2187 }
2188
2189 static void ceph_cap_release_work(struct work_struct *work)
2190 {
2191         struct ceph_mds_session *session =
2192                 container_of(work, struct ceph_mds_session, s_cap_release_work);
2193
2194         mutex_lock(&session->s_mutex);
2195         if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2196             session->s_state == CEPH_MDS_SESSION_HUNG)
2197                 ceph_send_cap_releases(session->s_mdsc, session);
2198         mutex_unlock(&session->s_mutex);
2199         ceph_put_mds_session(session);
2200 }
2201
2202 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2203                              struct ceph_mds_session *session)
2204 {
2205         if (mdsc->stopping)
2206                 return;
2207
2208         ceph_get_mds_session(session);
2209         if (queue_work(mdsc->fsc->cap_wq,
2210                        &session->s_cap_release_work)) {
2211                 dout("cap release work queued\n");
2212         } else {
2213                 ceph_put_mds_session(session);
2214                 dout("failed to queue cap release work\n");
2215         }
2216 }
2217
2218 /*
2219  * caller holds session->s_cap_lock
2220  */
2221 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2222                               struct ceph_cap *cap)
2223 {
2224         list_add_tail(&cap->session_caps, &session->s_cap_releases);
2225         session->s_num_cap_releases++;
2226
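             /* flush once a full message's worth of releases has accumulated */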
2227         if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2228                 ceph_flush_cap_releases(session->s_mdsc, session);
2229 }
2230
2231 static void ceph_cap_reclaim_work(struct work_struct *work)
2232 {
2233         struct ceph_mds_client *mdsc =
2234                 container_of(work, struct ceph_mds_client, cap_reclaim_work);
2235         int ret = ceph_trim_dentries(mdsc);
2236         if (ret == -EAGAIN)
2237                 ceph_queue_cap_reclaim_work(mdsc);
2238 }
2239
2240 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2241 {
2242         if (mdsc->stopping)
2243                 return;
2244
2245         if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2246                 dout("caps reclaim work queued\n");
2247         } else {
2248                 dout("failed to queue caps release work\n");
2249         }
2250 }
2251
2252 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2253 {
2254         int val;
2255         if (!nr)
2256                 return;
2257         val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
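             /*
              * Kick off the reclaim work each time the pending count crosses
              * a multiple of CEPH_CAPS_PER_RELEASE, then reset the counter.
              */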
2258         if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2259                 atomic_set(&mdsc->cap_reclaim_pending, 0);
2260                 ceph_queue_cap_reclaim_work(mdsc);
2261         }
2262 }
2263
2264 /*
2265  * requests
2266  */
2267
2268 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2269                                     struct inode *dir)
2270 {
2271         struct ceph_inode_info *ci = ceph_inode(dir);
2272         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2273         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2274         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2275         unsigned int num_entries;
2276         int order;
2277
2278         spin_lock(&ci->i_ceph_lock);
2279         num_entries = ci->i_files + ci->i_subdirs;
2280         spin_unlock(&ci->i_ceph_lock);
2281         num_entries = max(num_entries, 1U);
2282         num_entries = min(num_entries, opt->max_readdir);
2283
2284         order = get_order(size * num_entries);
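             /*
              * Try to allocate a buffer sized for the expected number of
              * entries, falling back to progressively smaller orders if the
              * allocation fails.
              */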
2285         while (order >= 0) {
2286                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2287                                                              __GFP_NOWARN |
2288                                                              __GFP_ZERO,
2289                                                              order);
2290                 if (rinfo->dir_entries)
2291                         break;
2292                 order--;
2293         }
2294         if (!rinfo->dir_entries)
2295                 return -ENOMEM;
2296
2297         num_entries = (PAGE_SIZE << order) / size;
2298         num_entries = min(num_entries, opt->max_readdir);
2299
2300         rinfo->dir_buf_size = PAGE_SIZE << order;
2301         req->r_num_caps = num_entries + 1;
2302         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2303         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2304         return 0;
2305 }
2306
2307 /*
2308  * Create an mds request.
2309  */
2310 struct ceph_mds_request *
2311 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2312 {
2313         struct ceph_mds_request *req;
2314
2315         req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2316         if (!req)
2317                 return ERR_PTR(-ENOMEM);
2318
2319         mutex_init(&req->r_fill_mutex);
2320         req->r_mdsc = mdsc;
2321         req->r_started = jiffies;
2322         req->r_start_latency = ktime_get();
2323         req->r_resend_mds = -1;
2324         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2325         INIT_LIST_HEAD(&req->r_unsafe_target_item);
2326         req->r_fmode = -1;
2327         req->r_feature_needed = -1;
2328         kref_init(&req->r_kref);
2329         RB_CLEAR_NODE(&req->r_node);
2330         INIT_LIST_HEAD(&req->r_wait);
2331         init_completion(&req->r_completion);
2332         init_completion(&req->r_safe_completion);
2333         INIT_LIST_HEAD(&req->r_unsafe_item);
2334
2335         ktime_get_coarse_real_ts64(&req->r_stamp);
2336
2337         req->r_op = op;
2338         req->r_direct_mode = mode;
2339         return req;
2340 }
2341
2342 /*
2343  * return the oldest (lowest tid) request in the request tree, or NULL if none.
2344  *
2345  * called under mdsc->mutex.
2346  */
2347 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2348 {
2349         if (RB_EMPTY_ROOT(&mdsc->request_tree))
2350                 return NULL;
2351         return rb_entry(rb_first(&mdsc->request_tree),
2352                         struct ceph_mds_request, r_node);
2353 }
2354
2355 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2356 {
2357         return mdsc->oldest_tid;
2358 }
2359
2360 /*
2361  * Build a dentry's path.  Allocated with __getname(); the caller must
2362  * free it with ceph_mdsc_free_path().  Based on build_path_from_dentry in fs/cifs/dir.c.
2363  *
2364  * If @stop_on_nosnap, generate path relative to the first non-snapped
2365  * inode.
2366  *
2367  * Encode hidden .snap dirs as a double /, i.e.
2368  *   foo/.snap/bar -> foo//bar
2369  */
2370 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2371                            int stop_on_nosnap)
2372 {
2373         struct dentry *temp;
2374         char *path;
2375         int pos;
2376         unsigned seq;
2377         u64 base;
2378
2379         if (!dentry)
2380                 return ERR_PTR(-EINVAL);
2381
2382         path = __getname();
2383         if (!path)
2384                 return ERR_PTR(-ENOMEM);
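             /*
              * Walk from the dentry towards the root, filling the path
              * buffer backwards.  If a concurrent rename changes the
              * hierarchy underneath us (detected via rename_lock), start
              * over.
              */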
2385 retry:
2386         pos = PATH_MAX - 1;
2387         path[pos] = '\0';
2388
2389         seq = read_seqbegin(&rename_lock);
2390         rcu_read_lock();
2391         temp = dentry;
2392         for (;;) {
2393                 struct inode *inode;
2394
2395                 spin_lock(&temp->d_lock);
2396                 inode = d_inode(temp);
2397                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2398                         dout("build_path path+%d: %p SNAPDIR\n",
2399                              pos, temp);
2400                 } else if (stop_on_nosnap && inode && dentry != temp &&
2401                            ceph_snap(inode) == CEPH_NOSNAP) {
2402                         spin_unlock(&temp->d_lock);
2403                         pos++; /* get rid of any prepended '/' */
2404                         break;
2405                 } else {
2406                         pos -= temp->d_name.len;
2407                         if (pos < 0) {
2408                                 spin_unlock(&temp->d_lock);
2409                                 break;
2410                         }
2411                         memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2412                 }
2413                 spin_unlock(&temp->d_lock);
2414                 temp = READ_ONCE(temp->d_parent);
2415
2416                 /* Are we at the root? */
2417                 if (IS_ROOT(temp))
2418                         break;
2419
2420                 /* Are we out of buffer? */
2421                 if (--pos < 0)
2422                         break;
2423
2424                 path[pos] = '/';
2425         }
2426         base = ceph_ino(d_inode(temp));
2427         rcu_read_unlock();
2428
2429         if (read_seqretry(&rename_lock, seq))
2430                 goto retry;
2431
2432         if (pos < 0) {
2433                 /*
2434                  * A rename didn't occur, but somehow we didn't end up where
2435                  * we thought we would. Throw a warning and try again.
2436                  */
2437                 pr_warn("build_path did not end path lookup where "
2438                         "expected, pos is %d\n", pos);
2439                 goto retry;
2440         }
2441
2442         *pbase = base;
2443         *plen = PATH_MAX - 1 - pos;
2444         dout("build_path on %p %d built %llx '%.*s'\n",
2445              dentry, d_count(dentry), base, *plen, path + pos);
2446         return path + pos;
2447 }
2448
2449 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2450                              const char **ppath, int *ppathlen, u64 *pino,
2451                              bool *pfreepath, bool parent_locked)
2452 {
2453         char *path;
2454
2455         rcu_read_lock();
2456         if (!dir)
2457                 dir = d_inode_rcu(dentry->d_parent);
2458         if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2459                 *pino = ceph_ino(dir);
2460                 rcu_read_unlock();
2461                 *ppath = dentry->d_name.name;
2462                 *ppathlen = dentry->d_name.len;
2463                 return 0;
2464         }
2465         rcu_read_unlock();
2466         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2467         if (IS_ERR(path))
2468                 return PTR_ERR(path);
2469         *ppath = path;
2470         *pfreepath = true;
2471         return 0;
2472 }
2473
2474 static int build_inode_path(struct inode *inode,
2475                             const char **ppath, int *ppathlen, u64 *pino,
2476                             bool *pfreepath)
2477 {
2478         struct dentry *dentry;
2479         char *path;
2480
2481         if (ceph_snap(inode) == CEPH_NOSNAP) {
2482                 *pino = ceph_ino(inode);
2483                 *ppathlen = 0;
2484                 return 0;
2485         }
2486         dentry = d_find_alias(inode);
2487         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2488         dput(dentry);
2489         if (IS_ERR(path))
2490                 return PTR_ERR(path);
2491         *ppath = path;
2492         *pfreepath = true;
2493         return 0;
2494 }
2495
2496 /*
2497  * request arguments may be specified via an inode *, a dentry *, or
2498  * an explicit ino+path.
2499  */
2500 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2501                                   struct inode *rdiri, const char *rpath,
2502                                   u64 rino, const char **ppath, int *pathlen,
2503                                   u64 *ino, bool *freepath, bool parent_locked)
2504 {
2505         int r = 0;
2506
2507         if (rinode) {
2508                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2509                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2510                      ceph_snap(rinode));
2511         } else if (rdentry) {
2512                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2513                                         freepath, parent_locked);
2514                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2515                      *ppath);
2516         } else if (rpath || rino) {
2517                 *ino = rino;
2518                 *ppath = rpath;
2519                 *pathlen = rpath ? strlen(rpath) : 0;
2520                 dout(" path %.*s\n", *pathlen, rpath);
2521         }
2522
2523         return r;
2524 }
2525
2526 static void encode_timestamp_and_gids(void **p,
2527                                       const struct ceph_mds_request *req)
2528 {
2529         struct ceph_timespec ts;
2530         int i;
2531
2532         ceph_encode_timespec64(&ts, &req->r_stamp);
2533         ceph_encode_copy(p, &ts, sizeof(ts));
2534
2535         /* gid_list */
2536         ceph_encode_32(p, req->r_cred->group_info->ngroups);
2537         for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2538                 ceph_encode_64(p, from_kgid(&init_user_ns,
2539                                             req->r_cred->group_info->gid[i]));
2540 }
2541
2542 /*
2543  * called under mdsc->mutex
2544  */
2545 static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
2546                                                struct ceph_mds_request *req,
2547                                                bool drop_cap_releases)
2548 {
2549         int mds = session->s_mds;
2550         struct ceph_mds_client *mdsc = session->s_mdsc;
2551         struct ceph_msg *msg;
2552         struct ceph_mds_request_head_old *head;
2553         const char *path1 = NULL;
2554         const char *path2 = NULL;
2555         u64 ino1 = 0, ino2 = 0;
2556         int pathlen1 = 0, pathlen2 = 0;
2557         bool freepath1 = false, freepath2 = false;
2558         int len;
2559         u16 releases;
2560         void *p, *end;
2561         int ret;
2562         bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
2563
2564         ret = set_request_path_attr(req->r_inode, req->r_dentry,
2565                               req->r_parent, req->r_path1, req->r_ino1.ino,
2566                               &path1, &pathlen1, &ino1, &freepath1,
2567                               test_bit(CEPH_MDS_R_PARENT_LOCKED,
2568                                         &req->r_req_flags));
2569         if (ret < 0) {
2570                 msg = ERR_PTR(ret);
2571                 goto out;
2572         }
2573
2574         /* If r_old_dentry is set, then assume that its parent is locked */
2575         ret = set_request_path_attr(NULL, req->r_old_dentry,
2576                               req->r_old_dentry_dir,
2577                               req->r_path2, req->r_ino2.ino,
2578                               &path2, &pathlen2, &ino2, &freepath2, true);
2579         if (ret < 0) {
2580                 msg = ERR_PTR(ret);
2581                 goto out_free1;
2582         }
2583
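             /*
              * Front section: the request head, two ceph_encode_filepath()
              * blobs (one byte of encoding version, the ino and the string
              * length for each path, plus the path bytes), the timestamp,
              * the gid list and, below, room for any cap releases.
              */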
2584         len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
2585         len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2586                 sizeof(struct ceph_timespec);
2587         len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
2588
2589         /* calculate (max) length for cap releases */
2590         len += sizeof(struct ceph_mds_request_release) *
2591                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2592                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2593
2594         if (req->r_dentry_drop)
2595                 len += pathlen1;
2596         if (req->r_old_dentry_drop)
2597                 len += pathlen2;
2598
2599         msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2600         if (!msg) {
2601                 msg = ERR_PTR(-ENOMEM);
2602                 goto out_free2;
2603         }
2604
2605         msg->hdr.tid = cpu_to_le64(req->r_tid);
2606
2607         /*
2608          * The old ceph_mds_request_head didn't contain a version field, and
2609          * one was added when we moved the message version from 3->4.
2610          */
2611         if (legacy) {
2612                 msg->hdr.version = cpu_to_le16(3);
2613                 head = msg->front.iov_base;
2614                 p = msg->front.iov_base + sizeof(*head);
2615         } else {
2616                 struct ceph_mds_request_head *new_head = msg->front.iov_base;
2617
2618                 msg->hdr.version = cpu_to_le16(4);
2619                 new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
2620                 head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2621                 p = msg->front.iov_base + sizeof(*new_head);
2622         }
2623
2624         end = msg->front.iov_base + msg->front.iov_len;
2625
2626         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2627         head->op = cpu_to_le32(req->r_op);
2628         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
2629                                                  req->r_cred->fsuid));
2630         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
2631                                                  req->r_cred->fsgid));
2632         head->ino = cpu_to_le64(req->r_deleg_ino);
2633         head->args = req->r_args;
2634
2635         ceph_encode_filepath(&p, end, ino1, path1);
2636         ceph_encode_filepath(&p, end, ino2, path2);
2637
2638         /* make note of release offset, in case we need to replay */
2639         req->r_request_release_offset = p - msg->front.iov_base;
2640
2641         /* cap releases */
2642         releases = 0;
2643         if (req->r_inode_drop)
2644                 releases += ceph_encode_inode_release(&p,
2645                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2646                       mds, req->r_inode_drop, req->r_inode_unless,
2647                       req->r_op == CEPH_MDS_OP_READDIR);
2648         if (req->r_dentry_drop)
2649                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2650                                 req->r_parent, mds, req->r_dentry_drop,
2651                                 req->r_dentry_unless);
2652         if (req->r_old_dentry_drop)
2653                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2654                                 req->r_old_dentry_dir, mds,
2655                                 req->r_old_dentry_drop,
2656                                 req->r_old_dentry_unless);
2657         if (req->r_old_inode_drop)
2658                 releases += ceph_encode_inode_release(&p,
2659                       d_inode(req->r_old_dentry),
2660                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2661
2662         if (drop_cap_releases) {
2663                 releases = 0;
2664                 p = msg->front.iov_base + req->r_request_release_offset;
2665         }
2666
2667         head->num_releases = cpu_to_le16(releases);
2668
2669         encode_timestamp_and_gids(&p, req);
2670
2671         if (WARN_ON_ONCE(p > end)) {
2672                 ceph_msg_put(msg);
2673                 msg = ERR_PTR(-ERANGE);
2674                 goto out_free2;
2675         }
2676
2677         msg->front.iov_len = p - msg->front.iov_base;
2678         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2679
2680         if (req->r_pagelist) {
2681                 struct ceph_pagelist *pagelist = req->r_pagelist;
2682                 ceph_msg_data_add_pagelist(msg, pagelist);
2683                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2684         } else {
2685                 msg->hdr.data_len = 0;
2686         }
2687
2688         msg->hdr.data_off = cpu_to_le16(0);
2689
2690 out_free2:
2691         if (freepath2)
2692                 ceph_mdsc_free_path((char *)path2, pathlen2);
2693 out_free1:
2694         if (freepath1)
2695                 ceph_mdsc_free_path((char *)path1, pathlen1);
2696 out:
2697         return msg;
2698 }
2699
2700 /*
2701  * called under mdsc->mutex if error, under no mutex if
2702  * success.
2703  */
2704 static void complete_request(struct ceph_mds_client *mdsc,
2705                              struct ceph_mds_request *req)
2706 {
2707         req->r_end_latency = ktime_get();
2708
2709         if (req->r_callback)
2710                 req->r_callback(mdsc, req);
2711         complete_all(&req->r_completion);
2712 }
2713
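     /*
      * The legacy ceph_mds_request_head layout matches the tail of the new
      * ceph_mds_request_head starting at oldest_client_tid, so when the peer
      * speaks the newer format we can still return a pointer to the shared
      * fields.
      */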
2714 static struct ceph_mds_request_head_old *
2715 find_old_request_head(void *p, u64 features)
2716 {
2717         bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
2718         struct ceph_mds_request_head *new_head;
2719
2720         if (legacy)
2721                 return (struct ceph_mds_request_head_old *)p;
2722         new_head = (struct ceph_mds_request_head *)p;
2723         return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2724 }
2725
2726 /*
2727  * called under mdsc->mutex
2728  */
2729 static int __prepare_send_request(struct ceph_mds_session *session,
2730                                   struct ceph_mds_request *req,
2731                                   bool drop_cap_releases)
2732 {
2733         int mds = session->s_mds;
2734         struct ceph_mds_client *mdsc = session->s_mdsc;
2735         struct ceph_mds_request_head_old *rhead;
2736         struct ceph_msg *msg;
2737         int flags = 0, max_retry;
2738
2739         /*
2740          * The type of 'r_attempts' in kernel 'ceph_mds_request'
2741          * is 'int', while in 'ceph_mds_request_head' the type of
2742          * 'num_retry' is '__u8'.  So if the request is retried more
2743          * than 256 times, the MDS will receive an incorrect retry
2744          * seq.
2745          *
2746          * In that case it's usually a bug in the MDS, and continuing
2747          * to retry the request makes no sense.
2748          *
2749          * In the future this could be fixed in the ceph code, so the
2750          * limit is derived from the field size instead of hardcoded.
2751          */
2752         max_retry = sizeof_field(struct ceph_mds_request_head, num_retry);
2753         max_retry = 1 << (max_retry * BITS_PER_BYTE);
2754         if (req->r_attempts >= max_retry) {
2755                 pr_warn_ratelimited("%s request tid %llu seq overflow\n",
2756                                     __func__, req->r_tid);
2757                 return -EMULTIHOP;
2758         }
2759
2760         req->r_attempts++;
2761         if (req->r_inode) {
2762                 struct ceph_cap *cap =
2763                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2764
2765                 if (cap)
2766                         req->r_sent_on_mseq = cap->mseq;
2767                 else
2768                         req->r_sent_on_mseq = -1;
2769         }
2770         dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
2771              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2772
2773         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2774                 void *p;
2775
2776                 /*
2777                  * Replay.  Do not regenerate message (and rebuild
2778                  * paths, etc.); just use the original message.
2779                  * Rebuilding paths will break for renames because
2780                  * d_move mangles the src name.
2781                  */
2782                 msg = req->r_request;
2783                 rhead = find_old_request_head(msg->front.iov_base,
2784                                               session->s_con.peer_features);
2785
2786                 flags = le32_to_cpu(rhead->flags);
2787                 flags |= CEPH_MDS_FLAG_REPLAY;
2788                 rhead->flags = cpu_to_le32(flags);
2789
2790                 if (req->r_target_inode)
2791                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2792
2793                 rhead->num_retry = req->r_attempts - 1;
2794
2795                 /* remove cap/dentry releases from message */
2796                 rhead->num_releases = 0;
2797
2798                 p = msg->front.iov_base + req->r_request_release_offset;
2799                 encode_timestamp_and_gids(&p, req);
2800
2801                 msg->front.iov_len = p - msg->front.iov_base;
2802                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2803                 return 0;
2804         }
2805
2806         if (req->r_request) {
2807                 ceph_msg_put(req->r_request);
2808                 req->r_request = NULL;
2809         }
2810         msg = create_request_message(session, req, drop_cap_releases);
2811         if (IS_ERR(msg)) {
2812                 req->r_err = PTR_ERR(msg);
2813                 return PTR_ERR(msg);
2814         }
2815         req->r_request = msg;
2816
2817         rhead = find_old_request_head(msg->front.iov_base,
2818                                       session->s_con.peer_features);
2819         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2820         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2821                 flags |= CEPH_MDS_FLAG_REPLAY;
2822         if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2823                 flags |= CEPH_MDS_FLAG_ASYNC;
2824         if (req->r_parent)
2825                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2826         rhead->flags = cpu_to_le32(flags);
2827         rhead->num_fwd = req->r_num_fwd;
2828         rhead->num_retry = req->r_attempts - 1;
2829
2830         dout(" r_parent = %p\n", req->r_parent);
2831         return 0;
2832 }
2833
2834 /*
2835  * called under mdsc->mutex
2836  */
2837 static int __send_request(struct ceph_mds_session *session,
2838                           struct ceph_mds_request *req,
2839                           bool drop_cap_releases)
2840 {
2841         int err;
2842
2843         err = __prepare_send_request(session, req, drop_cap_releases);
2844         if (!err) {
2845                 ceph_msg_get(req->r_request);
2846                 ceph_con_send(&session->s_con, req->r_request);
2847         }
2848
2849         return err;
2850 }
2851
2852 /*
2853  * send request, or put it on the appropriate wait list.
2854  */
2855 static void __do_request(struct ceph_mds_client *mdsc,
2856                         struct ceph_mds_request *req)
2857 {
2858         struct ceph_mds_session *session = NULL;
2859         int mds = -1;
2860         int err = 0;
2861         bool random;
2862
2863         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2864                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2865                         __unregister_request(mdsc, req);
2866                 return;
2867         }
2868
2869         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
2870                 dout("do_request metadata corrupted\n");
2871                 err = -EIO;
2872                 goto finish;
2873         }
2874         if (req->r_timeout &&
2875             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2876                 dout("do_request timed out\n");
2877                 err = -ETIMEDOUT;
2878                 goto finish;
2879         }
2880         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2881                 dout("do_request forced umount\n");
2882                 err = -EIO;
2883                 goto finish;
2884         }
2885         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2886                 if (mdsc->mdsmap_err) {
2887                         err = mdsc->mdsmap_err;
2888                         dout("do_request mdsmap err %d\n", err);
2889                         goto finish;
2890                 }
2891                 if (mdsc->mdsmap->m_epoch == 0) {
2892                         dout("do_request no mdsmap, waiting for map\n");
2893                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2894                         return;
2895                 }
2896                 if (!(mdsc->fsc->mount_options->flags &
2897                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2898                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2899                         err = -EHOSTUNREACH;
2900                         goto finish;
2901                 }
2902         }
2903
2904         put_request_session(req);
2905
2906         mds = __choose_mds(mdsc, req, &random);
2907         if (mds < 0 ||
2908             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2909                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2910                         err = -EJUKEBOX;
2911                         goto finish;
2912                 }
2913                 dout("do_request no mds or not active, waiting for map\n");
2914                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2915                 return;
2916         }
2917
2918         /* get, open session */
2919         session = __ceph_lookup_mds_session(mdsc, mds);
2920         if (!session) {
2921                 session = register_session(mdsc, mds);
2922                 if (IS_ERR(session)) {
2923                         err = PTR_ERR(session);
2924                         goto finish;
2925                 }
2926         }
2927         req->r_session = ceph_get_mds_session(session);
2928
2929         dout("do_request mds%d session %p state %s\n", mds, session,
2930              ceph_session_state_name(session->s_state));
2931
2932         /*
2933          * Old ceph MDSs will crash when they see unknown ops.
2934          */
2935         if (req->r_feature_needed > 0 &&
2936             !test_bit(req->r_feature_needed, &session->s_features)) {
2937                 err = -EOPNOTSUPP;
2938                 goto out_session;
2939         }
2940
2941         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2942             session->s_state != CEPH_MDS_SESSION_HUNG) {
2943                 /*
2944                  * We cannot queue async requests since the caps and delegated
2945                  * inodes are bound to the session. Just return -EJUKEBOX and
2946                  * let the caller retry a sync request in that case.
2947                  */
2948                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2949                         err = -EJUKEBOX;
2950                         goto out_session;
2951                 }
2952
2953                 /*
2954                  * If the session has been REJECTED, then return a hard error,
2955                  * unless it's a CLEANRECOVER mount, in which case we'll queue
2956                  * it on the mdsc's waiting_for_map list.
2957                  */
2958                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2959                         if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
2960                                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2961                         else
2962                                 err = -EACCES;
2963                         goto out_session;
2964                 }
2965
2966                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2967                     session->s_state == CEPH_MDS_SESSION_CLOSING) {
2968                         err = __open_session(mdsc, session);
2969                         if (err)
2970                                 goto out_session;
2971                         /* retry the same mds later */
2972                         if (random)
2973                                 req->r_resend_mds = mds;
2974                 }
2975                 list_add(&req->r_wait, &session->s_waiting);
2976                 goto out_session;
2977         }
2978
2979         /* send request */
2980         req->r_resend_mds = -1;   /* forget any previous mds hint */
2981
2982         if (req->r_request_started == 0)   /* note request start time */
2983                 req->r_request_started = jiffies;
2984
2985         /*
2986          * For an async create we choose the auth MDS of the frag in the
2987          * parent directory to send the request to, and usually this works
2988          * fine.  But if that directory is migrated to another MDS before
2989          * it can handle the request, the request will be forwarded.
2990          *
2991          * In that case the auth cap will be changed as well.
2992          */
2993         if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
2994                 struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
2995                 struct ceph_inode_info *ci;
2996                 struct ceph_cap *cap;
2997
2998                 /*
2999                  * The request may be handled very quickly, before the new
3000                  * inode has been linked to the dentry.  When forwarding the
3001                  * request we need to wait for ceph_finish_async_create() to
3002                  * finish, which in theory should neither get stuck for long
3003                  * nor fail.
3004                  */
3005                 if (!d_inode(req->r_dentry)) {
3006                         err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT,
3007                                           TASK_KILLABLE);
3008                         if (err) {
3009                                 mutex_lock(&req->r_fill_mutex);
3010                                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3011                                 mutex_unlock(&req->r_fill_mutex);
3012                                 goto out_session;
3013                         }
3014                 }
3015
3016                 ci = ceph_inode(d_inode(req->r_dentry));
3017
3018                 spin_lock(&ci->i_ceph_lock);
3019                 cap = ci->i_auth_cap;
3020                 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
3021                         dout("do_request session changed for auth cap %d -> %d\n",
3022                              cap->session->s_mds, session->s_mds);
3023
3024                         /* Remove the auth cap from old session */
3025                         spin_lock(&cap->session->s_cap_lock);
3026                         cap->session->s_nr_caps--;
3027                         list_del_init(&cap->session_caps);
3028                         spin_unlock(&cap->session->s_cap_lock);
3029
3030                         /* Add the auth cap to the new session */
3031                         cap->mds = mds;
3032                         cap->session = session;
3033                         spin_lock(&session->s_cap_lock);
3034                         session->s_nr_caps++;
3035                         list_add_tail(&cap->session_caps, &session->s_caps);
3036                         spin_unlock(&session->s_cap_lock);
3037
3038                         change_auth_cap_ses(ci, session);
3039                 }
3040                 spin_unlock(&ci->i_ceph_lock);
3041         }
3042
3043         err = __send_request(session, req, false);
3044
3045 out_session:
3046         ceph_put_mds_session(session);
3047 finish:
3048         if (err) {
3049                 dout("__do_request early error %d\n", err);
3050                 req->r_err = err;
3051                 complete_request(mdsc, req);
3052                 __unregister_request(mdsc, req);
3053         }
3054         return;
3055 }
3056
3057 /*
3058  * called under mdsc->mutex
3059  */
3060 static void __wake_requests(struct ceph_mds_client *mdsc,
3061                             struct list_head *head)
3062 {
3063         struct ceph_mds_request *req;
3064         LIST_HEAD(tmp_list);
3065
3066         list_splice_init(head, &tmp_list);
3067
3068         while (!list_empty(&tmp_list)) {
3069                 req = list_entry(tmp_list.next,
3070                                  struct ceph_mds_request, r_wait);
3071                 list_del_init(&req->r_wait);
3072                 dout(" wake request %p tid %llu\n", req, req->r_tid);
3073                 __do_request(mdsc, req);
3074         }
3075 }
3076
3077 /*
3078  * Wake up threads with requests pending for @mds, so that they can
3079  * resubmit their requests to a possibly different mds.
3080  */
3081 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
3082 {
3083         struct ceph_mds_request *req;
3084         struct rb_node *p = rb_first(&mdsc->request_tree);
3085
3086         dout("kick_requests mds%d\n", mds);
3087         while (p) {
3088                 req = rb_entry(p, struct ceph_mds_request, r_node);
3089                 p = rb_next(p);
3090                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3091                         continue;
3092                 if (req->r_attempts > 0)
3093                         continue; /* only new requests */
3094                 if (req->r_session &&
3095                     req->r_session->s_mds == mds) {
3096                         dout(" kicking tid %llu\n", req->r_tid);
3097                         list_del_init(&req->r_wait);
3098                         __do_request(mdsc, req);
3099                 }
3100         }
3101 }
3102
3103 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3104                               struct ceph_mds_request *req)
3105 {
3106         int err = 0;
3107
3108         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
3109         if (req->r_inode)
3110                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3111         if (req->r_parent) {
3112                 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
3113                 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
3114                             CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
3115                 spin_lock(&ci->i_ceph_lock);
3116                 ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
3117                 __ceph_touch_fmode(ci, mdsc, fmode);
3118                 spin_unlock(&ci->i_ceph_lock);
3119         }
3120         if (req->r_old_dentry_dir)
3121                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3122                                   CEPH_CAP_PIN);
3123
3124         if (req->r_inode) {
3125                 err = ceph_wait_on_async_create(req->r_inode);
3126                 if (err) {
3127                         dout("%s: wait for async create returned: %d\n",
3128                              __func__, err);
3129                         return err;
3130                 }
3131         }
3132
3133         if (!err && req->r_old_inode) {
3134                 err = ceph_wait_on_async_create(req->r_old_inode);
3135                 if (err) {
3136                         dout("%s: wait for async create returned: %d\n",
3137                              __func__, err);
3138                         return err;
3139                 }
3140         }
3141
3142         dout("submit_request on %p for inode %p\n", req, dir);
3143         mutex_lock(&mdsc->mutex);
3144         __register_request(mdsc, req, dir);
3145         __do_request(mdsc, req);
3146         err = req->r_err;
3147         mutex_unlock(&mdsc->mutex);
3148         return err;
3149 }
3150
3151 int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
3152                            struct ceph_mds_request *req,
3153                            ceph_mds_request_wait_callback_t wait_func)
3154 {
3155         int err;
3156
3157         /* wait */
3158         dout("do_request waiting\n");
3159         if (wait_func) {
3160                 err = wait_func(mdsc, req);
3161         } else {
3162                 long timeleft = wait_for_completion_killable_timeout(
3163                                         &req->r_completion,
3164                                         ceph_timeout_jiffies(req->r_timeout));
3165                 if (timeleft > 0)
3166                         err = 0;
3167                 else if (!timeleft)
3168                         err = -ETIMEDOUT;  /* timed out */
3169                 else
3170                         err = timeleft;  /* killed */
3171         }
3172         dout("do_request waited, got %d\n", err);
3173         mutex_lock(&mdsc->mutex);
3174
3175         /* only abort if we didn't race with a real reply */
3176         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3177                 err = le32_to_cpu(req->r_reply_info.head->result);
3178         } else if (err < 0) {
3179                 dout("aborted request %lld with %d\n", req->r_tid, err);
3180
3181                 /*
3182                  * ensure we aren't running concurrently with
3183                  * ceph_fill_trace or ceph_readdir_prepopulate, which
3184                  * rely on locks (dir mutex) held by our caller.
3185                  */
3186                 mutex_lock(&req->r_fill_mutex);
3187                 req->r_err = err;
3188                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3189                 mutex_unlock(&req->r_fill_mutex);
3190
3191                 if (req->r_parent &&
3192                     (req->r_op & CEPH_MDS_OP_WRITE))
3193                         ceph_invalidate_dir_request(req);
3194         } else {
3195                 err = req->r_err;
3196         }
3197
3198         mutex_unlock(&mdsc->mutex);
3199         return err;
3200 }
3201
3202 /*
3203  * Synchronously perform an mds request.  Take care of all of the
3204  * session setup, forwarding, and retry details.
3205  */
3206 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3207                          struct inode *dir,
3208                          struct ceph_mds_request *req)
3209 {
3210         int err;
3211
3212         dout("do_request on %p\n", req);
3213
3214         /* issue */
3215         err = ceph_mdsc_submit_request(mdsc, dir, req);
3216         if (!err)
3217                 err = ceph_mdsc_wait_request(mdsc, req, NULL);
3218         dout("do_request %p done, result %d\n", req, err);
3219         return err;
3220 }
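/*
 * A minimal usage sketch (not part of the original file), modeled on
 * callers elsewhere in fs/ceph such as the getattr path; the specific
 * request fields set below are assumptions made for illustration only.
 */
#if 0   /* sketch only */
static int example_getattr(struct ceph_mds_client *mdsc,
                           struct inode *inode, int mask)
{
        struct ceph_mds_request *req;
        int err;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
                                       USE_ANY_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->r_inode = inode;
        ihold(inode);                   /* ref dropped when req is put */
        req->r_num_caps = 1;
        req->r_args.getattr.mask = cpu_to_le32(mask);

        /* submit, wait for the reply (or abort), then drop the request */
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
        return err;
}
#endif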
3221
3222 /*
3223  * Invalidate dir's completeness, dentry lease state on an aborted MDS
3224  * namespace request.
3225  */
3226 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3227 {
3228         struct inode *dir = req->r_parent;
3229         struct inode *old_dir = req->r_old_dentry_dir;
3230
3231         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
3232
3233         ceph_dir_clear_complete(dir);
3234         if (old_dir)
3235                 ceph_dir_clear_complete(old_dir);
3236         if (req->r_dentry)
3237                 ceph_invalidate_dentry_lease(req->r_dentry);
3238         if (req->r_old_dentry)
3239                 ceph_invalidate_dentry_lease(req->r_old_dentry);
3240 }
3241
3242 /*
3243  * Handle mds reply.
3244  *
3245  * We take the session mutex and parse and process the reply immediately.
3246  * This preserves the logical ordering of replies, capabilities, etc., sent
3247  * by the MDS as they are applied to our local cache.
3248  */
3249 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3250 {
3251         struct ceph_mds_client *mdsc = session->s_mdsc;
3252         struct ceph_mds_request *req;
3253         struct ceph_mds_reply_head *head = msg->front.iov_base;
3254         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
3255         struct ceph_snap_realm *realm;
3256         u64 tid;
3257         int err, result;
3258         int mds = session->s_mds;
3259         bool close_sessions = false;
3260
3261         if (msg->front.iov_len < sizeof(*head)) {
3262                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
3263                 ceph_msg_dump(msg);
3264                 return;
3265         }
3266
3267         /* get request, session */
3268         tid = le64_to_cpu(msg->hdr.tid);
3269         mutex_lock(&mdsc->mutex);
3270         req = lookup_get_request(mdsc, tid);
3271         if (!req) {
3272                 dout("handle_reply on unknown tid %llu\n", tid);
3273                 mutex_unlock(&mdsc->mutex);
3274                 return;
3275         }
3276         dout("handle_reply %p\n", req);
3277
3278         /* correct session? */
3279         if (req->r_session != session) {
3280                 pr_err("mdsc_handle_reply got %llu on session mds%d"
3281                        " not mds%d\n", tid, session->s_mds,
3282                        req->r_session ? req->r_session->s_mds : -1);
3283                 mutex_unlock(&mdsc->mutex);
3284                 goto out;
3285         }
3286
3287         /* dup? */
3288         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3289             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3290                 pr_warn("got a dup %s reply on %llu from mds%d\n",
3291                            head->safe ? "safe" : "unsafe", tid, mds);
3292                 mutex_unlock(&mdsc->mutex);
3293                 goto out;
3294         }
3295         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3296                 pr_warn("got unsafe after safe on %llu from mds%d\n",
3297                            tid, mds);
3298                 mutex_unlock(&mdsc->mutex);
3299                 goto out;
3300         }
3301
3302         result = le32_to_cpu(head->result);
3303
3304         if (head->safe) {
3305                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3306                 __unregister_request(mdsc, req);
3307
3308                 /* last request during umount? */
3309                 if (mdsc->stopping && !__get_oldest_req(mdsc))
3310                         complete_all(&mdsc->safe_umount_waiters);
3311
3312                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3313                         /*
3314                          * We already handled the unsafe response, now do the
3315                          * cleanup.  No need to examine the response; the MDS
3316                          * doesn't include any result info in the safe
3317                          * response.  And even if it did, there is nothing
3318                          * useful we could do with a revised return value.
3319                          */
3320                         dout("got safe reply %llu, mds%d\n", tid, mds);
3321
3322                         mutex_unlock(&mdsc->mutex);
3323                         goto out;
3324                 }
3325         } else {
3326                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3327                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3328         }
3329
3330         dout("handle_reply tid %lld result %d\n", tid, result);
3331         rinfo = &req->r_reply_info;
3332         if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3333                 err = parse_reply_info(session, msg, rinfo, (u64)-1);
3334         else
3335                 err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3336         mutex_unlock(&mdsc->mutex);
3337
3338         /* Must find target inode outside of mutexes to avoid deadlocks */
3339         if ((err >= 0) && rinfo->head->is_target) {
3340                 struct inode *in;
3341                 struct ceph_vino tvino = {
3342                         .ino  = le64_to_cpu(rinfo->targeti.in->ino),
3343                         .snap = le64_to_cpu(rinfo->targeti.in->snapid)
3344                 };
3345
3346                 in = ceph_get_inode(mdsc->fsc->sb, tvino);
3347                 if (IS_ERR(in)) {
3348                         err = PTR_ERR(in);
3349                         mutex_lock(&session->s_mutex);
3350                         goto out_err;
3351                 }
3352                 req->r_target_inode = in;
3353         }
3354
3355         mutex_lock(&session->s_mutex);
3356         if (err < 0) {
3357                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3358                 ceph_msg_dump(msg);
3359                 goto out_err;
3360         }
3361
3362         /* snap trace */
3363         realm = NULL;
3364         if (rinfo->snapblob_len) {
3365                 down_write(&mdsc->snap_rwsem);
3366                 err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
3367                                 rinfo->snapblob + rinfo->snapblob_len,
3368                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3369                                 &realm);
3370                 if (err) {
3371                         up_write(&mdsc->snap_rwsem);
3372                         close_sessions = true;
3373                         if (err == -EIO)
3374                                 ceph_msg_dump(msg);
3375                         goto out_err;
3376                 }
3377                 downgrade_write(&mdsc->snap_rwsem);
3378         } else {
3379                 down_read(&mdsc->snap_rwsem);
3380         }
3381
3382         /* insert trace into our cache */
3383         mutex_lock(&req->r_fill_mutex);
3384         current->journal_info = req;
3385         err = ceph_fill_trace(mdsc->fsc->sb, req);
3386         if (err == 0) {
3387                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3388                                     req->r_op == CEPH_MDS_OP_LSSNAP))
3389                         ceph_readdir_prepopulate(req, req->r_session);
3390         }
3391         current->journal_info = NULL;
3392         mutex_unlock(&req->r_fill_mutex);
3393
3394         up_read(&mdsc->snap_rwsem);
3395         if (realm)
3396                 ceph_put_snap_realm(mdsc, realm);
3397
3398         if (err == 0) {
3399                 if (req->r_target_inode &&
3400                     test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3401                         struct ceph_inode_info *ci =
3402                                 ceph_inode(req->r_target_inode);
3403                         spin_lock(&ci->i_unsafe_lock);
3404                         list_add_tail(&req->r_unsafe_target_item,
3405                                       &ci->i_unsafe_iops);
3406                         spin_unlock(&ci->i_unsafe_lock);
3407                 }
3408
3409                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3410         }
3411 out_err:
3412         mutex_lock(&mdsc->mutex);
3413         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3414                 if (err) {
3415                         req->r_err = err;
3416                 } else {
3417                         req->r_reply =  ceph_msg_get(msg);
3418                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3419                 }
3420         } else {
3421                 dout("reply arrived after request %lld was aborted\n", tid);
3422         }
3423         mutex_unlock(&mdsc->mutex);
3424
3425         mutex_unlock(&session->s_mutex);
3426
3427         /* kick calling process */
3428         complete_request(mdsc, req);
3429
3430         ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3431                                      req->r_end_latency, err);
3432 out:
3433         ceph_mdsc_put_request(req);
3434
3435         /* Defer closing the sessions until after the s_mutex lock is released */
3436         if (close_sessions)
3437                 ceph_mdsc_close_sessions(mdsc);
3438         return;
3439 }
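/*
 * Summary sketch of the unsafe/safe reply flow handled above (added for
 * clarity; a simplification of handle_reply(), not part of the original
 * file):
 *
 *   client                              mds
 *     |------------ request ------------>|
 *     |<--------- unsafe reply ----------|  CEPH_MDS_R_GOT_UNSAFE set,
 *     |                                  |  trace applied, req kept on
 *     |                                  |  the session's s_unsafe list
 *     |<---------- safe reply -----------|  CEPH_MDS_R_GOT_SAFE set, req
 *     |                                  |  unregistered, and
 *     |                                  |  safe_umount_waiters completed
 *     |                                  |  if it was the last request
 */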
3440
3441
3442
3443 /*
3444  * handle mds notification that our request has been forwarded.
3445  */
3446 static void handle_forward(struct ceph_mds_client *mdsc,
3447                            struct ceph_mds_session *session,
3448                            struct ceph_msg *msg)
3449 {
3450         struct ceph_mds_request *req;
3451         u64 tid = le64_to_cpu(msg->hdr.tid);
3452         u32 next_mds;
3453         u32 fwd_seq;
3454         int err = -EINVAL;
3455         void *p = msg->front.iov_base;
3456         void *end = p + msg->front.iov_len;
3457         bool aborted = false;
3458
3459         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3460         next_mds = ceph_decode_32(&p);
3461         fwd_seq = ceph_decode_32(&p);
3462
3463         mutex_lock(&mdsc->mutex);
3464         req = lookup_get_request(mdsc, tid);
3465         if (!req) {
3466                 mutex_unlock(&mdsc->mutex);
3467                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3468                 return;  /* dup reply? */
3469         }
3470
3471         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3472                 dout("forward tid %llu aborted, unregistering\n", tid);
3473                 __unregister_request(mdsc, req);
3474         } else if (fwd_seq <= req->r_num_fwd) {
3475                 /*
3476                  * The type of 'num_fwd' in ceph's 'MClientRequestForward'
3477                  * is 'int32_t', while in 'ceph_mds_request_head' the
3478                  * type is '__u8'.  So if the request bounces between
3479                  * MDSes more than 256 times, the client will get stuck.
3480                  *
3481                  * In that case it's usually a bug in the MDS, and continuing
3482                  * to bounce the request makes no sense.
3483                  *
3484                  * In the future this could be fixed in the ceph code, so
3485                  * avoid hardcoding the limit here.
3486                  */
3487                 int max = sizeof_field(struct ceph_mds_request_head, num_fwd);
3488                 max = 1 << (max * BITS_PER_BYTE);
3489                 if (req->r_num_fwd >= max) {
3490                         mutex_lock(&req->r_fill_mutex);
3491                         req->r_err = -EMULTIHOP;
3492                         set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3493                         mutex_unlock(&req->r_fill_mutex);
3494                         aborted = true;
3495                         pr_warn_ratelimited("forward tid %llu seq overflow\n",
3496                                             tid);
3497                 } else {
3498                         dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3499                              tid, next_mds, req->r_num_fwd, fwd_seq);
3500                 }
3501         } else {
3502                 /* resend. forward race not possible; mds would drop */
3503                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3504                 BUG_ON(req->r_err);
3505                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3506                 req->r_attempts = 0;
3507                 req->r_num_fwd = fwd_seq;
3508                 req->r_resend_mds = next_mds;
3509                 put_request_session(req);
3510                 __do_request(mdsc, req);
3511         }
3512         mutex_unlock(&mdsc->mutex);
3513
3514         /* kick calling process */
3515         if (aborted)
3516                 complete_request(mdsc, req);
3517         ceph_mdsc_put_request(req);
3518         return;
3519
3520 bad:
3521         pr_err("mdsc_handle_forward decode error err=%d\n", err);
3522 }
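/*
 * Worked example (added for clarity, not part of the original file):
 * num_fwd is a __u8 in struct ceph_mds_request_head, so the cap computed
 * above is 1 << (1 * BITS_PER_BYTE) == 256 forwards, after which the
 * request is aborted with -EMULTIHOP instead of bouncing forever.
 */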
3523
3524 static int __decode_session_metadata(void **p, void *end,
3525                                      bool *blocklisted)
3526 {
3527         /* map<string,string> */
3528         u32 n;
3529         bool err_str;
3530         ceph_decode_32_safe(p, end, n, bad);
3531         while (n-- > 0) {
3532                 u32 len;
3533                 ceph_decode_32_safe(p, end, len, bad);
3534                 ceph_decode_need(p, end, len, bad);
3535                 err_str = !strncmp(*p, "error_string", len);
3536                 *p += len;
3537                 ceph_decode_32_safe(p, end, len, bad);
3538                 ceph_decode_need(p, end, len, bad);
3539                 /*
3540                  * Match "blocklisted (blacklisted)" from newer MDSes,
3541                  * or "blacklisted" from older MDSes.
3542                  */
3543                 if (err_str && strnstr(*p, "blacklisted", len))
3544                         *blocklisted = true;
3545                 *p += len;
3546         }
3547         return 0;
3548 bad:
3549         return -1;
3550 }
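/*
 * Wire-format sketch of the metadata decoded above (inferred from the
 * decode loop; added for clarity, not part of the original file).  The
 * metadata is a ceph-encoded map<string,string>:
 *
 *      __le32 n;                               number of entries
 *      repeated n times:
 *              __le32 key_len;  u8 key[key_len];
 *              __le32 val_len;  u8 val[val_len];
 *
 * Only the "error_string" value is inspected here, to detect a
 * blocklisted client.
 */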
3551
3552 /*
3553  * handle a mds session control message
3554  */
3555 static void handle_session(struct ceph_mds_session *session,
3556                            struct ceph_msg *msg)
3557 {
3558         struct ceph_mds_client *mdsc = session->s_mdsc;
3559         int mds = session->s_mds;
3560         int msg_version = le16_to_cpu(msg->hdr.version);
3561         void *p = msg->front.iov_base;
3562         void *end = p + msg->front.iov_len;
3563         struct ceph_mds_session_head *h;
3564         u32 op;
3565         u64 seq, features = 0;
3566         int wake = 0;
3567         bool blocklisted = false;
3568
3569         /* decode */
3570         ceph_decode_need(&p, end, sizeof(*h), bad);
3571         h = p;
3572         p += sizeof(*h);
3573
3574         op = le32_to_cpu(h->op);
3575         seq = le64_to_cpu(h->seq);
3576
3577         if (msg_version >= 3) {
3578                 u32 len;
3579                 /* For versions >= 2 and < 5, decode the metadata; skip it
3580                  * otherwise, as it's handled via flags.
3581                  */
3582                 if (msg_version >= 5)
3583                         ceph_decode_skip_map(&p, end, string, string, bad);
3584                 else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
3585                         goto bad;
3586
3587                 /* version >= 3, feature bits */
3588                 ceph_decode_32_safe(&p, end, len, bad);
3589                 if (len) {
3590                         ceph_decode_64_safe(&p, end, features, bad);
3591                         p += len - sizeof(features);
3592                 }
3593         }
3594
3595         if (msg_version >= 5) {
3596                 u32 flags, len;
3597
3598                 /* version >= 4 */
3599                 ceph_decode_skip_16(&p, end, bad); /* struct_v, struct_cv */
3600                 ceph_decode_32_safe(&p, end, len, bad); /* len */
3601                 ceph_decode_skip_n(&p, end, len, bad); /* metric_spec */
3602
3603                 /* version >= 5, flags   */
3604                 ceph_decode_32_safe(&p, end, flags, bad);
3605                 if (flags & CEPH_SESSION_BLOCKLISTED) {
3606                         pr_warn("mds%d session blocklisted\n", session->s_mds);
3607                         blocklisted = true;
3608                 }
3609         }
3610
3611         mutex_lock(&mdsc->mutex);
3612         if (op == CEPH_SESSION_CLOSE) {
3613                 ceph_get_mds_session(session);
3614                 __unregister_session(mdsc, session);
3615         }
3616         /* FIXME: this ttl calculation is generous */
3617         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3618         mutex_unlock(&mdsc->mutex);
3619
3620         mutex_lock(&session->s_mutex);
3621
3622         dout("handle_session mds%d %s %p state %s seq %llu\n",
3623              mds, ceph_session_op_name(op), session,
3624              ceph_session_state_name(session->s_state), seq);
3625
3626         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3627                 session->s_state = CEPH_MDS_SESSION_OPEN;
3628                 pr_info("mds%d came back\n", session->s_mds);
3629         }
3630
3631         switch (op) {
3632         case CEPH_SESSION_OPEN:
3633                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3634                         pr_info("mds%d reconnect success\n", session->s_mds);
3635
3636                 if (session->s_state == CEPH_MDS_SESSION_OPEN) {
3637                         pr_notice("mds%d is already opened\n", session->s_mds);
3638                 } else {
3639                         session->s_state = CEPH_MDS_SESSION_OPEN;
3640                         session->s_features = features;
3641                         renewed_caps(mdsc, session, 0);
3642                         if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
3643                                      &session->s_features))
3644                                 metric_schedule_delayed(&mdsc->metric);
3645                 }
3646
3647                 /*
3648                  * The connection may have been broken and the client-side
3649                  * session reinitialized, so we need to update the seq
3650                  * anyway.
3651                  */
3652                 if (!session->s_seq && seq)
3653                         session->s_seq = seq;
3654
3655                 wake = 1;
3656                 if (mdsc->stopping)
3657                         __close_session(mdsc, session);
3658                 break;
3659
3660         case CEPH_SESSION_RENEWCAPS:
3661                 if (session->s_renew_seq == seq)
3662                         renewed_caps(mdsc, session, 1);
3663                 break;
3664
3665         case CEPH_SESSION_CLOSE:
3666                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3667                         pr_info("mds%d reconnect denied\n", session->s_mds);
3668                 session->s_state = CEPH_MDS_SESSION_CLOSED;
3669                 cleanup_session_requests(mdsc, session);
3670                 remove_session_caps(session);
3671                 wake = 2; /* for good measure */
3672                 wake_up_all(&mdsc->session_close_wq);
3673                 break;
3674
3675         case CEPH_SESSION_STALE:
3676                 pr_info("mds%d caps went stale, renewing\n",
3677                         session->s_mds);
3678                 atomic_inc(&session->s_cap_gen);
3679                 session->s_cap_ttl = jiffies - 1;
3680                 send_renew_caps(mdsc, session);
3681                 break;
3682
3683         case CEPH_SESSION_RECALL_STATE:
3684                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3685                 break;
3686
3687         case CEPH_SESSION_FLUSHMSG:
3688                 /* flush cap releases */
3689                 spin_lock(&session->s_cap_lock);
3690                 if (session->s_num_cap_releases)
3691                         ceph_flush_cap_releases(mdsc, session);
3692                 spin_unlock(&session->s_cap_lock);
3693
3694                 send_flushmsg_ack(mdsc, session, seq);
3695                 break;
3696
3697         case CEPH_SESSION_FORCE_RO:
3698                 dout("force_session_readonly %p\n", session);
3699                 spin_lock(&session->s_cap_lock);
3700                 session->s_readonly = true;
3701                 spin_unlock(&session->s_cap_lock);
3702                 wake_up_session_caps(session, FORCE_RO);
3703                 break;
3704
3705         case CEPH_SESSION_REJECT:
3706                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3707                 pr_info("mds%d rejected session\n", session->s_mds);
3708                 session->s_state = CEPH_MDS_SESSION_REJECTED;
3709                 cleanup_session_requests(mdsc, session);
3710                 remove_session_caps(session);
3711                 if (blocklisted)
3712                         mdsc->fsc->blocklisted = true;
3713                 wake = 2; /* for good measure */
3714                 break;
3715
3716         default:
3717                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3718                 WARN_ON(1);
3719         }
3720
3721         mutex_unlock(&session->s_mutex);
3722         if (wake) {
3723                 mutex_lock(&mdsc->mutex);
3724                 __wake_requests(mdsc, &session->s_waiting);
3725                 if (wake == 2)
3726                         kick_requests(mdsc, mds);
3727                 mutex_unlock(&mdsc->mutex);
3728         }
3729         if (op == CEPH_SESSION_CLOSE)
3730                 ceph_put_mds_session(session);
3731         return;
3732
3733 bad:
3734         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3735                (int)msg->front.iov_len);
3736         ceph_msg_dump(msg);
3737         return;
3738 }
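/*
 * Layout sketch of the session message decoded above (inferred from the
 * decode sequence in handle_session(); not part of the original file):
 *
 *      struct ceph_mds_session_head h;         always present
 *      if msg version >= 3:
 *              metadata map<string,string>     skipped when version >= 5,
 *                                              blocklisting comes via flags
 *              __le32 len; feature bits        first 8 bytes -> s_features
 *      if msg version >= 5:
 *              struct_v, struct_cv, __le32 len, metric_spec[len]
 *              __le32 flags                    e.g. CEPH_SESSION_BLOCKLISTED
 */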
3739
3740 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3741 {
3742         int dcaps;
3743
3744         dcaps = xchg(&req->r_dir_caps, 0);
3745         if (dcaps) {
3746                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3747                 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3748         }
3749 }
3750
3751 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3752 {
3753         int dcaps;
3754
3755         dcaps = xchg(&req->r_dir_caps, 0);
3756         if (dcaps) {
3757                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3758                 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3759                                                 dcaps);
3760         }
3761 }
3762
3763 /*
3764  * called under session->mutex.
3765  */
3766 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3767                                    struct ceph_mds_session *session)
3768 {
3769         struct ceph_mds_request *req, *nreq;
3770         struct rb_node *p;
3771
3772         dout("replay_unsafe_requests mds%d\n", session->s_mds);
3773
3774         mutex_lock(&mdsc->mutex);
3775         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3776                 __send_request(session, req, true);
3777
3778         /*
3779          * Also re-send old requests when the MDS enters the reconnect stage,
3780          * so that the MDS can process completed requests in clientreplay stage.
3781          */
3782         p = rb_first(&mdsc->request_tree);
3783         while (p) {
3784                 req = rb_entry(p, struct ceph_mds_request, r_node);
3785                 p = rb_next(p);
3786                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3787                         continue;
3788                 if (req->r_attempts == 0)
3789                         continue; /* only old requests */
3790                 if (!req->r_session)
3791                         continue;
3792                 if (req->r_session->s_mds != session->s_mds)
3793                         continue;
3794
3795                 ceph_mdsc_release_dir_caps_no_check(req);
3796
3797                 __send_request(session, req, true);
3798         }
3799         mutex_unlock(&mdsc->mutex);
3800 }
3801
3802 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3803 {
3804         struct ceph_msg *reply;
3805         struct ceph_pagelist *_pagelist;
3806         struct page *page;
3807         __le32 *addr;
3808         int err = -ENOMEM;
3809
3810         if (!recon_state->allow_multi)
3811                 return -ENOSPC;
3812
3813         /* can't handle a message that contains both caps and realms */
3814         BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3815
3816         /* pre-allocate new pagelist */
3817         _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3818         if (!_pagelist)
3819                 return -ENOMEM;
3820
3821         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3822         if (!reply)
3823                 goto fail_msg;
3824
3825         /* placeholder for nr_caps */
3826         err = ceph_pagelist_encode_32(_pagelist, 0);
3827         if (err < 0)
3828                 goto fail;
3829
3830         if (recon_state->nr_caps) {
3831                 /* currently encoding caps */
3832                 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3833                 if (err)
3834                         goto fail;
3835         } else {
3836                 /* placeholder for nr_realms (currently encoding realms) */
3837                 err = ceph_pagelist_encode_32(_pagelist, 0);
3838                 if (err < 0)
3839                         goto fail;
3840         }
3841
3842         err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3843         if (err)
3844                 goto fail;
3845
3846         page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3847         addr = kmap_atomic(page);
3848         if (recon_state->nr_caps) {
3849                 /* currently encoding caps */
3850                 *addr = cpu_to_le32(recon_state->nr_caps);
3851         } else {
3852                 /* currently encoding realms */
3853                 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3854         }
3855         kunmap_atomic(addr);
3856
3857         reply->hdr.version = cpu_to_le16(5);
3858         reply->hdr.compat_version = cpu_to_le16(4);
3859
3860         reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3861         ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3862
3863         ceph_con_send(&recon_state->session->s_con, reply);
3864         ceph_pagelist_release(recon_state->pagelist);
3865
3866         recon_state->pagelist = _pagelist;
3867         recon_state->nr_caps = 0;
3868         recon_state->nr_realms = 0;
3869         recon_state->msg_version = 5;
3870         return 0;
3871 fail:
3872         ceph_msg_put(reply);
3873 fail_msg:
3874         ceph_pagelist_release(_pagelist);
3875         return err;
3876 }
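/*
 * Usage sketch (not part of the original file): callers keep encoding
 * into recon_state->pagelist and flush a partial CEPH_MSG_CLIENT_RECONNECT
 * whenever the next record would push the pagelist past RECONNECT_MAX_SIZE,
 * as reconnect_caps_cb() and encode_snap_realms() do below.
 */
#if 0   /* sketch only -- mirrors the pattern used later in this file */
        if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
                err = send_reconnect_partial(recon_state);
                if (err)
                        goto out_err;
                /* keep encoding into the freshly allocated pagelist */
                pagelist = recon_state->pagelist;
        }
#endif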
3877
3878 static struct dentry* d_find_primary(struct inode *inode)
3879 {
3880         struct dentry *alias, *dn = NULL;
3881
3882         if (hlist_empty(&inode->i_dentry))
3883                 return NULL;
3884
3885         spin_lock(&inode->i_lock);
3886         if (hlist_empty(&inode->i_dentry))
3887                 goto out_unlock;
3888
3889         if (S_ISDIR(inode->i_mode)) {
3890                 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
3891                 if (!IS_ROOT(alias))
3892                         dn = dget(alias);
3893                 goto out_unlock;
3894         }
3895
3896         hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
3897                 spin_lock(&alias->d_lock);
3898                 if (!d_unhashed(alias) &&
3899                     (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
3900                         dn = dget_dlock(alias);
3901                 }
3902                 spin_unlock(&alias->d_lock);
3903                 if (dn)
3904                         break;
3905         }
3906 out_unlock:
3907         spin_unlock(&inode->i_lock);
3908         return dn;
3909 }
3910
3911 /*
3912  * Encode information about a cap for a reconnect with the MDS.
3913  */
3914 static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3915                           void *arg)
3916 {
3917         union {
3918                 struct ceph_mds_cap_reconnect v2;
3919                 struct ceph_mds_cap_reconnect_v1 v1;
3920         } rec;
3921         struct ceph_inode_info *ci = cap->ci;
3922         struct ceph_reconnect_state *recon_state = arg;
3923         struct ceph_pagelist *pagelist = recon_state->pagelist;
3924         struct dentry *dentry;
3925         char *path;
3926         int pathlen = 0, err;
3927         u64 pathbase;
3928         u64 snap_follows;
3929
3930         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3931              inode, ceph_vinop(inode), cap, cap->cap_id,
3932              ceph_cap_string(cap->issued));
3933
3934         dentry = d_find_primary(inode);
3935         if (dentry) {
3936                 /* set pathbase to parent dir when msg_version >= 2 */
3937                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
3938                                             recon_state->msg_version >= 2);
3939                 dput(dentry);
3940                 if (IS_ERR(path)) {
3941                         err = PTR_ERR(path);
3942                         goto out_err;
3943                 }
3944         } else {
3945                 path = NULL;
3946                 pathbase = 0;
3947         }
3948
3949         spin_lock(&ci->i_ceph_lock);
3950         cap->seq = 0;        /* reset cap seq */
3951         cap->issue_seq = 0;  /* and issue_seq */
3952         cap->mseq = 0;       /* and migrate_seq */
3953         cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
3954
3955         /* These are lost when the session goes away */
3956         if (S_ISDIR(inode->i_mode)) {
3957                 if (cap->issued & CEPH_CAP_DIR_CREATE) {
3958                         ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3959                         memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3960                 }
3961                 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3962         }
3963
3964         if (recon_state->msg_version >= 2) {
3965                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3966                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3967                 rec.v2.issued = cpu_to_le32(cap->issued);
3968                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3969                 rec.v2.pathbase = cpu_to_le64(pathbase);
3970                 rec.v2.flock_len = (__force __le32)
3971                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3972         } else {
3973                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3974                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3975                 rec.v1.issued = cpu_to_le32(cap->issued);
3976                 rec.v1.size = cpu_to_le64(i_size_read(inode));
3977                 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3978                 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3979                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3980                 rec.v1.pathbase = cpu_to_le64(pathbase);
3981         }
3982
3983         if (list_empty(&ci->i_cap_snaps)) {
3984                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3985         } else {
3986                 struct ceph_cap_snap *capsnap =
3987                         list_first_entry(&ci->i_cap_snaps,
3988                                          struct ceph_cap_snap, ci_item);
3989                 snap_follows = capsnap->follows;
3990         }
3991         spin_unlock(&ci->i_ceph_lock);
3992
3993         if (recon_state->msg_version >= 2) {
3994                 int num_fcntl_locks, num_flock_locks;
3995                 struct ceph_filelock *flocks = NULL;
3996                 size_t struct_len, total_len = sizeof(u64);
3997                 u8 struct_v = 0;
3998
3999 encode_again:
4000                 if (rec.v2.flock_len) {
4001                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
4002                 } else {
4003                         num_fcntl_locks = 0;
4004                         num_flock_locks = 0;
4005                 }
4006                 if (num_fcntl_locks + num_flock_locks > 0) {
4007                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
4008                                                sizeof(struct ceph_filelock),
4009                                                GFP_NOFS);
4010                         if (!flocks) {
4011                                 err = -ENOMEM;
4012                                 goto out_err;
4013                         }
4014                         err = ceph_encode_locks_to_buffer(inode, flocks,
4015                                                           num_fcntl_locks,
4016                                                           num_flock_locks);
4017                         if (err) {
4018                                 kfree(flocks);
4019                                 flocks = NULL;
4020                                 if (err == -ENOSPC)
4021                                         goto encode_again;
4022                                 goto out_err;
4023                         }
4024                 } else {
4025                         kfree(flocks);
4026                         flocks = NULL;
4027                 }
4028
4029                 if (recon_state->msg_version >= 3) {
4030                         /* version, compat_version and struct_len */
4031                         total_len += 2 * sizeof(u8) + sizeof(u32);
4032                         struct_v = 2;
4033                 }
4034                 /*
4035                  * The number of encoded locks is stable, so copy them to the pagelist.
4036                  */
4037                 struct_len = 2 * sizeof(u32) +
4038                             (num_fcntl_locks + num_flock_locks) *
4039                             sizeof(struct ceph_filelock);
4040                 rec.v2.flock_len = cpu_to_le32(struct_len);
4041
4042                 struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
4043
4044                 if (struct_v >= 2)
4045                         struct_len += sizeof(u64); /* snap_follows */
4046
4047                 total_len += struct_len;
4048
4049                 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
4050                         err = send_reconnect_partial(recon_state);
4051                         if (err)
4052                                 goto out_freeflocks;
4053                         pagelist = recon_state->pagelist;
4054                 }
4055
4056                 err = ceph_pagelist_reserve(pagelist, total_len);
4057                 if (err)
4058                         goto out_freeflocks;
4059
4060                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
4061                 if (recon_state->msg_version >= 3) {
4062                         ceph_pagelist_encode_8(pagelist, struct_v);
4063                         ceph_pagelist_encode_8(pagelist, 1);
4064                         ceph_pagelist_encode_32(pagelist, struct_len);
4065                 }
4066                 ceph_pagelist_encode_string(pagelist, path, pathlen);
4067                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
4068                 ceph_locks_to_pagelist(flocks, pagelist,
4069                                        num_fcntl_locks, num_flock_locks);
4070                 if (struct_v >= 2)
4071                         ceph_pagelist_encode_64(pagelist, snap_follows);
4072 out_freeflocks:
4073                 kfree(flocks);
4074         } else {
4075                 err = ceph_pagelist_reserve(pagelist,
4076                                             sizeof(u64) + sizeof(u32) +
4077                                             pathlen + sizeof(rec.v1));
4078                 if (err)
4079                         goto out_err;
4080
4081                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
4082                 ceph_pagelist_encode_string(pagelist, path, pathlen);
4083                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
4084         }
4085
4086 out_err:
4087         ceph_mdsc_free_path(path, pathlen);
4088         if (!err)
4089                 recon_state->nr_caps++;
4090         return err;
4091 }
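/*
 * Per-cap record layout sketch for msg_version >= 3 (inferred from the
 * encoding above; added for clarity, not part of the original file):
 *
 *      __le64 ino
 *      u8 struct_v, u8 compat (1), __le32 struct_len
 *      string path (__le32 len + bytes)
 *      struct ceph_mds_cap_reconnect           (rec.v2)
 *      flock blob (__le32 num_fcntl, __le32 num_flock, filelock entries)
 *      __le64 snap_follows                     (only when struct_v >= 2)
 */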
4092
4093 static int encode_snap_realms(struct ceph_mds_client *mdsc,
4094                               struct ceph_reconnect_state *recon_state)
4095 {
4096         struct rb_node *p;
4097         struct ceph_pagelist *pagelist = recon_state->pagelist;
4098         int err = 0;
4099
4100         if (recon_state->msg_version >= 4) {
4101                 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
4102                 if (err < 0)
4103                         goto fail;
4104         }
4105
4106         /*
4107          * snaprealms.  we provide mds with the ino, seq (version), and
4108          * parent for all of our realms.  If the mds has any newer info,
4109          * it will tell us.
4110          */
4111         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
4112                 struct ceph_snap_realm *realm =
4113                        rb_entry(p, struct ceph_snap_realm, node);
4114                 struct ceph_mds_snaprealm_reconnect sr_rec;
4115
4116                 if (recon_state->msg_version >= 4) {
4117                         size_t need = sizeof(u8) * 2 + sizeof(u32) +
4118                                       sizeof(sr_rec);
4119
4120                         if (pagelist->length + need > RECONNECT_MAX_SIZE) {
4121                                 err = send_reconnect_partial(recon_state);
4122                                 if (err)
4123                                         goto fail;
4124                                 pagelist = recon_state->pagelist;
4125                         }
4126
4127                         err = ceph_pagelist_reserve(pagelist, need);
4128                         if (err)
4129                                 goto fail;
4130
4131                         ceph_pagelist_encode_8(pagelist, 1);
4132                         ceph_pagelist_encode_8(pagelist, 1);
4133                         ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
4134                 }
4135
4136                 dout(" adding snap realm %llx seq %lld parent %llx\n",
4137                      realm->ino, realm->seq, realm->parent_ino);
4138                 sr_rec.ino = cpu_to_le64(realm->ino);
4139                 sr_rec.seq = cpu_to_le64(realm->seq);
4140                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
4141
4142                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
4143                 if (err)
4144                         goto fail;
4145
4146                 recon_state->nr_realms++;
4147         }
4148 fail:
4149         return err;
4150 }
4151
4152
4153 /*
4154  * If an MDS fails and recovers, clients need to reconnect in order to
4155  * reestablish shared state.  This includes all caps issued through
4156  * this session _and_ the snap_realm hierarchy.  Because it's not
4157  * clear which snap realms the mds cares about, we send everything we
4158  * know about; that ensures we'll then get any new info the
4159  * recovering MDS might have.
4160  *
4161  * This is a relatively heavyweight operation, but it's rare.
4162  */
4163 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
4164                                struct ceph_mds_session *session)
4165 {
4166         struct ceph_msg *reply;
4167         int mds = session->s_mds;
4168         int err = -ENOMEM;
4169         struct ceph_reconnect_state recon_state = {
4170                 .session = session,
4171         };
4172         LIST_HEAD(dispose);
4173
4174         pr_info("mds%d reconnect start\n", mds);
4175
4176         recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
4177         if (!recon_state.pagelist)
4178                 goto fail_nopagelist;
4179
4180         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
4181         if (!reply)
4182                 goto fail_nomsg;
4183
4184         xa_destroy(&session->s_delegated_inos);
4185
4186         mutex_lock(&session->s_mutex);
4187         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
4188         session->s_seq = 0;
4189
4190         dout("session %p state %s\n", session,
4191              ceph_session_state_name(session->s_state));
4192
4193         atomic_inc(&session->s_cap_gen);
4194
4195         spin_lock(&session->s_cap_lock);
4196         /* don't know if session is readonly */
4197         session->s_readonly = 0;
4198         /*
4199          * Notify __ceph_remove_cap() that we are composing the cap reconnect.
4200          * If a cap gets released before being added to the cap reconnect,
4201          * __ceph_remove_cap() should skip queuing the cap release.
4202          */
4203         session->s_cap_reconnect = 1;
4204         /* drop old cap expires; we're about to reestablish that state */
4205         detach_cap_releases(session, &dispose);
4206         spin_unlock(&session->s_cap_lock);
4207         dispose_cap_releases(mdsc, &dispose);
4208
4209         /* trim unused caps to reduce MDS's cache rejoin time */
4210         if (mdsc->fsc->sb->s_root)
4211                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
4212
4213         ceph_con_close(&session->s_con);
4214         ceph_con_open(&session->s_con,
4215                       CEPH_ENTITY_TYPE_MDS, mds,
4216                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4217
4218         /* replay unsafe requests */
4219         replay_unsafe_requests(mdsc, session);
4220
4221         ceph_early_kick_flushing_caps(mdsc, session);
4222
4223         down_read(&mdsc->snap_rwsem);
4224
4225         /* placeholder for nr_caps */
4226         err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4227         if (err)
4228                 goto fail;
4229
4230         if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
4231                 recon_state.msg_version = 3;
4232                 recon_state.allow_multi = true;
4233         } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
4234                 recon_state.msg_version = 3;
4235         } else {
4236                 recon_state.msg_version = 2;
4237         }
4238         /* traverse this session's caps */
4239         err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4240
4241         spin_lock(&session->s_cap_lock);
4242         session->s_cap_reconnect = 0;
4243         spin_unlock(&session->s_cap_lock);
4244
4245         if (err < 0)
4246                 goto fail;
4247
4248         /* check if all realms can be encoded into current message */
4249         if (mdsc->num_snap_realms) {
4250                 size_t total_len =
4251                         recon_state.pagelist->length +
4252                         mdsc->num_snap_realms *
4253                         sizeof(struct ceph_mds_snaprealm_reconnect);
4254                 if (recon_state.msg_version >= 4) {
4255                         /* number of realms */
4256                         total_len += sizeof(u32);
4257                         /* version, compat_version and struct_len */
4258                         total_len += mdsc->num_snap_realms *
4259                                      (2 * sizeof(u8) + sizeof(u32));
4260                 }
4261                 if (total_len > RECONNECT_MAX_SIZE) {
4262                         if (!recon_state.allow_multi) {
4263                                 err = -ENOSPC;
4264                                 goto fail;
4265                         }
4266                         if (recon_state.nr_caps) {
4267                                 err = send_reconnect_partial(&recon_state);
4268                                 if (err)
4269                                         goto fail;
4270                         }
4271                         recon_state.msg_version = 5;
4272                 }
4273         }
4274
4275         err = encode_snap_realms(mdsc, &recon_state);
4276         if (err < 0)
4277                 goto fail;
4278
4279         if (recon_state.msg_version >= 5) {
4280                 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4281                 if (err < 0)
4282                         goto fail;
4283         }
4284
4285         if (recon_state.nr_caps || recon_state.nr_realms) {
4286                 struct page *page =
4287                         list_first_entry(&recon_state.pagelist->head,
4288                                         struct page, lru);
4289                 __le32 *addr = kmap_atomic(page);
4290                 if (recon_state.nr_caps) {
4291                         WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4292                         *addr = cpu_to_le32(recon_state.nr_caps);
4293                 } else if (recon_state.msg_version >= 4) {
4294                         *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4295                 }
4296                 kunmap_atomic(addr);
4297         }
4298
4299         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4300         if (recon_state.msg_version >= 4)
4301                 reply->hdr.compat_version = cpu_to_le16(4);
4302
4303         reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4304         ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4305
4306         ceph_con_send(&session->s_con, reply);
4307
4308         mutex_unlock(&session->s_mutex);
4309
4310         mutex_lock(&mdsc->mutex);
4311         __wake_requests(mdsc, &session->s_waiting);
4312         mutex_unlock(&mdsc->mutex);
4313
4314         up_read(&mdsc->snap_rwsem);
4315         ceph_pagelist_release(recon_state.pagelist);
4316         return;
4317
4318 fail:
4319         ceph_msg_put(reply);
4320         up_read(&mdsc->snap_rwsem);
4321         mutex_unlock(&session->s_mutex);
4322 fail_nomsg:
4323         ceph_pagelist_release(recon_state.pagelist);
4324 fail_nopagelist:
4325         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
4326         return;
4327 }
4328
4329
4330 /*
4331  * compare old and new mdsmaps, kicking requests
4332  * and closing out old connections as necessary
4333  *
4334  * called under mdsc->mutex.
4335  */
4336 static void check_new_map(struct ceph_mds_client *mdsc,
4337                           struct ceph_mdsmap *newmap,
4338                           struct ceph_mdsmap *oldmap)
4339 {
4340         int i, j, err;
4341         int oldstate, newstate;
4342         struct ceph_mds_session *s;
4343         DECLARE_BITMAP(targets, CEPH_MAX_MDS) = {0};
4344
4345         dout("check_new_map new %u old %u\n",
4346              newmap->m_epoch, oldmap->m_epoch);
4347
4348         if (newmap->m_info) {
4349                 for (i = 0; i < newmap->possible_max_rank; i++) {
4350                         for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
4351                                 set_bit(newmap->m_info[i].export_targets[j], targets);
4352                 }
4353         }
4354
4355         for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4356                 if (!mdsc->sessions[i])
4357                         continue;
4358                 s = mdsc->sessions[i];
4359                 oldstate = ceph_mdsmap_get_state(oldmap, i);
4360                 newstate = ceph_mdsmap_get_state(newmap, i);
4361
4362                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
4363                      i, ceph_mds_state_name(oldstate),
4364                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
4365                      ceph_mds_state_name(newstate),
4366                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4367                      ceph_session_state_name(s->s_state));
4368
4369                 if (i >= newmap->possible_max_rank) {
4370                         /* force close session for stopped mds */
4371                         ceph_get_mds_session(s);
4372                         __unregister_session(mdsc, s);
4373                         __wake_requests(mdsc, &s->s_waiting);
4374                         mutex_unlock(&mdsc->mutex);
4375
4376                         mutex_lock(&s->s_mutex);
4377                         cleanup_session_requests(mdsc, s);
4378                         remove_session_caps(s);
4379                         mutex_unlock(&s->s_mutex);
4380
4381                         ceph_put_mds_session(s);
4382
4383                         mutex_lock(&mdsc->mutex);
4384                         kick_requests(mdsc, i);
4385                         continue;
4386                 }
4387
4388                 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4389                            ceph_mdsmap_get_addr(newmap, i),
4390                            sizeof(struct ceph_entity_addr))) {
4391                         /* just close it */
4392                         mutex_unlock(&mdsc->mutex);
4393                         mutex_lock(&s->s_mutex);
4394                         mutex_lock(&mdsc->mutex);
4395                         ceph_con_close(&s->s_con);
4396                         mutex_unlock(&s->s_mutex);
4397                         s->s_state = CEPH_MDS_SESSION_RESTARTING;
4398                 } else if (oldstate == newstate) {
4399                         continue;  /* nothing new with this mds */
4400                 }
4401
4402                 /*
4403                  * send reconnect?
4404                  */
4405                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4406                     newstate >= CEPH_MDS_STATE_RECONNECT) {
4407                         mutex_unlock(&mdsc->mutex);
4408                         clear_bit(i, targets);
4409                         send_mds_reconnect(mdsc, s);
4410                         mutex_lock(&mdsc->mutex);
4411                 }
4412
4413                 /*
4414          * kick requests on any mds that has gone active.
4415                  */
4416                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4417                     newstate >= CEPH_MDS_STATE_ACTIVE) {
4418                         if (oldstate != CEPH_MDS_STATE_CREATING &&
4419                             oldstate != CEPH_MDS_STATE_STARTING)
4420                                 pr_info("mds%d recovery completed\n", s->s_mds);
4421                         kick_requests(mdsc, i);
4422                         mutex_unlock(&mdsc->mutex);
4423                         mutex_lock(&s->s_mutex);
4424                         mutex_lock(&mdsc->mutex);
4425                         ceph_kick_flushing_caps(mdsc, s);
4426                         mutex_unlock(&s->s_mutex);
4427                         wake_up_session_caps(s, RECONNECT);
4428                 }
4429         }
4430
4431         /*
4432          * Only open and reconnect sessions that don't exist yet.
4433          */
4434         for (i = 0; i < newmap->possible_max_rank; i++) {
4435                 /*
4436                  * If the importing MDS crashes just after the
4437                  * EImportStart journal entry is flushed, then when a
4438                  * standby MDS takes over and replays that journal the
4439                  * new MDS daemon will wait for the client to reconnect,
4440                  * but the client may never have registered/opened the
4441                  * session.
4442                  *
4443                  * Try to reconnect to that MDS daemon if its rank is in
4444                  * the export targets array and it is in the
4445                  * up:reconnect state.
4446                  */
4447                 newstate = ceph_mdsmap_get_state(newmap, i);
4448                 if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
4449                         continue;
4450
4451                 /*
4452                  * In rare cases the session may already have been
4453                  * registered and opened by requests that picked a random
4454                  * MDS during the mdsc->mutex unlock/lock gap below.  The
4455                  * corresponding MDS daemon will simply queue those
4456                  * requests and keep waiting for the client's reconnect
4457                  * request in the up:reconnect state.
4458                  */
4459                 s = __ceph_lookup_mds_session(mdsc, i);
4460                 if (likely(!s)) {
4461                         s = __open_export_target_session(mdsc, i);
4462                         if (IS_ERR(s)) {
4463                                 err = PTR_ERR(s);
4464                                 pr_err("failed to open export target session, err %d\n",
4465                                        err);
4466                                 continue;
4467                         }
4468                 }
4469                 dout("send reconnect to export target mds.%d\n", i);
4470                 mutex_unlock(&mdsc->mutex);
4471                 send_mds_reconnect(mdsc, s);
4472                 ceph_put_mds_session(s);
4473                 mutex_lock(&mdsc->mutex);
4474         }
4475
4476         for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4477                 s = mdsc->sessions[i];
4478                 if (!s)
4479                         continue;
4480                 if (!ceph_mdsmap_is_laggy(newmap, i))
4481                         continue;
4482                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4483                     s->s_state == CEPH_MDS_SESSION_HUNG ||
4484                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
4485                         dout(" connecting to export targets of laggy mds%d\n",
4486                              i);
4487                         __open_export_target_sessions(mdsc, s);
4488                 }
4489         }
4490 }
4491
4492
4493
4494 /*
4495  * leases
4496  */
4497
4498 /*
4499  * caller must hold session s_mutex, dentry->d_lock
4500  */
4501 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4502 {
4503         struct ceph_dentry_info *di = ceph_dentry(dentry);
4504
4505         ceph_put_mds_session(di->lease_session);
4506         di->lease_session = NULL;
4507 }
4508
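/*
 * Handle a dentry lease message from the MDS: process a lease revoke or
 * renew for the named dentry, and acknowledge revokes by sending the
 * (reused) message back as a REVOKE_ACK.
 */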
4509 static void handle_lease(struct ceph_mds_client *mdsc,
4510                          struct ceph_mds_session *session,
4511                          struct ceph_msg *msg)
4512 {
4513         struct super_block *sb = mdsc->fsc->sb;
4514         struct inode *inode;
4515         struct dentry *parent, *dentry;
4516         struct ceph_dentry_info *di;
4517         int mds = session->s_mds;
4518         struct ceph_mds_lease *h = msg->front.iov_base;
4519         u32 seq;
4520         struct ceph_vino vino;
4521         struct qstr dname;
4522         int release = 0;
4523
4524         dout("handle_lease from mds%d\n", mds);
4525
4526         /* decode */
4527         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4528                 goto bad;
4529         vino.ino = le64_to_cpu(h->ino);
4530         vino.snap = CEPH_NOSNAP;
4531         seq = le32_to_cpu(h->seq);
4532         dname.len = get_unaligned_le32(h + 1);
4533         if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4534                 goto bad;
4535         dname.name = (void *)(h + 1) + sizeof(u32);
4536
4537         /* lookup inode */
4538         inode = ceph_find_inode(sb, vino);
4539         dout("handle_lease %s, ino %llx %p %.*s\n",
4540              ceph_lease_op_name(h->action), vino.ino, inode,
4541              dname.len, dname.name);
4542
4543         mutex_lock(&session->s_mutex);
4544         inc_session_sequence(session);
4545
4546         if (!inode) {
4547                 dout("handle_lease no inode %llx\n", vino.ino);
4548                 goto release;
4549         }
4550
4551         /* dentry */
4552         parent = d_find_alias(inode);
4553         if (!parent) {
4554                 dout("no parent dentry on inode %p\n", inode);
4555                 WARN_ON(1);
4556                 goto release;  /* hrm... */
4557         }
4558         dname.hash = full_name_hash(parent, dname.name, dname.len);
4559         dentry = d_lookup(parent, &dname);
4560         dput(parent);
4561         if (!dentry)
4562                 goto release;
4563
4564         spin_lock(&dentry->d_lock);
4565         di = ceph_dentry(dentry);
4566         switch (h->action) {
4567         case CEPH_MDS_LEASE_REVOKE:
4568                 if (di->lease_session == session) {
4569                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4570                                 h->seq = cpu_to_le32(di->lease_seq);
4571                         __ceph_mdsc_drop_dentry_lease(dentry);
4572                 }
4573                 release = 1;
4574                 break;
4575
4576         case CEPH_MDS_LEASE_RENEW:
4577                 if (di->lease_session == session &&
4578                     di->lease_gen == atomic_read(&session->s_cap_gen) &&
4579                     di->lease_renew_from &&
4580                     di->lease_renew_after == 0) {
4581                         unsigned long duration =
4582                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4583
4584                         di->lease_seq = seq;
4585                         di->time = di->lease_renew_from + duration;
4586                         di->lease_renew_after = di->lease_renew_from +
4587                                 (duration >> 1);
4588                         di->lease_renew_from = 0;
4589                 }
4590                 break;
4591         }
4592         spin_unlock(&dentry->d_lock);
4593         dput(dentry);
4594
4595         if (!release)
4596                 goto out;
4597
4598 release:
4599         /* let's just reuse the same message */
4600         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4601         ceph_msg_get(msg);
4602         ceph_con_send(&session->s_con, msg);
4603
4604 out:
4605         mutex_unlock(&session->s_mutex);
4606         iput(inode);
4607         return;
4608
4609 bad:
4610         pr_err("corrupt lease message\n");
4611         ceph_msg_dump(msg);
4612 }
4613
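/*
 * Send a dentry lease message with the given action and seq to the MDS
 * that owns this session.
 */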
4614 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4615                               struct dentry *dentry, char action,
4616                               u32 seq)
4617 {
4618         struct ceph_msg *msg;
4619         struct ceph_mds_lease *lease;
4620         struct inode *dir;
4621         int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4622
4623         dout("lease_send_msg dentry %p %s to mds%d\n",
4624              dentry, ceph_lease_op_name(action), session->s_mds);
4625
4626         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4627         if (!msg)
4628                 return;
4629         lease = msg->front.iov_base;
4630         lease->action = action;
4631         lease->seq = cpu_to_le32(seq);
4632
4633         spin_lock(&dentry->d_lock);
4634         dir = d_inode(dentry->d_parent);
4635         lease->ino = cpu_to_le64(ceph_ino(dir));
4636         lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4637
4638         put_unaligned_le32(dentry->d_name.len, lease + 1);
4639         memcpy((void *)(lease + 1) + 4,
4640                dentry->d_name.name, dentry->d_name.len);
4641         spin_unlock(&dentry->d_lock);
4642
4643         ceph_con_send(&session->s_con, msg);
4644 }
4645
4646 /*
4647  * lock and unlock the session, to wait for ongoing session activity to finish
4648  */
4649 static void lock_unlock_session(struct ceph_mds_session *s)
4650 {
4651         mutex_lock(&s->s_mutex);
4652         mutex_unlock(&s->s_mutex);
4653 }
4654
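/*
 * If the client has been blocklisted and the mount uses clean session
 * recovery, automatically force a reconnect.
 */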
4655 static void maybe_recover_session(struct ceph_mds_client *mdsc)
4656 {
4657         struct ceph_fs_client *fsc = mdsc->fsc;
4658
4659         if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4660                 return;
4661
4662         if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4663                 return;
4664
4665         if (!READ_ONCE(fsc->blocklisted))
4666                 return;
4667
4668         pr_info("auto reconnect after blocklisted\n");
4669         ceph_force_reconnect(fsc->sb);
4670 }
4671
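/*
 * Returns false for sessions that are new, closing, closed, restarting or
 * rejected.  Marks an OPEN session as HUNG when its ttl has expired.
 */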
4672 bool check_session_state(struct ceph_mds_session *s)
4673 {
4674         switch (s->s_state) {
4675         case CEPH_MDS_SESSION_OPEN:
4676                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4677                         s->s_state = CEPH_MDS_SESSION_HUNG;
4678                         pr_info("mds%d hung\n", s->s_mds);
4679                 }
4680                 break;
4681         case CEPH_MDS_SESSION_CLOSING:
4682         case CEPH_MDS_SESSION_NEW:
4683         case CEPH_MDS_SESSION_RESTARTING:
4684         case CEPH_MDS_SESSION_CLOSED:
4685         case CEPH_MDS_SESSION_REJECTED:
4686                 return false;
4687         }
4688
4689         return true;
4690 }
4691
4692 /*
4693  * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
4694  * then we need to retransmit that request.
4695  */
4696 void inc_session_sequence(struct ceph_mds_session *s)
4697 {
4698         lockdep_assert_held(&s->s_mutex);
4699
4700         s->s_seq++;
4701
4702         if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4703                 int ret;
4704
4705                 dout("resending session close request for mds%d\n", s->s_mds);
4706                 ret = request_close_session(s);
4707                 if (ret < 0)
4708                         pr_err("unable to close session to mds%d: %d\n",
4709                                s->s_mds, ret);
4710         }
4711 }
4712
4713 /*
4714  * delayed work -- periodically trim expired leases, renew caps with mds.  If
4715  * the @delay parameter is set to 0 or if it's more than 5 secs, the default
4716  * workqueue delay value of 5 secs will be used.
4717  */
4718 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
4719 {
4720         unsigned long max_delay = HZ * 5;
4721
4722         /* 5 secs default delay */
4723         if (!delay || (delay > max_delay))
4724                 delay = max_delay;
4725         schedule_delayed_work(&mdsc->delayed_work,
4726                               round_jiffies_relative(delay));
4727 }
4728
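/*
 * Periodic work: renew caps (or just send a keepalive) on each session,
 * send queued cap releases, check delayed caps, queue cap reclaim, trim
 * the snapid map, possibly recover a blocklisted session, then re-arm.
 */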
4729 static void delayed_work(struct work_struct *work)
4730 {
4731         struct ceph_mds_client *mdsc =
4732                 container_of(work, struct ceph_mds_client, delayed_work.work);
4733         unsigned long delay;
4734         int renew_interval;
4735         int renew_caps;
4736         int i;
4737
4738         dout("mdsc delayed_work\n");
4739
4740         if (mdsc->stopping)
4741                 return;
4742
4743         mutex_lock(&mdsc->mutex);
4744         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4745         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4746                                    mdsc->last_renew_caps);
4747         if (renew_caps)
4748                 mdsc->last_renew_caps = jiffies;
4749
4750         for (i = 0; i < mdsc->max_sessions; i++) {
4751                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4752                 if (!s)
4753                         continue;
4754
4755                 if (!check_session_state(s)) {
4756                         ceph_put_mds_session(s);
4757                         continue;
4758                 }
4759                 mutex_unlock(&mdsc->mutex);
4760
4761                 mutex_lock(&s->s_mutex);
4762                 if (renew_caps)
4763                         send_renew_caps(mdsc, s);
4764                 else
4765                         ceph_con_keepalive(&s->s_con);
4766                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4767                     s->s_state == CEPH_MDS_SESSION_HUNG)
4768                         ceph_send_cap_releases(mdsc, s);
4769                 mutex_unlock(&s->s_mutex);
4770                 ceph_put_mds_session(s);
4771
4772                 mutex_lock(&mdsc->mutex);
4773         }
4774         mutex_unlock(&mdsc->mutex);
4775
4776         delay = ceph_check_delayed_caps(mdsc);
4777
4778         ceph_queue_cap_reclaim_work(mdsc);
4779
4780         ceph_trim_snapid_map(mdsc);
4781
4782         maybe_recover_session(mdsc);
4783
4784         schedule_delayed(mdsc, delay);
4785 }
4786
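/*
 * Allocate and initialize the mds client state for this fs client.
 */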
4787 int ceph_mdsc_init(struct ceph_fs_client *fsc)
4789 {
4790         struct ceph_mds_client *mdsc;
4791         int err;
4792
4793         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4794         if (!mdsc)
4795                 return -ENOMEM;
4796         mdsc->fsc = fsc;
4797         mutex_init(&mdsc->mutex);
4798         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4799         if (!mdsc->mdsmap) {
4800                 err = -ENOMEM;
4801                 goto err_mdsc;
4802         }
4803
4804         init_completion(&mdsc->safe_umount_waiters);
4805         init_waitqueue_head(&mdsc->session_close_wq);
4806         INIT_LIST_HEAD(&mdsc->waiting_for_map);
4807         mdsc->quotarealms_inodes = RB_ROOT;
4808         mutex_init(&mdsc->quotarealms_inodes_mutex);
4809         init_rwsem(&mdsc->snap_rwsem);
4810         mdsc->snap_realms = RB_ROOT;
4811         INIT_LIST_HEAD(&mdsc->snap_empty);
4812         spin_lock_init(&mdsc->snap_empty_lock);
4813         mdsc->request_tree = RB_ROOT;
4814         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4815         mdsc->last_renew_caps = jiffies;
4816         INIT_LIST_HEAD(&mdsc->cap_delay_list);
4817         INIT_LIST_HEAD(&mdsc->cap_wait_list);
4818         spin_lock_init(&mdsc->cap_delay_lock);
4819         INIT_LIST_HEAD(&mdsc->snap_flush_list);
4820         spin_lock_init(&mdsc->snap_flush_lock);
4821         mdsc->last_cap_flush_tid = 1;
4822         INIT_LIST_HEAD(&mdsc->cap_flush_list);
4823         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4824         spin_lock_init(&mdsc->cap_dirty_lock);
4825         init_waitqueue_head(&mdsc->cap_flushing_wq);
4826         INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4827         err = ceph_metric_init(&mdsc->metric);
4828         if (err)
4829                 goto err_mdsmap;
4830
4831         spin_lock_init(&mdsc->dentry_list_lock);
4832         INIT_LIST_HEAD(&mdsc->dentry_leases);
4833         INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4834
4835         ceph_caps_init(mdsc);
4836         ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4837
4838         spin_lock_init(&mdsc->snapid_map_lock);
4839         mdsc->snapid_map_tree = RB_ROOT;
4840         INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4841
4842         init_rwsem(&mdsc->pool_perm_rwsem);
4843         mdsc->pool_perm_tree = RB_ROOT;
4844
4845         strscpy(mdsc->nodename, utsname()->nodename,
4846                 sizeof(mdsc->nodename));
4847
4848         fsc->mdsc = mdsc;
4849         return 0;
4850
4851 err_mdsmap:
4852         kfree(mdsc->mdsmap);
4853 err_mdsc:
4854         kfree(mdsc);
4855         return err;
4856 }
4857
4858 /*
4859  * Wait for safe replies on open mds requests.  If we time out, drop
4860  * all requests from the tree to avoid dangling dentry refs.
4861  */
4862 static void wait_requests(struct ceph_mds_client *mdsc)
4863 {
4864         struct ceph_options *opts = mdsc->fsc->client->options;
4865         struct ceph_mds_request *req;
4866
4867         mutex_lock(&mdsc->mutex);
4868         if (__get_oldest_req(mdsc)) {
4869                 mutex_unlock(&mdsc->mutex);
4870
4871                 dout("wait_requests waiting for requests\n");
4872                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4873                                     ceph_timeout_jiffies(opts->mount_timeout));
4874
4875                 /* tear down remaining requests */
4876                 mutex_lock(&mdsc->mutex);
4877                 while ((req = __get_oldest_req(mdsc))) {
4878                         dout("wait_requests timed out on tid %llu\n",
4879                              req->r_tid);
4880                         list_del_init(&req->r_wait);
4881                         __unregister_request(mdsc, req);
4882                 }
4883         }
4884         mutex_unlock(&mdsc->mutex);
4885         dout("wait_requests done\n");
4886 }
4887
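/*
 * Ask the MDS to flush its journal (mdlog).  Skipped for pre-luminous
 * MDSes, which crash on unknown session requests.
 */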
4888 void send_flush_mdlog(struct ceph_mds_session *s)
4889 {
4890         struct ceph_msg *msg;
4891
4892         /*
4893          * Pre-luminous MDS crashes when it sees an unknown session request
4894          */
4895         if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
4896                 return;
4897
4898         mutex_lock(&s->s_mutex);
4899         dout("request mdlog flush to mds%d (%s) seq %lld\n", s->s_mds,
4900              ceph_session_state_name(s->s_state), s->s_seq);
4901         msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
4902                                       s->s_seq);
4903         if (!msg) {
4904                 pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
4905                        s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
4906         } else {
4907                 ceph_con_send(&s->s_con, msg);
4908         }
4909         mutex_unlock(&s->s_mutex);
4910 }
4911
4912 /*
4913  * called before mount is ro, and before dentries are torn down.
4914  * (hmm, does this still race with new lookups?)
4915  */
4916 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4917 {
4918         dout("pre_umount\n");
4919         mdsc->stopping = 1;
4920
4921         ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
4922         ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
4923         ceph_flush_dirty_caps(mdsc);
4924         wait_requests(mdsc);
4925
4926         /*
4927          * wait for reply handlers to drop their request refs and
4928          * their inode/dcache refs
4929          */
4930         ceph_msgr_flush();
4931
4932         ceph_cleanup_quotarealms_inodes(mdsc);
4933 }
4934
4935 /*
4936  * flush the mdlog and wait for all outstanding write mds requests to become safe.
4937  */
4938 static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
4939                                                  u64 want_tid)
4940 {
4941         struct ceph_mds_request *req = NULL, *nextreq;
4942         struct ceph_mds_session *last_session = NULL;
4943         struct rb_node *n;
4944
4945         mutex_lock(&mdsc->mutex);
4946         dout("%s want %lld\n", __func__, want_tid);
4947 restart:
4948         req = __get_oldest_req(mdsc);
4949         while (req && req->r_tid <= want_tid) {
4950                 /* find next request */
4951                 n = rb_next(&req->r_node);
4952                 if (n)
4953                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4954                 else
4955                         nextreq = NULL;
4956                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4957                     (req->r_op & CEPH_MDS_OP_WRITE)) {
4958                         struct ceph_mds_session *s = req->r_session;
4959
4960                         if (!s) {
4961                                 req = nextreq;
4962                                 continue;
4963                         }
4964
4965                         /* write op */
4966                         ceph_mdsc_get_request(req);
4967                         if (nextreq)
4968                                 ceph_mdsc_get_request(nextreq);
4969                         s = ceph_get_mds_session(s);
4970                         mutex_unlock(&mdsc->mutex);
4971
4972                         /* send flush mdlog request to MDS */
4973                         if (last_session != s) {
4974                                 send_flush_mdlog(s);
4975                                 ceph_put_mds_session(last_session);
4976                                 last_session = s;
4977                         } else {
4978                                 ceph_put_mds_session(s);
4979                         }
4980                         dout("%s wait on %llu (want %llu)\n", __func__,
4981                              req->r_tid, want_tid);
4982                         wait_for_completion(&req->r_safe_completion);
4983
4984                         mutex_lock(&mdsc->mutex);
4985                         ceph_mdsc_put_request(req);
4986                         if (!nextreq)
4987                                 break;  /* next didn't exist before, so we're done! */
4988                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
4989                                 /* next request was removed from tree */
4990                                 ceph_mdsc_put_request(nextreq);
4991                                 goto restart;
4992                         }
4993                         ceph_mdsc_put_request(nextreq);  /* won't go away */
4994                 }
4995                 req = nextreq;
4996         }
4997         mutex_unlock(&mdsc->mutex);
4998         ceph_put_mds_session(last_session);
4999         dout("%s done\n", __func__);
5000 }
5001
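/*
 * Flush dirty caps and the MDS journal, wait for all write requests up to
 * this point to become safe, then wait for the resulting cap flushes to
 * be acknowledged.
 */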
5002 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
5003 {
5004         u64 want_tid, want_flush;
5005
5006         if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
5007                 return;
5008
5009         dout("sync\n");
5010         mutex_lock(&mdsc->mutex);
5011         want_tid = mdsc->last_tid;
5012         mutex_unlock(&mdsc->mutex);
5013
5014         ceph_flush_dirty_caps(mdsc);
5015         spin_lock(&mdsc->cap_dirty_lock);
5016         want_flush = mdsc->last_cap_flush_tid;
5017         if (!list_empty(&mdsc->cap_flush_list)) {
5018                 struct ceph_cap_flush *cf =
5019                         list_last_entry(&mdsc->cap_flush_list,
5020                                         struct ceph_cap_flush, g_list);
5021                 cf->wake = true;
5022         }
5023         spin_unlock(&mdsc->cap_dirty_lock);
5024
5025         dout("sync want tid %lld flush_seq %lld\n",
5026              want_tid, want_flush);
5027
5028         flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
5029         wait_caps_flush(mdsc, want_flush);
5030 }
5031
5032 /*
5033  * true if all sessions are closed, or we force unmount
5034  */
5035 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
5036 {
5037         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
5038                 return true;
5039         return atomic_read(&mdsc->num_sessions) <= skipped;
5040 }
5041
5042 /*
5043  * called after sb is ro or when metadata corrupted.
5044  * called after sb is ro or when metadata is corrupted.
5045 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
5046 {
5047         struct ceph_options *opts = mdsc->fsc->client->options;
5048         struct ceph_mds_session *session;
5049         int i;
5050         int skipped = 0;
5051
5052         dout("close_sessions\n");
5053
5054         /* close sessions */
5055         mutex_lock(&mdsc->mutex);
5056         for (i = 0; i < mdsc->max_sessions; i++) {
5057                 session = __ceph_lookup_mds_session(mdsc, i);
5058                 if (!session)
5059                         continue;
5060                 mutex_unlock(&mdsc->mutex);
5061                 mutex_lock(&session->s_mutex);
5062                 if (__close_session(mdsc, session) <= 0)
5063                         skipped++;
5064                 mutex_unlock(&session->s_mutex);
5065                 ceph_put_mds_session(session);
5066                 mutex_lock(&mdsc->mutex);
5067         }
5068         mutex_unlock(&mdsc->mutex);
5069
5070         dout("waiting for sessions to close\n");
5071         wait_event_timeout(mdsc->session_close_wq,
5072                            done_closing_sessions(mdsc, skipped),
5073                            ceph_timeout_jiffies(opts->mount_timeout));
5074
5075         /* tear down remaining sessions */
5076         mutex_lock(&mdsc->mutex);
5077         for (i = 0; i < mdsc->max_sessions; i++) {
5078                 if (mdsc->sessions[i]) {
5079                         session = ceph_get_mds_session(mdsc->sessions[i]);
5080                         __unregister_session(mdsc, session);
5081                         mutex_unlock(&mdsc->mutex);
5082                         mutex_lock(&session->s_mutex);
5083                         remove_session_caps(session);
5084                         mutex_unlock(&session->s_mutex);
5085                         ceph_put_mds_session(session);
5086                         mutex_lock(&mdsc->mutex);
5087                 }
5088         }
5089         WARN_ON(!list_empty(&mdsc->cap_delay_list));
5090         mutex_unlock(&mdsc->mutex);
5091
5092         ceph_cleanup_snapid_map(mdsc);
5093         ceph_cleanup_global_and_empty_realms(mdsc);
5094
5095         cancel_work_sync(&mdsc->cap_reclaim_work);
5096         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
5097
5098         dout("stopped\n");
5099 }
5100
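/*
 * Forcibly close all mds sessions and wake up anyone waiting on requests
 * or a new mdsmap; used on forced unmount.
 */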
5101 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
5102 {
5103         struct ceph_mds_session *session;
5104         int mds;
5105
5106         dout("force umount\n");
5107
5108         mutex_lock(&mdsc->mutex);
5109         for (mds = 0; mds < mdsc->max_sessions; mds++) {
5110                 session = __ceph_lookup_mds_session(mdsc, mds);
5111                 if (!session)
5112                         continue;
5113
5114                 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
5115                         __unregister_session(mdsc, session);
5116                 __wake_requests(mdsc, &session->s_waiting);
5117                 mutex_unlock(&mdsc->mutex);
5118
5119                 mutex_lock(&session->s_mutex);
5120                 __close_session(mdsc, session);
5121                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
5122                         cleanup_session_requests(mdsc, session);
5123                         remove_session_caps(session);
5124                 }
5125                 mutex_unlock(&session->s_mutex);
5126                 ceph_put_mds_session(session);
5127
5128                 mutex_lock(&mdsc->mutex);
5129                 kick_requests(mdsc, mds);
5130         }
5131         __wake_requests(mdsc, &mdsc->waiting_for_map);
5132         mutex_unlock(&mdsc->mutex);
5133 }
5134
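/*
 * Release the mds client's remaining resources (mdsmap, session array,
 * cap and pool-perm state).
 */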
5135 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
5136 {
5137         dout("stop\n");
5138         /*
5139          * Make sure the delayed work has stopped before releasing
5140          * the resources.
5141          *
5142          * cancel_delayed_work_sync() only guarantees that the work
5143          * finishes executing, but the delayed work can re-arm itself
5144          * afterwards, so flush it instead.
5145          */
5146         flush_delayed_work(&mdsc->delayed_work);
5147
5148         if (mdsc->mdsmap)
5149                 ceph_mdsmap_destroy(mdsc->mdsmap);
5150         kfree(mdsc->sessions);
5151         ceph_caps_finalize(mdsc);
5152         ceph_pool_perm_destroy(mdsc);
5153 }
5154
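/*
 * Tear down the mds client state for this fs client.
 */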
5155 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
5156 {
5157         struct ceph_mds_client *mdsc = fsc->mdsc;
5158         dout("mdsc_destroy %p\n", mdsc);
5159
5160         if (!mdsc)
5161                 return;
5162
5163         /* flush out any connection work with references to us */
5164         ceph_msgr_flush();
5165
5166         ceph_mdsc_stop(mdsc);
5167
5168         ceph_metric_destroy(&mdsc->metric);
5169
5170         fsc->mdsc = NULL;
5171         kfree(mdsc);
5172         dout("mdsc_destroy %p done\n", mdsc);
5173 }
5174
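/*
 * Handle an FSMAP_USER message: look up the file system named by the
 * mds_namespace mount option and subscribe to its mdsmap.
 */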
5175 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5176 {
5177         struct ceph_fs_client *fsc = mdsc->fsc;
5178         const char *mds_namespace = fsc->mount_options->mds_namespace;
5179         void *p = msg->front.iov_base;
5180         void *end = p + msg->front.iov_len;
5181         u32 epoch;
5182         u32 num_fs;
5183         u32 mount_fscid = (u32)-1;
5184         int err = -EINVAL;
5185
5186         ceph_decode_need(&p, end, sizeof(u32), bad);
5187         epoch = ceph_decode_32(&p);
5188
5189         dout("handle_fsmap epoch %u\n", epoch);
5190
5191         /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
5192         ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
5193
5194         ceph_decode_32_safe(&p, end, num_fs, bad);
5195         while (num_fs-- > 0) {
5196                 void *info_p, *info_end;
5197                 u32 info_len;
5198                 u32 fscid, namelen;
5199
5200                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
5201                 p += 2;         // info_v, info_cv
5202                 info_len = ceph_decode_32(&p);
5203                 ceph_decode_need(&p, end, info_len, bad);
5204                 info_p = p;
5205                 info_end = p + info_len;
5206                 p = info_end;
5207
5208                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
5209                 fscid = ceph_decode_32(&info_p);
5210                 namelen = ceph_decode_32(&info_p);
5211                 ceph_decode_need(&info_p, info_end, namelen, bad);
5212
5213                 if (mds_namespace &&
5214                     strlen(mds_namespace) == namelen &&
5215                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
5216                         mount_fscid = fscid;
5217                         break;
5218                 }
5219         }
5220
5221         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
5222         if (mount_fscid != (u32)-1) {
5223                 fsc->client->monc.fs_cluster_id = mount_fscid;
5224                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
5225                                    0, true);
5226                 ceph_monc_renew_subs(&fsc->client->monc);
5227         } else {
5228                 err = -ENOENT;
5229                 goto err_out;
5230         }
5231         return;
5232
5233 bad:
5234         pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
5235         ceph_umount_begin(mdsc->fsc->sb);
5236 err_out:
5237         mutex_lock(&mdsc->mutex);
5238         mdsc->mdsmap_err = err;
5239         __wake_requests(mdsc, &mdsc->waiting_for_map);
5240         mutex_unlock(&mdsc->mutex);
5241 }
5242
5243 /*
5244  * handle mds map update.
5245  */
5246 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5247 {
5248         u32 epoch;
5249         u32 maplen;
5250         void *p = msg->front.iov_base;
5251         void *end = p + msg->front.iov_len;
5252         struct ceph_mdsmap *newmap, *oldmap;
5253         struct ceph_fsid fsid;
5254         int err = -EINVAL;
5255
5256         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
5257         ceph_decode_copy(&p, &fsid, sizeof(fsid));
5258         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5259                 return;
5260         epoch = ceph_decode_32(&p);
5261         maplen = ceph_decode_32(&p);
5262         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
5263
5264         /* do we need it? */
5265         mutex_lock(&mdsc->mutex);
5266         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
5267                 dout("handle_map epoch %u <= our %u\n",
5268                      epoch, mdsc->mdsmap->m_epoch);
5269                 mutex_unlock(&mdsc->mutex);
5270                 return;
5271         }
5272
5273         newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
5274         if (IS_ERR(newmap)) {
5275                 err = PTR_ERR(newmap);
5276                 goto bad_unlock;
5277         }
5278
5279         /* swap into place */
5280         if (mdsc->mdsmap) {
5281                 oldmap = mdsc->mdsmap;
5282                 mdsc->mdsmap = newmap;
5283                 check_new_map(mdsc, newmap, oldmap);
5284                 ceph_mdsmap_destroy(oldmap);
5285         } else {
5286                 mdsc->mdsmap = newmap;  /* first mds map */
5287         }
5288         mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5289                                         MAX_LFS_FILESIZE);
5290
5291         __wake_requests(mdsc, &mdsc->waiting_for_map);
5292         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5293                           mdsc->mdsmap->m_epoch);
5294
5295         mutex_unlock(&mdsc->mutex);
5296         schedule_delayed(mdsc, 0);
5297         return;
5298
5299 bad_unlock:
5300         mutex_unlock(&mdsc->mutex);
5301 bad:
5302         pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
5303         ceph_umount_begin(mdsc->fsc->sb);
5304         return;
5305 }
5306
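/*
 * Connection refcounting for the messenger: a connection reference pins
 * the owning mds session.
 */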
5307 static struct ceph_connection *mds_get_con(struct ceph_connection *con)
5308 {
5309         struct ceph_mds_session *s = con->private;
5310
5311         if (ceph_get_mds_session(s))
5312                 return con;
5313         return NULL;
5314 }
5315
5316 static void mds_put_con(struct ceph_connection *con)
5317 {
5318         struct ceph_mds_session *s = con->private;
5319
5320         ceph_put_mds_session(s);
5321 }
5322
5323 /*
5324  * if the client is unresponsive for long enough, the mds will kill
5325  * the session entirely.
5326  */
5327 static void mds_peer_reset(struct ceph_connection *con)
5328 {
5329         struct ceph_mds_session *s = con->private;
5330         struct ceph_mds_client *mdsc = s->s_mdsc;
5331
5332         pr_warn("mds%d closed our session\n", s->s_mds);
5333         if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
5334                 send_mds_reconnect(mdsc, s);
5335 }
5336
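/*
 * Dispatch an incoming message from the MDS to the appropriate handler,
 * after verifying the session is still registered.
 */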
5337 static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5338 {
5339         struct ceph_mds_session *s = con->private;
5340         struct ceph_mds_client *mdsc = s->s_mdsc;
5341         int type = le16_to_cpu(msg->hdr.type);
5342
5343         mutex_lock(&mdsc->mutex);
5344         if (__verify_registered_session(mdsc, s) < 0) {
5345                 mutex_unlock(&mdsc->mutex);
5346                 goto out;
5347         }
5348         mutex_unlock(&mdsc->mutex);
5349
5350         switch (type) {
5351         case CEPH_MSG_MDS_MAP:
5352                 ceph_mdsc_handle_mdsmap(mdsc, msg);
5353                 break;
5354         case CEPH_MSG_FS_MAP_USER:
5355                 ceph_mdsc_handle_fsmap(mdsc, msg);
5356                 break;
5357         case CEPH_MSG_CLIENT_SESSION:
5358                 handle_session(s, msg);
5359                 break;
5360         case CEPH_MSG_CLIENT_REPLY:
5361                 handle_reply(s, msg);
5362                 break;
5363         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
5364                 handle_forward(mdsc, s, msg);
5365                 break;
5366         case CEPH_MSG_CLIENT_CAPS:
5367                 ceph_handle_caps(s, msg);
5368                 break;
5369         case CEPH_MSG_CLIENT_SNAP:
5370                 ceph_handle_snap(mdsc, s, msg);
5371                 break;
5372         case CEPH_MSG_CLIENT_LEASE:
5373                 handle_lease(mdsc, s, msg);
5374                 break;
5375         case CEPH_MSG_CLIENT_QUOTA:
5376                 ceph_handle_quota(mdsc, s, msg);
5377                 break;
5378
5379         default:
5380                 pr_err("received unknown message type %d %s\n", type,
5381                        ceph_msg_type_name(type));
5382         }
5383 out:
5384         ceph_msg_put(msg);
5385 }
5386
5387 /*
5388  * authentication
5389  */
5390
5391 /*
5392  * Note: returned pointer is the address of a structure that's
5393  * managed separately.  Caller must *not* attempt to free it.
5394  */
5395 static struct ceph_auth_handshake *
5396 mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
5397 {
5398         struct ceph_mds_session *s = con->private;
5399         struct ceph_mds_client *mdsc = s->s_mdsc;
5400         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5401         struct ceph_auth_handshake *auth = &s->s_auth;
5402         int ret;
5403
5404         ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5405                                          force_new, proto, NULL, NULL);
5406         if (ret)
5407                 return ERR_PTR(ret);
5408
5409         return auth;
5410 }
5411
5412 static int mds_add_authorizer_challenge(struct ceph_connection *con,
5413                                     void *challenge_buf, int challenge_buf_len)
5414 {
5415         struct ceph_mds_session *s = con->private;
5416         struct ceph_mds_client *mdsc = s->s_mdsc;
5417         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5418
5419         return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
5420                                             challenge_buf, challenge_buf_len);
5421 }
5422
5423 static int mds_verify_authorizer_reply(struct ceph_connection *con)
5424 {
5425         struct ceph_mds_session *s = con->private;
5426         struct ceph_mds_client *mdsc = s->s_mdsc;
5427         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5428         struct ceph_auth_handshake *auth = &s->s_auth;
5429
5430         return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
5431                 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
5432                 NULL, NULL, NULL, NULL);
5433 }
5434
5435 static int mds_invalidate_authorizer(struct ceph_connection *con)
5436 {
5437         struct ceph_mds_session *s = con->private;
5438         struct ceph_mds_client *mdsc = s->s_mdsc;
5439         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5440
5441         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
5442
5443         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
5444 }
5445
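/* msgr2 authentication exchange */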
5446 static int mds_get_auth_request(struct ceph_connection *con,
5447                                 void *buf, int *buf_len,
5448                                 void **authorizer, int *authorizer_len)
5449 {
5450         struct ceph_mds_session *s = con->private;
5451         struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5452         struct ceph_auth_handshake *auth = &s->s_auth;
5453         int ret;
5454
5455         ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5456                                        buf, buf_len);
5457         if (ret)
5458                 return ret;
5459
5460         *authorizer = auth->authorizer_buf;
5461         *authorizer_len = auth->authorizer_buf_len;
5462         return 0;
5463 }
5464
5465 static int mds_handle_auth_reply_more(struct ceph_connection *con,
5466                                       void *reply, int reply_len,
5467                                       void *buf, int *buf_len,
5468                                       void **authorizer, int *authorizer_len)
5469 {
5470         struct ceph_mds_session *s = con->private;
5471         struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5472         struct ceph_auth_handshake *auth = &s->s_auth;
5473         int ret;
5474
5475         ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
5476                                               buf, buf_len);
5477         if (ret)
5478                 return ret;
5479
5480         *authorizer = auth->authorizer_buf;
5481         *authorizer_len = auth->authorizer_buf_len;
5482         return 0;
5483 }
5484
5485 static int mds_handle_auth_done(struct ceph_connection *con,
5486                                 u64 global_id, void *reply, int reply_len,
5487                                 u8 *session_key, int *session_key_len,
5488                                 u8 *con_secret, int *con_secret_len)
5489 {
5490         struct ceph_mds_session *s = con->private;
5491         struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5492         struct ceph_auth_handshake *auth = &s->s_auth;
5493
5494         return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
5495                                                session_key, session_key_len,
5496                                                con_secret, con_secret_len);
5497 }
5498
5499 static int mds_handle_auth_bad_method(struct ceph_connection *con,
5500                                       int used_proto, int result,
5501                                       const int *allowed_protos, int proto_cnt,
5502                                       const int *allowed_modes, int mode_cnt)
5503 {
5504         struct ceph_mds_session *s = con->private;
5505         struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
5506         int ret;
5507
5508         if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
5509                                             used_proto, result,
5510                                             allowed_protos, proto_cnt,
5511                                             allowed_modes, mode_cnt)) {
5512                 ret = ceph_monc_validate_auth(monc);
5513                 if (ret)
5514                         return ret;
5515         }
5516
5517         return -EACCES;
5518 }
5519
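/*
 * Allocate a buffer for an incoming message on this connection.
 */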
5520 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
5521                                 struct ceph_msg_header *hdr, int *skip)
5522 {
5523         struct ceph_msg *msg;
5524         int type = (int) le16_to_cpu(hdr->type);
5525         int front_len = (int) le32_to_cpu(hdr->front_len);
5526
5527         if (con->in_msg)
5528                 return con->in_msg;
5529
5530         *skip = 0;
5531         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
5532         if (!msg) {
5533                 pr_err("unable to allocate msg type %d len %d\n",
5534                        type, front_len);
5535                 return NULL;
5536         }
5537
5538         return msg;
5539 }
5540
5541 static int mds_sign_message(struct ceph_msg *msg)
5542 {
5543        struct ceph_mds_session *s = msg->con->private;
5544        struct ceph_auth_handshake *auth = &s->s_auth;
5545
5546        return ceph_auth_sign_message(auth, msg);
5547 }
5548
5549 static int mds_check_message_signature(struct ceph_msg *msg)
5550 {
5551        struct ceph_mds_session *s = msg->con->private;
5552        struct ceph_auth_handshake *auth = &s->s_auth;
5553
5554        return ceph_auth_check_message_signature(auth, msg);
5555 }
5556
5557 static const struct ceph_connection_operations mds_con_ops = {
5558         .get = mds_get_con,
5559         .put = mds_put_con,
5560         .alloc_msg = mds_alloc_msg,
5561         .dispatch = mds_dispatch,
5562         .peer_reset = mds_peer_reset,
5563         .get_authorizer = mds_get_authorizer,
5564         .add_authorizer_challenge = mds_add_authorizer_challenge,
5565         .verify_authorizer_reply = mds_verify_authorizer_reply,
5566         .invalidate_authorizer = mds_invalidate_authorizer,
5567         .sign_message = mds_sign_message,
5568         .check_message_signature = mds_check_message_signature,
5569         .get_auth_request = mds_get_auth_request,
5570         .handle_auth_reply_more = mds_handle_auth_reply_more,
5571         .handle_auth_done = mds_handle_auth_done,
5572         .handle_auth_bad_method = mds_handle_auth_bad_method,
5573 };
5574
5575 /* eof */