1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12 #include <linux/bits.h>
13 #include <linux/ktime.h>
14 #include <linux/bitmap.h>
15
16 #include "super.h"
17 #include "mds_client.h"
18
19 #include <linux/ceph/ceph_features.h>
20 #include <linux/ceph/messenger.h>
21 #include <linux/ceph/decode.h>
22 #include <linux/ceph/pagelist.h>
23 #include <linux/ceph/auth.h>
24 #include <linux/ceph/debugfs.h>
25
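/*
 * Upper bound on how large a single reconnect message payload is allowed
 * to grow before the partially built reconnect is sent and, when the MDS
 * permits multiple reconnect messages, continued in a new one.
 */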
26 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
27
28 /*
29  * A cluster of MDS (metadata server) daemons is responsible for
30  * managing the file system namespace (the directory hierarchy and
31  * inodes) and for coordinating shared access to storage.  Metadata is
32  * partitioned hierarchically across a number of servers, and that
33  * partition varies over time as the cluster adjusts the distribution
34  * in order to balance load.
35  *
36  * The MDS client is primarily responsible for managing synchronous
37  * metadata requests for operations like open, unlink, and so forth.
38  * If there is an MDS failure, we find out about it when we (possibly
39  * request and) receive a new MDS map, and can resubmit affected
40  * requests.
41  *
42  * For the most part, though, we take advantage of a lossless
43  * communications channel to the MDS, and do not need to worry about
44  * timing out or resubmitting requests.
45  *
46  * We maintain a stateful "session" with each MDS we interact with.
47  * Within each session, we send periodic heartbeat messages to ensure
48  * any capabilities or leases we have been issued remain valid.  If
49  * the session times out and goes stale, our leases and capabilities
50  * are no longer valid.
51  */
52
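/*
 * Encoder state used while building a cap reconnect message for one
 * session: how many caps and snap realms have been written so far, the
 * pagelist being filled, which message encoding version is in use, and
 * whether the reconnect may be split across multiple messages.
 */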
53 struct ceph_reconnect_state {
54         struct ceph_mds_session *session;
55         int nr_caps, nr_realms;
56         struct ceph_pagelist *pagelist;
57         unsigned msg_version;
58         bool allow_multi;
59 };
60
61 static void __wake_requests(struct ceph_mds_client *mdsc,
62                             struct list_head *head);
63 static void ceph_cap_release_work(struct work_struct *work);
64 static void ceph_cap_reclaim_work(struct work_struct *work);
65
66 static const struct ceph_connection_operations mds_con_ops;
67
68
69 /*
70  * mds reply parsing
71  */
72
73 static int parse_reply_info_quota(void **p, void *end,
74                                   struct ceph_mds_reply_info_in *info)
75 {
76         u8 struct_v, struct_compat;
77         u32 struct_len;
78
79         ceph_decode_8_safe(p, end, struct_v, bad);
80         ceph_decode_8_safe(p, end, struct_compat, bad);
81         /* struct_v is expected to be >= 1. we only
82          * understand encoding with struct_compat == 1. */
83         if (!struct_v || struct_compat != 1)
84                 goto bad;
85         ceph_decode_32_safe(p, end, struct_len, bad);
86         ceph_decode_need(p, end, struct_len, bad);
87         end = *p + struct_len;
88         ceph_decode_64_safe(p, end, info->max_bytes, bad);
89         ceph_decode_64_safe(p, end, info->max_files, bad);
90         *p = end;
91         return 0;
92 bad:
93         return -EIO;
94 }
95
96 /*
97  * parse individual inode info
98  */
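/*
 * Note: a features value of (u64)-1 means the reply uses the newer
 * self-describing encoding (versioned structs with explicit lengths);
 * otherwise the presence of optional fields is decided by individual
 * CEPH_FEATURE_* bits negotiated with the MDS.
 */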
99 static int parse_reply_info_in(void **p, void *end,
100                                struct ceph_mds_reply_info_in *info,
101                                u64 features)
102 {
103         int err = 0;
104         u8 struct_v = 0;
105
106         if (features == (u64)-1) {
107                 u32 struct_len;
108                 u8 struct_compat;
109                 ceph_decode_8_safe(p, end, struct_v, bad);
110                 ceph_decode_8_safe(p, end, struct_compat, bad);
111                 /* struct_v is expected to be >= 1. we only understand
112                  * encoding with struct_compat == 1. */
113                 if (!struct_v || struct_compat != 1)
114                         goto bad;
115                 ceph_decode_32_safe(p, end, struct_len, bad);
116                 ceph_decode_need(p, end, struct_len, bad);
117                 end = *p + struct_len;
118         }
119
120         ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
121         info->in = *p;
122         *p += sizeof(struct ceph_mds_reply_inode) +
123                 sizeof(*info->in->fragtree.splits) *
124                 le32_to_cpu(info->in->fragtree.nsplits);
125
126         ceph_decode_32_safe(p, end, info->symlink_len, bad);
127         ceph_decode_need(p, end, info->symlink_len, bad);
128         info->symlink = *p;
129         *p += info->symlink_len;
130
131         ceph_decode_copy_safe(p, end, &info->dir_layout,
132                               sizeof(info->dir_layout), bad);
133         ceph_decode_32_safe(p, end, info->xattr_len, bad);
134         ceph_decode_need(p, end, info->xattr_len, bad);
135         info->xattr_data = *p;
136         *p += info->xattr_len;
137
138         if (features == (u64)-1) {
139                 /* inline data */
140                 ceph_decode_64_safe(p, end, info->inline_version, bad);
141                 ceph_decode_32_safe(p, end, info->inline_len, bad);
142                 ceph_decode_need(p, end, info->inline_len, bad);
143                 info->inline_data = *p;
144                 *p += info->inline_len;
145                 /* quota */
146                 err = parse_reply_info_quota(p, end, info);
147                 if (err < 0)
148                         goto out_bad;
149                 /* pool namespace */
150                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
151                 if (info->pool_ns_len > 0) {
152                         ceph_decode_need(p, end, info->pool_ns_len, bad);
153                         info->pool_ns_data = *p;
154                         *p += info->pool_ns_len;
155                 }
156
157                 /* btime */
158                 ceph_decode_need(p, end, sizeof(info->btime), bad);
159                 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
160
161                 /* change attribute */
162                 ceph_decode_64_safe(p, end, info->change_attr, bad);
163
164                 /* dir pin */
165                 if (struct_v >= 2) {
166                         ceph_decode_32_safe(p, end, info->dir_pin, bad);
167                 } else {
168                         info->dir_pin = -ENODATA;
169                 }
170
171                 /* snapshot birth time, remains zero for v<=2 */
172                 if (struct_v >= 3) {
173                         ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
174                         ceph_decode_copy(p, &info->snap_btime,
175                                          sizeof(info->snap_btime));
176                 } else {
177                         memset(&info->snap_btime, 0, sizeof(info->snap_btime));
178                 }
179
180                 /* snapshot count, remains zero for v<=3 */
181                 if (struct_v >= 4) {
182                         ceph_decode_64_safe(p, end, info->rsnaps, bad);
183                 } else {
184                         info->rsnaps = 0;
185                 }
186
187                 *p = end;
188         } else {
189                 if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
190                         ceph_decode_64_safe(p, end, info->inline_version, bad);
191                         ceph_decode_32_safe(p, end, info->inline_len, bad);
192                         ceph_decode_need(p, end, info->inline_len, bad);
193                         info->inline_data = *p;
194                         *p += info->inline_len;
195                 } else
196                         info->inline_version = CEPH_INLINE_NONE;
197
198                 if (features & CEPH_FEATURE_MDS_QUOTA) {
199                         err = parse_reply_info_quota(p, end, info);
200                         if (err < 0)
201                                 goto out_bad;
202                 } else {
203                         info->max_bytes = 0;
204                         info->max_files = 0;
205                 }
206
207                 info->pool_ns_len = 0;
208                 info->pool_ns_data = NULL;
209                 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
210                         ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
211                         if (info->pool_ns_len > 0) {
212                                 ceph_decode_need(p, end, info->pool_ns_len, bad);
213                                 info->pool_ns_data = *p;
214                                 *p += info->pool_ns_len;
215                         }
216                 }
217
218                 if (features & CEPH_FEATURE_FS_BTIME) {
219                         ceph_decode_need(p, end, sizeof(info->btime), bad);
220                         ceph_decode_copy(p, &info->btime, sizeof(info->btime));
221                         ceph_decode_64_safe(p, end, info->change_attr, bad);
222                 }
223
224                 info->dir_pin = -ENODATA;
225                 /* info->snap_btime and info->rsnaps remain zero */
226         }
227         return 0;
228 bad:
229         err = -EIO;
230 out_bad:
231         return err;
232 }
233
234 static int parse_reply_info_dir(void **p, void *end,
235                                 struct ceph_mds_reply_dirfrag **dirfrag,
236                                 u64 features)
237 {
238         if (features == (u64)-1) {
239                 u8 struct_v, struct_compat;
240                 u32 struct_len;
241                 ceph_decode_8_safe(p, end, struct_v, bad);
242                 ceph_decode_8_safe(p, end, struct_compat, bad);
243                 /* struct_v is expected to be >= 1. we only understand
244                  * encoding whose struct_compat == 1. */
245                 if (!struct_v || struct_compat != 1)
246                         goto bad;
247                 ceph_decode_32_safe(p, end, struct_len, bad);
248                 ceph_decode_need(p, end, struct_len, bad);
249                 end = *p + struct_len;
250         }
251
252         ceph_decode_need(p, end, sizeof(**dirfrag), bad);
253         *dirfrag = *p;
254         *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
255         if (unlikely(*p > end))
256                 goto bad;
257         if (features == (u64)-1)
258                 *p = end;
259         return 0;
260 bad:
261         return -EIO;
262 }
263
264 static int parse_reply_info_lease(void **p, void *end,
265                                   struct ceph_mds_reply_lease **lease,
266                                   u64 features)
267 {
268         if (features == (u64)-1) {
269                 u8 struct_v, struct_compat;
270                 u32 struct_len;
271                 ceph_decode_8_safe(p, end, struct_v, bad);
272                 ceph_decode_8_safe(p, end, struct_compat, bad);
273                 /* struct_v is expected to be >= 1. we only understand
274                  * encoding whose struct_compat == 1. */
275                 if (!struct_v || struct_compat != 1)
276                         goto bad;
277                 ceph_decode_32_safe(p, end, struct_len, bad);
278                 ceph_decode_need(p, end, struct_len, bad);
279                 end = *p + struct_len;
280         }
281
282         ceph_decode_need(p, end, sizeof(**lease), bad);
283         *lease = *p;
284         *p += sizeof(**lease);
285         if (features == (u64)-1)
286                 *p = end;
287         return 0;
288 bad:
289         return -EIO;
290 }
291
292 /*
293  * parse a normal reply, which may contain a (dir+)dentry and/or a
294  * target inode.
295  */
296 static int parse_reply_info_trace(void **p, void *end,
297                                   struct ceph_mds_reply_info_parsed *info,
298                                   u64 features)
299 {
300         int err;
301
302         if (info->head->is_dentry) {
303                 err = parse_reply_info_in(p, end, &info->diri, features);
304                 if (err < 0)
305                         goto out_bad;
306
307                 err = parse_reply_info_dir(p, end, &info->dirfrag, features);
308                 if (err < 0)
309                         goto out_bad;
310
311                 ceph_decode_32_safe(p, end, info->dname_len, bad);
312                 ceph_decode_need(p, end, info->dname_len, bad);
313                 info->dname = *p;
314                 *p += info->dname_len;
315
316                 err = parse_reply_info_lease(p, end, &info->dlease, features);
317                 if (err < 0)
318                         goto out_bad;
319         }
320
321         if (info->head->is_target) {
322                 err = parse_reply_info_in(p, end, &info->targeti, features);
323                 if (err < 0)
324                         goto out_bad;
325         }
326
327         if (unlikely(*p != end))
328                 goto bad;
329         return 0;
330
331 bad:
332         err = -EIO;
333 out_bad:
334         pr_err("problem parsing mds trace %d\n", err);
335         return err;
336 }
337
338 /*
339  * parse readdir results
340  */
341 static int parse_reply_info_readdir(void **p, void *end,
342                                 struct ceph_mds_reply_info_parsed *info,
343                                 u64 features)
344 {
345         u32 num, i = 0;
346         int err;
347
348         err = parse_reply_info_dir(p, end, &info->dir_dir, features);
349         if (err < 0)
350                 goto out_bad;
351
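        /* need the u32 entry count plus the u16 flags word */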
352         ceph_decode_need(p, end, sizeof(num) + 2, bad);
353         num = ceph_decode_32(p);
354         {
355                 u16 flags = ceph_decode_16(p);
356                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
357                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
358                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
359                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
360         }
361         if (num == 0)
362                 goto done;
363
364         BUG_ON(!info->dir_entries);
365         if ((unsigned long)(info->dir_entries + num) >
366             (unsigned long)info->dir_entries + info->dir_buf_size) {
367                 pr_err("dir contents are larger than expected\n");
368                 WARN_ON(1);
369                 goto bad;
370         }
371
372         info->dir_nr = num;
373         while (num) {
374                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
375                 /* dentry */
376                 ceph_decode_32_safe(p, end, rde->name_len, bad);
377                 ceph_decode_need(p, end, rde->name_len, bad);
378                 rde->name = *p;
379                 *p += rde->name_len;
380                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
381
382                 /* dentry lease */
383                 err = parse_reply_info_lease(p, end, &rde->lease, features);
384                 if (err)
385                         goto out_bad;
386                 /* inode */
387                 err = parse_reply_info_in(p, end, &rde->inode, features);
388                 if (err < 0)
389                         goto out_bad;
390                 /* ceph_readdir_prepopulate() will update it */
391                 rde->offset = 0;
392                 i++;
393                 num--;
394         }
395
396 done:
397         /* Skip over any unrecognized fields */
398         *p = end;
399         return 0;
400
401 bad:
402         err = -EIO;
403 out_bad:
404         pr_err("problem parsing dir contents %d\n", err);
405         return err;
406 }
407
408 /*
409  * parse fcntl F_GETLK results
410  */
411 static int parse_reply_info_filelock(void **p, void *end,
412                                      struct ceph_mds_reply_info_parsed *info,
413                                      u64 features)
414 {
415         if (*p + sizeof(*info->filelock_reply) > end)
416                 goto bad;
417
418         info->filelock_reply = *p;
419
420         /* Skip over any unrecognized fields */
421         *p = end;
422         return 0;
423 bad:
424         return -EIO;
425 }
426
427
428 #if BITS_PER_LONG == 64
429
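/* xarray value marking a delegated inode number that has not been used yet */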
430 #define DELEGATED_INO_AVAILABLE         xa_mk_value(1)
431
432 static int ceph_parse_deleg_inos(void **p, void *end,
433                                  struct ceph_mds_session *s)
434 {
435         u32 sets;
436
437         ceph_decode_32_safe(p, end, sets, bad);
438         dout("got %u sets of delegated inodes\n", sets);
439         while (sets--) {
440                 u64 start, len, ino;
441
442                 ceph_decode_64_safe(p, end, start, bad);
443                 ceph_decode_64_safe(p, end, len, bad);
444
445                 /* Don't accept a delegation of system inodes */
446                 if (start < CEPH_INO_SYSTEM_BASE) {
447                         pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
448                                         start, len);
449                         continue;
450                 }
451                 while (len--) {
452                         int err = xa_insert(&s->s_delegated_inos, ino = start++,
453                                             DELEGATED_INO_AVAILABLE,
454                                             GFP_KERNEL);
455                         if (!err) {
456                                 dout("added delegated inode 0x%llx\n",
457                                      start - 1);
458                         } else if (err == -EBUSY) {
459                                 pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
460                                         start - 1);
461                         } else {
462                                 return err;
463                         }
464                 }
465         }
466         return 0;
467 bad:
468         return -EIO;
469 }
470
471 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
472 {
473         unsigned long ino;
474         void *val;
475
476         xa_for_each(&s->s_delegated_inos, ino, val) {
477                 val = xa_erase(&s->s_delegated_inos, ino);
478                 if (val == DELEGATED_INO_AVAILABLE)
479                         return ino;
480         }
481         return 0;
482 }
483
484 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
485 {
486         return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
487                          GFP_KERNEL);
488 }
489 #else /* BITS_PER_LONG == 64 */
490 /*
491  * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
492  * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
493  * and bottom words?
494  */
495 static int ceph_parse_deleg_inos(void **p, void *end,
496                                  struct ceph_mds_session *s)
497 {
498         u32 sets;
499
500         ceph_decode_32_safe(p, end, sets, bad);
501         if (sets)
502                 ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
503         return 0;
504 bad:
505         return -EIO;
506 }
507
508 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
509 {
510         return 0;
511 }
512
513 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
514 {
515         return 0;
516 }
517 #endif /* BITS_PER_LONG == 64 */
518
519 /*
520  * parse create results
521  */
522 static int parse_reply_info_create(void **p, void *end,
523                                   struct ceph_mds_reply_info_parsed *info,
524                                   u64 features, struct ceph_mds_session *s)
525 {
526         int ret;
527
528         if (features == (u64)-1 ||
529             (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
530                 if (*p == end) {
531                         /* Malformed reply? */
532                         info->has_create_ino = false;
533                 } else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
534                         info->has_create_ino = true;
535                         /* struct_v, struct_compat, and len */
536                         ceph_decode_skip_n(p, end, 2 + sizeof(u32), bad);
537                         ceph_decode_64_safe(p, end, info->ino, bad);
538                         ret = ceph_parse_deleg_inos(p, end, s);
539                         if (ret)
540                                 return ret;
541                 } else {
542                         /* legacy */
543                         ceph_decode_64_safe(p, end, info->ino, bad);
544                         info->has_create_ino = true;
545                 }
546         } else {
547                 if (*p != end)
548                         goto bad;
549         }
550
551         /* Skip over any unrecognized fields */
552         *p = end;
553         return 0;
554 bad:
555         return -EIO;
556 }
557
558 /*
559  * parse extra results
560  */
561 static int parse_reply_info_extra(void **p, void *end,
562                                   struct ceph_mds_reply_info_parsed *info,
563                                   u64 features, struct ceph_mds_session *s)
564 {
565         u32 op = le32_to_cpu(info->head->op);
566
567         if (op == CEPH_MDS_OP_GETFILELOCK)
568                 return parse_reply_info_filelock(p, end, info, features);
569         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
570                 return parse_reply_info_readdir(p, end, info, features);
571         else if (op == CEPH_MDS_OP_CREATE)
572                 return parse_reply_info_create(p, end, info, features, s);
573         else
574                 return -EIO;
575 }
576
577 /*
578  * parse entire mds reply
579  */
580 static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
581                             struct ceph_mds_reply_info_parsed *info,
582                             u64 features)
583 {
584         void *p, *end;
585         u32 len;
586         int err;
587
588         info->head = msg->front.iov_base;
589         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
590         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
591
592         /* trace */
593         ceph_decode_32_safe(&p, end, len, bad);
594         if (len > 0) {
595                 ceph_decode_need(&p, end, len, bad);
596                 err = parse_reply_info_trace(&p, p+len, info, features);
597                 if (err < 0)
598                         goto out_bad;
599         }
600
601         /* extra */
602         ceph_decode_32_safe(&p, end, len, bad);
603         if (len > 0) {
604                 ceph_decode_need(&p, end, len, bad);
605                 err = parse_reply_info_extra(&p, p+len, info, features, s);
606                 if (err < 0)
607                         goto out_bad;
608         }
609
610         /* snap blob */
611         ceph_decode_32_safe(&p, end, len, bad);
612         info->snapblob_len = len;
613         info->snapblob = p;
614         p += len;
615
616         if (p != end)
617                 goto bad;
618         return 0;
619
620 bad:
621         err = -EIO;
622 out_bad:
623         pr_err("mds parse_reply err %d\n", err);
624         return err;
625 }
626
627 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
628 {
629         if (!info->dir_entries)
630                 return;
631         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
632 }
633
634
635 /*
636  * sessions
637  */
638 const char *ceph_session_state_name(int s)
639 {
640         switch (s) {
641         case CEPH_MDS_SESSION_NEW: return "new";
642         case CEPH_MDS_SESSION_OPENING: return "opening";
643         case CEPH_MDS_SESSION_OPEN: return "open";
644         case CEPH_MDS_SESSION_HUNG: return "hung";
645         case CEPH_MDS_SESSION_CLOSING: return "closing";
646         case CEPH_MDS_SESSION_CLOSED: return "closed";
647         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
648         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
649         case CEPH_MDS_SESSION_REJECTED: return "rejected";
650         default: return "???";
651         }
652 }
653
654 struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
655 {
656         if (refcount_inc_not_zero(&s->s_ref))
657                 return s;
658         return NULL;
659 }
660
661 void ceph_put_mds_session(struct ceph_mds_session *s)
662 {
663         if (IS_ERR_OR_NULL(s))
664                 return;
665
666         if (refcount_dec_and_test(&s->s_ref)) {
667                 if (s->s_auth.authorizer)
668                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
669                 WARN_ON(mutex_is_locked(&s->s_mutex));
670                 xa_destroy(&s->s_delegated_inos);
671                 kfree(s);
672         }
673 }
674
675 /*
676  * called under mdsc->mutex
677  */
678 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
679                                                    int mds)
680 {
681         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
682                 return NULL;
683         return ceph_get_mds_session(mdsc->sessions[mds]);
684 }
685
686 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
687 {
688         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
689                 return false;
690         else
691                 return true;
692 }
693
694 static int __verify_registered_session(struct ceph_mds_client *mdsc,
695                                        struct ceph_mds_session *s)
696 {
697         if (s->s_mds >= mdsc->max_sessions ||
698             mdsc->sessions[s->s_mds] != s)
699                 return -ENOENT;
700         return 0;
701 }
702
703 /*
704  * create+register a new session for given mds.
705  * called under mdsc->mutex.
706  */
707 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
708                                                  int mds)
709 {
710         struct ceph_mds_session *s;
711
712         if (mds >= mdsc->mdsmap->possible_max_rank)
713                 return ERR_PTR(-EINVAL);
714
715         s = kzalloc(sizeof(*s), GFP_NOFS);
716         if (!s)
717                 return ERR_PTR(-ENOMEM);
718
719         if (mds >= mdsc->max_sessions) {
720                 int newmax = 1 << get_count_order(mds + 1);
721                 struct ceph_mds_session **sa;
722
723                 dout("%s: realloc to %d\n", __func__, newmax);
724                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
725                 if (!sa)
726                         goto fail_realloc;
727                 if (mdsc->sessions) {
728                         memcpy(sa, mdsc->sessions,
729                                mdsc->max_sessions * sizeof(void *));
730                         kfree(mdsc->sessions);
731                 }
732                 mdsc->sessions = sa;
733                 mdsc->max_sessions = newmax;
734         }
735
736         dout("%s: mds%d\n", __func__, mds);
737         s->s_mdsc = mdsc;
738         s->s_mds = mds;
739         s->s_state = CEPH_MDS_SESSION_NEW;
740         mutex_init(&s->s_mutex);
741
742         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
743
744         atomic_set(&s->s_cap_gen, 1);
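        /* cap TTL starts already expired; it becomes valid on the first renew */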
745         s->s_cap_ttl = jiffies - 1;
746
747         spin_lock_init(&s->s_cap_lock);
748         INIT_LIST_HEAD(&s->s_caps);
749         refcount_set(&s->s_ref, 1);
750         INIT_LIST_HEAD(&s->s_waiting);
751         INIT_LIST_HEAD(&s->s_unsafe);
752         xa_init(&s->s_delegated_inos);
753         INIT_LIST_HEAD(&s->s_cap_releases);
754         INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
755
756         INIT_LIST_HEAD(&s->s_cap_dirty);
757         INIT_LIST_HEAD(&s->s_cap_flushing);
758
759         mdsc->sessions[mds] = s;
760         atomic_inc(&mdsc->num_sessions);
761         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
762
763         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
764                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
765
766         return s;
767
768 fail_realloc:
769         kfree(s);
770         return ERR_PTR(-ENOMEM);
771 }
772
773 /*
774  * called under mdsc->mutex
775  */
776 static void __unregister_session(struct ceph_mds_client *mdsc,
777                                struct ceph_mds_session *s)
778 {
779         dout("__unregister_session mds%d %p\n", s->s_mds, s);
780         BUG_ON(mdsc->sessions[s->s_mds] != s);
781         mdsc->sessions[s->s_mds] = NULL;
782         ceph_con_close(&s->s_con);
783         ceph_put_mds_session(s);
784         atomic_dec(&mdsc->num_sessions);
785 }
786
787 /*
788  * drop session refs in request.
789  *
790  * should be last request ref, or hold mdsc->mutex
791  */
792 static void put_request_session(struct ceph_mds_request *req)
793 {
794         if (req->r_session) {
795                 ceph_put_mds_session(req->r_session);
796                 req->r_session = NULL;
797         }
798 }
799
800 void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
801                                 void (*cb)(struct ceph_mds_session *),
802                                 bool check_state)
803 {
804         int mds;
805
806         mutex_lock(&mdsc->mutex);
807         for (mds = 0; mds < mdsc->max_sessions; ++mds) {
808                 struct ceph_mds_session *s;
809
810                 s = __ceph_lookup_mds_session(mdsc, mds);
811                 if (!s)
812                         continue;
813
814                 if (check_state && !check_session_state(s)) {
815                         ceph_put_mds_session(s);
816                         continue;
817                 }
818
819                 mutex_unlock(&mdsc->mutex);
820                 cb(s);
821                 ceph_put_mds_session(s);
822                 mutex_lock(&mdsc->mutex);
823         }
824         mutex_unlock(&mdsc->mutex);
825 }
826
827 void ceph_mdsc_release_request(struct kref *kref)
828 {
829         struct ceph_mds_request *req = container_of(kref,
830                                                     struct ceph_mds_request,
831                                                     r_kref);
832         ceph_mdsc_release_dir_caps_no_check(req);
833         destroy_reply_info(&req->r_reply_info);
834         if (req->r_request)
835                 ceph_msg_put(req->r_request);
836         if (req->r_reply)
837                 ceph_msg_put(req->r_reply);
838         if (req->r_inode) {
839                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
840                 iput(req->r_inode);
841         }
842         if (req->r_parent) {
843                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
844                 iput(req->r_parent);
845         }
846         iput(req->r_target_inode);
847         if (req->r_dentry)
848                 dput(req->r_dentry);
849         if (req->r_old_dentry)
850                 dput(req->r_old_dentry);
851         if (req->r_old_dentry_dir) {
852                 /*
853                  * track (and drop pins for) r_old_dentry_dir
854                  * separately, since r_old_dentry's d_parent may have
855                  * changed between the dir mutex being dropped and
856                  * this request being freed.
857                  */
858                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
859                                   CEPH_CAP_PIN);
860                 iput(req->r_old_dentry_dir);
861         }
862         kfree(req->r_path1);
863         kfree(req->r_path2);
864         put_cred(req->r_cred);
865         if (req->r_pagelist)
866                 ceph_pagelist_release(req->r_pagelist);
867         put_request_session(req);
868         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
869         WARN_ON_ONCE(!list_empty(&req->r_wait));
870         kmem_cache_free(ceph_mds_request_cachep, req);
871 }
872
873 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
874
875 /*
876  * lookup request, bump ref if found.
877  *
878  * called under mdsc->mutex.
879  */
880 static struct ceph_mds_request *
881 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
882 {
883         struct ceph_mds_request *req;
884
885         req = lookup_request(&mdsc->request_tree, tid);
886         if (req)
887                 ceph_mdsc_get_request(req);
888
889         return req;
890 }
891
892 /*
893  * Register an in-flight request, and assign a tid.  Link to the directory
894  * we are modifying (if any).
895  *
896  * Called under mdsc->mutex.
897  */
898 static void __register_request(struct ceph_mds_client *mdsc,
899                                struct ceph_mds_request *req,
900                                struct inode *dir)
901 {
902         int ret = 0;
903
904         req->r_tid = ++mdsc->last_tid;
905         if (req->r_num_caps) {
906                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
907                                         req->r_num_caps);
908                 if (ret < 0) {
909                         pr_err("__register_request %p "
910                                "failed to reserve caps: %d\n", req, ret);
911                         /* set req->r_err to fail early from __do_request */
912                         req->r_err = ret;
913                         return;
914                 }
915         }
916         dout("__register_request %p tid %lld\n", req, req->r_tid);
917         ceph_mdsc_get_request(req);
918         insert_request(&mdsc->request_tree, req);
919
920         req->r_cred = get_current_cred();
921
922         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
923                 mdsc->oldest_tid = req->r_tid;
924
925         if (dir) {
926                 struct ceph_inode_info *ci = ceph_inode(dir);
927
928                 ihold(dir);
929                 req->r_unsafe_dir = dir;
930                 spin_lock(&ci->i_unsafe_lock);
931                 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
932                 spin_unlock(&ci->i_unsafe_lock);
933         }
934 }
935
936 static void __unregister_request(struct ceph_mds_client *mdsc,
937                                  struct ceph_mds_request *req)
938 {
939         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
940
941         /* Never leave an unregistered request on an unsafe list! */
942         list_del_init(&req->r_unsafe_item);
943
944         if (req->r_tid == mdsc->oldest_tid) {
945                 struct rb_node *p = rb_next(&req->r_node);
946                 mdsc->oldest_tid = 0;
947                 while (p) {
948                         struct ceph_mds_request *next_req =
949                                 rb_entry(p, struct ceph_mds_request, r_node);
950                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
951                                 mdsc->oldest_tid = next_req->r_tid;
952                                 break;
953                         }
954                         p = rb_next(p);
955                 }
956         }
957
958         erase_request(&mdsc->request_tree, req);
959
960         if (req->r_unsafe_dir) {
961                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
962                 spin_lock(&ci->i_unsafe_lock);
963                 list_del_init(&req->r_unsafe_dir_item);
964                 spin_unlock(&ci->i_unsafe_lock);
965         }
966         if (req->r_target_inode &&
967             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
968                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
969                 spin_lock(&ci->i_unsafe_lock);
970                 list_del_init(&req->r_unsafe_target_item);
971                 spin_unlock(&ci->i_unsafe_lock);
972         }
973
974         if (req->r_unsafe_dir) {
975                 iput(req->r_unsafe_dir);
976                 req->r_unsafe_dir = NULL;
977         }
978
979         complete_all(&req->r_safe_completion);
980
981         ceph_mdsc_put_request(req);
982 }
983
984 /*
985  * Walk back up the dentry tree until we hit a dentry representing a
986  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
987  * when calling this) to ensure that the objects won't disappear while we're
988  * working with them. Once we hit a candidate dentry, we attempt to take a
989  * reference to it, and return that as the result.
990  */
991 static struct inode *get_nonsnap_parent(struct dentry *dentry)
992 {
993         struct inode *inode = NULL;
994
995         while (dentry && !IS_ROOT(dentry)) {
996                 inode = d_inode_rcu(dentry);
997                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
998                         break;
999                 dentry = dentry->d_parent;
1000         }
1001         if (inode)
1002                 inode = igrab(inode);
1003         return inode;
1004 }
1005
1006 /*
1007  * Choose mds to send request to next.  If there is a hint set in the
1008  * request (e.g., due to a prior forward hint from the mds), use that.
1009  * Otherwise, consult frag tree and/or caps to identify the
1010  * appropriate mds.  If all else fails, choose randomly.
1011  *
1012  * Called under mdsc->mutex.
1013  */
1014 static int __choose_mds(struct ceph_mds_client *mdsc,
1015                         struct ceph_mds_request *req,
1016                         bool *random)
1017 {
1018         struct inode *inode;
1019         struct ceph_inode_info *ci;
1020         struct ceph_cap *cap;
1021         int mode = req->r_direct_mode;
1022         int mds = -1;
1023         u32 hash = req->r_direct_hash;
1024         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1025
1026         if (random)
1027                 *random = false;
1028
1029         /*
1030          * is there a specific mds we should try?  ignore hint if we have
1031          * no session and the mds is not up (active or recovering).
1032          */
1033         if (req->r_resend_mds >= 0 &&
1034             (__have_session(mdsc, req->r_resend_mds) ||
1035              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1036                 dout("%s using resend_mds mds%d\n", __func__,
1037                      req->r_resend_mds);
1038                 return req->r_resend_mds;
1039         }
1040
1041         if (mode == USE_RANDOM_MDS)
1042                 goto random;
1043
1044         inode = NULL;
1045         if (req->r_inode) {
1046                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1047                         inode = req->r_inode;
1048                         ihold(inode);
1049                 } else {
1050                         /* req->r_dentry is non-null for LSSNAP request */
1051                         rcu_read_lock();
1052                         inode = get_nonsnap_parent(req->r_dentry);
1053                         rcu_read_unlock();
1054                         dout("%s using snapdir's parent %p\n", __func__, inode);
1055                 }
1056         } else if (req->r_dentry) {
1057                 /* ignore race with rename; old or new d_parent is okay */
1058                 struct dentry *parent;
1059                 struct inode *dir;
1060
1061                 rcu_read_lock();
1062                 parent = READ_ONCE(req->r_dentry->d_parent);
1063                 dir = req->r_parent ? : d_inode_rcu(parent);
1064
1065                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
1066                         /*  not this fs or parent went negative */
1067                         inode = d_inode(req->r_dentry);
1068                         if (inode)
1069                                 ihold(inode);
1070                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
1071                         /* direct snapped/virtual snapdir requests
1072                          * based on parent dir inode */
1073                         inode = get_nonsnap_parent(parent);
1074                         dout("%s using nonsnap parent %p\n", __func__, inode);
1075                 } else {
1076                         /* dentry target */
1077                         inode = d_inode(req->r_dentry);
1078                         if (!inode || mode == USE_AUTH_MDS) {
1079                                 /* dir + name */
1080                                 inode = igrab(dir);
1081                                 hash = ceph_dentry_hash(dir, req->r_dentry);
1082                                 is_hash = true;
1083                         } else {
1084                                 ihold(inode);
1085                         }
1086                 }
1087                 rcu_read_unlock();
1088         }
1089
1090         dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
1091              hash, mode);
1092         if (!inode)
1093                 goto random;
1094         ci = ceph_inode(inode);
1095
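        /*
         * For a hashed name in a directory, consult the fragtree: a
         * replicated frag lets USE_ANY_MDS pick a random replica,
         * otherwise prefer the frag's authoritative mds.
         */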
1096         if (is_hash && S_ISDIR(inode->i_mode)) {
1097                 struct ceph_inode_frag frag;
1098                 int found;
1099
1100                 ceph_choose_frag(ci, hash, &frag, &found);
1101                 if (found) {
1102                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
1103                                 u8 r;
1104
1105                                 /* choose a random replica */
1106                                 get_random_bytes(&r, 1);
1107                                 r %= frag.ndist;
1108                                 mds = frag.dist[r];
1109                                 dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
1110                                      __func__, inode, ceph_vinop(inode),
1111                                      frag.frag, mds, (int)r, frag.ndist);
1112                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1113                                     CEPH_MDS_STATE_ACTIVE &&
1114                                     !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
1115                                         goto out;
1116                         }
1117
1118                         /* since this file/dir wasn't known to be
1119                          * replicated, we want to look for the
1120                          * authoritative mds. */
1121                         if (frag.mds >= 0) {
1122                                 /* choose auth mds */
1123                                 mds = frag.mds;
1124                                 dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
1125                                      __func__, inode, ceph_vinop(inode),
1126                                      frag.frag, mds);
1127                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1128                                     CEPH_MDS_STATE_ACTIVE) {
1129                                         if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
1130                                                                   mds))
1131                                                 goto out;
1132                                 }
1133                         }
1134                         mode = USE_AUTH_MDS;
1135                 }
1136         }
1137
1138         spin_lock(&ci->i_ceph_lock);
1139         cap = NULL;
1140         if (mode == USE_AUTH_MDS)
1141                 cap = ci->i_auth_cap;
1142         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
1143                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
1144         if (!cap) {
1145                 spin_unlock(&ci->i_ceph_lock);
1146                 iput(inode);
1147                 goto random;
1148         }
1149         mds = cap->session->s_mds;
1150         dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
1151              inode, ceph_vinop(inode), mds,
1152              cap == ci->i_auth_cap ? "auth " : "", cap);
1153         spin_unlock(&ci->i_ceph_lock);
1154 out:
1155         iput(inode);
1156         return mds;
1157
1158 random:
1159         if (random)
1160                 *random = true;
1161
1162         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1163         dout("%s chose random mds%d\n", __func__, mds);
1164         return mds;
1165 }
1166
1167
1168 /*
1169  * session messages
1170  */
1171 struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
1172 {
1173         struct ceph_msg *msg;
1174         struct ceph_mds_session_head *h;
1175
1176         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
1177                            false);
1178         if (!msg) {
1179                 pr_err("ENOMEM creating session %s msg\n",
1180                        ceph_session_op_name(op));
1181                 return NULL;
1182         }
1183         h = msg->front.iov_base;
1184         h->op = cpu_to_le32(op);
1185         h->seq = cpu_to_le64(seq);
1186
1187         return msg;
1188 }
1189
1190 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
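/*
 * Bytes needed for the feature bitmap on the wire: the highest supported
 * feature bit rounded up to a whole 64-bit word.
 */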
1191 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
1192 static int encode_supported_features(void **p, void *end)
1193 {
1194         static const size_t count = ARRAY_SIZE(feature_bits);
1195
1196         if (count > 0) {
1197                 size_t i;
1198                 size_t size = FEATURE_BYTES(count);
1199
1200                 if (WARN_ON_ONCE(*p + 4 + size > end))
1201                         return -ERANGE;
1202
1203                 ceph_encode_32(p, size);
1204                 memset(*p, 0, size);
1205                 for (i = 0; i < count; i++)
1206                         ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
1207                 *p += size;
1208         } else {
1209                 if (WARN_ON_ONCE(*p + 4 > end))
1210                         return -ERANGE;
1211
1212                 ceph_encode_32(p, 0);
1213         }
1214
1215         return 0;
1216 }
1217
1218 static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
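/* sized like FEATURE_BYTES: highest metric bit rounded up to a 64-bit word */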
1219 #define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
1220 static int encode_metric_spec(void **p, void *end)
1221 {
1222         static const size_t count = ARRAY_SIZE(metric_bits);
1223
1224         /* header */
1225         if (WARN_ON_ONCE(*p + 2 > end))
1226                 return -ERANGE;
1227
1228         ceph_encode_8(p, 1); /* version */
1229         ceph_encode_8(p, 1); /* compat */
1230
1231         if (count > 0) {
1232                 size_t i;
1233                 size_t size = METRIC_BYTES(count);
1234
1235                 if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
1236                         return -ERANGE;
1237
1238                 /* metric spec info length */
1239                 ceph_encode_32(p, 4 + size);
1240
1241                 /* metric spec */
1242                 ceph_encode_32(p, size);
1243                 memset(*p, 0, size);
1244                 for (i = 0; i < count; i++)
1245                         ((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
1246                 *p += size;
1247         } else {
1248                 if (WARN_ON_ONCE(*p + 4 + 4 > end))
1249                         return -ERANGE;
1250
1251                 /* metric spec info length */
1252                 ceph_encode_32(p, 4);
1253                 /* metric spec */
1254                 ceph_encode_32(p, 0);
1255         }
1256
1257         return 0;
1258 }
1259
1260 /*
1261  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
1262  * to include additional client metadata fields.
1263  */
1264 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
1265 {
1266         struct ceph_msg *msg;
1267         struct ceph_mds_session_head *h;
1268         int i;
1269         int extra_bytes = 0;
1270         int metadata_key_count = 0;
1271         struct ceph_options *opt = mdsc->fsc->client->options;
1272         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1273         size_t size, count;
1274         void *p, *end;
1275         int ret;
1276
1277         const char* metadata[][2] = {
1278                 {"hostname", mdsc->nodename},
1279                 {"kernel_version", init_utsname()->release},
1280                 {"entity_id", opt->name ? : ""},
1281                 {"root", fsopt->server_path ? : "/"},
1282                 {NULL, NULL}
1283         };
1284
1285         /* Calculate serialized length of metadata */
1286         extra_bytes = 4;  /* map length */
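        /* each map entry costs two u32 length prefixes plus the two strings */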
1287         for (i = 0; metadata[i][0]; ++i) {
1288                 extra_bytes += 8 + strlen(metadata[i][0]) +
1289                         strlen(metadata[i][1]);
1290                 metadata_key_count++;
1291         }
1292
1293         /* supported feature */
1294         size = 0;
1295         count = ARRAY_SIZE(feature_bits);
1296         if (count > 0)
1297                 size = FEATURE_BYTES(count);
1298         extra_bytes += 4 + size;
1299
1300         /* metric spec */
1301         size = 0;
1302         count = ARRAY_SIZE(metric_bits);
1303         if (count > 0)
1304                 size = METRIC_BYTES(count);
1305         extra_bytes += 2 + 4 + 4 + size;
1306
1307         /* Allocate the message */
1308         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
1309                            GFP_NOFS, false);
1310         if (!msg) {
1311                 pr_err("ENOMEM creating session open msg\n");
1312                 return ERR_PTR(-ENOMEM);
1313         }
1314         p = msg->front.iov_base;
1315         end = p + msg->front.iov_len;
1316
1317         h = p;
1318         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
1319         h->seq = cpu_to_le64(seq);
1320
1321         /*
1322          * Serialize client metadata into waiting buffer space, using
1323          * the format that userspace expects for map<string, string>
1324          *
1325          * ClientSession messages with metadata are v4
1326          */
1327         msg->hdr.version = cpu_to_le16(4);
1328         msg->hdr.compat_version = cpu_to_le16(1);
1329
1330         /* The write pointer, following the session_head structure */
1331         p += sizeof(*h);
1332
1333         /* Number of entries in the map */
1334         ceph_encode_32(&p, metadata_key_count);
1335
1336         /* Two length-prefixed strings for each entry in the map */
1337         for (i = 0; metadata[i][0]; ++i) {
1338                 size_t const key_len = strlen(metadata[i][0]);
1339                 size_t const val_len = strlen(metadata[i][1]);
1340
1341                 ceph_encode_32(&p, key_len);
1342                 memcpy(p, metadata[i][0], key_len);
1343                 p += key_len;
1344                 ceph_encode_32(&p, val_len);
1345                 memcpy(p, metadata[i][1], val_len);
1346                 p += val_len;
1347         }
1348
1349         ret = encode_supported_features(&p, end);
1350         if (ret) {
1351                 pr_err("encode_supported_features failed!\n");
1352                 ceph_msg_put(msg);
1353                 return ERR_PTR(ret);
1354         }
1355
1356         ret = encode_metric_spec(&p, end);
1357         if (ret) {
1358                 pr_err("encode_metric_spec failed!\n");
1359                 ceph_msg_put(msg);
1360                 return ERR_PTR(ret);
1361         }
1362
1363         msg->front.iov_len = p - msg->front.iov_base;
1364         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1365
1366         return msg;
1367 }
1368
1369 /*
1370  * send session open request.
1371  *
1372  * called under mdsc->mutex
1373  */
1374 static int __open_session(struct ceph_mds_client *mdsc,
1375                           struct ceph_mds_session *session)
1376 {
1377         struct ceph_msg *msg;
1378         int mstate;
1379         int mds = session->s_mds;
1380
1381         /* wait for mds to go active? */
1382         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1383         dout("open_session to mds%d (%s)\n", mds,
1384              ceph_mds_state_name(mstate));
1385         session->s_state = CEPH_MDS_SESSION_OPENING;
1386         session->s_renew_requested = jiffies;
1387
1388         /* send connect message */
1389         msg = create_session_open_msg(mdsc, session->s_seq);
1390         if (IS_ERR(msg))
1391                 return PTR_ERR(msg);
1392         ceph_con_send(&session->s_con, msg);
1393         return 0;
1394 }
1395
1396 /*
1397  * open sessions for any export targets for the given mds
1398  *
1399  * called under mdsc->mutex
1400  */
1401 static struct ceph_mds_session *
1402 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1403 {
1404         struct ceph_mds_session *session;
1405         int ret;
1406
1407         session = __ceph_lookup_mds_session(mdsc, target);
1408         if (!session) {
1409                 session = register_session(mdsc, target);
1410                 if (IS_ERR(session))
1411                         return session;
1412         }
1413         if (session->s_state == CEPH_MDS_SESSION_NEW ||
1414             session->s_state == CEPH_MDS_SESSION_CLOSING) {
1415                 ret = __open_session(mdsc, session);
1416                 if (ret)
1417                         return ERR_PTR(ret);
1418         }
1419
1420         return session;
1421 }
1422
1423 struct ceph_mds_session *
1424 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1425 {
1426         struct ceph_mds_session *session;
1427
1428         dout("open_export_target_session to mds%d\n", target);
1429
1430         mutex_lock(&mdsc->mutex);
1431         session = __open_export_target_session(mdsc, target);
1432         mutex_unlock(&mdsc->mutex);
1433
1434         return session;
1435 }
1436
1437 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1438                                           struct ceph_mds_session *session)
1439 {
1440         struct ceph_mds_info *mi;
1441         struct ceph_mds_session *ts;
1442         int i, mds = session->s_mds;
1443
1444         if (mds >= mdsc->mdsmap->possible_max_rank)
1445                 return;
1446
1447         mi = &mdsc->mdsmap->m_info[mds];
1448         dout("open_export_target_sessions for mds%d (%d targets)\n",
1449              session->s_mds, mi->num_export_targets);
1450
1451         for (i = 0; i < mi->num_export_targets; i++) {
1452                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1453                 ceph_put_mds_session(ts);
1454         }
1455 }
1456
1457 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1458                                            struct ceph_mds_session *session)
1459 {
1460         mutex_lock(&mdsc->mutex);
1461         __open_export_target_sessions(mdsc, session);
1462         mutex_unlock(&mdsc->mutex);
1463 }
1464
1465 /*
1466  * session caps
1467  */
1468
1469 static void detach_cap_releases(struct ceph_mds_session *session,
1470                                 struct list_head *target)
1471 {
1472         lockdep_assert_held(&session->s_cap_lock);
1473
1474         list_splice_init(&session->s_cap_releases, target);
1475         session->s_num_cap_releases = 0;
1476         dout("%s mds%d\n", __func__, session->s_mds);
1477 }
1478
1479 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1480                                  struct list_head *dispose)
1481 {
1482         while (!list_empty(dispose)) {
1483                 struct ceph_cap *cap;
1484                 /* take each cap off the list and release it */
1485                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1486                 list_del(&cap->session_caps);
1487                 ceph_put_cap(mdsc, cap);
1488         }
1489 }
1490
1491 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1492                                      struct ceph_mds_session *session)
1493 {
1494         struct ceph_mds_request *req;
1495         struct rb_node *p;
1496
1497         dout("cleanup_session_requests mds%d\n", session->s_mds);
1498         mutex_lock(&mdsc->mutex);
1499         while (!list_empty(&session->s_unsafe)) {
1500                 req = list_first_entry(&session->s_unsafe,
1501                                        struct ceph_mds_request, r_unsafe_item);
1502                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1503                                     req->r_tid);
1504                 if (req->r_target_inode)
1505                         mapping_set_error(req->r_target_inode->i_mapping, -EIO);
1506                 if (req->r_unsafe_dir)
1507                         mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
1508                 __unregister_request(mdsc, req);
1509         }
1510         /* zero r_attempts, so kick_requests() will re-send requests */
1511         p = rb_first(&mdsc->request_tree);
1512         while (p) {
1513                 req = rb_entry(p, struct ceph_mds_request, r_node);
1514                 p = rb_next(p);
1515                 if (req->r_session &&
1516                     req->r_session->s_mds == session->s_mds)
1517                         req->r_attempts = 0;
1518         }
1519         mutex_unlock(&mdsc->mutex);
1520 }
1521
1522 /*
1523  * Helper to safely iterate over all caps associated with a session, with
1524  * special care taken to handle a racing __ceph_remove_cap().
1525  *
1526  * Caller must hold session s_mutex.
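 *
 * While the callback runs, s_cap_lock is dropped and s_cap_iterator
 * pins the current position: a racing __ceph_remove_cap() leaves the
 * cap on the list with cap->ci cleared, and the iterator finishes the
 * removal itself after the callback returns.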
1527  */
1528 int ceph_iterate_session_caps(struct ceph_mds_session *session,
1529                               int (*cb)(struct inode *, struct ceph_cap *,
1530                                         void *), void *arg)
1531 {
1532         struct list_head *p;
1533         struct ceph_cap *cap;
1534         struct inode *inode, *last_inode = NULL;
1535         struct ceph_cap *old_cap = NULL;
1536         int ret;
1537
1538         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1539         spin_lock(&session->s_cap_lock);
1540         p = session->s_caps.next;
1541         while (p != &session->s_caps) {
1542                 cap = list_entry(p, struct ceph_cap, session_caps);
1543                 inode = igrab(&cap->ci->vfs_inode);
1544                 if (!inode) {
1545                         p = p->next;
1546                         continue;
1547                 }
1548                 session->s_cap_iterator = cap;
1549                 spin_unlock(&session->s_cap_lock);
1550
1551                 if (last_inode) {
1552                         iput(last_inode);
1553                         last_inode = NULL;
1554                 }
1555                 if (old_cap) {
1556                         ceph_put_cap(session->s_mdsc, old_cap);
1557                         old_cap = NULL;
1558                 }
1559
1560                 ret = cb(inode, cap, arg);
1561                 last_inode = inode;
1562
1563                 spin_lock(&session->s_cap_lock);
1564                 p = p->next;
1565                 if (!cap->ci) {
1566                         dout("iterate_session_caps finishing cap %p removal\n",
1567                              cap);
1568                         BUG_ON(cap->session != session);
1569                         cap->session = NULL;
1570                         list_del_init(&cap->session_caps);
1571                         session->s_nr_caps--;
1572                         atomic64_dec(&session->s_mdsc->metric.total_caps);
1573                         if (cap->queue_release)
1574                                 __ceph_queue_cap_release(session, cap);
1575                         else
1576                                 old_cap = cap;  /* put_cap it w/o locks held */
1577                 }
1578                 if (ret < 0)
1579                         goto out;
1580         }
1581         ret = 0;
1582 out:
1583         session->s_cap_iterator = NULL;
1584         spin_unlock(&session->s_cap_lock);
1585
1586         iput(last_inode);
1587         if (old_cap)
1588                 ceph_put_cap(session->s_mdsc, old_cap);
1589
1590         return ret;
1591 }
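
/*
 * A minimal (hypothetical) callback, to illustrate the contract: the
 * callback runs with an inode reference held and without s_cap_lock,
 * and a negative return value stops the iteration early:
 *
 *	static int count_caps_cb(struct inode *inode, struct ceph_cap *cap,
 *				 void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;
 *	}
 */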
1592
1593 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1594                                   void *arg)
1595 {
1596         struct ceph_inode_info *ci = ceph_inode(inode);
1597         bool invalidate = false;
1598         int iputs;
1599
1600         dout("removing cap %p, ci is %p, inode is %p\n",
1601              cap, ci, &ci->vfs_inode);
1602         spin_lock(&ci->i_ceph_lock);
1603         iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
1604         spin_unlock(&ci->i_ceph_lock);
1605
1606         wake_up_all(&ci->i_cap_wq);
1607         if (invalidate)
1608                 ceph_queue_invalidate(inode);
1609         while (iputs--)
1610                 iput(inode);
1611         return 0;
1612 }
1613
1614 /*
1615  * caller must hold session s_mutex
1616  */
1617 static void remove_session_caps(struct ceph_mds_session *session)
1618 {
1619         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1620         struct super_block *sb = fsc->sb;
1621         LIST_HEAD(dispose);
1622
1623         dout("remove_session_caps on %p\n", session);
1624         ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1625
1626         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1627
1628         spin_lock(&session->s_cap_lock);
1629         if (session->s_nr_caps > 0) {
1630                 struct inode *inode;
1631                 struct ceph_cap *cap, *prev = NULL;
1632                 struct ceph_vino vino;
1633                 /*
1634                  * iterate_session_caps() skips inodes that are being
1635                  * deleted, so we need to wait until those deletions are
1636                  * complete.  __wait_on_freeing_inode() is designed for the
1637                  * job, but it is not exported, so use the inode lookup
1638                  * function (ceph_find_inode) to reach it.
1639                  */
1640                 while (!list_empty(&session->s_caps)) {
1641                         cap = list_entry(session->s_caps.next,
1642                                          struct ceph_cap, session_caps);
1643                         if (cap == prev)
1644                                 break;
1645                         prev = cap;
1646                         vino = cap->ci->i_vino;
1647                         spin_unlock(&session->s_cap_lock);
1648
1649                         inode = ceph_find_inode(sb, vino);
1650                         iput(inode);
1651
1652                         spin_lock(&session->s_cap_lock);
1653                 }
1654         }
1655
1656         /* detach the queued cap releases; s_cap_lock is dropped below */
1657         detach_cap_releases(session, &dispose);
1658
1659         BUG_ON(session->s_nr_caps > 0);
1660         BUG_ON(!list_empty(&session->s_cap_flushing));
1661         spin_unlock(&session->s_cap_lock);
1662         dispose_cap_releases(session->s_mdsc, &dispose);
1663 }
1664
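/* session cap wake-up events, handled by wake_up_session_cb() */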
1665 enum {
1666         RECONNECT,
1667         RENEWCAPS,
1668         FORCE_RO,
1669 };
1670
1671 /*
1672  * Wake up any threads waiting on this session's caps.  If the cap is
1673  * old (didn't get renewed on client reconnect), downgrade it to CEPH_CAP_PIN.
1674  *
1675  * caller must hold s_mutex.
1676  */
1677 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1678                               void *arg)
1679 {
1680         struct ceph_inode_info *ci = ceph_inode(inode);
1681         unsigned long ev = (unsigned long)arg;
1682
1683         if (ev == RECONNECT) {
1684                 spin_lock(&ci->i_ceph_lock);
1685                 ci->i_wanted_max_size = 0;
1686                 ci->i_requested_max_size = 0;
1687                 spin_unlock(&ci->i_ceph_lock);
1688         } else if (ev == RENEWCAPS) {
1689                 if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
1690                         /* mds did not re-issue stale cap */
1691                         spin_lock(&ci->i_ceph_lock);
1692                         cap->issued = cap->implemented = CEPH_CAP_PIN;
1693                         spin_unlock(&ci->i_ceph_lock);
1694                 }
1695         } else if (ev == FORCE_RO) {
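1696                 /* nothing to fix up; just wake the waiters below */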
1696         }
1697         wake_up_all(&ci->i_cap_wq);
1698         return 0;
1699 }
1700
1701 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
1702 {
1703         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1704         ceph_iterate_session_caps(session, wake_up_session_cb,
1705                                   (void *)(unsigned long)ev);
1706 }
1707
1708 /*
1709  * Send periodic message to MDS renewing all currently held caps.  The
1710  * ack will reset the expiration for all caps from this session.
1711  *
1712  * caller holds s_mutex
1713  */
1714 static int send_renew_caps(struct ceph_mds_client *mdsc,
1715                            struct ceph_mds_session *session)
1716 {
1717         struct ceph_msg *msg;
1718         int state;
1719
1720         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1721             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1722                 pr_info("mds%d caps stale\n", session->s_mds);
1723         session->s_renew_requested = jiffies;
1724
1725         /* do not try to renew caps until a recovering mds has reconnected
1726          * with its clients. */
1727         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1728         if (state < CEPH_MDS_STATE_RECONNECT) {
1729                 dout("send_renew_caps ignoring mds%d (%s)\n",
1730                      session->s_mds, ceph_mds_state_name(state));
1731                 return 0;
1732         }
1733
1734         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1735                 ceph_mds_state_name(state));
1736         msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1737                                       ++session->s_renew_seq);
1738         if (!msg)
1739                 return -ENOMEM;
1740         ceph_con_send(&session->s_con, msg);
1741         return 0;
1742 }
1743
1744 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1745                              struct ceph_mds_session *session, u64 seq)
1746 {
1747         struct ceph_msg *msg;
1748
1749         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1750              session->s_mds, ceph_session_state_name(session->s_state), seq);
1751         msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1752         if (!msg)
1753                 return -ENOMEM;
1754         ceph_con_send(&session->s_con, msg);
1755         return 0;
1756 }
1757
1758
1759 /*
1760  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1761  *
1762  * Called under session->s_mutex
1763  */
1764 static void renewed_caps(struct ceph_mds_client *mdsc,
1765                          struct ceph_mds_session *session, int is_renew)
1766 {
1767         int was_stale;
1768         int wake = 0;
1769
1770         spin_lock(&session->s_cap_lock);
1771         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1772
1773         session->s_cap_ttl = session->s_renew_requested +
1774                 mdsc->mdsmap->m_session_timeout*HZ;
1775
1776         if (was_stale) {
1777                 if (time_before(jiffies, session->s_cap_ttl)) {
1778                         pr_info("mds%d caps renewed\n", session->s_mds);
1779                         wake = 1;
1780                 } else {
1781                         pr_info("mds%d caps still stale\n", session->s_mds);
1782                 }
1783         }
1784         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1785              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1786              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1787         spin_unlock(&session->s_cap_lock);
1788
1789         if (wake)
1790                 wake_up_session_caps(session, RENEWCAPS);
1791 }
1792
1793 /*
1794  * send a session close request
1795  */
1796 static int request_close_session(struct ceph_mds_session *session)
1797 {
1798         struct ceph_msg *msg;
1799
1800         dout("request_close_session mds%d state %s seq %lld\n",
1801              session->s_mds, ceph_session_state_name(session->s_state),
1802              session->s_seq);
1803         msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
1804                                       session->s_seq);
1805         if (!msg)
1806                 return -ENOMEM;
1807         ceph_con_send(&session->s_con, msg);
1808         return 1;
1809 }
1810
1811 /*
1812  * Called with s_mutex held.
1813  */
1814 static int __close_session(struct ceph_mds_client *mdsc,
1815                          struct ceph_mds_session *session)
1816 {
1817         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1818                 return 0;
1819         session->s_state = CEPH_MDS_SESSION_CLOSING;
1820         return request_close_session(session);
1821 }
1822
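/*
 * Return true if @dentry has no positive children (trivially true for a
 * non-directory); for a directory, prune the negative children before
 * returning.
 */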
1823 static bool drop_negative_children(struct dentry *dentry)
1824 {
1825         struct dentry *child;
1826         bool all_negative = true;
1827
1828         if (!d_is_dir(dentry))
1829                 goto out;
1830
1831         spin_lock(&dentry->d_lock);
1832         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1833                 if (d_really_is_positive(child)) {
1834                         all_negative = false;
1835                         break;
1836                 }
1837         }
1838         spin_unlock(&dentry->d_lock);
1839
1840         if (all_negative)
1841                 shrink_dcache_parent(dentry);
1842 out:
1843         return all_negative;
1844 }
1845
1846 /*
1847  * Trim old(er) caps.
1848  *
1849  * Because we can't cache an inode without one or more caps, we do
1850  * this indirectly: if a cap is unused, we prune its aliases, at which
1851  * point the inode will hopefully get dropped too.
1852  *
1853  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1854  * memory pressure from the MDS, though, so it needn't be perfect.
1855  */
1856 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1857 {
1858         int *remaining = arg;
1859         struct ceph_inode_info *ci = ceph_inode(inode);
1860         int used, wanted, oissued, mine;
1861
1862         if (*remaining <= 0)
1863                 return -1;
1864
1865         spin_lock(&ci->i_ceph_lock);
1866         mine = cap->issued | cap->implemented;
1867         used = __ceph_caps_used(ci);
1868         wanted = __ceph_caps_file_wanted(ci);
1869         oissued = __ceph_caps_issued_other(ci, cap);
1870
1871         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1872              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1873              ceph_cap_string(used), ceph_cap_string(wanted));
1874         if (cap == ci->i_auth_cap) {
1875                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1876                     !list_empty(&ci->i_cap_snaps))
1877                         goto out;
1878                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1879                         goto out;
1880                 /* Note: it's possible that i_filelock_ref becomes non-zero
1881                  * after dropping auth caps. It doesn't hurt because the reply
1882                  * to the file-lock MDS request will re-add the auth caps. */
1883                 if (atomic_read(&ci->i_filelock_ref) > 0)
1884                         goto out;
1885         }
1886         /* The inode has cached pages, but it's no longer used.
1887          * We can safely drop it. */
1888         if (S_ISREG(inode->i_mode) &&
1889             wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1890             !(oissued & CEPH_CAP_FILE_CACHE)) {
1891                 used = 0;
1892                 oissued = 0;
1893         }
1894         if ((used | wanted) & ~oissued & mine)
1895                 goto out;   /* we need these caps */
1896
1897         if (oissued) {
1898                 /* we aren't the only cap.. just remove us */
1899                 ceph_remove_cap(cap, true);
1900                 (*remaining)--;
1901         } else {
1902                 struct dentry *dentry;
1903                 /* try dropping referring dentries */
1904                 spin_unlock(&ci->i_ceph_lock);
1905                 dentry = d_find_any_alias(inode);
1906                 if (dentry && drop_negative_children(dentry)) {
1907                         int count;
1908                         dput(dentry);
1909                         d_prune_aliases(inode);
1910                         count = atomic_read(&inode->i_count);
1911                         if (count == 1)
1912                                 (*remaining)--;
1913                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1914                              inode, cap, count);
1915                 } else {
1916                         dput(dentry);
1917                 }
1918                 return 0;
1919         }
1920
1921 out:
1922         spin_unlock(&ci->i_ceph_lock);
1923         return 0;
1924 }
1925
1926 /*
1927  * Trim session cap count down to some max number.
1928  */
1929 int ceph_trim_caps(struct ceph_mds_client *mdsc,
1930                    struct ceph_mds_session *session,
1931                    int max_caps)
1932 {
1933         int trim_caps = session->s_nr_caps - max_caps;
1934
1935         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1936              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1937         if (trim_caps > 0) {
1938                 int remaining = trim_caps;
1939
1940                 ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
1941                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1942                      session->s_mds, session->s_nr_caps, max_caps,
1943                         trim_caps - remaining);
1944         }
1945
1946         ceph_flush_cap_releases(mdsc, session);
1947         return 0;
1948 }
1949
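/*
 * Return 1 iff every cap flush with tid <= want_flush_tid has
 * completed (the flush list is ordered by tid).
 */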
1950 static int check_caps_flush(struct ceph_mds_client *mdsc,
1951                             u64 want_flush_tid)
1952 {
1953         int ret = 1;
1954
1955         spin_lock(&mdsc->cap_dirty_lock);
1956         if (!list_empty(&mdsc->cap_flush_list)) {
1957                 struct ceph_cap_flush *cf =
1958                         list_first_entry(&mdsc->cap_flush_list,
1959                                          struct ceph_cap_flush, g_list);
1960                 if (cf->tid <= want_flush_tid) {
1961                         dout("check_caps_flush still flushing tid "
1962                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1963                         ret = 0;
1964                 }
1965         }
1966         spin_unlock(&mdsc->cap_dirty_lock);
1967         return ret;
1968 }
1969
1970 /*
1971  * Wait for all dirty inode data to be flushed to disk.
1972  *
1973  * Returns once cap flushes through want_flush_tid have completed.
1974  */
1975 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1976                             u64 want_flush_tid)
1977 {
1978         dout("check_caps_flush want %llu\n", want_flush_tid);
1979
1980         wait_event(mdsc->cap_flushing_wq,
1981                    check_caps_flush(mdsc, want_flush_tid));
1982
1983         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1984 }
1985
1986 /*
1987  * called under s_mutex
1988  */
1989 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1990                                    struct ceph_mds_session *session)
1991 {
1992         struct ceph_msg *msg = NULL;
1993         struct ceph_mds_cap_release *head;
1994         struct ceph_mds_cap_item *item;
1995         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
1996         struct ceph_cap *cap;
1997         LIST_HEAD(tmp_list);
1998         int num_cap_releases;
1999         __le32  barrier, *cap_barrier;
2000
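        /*
         * Sample the osd epoch barrier once up front; it is appended to
         * every CAPRELEASE message we build below.
         */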
2001         down_read(&osdc->lock);
2002         barrier = cpu_to_le32(osdc->epoch_barrier);
2003         up_read(&osdc->lock);
2004
2005         spin_lock(&session->s_cap_lock);
2006 again:
2007         list_splice_init(&session->s_cap_releases, &tmp_list);
2008         num_cap_releases = session->s_num_cap_releases;
2009         session->s_num_cap_releases = 0;
2010         spin_unlock(&session->s_cap_lock);
2011
2012         while (!list_empty(&tmp_list)) {
2013                 if (!msg) {
2014                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2015                                         PAGE_SIZE, GFP_NOFS, false);
2016                         if (!msg)
2017                                 goto out_err;
2018                         head = msg->front.iov_base;
2019                         head->num = cpu_to_le32(0);
2020                         msg->front.iov_len = sizeof(*head);
2021
2022                         msg->hdr.version = cpu_to_le16(2);
2023                         msg->hdr.compat_version = cpu_to_le16(1);
2024                 }
2025
2026                 cap = list_first_entry(&tmp_list, struct ceph_cap,
2027                                         session_caps);
2028                 list_del(&cap->session_caps);
2029                 num_cap_releases--;
2030
2031                 head = msg->front.iov_base;
2032                 put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2033                                    &head->num);
2034                 item = msg->front.iov_base + msg->front.iov_len;
2035                 item->ino = cpu_to_le64(cap->cap_ino);
2036                 item->cap_id = cpu_to_le64(cap->cap_id);
2037                 item->migrate_seq = cpu_to_le32(cap->mseq);
2038                 item->seq = cpu_to_le32(cap->issue_seq);
2039                 msg->front.iov_len += sizeof(*item);
2040
2041                 ceph_put_cap(mdsc, cap);
2042
2043                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2044                         /* append cap_barrier field */
2045                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
2046                         *cap_barrier = barrier;
2047                         msg->front.iov_len += sizeof(*cap_barrier);
2048
2049                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2050                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2051                         ceph_con_send(&session->s_con, msg);
2052                         msg = NULL;
2053                 }
2054         }
2055
2056         BUG_ON(num_cap_releases != 0);
2057
2058         spin_lock(&session->s_cap_lock);
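        /* more releases may have been queued while we were sending */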
2059         if (!list_empty(&session->s_cap_releases))
2060                 goto again;
2061         spin_unlock(&session->s_cap_lock);
2062
2063         if (msg) {
2064                 /* append cap_barrier field */
2065                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
2066                 *cap_barrier = barrier;
2067                 msg->front.iov_len += sizeof(*cap_barrier);
2068
2069                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2070                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2071                 ceph_con_send(&session->s_con, msg);
2072         }
2073         return;
2074 out_err:
2075         pr_err("send_cap_releases mds%d, failed to allocate message\n",
2076                 session->s_mds);
2077         spin_lock(&session->s_cap_lock);
2078         list_splice(&tmp_list, &session->s_cap_releases);
2079         session->s_num_cap_releases += num_cap_releases;
2080         spin_unlock(&session->s_cap_lock);
2081 }
2082
2083 static void ceph_cap_release_work(struct work_struct *work)
2084 {
2085         struct ceph_mds_session *session =
2086                 container_of(work, struct ceph_mds_session, s_cap_release_work);
2087
2088         mutex_lock(&session->s_mutex);
2089         if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2090             session->s_state == CEPH_MDS_SESSION_HUNG)
2091                 ceph_send_cap_releases(session->s_mdsc, session);
2092         mutex_unlock(&session->s_mutex);
2093         ceph_put_mds_session(session);
2094 }
2095
2096 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2097                              struct ceph_mds_session *session)
2098 {
2099         if (mdsc->stopping)
2100                 return;
2101
2102         ceph_get_mds_session(session);
2103         if (queue_work(mdsc->fsc->cap_wq,
2104                        &session->s_cap_release_work)) {
2105                 dout("cap release work queued\n");
2106         } else {
2107                 ceph_put_mds_session(session);
2108                 dout("failed to queue cap release work\n");
2109         }
2110 }
2111
2112 /*
2113  * caller holds session->s_cap_lock
2114  */
2115 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2116                               struct ceph_cap *cap)
2117 {
2118         list_add_tail(&cap->session_caps, &session->s_cap_releases);
2119         session->s_num_cap_releases++;
2120
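        /* flush once another full batch of CEPH_CAPS_PER_RELEASE caps is queued */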
2121         if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2122                 ceph_flush_cap_releases(session->s_mdsc, session);
2123 }
2124
2125 static void ceph_cap_reclaim_work(struct work_struct *work)
2126 {
2127         struct ceph_mds_client *mdsc =
2128                 container_of(work, struct ceph_mds_client, cap_reclaim_work);
2129         int ret = ceph_trim_dentries(mdsc);
2130         if (ret == -EAGAIN)
2131                 ceph_queue_cap_reclaim_work(mdsc);
2132 }
2133
2134 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2135 {
2136         if (mdsc->stopping)
2137                 return;
2138
2139         if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2140                 dout("caps reclaim work queued\n");
2141         } else {
2142                 dout("failed to queue caps reclaim work\n");
2143         }
2144 }
2145
2146 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2147 {
2148         int val;
2149         if (!nr)
2150                 return;
2151         val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
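        /*
         * Queue the reclaim work whenever the pending count crosses a
         * multiple of CEPH_CAPS_PER_RELEASE.
         */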
2152         if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2153                 atomic_set(&mdsc->cap_reclaim_pending, 0);
2154                 ceph_queue_cap_reclaim_work(mdsc);
2155         }
2156 }
2157
2158 /*
2159  * requests
2160  */
2161
2162 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2163                                     struct inode *dir)
2164 {
2165         struct ceph_inode_info *ci = ceph_inode(dir);
2166         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2167         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2168         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2169         unsigned int num_entries;
2170         int order;
2171
2172         spin_lock(&ci->i_ceph_lock);
2173         num_entries = ci->i_files + ci->i_subdirs;
2174         spin_unlock(&ci->i_ceph_lock);
2175         num_entries = max(num_entries, 1U);
2176         num_entries = min(num_entries, opt->max_readdir);
2177
2178         order = get_order(size * num_entries);
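        /*
         * Try to allocate the whole reply buffer at once; under memory
         * pressure fall back to smaller orders (and thus fewer entries).
         */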
2179         while (order >= 0) {
2180                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2181                                                              __GFP_NOWARN,
2182                                                              order);
2183                 if (rinfo->dir_entries)
2184                         break;
2185                 order--;
2186         }
2187         if (!rinfo->dir_entries)
2188                 return -ENOMEM;
2189
2190         num_entries = (PAGE_SIZE << order) / size;
2191         num_entries = min(num_entries, opt->max_readdir);
2192
2193         rinfo->dir_buf_size = PAGE_SIZE << order;
2194         req->r_num_caps = num_entries + 1;
2195         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2196         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2197         return 0;
2198 }
2199
2200 /*
2201  * Create an mds request.
2202  */
2203 struct ceph_mds_request *
2204 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2205 {
2206         struct ceph_mds_request *req;
2207
2208         req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2209         if (!req)
2210                 return ERR_PTR(-ENOMEM);
2211
2212         mutex_init(&req->r_fill_mutex);
2213         req->r_mdsc = mdsc;
2214         req->r_started = jiffies;
2215         req->r_start_latency = ktime_get();
2216         req->r_resend_mds = -1;
2217         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2218         INIT_LIST_HEAD(&req->r_unsafe_target_item);
2219         req->r_fmode = -1;
2220         kref_init(&req->r_kref);
2221         RB_CLEAR_NODE(&req->r_node);
2222         INIT_LIST_HEAD(&req->r_wait);
2223         init_completion(&req->r_completion);
2224         init_completion(&req->r_safe_completion);
2225         INIT_LIST_HEAD(&req->r_unsafe_item);
2226
2227         ktime_get_coarse_real_ts64(&req->r_stamp);
2228
2229         req->r_op = op;
2230         req->r_direct_mode = mode;
2231         return req;
2232 }
2233
2234 /*
2235  * Return the oldest (lowest tid) request in the request tree, or NULL if none.
2236  *
2237  * called under mdsc->mutex.
2238  */
2239 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2240 {
2241         if (RB_EMPTY_ROOT(&mdsc->request_tree))
2242                 return NULL;
2243         return rb_entry(rb_first(&mdsc->request_tree),
2244                         struct ceph_mds_request, r_node);
2245 }
2246
2247 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2248 {
2249         return mdsc->oldest_tid;
2250 }
2251
2252 /*
2253  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
2254  * on build_path_from_dentry in fs/cifs/dir.c.
2255  *
2256  * If @stop_on_nosnap, generate path relative to the first non-snapped
2257  * inode.
2258  *
2259  * Encode hidden .snap dirs as a double /, i.e.
2260  *   foo/.snap/bar -> foo//bar
2261  */
2262 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2263                            int stop_on_nosnap)
2264 {
2265         struct dentry *temp;
2266         char *path;
2267         int pos;
2268         unsigned seq;
2269         u64 base;
2270
2271         if (!dentry)
2272                 return ERR_PTR(-EINVAL);
2273
2274         path = __getname();
2275         if (!path)
2276                 return ERR_PTR(-ENOMEM);
2277 retry:
2278         pos = PATH_MAX - 1;
2279         path[pos] = '\0';
2280
2281         seq = read_seqbegin(&rename_lock);
2282         rcu_read_lock();
2283         temp = dentry;
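        /*
         * Walk from the dentry toward the root, filling the buffer from
         * the end; retry from scratch if a concurrent rename is detected
         * via rename_lock.
         */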
2284         for (;;) {
2285                 struct inode *inode;
2286
2287                 spin_lock(&temp->d_lock);
2288                 inode = d_inode(temp);
2289                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2290                         dout("build_path path+%d: %p SNAPDIR\n",
2291                              pos, temp);
2292                 } else if (stop_on_nosnap && inode && dentry != temp &&
2293                            ceph_snap(inode) == CEPH_NOSNAP) {
2294                         spin_unlock(&temp->d_lock);
2295                         pos++; /* get rid of any prepended '/' */
2296                         break;
2297                 } else {
2298                         pos -= temp->d_name.len;
2299                         if (pos < 0) {
2300                                 spin_unlock(&temp->d_lock);
2301                                 break;
2302                         }
2303                         memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2304                 }
2305                 spin_unlock(&temp->d_lock);
2306                 temp = READ_ONCE(temp->d_parent);
2307
2308                 /* Are we at the root? */
2309                 if (IS_ROOT(temp))
2310                         break;
2311
2312                 /* Are we out of buffer? */
2313                 if (--pos < 0)
2314                         break;
2315
2316                 path[pos] = '/';
2317         }
2318         base = ceph_ino(d_inode(temp));
2319         rcu_read_unlock();
2320
2321         if (read_seqretry(&rename_lock, seq))
2322                 goto retry;
2323
2324         if (pos < 0) {
2325                 /*
2326                  * A rename didn't occur, but somehow we didn't end up where
2327                  * we thought we would. Throw a warning and try again.
2328                  */
2329                 pr_warn("build_path did not end path lookup where "
2330                         "expected, pos is %d\n", pos);
2331                 goto retry;
2332         }
2333
2334         *pbase = base;
2335         *plen = PATH_MAX - 1 - pos;
2336         dout("build_path on %p %d built %llx '%.*s'\n",
2337              dentry, d_count(dentry), base, *plen, path + pos);
2338         return path + pos;
2339 }
2340
2341 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2342                              const char **ppath, int *ppathlen, u64 *pino,
2343                              bool *pfreepath, bool parent_locked)
2344 {
2345         char *path;
2346
2347         rcu_read_lock();
2348         if (!dir)
2349                 dir = d_inode_rcu(dentry->d_parent);
2350         if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2351                 *pino = ceph_ino(dir);
2352                 rcu_read_unlock();
2353                 *ppath = dentry->d_name.name;
2354                 *ppathlen = dentry->d_name.len;
2355                 return 0;
2356         }
2357         rcu_read_unlock();
2358         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2359         if (IS_ERR(path))
2360                 return PTR_ERR(path);
2361         *ppath = path;
2362         *pfreepath = true;
2363         return 0;
2364 }
2365
2366 static int build_inode_path(struct inode *inode,
2367                             const char **ppath, int *ppathlen, u64 *pino,
2368                             bool *pfreepath)
2369 {
2370         struct dentry *dentry;
2371         char *path;
2372
2373         if (ceph_snap(inode) == CEPH_NOSNAP) {
2374                 *pino = ceph_ino(inode);
2375                 *ppathlen = 0;
2376                 return 0;
2377         }
2378         dentry = d_find_alias(inode);
2379         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2380         dput(dentry);
2381         if (IS_ERR(path))
2382                 return PTR_ERR(path);
2383         *ppath = path;
2384         *pfreepath = true;
2385         return 0;
2386 }
2387
2388 /*
2389  * request arguments may be specified via an inode *, a dentry *, or
2390  * an explicit ino+path.
2391  */
2392 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2393                                   struct inode *rdiri, const char *rpath,
2394                                   u64 rino, const char **ppath, int *pathlen,
2395                                   u64 *ino, bool *freepath, bool parent_locked)
2396 {
2397         int r = 0;
2398
2399         if (rinode) {
2400                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2401                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2402                      ceph_snap(rinode));
2403         } else if (rdentry) {
2404                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2405                                         freepath, parent_locked);
2406                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2407                      *ppath);
2408         } else if (rpath || rino) {
2409                 *ino = rino;
2410                 *ppath = rpath;
2411                 *pathlen = rpath ? strlen(rpath) : 0;
2412                 dout(" path %.*s\n", *pathlen, rpath);
2413         }
2414
2415         return r;
2416 }
2417
2418 static void encode_timestamp_and_gids(void **p,
2419                                       const struct ceph_mds_request *req)
2420 {
2421         struct ceph_timespec ts;
2422         int i;
2423
2424         ceph_encode_timespec64(&ts, &req->r_stamp);
2425         ceph_encode_copy(p, &ts, sizeof(ts));
2426
2427         /* gid_list */
2428         ceph_encode_32(p, req->r_cred->group_info->ngroups);
2429         for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2430                 ceph_encode_64(p, from_kgid(&init_user_ns,
2431                                             req->r_cred->group_info->gid[i]));
2432 }
2433
2434 /*
2435  * called under mdsc->mutex
2436  */
2437 static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
2438                                                struct ceph_mds_request *req,
2439                                                bool drop_cap_releases)
2440 {
2441         int mds = session->s_mds;
2442         struct ceph_mds_client *mdsc = session->s_mdsc;
2443         struct ceph_msg *msg;
2444         struct ceph_mds_request_head_old *head;
2445         const char *path1 = NULL;
2446         const char *path2 = NULL;
2447         u64 ino1 = 0, ino2 = 0;
2448         int pathlen1 = 0, pathlen2 = 0;
2449         bool freepath1 = false, freepath2 = false;
2450         int len;
2451         u16 releases;
2452         void *p, *end;
2453         int ret;
2454         bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
2455
2456         ret = set_request_path_attr(req->r_inode, req->r_dentry,
2457                               req->r_parent, req->r_path1, req->r_ino1.ino,
2458                               &path1, &pathlen1, &ino1, &freepath1,
2459                               test_bit(CEPH_MDS_R_PARENT_LOCKED,
2460                                         &req->r_req_flags));
2461         if (ret < 0) {
2462                 msg = ERR_PTR(ret);
2463                 goto out;
2464         }
2465
2466         /* If r_old_dentry is set, then assume that its parent is locked */
2467         ret = set_request_path_attr(NULL, req->r_old_dentry,
2468                               req->r_old_dentry_dir,
2469                               req->r_path2, req->r_ino2.ino,
2470                               &path2, &pathlen2, &ino2, &freepath2, true);
2471         if (ret < 0) {
2472                 msg = ERR_PTR(ret);
2473                 goto out_free1;
2474         }
2475
2476         len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
2477         len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2478                 sizeof(struct ceph_timespec);
2479         len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
2480
2481         /* calculate (max) length for cap releases */
2482         len += sizeof(struct ceph_mds_request_release) *
2483                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2484                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2485
2486         if (req->r_dentry_drop)
2487                 len += pathlen1;
2488         if (req->r_old_dentry_drop)
2489                 len += pathlen2;
2490
2491         msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2492         if (!msg) {
2493                 msg = ERR_PTR(-ENOMEM);
2494                 goto out_free2;
2495         }
2496
2497         msg->hdr.tid = cpu_to_le64(req->r_tid);
2498
2499         /*
2500          * The old ceph_mds_request_head didn't contain a version field, and
2501          * one was added when we moved the message version from 3->4.
2502          */
2503         if (legacy) {
2504                 msg->hdr.version = cpu_to_le16(3);
2505                 head = msg->front.iov_base;
2506                 p = msg->front.iov_base + sizeof(*head);
2507         } else {
2508                 struct ceph_mds_request_head *new_head = msg->front.iov_base;
2509
2510                 msg->hdr.version = cpu_to_le16(4);
2511                 new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
2512                 head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2513                 p = msg->front.iov_base + sizeof(*new_head);
2514         }
2515
2516         end = msg->front.iov_base + msg->front.iov_len;
2517
2518         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2519         head->op = cpu_to_le32(req->r_op);
2520         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
2521                                                  req->r_cred->fsuid));
2522         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
2523                                                  req->r_cred->fsgid));
2524         head->ino = cpu_to_le64(req->r_deleg_ino);
2525         head->args = req->r_args;
2526
2527         ceph_encode_filepath(&p, end, ino1, path1);
2528         ceph_encode_filepath(&p, end, ino2, path2);
2529
2530         /* make note of release offset, in case we need to replay */
2531         req->r_request_release_offset = p - msg->front.iov_base;
2532
2533         /* cap releases */
2534         releases = 0;
2535         if (req->r_inode_drop)
2536                 releases += ceph_encode_inode_release(&p,
2537                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2538                       mds, req->r_inode_drop, req->r_inode_unless,
2539                       req->r_op == CEPH_MDS_OP_READDIR);
2540         if (req->r_dentry_drop)
2541                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2542                                 req->r_parent, mds, req->r_dentry_drop,
2543                                 req->r_dentry_unless);
2544         if (req->r_old_dentry_drop)
2545                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2546                                 req->r_old_dentry_dir, mds,
2547                                 req->r_old_dentry_drop,
2548                                 req->r_old_dentry_unless);
2549         if (req->r_old_inode_drop)
2550                 releases += ceph_encode_inode_release(&p,
2551                       d_inode(req->r_old_dentry),
2552                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2553
2554         if (drop_cap_releases) {
2555                 releases = 0;
2556                 p = msg->front.iov_base + req->r_request_release_offset;
2557         }
2558
2559         head->num_releases = cpu_to_le16(releases);
2560
2561         encode_timestamp_and_gids(&p, req);
2562
2563         if (WARN_ON_ONCE(p > end)) {
2564                 ceph_msg_put(msg);
2565                 msg = ERR_PTR(-ERANGE);
2566                 goto out_free2;
2567         }
2568
2569         msg->front.iov_len = p - msg->front.iov_base;
2570         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2571
2572         if (req->r_pagelist) {
2573                 struct ceph_pagelist *pagelist = req->r_pagelist;
2574                 ceph_msg_data_add_pagelist(msg, pagelist);
2575                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2576         } else {
2577                 msg->hdr.data_len = 0;
2578         }
2579
2580         msg->hdr.data_off = cpu_to_le16(0);
2581
2582 out_free2:
2583         if (freepath2)
2584                 ceph_mdsc_free_path((char *)path2, pathlen2);
2585 out_free1:
2586         if (freepath1)
2587                 ceph_mdsc_free_path((char *)path1, pathlen1);
2588 out:
2589         return msg;
2590 }
2591
2592 /*
2593  * called under mdsc->mutex on error, and under no mutex on
2594  * success.
2595  */
2596 static void complete_request(struct ceph_mds_client *mdsc,
2597                              struct ceph_mds_request *req)
2598 {
2599         req->r_end_latency = ktime_get();
2600
2601         if (req->r_callback)
2602                 req->r_callback(mdsc, req);
2603         complete_all(&req->r_completion);
2604 }
2605
2606 static struct ceph_mds_request_head_old *
2607 find_old_request_head(void *p, u64 features)
2608 {
2609         bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
2610         struct ceph_mds_request_head *new_head;
2611
2612         if (legacy)
2613                 return (struct ceph_mds_request_head_old *)p;
2614         new_head = (struct ceph_mds_request_head *)p;
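        /* the old head layout begins at the new head's oldest_client_tid field */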
2615         return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
2616 }
2617
2618 /*
2619  * called under mdsc->mutex
2620  */
2621 static int __prepare_send_request(struct ceph_mds_session *session,
2622                                   struct ceph_mds_request *req,
2623                                   bool drop_cap_releases)
2624 {
2625         int mds = session->s_mds;
2626         struct ceph_mds_client *mdsc = session->s_mdsc;
2627         struct ceph_mds_request_head_old *rhead;
2628         struct ceph_msg *msg;
2629         int flags = 0;
2630
2631         req->r_attempts++;
2632         if (req->r_inode) {
2633                 struct ceph_cap *cap =
2634                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2635
2636                 if (cap)
2637                         req->r_sent_on_mseq = cap->mseq;
2638                 else
2639                         req->r_sent_on_mseq = -1;
2640         }
2641         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2642              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2643
2644         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2645                 void *p;
2646
2647                 /*
2648                  * Replay.  Do not regenerate message (and rebuild
2649                  * paths, etc.); just use the original message.
2650                  * Rebuilding paths will break for renames because
2651                  * d_move mangles the src name.
2652                  */
2653                 msg = req->r_request;
2654                 rhead = find_old_request_head(msg->front.iov_base,
2655                                               session->s_con.peer_features);
2656
2657                 flags = le32_to_cpu(rhead->flags);
2658                 flags |= CEPH_MDS_FLAG_REPLAY;
2659                 rhead->flags = cpu_to_le32(flags);
2660
2661                 if (req->r_target_inode)
2662                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2663
2664                 rhead->num_retry = req->r_attempts - 1;
2665
2666                 /* remove cap/dentry releases from message */
2667                 rhead->num_releases = 0;
2668
2669                 p = msg->front.iov_base + req->r_request_release_offset;
2670                 encode_timestamp_and_gids(&p, req);
2671
2672                 msg->front.iov_len = p - msg->front.iov_base;
2673                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2674                 return 0;
2675         }
2676
2677         if (req->r_request) {
2678                 ceph_msg_put(req->r_request);
2679                 req->r_request = NULL;
2680         }
2681         msg = create_request_message(session, req, drop_cap_releases);
2682         if (IS_ERR(msg)) {
2683                 req->r_err = PTR_ERR(msg);
2684                 return PTR_ERR(msg);
2685         }
2686         req->r_request = msg;
2687
2688         rhead = find_old_request_head(msg->front.iov_base,
2689                                       session->s_con.peer_features);
2690         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2691         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2692                 flags |= CEPH_MDS_FLAG_REPLAY;
2693         if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2694                 flags |= CEPH_MDS_FLAG_ASYNC;
2695         if (req->r_parent)
2696                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2697         rhead->flags = cpu_to_le32(flags);
2698         rhead->num_fwd = req->r_num_fwd;
2699         rhead->num_retry = req->r_attempts - 1;
2700
2701         dout(" r_parent = %p\n", req->r_parent);
2702         return 0;
2703 }
2704
2705 /*
2706  * called under mdsc->mutex
2707  */
2708 static int __send_request(struct ceph_mds_session *session,
2709                           struct ceph_mds_request *req,
2710                           bool drop_cap_releases)
2711 {
2712         int err;
2713
2714         err = __prepare_send_request(session, req, drop_cap_releases);
2715         if (!err) {
2716                 ceph_msg_get(req->r_request);
2717                 ceph_con_send(&session->s_con, req->r_request);
2718         }
2719
2720         return err;
2721 }
2722
2723 /*
2724  * send request, or put it on the appropriate wait list.
2725  */
2726 static void __do_request(struct ceph_mds_client *mdsc,
2727                         struct ceph_mds_request *req)
2728 {
2729         struct ceph_mds_session *session = NULL;
2730         int mds = -1;
2731         int err = 0;
2732         bool random;
2733
2734         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2735                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2736                         __unregister_request(mdsc, req);
2737                 return;
2738         }
2739
2740         if (req->r_timeout &&
2741             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2742                 dout("do_request timed out\n");
2743                 err = -ETIMEDOUT;
2744                 goto finish;
2745         }
2746         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2747                 dout("do_request forced umount\n");
2748                 err = -EIO;
2749                 goto finish;
2750         }
2751         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2752                 if (mdsc->mdsmap_err) {
2753                         err = mdsc->mdsmap_err;
2754                         dout("do_request mdsmap err %d\n", err);
2755                         goto finish;
2756                 }
2757                 if (mdsc->mdsmap->m_epoch == 0) {
2758                         dout("do_request no mdsmap, waiting for map\n");
2759                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2760                         return;
2761                 }
2762                 if (!(mdsc->fsc->mount_options->flags &
2763                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2764                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2765                         err = -EHOSTUNREACH;
2766                         goto finish;
2767                 }
2768         }
2769
2770         put_request_session(req);
2771
2772         mds = __choose_mds(mdsc, req, &random);
2773         if (mds < 0 ||
2774             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2775                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2776                         err = -EJUKEBOX;
2777                         goto finish;
2778                 }
2779                 dout("do_request no mds or not active, waiting for map\n");
2780                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2781                 return;
2782         }
2783
2784         /* get, open session */
2785         session = __ceph_lookup_mds_session(mdsc, mds);
2786         if (!session) {
2787                 session = register_session(mdsc, mds);
2788                 if (IS_ERR(session)) {
2789                         err = PTR_ERR(session);
2790                         goto finish;
2791                 }
2792         }
2793         req->r_session = ceph_get_mds_session(session);
2794
2795         dout("do_request mds%d session %p state %s\n", mds, session,
2796              ceph_session_state_name(session->s_state));
2797         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2798             session->s_state != CEPH_MDS_SESSION_HUNG) {
2799                 /*
2800                  * We cannot queue async requests since the caps and delegated
2801                  * inodes are bound to the session. Just return -EJUKEBOX and
2802                  * let the caller retry a sync request in that case.
2803                  */
2804                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2805                         err = -EJUKEBOX;
2806                         goto out_session;
2807                 }
2808
2809                 /*
2810                  * If the session has been REJECTED, then return a hard error,
2811                  * unless it's a CLEANRECOVER mount, in which case we'll queue
2812                  * it to the mdsc queue.
2813                  */
2814                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2815                         if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
2816                                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2817                         else
2818                                 err = -EACCES;
2819                         goto out_session;
2820                 }
2821
2822                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2823                     session->s_state == CEPH_MDS_SESSION_CLOSING) {
2824                         err = __open_session(mdsc, session);
2825                         if (err)
2826                                 goto out_session;
2827                         /* retry the same mds later */
2828                         if (random)
2829                                 req->r_resend_mds = mds;
2830                 }
2831                 list_add(&req->r_wait, &session->s_waiting);
2832                 goto out_session;
2833         }
2834
2835         /* send request */
2836         req->r_resend_mds = -1;   /* forget any previous mds hint */
2837
2838         if (req->r_request_started == 0)   /* note request start time */
2839                 req->r_request_started = jiffies;
2840
2841         err = __send_request(session, req, false);
2842
2843 out_session:
2844         ceph_put_mds_session(session);
2845 finish:
2846         if (err) {
2847                 dout("__do_request early error %d\n", err);
2848                 req->r_err = err;
2849                 complete_request(mdsc, req);
2850                 __unregister_request(mdsc, req);
2851         }
2852         return;
2853 }
2854
2855 /*
2856  * called under mdsc->mutex
2857  */
2858 static void __wake_requests(struct ceph_mds_client *mdsc,
2859                             struct list_head *head)
2860 {
2861         struct ceph_mds_request *req;
2862         LIST_HEAD(tmp_list);
2863
2864         list_splice_init(head, &tmp_list);
2865
2866         while (!list_empty(&tmp_list)) {
2867                 req = list_entry(tmp_list.next,
2868                                  struct ceph_mds_request, r_wait);
2869                 list_del_init(&req->r_wait);
2870                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2871                 __do_request(mdsc, req);
2872         }
2873 }
2874
2875 /*
2876  * Wake up threads with requests pending for @mds, so that they can
2877  * resubmit their requests to a possibly different mds.
2878  */
2879 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2880 {
2881         struct ceph_mds_request *req;
2882         struct rb_node *p = rb_first(&mdsc->request_tree);
2883
2884         dout("kick_requests mds%d\n", mds);
2885         while (p) {
2886                 req = rb_entry(p, struct ceph_mds_request, r_node);
2887                 p = rb_next(p);
2888                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2889                         continue;
2890                 if (req->r_attempts > 0)
2891                         continue; /* only new requests */
2892                 if (req->r_session &&
2893                     req->r_session->s_mds == mds) {
2894                         dout(" kicking tid %llu\n", req->r_tid);
2895                         list_del_init(&req->r_wait);
2896                         __do_request(mdsc, req);
2897                 }
2898         }
2899 }
2900
2901 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
2902                               struct ceph_mds_request *req)
2903 {
2904         int err = 0;
2905
2906         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry_dir */
2907         if (req->r_inode)
2908                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2909         if (req->r_parent) {
2910                 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
2911                 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
2912                             CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
2913                 spin_lock(&ci->i_ceph_lock);
2914                 ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
2915                 __ceph_touch_fmode(ci, mdsc, fmode);
2916                 spin_unlock(&ci->i_ceph_lock);
2917         }
2918         if (req->r_old_dentry_dir)
2919                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2920                                   CEPH_CAP_PIN);
2921
2922         if (req->r_inode) {
2923                 err = ceph_wait_on_async_create(req->r_inode);
2924                 if (err) {
2925                         dout("%s: wait for async create returned: %d\n",
2926                              __func__, err);
2927                         return err;
2928                 }
2929         }
2930
2931         if (!err && req->r_old_inode) {
2932                 err = ceph_wait_on_async_create(req->r_old_inode);
2933                 if (err) {
2934                         dout("%s: wait for async create returned: %d\n",
2935                              __func__, err);
2936                         return err;
2937                 }
2938         }
2939
2940         dout("submit_request on %p for inode %p\n", req, dir);
2941         mutex_lock(&mdsc->mutex);
2942         __register_request(mdsc, req, dir);
2943         __do_request(mdsc, req);
2944         err = req->r_err;
2945         mutex_unlock(&mdsc->mutex);
2946         return err;
2947 }
2948
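/*
 * Wait for an already-submitted request to complete (or be killed).
 *
 * If a custom r_wait_for_completion callback is set and no timeout was
 * requested, it decides the error itself.  Otherwise timeleft > 0 means
 * the reply arrived (err = 0), timeleft == 0 means we timed out
 * (-ETIMEDOUT), and a negative timeleft means the waiter was killed.
 * If a real reply raced with an abort, the MDS result wins.
 */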
2949 static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
2950                                   struct ceph_mds_request *req)
2951 {
2952         int err;
2953
2954         /* wait */
2955         dout("do_request waiting\n");
2956         if (!req->r_timeout && req->r_wait_for_completion) {
2957                 err = req->r_wait_for_completion(mdsc, req);
2958         } else {
2959                 long timeleft = wait_for_completion_killable_timeout(
2960                                         &req->r_completion,
2961                                         ceph_timeout_jiffies(req->r_timeout));
2962                 if (timeleft > 0)
2963                         err = 0;
2964                 else if (!timeleft)
2965                         err = -ETIMEDOUT;  /* timed out */
2966                 else
2967                         err = timeleft;  /* killed */
2968         }
2969         dout("do_request waited, got %d\n", err);
2970         mutex_lock(&mdsc->mutex);
2971
2972         /* only abort if we didn't race with a real reply */
2973         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2974                 err = le32_to_cpu(req->r_reply_info.head->result);
2975         } else if (err < 0) {
2976                 dout("aborted request %lld with %d\n", req->r_tid, err);
2977
2978                 /*
2979                  * ensure we aren't running concurrently with
2980                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2981                  * rely on locks (dir mutex) held by our caller.
2982                  */
2983                 mutex_lock(&req->r_fill_mutex);
2984                 req->r_err = err;
2985                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
2986                 mutex_unlock(&req->r_fill_mutex);
2987
2988                 if (req->r_parent &&
2989                     (req->r_op & CEPH_MDS_OP_WRITE))
2990                         ceph_invalidate_dir_request(req);
2991         } else {
2992                 err = req->r_err;
2993         }
2994
2995         mutex_unlock(&mdsc->mutex);
2996         return err;
2997 }
2998
2999 /*
3000  * Synchronously perform an mds request.  Take care of all of the
3001  * session setup, forwarding, and retry details.
3002  */
3003 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3004                          struct inode *dir,
3005                          struct ceph_mds_request *req)
3006 {
3007         int err;
3008
3009         dout("do_request on %p\n", req);
3010
3011         /* issue */
3012         err = ceph_mdsc_submit_request(mdsc, dir, req);
3013         if (!err)
3014                 err = ceph_mdsc_wait_request(mdsc, req);
3015         dout("do_request %p done, result %d\n", req, err);
3016         return err;
3017 }
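/*
 * Typical calling pattern (an illustrative sketch only, loosely modeled
 * on existing callers such as getattr; the exact r_args fields, cap
 * counts and reference handling are operation-specific and not shown):
 *
 *      struct ceph_mds_request *req;
 *      int err;
 *
 *      req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *                                     USE_ANY_MDS);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      req->r_inode = inode;
 *      ihold(inode);
 *      req->r_num_caps = 1;
 *      err = ceph_mdsc_do_request(mdsc, NULL, req);
 *      ceph_mdsc_put_request(req);
 */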
3018
3019 /*
3020  * Invalidate dir's completeness, dentry lease state on an aborted MDS
3021  * namespace request.
3022  */
3023 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3024 {
3025         struct inode *dir = req->r_parent;
3026         struct inode *old_dir = req->r_old_dentry_dir;
3027
3028         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
3029
3030         ceph_dir_clear_complete(dir);
3031         if (old_dir)
3032                 ceph_dir_clear_complete(old_dir);
3033         if (req->r_dentry)
3034                 ceph_invalidate_dentry_lease(req->r_dentry);
3035         if (req->r_old_dentry)
3036                 ceph_invalidate_dentry_lease(req->r_old_dentry);
3037 }
3038
3039 /*
3040  * Handle mds reply.
3041  *
3042  * We take the session mutex and parse and process the reply immediately.
3043  * This preserves the logical ordering of replies, capabilities, etc., sent
3044  * by the MDS as they are applied to our local cache.
3045  */
3046 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3047 {
3048         struct ceph_mds_client *mdsc = session->s_mdsc;
3049         struct ceph_mds_request *req;
3050         struct ceph_mds_reply_head *head = msg->front.iov_base;
3051         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
3052         struct ceph_snap_realm *realm;
3053         u64 tid;
3054         int err, result;
3055         int mds = session->s_mds;
3056
3057         if (msg->front.iov_len < sizeof(*head)) {
3058                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
3059                 ceph_msg_dump(msg);
3060                 return;
3061         }
3062
3063         /* get request, session */
3064         tid = le64_to_cpu(msg->hdr.tid);
3065         mutex_lock(&mdsc->mutex);
3066         req = lookup_get_request(mdsc, tid);
3067         if (!req) {
3068                 dout("handle_reply on unknown tid %llu\n", tid);
3069                 mutex_unlock(&mdsc->mutex);
3070                 return;
3071         }
3072         dout("handle_reply %p\n", req);
3073
3074         /* correct session? */
3075         if (req->r_session != session) {
3076                 pr_err("mdsc_handle_reply got %llu on session mds%d"
3077                        " not mds%d\n", tid, session->s_mds,
3078                        req->r_session ? req->r_session->s_mds : -1);
3079                 mutex_unlock(&mdsc->mutex);
3080                 goto out;
3081         }
3082
3083         /* dup? */
3084         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3085             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3086                 pr_warn("got a dup %s reply on %llu from mds%d\n",
3087                            head->safe ? "safe" : "unsafe", tid, mds);
3088                 mutex_unlock(&mdsc->mutex);
3089                 goto out;
3090         }
3091         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3092                 pr_warn("got unsafe after safe on %llu from mds%d\n",
3093                            tid, mds);
3094                 mutex_unlock(&mdsc->mutex);
3095                 goto out;
3096         }
3097
3098         result = le32_to_cpu(head->result);
3099
3100         /*
3101          * Handle an ESTALE:
3102          * if we're not talking to the authority, send to it;
3103          * if the authority has changed while we weren't looking,
3104          * send to the new authority.
3105          * Otherwise we just have to return an ESTALE.
3106          */
3107         if (result == -ESTALE) {
3108                 dout("got ESTALE on request %llu\n", req->r_tid);
3109                 req->r_resend_mds = -1;
3110                 if (req->r_direct_mode != USE_AUTH_MDS) {
3111                         dout("not using auth, setting for that now\n");
3112                         req->r_direct_mode = USE_AUTH_MDS;
3113                         __do_request(mdsc, req);
3114                         mutex_unlock(&mdsc->mutex);
3115                         goto out;
3116                 } else  {
3117                         int mds = __choose_mds(mdsc, req, NULL);
3118                         if (mds >= 0 && mds != req->r_session->s_mds) {
3119                                 dout("but auth changed, so resending\n");
3120                                 __do_request(mdsc, req);
3121                                 mutex_unlock(&mdsc->mutex);
3122                                 goto out;
3123                         }
3124                 }
3125                 dout("have to return ESTALE on request %llu\n", req->r_tid);
3126         }
3127
3128
3129         if (head->safe) {
3130                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3131                 __unregister_request(mdsc, req);
3132
3133                 /* last request during umount? */
3134                 if (mdsc->stopping && !__get_oldest_req(mdsc))
3135                         complete_all(&mdsc->safe_umount_waiters);
3136
3137                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3138                         /*
3139                          * We already handled the unsafe response, now do the
3140                          * cleanup.  No need to examine the response; the MDS
3141                          * doesn't include any result info in the safe
3142                          * response.  And even if it did, there is nothing
3143                          * useful we could do with a revised return value.
3144                          */
3145                         dout("got safe reply %llu, mds%d\n", tid, mds);
3146
3147                         mutex_unlock(&mdsc->mutex);
3148                         goto out;
3149                 }
3150         } else {
3151                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3152                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3153         }
3154
3155         dout("handle_reply tid %lld result %d\n", tid, result);
3156         rinfo = &req->r_reply_info;
3157         if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3158                 err = parse_reply_info(session, msg, rinfo, (u64)-1);
3159         else
3160                 err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3161         mutex_unlock(&mdsc->mutex);
3162
3163         /* Must find target inode outside of mutexes to avoid deadlocks */
3164         if ((err >= 0) && rinfo->head->is_target) {
3165                 struct inode *in;
3166                 struct ceph_vino tvino = {
3167                         .ino  = le64_to_cpu(rinfo->targeti.in->ino),
3168                         .snap = le64_to_cpu(rinfo->targeti.in->snapid)
3169                 };
3170
3171                 in = ceph_get_inode(mdsc->fsc->sb, tvino);
3172                 if (IS_ERR(in)) {
3173                         err = PTR_ERR(in);
3174                         mutex_lock(&session->s_mutex);
3175                         goto out_err;
3176                 }
3177                 req->r_target_inode = in;
3178         }
3179
3180         mutex_lock(&session->s_mutex);
3181         if (err < 0) {
3182                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3183                 ceph_msg_dump(msg);
3184                 goto out_err;
3185         }
3186
3187         /* snap trace */
3188         realm = NULL;
3189         if (rinfo->snapblob_len) {
3190                 down_write(&mdsc->snap_rwsem);
3191                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
3192                                 rinfo->snapblob + rinfo->snapblob_len,
3193                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3194                                 &realm);
3195                 downgrade_write(&mdsc->snap_rwsem);
3196         } else {
3197                 down_read(&mdsc->snap_rwsem);
3198         }
3199
3200         /* insert trace into our cache */
3201         mutex_lock(&req->r_fill_mutex);
3202         current->journal_info = req;
3203         err = ceph_fill_trace(mdsc->fsc->sb, req);
3204         if (err == 0) {
3205                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3206                                     req->r_op == CEPH_MDS_OP_LSSNAP))
3207                         ceph_readdir_prepopulate(req, req->r_session);
3208         }
3209         current->journal_info = NULL;
3210         mutex_unlock(&req->r_fill_mutex);
3211
3212         up_read(&mdsc->snap_rwsem);
3213         if (realm)
3214                 ceph_put_snap_realm(mdsc, realm);
3215
3216         if (err == 0) {
3217                 if (req->r_target_inode &&
3218                     test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3219                         struct ceph_inode_info *ci =
3220                                 ceph_inode(req->r_target_inode);
3221                         spin_lock(&ci->i_unsafe_lock);
3222                         list_add_tail(&req->r_unsafe_target_item,
3223                                       &ci->i_unsafe_iops);
3224                         spin_unlock(&ci->i_unsafe_lock);
3225                 }
3226
3227                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3228         }
3229 out_err:
3230         mutex_lock(&mdsc->mutex);
3231         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3232                 if (err) {
3233                         req->r_err = err;
3234                 } else {
3235                         req->r_reply =  ceph_msg_get(msg);
3236                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3237                 }
3238         } else {
3239                 dout("reply arrived after request %lld was aborted\n", tid);
3240         }
3241         mutex_unlock(&mdsc->mutex);
3242
3243         mutex_unlock(&session->s_mutex);
3244
3245         /* kick calling process */
3246         complete_request(mdsc, req);
3247
3248         ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3249                                      req->r_end_latency, err);
3250 out:
3251         ceph_mdsc_put_request(req);
3252         return;
3253 }
3254
3255
3256
3257 /*
3258  * handle mds notification that our request has been forwarded.
3259  */
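/*
 * The message body is two little-endian u32s: the rank we were
 * forwarded to (next_mds) and a forward sequence number (fwd_seq).  An
 * aborted request is simply unregistered, a stale fwd_seq is ignored,
 * and otherwise the request is reset and resubmitted with next_mds as
 * the resend hint.
 */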
3260 static void handle_forward(struct ceph_mds_client *mdsc,
3261                            struct ceph_mds_session *session,
3262                            struct ceph_msg *msg)
3263 {
3264         struct ceph_mds_request *req;
3265         u64 tid = le64_to_cpu(msg->hdr.tid);
3266         u32 next_mds;
3267         u32 fwd_seq;
3268         int err = -EINVAL;
3269         void *p = msg->front.iov_base;
3270         void *end = p + msg->front.iov_len;
3271
3272         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3273         next_mds = ceph_decode_32(&p);
3274         fwd_seq = ceph_decode_32(&p);
3275
3276         mutex_lock(&mdsc->mutex);
3277         req = lookup_get_request(mdsc, tid);
3278         if (!req) {
3279                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3280                 goto out;  /* dup reply? */
3281         }
3282
3283         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3284                 dout("forward tid %llu aborted, unregistering\n", tid);
3285                 __unregister_request(mdsc, req);
3286         } else if (fwd_seq <= req->r_num_fwd) {
3287                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3288                      tid, next_mds, req->r_num_fwd, fwd_seq);
3289         } else {
3290                 /* resend. forward race not possible; mds would drop */
3291                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3292                 BUG_ON(req->r_err);
3293                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3294                 req->r_attempts = 0;
3295                 req->r_num_fwd = fwd_seq;
3296                 req->r_resend_mds = next_mds;
3297                 put_request_session(req);
3298                 __do_request(mdsc, req);
3299         }
3300         ceph_mdsc_put_request(req);
3301 out:
3302         mutex_unlock(&mdsc->mutex);
3303         return;
3304
3305 bad:
3306         pr_err("mdsc_handle_forward decode error err=%d\n", err);
3307 }
3308
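/*
 * Decode the client metadata map carried in older session messages.
 * The wire format is a u32 entry count followed by that many key/value
 * pairs, each string encoded as a u32 length plus raw bytes.  We only
 * care whether an "error_string" value mentions "blacklisted", which
 * tells us this client has been blocklisted.
 */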
3309 static int __decode_session_metadata(void **p, void *end,
3310                                      bool *blocklisted)
3311 {
3312         /* map<string,string> */
3313         u32 n;
3314         bool err_str;
3315         ceph_decode_32_safe(p, end, n, bad);
3316         while (n-- > 0) {
3317                 u32 len;
3318                 ceph_decode_32_safe(p, end, len, bad);
3319                 ceph_decode_need(p, end, len, bad);
3320                 err_str = !strncmp(*p, "error_string", len);
3321                 *p += len;
3322                 ceph_decode_32_safe(p, end, len, bad);
3323                 ceph_decode_need(p, end, len, bad);
3324                 /*
3325                  * Match "blocklisted (blacklisted)" from newer MDSes,
3326                  * or "blacklisted" from older MDSes.
3327                  */
3328                 if (err_str && strnstr(*p, "blacklisted", len))
3329                         *blocklisted = true;
3330                 *p += len;
3331         }
3332         return 0;
3333 bad:
3334         return -1;
3335 }
3336
3337 /*
3338  * handle a mds session control message
3339  */
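/*
 * A wake value of 1 (session opened) wakes up requests waiting on this
 * session; 2 (used for CLOSE and REJECT) additionally kicks requests
 * already assigned to this mds so they can be resubmitted elsewhere.
 */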
3340 static void handle_session(struct ceph_mds_session *session,
3341                            struct ceph_msg *msg)
3342 {
3343         struct ceph_mds_client *mdsc = session->s_mdsc;
3344         int mds = session->s_mds;
3345         int msg_version = le16_to_cpu(msg->hdr.version);
3346         void *p = msg->front.iov_base;
3347         void *end = p + msg->front.iov_len;
3348         struct ceph_mds_session_head *h;
3349         u32 op;
3350         u64 seq, features = 0;
3351         int wake = 0;
3352         bool blocklisted = false;
3353
3354         /* decode */
3355         ceph_decode_need(&p, end, sizeof(*h), bad);
3356         h = p;
3357         p += sizeof(*h);
3358
3359         op = le32_to_cpu(h->op);
3360         seq = le64_to_cpu(h->seq);
3361
3362         if (msg_version >= 3) {
3363                 u32 len;
3364                 /* versions >= 2 and < 5: decode the metadata map; skip it
3365                  * otherwise, as the blocklisted status is carried via flags.
3366                  */
3367                 if (msg_version >= 5)
3368                         ceph_decode_skip_map(&p, end, string, string, bad);
3369                 else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
3370                         goto bad;
3371
3372                 /* version >= 3, feature bits */
3373                 ceph_decode_32_safe(&p, end, len, bad);
3374                 if (len) {
3375                         ceph_decode_64_safe(&p, end, features, bad);
3376                         p += len - sizeof(features);
3377                 }
3378         }
3379
3380         if (msg_version >= 5) {
3381                 u32 flags;
3382                 /* version >= 4, struct_v, struct_cv, len, metric_spec */
3383                 ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 2, bad);
3384                 /* version >= 5, flags   */
3385                 ceph_decode_32_safe(&p, end, flags, bad);
3386                 if (flags & CEPH_SESSION_BLOCKLISTED) {
3387                         pr_warn("mds%d session blocklisted\n", session->s_mds);
3388                         blocklisted = true;
3389                 }
3390         }
3391
3392         mutex_lock(&mdsc->mutex);
3393         if (op == CEPH_SESSION_CLOSE) {
3394                 ceph_get_mds_session(session);
3395                 __unregister_session(mdsc, session);
3396         }
3397         /* FIXME: this ttl calculation is generous */
3398         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3399         mutex_unlock(&mdsc->mutex);
3400
3401         mutex_lock(&session->s_mutex);
3402
3403         dout("handle_session mds%d %s %p state %s seq %llu\n",
3404              mds, ceph_session_op_name(op), session,
3405              ceph_session_state_name(session->s_state), seq);
3406
3407         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3408                 session->s_state = CEPH_MDS_SESSION_OPEN;
3409                 pr_info("mds%d came back\n", session->s_mds);
3410         }
3411
3412         switch (op) {
3413         case CEPH_SESSION_OPEN:
3414                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3415                         pr_info("mds%d reconnect success\n", session->s_mds);
3416                 session->s_state = CEPH_MDS_SESSION_OPEN;
3417                 session->s_features = features;
3418                 renewed_caps(mdsc, session, 0);
3419                 if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
3420                         metric_schedule_delayed(&mdsc->metric);
3421                 wake = 1;
3422                 if (mdsc->stopping)
3423                         __close_session(mdsc, session);
3424                 break;
3425
3426         case CEPH_SESSION_RENEWCAPS:
3427                 if (session->s_renew_seq == seq)
3428                         renewed_caps(mdsc, session, 1);
3429                 break;
3430
3431         case CEPH_SESSION_CLOSE:
3432                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3433                         pr_info("mds%d reconnect denied\n", session->s_mds);
3434                 session->s_state = CEPH_MDS_SESSION_CLOSED;
3435                 cleanup_session_requests(mdsc, session);
3436                 remove_session_caps(session);
3437                 wake = 2; /* for good measure */
3438                 wake_up_all(&mdsc->session_close_wq);
3439                 break;
3440
3441         case CEPH_SESSION_STALE:
3442                 pr_info("mds%d caps went stale, renewing\n",
3443                         session->s_mds);
3444                 atomic_inc(&session->s_cap_gen);
3445                 session->s_cap_ttl = jiffies - 1;
3446                 send_renew_caps(mdsc, session);
3447                 break;
3448
3449         case CEPH_SESSION_RECALL_STATE:
3450                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3451                 break;
3452
3453         case CEPH_SESSION_FLUSHMSG:
3454                 send_flushmsg_ack(mdsc, session, seq);
3455                 break;
3456
3457         case CEPH_SESSION_FORCE_RO:
3458                 dout("force_session_readonly %p\n", session);
3459                 spin_lock(&session->s_cap_lock);
3460                 session->s_readonly = true;
3461                 spin_unlock(&session->s_cap_lock);
3462                 wake_up_session_caps(session, FORCE_RO);
3463                 break;
3464
3465         case CEPH_SESSION_REJECT:
3466                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3467                 pr_info("mds%d rejected session\n", session->s_mds);
3468                 session->s_state = CEPH_MDS_SESSION_REJECTED;
3469                 cleanup_session_requests(mdsc, session);
3470                 remove_session_caps(session);
3471                 if (blocklisted)
3472                         mdsc->fsc->blocklisted = true;
3473                 wake = 2; /* for good measure */
3474                 break;
3475
3476         default:
3477                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3478                 WARN_ON(1);
3479         }
3480
3481         mutex_unlock(&session->s_mutex);
3482         if (wake) {
3483                 mutex_lock(&mdsc->mutex);
3484                 __wake_requests(mdsc, &session->s_waiting);
3485                 if (wake == 2)
3486                         kick_requests(mdsc, mds);
3487                 mutex_unlock(&mdsc->mutex);
3488         }
3489         if (op == CEPH_SESSION_CLOSE)
3490                 ceph_put_mds_session(session);
3491         return;
3492
3493 bad:
3494         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3495                (int)msg->front.iov_len);
3496         ceph_msg_dump(msg);
3497         return;
3498 }
3499
3500 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3501 {
3502         int dcaps;
3503
3504         dcaps = xchg(&req->r_dir_caps, 0);
3505         if (dcaps) {
3506                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3507                 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3508         }
3509 }
3510
3511 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3512 {
3513         int dcaps;
3514
3515         dcaps = xchg(&req->r_dir_caps, 0);
3516         if (dcaps) {
3517                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3518                 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3519                                                 dcaps);
3520         }
3521 }
3522
3523 /*
3524  * called under session->mutex.
3525  */
3526 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3527                                    struct ceph_mds_session *session)
3528 {
3529         struct ceph_mds_request *req, *nreq;
3530         struct rb_node *p;
3531
3532         dout("replay_unsafe_requests mds%d\n", session->s_mds);
3533
3534         mutex_lock(&mdsc->mutex);
3535         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3536                 __send_request(session, req, true);
3537
3538         /*
3539          * Also re-send old requests when the MDS enters the reconnect stage,
3540          * so that the MDS can process completed requests in the clientreplay stage.
3541          */
3542         p = rb_first(&mdsc->request_tree);
3543         while (p) {
3544                 req = rb_entry(p, struct ceph_mds_request, r_node);
3545                 p = rb_next(p);
3546                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3547                         continue;
3548                 if (req->r_attempts == 0)
3549                         continue; /* only old requests */
3550                 if (!req->r_session)
3551                         continue;
3552                 if (req->r_session->s_mds != session->s_mds)
3553                         continue;
3554
3555                 ceph_mdsc_release_dir_caps_no_check(req);
3556
3557                 __send_request(session, req, true);
3558         }
3559         mutex_unlock(&mdsc->mutex);
3560 }
3561
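/*
 * Flush the partially built reconnect pagelist to the MDS as one (v5)
 * reconnect message and start a fresh pagelist, so that a huge
 * reconnect can be split across several messages.  Only possible when
 * the MDS advertises support for multiple reconnect messages.
 */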
3562 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3563 {
3564         struct ceph_msg *reply;
3565         struct ceph_pagelist *_pagelist;
3566         struct page *page;
3567         __le32 *addr;
3568         int err = -ENOMEM;
3569
3570         if (!recon_state->allow_multi)
3571                 return -ENOSPC;
3572
3573         /* can't handle message that contains both caps and realm */
3574         BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3575
3576         /* pre-allocate new pagelist */
3577         _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3578         if (!_pagelist)
3579                 return -ENOMEM;
3580
3581         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3582         if (!reply)
3583                 goto fail_msg;
3584
3585         /* placeholder for nr_caps */
3586         err = ceph_pagelist_encode_32(_pagelist, 0);
3587         if (err < 0)
3588                 goto fail;
3589
3590         if (recon_state->nr_caps) {
3591                 /* currently encoding caps */
3592                 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3593                 if (err)
3594                         goto fail;
3595         } else {
3596                 /* placeholder for nr_realms (currently encoding realms) */
3597                 err = ceph_pagelist_encode_32(_pagelist, 0);
3598                 if (err < 0)
3599                         goto fail;
3600         }
3601
3602         err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3603         if (err)
3604                 goto fail;
3605
3606         page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3607         addr = kmap_atomic(page);
3608         if (recon_state->nr_caps) {
3609                 /* currently encoding caps */
3610                 *addr = cpu_to_le32(recon_state->nr_caps);
3611         } else {
3612                 /* currently encoding realms */
3613                 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3614         }
3615         kunmap_atomic(addr);
3616
3617         reply->hdr.version = cpu_to_le16(5);
3618         reply->hdr.compat_version = cpu_to_le16(4);
3619
3620         reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3621         ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3622
3623         ceph_con_send(&recon_state->session->s_con, reply);
3624         ceph_pagelist_release(recon_state->pagelist);
3625
3626         recon_state->pagelist = _pagelist;
3627         recon_state->nr_caps = 0;
3628         recon_state->nr_realms = 0;
3629         recon_state->msg_version = 5;
3630         return 0;
3631 fail:
3632         ceph_msg_put(reply);
3633 fail_msg:
3634         ceph_pagelist_release(_pagelist);
3635         return err;
3636 }
3637
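/*
 * Find the "primary" dentry alias of an inode: for a directory, its
 * single non-root alias; for other inodes, a hashed alias flagged
 * CEPH_DENTRY_PRIMARY_LINK.  Returns a referenced dentry or NULL.
 */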
3638 static struct dentry* d_find_primary(struct inode *inode)
3639 {
3640         struct dentry *alias, *dn = NULL;
3641
3642         if (hlist_empty(&inode->i_dentry))
3643                 return NULL;
3644
3645         spin_lock(&inode->i_lock);
3646         if (hlist_empty(&inode->i_dentry))
3647                 goto out_unlock;
3648
3649         if (S_ISDIR(inode->i_mode)) {
3650                 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
3651                 if (!IS_ROOT(alias))
3652                         dn = dget(alias);
3653                 goto out_unlock;
3654         }
3655
3656         hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
3657                 spin_lock(&alias->d_lock);
3658                 if (!d_unhashed(alias) &&
3659                     (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
3660                         dn = dget_dlock(alias);
3661                 }
3662                 spin_unlock(&alias->d_lock);
3663                 if (dn)
3664                         break;
3665         }
3666 out_unlock:
3667         spin_unlock(&inode->i_lock);
3668         return dn;
3669 }
3670
3671 /*
3672  * Encode information about a cap for a reconnect with the MDS.
3673  */
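/*
 * For msg_version >= 2 each record is: the ino, an optional (struct_v,
 * compat, struct_len) header for version >= 3, the primary dentry path,
 * the v2 cap record, the encoded fcntl/flock locks and, for struct_v >=
 * 2, the snap_follows value.  Older MDSes get the simpler v1 record
 * with just the ino, path and cap state.
 */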
3674 static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3675                           void *arg)
3676 {
3677         union {
3678                 struct ceph_mds_cap_reconnect v2;
3679                 struct ceph_mds_cap_reconnect_v1 v1;
3680         } rec;
3681         struct ceph_inode_info *ci = cap->ci;
3682         struct ceph_reconnect_state *recon_state = arg;
3683         struct ceph_pagelist *pagelist = recon_state->pagelist;
3684         struct dentry *dentry;
3685         char *path;
3686         int pathlen = 0, err;
3687         u64 pathbase;
3688         u64 snap_follows;
3689
3690         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3691              inode, ceph_vinop(inode), cap, cap->cap_id,
3692              ceph_cap_string(cap->issued));
3693
3694         dentry = d_find_primary(inode);
3695         if (dentry) {
3696                 /* set pathbase to parent dir when msg_version >= 2 */
3697                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
3698                                             recon_state->msg_version >= 2);
3699                 dput(dentry);
3700                 if (IS_ERR(path)) {
3701                         err = PTR_ERR(path);
3702                         goto out_err;
3703                 }
3704         } else {
3705                 path = NULL;
3706                 pathbase = 0;
3707         }
3708
3709         spin_lock(&ci->i_ceph_lock);
3710         cap->seq = 0;        /* reset cap seq */
3711         cap->issue_seq = 0;  /* and issue_seq */
3712         cap->mseq = 0;       /* and migrate_seq */
3713         cap->cap_gen = atomic_read(&cap->session->s_cap_gen);
3714
3715         /* These are lost when the session goes away */
3716         if (S_ISDIR(inode->i_mode)) {
3717                 if (cap->issued & CEPH_CAP_DIR_CREATE) {
3718                         ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3719                         memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3720                 }
3721                 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3722         }
3723
3724         if (recon_state->msg_version >= 2) {
3725                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3726                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3727                 rec.v2.issued = cpu_to_le32(cap->issued);
3728                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3729                 rec.v2.pathbase = cpu_to_le64(pathbase);
3730                 rec.v2.flock_len = (__force __le32)
3731                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3732         } else {
3733                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3734                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3735                 rec.v1.issued = cpu_to_le32(cap->issued);
3736                 rec.v1.size = cpu_to_le64(i_size_read(inode));
3737                 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3738                 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3739                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3740                 rec.v1.pathbase = cpu_to_le64(pathbase);
3741         }
3742
3743         if (list_empty(&ci->i_cap_snaps)) {
3744                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3745         } else {
3746                 struct ceph_cap_snap *capsnap =
3747                         list_first_entry(&ci->i_cap_snaps,
3748                                          struct ceph_cap_snap, ci_item);
3749                 snap_follows = capsnap->follows;
3750         }
3751         spin_unlock(&ci->i_ceph_lock);
3752
3753         if (recon_state->msg_version >= 2) {
3754                 int num_fcntl_locks, num_flock_locks;
3755                 struct ceph_filelock *flocks = NULL;
3756                 size_t struct_len, total_len = sizeof(u64);
3757                 u8 struct_v = 0;
3758
3759 encode_again:
3760                 if (rec.v2.flock_len) {
3761                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3762                 } else {
3763                         num_fcntl_locks = 0;
3764                         num_flock_locks = 0;
3765                 }
3766                 if (num_fcntl_locks + num_flock_locks > 0) {
3767                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3768                                                sizeof(struct ceph_filelock),
3769                                                GFP_NOFS);
3770                         if (!flocks) {
3771                                 err = -ENOMEM;
3772                                 goto out_err;
3773                         }
3774                         err = ceph_encode_locks_to_buffer(inode, flocks,
3775                                                           num_fcntl_locks,
3776                                                           num_flock_locks);
3777                         if (err) {
3778                                 kfree(flocks);
3779                                 flocks = NULL;
3780                                 if (err == -ENOSPC)
3781                                         goto encode_again;
3782                                 goto out_err;
3783                         }
3784                 } else {
3785                         kfree(flocks);
3786                         flocks = NULL;
3787                 }
3788
3789                 if (recon_state->msg_version >= 3) {
3790                         /* version, compat_version and struct_len */
3791                         total_len += 2 * sizeof(u8) + sizeof(u32);
3792                         struct_v = 2;
3793                 }
3794                 /*
3795                  * number of encoded locks is stable, so copy to pagelist
3796                  */
3797                 struct_len = 2 * sizeof(u32) +
3798                             (num_fcntl_locks + num_flock_locks) *
3799                             sizeof(struct ceph_filelock);
3800                 rec.v2.flock_len = cpu_to_le32(struct_len);
3801
3802                 struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
3803
3804                 if (struct_v >= 2)
3805                         struct_len += sizeof(u64); /* snap_follows */
3806
3807                 total_len += struct_len;
3808
3809                 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3810                         err = send_reconnect_partial(recon_state);
3811                         if (err)
3812                                 goto out_freeflocks;
3813                         pagelist = recon_state->pagelist;
3814                 }
3815
3816                 err = ceph_pagelist_reserve(pagelist, total_len);
3817                 if (err)
3818                         goto out_freeflocks;
3819
3820                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3821                 if (recon_state->msg_version >= 3) {
3822                         ceph_pagelist_encode_8(pagelist, struct_v);
3823                         ceph_pagelist_encode_8(pagelist, 1);
3824                         ceph_pagelist_encode_32(pagelist, struct_len);
3825                 }
3826                 ceph_pagelist_encode_string(pagelist, path, pathlen);
3827                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3828                 ceph_locks_to_pagelist(flocks, pagelist,
3829                                        num_fcntl_locks, num_flock_locks);
3830                 if (struct_v >= 2)
3831                         ceph_pagelist_encode_64(pagelist, snap_follows);
3832 out_freeflocks:
3833                 kfree(flocks);
3834         } else {
3835                 err = ceph_pagelist_reserve(pagelist,
3836                                             sizeof(u64) + sizeof(u32) +
3837                                             pathlen + sizeof(rec.v1));
3838                 if (err)
3839                         goto out_err;
3840
3841                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3842                 ceph_pagelist_encode_string(pagelist, path, pathlen);
3843                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3844         }
3845
3846 out_err:
3847         ceph_mdsc_free_path(path, pathlen);
3848         if (!err)
3849                 recon_state->nr_caps++;
3850         return err;
3851 }
3852
3853 static int encode_snap_realms(struct ceph_mds_client *mdsc,
3854                               struct ceph_reconnect_state *recon_state)
3855 {
3856         struct rb_node *p;
3857         struct ceph_pagelist *pagelist = recon_state->pagelist;
3858         int err = 0;
3859
3860         if (recon_state->msg_version >= 4) {
3861                 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3862                 if (err < 0)
3863                         goto fail;
3864         }
3865
3866         /*
3867          * snaprealms.  we provide the mds with the ino, seq (version), and
3868          * parent for all of our realms.  If the mds has any newer info,
3869          * it will tell us.
3870          */
3871         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3872                 struct ceph_snap_realm *realm =
3873                        rb_entry(p, struct ceph_snap_realm, node);
3874                 struct ceph_mds_snaprealm_reconnect sr_rec;
3875
3876                 if (recon_state->msg_version >= 4) {
3877                         size_t need = sizeof(u8) * 2 + sizeof(u32) +
3878                                       sizeof(sr_rec);
3879
3880                         if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3881                                 err = send_reconnect_partial(recon_state);
3882                                 if (err)
3883                                         goto fail;
3884                                 pagelist = recon_state->pagelist;
3885                         }
3886
3887                         err = ceph_pagelist_reserve(pagelist, need);
3888                         if (err)
3889                                 goto fail;
3890
3891                         ceph_pagelist_encode_8(pagelist, 1);
3892                         ceph_pagelist_encode_8(pagelist, 1);
3893                         ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3894                 }
3895
3896                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3897                      realm->ino, realm->seq, realm->parent_ino);
3898                 sr_rec.ino = cpu_to_le64(realm->ino);
3899                 sr_rec.seq = cpu_to_le64(realm->seq);
3900                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3901
3902                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3903                 if (err)
3904                         goto fail;
3905
3906                 recon_state->nr_realms++;
3907         }
3908 fail:
3909         return err;
3910 }
3911
3912
3913 /*
3914  * If an MDS fails and recovers, clients need to reconnect in order to
3915  * reestablish shared state.  This includes all caps issued through
3916  * this session _and_ the snap_realm hierarchy.  Because it's not
3917  * clear which snap realms the mds cares about, we send everything we
3918  * know about; that ensures we'll then get any new info the
3919  * recovering MDS might have.
3920  *
3921  * This is a relatively heavyweight operation, but it's rare.
3922  */
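/*
 * The reconnect body built below is: a nr_caps placeholder, one record
 * per cap (see reconnect_caps_cb()), the snap realm records (see
 * encode_snap_realms()) and, for v5 messages, a trailing byte that
 * appears to flag whether further reconnect messages follow (0 here,
 * 1 in send_reconnect_partial()).
 */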
3923 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3924                                struct ceph_mds_session *session)
3925 {
3926         struct ceph_msg *reply;
3927         int mds = session->s_mds;
3928         int err = -ENOMEM;
3929         struct ceph_reconnect_state recon_state = {
3930                 .session = session,
3931         };
3932         LIST_HEAD(dispose);
3933
3934         pr_info("mds%d reconnect start\n", mds);
3935
3936         recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
3937         if (!recon_state.pagelist)
3938                 goto fail_nopagelist;
3939
3940         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3941         if (!reply)
3942                 goto fail_nomsg;
3943
3944         xa_destroy(&session->s_delegated_inos);
3945
3946         mutex_lock(&session->s_mutex);
3947         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
3948         session->s_seq = 0;
3949
3950         dout("session %p state %s\n", session,
3951              ceph_session_state_name(session->s_state));
3952
3953         atomic_inc(&session->s_cap_gen);
3954
3955         spin_lock(&session->s_cap_lock);
3956         /* don't know if session is readonly */
3957         session->s_readonly = 0;
3958         /*
3959          * notify __ceph_remove_cap() that we are composing cap reconnect.
3960          * If a cap gets released before being added to the cap reconnect,
3961          * __ceph_remove_cap() should skip queuing the cap release.
3962          */
3963         session->s_cap_reconnect = 1;
3964         /* drop old cap expires; we're about to reestablish that state */
3965         detach_cap_releases(session, &dispose);
3966         spin_unlock(&session->s_cap_lock);
3967         dispose_cap_releases(mdsc, &dispose);
3968
3969         /* trim unused caps to reduce MDS's cache rejoin time */
3970         if (mdsc->fsc->sb->s_root)
3971                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3972
3973         ceph_con_close(&session->s_con);
3974         ceph_con_open(&session->s_con,
3975                       CEPH_ENTITY_TYPE_MDS, mds,
3976                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3977
3978         /* replay unsafe requests */
3979         replay_unsafe_requests(mdsc, session);
3980
3981         ceph_early_kick_flushing_caps(mdsc, session);
3982
3983         down_read(&mdsc->snap_rwsem);
3984
3985         /* placeholder for nr_caps */
3986         err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
3987         if (err)
3988                 goto fail;
3989
3990         if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
3991                 recon_state.msg_version = 3;
3992                 recon_state.allow_multi = true;
3993         } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
3994                 recon_state.msg_version = 3;
3995         } else {
3996                 recon_state.msg_version = 2;
3997         }
3998         /* traverse this session's caps */
3999         err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4000
4001         spin_lock(&session->s_cap_lock);
4002         session->s_cap_reconnect = 0;
4003         spin_unlock(&session->s_cap_lock);
4004
4005         if (err < 0)
4006                 goto fail;
4007
4008         /* check if all realms can be encoded into current message */
4009         if (mdsc->num_snap_realms) {
4010                 size_t total_len =
4011                         recon_state.pagelist->length +
4012                         mdsc->num_snap_realms *
4013                         sizeof(struct ceph_mds_snaprealm_reconnect);
4014                 if (recon_state.msg_version >= 4) {
4015                         /* number of realms */
4016                         total_len += sizeof(u32);
4017                         /* version, compat_version and struct_len */
4018                         total_len += mdsc->num_snap_realms *
4019                                      (2 * sizeof(u8) + sizeof(u32));
4020                 }
4021                 if (total_len > RECONNECT_MAX_SIZE) {
4022                         if (!recon_state.allow_multi) {
4023                                 err = -ENOSPC;
4024                                 goto fail;
4025                         }
4026                         if (recon_state.nr_caps) {
4027                                 err = send_reconnect_partial(&recon_state);
4028                                 if (err)
4029                                         goto fail;
4030                         }
4031                         recon_state.msg_version = 5;
4032                 }
4033         }
4034
4035         err = encode_snap_realms(mdsc, &recon_state);
4036         if (err < 0)
4037                 goto fail;
4038
4039         if (recon_state.msg_version >= 5) {
4040                 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4041                 if (err < 0)
4042                         goto fail;
4043         }
4044
4045         if (recon_state.nr_caps || recon_state.nr_realms) {
4046                 struct page *page =
4047                         list_first_entry(&recon_state.pagelist->head,
4048                                         struct page, lru);
4049                 __le32 *addr = kmap_atomic(page);
4050                 if (recon_state.nr_caps) {
4051                         WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4052                         *addr = cpu_to_le32(recon_state.nr_caps);
4053                 } else if (recon_state.msg_version >= 4) {
4054                         *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4055                 }
4056                 kunmap_atomic(addr);
4057         }
4058
4059         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4060         if (recon_state.msg_version >= 4)
4061                 reply->hdr.compat_version = cpu_to_le16(4);
4062
4063         reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4064         ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4065
4066         ceph_con_send(&session->s_con, reply);
4067
4068         mutex_unlock(&session->s_mutex);
4069
4070         mutex_lock(&mdsc->mutex);
4071         __wake_requests(mdsc, &session->s_waiting);
4072         mutex_unlock(&mdsc->mutex);
4073
4074         up_read(&mdsc->snap_rwsem);
4075         ceph_pagelist_release(recon_state.pagelist);
4076         return;
4077
4078 fail:
4079         ceph_msg_put(reply);
4080         up_read(&mdsc->snap_rwsem);
4081         mutex_unlock(&session->s_mutex);
4082 fail_nomsg:
4083         ceph_pagelist_release(recon_state.pagelist);
4084 fail_nopagelist:
4085         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
4086         return;
4087 }
4088
4089
4090 /*
4091  * compare old and new mdsmaps, kicking requests
4092  * and closing out old connections as necessary
4093  *
4094  * called under mdsc->mutex.
4095  */
4096 static void check_new_map(struct ceph_mds_client *mdsc,
4097                           struct ceph_mdsmap *newmap,
4098                           struct ceph_mdsmap *oldmap)
4099 {
4100         int i, j, err;
4101         int oldstate, newstate;
4102         struct ceph_mds_session *s;
4103         unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
4104
4105         dout("check_new_map new %u old %u\n",
4106              newmap->m_epoch, oldmap->m_epoch);
4107
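        /*
         * Collect every export-target rank advertised in the new map into
         * the targets bitmap first; ranks still marked there after the
         * per-session pass below may need a session opened and a reconnect
         * sent.
         */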
4108         if (newmap->m_info) {
4109                 for (i = 0; i < newmap->possible_max_rank; i++) {
4110                         for (j = 0; j < newmap->m_info[i].num_export_targets; j++)
4111                                 set_bit(newmap->m_info[i].export_targets[j], targets);
4112                 }
4113         }
4114
4115         for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4116                 if (!mdsc->sessions[i])
4117                         continue;
4118                 s = mdsc->sessions[i];
4119                 oldstate = ceph_mdsmap_get_state(oldmap, i);
4120                 newstate = ceph_mdsmap_get_state(newmap, i);
4121
4122                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
4123                      i, ceph_mds_state_name(oldstate),
4124                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
4125                      ceph_mds_state_name(newstate),
4126                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4127                      ceph_session_state_name(s->s_state));
4128
4129                 if (i >= newmap->possible_max_rank) {
4130                         /* force close session for stopped mds */
4131                         ceph_get_mds_session(s);
4132                         __unregister_session(mdsc, s);
4133                         __wake_requests(mdsc, &s->s_waiting);
4134                         mutex_unlock(&mdsc->mutex);
4135
4136                         mutex_lock(&s->s_mutex);
4137                         cleanup_session_requests(mdsc, s);
4138                         remove_session_caps(s);
4139                         mutex_unlock(&s->s_mutex);
4140
4141                         ceph_put_mds_session(s);
4142
4143                         mutex_lock(&mdsc->mutex);
4144                         kick_requests(mdsc, i);
4145                         continue;
4146                 }
4147
4148                 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4149                            ceph_mdsmap_get_addr(newmap, i),
4150                            sizeof(struct ceph_entity_addr))) {
4151                         /* just close it */
4152                         mutex_unlock(&mdsc->mutex);
4153                         mutex_lock(&s->s_mutex);
4154                         mutex_lock(&mdsc->mutex);
4155                         ceph_con_close(&s->s_con);
4156                         mutex_unlock(&s->s_mutex);
4157                         s->s_state = CEPH_MDS_SESSION_RESTARTING;
4158                 } else if (oldstate == newstate) {
4159                         continue;  /* nothing new with this mds */
4160                 }
4161
4162                 /*
4163                  * send reconnect?
4164                  */
4165                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4166                     newstate >= CEPH_MDS_STATE_RECONNECT) {
4167                         mutex_unlock(&mdsc->mutex);
4168                         clear_bit(i, targets);
4169                         send_mds_reconnect(mdsc, s);
4170                         mutex_lock(&mdsc->mutex);
4171                 }
4172
4173                 /*
4174          * kick requests on any mds that has gone active.
4175                  */
4176                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4177                     newstate >= CEPH_MDS_STATE_ACTIVE) {
4178                         if (oldstate != CEPH_MDS_STATE_CREATING &&
4179                             oldstate != CEPH_MDS_STATE_STARTING)
4180                                 pr_info("mds%d recovery completed\n", s->s_mds);
4181                         kick_requests(mdsc, i);
4182                         mutex_unlock(&mdsc->mutex);
4183                         mutex_lock(&s->s_mutex);
4184                         mutex_lock(&mdsc->mutex);
4185                         ceph_kick_flushing_caps(mdsc, s);
4186                         mutex_unlock(&s->s_mutex);
4187                         wake_up_session_caps(s, RECONNECT);
4188                 }
4189         }
4190
4191         /*
4192          * Only open and reconnect sessions that don't exist yet.
4193          */
4194         for (i = 0; i < newmap->possible_max_rank; i++) {
4195                 /*
4196                  * If the importing MDS crashes just after the EImportStart
4197                  * journal is flushed, then when a standby MDS takes over
4198                  * and replays the EImportStart journal, the new MDS daemon
4199                  * will wait for the client to reconnect to it, but the
4200                  * client may never have registered/opened the session at
4201                  * all.
4202                  *
4203                  * Try to reconnect to that MDS daemon if its rank number
4204                  * is in the export targets array and it is in the
4205                  * up:reconnect state.
4206                  */
4207                 newstate = ceph_mdsmap_get_state(newmap, i);
4208                 if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
4209                         continue;
4210
4211                 /*
4212                  * The session may already have been registered and opened
4213                  * by requests that chose random MDSes during the
4214                  * mdsc->mutex unlock/lock gap below, in rare cases.  But
4215                  * the related MDS daemon will just queue those requests and
4216                  * keep waiting for the client's reconnect request in the
4217                  * up:reconnect state.
4218                  */
4219                 s = __ceph_lookup_mds_session(mdsc, i);
4220                 if (likely(!s)) {
4221                         s = __open_export_target_session(mdsc, i);
4222                         if (IS_ERR(s)) {
4223                                 err = PTR_ERR(s);
4224                                 pr_err("failed to open export target session, err %d\n",
4225                                        err);
4226                                 continue;
4227                         }
4228                 }
4229                 dout("send reconnect to export target mds.%d\n", i);
4230                 mutex_unlock(&mdsc->mutex);
4231                 send_mds_reconnect(mdsc, s);
4232                 ceph_put_mds_session(s);
4233                 mutex_lock(&mdsc->mutex);
4234         }
4235
4236         for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4237                 s = mdsc->sessions[i];
4238                 if (!s)
4239                         continue;
4240                 if (!ceph_mdsmap_is_laggy(newmap, i))
4241                         continue;
4242                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4243                     s->s_state == CEPH_MDS_SESSION_HUNG ||
4244                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
4245                         dout(" connecting to export targets of laggy mds%d\n",
4246                              i);
4247                         __open_export_target_sessions(mdsc, s);
4248                 }
4249         }
4250 }
4251
4252
4253
4254 /*
4255  * leases
4256  */
4257
4258 /*
4259  * Caller must hold the session s_mutex and dentry->d_lock.
4260  */
4261 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4262 {
4263         struct ceph_dentry_info *di = ceph_dentry(dentry);
4264
4265         ceph_put_mds_session(di->lease_session);
4266         di->lease_session = NULL;
4267 }
4268
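/*
 * Handle a CEPH_MSG_CLIENT_LEASE message from the MDS: look up the named
 * dentry and revoke or renew its lease.  If the inode or dentry cannot be
 * found, or on a revoke, the same message is reused to send a REVOKE_ACK.
 */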
4269 static void handle_lease(struct ceph_mds_client *mdsc,
4270                          struct ceph_mds_session *session,
4271                          struct ceph_msg *msg)
4272 {
4273         struct super_block *sb = mdsc->fsc->sb;
4274         struct inode *inode;
4275         struct dentry *parent, *dentry;
4276         struct ceph_dentry_info *di;
4277         int mds = session->s_mds;
4278         struct ceph_mds_lease *h = msg->front.iov_base;
4279         u32 seq;
4280         struct ceph_vino vino;
4281         struct qstr dname;
4282         int release = 0;
4283
4284         dout("handle_lease from mds%d\n", mds);
4285
4286         /* decode */
4287         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4288                 goto bad;
4289         vino.ino = le64_to_cpu(h->ino);
4290         vino.snap = CEPH_NOSNAP;
4291         seq = le32_to_cpu(h->seq);
4292         dname.len = get_unaligned_le32(h + 1);
4293         if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4294                 goto bad;
4295         dname.name = (void *)(h + 1) + sizeof(u32);
4296
4297         /* lookup inode */
4298         inode = ceph_find_inode(sb, vino);
4299         dout("handle_lease %s, ino %llx %p %.*s\n",
4300              ceph_lease_op_name(h->action), vino.ino, inode,
4301              dname.len, dname.name);
4302
4303         mutex_lock(&session->s_mutex);
4304         inc_session_sequence(session);
4305
4306         if (!inode) {
4307                 dout("handle_lease no inode %llx\n", vino.ino);
4308                 goto release;
4309         }
4310
4311         /* dentry */
4312         parent = d_find_alias(inode);
4313         if (!parent) {
4314                 dout("no parent dentry on inode %p\n", inode);
4315                 WARN_ON(1);
4316                 goto release;  /* hrm... */
4317         }
4318         dname.hash = full_name_hash(parent, dname.name, dname.len);
4319         dentry = d_lookup(parent, &dname);
4320         dput(parent);
4321         if (!dentry)
4322                 goto release;
4323
4324         spin_lock(&dentry->d_lock);
4325         di = ceph_dentry(dentry);
4326         switch (h->action) {
4327         case CEPH_MDS_LEASE_REVOKE:
4328                 if (di->lease_session == session) {
4329                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4330                                 h->seq = cpu_to_le32(di->lease_seq);
4331                         __ceph_mdsc_drop_dentry_lease(dentry);
4332                 }
4333                 release = 1;
4334                 break;
4335
4336         case CEPH_MDS_LEASE_RENEW:
4337                 if (di->lease_session == session &&
4338                     di->lease_gen == atomic_read(&session->s_cap_gen) &&
4339                     di->lease_renew_from &&
4340                     di->lease_renew_after == 0) {
4341                         unsigned long duration =
4342                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4343
4344                         di->lease_seq = seq;
4345                         di->time = di->lease_renew_from + duration;
4346                         di->lease_renew_after = di->lease_renew_from +
4347                                 (duration >> 1);
4348                         di->lease_renew_from = 0;
4349                 }
4350                 break;
4351         }
4352         spin_unlock(&dentry->d_lock);
4353         dput(dentry);
4354
4355         if (!release)
4356                 goto out;
4357
4358 release:
4359         /* let's just reuse the same message */
4360         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4361         ceph_msg_get(msg);
4362         ceph_con_send(&session->s_con, msg);
4363
4364 out:
4365         mutex_unlock(&session->s_mutex);
4366         iput(inode);
4367         return;
4368
4369 bad:
4370         pr_err("corrupt lease message\n");
4371         ceph_msg_dump(msg);
4372 }
4373
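/*
 * Send a lease message for the given dentry to the MDS, e.g. a preemptive
 * lease release ahead of an upcoming request.
 */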
4374 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4375                               struct dentry *dentry, char action,
4376                               u32 seq)
4377 {
4378         struct ceph_msg *msg;
4379         struct ceph_mds_lease *lease;
4380         struct inode *dir;
4381         int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4382
4383         dout("lease_send_msg dentry %p %s to mds%d\n",
4384              dentry, ceph_lease_op_name(action), session->s_mds);
4385
4386         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4387         if (!msg)
4388                 return;
4389         lease = msg->front.iov_base;
4390         lease->action = action;
4391         lease->seq = cpu_to_le32(seq);
4392
4393         spin_lock(&dentry->d_lock);
4394         dir = d_inode(dentry->d_parent);
4395         lease->ino = cpu_to_le64(ceph_ino(dir));
4396         lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4397
4398         put_unaligned_le32(dentry->d_name.len, lease + 1);
4399         memcpy((void *)(lease + 1) + 4,
4400                dentry->d_name.name, dentry->d_name.len);
4401         spin_unlock(&dentry->d_lock);
4402         /*
4403          * if this is a preemptive lease RELEASE, no need to
4404          * flush request stream, since the actual request will
4405          * soon follow.
4406          */
4407         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4408
4409         ceph_con_send(&session->s_con, msg);
4410 }
4411
4412 /*
4413  * Lock and unlock the session mutex to wait for ongoing session activity.
4414  */
4415 static void lock_unlock_session(struct ceph_mds_session *s)
4416 {
4417         mutex_lock(&s->s_mutex);
4418         mutex_unlock(&s->s_mutex);
4419 }
4420
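/*
 * If the client has been blocklisted and the CLEANRECOVER mount option is
 * set, automatically force a reconnect to re-establish the mount.
 */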
4421 static void maybe_recover_session(struct ceph_mds_client *mdsc)
4422 {
4423         struct ceph_fs_client *fsc = mdsc->fsc;
4424
4425         if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4426                 return;
4427
4428         if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4429                 return;
4430
4431         if (!READ_ONCE(fsc->blocklisted))
4432                 return;
4433
4434         pr_info("auto reconnect after blocklisted\n");
4435         ceph_force_reconnect(fsc->sb);
4436 }
4437
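/*
 * Return true if the session is still usable.  OPEN sessions whose lease has
 * expired are marked HUNG; sessions that are being torn down or were never
 * fully established return false.
 */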
4438 bool check_session_state(struct ceph_mds_session *s)
4439 {
4440         struct ceph_fs_client *fsc = s->s_mdsc->fsc;
4441
4442         switch (s->s_state) {
4443         case CEPH_MDS_SESSION_OPEN:
4444                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4445                         s->s_state = CEPH_MDS_SESSION_HUNG;
4446                         pr_info("mds%d hung\n", s->s_mds);
4447                 }
4448                 break;
4449         case CEPH_MDS_SESSION_CLOSING:
4450                 /* Should never reach this when not force unmounting */
4451                 WARN_ON_ONCE(s->s_ttl &&
4452                              READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
4453                 fallthrough;
4454         case CEPH_MDS_SESSION_NEW:
4455         case CEPH_MDS_SESSION_RESTARTING:
4456         case CEPH_MDS_SESSION_CLOSED:
4457         case CEPH_MDS_SESSION_REJECTED:
4458                 return false;
4459         }
4460
4461         return true;
4462 }
4463
4464 /*
4465  * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
4466  * then we need to retransmit that request.
4467  */
4468 void inc_session_sequence(struct ceph_mds_session *s)
4469 {
4470         lockdep_assert_held(&s->s_mutex);
4471
4472         s->s_seq++;
4473
4474         if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4475                 int ret;
4476
4477                 dout("resending session close request for mds%d\n", s->s_mds);
4478                 ret = request_close_session(s);
4479                 if (ret < 0)
4480                         pr_err("unable to close session to mds%d: %d\n",
4481                                s->s_mds, ret);
4482         }
4483 }
4484
4485 /*
4486  * delayed work -- periodically trim expired leases, renew caps with mds.  If
4487  * the @delay parameter is set to 0 or if it's more than 5 secs, the default
4488  * workqueue delay value of 5 secs will be used.
4489  */
4490 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
4491 {
4492         unsigned long max_delay = HZ * 5;
4493
4494         /* 5 secs default delay */
4495         if (!delay || (delay > max_delay))
4496                 delay = max_delay;
4497         schedule_delayed_work(&mdsc->delayed_work,
4498                               round_jiffies_relative(delay));
4499 }
4500
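/*
 * Periodic housekeeping: renew caps (or send a keepalive) on each session,
 * send queued cap releases, flush delayed caps, trim the snapid map and
 * re-arm the delayed work.
 */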
4501 static void delayed_work(struct work_struct *work)
4502 {
4503         struct ceph_mds_client *mdsc =
4504                 container_of(work, struct ceph_mds_client, delayed_work.work);
4505         unsigned long delay;
4506         int renew_interval;
4507         int renew_caps;
4508         int i;
4509
4510         dout("mdsc delayed_work\n");
4511
4512         if (mdsc->stopping)
4513                 return;
4514
4515         mutex_lock(&mdsc->mutex);
4516         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4517         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4518                                    mdsc->last_renew_caps);
4519         if (renew_caps)
4520                 mdsc->last_renew_caps = jiffies;
4521
4522         for (i = 0; i < mdsc->max_sessions; i++) {
4523                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4524                 if (!s)
4525                         continue;
4526
4527                 if (!check_session_state(s)) {
4528                         ceph_put_mds_session(s);
4529                         continue;
4530                 }
4531                 mutex_unlock(&mdsc->mutex);
4532
4533                 mutex_lock(&s->s_mutex);
4534                 if (renew_caps)
4535                         send_renew_caps(mdsc, s);
4536                 else
4537                         ceph_con_keepalive(&s->s_con);
4538                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4539                     s->s_state == CEPH_MDS_SESSION_HUNG)
4540                         ceph_send_cap_releases(mdsc, s);
4541                 mutex_unlock(&s->s_mutex);
4542                 ceph_put_mds_session(s);
4543
4544                 mutex_lock(&mdsc->mutex);
4545         }
4546         mutex_unlock(&mdsc->mutex);
4547
4548         delay = ceph_check_delayed_caps(mdsc);
4549
4550         ceph_queue_cap_reclaim_work(mdsc);
4551
4552         ceph_trim_snapid_map(mdsc);
4553
4554         maybe_recover_session(mdsc);
4555
4556         schedule_delayed(mdsc, delay);
4557 }
4558
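/*
 * Allocate and initialize the MDS client state for this ceph_fs_client.
 */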
4559 int ceph_mdsc_init(struct ceph_fs_client *fsc)
4560 {
4562         struct ceph_mds_client *mdsc;
4563         int err;
4564
4565         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4566         if (!mdsc)
4567                 return -ENOMEM;
4568         mdsc->fsc = fsc;
4569         mutex_init(&mdsc->mutex);
4570         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4571         if (!mdsc->mdsmap) {
4572                 err = -ENOMEM;
4573                 goto err_mdsc;
4574         }
4575
4576         init_completion(&mdsc->safe_umount_waiters);
4577         init_waitqueue_head(&mdsc->session_close_wq);
4578         INIT_LIST_HEAD(&mdsc->waiting_for_map);
4579         mdsc->quotarealms_inodes = RB_ROOT;
4580         mutex_init(&mdsc->quotarealms_inodes_mutex);
4581         init_rwsem(&mdsc->snap_rwsem);
4582         mdsc->snap_realms = RB_ROOT;
4583         INIT_LIST_HEAD(&mdsc->snap_empty);
4584         spin_lock_init(&mdsc->snap_empty_lock);
4585         mdsc->request_tree = RB_ROOT;
4586         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4587         mdsc->last_renew_caps = jiffies;
4588         INIT_LIST_HEAD(&mdsc->cap_delay_list);
4589         INIT_LIST_HEAD(&mdsc->cap_wait_list);
4590         spin_lock_init(&mdsc->cap_delay_lock);
4591         INIT_LIST_HEAD(&mdsc->snap_flush_list);
4592         spin_lock_init(&mdsc->snap_flush_lock);
4593         mdsc->last_cap_flush_tid = 1;
4594         INIT_LIST_HEAD(&mdsc->cap_flush_list);
4595         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4596         spin_lock_init(&mdsc->cap_dirty_lock);
4597         init_waitqueue_head(&mdsc->cap_flushing_wq);
4598         INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4599         err = ceph_metric_init(&mdsc->metric);
4600         if (err)
4601                 goto err_mdsmap;
4602
4603         spin_lock_init(&mdsc->dentry_list_lock);
4604         INIT_LIST_HEAD(&mdsc->dentry_leases);
4605         INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4606
4607         ceph_caps_init(mdsc);
4608         ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4609
4610         spin_lock_init(&mdsc->snapid_map_lock);
4611         mdsc->snapid_map_tree = RB_ROOT;
4612         INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4613
4614         init_rwsem(&mdsc->pool_perm_rwsem);
4615         mdsc->pool_perm_tree = RB_ROOT;
4616
4617         strscpy(mdsc->nodename, utsname()->nodename,
4618                 sizeof(mdsc->nodename));
4619
4620         fsc->mdsc = mdsc;
4621         return 0;
4622
4623 err_mdsmap:
4624         kfree(mdsc->mdsmap);
4625 err_mdsc:
4626         kfree(mdsc);
4627         return err;
4628 }
4629
4630 /*
4631  * Wait for safe replies on open mds requests.  If we time out, drop
4632  * all requests from the tree to avoid dangling dentry refs.
4633  */
4634 static void wait_requests(struct ceph_mds_client *mdsc)
4635 {
4636         struct ceph_options *opts = mdsc->fsc->client->options;
4637         struct ceph_mds_request *req;
4638
4639         mutex_lock(&mdsc->mutex);
4640         if (__get_oldest_req(mdsc)) {
4641                 mutex_unlock(&mdsc->mutex);
4642
4643                 dout("wait_requests waiting for requests\n");
4644                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4645                                     ceph_timeout_jiffies(opts->mount_timeout));
4646
4647                 /* tear down remaining requests */
4648                 mutex_lock(&mdsc->mutex);
4649                 while ((req = __get_oldest_req(mdsc))) {
4650                         dout("wait_requests timed out on tid %llu\n",
4651                              req->r_tid);
4652                         list_del_init(&req->r_wait);
4653                         __unregister_request(mdsc, req);
4654                 }
4655         }
4656         mutex_unlock(&mdsc->mutex);
4657         dout("wait_requests done\n");
4658 }
4659
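/*
 * Ask the MDS to flush its journal (mdlog) so that outstanding requests
 * become safe sooner.
 */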
4660 void send_flush_mdlog(struct ceph_mds_session *s)
4661 {
4662         struct ceph_msg *msg;
4663
4664         /*
4665          * Pre-luminous MDS crashes when it sees an unknown session request
4666          */
4667         if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
4668                 return;
4669
4670         mutex_lock(&s->s_mutex);
4671         dout("request mdlog flush to mds%d (%s) seq %lld\n", s->s_mds,
4672              ceph_session_state_name(s->s_state), s->s_seq);
4673         msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
4674                                       s->s_seq);
4675         if (!msg) {
4676                 pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
4677                        s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
4678         } else {
4679                 ceph_con_send(&s->s_con, msg);
4680         }
4681         mutex_unlock(&s->s_mutex);
4682 }
4683
4684 /*
4685  * Called before the mount goes read-only and before dentries are torn
4686  * down.  (hmm, does this still race with new lookups?)
4687  */
4688 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4689 {
4690         dout("pre_umount\n");
4691         mdsc->stopping = 1;
4692
4693         ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
4694         ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
4695         ceph_flush_dirty_caps(mdsc);
4696         wait_requests(mdsc);
4697
4698         /*
4699          * wait for reply handlers to drop their request refs and
4700          * their inode/dcache refs
4701          */
4702         ceph_msgr_flush();
4703
4704         ceph_cleanup_quotarealms_inodes(mdsc);
4705 }
4706
4707 /*
4708  * Wait for all in-flight write requests to the MDS to be committed (safe).
4709  */
4710 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
4711 {
4712         struct ceph_mds_request *req = NULL, *nextreq;
4713         struct rb_node *n;
4714
4715         mutex_lock(&mdsc->mutex);
4716         dout("wait_unsafe_requests want %lld\n", want_tid);
4717 restart:
4718         req = __get_oldest_req(mdsc);
4719         while (req && req->r_tid <= want_tid) {
4720                 /* find next request */
4721                 n = rb_next(&req->r_node);
4722                 if (n)
4723                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4724                 else
4725                         nextreq = NULL;
4726                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4727                     (req->r_op & CEPH_MDS_OP_WRITE)) {
4728                         /* write op */
4729                         ceph_mdsc_get_request(req);
4730                         if (nextreq)
4731                                 ceph_mdsc_get_request(nextreq);
4732                         mutex_unlock(&mdsc->mutex);
4733                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
4734                              req->r_tid, want_tid);
4735                         wait_for_completion(&req->r_safe_completion);
4736                         mutex_lock(&mdsc->mutex);
4737                         ceph_mdsc_put_request(req);
4738                         if (!nextreq)
4739                                 break;  /* next req did not exist before, so we're done! */
4740                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
4741                                 /* next request was removed from tree */
4742                                 ceph_mdsc_put_request(nextreq);
4743                                 goto restart;
4744                         }
4745                         ceph_mdsc_put_request(nextreq);  /* won't go away */
4746                 }
4747                 req = nextreq;
4748         }
4749         mutex_unlock(&mdsc->mutex);
4750         dout("wait_unsafe_requests done\n");
4751 }
4752
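/*
 * Flush dirty caps and wait until all write requests and cap flushes issued
 * so far have been acknowledged by the MDS.
 */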
4753 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4754 {
4755         u64 want_tid, want_flush;
4756
4757         if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
4758                 return;
4759
4760         dout("sync\n");
4761         mutex_lock(&mdsc->mutex);
4762         want_tid = mdsc->last_tid;
4763         mutex_unlock(&mdsc->mutex);
4764
4765         ceph_flush_dirty_caps(mdsc);
4766         spin_lock(&mdsc->cap_dirty_lock);
4767         want_flush = mdsc->last_cap_flush_tid;
4768         if (!list_empty(&mdsc->cap_flush_list)) {
4769                 struct ceph_cap_flush *cf =
4770                         list_last_entry(&mdsc->cap_flush_list,
4771                                         struct ceph_cap_flush, g_list);
4772                 cf->wake = true;
4773         }
4774         spin_unlock(&mdsc->cap_dirty_lock);
4775
4776         dout("sync want tid %lld flush_seq %lld\n",
4777              want_tid, want_flush);
4778
4779         wait_unsafe_requests(mdsc, want_tid);
4780         wait_caps_flush(mdsc, want_flush);
4781 }
4782
4783 /*
4784  * true if all sessions are closed, or we force unmount
4785  */
4786 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4787 {
4788         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4789                 return true;
4790         return atomic_read(&mdsc->num_sessions) <= skipped;
4791 }
4792
4793 /*
4794  * Called after the superblock has been made read-only.
4795  */
4796 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4797 {
4798         struct ceph_options *opts = mdsc->fsc->client->options;
4799         struct ceph_mds_session *session;
4800         int i;
4801         int skipped = 0;
4802
4803         dout("close_sessions\n");
4804
4805         /* close sessions */
4806         mutex_lock(&mdsc->mutex);
4807         for (i = 0; i < mdsc->max_sessions; i++) {
4808                 session = __ceph_lookup_mds_session(mdsc, i);
4809                 if (!session)
4810                         continue;
4811                 mutex_unlock(&mdsc->mutex);
4812                 mutex_lock(&session->s_mutex);
4813                 if (__close_session(mdsc, session) <= 0)
4814                         skipped++;
4815                 mutex_unlock(&session->s_mutex);
4816                 ceph_put_mds_session(session);
4817                 mutex_lock(&mdsc->mutex);
4818         }
4819         mutex_unlock(&mdsc->mutex);
4820
4821         dout("waiting for sessions to close\n");
4822         wait_event_timeout(mdsc->session_close_wq,
4823                            done_closing_sessions(mdsc, skipped),
4824                            ceph_timeout_jiffies(opts->mount_timeout));
4825
4826         /* tear down remaining sessions */
4827         mutex_lock(&mdsc->mutex);
4828         for (i = 0; i < mdsc->max_sessions; i++) {
4829                 if (mdsc->sessions[i]) {
4830                         session = ceph_get_mds_session(mdsc->sessions[i]);
4831                         __unregister_session(mdsc, session);
4832                         mutex_unlock(&mdsc->mutex);
4833                         mutex_lock(&session->s_mutex);
4834                         remove_session_caps(session);
4835                         mutex_unlock(&session->s_mutex);
4836                         ceph_put_mds_session(session);
4837                         mutex_lock(&mdsc->mutex);
4838                 }
4839         }
4840         WARN_ON(!list_empty(&mdsc->cap_delay_list));
4841         mutex_unlock(&mdsc->mutex);
4842
4843         ceph_cleanup_snapid_map(mdsc);
4844         ceph_cleanup_empty_realms(mdsc);
4845
4846         cancel_work_sync(&mdsc->cap_reclaim_work);
4847         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4848
4849         dout("stopped\n");
4850 }
4851
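/*
 * Forcibly close all sessions and wake any waiting requests so that a forced
 * unmount can make progress without the MDS.
 */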
4852 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4853 {
4854         struct ceph_mds_session *session;
4855         int mds;
4856
4857         dout("force umount\n");
4858
4859         mutex_lock(&mdsc->mutex);
4860         for (mds = 0; mds < mdsc->max_sessions; mds++) {
4861                 session = __ceph_lookup_mds_session(mdsc, mds);
4862                 if (!session)
4863                         continue;
4864
4865                 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4866                         __unregister_session(mdsc, session);
4867                 __wake_requests(mdsc, &session->s_waiting);
4868                 mutex_unlock(&mdsc->mutex);
4869
4870                 mutex_lock(&session->s_mutex);
4871                 __close_session(mdsc, session);
4872                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4873                         cleanup_session_requests(mdsc, session);
4874                         remove_session_caps(session);
4875                 }
4876                 mutex_unlock(&session->s_mutex);
4877                 ceph_put_mds_session(session);
4878
4879                 mutex_lock(&mdsc->mutex);
4880                 kick_requests(mdsc, mds);
4881         }
4882         __wake_requests(mdsc, &mdsc->waiting_for_map);
4883         mutex_unlock(&mdsc->mutex);
4884 }
4885
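/*
 * Tear down mdsc state once the sessions are gone: flush the delayed work,
 * then free the mdsmap, session array, cap and pool permission state.
 */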
4886 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
4887 {
4888         dout("stop\n");
4889         /*
4890          * Make sure the delayed work has stopped before releasing
4891          * the resources.
4892          *
4893          * cancel_delayed_work_sync() would only guarantee that the
4894          * currently executing work finishes, but the delayed work
4895          * may re-arm itself again after that.
4896          */
4897         flush_delayed_work(&mdsc->delayed_work);
4898
4899         if (mdsc->mdsmap)
4900                 ceph_mdsmap_destroy(mdsc->mdsmap);
4901         kfree(mdsc->sessions);
4902         ceph_caps_finalize(mdsc);
4903         ceph_pool_perm_destroy(mdsc);
4904 }
4905
4906 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
4907 {
4908         struct ceph_mds_client *mdsc = fsc->mdsc;
4909         dout("mdsc_destroy %p\n", mdsc);
4910
4911         if (!mdsc)
4912                 return;
4913
4914         /* flush out any connection work with references to us */
4915         ceph_msgr_flush();
4916
4917         ceph_mdsc_stop(mdsc);
4918
4919         ceph_metric_destroy(&mdsc->metric);
4920
4921         fsc->mdsc = NULL;
4922         kfree(mdsc);
4923         dout("mdsc_destroy %p done\n", mdsc);
4924 }
4925
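/*
 * Handle an FSMap message: look up the fscid matching the mds_namespace
 * mount option and subscribe to the corresponding MDS map.
 */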
4926 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4927 {
4928         struct ceph_fs_client *fsc = mdsc->fsc;
4929         const char *mds_namespace = fsc->mount_options->mds_namespace;
4930         void *p = msg->front.iov_base;
4931         void *end = p + msg->front.iov_len;
4932         u32 epoch;
4933         u32 num_fs;
4934         u32 mount_fscid = (u32)-1;
4935         int err = -EINVAL;
4936
4937         ceph_decode_need(&p, end, sizeof(u32), bad);
4938         epoch = ceph_decode_32(&p);
4939
4940         dout("handle_fsmap epoch %u\n", epoch);
4941
4942         /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
4943         ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
4944
4945         ceph_decode_32_safe(&p, end, num_fs, bad);
4946         while (num_fs-- > 0) {
4947                 void *info_p, *info_end;
4948                 u32 info_len;
4949                 u32 fscid, namelen;
4950
4951                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4952                 p += 2;         /* info_v, info_cv */
4953                 info_len = ceph_decode_32(&p);
4954                 ceph_decode_need(&p, end, info_len, bad);
4955                 info_p = p;
4956                 info_end = p + info_len;
4957                 p = info_end;
4958
4959                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
4960                 fscid = ceph_decode_32(&info_p);
4961                 namelen = ceph_decode_32(&info_p);
4962                 ceph_decode_need(&info_p, info_end, namelen, bad);
4963
4964                 if (mds_namespace &&
4965                     strlen(mds_namespace) == namelen &&
4966                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
4967                         mount_fscid = fscid;
4968                         break;
4969                 }
4970         }
4971
4972         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
4973         if (mount_fscid != (u32)-1) {
4974                 fsc->client->monc.fs_cluster_id = mount_fscid;
4975                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
4976                                    0, true);
4977                 ceph_monc_renew_subs(&fsc->client->monc);
4978         } else {
4979                 err = -ENOENT;
4980                 goto err_out;
4981         }
4982         return;
4983
4984 bad:
4985         pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
4986         ceph_umount_begin(mdsc->fsc->sb);
4987 err_out:
4988         mutex_lock(&mdsc->mutex);
4989         mdsc->mdsmap_err = err;
4990         __wake_requests(mdsc, &mdsc->waiting_for_map);
4991         mutex_unlock(&mdsc->mutex);
4992 }
4993
4994 /*
4995  * handle mds map update.
4996  */
4997 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4998 {
4999         u32 epoch;
5000         u32 maplen;
5001         void *p = msg->front.iov_base;
5002         void *end = p + msg->front.iov_len;
5003         struct ceph_mdsmap *newmap, *oldmap;
5004         struct ceph_fsid fsid;
5005         int err = -EINVAL;
5006
5007         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
5008         ceph_decode_copy(&p, &fsid, sizeof(fsid));
5009         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5010                 return;
5011         epoch = ceph_decode_32(&p);
5012         maplen = ceph_decode_32(&p);
5013         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
5014
5015         /* do we need it? */
5016         mutex_lock(&mdsc->mutex);
5017         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
5018                 dout("handle_map epoch %u <= our %u\n",
5019                      epoch, mdsc->mdsmap->m_epoch);
5020                 mutex_unlock(&mdsc->mutex);
5021                 return;
5022         }
5023
5024         newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
5025         if (IS_ERR(newmap)) {
5026                 err = PTR_ERR(newmap);
5027                 goto bad_unlock;
5028         }
5029
5030         /* swap into place */
5031         if (mdsc->mdsmap) {
5032                 oldmap = mdsc->mdsmap;
5033                 mdsc->mdsmap = newmap;
5034                 check_new_map(mdsc, newmap, oldmap);
5035                 ceph_mdsmap_destroy(oldmap);
5036         } else {
5037                 mdsc->mdsmap = newmap;  /* first mds map */
5038         }
5039         mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5040                                         MAX_LFS_FILESIZE);
5041
5042         __wake_requests(mdsc, &mdsc->waiting_for_map);
5043         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5044                           mdsc->mdsmap->m_epoch);
5045
5046         mutex_unlock(&mdsc->mutex);
5047         schedule_delayed(mdsc, 0);
5048         return;
5049
5050 bad_unlock:
5051         mutex_unlock(&mdsc->mutex);
5052 bad:
5053         pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
5054         ceph_umount_begin(mdsc->fsc->sb);
5055         return;
5056 }
5057
5058 static struct ceph_connection *mds_get_con(struct ceph_connection *con)
5059 {
5060         struct ceph_mds_session *s = con->private;
5061
5062         if (ceph_get_mds_session(s))
5063                 return con;
5064         return NULL;
5065 }
5066
5067 static void mds_put_con(struct ceph_connection *con)
5068 {
5069         struct ceph_mds_session *s = con->private;
5070
5071         ceph_put_mds_session(s);
5072 }
5073
5074 /*
5075  * if the client is unresponsive for long enough, the mds will kill
5076  * the session entirely.
5077  */
5078 static void mds_peer_reset(struct ceph_connection *con)
5079 {
5080         struct ceph_mds_session *s = con->private;
5081         struct ceph_mds_client *mdsc = s->s_mdsc;
5082
5083         pr_warn("mds%d closed our session\n", s->s_mds);
5084         send_mds_reconnect(mdsc, s);
5085 }
5086
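/*
 * Dispatch an incoming MDS message to the handler for its type.  The message
 * reference is dropped here.
 */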
5087 static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5088 {
5089         struct ceph_mds_session *s = con->private;
5090         struct ceph_mds_client *mdsc = s->s_mdsc;
5091         int type = le16_to_cpu(msg->hdr.type);
5092
5093         mutex_lock(&mdsc->mutex);
5094         if (__verify_registered_session(mdsc, s) < 0) {
5095                 mutex_unlock(&mdsc->mutex);
5096                 goto out;
5097         }
5098         mutex_unlock(&mdsc->mutex);
5099
5100         switch (type) {
5101         case CEPH_MSG_MDS_MAP:
5102                 ceph_mdsc_handle_mdsmap(mdsc, msg);
5103                 break;
5104         case CEPH_MSG_FS_MAP_USER:
5105                 ceph_mdsc_handle_fsmap(mdsc, msg);
5106                 break;
5107         case CEPH_MSG_CLIENT_SESSION:
5108                 handle_session(s, msg);
5109                 break;
5110         case CEPH_MSG_CLIENT_REPLY:
5111                 handle_reply(s, msg);
5112                 break;
5113         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
5114                 handle_forward(mdsc, s, msg);
5115                 break;
5116         case CEPH_MSG_CLIENT_CAPS:
5117                 ceph_handle_caps(s, msg);
5118                 break;
5119         case CEPH_MSG_CLIENT_SNAP:
5120                 ceph_handle_snap(mdsc, s, msg);
5121                 break;
5122         case CEPH_MSG_CLIENT_LEASE:
5123                 handle_lease(mdsc, s, msg);
5124                 break;
5125         case CEPH_MSG_CLIENT_QUOTA:
5126                 ceph_handle_quota(mdsc, s, msg);
5127                 break;
5128
5129         default:
5130                 pr_err("received unknown message type %d %s\n", type,
5131                        ceph_msg_type_name(type));
5132         }
5133 out:
5134         ceph_msg_put(msg);
5135 }
5136
5137 /*
5138  * authentication
5139  */
5140
5141 /*
5142  * Note: returned pointer is the address of a structure that's
5143  * managed separately.  Caller must *not* attempt to free it.
5144  */
5145 static struct ceph_auth_handshake *
5146 mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
5147 {
5148         struct ceph_mds_session *s = con->private;
5149         struct ceph_mds_client *mdsc = s->s_mdsc;
5150         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5151         struct ceph_auth_handshake *auth = &s->s_auth;
5152         int ret;
5153
5154         ret = __ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5155                                          force_new, proto, NULL, NULL);
5156         if (ret)
5157                 return ERR_PTR(ret);
5158
5159         return auth;
5160 }
5161
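/*
 * Handle an authorizer challenge from the MDS during the msgr1 handshake.
 */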
5162 static int mds_add_authorizer_challenge(struct ceph_connection *con,
5163                                     void *challenge_buf, int challenge_buf_len)
5164 {
5165         struct ceph_mds_session *s = con->private;
5166         struct ceph_mds_client *mdsc = s->s_mdsc;
5167         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5168
5169         return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
5170                                             challenge_buf, challenge_buf_len);
5171 }
5172
5173 static int mds_verify_authorizer_reply(struct ceph_connection *con)
5174 {
5175         struct ceph_mds_session *s = con->private;
5176         struct ceph_mds_client *mdsc = s->s_mdsc;
5177         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5178         struct ceph_auth_handshake *auth = &s->s_auth;
5179
5180         return ceph_auth_verify_authorizer_reply(ac, auth->authorizer,
5181                 auth->authorizer_reply_buf, auth->authorizer_reply_buf_len,
5182                 NULL, NULL, NULL, NULL);
5183 }
5184
5185 static int mds_invalidate_authorizer(struct ceph_connection *con)
5186 {
5187         struct ceph_mds_session *s = con->private;
5188         struct ceph_mds_client *mdsc = s->s_mdsc;
5189         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5190
5191         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
5192
5193         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
5194 }
5195
5196 static int mds_get_auth_request(struct ceph_connection *con,
5197                                 void *buf, int *buf_len,
5198                                 void **authorizer, int *authorizer_len)
5199 {
5200         struct ceph_mds_session *s = con->private;
5201         struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5202         struct ceph_auth_handshake *auth = &s->s_auth;
5203         int ret;
5204
5205         ret = ceph_auth_get_authorizer(ac, auth, CEPH_ENTITY_TYPE_MDS,
5206                                        buf, buf_len);
5207         if (ret)
5208                 return ret;
5209
5210         *authorizer = auth->authorizer_buf;
5211         *authorizer_len = auth->authorizer_buf_len;
5212         return 0;
5213 }
5214
5215 static int mds_handle_auth_reply_more(struct ceph_connection *con,
5216                                       void *reply, int reply_len,
5217                                       void *buf, int *buf_len,
5218                                       void **authorizer, int *authorizer_len)
5219 {
5220         struct ceph_mds_session *s = con->private;
5221         struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5222         struct ceph_auth_handshake *auth = &s->s_auth;
5223         int ret;
5224
5225         ret = ceph_auth_handle_svc_reply_more(ac, auth, reply, reply_len,
5226                                               buf, buf_len);
5227         if (ret)
5228                 return ret;
5229
5230         *authorizer = auth->authorizer_buf;
5231         *authorizer_len = auth->authorizer_buf_len;
5232         return 0;
5233 }
5234
5235 static int mds_handle_auth_done(struct ceph_connection *con,
5236                                 u64 global_id, void *reply, int reply_len,
5237                                 u8 *session_key, int *session_key_len,
5238                                 u8 *con_secret, int *con_secret_len)
5239 {
5240         struct ceph_mds_session *s = con->private;
5241         struct ceph_auth_client *ac = s->s_mdsc->fsc->client->monc.auth;
5242         struct ceph_auth_handshake *auth = &s->s_auth;
5243
5244         return ceph_auth_handle_svc_reply_done(ac, auth, reply, reply_len,
5245                                                session_key, session_key_len,
5246                                                con_secret, con_secret_len);
5247 }
5248
5249 static int mds_handle_auth_bad_method(struct ceph_connection *con,
5250                                       int used_proto, int result,
5251                                       const int *allowed_protos, int proto_cnt,
5252                                       const int *allowed_modes, int mode_cnt)
5253 {
5254         struct ceph_mds_session *s = con->private;
5255         struct ceph_mon_client *monc = &s->s_mdsc->fsc->client->monc;
5256         int ret;
5257
5258         if (ceph_auth_handle_bad_authorizer(monc->auth, CEPH_ENTITY_TYPE_MDS,
5259                                             used_proto, result,
5260                                             allowed_protos, proto_cnt,
5261                                             allowed_modes, mode_cnt)) {
5262                 ret = ceph_monc_validate_auth(monc);
5263                 if (ret)
5264                         return ret;
5265         }
5266
5267         return -EACCES;
5268 }
5269
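/*
 * Allocate a buffer for an incoming message, unless the connection already
 * has one prepared (con->in_msg).
 */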
5270 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
5271                                 struct ceph_msg_header *hdr, int *skip)
5272 {
5273         struct ceph_msg *msg;
5274         int type = (int) le16_to_cpu(hdr->type);
5275         int front_len = (int) le32_to_cpu(hdr->front_len);
5276
5277         if (con->in_msg)
5278                 return con->in_msg;
5279
5280         *skip = 0;
5281         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
5282         if (!msg) {
5283                 pr_err("unable to allocate msg type %d len %d\n",
5284                        type, front_len);
5285                 return NULL;
5286         }
5287
5288         return msg;
5289 }
5290
5291 static int mds_sign_message(struct ceph_msg *msg)
5292 {
5293        struct ceph_mds_session *s = msg->con->private;
5294        struct ceph_auth_handshake *auth = &s->s_auth;
5295
5296        return ceph_auth_sign_message(auth, msg);
5297 }
5298
5299 static int mds_check_message_signature(struct ceph_msg *msg)
5300 {
5301        struct ceph_mds_session *s = msg->con->private;
5302        struct ceph_auth_handshake *auth = &s->s_auth;
5303
5304        return ceph_auth_check_message_signature(auth, msg);
5305 }
5306
5307 static const struct ceph_connection_operations mds_con_ops = {
5308         .get = mds_get_con,
5309         .put = mds_put_con,
5310         .alloc_msg = mds_alloc_msg,
5311         .dispatch = mds_dispatch,
5312         .peer_reset = mds_peer_reset,
5313         .get_authorizer = mds_get_authorizer,
5314         .add_authorizer_challenge = mds_add_authorizer_challenge,
5315         .verify_authorizer_reply = mds_verify_authorizer_reply,
5316         .invalidate_authorizer = mds_invalidate_authorizer,
5317         .sign_message = mds_sign_message,
5318         .check_message_signature = mds_check_message_signature,
5319         .get_auth_request = mds_get_auth_request,
5320         .handle_auth_reply_more = mds_handle_auth_reply_more,
5321         .handle_auth_done = mds_handle_auth_done,
5322         .handle_auth_bad_method = mds_handle_auth_bad_method,
5323 };
5324
5325 /* eof */