fs/ceph/file.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
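/*
 * e.g. ceph_sys2wire(O_CREAT) expands to:
 *   if (flags & O_CREAT) { wire_flags |= CEPH_O_CREAT; flags &= ~O_CREAT; }
 */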

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
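
/*
 * In this file, the synchronous mode is implemented by ceph_sync_read()
 * and ceph_sync_write(), and the direct-io variant by
 * ceph_direct_read_write().
 */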

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES    64
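/* On 64-bit, that on-stack array is 64 * sizeof(struct page *) = 512 bytes. */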

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                size += bytes;

                for ( ; bytes; idx++, bvec_idx++) {
                        int len = min_t(int, bytes, PAGE_SIZE - start);

                        bvec_set_page(&bvecs[bvec_idx], pages[idx], len, start);
                        bytes -= len;
                        start = 0;
                }
        }

        return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}
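
/*
 * Sketch of the usual pairing (cf. ceph_direct_read_write() below):
 *
 *      len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
 *      if (len < 0)
 *              return len;
 *      ...attach bvecs to an OSD request and wait for it...
 *      put_bvecs(bvecs, num_bvecs, should_dirty);
 */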

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}
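
/*
 * On success the caller owns the returned request and must release it
 * with ceph_mdsc_put_request(), as ceph_renew_caps() and ceph_open()
 * below do.
 */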

static int ceph_init_file_info(struct inode *inode, struct file *file,
                                        int fmode, bool isdir)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mount_options *opt =
                ceph_inode_to_client(&ci->netfs.inode)->mount_options;
        struct ceph_file_info *fi;
        int ret;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
                        inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi)
                        return -ENOMEM;

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi)
                        return -ENOMEM;

                if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
                        fi->flags |= CEPH_F_SYNC;

                file->private_data = fi;
        }

        ceph_get_fmode(ci, fmode, 1);
        fi->fmode = fmode;

        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

        if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
                ret = ceph_uninline_data(file);
                if (ret < 0)
                        goto error;
        }

        return 0;

error:
        ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
        ceph_put_fmode(ci, fi->fmode, 1);
        kmem_cache_free(ceph_file_cachep, fi);
        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return ret;
}

/*
 * Initialize private struct file data.
 * If we fail, clean up by dropping the fmode reference on the ceph_inode.
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
                fallthrough;
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                                S_ISDIR(inode->i_mode));
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        __ceph_touch_fmode(ci, mdsc, fmode);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
        struct ceph_inode_info *cdst = ceph_inode(dst);
        struct ceph_inode_info *csrc = ceph_inode(src);

        spin_lock(&cdst->i_ceph_lock);
        if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
            !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
                memcpy(&cdst->i_cached_layout, &csrc->i_layout,
                        sizeof(cdst->i_cached_layout));
                rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
                                   ceph_try_get_string(csrc->i_layout.pool_ns));
        }
        spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, an inode
 * number, and either a lease on the dentry or complete dir info. If any of
 * those criteria are not satisfied, return 0 (no caps taken) so the caller
 * can fall back to a synchronous create.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
                                 struct ceph_file_layout *lo, u64 *pino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
        u64 ino;

        spin_lock(&ci->i_ceph_lock);
        /* No auth cap means no chance for Dc caps */
        if (!ci->i_auth_cap)
                goto no_async;

        /* Any delegated inos? */
        if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
                goto no_async;

        if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
                goto no_async;

        if ((__ceph_caps_issued(ci, NULL) & want) != want)
                goto no_async;

        if (d_in_lookup(dentry)) {
                if (!__ceph_dir_is_complete(ci))
                        goto no_async;
                spin_lock(&dentry->d_lock);
                di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
                spin_unlock(&dentry->d_lock);
        } else if (atomic_read(&ci->i_shared_gen) !=
                   READ_ONCE(di->lease_shared_gen)) {
                goto no_async;
        }

        ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
        if (!ino)
                goto no_async;

        *pino = ino;
        ceph_take_cap_refs(ci, want, false);
        memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
        rcu_assign_pointer(lo->pool_ns,
                           ceph_try_get_string(ci->i_cached_layout.pool_ns));
        got = want;
no_async:
        spin_unlock(&ci->i_ceph_lock);
        return got;
}
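
/*
 * On success the caller also holds a reference on lo->pool_ns and the cap
 * references taken above; ceph_atomic_open() below drops them via
 * ceph_put_string() and, through req->r_dir_caps,
 * ceph_mdsc_release_dir_caps().
 */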

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_mds_session *s = NULL;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_auth_cap)
                s = ceph_get_mds_session(ci->i_auth_cap->session);
        spin_unlock(&ci->i_ceph_lock);
        if (s) {
                int err = ceph_restore_deleg_ino(s, ino);
                if (err)
                        pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
                                ino, err);
                ceph_put_mds_session(s);
        }
}

static void wake_async_create_waiters(struct inode *inode,
                                      struct ceph_mds_session *session)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool check_cap = false;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
                ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
                wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);

                if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
                        ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
                        check_cap = true;
                }
        }
        ceph_kick_flushing_inode_caps(session, ci);
        spin_unlock(&ci->i_ceph_lock);

        if (check_cap)
                ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        struct dentry *dentry = req->r_dentry;
        struct inode *dinode = d_inode(dentry);
        struct inode *tinode = req->r_target_inode;
        int result = req->r_err ? req->r_err :
                        le32_to_cpu(req->r_reply_info.head->result);

        WARN_ON_ONCE(dinode && tinode && dinode != tinode);

        /* MDS changed -- caller must resubmit */
        if (result == -EJUKEBOX)
                goto out;

        mapping_set_error(req->r_parent->i_mapping, result);

        if (result) {
                int pathlen = 0;
                u64 base = 0;
                char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                  &base, 0);

                pr_warn("async create failure path=(%llx)%s result=%d!\n",
                        base, IS_ERR(path) ? "<<bad>>" : path, result);
                ceph_mdsc_free_path(path, pathlen);

                ceph_dir_clear_complete(req->r_parent);
                if (!d_unhashed(dentry))
                        d_drop(dentry);

                if (dinode) {
                        mapping_set_error(dinode->i_mapping, result);
                        ceph_inode_shutdown(dinode);
                        wake_async_create_waiters(dinode, req->r_session);
                }
        }

        if (tinode) {
                u64 ino = ceph_vino(tinode).ino;

                if (req->r_deleg_ino != ino)
                        pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
                                __func__, req->r_err, req->r_deleg_ino, ino);

                mapping_set_error(tinode->i_mapping, result);
                wake_async_create_waiters(tinode, req->r_session);
        } else if (!result) {
                pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
                        req->r_deleg_ino);
        }
out:
        ceph_mdsc_release_dir_caps(req);
}
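
/*
 * The -EJUKEBOX resubmit noted above: cf. the retry: label in
 * ceph_atomic_open() below, where an -EJUKEBOX submit result disables
 * try_async and falls back to a synchronous create.
 */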

static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
                                    struct file *file, umode_t mode,
                                    struct ceph_mds_request *req,
                                    struct ceph_acl_sec_ctx *as_ctx,
                                    struct ceph_file_layout *lo)
{
        int ret;
        char xattr_buf[4];
        struct ceph_mds_reply_inode in = { };
        struct ceph_mds_reply_info_in iinfo = { .in = &in };
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct inode *inode;
        struct timespec64 now;
        struct ceph_string *pool_ns;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
        struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                  .snap = CEPH_NOSNAP };

        ktime_get_real_ts64(&now);

        inode = ceph_get_inode(dentry->d_sb, vino);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        iinfo.inline_version = CEPH_INLINE_NONE;
        iinfo.change_attr = 1;
        ceph_encode_timespec64(&iinfo.btime, &now);

        if (req->r_pagelist) {
                iinfo.xattr_len = req->r_pagelist->length;
                iinfo.xattr_data = req->r_pagelist->mapped_tail;
        } else {
                /* fake it */
                iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
                iinfo.xattr_data = xattr_buf;
                memset(iinfo.xattr_data, 0, iinfo.xattr_len);
        }

        in.ino = cpu_to_le64(vino.ino);
        in.snapid = cpu_to_le64(CEPH_NOSNAP);
        in.version = cpu_to_le64(1);    // ???
        in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
        in.cap.cap_id = cpu_to_le64(1);
        in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
        in.cap.flags = CEPH_CAP_FLAG_AUTH;
        in.ctime = in.mtime = in.atime = iinfo.btime;
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
        in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
        if (dir->i_mode & S_ISGID) {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

                /* Directories always inherit the setgid bit. */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
        }
        in.mode = cpu_to_le32((u32)mode);

        in.nlink = cpu_to_le32(1);
        in.max_size = cpu_to_le64(lo->stripe_unit);

        ceph_file_layout_to_legacy(lo, &in.layout);
        /* lo is private, so pool_ns can't change */
        pool_ns = rcu_dereference_raw(lo->pool_ns);
        if (pool_ns) {
                iinfo.pool_ns_len = pool_ns->len;
                iinfo.pool_ns_data = pool_ns->str;
        }

        down_read(&mdsc->snap_rwsem);
        ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
                              req->r_fmode, NULL);
        up_read(&mdsc->snap_rwsem);
        if (ret) {
                dout("%s failed to fill inode: %d\n", __func__, ret);
                ceph_dir_clear_complete(dir);
                if (!d_unhashed(dentry))
                        d_drop(dentry);
                if (inode->i_state & I_NEW)
                        discard_new_inode(inode);
        } else {
                struct dentry *dn;

                dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
                        vino.ino, ceph_ino(dir), dentry->d_name.name);
                ceph_dir_clear_ordered(dir);
                ceph_init_inode_acls(inode, as_ctx);
                if (inode->i_state & I_NEW) {
                        /*
                         * If it's not I_NEW, then someone created this before
                         * we got here. Assume the server is aware of it at
                         * that point and don't worry about setting
                         * CEPH_I_ASYNC_CREATE.
                         */
                        ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
                        unlock_new_inode(inode);
                }
                if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
                        if (!d_unhashed(dentry))
                                d_drop(dentry);
                        dn = d_splice_alias(inode, dentry);
                        WARN_ON_ONCE(dn && dn != dentry);
                }
                file->f_mode |= FMODE_CREATED;
                ret = finish_open(file, dentry, ceph_open);
        }

        spin_lock(&dentry->d_lock);
        di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
        wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
        spin_unlock(&dentry->d_lock);

        return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acl_sec_ctx as_ctx = {};
        bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        err = ceph_wait_on_conflict_unlink(dentry);
        if (err)
                return err;
        /*
         * Do not truncate the file, since atomic_open is called before the
         * permission check. The caller will do the truncation afterward.
         */
        flags &= ~O_TRUNC;

        if (flags & O_CREAT) {
                if (ceph_quota_is_max_files_exceeded(dir))
                        return -EDQUOT;
                err = ceph_pre_init_acls(dir, &mode, &as_ctx);
                if (err < 0)
                        return err;
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
                /* Async create can't handle more than a page of xattrs */
                if (as_ctx.pagelist &&
                    !list_is_singular(&as_ctx.pagelist->head))
                        try_async = false;
        } else if (!d_in_lookup(dentry)) {
                /* If it's not being looked up, it's negative */
                return -ENOENT;
        }
retry:
        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_ctx;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);
        req->r_parent = dir;
        ihold(dir);

        if (flags & O_CREAT) {
                struct ceph_file_layout lo;

                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
                                     CEPH_CAP_XATTR_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (as_ctx.pagelist) {
                        req->r_pagelist = as_ctx.pagelist;
                        as_ctx.pagelist = NULL;
                }
                if (try_async &&
                    (req->r_dir_caps =
                      try_prep_async_create(dir, dentry, &lo,
                                            &req->r_deleg_ino))) {
                        struct ceph_dentry_info *di = ceph_dentry(dentry);

                        set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
                        req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
                        req->r_callback = ceph_async_create_cb;

                        spin_lock(&dentry->d_lock);
                        di->flags |= CEPH_DENTRY_ASYNC_CREATE;
                        spin_unlock(&dentry->d_lock);

                        err = ceph_mdsc_submit_request(mdsc, dir, req);
                        if (!err) {
                                err = ceph_finish_async_create(dir, dentry,
                                                        file, mode, req,
                                                        &as_ctx, &lo);
                        } else if (err == -EJUKEBOX) {
                                restore_deleg_ino(dir, req->r_deleg_ino);
                                ceph_mdsc_put_request(req);
                                try_async = false;
                                ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                                goto retry;
                        }
                        ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                        goto out_req;
                }
        }

        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
        if (err == -ENOENT) {
                dentry = ceph_handle_snapdir(req, dentry);
                if (IS_ERR(dentry)) {
                        err = PTR_ERR(dentry);
                        goto out_req;
                }
                err = 0;
        }

        if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        struct inode *newino = d_inode(dentry);

                        cache_file_layout(dir, newino);
                        ceph_init_inode_acls(newino, &as_ctx);
                        file->f_mode |= FMODE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open);
        }
out_req:
        ceph_mdsc_put_request(req);
out_ctx:
        ceph_release_acl_sec_ctx(&as_ctx);
        dout("atomic_open result=%d\n", err);
        return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
                dout("release inode %p dir file %p\n", inode, file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

                ceph_put_fmode(ci, dfi->file_info.fmode, 1);

                if (dfi->last_readdir)
                        ceph_mdsc_put_request(dfi->last_readdir);
                kfree(dfi->last_name);
                kfree(dfi->dir_info);
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
                dout("release inode %p regular file %p\n", inode, file);
                WARN_ON(!list_empty(&fi->rw_contexts));

                ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
                ceph_put_fmode(ci, fi->fmode, 1);

                kmem_cache_free(ceph_file_cachep, fi);
        }

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

enum {
        HAVE_RETRIED = 1,
        CHECK_EOF =    2,
        READ_INLINE =  3,
};
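
/*
 * Out-of-band values for the *retry_op parameter used by the sync read
 * path: ceph_sync_read() below reports CHECK_EOF when a short read stopped
 * at i_size; the other values are consumed by the caller's retry loop (not
 * shown in this excerpt).
 */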

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans an object boundary, just do multiple reads.  (That's
 * not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we only
 * need to return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                              int *retry_op)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);
        u64 i_size = i_size_read(inode);

        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           off, off + len - 1);
        if (ret < 0)
                return ret;

        ret = 0;
        while ((len = iov_iter_count(to)) > 0) {
                struct ceph_osd_request *req;
                struct page **pages;
                int num_pages;
                size_t page_off;
                bool more;
                int idx;
                size_t left;

                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                        ci->i_vino, off, &len, 0, 1,
                                        CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                        NULL, ci->i_truncate_seq,
                                        ci->i_truncate_size, false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                more = len < iov_iter_count(to);

                num_pages = calc_pages_for(off, len);
                page_off = off & ~PAGE_MASK;
                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
                                                 false, false);
                ceph_osdc_start_request(osdc, req);
                ret = ceph_osdc_wait_request(osdc, req);

                ceph_update_read_metrics(&fsc->mdsc->metric,
                                         req->r_start_latency,
                                         req->r_end_latency,
                                         len, ret);

                ceph_osdc_put_request(req);

                i_size = i_size_read(inode);
                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
                     off, len, ret, i_size, (more ? " MORE" : ""));

                if (ret == -ENOENT)
                        ret = 0;
                if (ret >= 0 && ret < len && (off + ret < i_size)) {
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
                        dout("sync_read zero gap %llu~%llu\n",
                             off + ret, off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                idx = 0;
                left = ret > 0 ? ret : 0;
                while (left > 0) {
                        size_t len, copied;
                        page_off = off & ~PAGE_MASK;
                        len = min_t(size_t, left, PAGE_SIZE - page_off);
                        SetPageUptodate(pages[idx]);
                        copied = copy_page_to_iter(pages[idx++],
                                                   page_off, len, to);
                        off += copied;
                        left -= copied;
                        if (copied < len) {
                                ret = -EFAULT;
                                break;
                        }
                }
                ceph_release_page_vector(pages, num_pages);

                if (ret < 0) {
                        if (ret == -EBLOCKLISTED)
                                fsc->blocklisted = true;
                        break;
                }

                if (off >= i_size || !more)
                        break;
        }

        if (off > iocb->ki_pos) {
                if (off >= i_size) {
                        *retry_op = CHECK_EOF;
                        ret = i_size - iocb->ki_pos;
                        iocb->ki_pos = i_size;
                } else {
                        ret = off - iocb->ki_pos;
                        iocb->ki_pos = off;
                }
        }

        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
        return ret;
}

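/*
 * One ceph_aio_request fans out into one or more OSD requests below;
 * pending_reqs counts those still in flight, and ceph_aio_complete()
 * invokes the iocb's ki_complete() only when the last request drops the
 * count to zero.
 */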
struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        bool write;
        bool should_dirty;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec64 mtime;
        struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        if (aio_req->iocb->ki_flags & IOCB_DIRECT)
                inode_dio_end(inode);

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
                }

                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);

        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
        unsigned int len = osd_data->bvec_pos.iter.bi_size;

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);

        dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->inode_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && len > rc) {
                        struct iov_iter i;
                        int zlen = len - rc;

                        /*
                         * If the read was satisfied by a single OSD request,
                         * it may extend past EOF. Otherwise the read is
                         * within i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs, len);
                        iov_iter_advance(&i, rc);
                        iov_iter_zero(zlen, &i);
                }
        }

        /* r_start_latency == 0 means the request was not submitted */
        if (req->r_start_latency) {
                if (aio_req->write)
                        ceph_update_write_metrics(metric, req->r_start_latency,
                                                  req->r_end_latency, len, rc);
                else
                        ceph_update_read_metrics(metric, req->r_start_latency,
                                                 req->r_end_latency, len, rc);
        }

        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
                  aio_req->should_dirty);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
                        false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        req->r_ops[0] = orig_req->r_ops[0];

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ceph_osdc_start_request(req->r_osdc, req);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_client_metric *metric = &fsc->mdsc->metric;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret = 0;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;
        bool should_dirty = !write && user_backed_iter(iter);

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
             (write ? "write" : "read"), file, pos, (unsigned)count,
             snapc, snapc ? snapc->seq : 0);

        if (write) {
                int ret2;

                ceph_fscache_invalidate(inode, true);

                ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret2);

                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = iov_iter_count(iter);
                ssize_t len;

                if (write)
                        size = min_t(u64, size, fsc->mount_options->wsize);
                else
                        size = min_t(u64, size, fsc->mount_options->rsize);

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
                if (len < 0) {
                        ceph_osdc_put_request(req);
                        ret = len;
                        break;
                }
                if (len != size)
                        osd_req_op_extent_update(req, 0, len);

                /*
                 * To simplify error handling, allow AIO only when the IO is
                 * within i_size or can be satisfied by a single OSD request.
                 */
1347                 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1348                     (len == count || pos + count <= i_size_read(inode))) {
1349                         aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1350                         if (aio_req) {
1351                                 aio_req->iocb = iocb;
1352                                 aio_req->write = write;
1353                                 aio_req->should_dirty = should_dirty;
1354                                 INIT_LIST_HEAD(&aio_req->osd_reqs);
1355                                 if (write) {
1356                                         aio_req->mtime = mtime;
1357                                         swap(aio_req->prealloc_cf, *pcf);
1358                                 }
1359                         }
1360                         /* ignore error */
1361                 }
1362
1363                 if (write) {
1364                         /*
1365                          * throw out any page cache pages in this range. this
1366                          * may block.
1367                          */
1368                         truncate_inode_pages_range(inode->i_mapping, pos,
1369                                                    PAGE_ALIGN(pos + len) - 1);
1370
1371                         req->r_mtime = mtime;
1372                 }
1373
1374                 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1375
1376                 if (aio_req) {
1377                         aio_req->total_len += len;
1378                         aio_req->num_reqs++;
1379                         atomic_inc(&aio_req->pending_reqs);
1380
1381                         req->r_callback = ceph_aio_complete_req;
1382                         req->r_inode = inode;
1383                         req->r_priv = aio_req;
1384                         list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1385
1386                         pos += len;
1387                         continue;
1388                 }
1389
1390                 ceph_osdc_start_request(req->r_osdc, req);
1391                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1392
1393                 if (write)
1394                         ceph_update_write_metrics(metric, req->r_start_latency,
1395                                                   req->r_end_latency, len, ret);
1396                 else
1397                         ceph_update_read_metrics(metric, req->r_start_latency,
1398                                                  req->r_end_latency, len, ret);
1399
1400                 size = i_size_read(inode);
1401                 if (!write) {
1402                         if (ret == -ENOENT)
1403                                 ret = 0;
1404                         if (ret >= 0 && ret < len && pos + ret < size) {
1405                                 struct iov_iter i;
1406                                 int zlen = min_t(size_t, len - ret,
1407                                                  size - pos - ret);
1408
1409                                 iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
1410                                 iov_iter_advance(&i, ret);
1411                                 iov_iter_zero(zlen, &i);
1412                                 ret += zlen;
1413                         }
1414                         if (ret >= 0)
1415                                 len = ret;
1416                 }
1417
1418                 put_bvecs(bvecs, num_pages, should_dirty);
1419                 ceph_osdc_put_request(req);
1420                 if (ret < 0)
1421                         break;
1422
1423                 pos += len;
1424                 if (!write && pos >= size)
1425                         break;
1426
1427                 if (write && pos > size) {
1428                         if (ceph_inode_set_size(inode, pos))
1429                                 ceph_check_caps(ceph_inode(inode),
1430                                                 CHECK_CAPS_AUTHONLY);
1431                 }
1432         }
1433
1434         if (aio_req) {
1435                 LIST_HEAD(osd_reqs);
1436
1437                 if (aio_req->num_reqs == 0) {
1438                         kfree(aio_req);
1439                         return ret;
1440                 }
1441
1442                 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1443                                               CEPH_CAP_FILE_RD);
1444
1445                 list_splice(&aio_req->osd_reqs, &osd_reqs);
1446                 inode_dio_begin(inode);
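                     /*
                      * Kick off every queued OSD request; completions arrive
                      * asynchronously via ceph_aio_complete_req().  If an
                      * earlier step already failed, complete the remaining
                      * requests immediately with that error.
                      */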
1447                 while (!list_empty(&osd_reqs)) {
1448                         req = list_first_entry(&osd_reqs,
1449                                                struct ceph_osd_request,
1450                                                r_private_item);
1451                         list_del_init(&req->r_private_item);
1452                         if (ret >= 0)
1453                                 ceph_osdc_start_request(req->r_osdc, req);
1454                         if (ret < 0) {
1455                                 req->r_result = ret;
1456                                 ceph_aio_complete_req(req);
1457                         }
1458                 }
1459                 return -EIOCBQUEUED;
1460         }
1461
1462         if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1463                 ret = pos - iocb->ki_pos;
1464                 iocb->ki_pos = pos;
1465         }
1466         return ret;
1467 }
1468
1469 /*
1470  * Synchronous write, straight from __user pointer or user pages.
1471  *
1472  * If the write spans an object boundary, just do multiple writes.  (For a
1473  * correct atomic write, we should e.g. take write locks on all
1474  * objects, rollback on failure, etc.)
1475  */
1476 static ssize_t
1477 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1478                 struct ceph_snap_context *snapc)
1479 {
1480         struct file *file = iocb->ki_filp;
1481         struct inode *inode = file_inode(file);
1482         struct ceph_inode_info *ci = ceph_inode(inode);
1483         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1484         struct ceph_vino vino;
1485         struct ceph_osd_request *req;
1486         struct page **pages;
1487         u64 len;
1488         int num_pages;
1489         int written = 0;
1490         int flags;
1491         int ret;
1492         bool check_caps = false;
1493         struct timespec64 mtime = current_time(inode);
1494         size_t count = iov_iter_count(from);
1495
1496         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1497                 return -EROFS;
1498
1499         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1500              file, pos, (unsigned)count, snapc, snapc->seq);
1501
1502         ret = filemap_write_and_wait_range(inode->i_mapping,
1503                                            pos, pos + count - 1);
1504         if (ret < 0)
1505                 return ret;
1506
1507         ceph_fscache_invalidate(inode, false);
1508         ret = invalidate_inode_pages2_range(inode->i_mapping,
1509                                             pos >> PAGE_SHIFT,
1510                                             (pos + count - 1) >> PAGE_SHIFT);
1511         if (ret < 0)
1512                 dout("invalidate_inode_pages2_range returned %d\n", ret);
1513
1514         flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1515
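             /*
              * Write one object extent per iteration:
              * ceph_osdc_new_request() trims 'len' to the end of the
              * current object, so a write spanning objects simply takes
              * several trips around this loop.
              */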
1516         while ((len = iov_iter_count(from)) > 0) {
1517                 size_t left;
1518                 int n;
1519
1520                 vino = ceph_vino(inode);
1521                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1522                                             vino, pos, &len, 0, 1,
1523                                             CEPH_OSD_OP_WRITE, flags, snapc,
1524                                             ci->i_truncate_seq,
1525                                             ci->i_truncate_size,
1526                                             false);
1527                 if (IS_ERR(req)) {
1528                         ret = PTR_ERR(req);
1529                         break;
1530                 }
1531
1532                 /*
1533                  * Write from the beginning of the first page,
1534                  * regardless of IO alignment.
1535                  */
1536                 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1537
1538                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1539                 if (IS_ERR(pages)) {
1540                         ret = PTR_ERR(pages);
1541                         goto out;
1542                 }
1543
1544                 left = len;
1545                 for (n = 0; n < num_pages; n++) {
1546                         size_t plen = min_t(size_t, left, PAGE_SIZE);
1547                         ret = copy_page_from_iter(pages[n], 0, plen, from);
1548                         if (ret != plen) {
1549                                 ret = -EFAULT;
1550                                 break;
1551                         }
1552                         left -= ret;
1553                 }
1554
1555                 if (ret < 0) {
1556                         ceph_release_page_vector(pages, num_pages);
1557                         goto out;
1558                 }
1559
1560                 req->r_inode = inode;
1561
1562                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1563                                                 false, true);
1564
1565                 req->r_mtime = mtime;
1566                 ceph_osdc_start_request(&fsc->client->osdc, req);
1567                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1568
1569                 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1570                                           req->r_end_latency, len, ret);
1571 out:
1572                 ceph_osdc_put_request(req);
1573                 if (ret != 0) {
1574                         ceph_set_error_write(ci);
1575                         break;
1576                 }
1577
1578                 ceph_clear_error_write(ci);
1579                 pos += len;
1580                 written += len;
1581                 if (pos > i_size_read(inode)) {
1582                         check_caps = ceph_inode_set_size(inode, pos);
1583                         if (check_caps)
1584                                 ceph_check_caps(ceph_inode(inode),
1585                                                 CHECK_CAPS_AUTHONLY);
1586                 }
1587
1588         }
1589
1590         if (ret != -EOLDSNAPC && written > 0) {
1591                 ret = written;
1592                 iocb->ki_pos = pos;
1593         }
1594         return ret;
1595 }
1596
1597 /*
1598  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1599  * Atomically grab references, so that those bits are not released
1600  * back to the MDS mid-read.
1601  *
1602  * Hmm, the sync read case isn't actually async... should it be?
1603  */
1604 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1605 {
1606         struct file *filp = iocb->ki_filp;
1607         struct ceph_file_info *fi = filp->private_data;
1608         size_t len = iov_iter_count(to);
1609         struct inode *inode = file_inode(filp);
1610         struct ceph_inode_info *ci = ceph_inode(inode);
1611         bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1612         ssize_t ret;
1613         int want = 0, got = 0;
1614         int retry_op = 0, read = 0;
1615
1616 again:
1617         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1618              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1619
1620         if (ceph_inode_is_shutdown(inode))
1621                 return -ESTALE;
1622
1623         if (direct_lock)
1624                 ceph_start_io_direct(inode);
1625         else
1626                 ceph_start_io_read(inode);
1627
1628         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1629                 want |= CEPH_CAP_FILE_CACHE;
1630         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1631                 want |= CEPH_CAP_FILE_LAZYIO;
1632
1633         ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1634         if (ret < 0) {
1635                 if (direct_lock)
1636                         ceph_end_io_direct(inode);
1637                 else
1638                         ceph_end_io_read(inode);
1639                 return ret;
1640         }
1641
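             /*
              * Bypass the page cache if we hold no FILE_CACHE/LAZYIO
              * caps, if O_DIRECT was requested, or if the file demands
              * sync IO.
              */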
1642         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1643             (iocb->ki_flags & IOCB_DIRECT) ||
1644             (fi->flags & CEPH_F_SYNC)) {
1645
1646                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1647                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1648                      ceph_cap_string(got));
1649
1650                 if (!ceph_has_inline_data(ci)) {
1651                         if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1652                                 ret = ceph_direct_read_write(iocb, to,
1653                                                              NULL, NULL);
1654                                 if (ret >= 0 && ret < len)
1655                                         retry_op = CHECK_EOF;
1656                         } else {
1657                                 ret = ceph_sync_read(iocb, to, &retry_op);
1658                         }
1659                 } else {
1660                         retry_op = READ_INLINE;
1661                 }
1662         } else {
1663                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1664                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1665                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1666                      ceph_cap_string(got));
1667                 ceph_add_rw_context(fi, &rw_ctx);
1668                 ret = generic_file_read_iter(iocb, to);
1669                 ceph_del_rw_context(fi, &rw_ctx);
1670         }
1671
1672         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1673              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1674         ceph_put_cap_refs(ci, got);
1675
1676         if (direct_lock)
1677                 ceph_end_io_direct(inode);
1678         else
1679                 ceph_end_io_read(inode);
1680
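             /*
              * The read may need a retry: READ_INLINE means the data is
              * stored inline in the MDS and must be fetched via getattr;
              * CHECK_EOF means a short read that may just have hit a
              * hole, so recheck the size and keep reading.
              */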
1681         if (retry_op > HAVE_RETRIED && ret >= 0) {
1682                 int statret;
1683                 struct page *page = NULL;
1684                 loff_t i_size;
1685                 if (retry_op == READ_INLINE) {
1686                         page = __page_cache_alloc(GFP_KERNEL);
1687                         if (!page)
1688                                 return -ENOMEM;
1689                 }
1690
1691                 statret = __ceph_do_getattr(inode, page,
1692                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
1693                 if (statret < 0) {
1694                         if (page)
1695                                 __free_page(page);
1696                         if (statret == -ENODATA) {
1697                                 BUG_ON(retry_op != READ_INLINE);
1698                                 goto again;
1699                         }
1700                         return statret;
1701                 }
1702
1703                 i_size = i_size_read(inode);
1704                 if (retry_op == READ_INLINE) {
1705                         BUG_ON(ret > 0 || read > 0);
1706                         if (iocb->ki_pos < i_size &&
1707                             iocb->ki_pos < PAGE_SIZE) {
1708                                 loff_t end = min_t(loff_t, i_size,
1709                                                    iocb->ki_pos + len);
1710                                 end = min_t(loff_t, end, PAGE_SIZE);
1711                                 if (statret < end)
1712                                         zero_user_segment(page, statret, end);
1713                                 ret = copy_page_to_iter(page,
1714                                                 iocb->ki_pos & ~PAGE_MASK,
1715                                                 end - iocb->ki_pos, to);
1716                                 iocb->ki_pos += ret;
1717                                 read += ret;
1718                         }
1719                         if (iocb->ki_pos < i_size && read < len) {
1720                                 size_t zlen = min_t(size_t, len - read,
1721                                                     i_size - iocb->ki_pos);
1722                                 ret = iov_iter_zero(zlen, to);
1723                                 iocb->ki_pos += ret;
1724                                 read += ret;
1725                         }
1726                         __free_pages(page, 0);
1727                         return read;
1728                 }
1729
1730                 /* hit EOF or hole? */
1731                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1732                     ret < len) {
1733                         dout("sync_read hit hole, ppos %lld < size %lld"
1734                              ", reading more\n", iocb->ki_pos, i_size);
1735
1736                         read += ret;
1737                         len -= ret;
1738                         retry_op = HAVE_RETRIED;
1739                         goto again;
1740                 }
1741         }
1742
1743         if (ret >= 0)
1744                 ret += read;
1745
1746         return ret;
1747 }
1748
1749 /*
1750  * Wrap filemap_splice_read with checks for cap bits on the inode.
1751  * Atomically grab references, so that those bits are not released
1752  * back to the MDS mid-read.
1753  */
1754 static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
1755                                 struct pipe_inode_info *pipe,
1756                                 size_t len, unsigned int flags)
1757 {
1758         struct ceph_file_info *fi = in->private_data;
1759         struct inode *inode = file_inode(in);
1760         struct ceph_inode_info *ci = ceph_inode(inode);
1761         ssize_t ret;
1762         int want = 0, got = 0;
1763         CEPH_DEFINE_RW_CONTEXT(rw_ctx, 0);
1764
1765         dout("splice_read %p %llx.%llx %llu~%zu trying to get caps on %p\n",
1766              inode, ceph_vinop(inode), *ppos, len, inode);
1767
1768         if (ceph_inode_is_shutdown(inode))
1769                 return -ESTALE;
1770
1771         if (ceph_has_inline_data(ci) ||
1772             (fi->flags & CEPH_F_SYNC))
1773                 return copy_splice_read(in, ppos, pipe, len, flags);
1774
1775         ceph_start_io_read(inode);
1776
1777         want = CEPH_CAP_FILE_CACHE;
1778         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1779                 want |= CEPH_CAP_FILE_LAZYIO;
1780
1781         ret = ceph_get_caps(in, CEPH_CAP_FILE_RD, want, -1, &got);
1782         if (ret < 0)
1783                 goto out_end;
1784
1785         if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) == 0) {
1786                 dout("splice_read/sync %p %llx.%llx %llu~%zu got cap refs on %s\n",
1787                      inode, ceph_vinop(inode), *ppos, len,
1788                      ceph_cap_string(got));
1789
1790                 ceph_put_cap_refs(ci, got);
1791                 ceph_end_io_read(inode);
1792                 return copy_splice_read(in, ppos, pipe, len, flags);
1793         }
1794
1795         dout("splice_read %p %llx.%llx %llu~%zu got cap refs on %s\n",
1796              inode, ceph_vinop(inode), *ppos, len, ceph_cap_string(got));
1797
1798         rw_ctx.caps = got;
1799         ceph_add_rw_context(fi, &rw_ctx);
1800         ret = filemap_splice_read(in, ppos, pipe, len, flags);
1801         ceph_del_rw_context(fi, &rw_ctx);
1802
1803         dout("splice_read %p %llx.%llx dropping cap refs on %s = %zd\n",
1804              inode, ceph_vinop(inode), ceph_cap_string(got), ret);
1805
1806         ceph_put_cap_refs(ci, got);
1807 out_end:
1808         ceph_end_io_read(inode);
1809         return ret;
1810 }
1811
1812 /*
1813  * Take cap references to avoid releasing caps to MDS mid-write.
1814  *
1815  * If we are synchronous, and write with an old snap context, the OSD
1816  * may return EOLDSNAPC.  In that case, retry the write _after_
1817  * dropping our cap refs and allowing the pending snap to logically
1818  * complete _before_ this write occurs.
1819  *
1820  * If we are near ENOSPC, write synchronously.
1821  */
1822 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1823 {
1824         struct file *file = iocb->ki_filp;
1825         struct ceph_file_info *fi = file->private_data;
1826         struct inode *inode = file_inode(file);
1827         struct ceph_inode_info *ci = ceph_inode(inode);
1828         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1829         struct ceph_osd_client *osdc = &fsc->client->osdc;
1830         struct ceph_cap_flush *prealloc_cf;
1831         ssize_t count, written = 0;
1832         int err, want = 0, got;
1833         bool direct_lock = false;
1834         u32 map_flags;
1835         u64 pool_flags;
1836         loff_t pos;
1837         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1838
1839         if (ceph_inode_is_shutdown(inode))
1840                 return -ESTALE;
1841
1842         if (ceph_snap(inode) != CEPH_NOSNAP)
1843                 return -EROFS;
1844
1845         prealloc_cf = ceph_alloc_cap_flush();
1846         if (!prealloc_cf)
1847                 return -ENOMEM;
1848
1849         if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1850                 direct_lock = true;
1851
1852 retry_snap:
1853         if (direct_lock)
1854                 ceph_start_io_direct(inode);
1855         else
1856                 ceph_start_io_write(inode);
1857
1858         if (iocb->ki_flags & IOCB_APPEND) {
1859                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1860                 if (err < 0)
1861                         goto out;
1862         }
1863
1864         err = generic_write_checks(iocb, from);
1865         if (err <= 0)
1866                 goto out;
1867
1868         pos = iocb->ki_pos;
1869         if (unlikely(pos >= limit)) {
1870                 err = -EFBIG;
1871                 goto out;
1872         } else {
1873                 iov_iter_truncate(from, limit - pos);
1874         }
1875
1876         count = iov_iter_count(from);
1877         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1878                 err = -EDQUOT;
1879                 goto out;
1880         }
1881
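             /* refuse the write early if the cluster or the pool is full */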
1882         down_read(&osdc->lock);
1883         map_flags = osdc->osdmap->flags;
1884         pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1885         up_read(&osdc->lock);
1886         if ((map_flags & CEPH_OSDMAP_FULL) ||
1887             (pool_flags & CEPH_POOL_FLAG_FULL)) {
1888                 err = -ENOSPC;
1889                 goto out;
1890         }
1891
1892         err = file_remove_privs(file);
1893         if (err)
1894                 goto out;
1895
1896         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1897              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1898         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1899                 want |= CEPH_CAP_FILE_BUFFER;
1900         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1901                 want |= CEPH_CAP_FILE_LAZYIO;
1902         got = 0;
1903         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1904         if (err < 0)
1905                 goto out;
1906
1907         err = file_update_time(file);
1908         if (err)
1909                 goto out_caps;
1910
1911         inode_inc_iversion_raw(inode);
1912
1913         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1914              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1915
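             /*
              * Take the sync/direct write path if we hold no
              * FILE_BUFFER/LAZYIO caps, if O_DIRECT or sync IO was
              * requested, or if an earlier write failed
              * (CEPH_I_ERROR_WRITE).
              */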
1916         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1917             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1918             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1919                 struct ceph_snap_context *snapc;
1920                 struct iov_iter data;
1921
1922                 spin_lock(&ci->i_ceph_lock);
1923                 if (__ceph_have_pending_cap_snap(ci)) {
1924                         struct ceph_cap_snap *capsnap =
1925                                         list_last_entry(&ci->i_cap_snaps,
1926                                                         struct ceph_cap_snap,
1927                                                         ci_item);
1928                         snapc = ceph_get_snap_context(capsnap->context);
1929                 } else {
1930                         BUG_ON(!ci->i_head_snapc);
1931                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1932                 }
1933                 spin_unlock(&ci->i_ceph_lock);
1934
1935                 /* keep 'from' intact in case we need to retry the write */
1936                 data = *from;
1937                 if (iocb->ki_flags & IOCB_DIRECT)
1938                         written = ceph_direct_read_write(iocb, &data, snapc,
1939                                                          &prealloc_cf);
1940                 else
1941                         written = ceph_sync_write(iocb, &data, pos, snapc);
1942                 if (direct_lock)
1943                         ceph_end_io_direct(inode);
1944                 else
1945                         ceph_end_io_write(inode);
1946                 if (written > 0)
1947                         iov_iter_advance(from, written);
1948                 ceph_put_snap_context(snapc);
1949         } else {
1950                 /*
1951                  * No need to acquire the i_truncate_mutex: the MDS
1952                  * revokes Fwb caps before sending a truncate message
1953                  * to us, and we can't get the Fwb cap while a
1954                  * vmtruncate is pending, so write and vmtruncate
1955                  * cannot run at the same time.
1956                  */
1957                 written = generic_perform_write(iocb, from);
1958                 ceph_end_io_write(inode);
1959         }
1960
1961         if (written >= 0) {
1962                 int dirty;
1963
1964                 spin_lock(&ci->i_ceph_lock);
1965                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1966                                                &prealloc_cf);
1967                 spin_unlock(&ci->i_ceph_lock);
1968                 if (dirty)
1969                         __mark_inode_dirty(inode, dirty);
1970                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1971                         ceph_check_caps(ci, CHECK_CAPS_FLUSH);
1972         }
1973
1974         dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1975              inode, ceph_vinop(inode), pos, (unsigned)count,
1976              ceph_cap_string(got));
1977         ceph_put_cap_refs(ci, got);
1978
1979         if (written == -EOLDSNAPC) {
1980                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1981                      inode, ceph_vinop(inode), pos, (unsigned)count);
1982                 goto retry_snap;
1983         }
1984
1985         if (written >= 0) {
1986                 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1987                     (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1988                         iocb->ki_flags |= IOCB_DSYNC;
1989                 written = generic_write_sync(iocb, written);
1990         }
1991
1992         goto out_unlocked;
1993 out_caps:
1994         ceph_put_cap_refs(ci, got);
1995 out:
1996         if (direct_lock)
1997                 ceph_end_io_direct(inode);
1998         else
1999                 ceph_end_io_write(inode);
2000 out_unlocked:
2001         ceph_free_cap_flush(prealloc_cf);
2002         return written ? written : err;
2003 }
2004
2005 /*
2006  * llseek.  Be sure to verify the file size on SEEK_END (and SEEK_DATA/SEEK_HOLE).
2007  */
2008 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
2009 {
2010         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
2011                 struct inode *inode = file_inode(file);
2012                 int ret;
2013
2014                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
2015                 if (ret < 0)
2016                         return ret;
2017         }
2018         return generic_file_llseek(file, offset, whence);
2019 }
2020
2021 static inline void ceph_zero_partial_page(
2022         struct inode *inode, loff_t offset, unsigned size)
2023 {
2024         struct page *page;
2025         pgoff_t index = offset >> PAGE_SHIFT;
2026
2027         page = find_lock_page(inode->i_mapping, index);
2028         if (page) {
2029                 wait_on_page_writeback(page);
2030                 zero_user(page, offset & (PAGE_SIZE - 1), size);
2031                 unlock_page(page);
2032                 put_page(page);
2033         }
2034 }
2035
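     /*
      * Zero the page cache over [offset, offset+length): partial pages at
      * the head and tail are zeroed in place, whole pages in between are
      * dropped from the cache.
      */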
2036 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
2037                                       loff_t length)
2038 {
2039         loff_t nearly = round_up(offset, PAGE_SIZE);
2040         if (offset < nearly) {
2041                 loff_t size = nearly - offset;
2042                 if (length < size)
2043                         size = length;
2044                 ceph_zero_partial_page(inode, offset, size);
2045                 offset += size;
2046                 length -= size;
2047         }
2048         if (length >= PAGE_SIZE) {
2049                 loff_t size = round_down(length, PAGE_SIZE);
2050                 truncate_pagecache_range(inode, offset, offset + size - 1);
2051                 offset += size;
2052                 length -= size;
2053         }
2054         if (length)
2055                 ceph_zero_partial_page(inode, offset, length);
2056 }
2057
2058 static int ceph_zero_partial_object(struct inode *inode,
2059                                     loff_t offset, loff_t *length)
2060 {
2061         struct ceph_inode_info *ci = ceph_inode(inode);
2062         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2063         struct ceph_osd_request *req;
2064         int ret = 0;
2065         loff_t zero = 0;
2066         int op;
2067
2068         if (ceph_inode_is_shutdown(inode))
2069                 return -EIO;
2070
2071         if (!length) {
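                     /* NULL length: wipe the whole object (delete/truncate) */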
2072                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2073                 length = &zero;
2074         } else {
2075                 op = CEPH_OSD_OP_ZERO;
2076         }
2077
2078         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2079                                         ceph_vino(inode),
2080                                         offset, length,
2081                                         0, 1, op,
2082                                         CEPH_OSD_FLAG_WRITE,
2083                                         NULL, 0, 0, false);
2084         if (IS_ERR(req)) {
2085                 ret = PTR_ERR(req);
2086                 goto out;
2087         }
2088
2089         req->r_mtime = inode->i_mtime;
2090         ceph_osdc_start_request(&fsc->client->osdc, req);
2091         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2092         if (ret == -ENOENT)
2093                 ret = 0;
2094         ceph_osdc_put_request(req);
2095
2096 out:
2097         return ret;
2098 }
2099
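     /*
      * Zero [offset, offset+length) across RADOS objects: first the span up
      * to the next object-set boundary, then whole object sets (one
      * delete/truncate per stripe object), then the remaining tail.
      */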
2100 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2101 {
2102         int ret = 0;
2103         struct ceph_inode_info *ci = ceph_inode(inode);
2104         s32 stripe_unit = ci->i_layout.stripe_unit;
2105         s32 stripe_count = ci->i_layout.stripe_count;
2106         s32 object_size = ci->i_layout.object_size;
2107         u64 object_set_size = object_size * stripe_count;
2108         u64 nearly, t;
2109
2110         /* round offset up to next period boundary */
2111         nearly = offset + object_set_size - 1;
2112         t = nearly;
2113         nearly -= do_div(t, object_set_size);
2114
2115         while (length && offset < nearly) {
2116                 loff_t size = length;
2117                 ret = ceph_zero_partial_object(inode, offset, &size);
2118                 if (ret < 0)
2119                         return ret;
2120                 offset += size;
2121                 length -= size;
2122         }
2123         while (length >= object_set_size) {
2124                 int i;
2125                 loff_t pos = offset;
2126                 for (i = 0; i < stripe_count; ++i) {
2127                         ret = ceph_zero_partial_object(inode, pos, NULL);
2128                         if (ret < 0)
2129                                 return ret;
2130                         pos += stripe_unit;
2131                 }
2132                 offset += object_set_size;
2133                 length -= object_set_size;
2134         }
2135         while (length) {
2136                 loff_t size = length;
2137                 ret = ceph_zero_partial_object(inode, offset, &size);
2138                 if (ret < 0)
2139                         return ret;
2140                 offset += size;
2141                 length -= size;
2142         }
2143         return ret;
2144 }
2145
2146 static long ceph_fallocate(struct file *file, int mode,
2147                                 loff_t offset, loff_t length)
2148 {
2149         struct ceph_file_info *fi = file->private_data;
2150         struct inode *inode = file_inode(file);
2151         struct ceph_inode_info *ci = ceph_inode(inode);
2152         struct ceph_cap_flush *prealloc_cf;
2153         int want, got = 0;
2154         int dirty;
2155         int ret = 0;
2156         loff_t endoff = 0;
2157         loff_t size;
2158
2159         dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
2160              inode, ceph_vinop(inode), mode, offset, length);
2161
2162         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2163                 return -EOPNOTSUPP;
2164
2165         if (!S_ISREG(inode->i_mode))
2166                 return -EOPNOTSUPP;
2167
2168         prealloc_cf = ceph_alloc_cap_flush();
2169         if (!prealloc_cf)
2170                 return -ENOMEM;
2171
2172         inode_lock(inode);
2173
2174         if (ceph_snap(inode) != CEPH_NOSNAP) {
2175                 ret = -EROFS;
2176                 goto unlock;
2177         }
2178
2179         size = i_size_read(inode);
2180
2181         /* Are we punching a hole beyond EOF? */
2182         if (offset >= size)
2183                 goto unlock;
2184         if ((offset + length) > size)
2185                 length = size - offset;
2186
2187         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2188                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2189         else
2190                 want = CEPH_CAP_FILE_BUFFER;
2191
2192         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2193         if (ret < 0)
2194                 goto unlock;
2195
2196         ret = file_modified(file);
2197         if (ret)
2198                 goto put_caps;
2199
2200         filemap_invalidate_lock(inode->i_mapping);
2201         ceph_fscache_invalidate(inode, false);
2202         ceph_zero_pagecache_range(inode, offset, length);
2203         ret = ceph_zero_objects(inode, offset, length);
2204
2205         if (!ret) {
2206                 spin_lock(&ci->i_ceph_lock);
2207                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2208                                                &prealloc_cf);
2209                 spin_unlock(&ci->i_ceph_lock);
2210                 if (dirty)
2211                         __mark_inode_dirty(inode, dirty);
2212         }
2213         filemap_invalidate_unlock(inode->i_mapping);
2214
2215 put_caps:
2216         ceph_put_cap_refs(ci, got);
2217 unlock:
2218         inode_unlock(inode);
2219         ceph_free_cap_flush(prealloc_cf);
2220         return ret;
2221 }
2222
2223 /*
2224  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2225  * src_ci.  Two attempts are made to obtain both caps, and an error is returned if
2226  * this fails; zero is returned on success.
2227  */
2228 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2229                           struct file *dst_filp,
2230                           loff_t dst_endoff, int *dst_got)
2231 {
2232         int ret = 0;
2233         bool retrying = false;
2234
2235 retry_caps:
2236         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2237                             dst_endoff, dst_got);
2238         if (ret < 0)
2239                 return ret;
2240
2241         /*
2242          * Since we're already holding the FILE_WR capability for the dst file,
2243          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2244          * retry dance instead to try to get both capabilities.
2245          */
2246         ret = ceph_try_get_caps(file_inode(src_filp),
2247                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2248                                 false, src_got);
2249         if (ret <= 0) {
2250                 /* Start by dropping dst_ci caps and getting src_ci caps */
2251                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2252                 if (retrying) {
2253                         if (!ret)
2254                                 /* ceph_try_get_caps masks EAGAIN */
2255                                 ret = -EAGAIN;
2256                         return ret;
2257                 }
2258                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2259                                     CEPH_CAP_FILE_SHARED, -1, src_got);
2260                 if (ret < 0)
2261                         return ret;
2262                 /* ... drop src_ci caps too, and retry */
2263                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2264                 retrying = true;
2265                 goto retry_caps;
2266         }
2267         return ret;
2268 }
2269
2270 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2271                            struct ceph_inode_info *dst_ci, int dst_got)
2272 {
2273         ceph_put_cap_refs(src_ci, src_got);
2274         ceph_put_cap_refs(dst_ci, dst_got);
2275 }
2276
2277 /*
2278  * This function does several size-related checks, returning an error if:
2279  *  - source file is smaller than off+len
2280  *  - destination file size is not OK (inode_newsize_ok())
2281  *  - the max bytes quota is exceeded
2282  */
2283 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2284                            loff_t src_off, loff_t dst_off, size_t len)
2285 {
2286         loff_t size, endoff;
2287
2288         size = i_size_read(src_inode);
2289         /*
2290          * Don't copy beyond source file EOF.  Instead of simply setting length
2291          * to (size - src_off), just drop to VFS default implementation, as the
2292          * local i_size may be stale due to other clients writing to the source
2293          * inode.
2294          */
2295         if (src_off + len > size) {
2296                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2297                      src_off, len, size);
2298                 return -EOPNOTSUPP;
2299         }
2300         size = i_size_read(dst_inode);
2301
2302         endoff = dst_off + len;
2303         if (inode_newsize_ok(dst_inode, endoff))
2304                 return -EOPNOTSUPP;
2305
2306         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2307                 return -EDQUOT;
2308
2309         return 0;
2310 }
2311
2312 static struct ceph_osd_request *
2313 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2314                             u64 src_snapid,
2315                             struct ceph_object_id *src_oid,
2316                             struct ceph_object_locator *src_oloc,
2317                             struct ceph_object_id *dst_oid,
2318                             struct ceph_object_locator *dst_oloc,
2319                             u32 truncate_seq, u64 truncate_size)
2320 {
2321         struct ceph_osd_request *req;
2322         int ret;
2323         u32 src_fadvise_flags =
2324                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2325                 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2326         u32 dst_fadvise_flags =
2327                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2328                 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2329
2330         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2331         if (!req)
2332                 return ERR_PTR(-ENOMEM);
2333
2334         req->r_flags = CEPH_OSD_FLAG_WRITE;
2335
2336         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2337         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2338
2339         ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2340                                         src_oid, src_oloc,
2341                                         src_fadvise_flags,
2342                                         dst_fadvise_flags,
2343                                         truncate_seq,
2344                                         truncate_size,
2345                                         CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2346         if (ret)
2347                 goto out;
2348
2349         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2350         if (ret)
2351                 goto out;
2352
2353         return req;
2354
2355 out:
2356         ceph_osdc_put_request(req);
2357         return ERR_PTR(ret);
2358 }
2359
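     /*
      * Copy data object by object using the OSD copy-from2 operation.
      * Returns the number of bytes copied, or the error if the very first
      * copy fails; *src_off and *dst_off are advanced past whatever was
      * copied successfully.
      */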
2360 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2361                                     struct ceph_inode_info *dst_ci, u64 *dst_off,
2362                                     struct ceph_fs_client *fsc,
2363                                     size_t len, unsigned int flags)
2364 {
2365         struct ceph_object_locator src_oloc, dst_oloc;
2366         struct ceph_object_id src_oid, dst_oid;
2367         struct ceph_osd_client *osdc;
2368         struct ceph_osd_request *req;
2369         size_t bytes = 0;
2370         u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2371         u32 src_objlen, dst_objlen;
2372         u32 object_size = src_ci->i_layout.object_size;
2373         int ret;
2374
2375         src_oloc.pool = src_ci->i_layout.pool_id;
2376         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2377         dst_oloc.pool = dst_ci->i_layout.pool_id;
2378         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2379         osdc = &fsc->client->osdc;
2380
2381         while (len >= object_size) {
2382                 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2383                                               object_size, &src_objnum,
2384                                               &src_objoff, &src_objlen);
2385                 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2386                                               object_size, &dst_objnum,
2387                                               &dst_objoff, &dst_objlen);
2388                 ceph_oid_init(&src_oid);
2389                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2390                                 src_ci->i_vino.ino, src_objnum);
2391                 ceph_oid_init(&dst_oid);
2392                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2393                                 dst_ci->i_vino.ino, dst_objnum);
2394                 /* Do an object remote copy */
2395                 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2396                                                   &src_oid, &src_oloc,
2397                                                   &dst_oid, &dst_oloc,
2398                                                   dst_ci->i_truncate_seq,
2399                                                   dst_ci->i_truncate_size);
2400                 if (IS_ERR(req))
2401                         ret = PTR_ERR(req);
2402                 else {
2403                         ceph_osdc_start_request(osdc, req);
2404                         ret = ceph_osdc_wait_request(osdc, req);
2405                         ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2406                                                      req->r_start_latency,
2407                                                      req->r_end_latency,
2408                                                      object_size, ret);
2409                         ceph_osdc_put_request(req);
2410                 }
2411                 if (ret) {
2412                         if (ret == -EOPNOTSUPP) {
2413                                 fsc->have_copy_from2 = false;
2414                                 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2415                         }
2416                         dout("ceph_osdc_copy_from returned %d\n", ret);
2417                         if (!bytes)
2418                                 bytes = ret;
2419                         goto out;
2420                 }
2421                 len -= object_size;
2422                 bytes += object_size;
2423                 *src_off += object_size;
2424                 *dst_off += object_size;
2425         }
2426
2427 out:
2428         ceph_oloc_destroy(&src_oloc);
2429         ceph_oloc_destroy(&dst_oloc);
2430         return bytes;
2431 }
2432
2433 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2434                                       struct file *dst_file, loff_t dst_off,
2435                                       size_t len, unsigned int flags)
2436 {
2437         struct inode *src_inode = file_inode(src_file);
2438         struct inode *dst_inode = file_inode(dst_file);
2439         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2440         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2441         struct ceph_cap_flush *prealloc_cf;
2442         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2443         loff_t size;
2444         ssize_t ret = -EIO, bytes;
2445         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2446         u32 src_objlen, dst_objlen;
2447         int src_got = 0, dst_got = 0, err, dirty;
2448
2449         if (src_inode->i_sb != dst_inode->i_sb) {
2450                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2451
2452                 if (ceph_fsid_compare(&src_fsc->client->fsid,
2453                                       &dst_fsc->client->fsid)) {
2454                         dout("Copying files across clusters: src: %pU dst: %pU\n",
2455                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
2456                         return -EXDEV;
2457                 }
2458         }
2459         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2460                 return -EROFS;
2461
2462         /*
2463          * Some of the checks below will return -EOPNOTSUPP, which will force a
2464          * fallback to the default VFS copy_file_range implementation.  This is
2465          * desirable in several cases (for ex, the 'len' is smaller than the
2466          * size of the objects, or in cases where that would be more
2467          * efficient).
2468          */
2469
2470         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2471                 return -EOPNOTSUPP;
2472
2473         if (!src_fsc->have_copy_from2)
2474                 return -EOPNOTSUPP;
2475
2476         /*
2477          * Striped file layouts require that we copy partial objects, but the
2478          * OSD copy-from operation only supports full-object copies.  Limit
2479          * this to non-striped file layouts for now.
2480          */
2481         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2482             (src_ci->i_layout.stripe_count != 1) ||
2483             (dst_ci->i_layout.stripe_count != 1) ||
2484             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2485                 dout("Invalid src/dst file layouts\n");
2486                 return -EOPNOTSUPP;
2487         }
2488
2489         if (len < src_ci->i_layout.object_size)
2490                 return -EOPNOTSUPP; /* no remote copy will be done */
2491
2492         prealloc_cf = ceph_alloc_cap_flush();
2493         if (!prealloc_cf)
2494                 return -ENOMEM;
2495
2496         /* Start by sync'ing the source and destination files */
2497         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2498         if (ret < 0) {
2499                 dout("failed to write src file (%zd)\n", ret);
2500                 goto out;
2501         }
2502         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2503         if (ret < 0) {
2504                 dout("failed to write dst file (%zd)\n", ret);
2505                 goto out;
2506         }
2507
2508         /*
2509          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2510          * clients may have dirty data in their caches.  And OSDs know nothing
2511          * about caps, so they can't safely do the remote object copies.
2512          */
2513         err = get_rd_wr_caps(src_file, &src_got,
2514                              dst_file, (dst_off + len), &dst_got);
2515         if (err < 0) {
2516                 dout("get_rd_wr_caps returned %d\n", err);
2517                 ret = -EOPNOTSUPP;
2518                 goto out;
2519         }
2520
2521         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2522         if (ret < 0)
2523                 goto out_caps;
2524
2525         /* Drop dst file cached pages */
2526         ceph_fscache_invalidate(dst_inode, false);
2527         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2528                                             dst_off >> PAGE_SHIFT,
2529                                             (dst_off + len) >> PAGE_SHIFT);
2530         if (ret < 0) {
2531                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2532                 ret = 0; /* XXX */
2533         }
2534         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2535                                       src_ci->i_layout.object_size,
2536                                       &src_objnum, &src_objoff, &src_objlen);
2537         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2538                                       dst_ci->i_layout.object_size,
2539                                       &dst_objnum, &dst_objoff, &dst_objlen);
2540         /* object-level offsets need to be the same */
2541         if (src_objoff != dst_objoff) {
2542                 ret = -EOPNOTSUPP;
2543                 goto out_caps;
2544         }
2545
2546         /*
2547          * Do a manual copy if the object offset isn't object aligned.
2548          * 'src_objlen' contains the bytes left until the end of the object,
2549          * starting at src_off.
2550          */
2551         if (src_objoff) {
2552                 dout("Initial partial copy of %u bytes\n", src_objlen);
2553
2554                 /*
2555                  * we need to temporarily drop all caps as we'll be calling
2556                  * {read,write}_iter, which will get caps again.
2557                  */
2558                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2559                 ret = do_splice_direct(src_file, &src_off, dst_file,
2560                                        &dst_off, src_objlen, flags);
2561                 /* Abort on short copies or on error */
2562                 if (ret < src_objlen) {
2563                         dout("Failed partial copy (%zd)\n", ret);
2564                         goto out;
2565                 }
2566                 len -= ret;
2567                 err = get_rd_wr_caps(src_file, &src_got,
2568                                      dst_file, (dst_off + len), &dst_got);
2569                 if (err < 0)
2570                         goto out;
2571                 err = is_file_size_ok(src_inode, dst_inode,
2572                                       src_off, dst_off, len);
2573                 if (err < 0)
2574                         goto out_caps;
2575         }
2576
2577         size = i_size_read(dst_inode);
2578         bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2579                                      src_fsc, len, flags);
2580         if (bytes <= 0) {
2581                 if (!ret)
2582                         ret = bytes;
2583                 goto out_caps;
2584         }
2585         dout("Copied %zu bytes out of %zu\n", bytes, len);
2586         len -= bytes;
2587         ret += bytes;
2588
2589         file_update_time(dst_file);
2590         inode_inc_iversion_raw(dst_inode);
2591
2592         if (dst_off > size) {
2593                 /* Let the MDS know about dst file size change */
2594                 if (ceph_inode_set_size(dst_inode, dst_off) ||
2595                     ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2596                         ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
2597         }
2598         /* Mark Fw dirty */
2599         spin_lock(&dst_ci->i_ceph_lock);
2600         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2601         spin_unlock(&dst_ci->i_ceph_lock);
2602         if (dirty)
2603                 __mark_inode_dirty(dst_inode, dirty);
2604
2605 out_caps:
2606         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2607
2608         /*
2609          * Do the final manual copy if we still have some bytes left, unless
2610          * there were errors in remote object copies (len >= object_size).
2611          */
2612         if (len && (len < src_ci->i_layout.object_size)) {
2613                 dout("Final partial copy of %zu bytes\n", len);
2614                 bytes = do_splice_direct(src_file, &src_off, dst_file,
2615                                          &dst_off, len, flags);
2616                 if (bytes > 0)
2617                         ret += bytes;
2618                 else
2619                         dout("Failed partial copy (%zd)\n", bytes);
2620         }
2621
2622 out:
2623         ceph_free_cap_flush(prealloc_cf);
2624
2625         return ret;
2626 }
2627
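     /*
      * copy_file_range entry point: try the OSD copy offload first and
      * fall back to the generic VFS implementation when the copy cannot
      * be offloaded (-EOPNOTSUPP) or spans clusters (-EXDEV).
      */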
2628 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2629                                     struct file *dst_file, loff_t dst_off,
2630                                     size_t len, unsigned int flags)
2631 {
2632         ssize_t ret;
2633
2634         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2635                                      len, flags);
2636
2637         if (ret == -EOPNOTSUPP || ret == -EXDEV)
2638                 ret = generic_copy_file_range(src_file, src_off, dst_file,
2639                                               dst_off, len, flags);
2640         return ret;
2641 }
2642
2643 const struct file_operations ceph_file_fops = {
2644         .open = ceph_open,
2645         .release = ceph_release,
2646         .llseek = ceph_llseek,
2647         .read_iter = ceph_read_iter,
2648         .write_iter = ceph_write_iter,
2649         .mmap = ceph_mmap,
2650         .fsync = ceph_fsync,
2651         .lock = ceph_lock,
2652         .setlease = simple_nosetlease,
2653         .flock = ceph_flock,
2654         .splice_read = ceph_splice_read,
2655         .splice_write = iter_file_splice_write,
2656         .unlocked_ioctl = ceph_ioctl,
2657         .compat_ioctl = compat_ptr_ioctl,
2658         .fallocate      = ceph_fallocate,
2659         .copy_file_range = ceph_copy_file_range,
2660 };