ceph: invalidate pages when doing direct/sync writes
[platform/kernel/linux-starfive.git] fs/ceph/file.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 #include <linux/ceph/striper.h>
4
5 #include <linux/module.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/file.h>
9 #include <linux/mount.h>
10 #include <linux/namei.h>
11 #include <linux/writeback.h>
12 #include <linux/falloc.h>
13 #include <linux/iversion.h>
14 #include <linux/ktime.h>
15
16 #include "super.h"
17 #include "mds_client.h"
18 #include "cache.h"
19 #include "io.h"
20 #include "metric.h"
21
22 static __le32 ceph_flags_sys2wire(u32 flags)
23 {
24         u32 wire_flags = 0;
25
26         switch (flags & O_ACCMODE) {
27         case O_RDONLY:
28                 wire_flags |= CEPH_O_RDONLY;
29                 break;
30         case O_WRONLY:
31                 wire_flags |= CEPH_O_WRONLY;
32                 break;
33         case O_RDWR:
34                 wire_flags |= CEPH_O_RDWR;
35                 break;
36         }
37
38         flags &= ~O_ACCMODE;
39
40 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
41
42         ceph_sys2wire(O_CREAT);
43         ceph_sys2wire(O_EXCL);
44         ceph_sys2wire(O_TRUNC);
45         ceph_sys2wire(O_DIRECTORY);
46         ceph_sys2wire(O_NOFOLLOW);
47
48 #undef ceph_sys2wire
49
50         if (flags)
51                 dout("unused open flags: %x\n", flags);
52
53         return cpu_to_le32(wire_flags);
54 }
55
56 /*
57  * Ceph file operations
58  *
59  * Implement basic open/close functionality, and implement
60  * read/write.
61  *
62  * We implement three modes of file I/O:
63  *  - buffered uses the page cache via the generic iter-based read/write helpers
64  *
65  *  - synchronous is used when there is multi-client read/write
66  *    sharing, avoids the page cache, and synchronously waits for an
67  *    ack from the OSD.
68  *
69  *  - direct io takes the variant of the sync path that references
70  *    user pages directly.
71  *
72  * fsync() flushes and waits on dirty pages, but just queues metadata
73  * for writeback: since the MDS can recover size and mtime there is no
74  * need to wait for MDS acknowledgement.
75  */
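
How these three modes get chosen, in rough outline: the iter entry points
ceph_read_iter()/ceph_write_iter(), later in this file, branch on O_DIRECT,
the CEPH_F_SYNC file flag, and the caps currently held.  A condensed sketch
of the read side -- simplified for illustration, not the verbatim dispatch
code:

	if (iocb->ki_flags & IOCB_DIRECT)
		ret = ceph_direct_read_write(iocb, to, NULL, NULL); /* direct */
	else if ((fi->flags & CEPH_F_SYNC) ||
		 !(got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)))
		ret = ceph_sync_read(iocb, to, &retry_op);	/* sync */
	else
		ret = generic_file_read_iter(iocb, to);		/* buffered */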
76
77 /*
78  * How many pages to get in one call to iov_iter_get_pages().  This
79  * determines the size of the on-stack array used as a buffer.
80  */
81 #define ITER_GET_BVECS_PAGES    64
82
83 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
84                                 struct bio_vec *bvecs)
85 {
86         size_t size = 0;
87         int bvec_idx = 0;
88
89         if (maxsize > iov_iter_count(iter))
90                 maxsize = iov_iter_count(iter);
91
92         while (size < maxsize) {
93                 struct page *pages[ITER_GET_BVECS_PAGES];
94                 ssize_t bytes;
95                 size_t start;
96                 int idx = 0;
97
98                 bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
99                                            ITER_GET_BVECS_PAGES, &start);
100                 if (bytes < 0)
101                         return size ?: bytes;
102
103                 size += bytes;
104
105                 for ( ; bytes; idx++, bvec_idx++) {
106                         int len = min_t(int, bytes, PAGE_SIZE - start);
107
108                         bvec_set_page(&bvecs[bvec_idx], pages[idx], len, start);
109                         bytes -= len;
110                         start = 0;
111                 }
112         }
113
114         return size;
115 }
116
117 /*
118  * iov_iter_get_pages2() only considers one iov_iter segment, no matter
119  * what maxsize or maxpages are given.  For ITER_BVEC that is a single
120  * page.
121  *
122  * Attempt to get up to @maxsize bytes worth of pages from @iter.
123  * Return the number of bytes in the created bio_vec array, or an error.
124  */
125 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
126                                     struct bio_vec **bvecs, int *num_bvecs)
127 {
128         struct bio_vec *bv;
129         size_t orig_count = iov_iter_count(iter);
130         ssize_t bytes;
131         int npages;
132
133         iov_iter_truncate(iter, maxsize);
134         npages = iov_iter_npages(iter, INT_MAX);
135         iov_iter_reexpand(iter, orig_count);
136
137         /*
138          * __iter_get_bvecs() may populate only part of the array -- zero it
139          * out.
140          */
141         bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
142         if (!bv)
143                 return -ENOMEM;
144
145         bytes = __iter_get_bvecs(iter, maxsize, bv);
146         if (bytes < 0) {
147                 /*
148                  * No pages were pinned -- just free the array.
149                  */
150                 kvfree(bv);
151                 return bytes;
152         }
153
154         *bvecs = bv;
155         *num_bvecs = npages;
156         return bytes;
157 }
158
159 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
160 {
161         int i;
162
163         for (i = 0; i < num_bvecs; i++) {
164                 if (bvecs[i].bv_page) {
165                         if (should_dirty)
166                                 set_page_dirty_lock(bvecs[i].bv_page);
167                         put_page(bvecs[i].bv_page);
168                 }
169         }
170         kvfree(bvecs);
171 }
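
The two helpers above form a pin/unpin pair; ceph_direct_read_write() below
wraps each OSD request with them.  A minimal usage sketch (error handling
abridged; size and should_dirty stand for values the caller computes):

	struct bio_vec *bvecs;
	int num_bvecs;
	ssize_t len;

	len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
	if (len < 0)
		return len;	/* nothing pinned; the array was already freed */
	/* ... attach bvecs to an OSD request, submit, wait ... */
	put_bvecs(bvecs, num_bvecs, should_dirty); /* unpin; dirty pages on reads */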
172
173 /*
174  * Prepare an open request.  Preallocate ceph_cap to avoid an
175  * inopportune ENOMEM later.
176  */
177 static struct ceph_mds_request *
178 prepare_open_request(struct super_block *sb, int flags, int create_mode)
179 {
180         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
181         struct ceph_mds_request *req;
182         int want_auth = USE_ANY_MDS;
183         int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
184
185         if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
186                 want_auth = USE_AUTH_MDS;
187
188         req = ceph_mdsc_create_request(mdsc, op, want_auth);
189         if (IS_ERR(req))
190                 goto out;
191         req->r_fmode = ceph_flags_to_mode(flags);
192         req->r_args.open.flags = ceph_flags_sys2wire(flags);
193         req->r_args.open.mode = cpu_to_le32(create_mode);
194 out:
195         return req;
196 }
197
198 static int ceph_init_file_info(struct inode *inode, struct file *file,
199                                         int fmode, bool isdir)
200 {
201         struct ceph_inode_info *ci = ceph_inode(inode);
202         struct ceph_mount_options *opt =
203                 ceph_inode_to_client(&ci->netfs.inode)->mount_options;
204         struct ceph_file_info *fi;
205         int ret;
206
207         dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
208                         inode->i_mode, isdir ? "dir" : "regular");
209         BUG_ON(inode->i_fop->release != ceph_release);
210
211         if (isdir) {
212                 struct ceph_dir_file_info *dfi =
213                         kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
214                 if (!dfi)
215                         return -ENOMEM;
216
217                 file->private_data = dfi;
218                 fi = &dfi->file_info;
219                 dfi->next_offset = 2;
220                 dfi->readdir_cache_idx = -1;
221         } else {
222                 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
223                 if (!fi)
224                         return -ENOMEM;
225
226                 if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
227                         fi->flags |= CEPH_F_SYNC;
228
229                 file->private_data = fi;
230         }
231
232         ceph_get_fmode(ci, fmode, 1);
233         fi->fmode = fmode;
234
235         spin_lock_init(&fi->rw_contexts_lock);
236         INIT_LIST_HEAD(&fi->rw_contexts);
237         fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
238
239         if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
240                 ret = ceph_uninline_data(file);
241                 if (ret < 0)
242                         goto error;
243         }
244
245         return 0;
246
247 error:
248         ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
249         ceph_put_fmode(ci, fi->fmode, 1);
250         kmem_cache_free(ceph_file_cachep, fi);
251         /* wake up anyone waiting for caps on this inode */
252         wake_up_all(&ci->i_cap_wq);
253         return ret;
254 }
255
256 /*
257  * initialize private struct file data.
258  * if we fail, clean up by dropping fmode reference on the ceph_inode
259  */
260 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
261 {
262         int ret = 0;
263
264         switch (inode->i_mode & S_IFMT) {
265         case S_IFREG:
266                 ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
267                 fallthrough;
268         case S_IFDIR:
269                 ret = ceph_init_file_info(inode, file, fmode,
270                                                 S_ISDIR(inode->i_mode));
271                 break;
272
273         case S_IFLNK:
274                 dout("init_file %p %p 0%o (symlink)\n", inode, file,
275                      inode->i_mode);
276                 break;
277
278         default:
279                 dout("init_file %p %p 0%o (special)\n", inode, file,
280                      inode->i_mode);
281                 /*
282                  * we need to drop the open ref now, since we don't
283                  * have .release set to ceph_release.
284                  */
285                 BUG_ON(inode->i_fop->release == ceph_release);
286
287                 /* call the proper open fop */
288                 ret = inode->i_fop->open(inode, file);
289         }
290         return ret;
291 }
292
293 /*
294  * try renew caps after session gets killed.
295  */
296 int ceph_renew_caps(struct inode *inode, int fmode)
297 {
298         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
299         struct ceph_inode_info *ci = ceph_inode(inode);
300         struct ceph_mds_request *req;
301         int err, flags, wanted;
302
303         spin_lock(&ci->i_ceph_lock);
304         __ceph_touch_fmode(ci, mdsc, fmode);
305         wanted = __ceph_caps_file_wanted(ci);
306         if (__ceph_is_any_real_caps(ci) &&
307             (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
308                 int issued = __ceph_caps_issued(ci, NULL);
309                 spin_unlock(&ci->i_ceph_lock);
310                 dout("renew caps %p want %s issued %s updating mds_wanted\n",
311                      inode, ceph_cap_string(wanted), ceph_cap_string(issued));
312                 ceph_check_caps(ci, 0);
313                 return 0;
314         }
315         spin_unlock(&ci->i_ceph_lock);
316
317         flags = 0;
318         if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
319                 flags = O_RDWR;
320         else if (wanted & CEPH_CAP_FILE_RD)
321                 flags = O_RDONLY;
322         else if (wanted & CEPH_CAP_FILE_WR)
323                 flags = O_WRONLY;
324 #ifdef O_LAZY
325         if (wanted & CEPH_CAP_FILE_LAZYIO)
326                 flags |= O_LAZY;
327 #endif
328
329         req = prepare_open_request(inode->i_sb, flags, 0);
330         if (IS_ERR(req)) {
331                 err = PTR_ERR(req);
332                 goto out;
333         }
334
335         req->r_inode = inode;
336         ihold(inode);
337         req->r_num_caps = 1;
338
339         err = ceph_mdsc_do_request(mdsc, NULL, req);
340         ceph_mdsc_put_request(req);
341 out:
342         dout("renew caps %p open result=%d\n", inode, err);
343         return err < 0 ? err : 0;
344 }
345
346 /*
347  * If we already have the requisite capabilities, we can satisfy
348  * the open request locally (no need to request new caps from the
349  * MDS).  We do, however, need to inform the MDS (asynchronously)
350  * if our wanted caps set expands.
351  */
352 int ceph_open(struct inode *inode, struct file *file)
353 {
354         struct ceph_inode_info *ci = ceph_inode(inode);
355         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
356         struct ceph_mds_client *mdsc = fsc->mdsc;
357         struct ceph_mds_request *req;
358         struct ceph_file_info *fi = file->private_data;
359         int err;
360         int flags, fmode, wanted;
361
362         if (fi) {
363                 dout("open file %p is already opened\n", file);
364                 return 0;
365         }
366
367         /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
368         flags = file->f_flags & ~(O_CREAT|O_EXCL);
369         if (S_ISDIR(inode->i_mode)) {
370                 flags = O_DIRECTORY;  /* mds likes to know */
371         } else if (S_ISREG(inode->i_mode)) {
372                 err = fscrypt_file_open(inode, file);
373                 if (err)
374                         return err;
375         }
376
377         dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
378              ceph_vinop(inode), file, flags, file->f_flags);
379         fmode = ceph_flags_to_mode(flags);
380         wanted = ceph_caps_for_mode(fmode);
381
382         /* snapped files are read-only */
383         if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
384                 return -EROFS;
385
386         /* trivially open snapdir */
387         if (ceph_snap(inode) == CEPH_SNAPDIR) {
388                 return ceph_init_file(inode, file, fmode);
389         }
390
391         /*
392          * No need to block if we have caps on the auth MDS (for
393          * write) or any MDS (for read).  Update wanted set
394          * asynchronously.
395          */
396         spin_lock(&ci->i_ceph_lock);
397         if (__ceph_is_any_real_caps(ci) &&
398             (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
399                 int mds_wanted = __ceph_caps_mds_wanted(ci, true);
400                 int issued = __ceph_caps_issued(ci, NULL);
401
402                 dout("open %p fmode %d want %s issued %s using existing\n",
403                      inode, fmode, ceph_cap_string(wanted),
404                      ceph_cap_string(issued));
405                 __ceph_touch_fmode(ci, mdsc, fmode);
406                 spin_unlock(&ci->i_ceph_lock);
407
408                 /* adjust wanted? */
409                 if ((issued & wanted) != wanted &&
410                     (mds_wanted & wanted) != wanted &&
411                     ceph_snap(inode) != CEPH_SNAPDIR)
412                         ceph_check_caps(ci, 0);
413
414                 return ceph_init_file(inode, file, fmode);
415         } else if (ceph_snap(inode) != CEPH_NOSNAP &&
416                    (ci->i_snap_caps & wanted) == wanted) {
417                 __ceph_touch_fmode(ci, mdsc, fmode);
418                 spin_unlock(&ci->i_ceph_lock);
419                 return ceph_init_file(inode, file, fmode);
420         }
421
422         spin_unlock(&ci->i_ceph_lock);
423
424         dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
425         req = prepare_open_request(inode->i_sb, flags, 0);
426         if (IS_ERR(req)) {
427                 err = PTR_ERR(req);
428                 goto out;
429         }
430         req->r_inode = inode;
431         ihold(inode);
432
433         req->r_num_caps = 1;
434         err = ceph_mdsc_do_request(mdsc, NULL, req);
435         if (!err)
436                 err = ceph_init_file(inode, file, req->r_fmode);
437         ceph_mdsc_put_request(req);
438         dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
439 out:
440         return err;
441 }
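
To make the flags -> fmode -> wanted-caps translation above concrete (cap
values abridged and approximate; the helpers live in caps.c):

	int fmode  = ceph_flags_to_mode(O_RDWR);	/* CEPH_FILE_MODE_RDWR */
	int wanted = ceph_caps_for_mode(fmode);		/* roughly: PIN plus the
							 * SHARED bits, Fr|Fc for
							 * read, Fw|Fb for write */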
442
443 /* Clone the layout from a synchronous create, if the dir now has Dc caps */
444 static void
445 cache_file_layout(struct inode *dst, struct inode *src)
446 {
447         struct ceph_inode_info *cdst = ceph_inode(dst);
448         struct ceph_inode_info *csrc = ceph_inode(src);
449
450         spin_lock(&cdst->i_ceph_lock);
451         if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
452             !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
453                 memcpy(&cdst->i_cached_layout, &csrc->i_layout,
454                         sizeof(cdst->i_cached_layout));
455                 rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
456                                    ceph_try_get_string(csrc->i_layout.pool_ns));
457         }
458         spin_unlock(&cdst->i_ceph_lock);
459 }
460
461 /*
462  * Try to set up an async create. We need caps, a file layout, and inode number,
463  * and either a lease on the dentry or complete dir info. If any of those
464  * criteria are not satisfied, then return false and the caller can go
465  * synchronous.
466  */
467 static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
468                                  struct ceph_file_layout *lo, u64 *pino)
469 {
470         struct ceph_inode_info *ci = ceph_inode(dir);
471         struct ceph_dentry_info *di = ceph_dentry(dentry);
472         int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
473         u64 ino;
474
475         spin_lock(&ci->i_ceph_lock);
476         /* No auth cap means no chance for Dc caps */
477         if (!ci->i_auth_cap)
478                 goto no_async;
479
480         /* Any delegated inos? */
481         if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
482                 goto no_async;
483
484         if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
485                 goto no_async;
486
487         if ((__ceph_caps_issued(ci, NULL) & want) != want)
488                 goto no_async;
489
490         if (d_in_lookup(dentry)) {
491                 if (!__ceph_dir_is_complete(ci))
492                         goto no_async;
493                 spin_lock(&dentry->d_lock);
494                 di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
495                 spin_unlock(&dentry->d_lock);
496         } else if (atomic_read(&ci->i_shared_gen) !=
497                    READ_ONCE(di->lease_shared_gen)) {
498                 goto no_async;
499         }
500
501         ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
502         if (!ino)
503                 goto no_async;
504
505         *pino = ino;
506         ceph_take_cap_refs(ci, want, false);
507         memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
508         rcu_assign_pointer(lo->pool_ns,
509                            ceph_try_get_string(ci->i_cached_layout.pool_ns));
510         got = want;
511 no_async:
512         spin_unlock(&ci->i_ceph_lock);
513         return got;
514 }
515
516 static void restore_deleg_ino(struct inode *dir, u64 ino)
517 {
518         struct ceph_inode_info *ci = ceph_inode(dir);
519         struct ceph_mds_session *s = NULL;
520
521         spin_lock(&ci->i_ceph_lock);
522         if (ci->i_auth_cap)
523                 s = ceph_get_mds_session(ci->i_auth_cap->session);
524         spin_unlock(&ci->i_ceph_lock);
525         if (s) {
526                 int err = ceph_restore_deleg_ino(s, ino);
527                 if (err)
528                         pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
529                                 ino, err);
530                 ceph_put_mds_session(s);
531         }
532 }
533
534 static void wake_async_create_waiters(struct inode *inode,
535                                       struct ceph_mds_session *session)
536 {
537         struct ceph_inode_info *ci = ceph_inode(inode);
538         bool check_cap = false;
539
540         spin_lock(&ci->i_ceph_lock);
541         if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
542                 ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
543                 wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
544
545                 if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
546                         ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
547                         check_cap = true;
548                 }
549         }
550         ceph_kick_flushing_inode_caps(session, ci);
551         spin_unlock(&ci->i_ceph_lock);
552
553         if (check_cap)
554                 ceph_check_caps(ci, CHECK_CAPS_FLUSH);
555 }
556
557 static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
558                                  struct ceph_mds_request *req)
559 {
560         struct dentry *dentry = req->r_dentry;
561         struct inode *dinode = d_inode(dentry);
562         struct inode *tinode = req->r_target_inode;
563         int result = req->r_err ? req->r_err :
564                         le32_to_cpu(req->r_reply_info.head->result);
565
566         WARN_ON_ONCE(dinode && tinode && dinode != tinode);
567
568         /* MDS changed -- caller must resubmit */
569         if (result == -EJUKEBOX)
570                 goto out;
571
572         mapping_set_error(req->r_parent->i_mapping, result);
573
574         if (result) {
575                 int pathlen = 0;
576                 u64 base = 0;
577                 char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
578                                                   &base, 0);
579
580                 pr_warn("async create failure path=(%llx)%s result=%d!\n",
581                         base, IS_ERR(path) ? "<<bad>>" : path, result);
582                 ceph_mdsc_free_path(path, pathlen);
583
584                 ceph_dir_clear_complete(req->r_parent);
585                 if (!d_unhashed(dentry))
586                         d_drop(dentry);
587
588                 if (dinode) {
589                         mapping_set_error(dinode->i_mapping, result);
590                         ceph_inode_shutdown(dinode);
591                         wake_async_create_waiters(dinode, req->r_session);
592                 }
593         }
594
595         if (tinode) {
596                 u64 ino = ceph_vino(tinode).ino;
597
598                 if (req->r_deleg_ino != ino)
599                         pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
600                                 __func__, req->r_err, req->r_deleg_ino, ino);
601
602                 mapping_set_error(tinode->i_mapping, result);
603                 wake_async_create_waiters(tinode, req->r_session);
604         } else if (!result) {
605                 pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
606                         req->r_deleg_ino);
607         }
608 out:
609         ceph_mdsc_release_dir_caps(req);
610 }
611
612 static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
613                                     struct dentry *dentry,
614                                     struct file *file, umode_t mode,
615                                     struct ceph_mds_request *req,
616                                     struct ceph_acl_sec_ctx *as_ctx,
617                                     struct ceph_file_layout *lo)
618 {
619         int ret;
620         char xattr_buf[4];
621         struct ceph_mds_reply_inode in = { };
622         struct ceph_mds_reply_info_in iinfo = { .in = &in };
623         struct ceph_inode_info *ci = ceph_inode(dir);
624         struct ceph_dentry_info *di = ceph_dentry(dentry);
625         struct timespec64 now;
626         struct ceph_string *pool_ns;
627         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
628         struct ceph_vino vino = { .ino = req->r_deleg_ino,
629                                   .snap = CEPH_NOSNAP };
630
631         ktime_get_real_ts64(&now);
632
633         iinfo.inline_version = CEPH_INLINE_NONE;
634         iinfo.change_attr = 1;
635         ceph_encode_timespec64(&iinfo.btime, &now);
636
637         if (req->r_pagelist) {
638                 iinfo.xattr_len = req->r_pagelist->length;
639                 iinfo.xattr_data = req->r_pagelist->mapped_tail;
640         } else {
641                 /* fake it */
642                 iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
643                 iinfo.xattr_data = xattr_buf;
644                 memset(iinfo.xattr_data, 0, iinfo.xattr_len);
645         }
646
647         in.ino = cpu_to_le64(vino.ino);
648         in.snapid = cpu_to_le64(CEPH_NOSNAP);
649         in.version = cpu_to_le64(1);    // ???
650         in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
651         in.cap.cap_id = cpu_to_le64(1);
652         in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
653         in.cap.flags = CEPH_CAP_FLAG_AUTH;
654         in.ctime = in.mtime = in.atime = iinfo.btime;
655         in.truncate_seq = cpu_to_le32(1);
656         in.truncate_size = cpu_to_le64(-1ULL);
657         in.xattr_version = cpu_to_le64(1);
658         in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
659         if (dir->i_mode & S_ISGID) {
660                 in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
661
662                 /* Directories always inherit the setgid bit. */
663                 if (S_ISDIR(mode))
664                         mode |= S_ISGID;
665         } else {
666                 in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
667         }
668         in.mode = cpu_to_le32((u32)mode);
669
670         in.nlink = cpu_to_le32(1);
671         in.max_size = cpu_to_le64(lo->stripe_unit);
672
673         ceph_file_layout_to_legacy(lo, &in.layout);
674         /* lo is private, so pool_ns can't change */
675         pool_ns = rcu_dereference_raw(lo->pool_ns);
676         if (pool_ns) {
677                 iinfo.pool_ns_len = pool_ns->len;
678                 iinfo.pool_ns_data = pool_ns->str;
679         }
680
681         down_read(&mdsc->snap_rwsem);
682         ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
683                               req->r_fmode, NULL);
684         up_read(&mdsc->snap_rwsem);
685         if (ret) {
686                 dout("%s failed to fill inode: %d\n", __func__, ret);
687                 ceph_dir_clear_complete(dir);
688                 if (!d_unhashed(dentry))
689                         d_drop(dentry);
690                 discard_new_inode(inode);
691         } else {
692                 struct dentry *dn;
693
694                 dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
695                         vino.ino, ceph_ino(dir), dentry->d_name.name);
696                 ceph_dir_clear_ordered(dir);
697                 ceph_init_inode_acls(inode, as_ctx);
698                 if (inode->i_state & I_NEW) {
699                         /*
700                          * If it's not I_NEW, then someone created this before
701                          * we got here. Assume the server is aware of it at
702                          * that point and don't worry about setting
703                          * CEPH_I_ASYNC_CREATE.
704                          */
705                         ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
706                         unlock_new_inode(inode);
707                 }
708                 if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
709                         if (!d_unhashed(dentry))
710                                 d_drop(dentry);
711                         dn = d_splice_alias(inode, dentry);
712                         WARN_ON_ONCE(dn && dn != dentry);
713                 }
714                 file->f_mode |= FMODE_CREATED;
715                 ret = finish_open(file, dentry, ceph_open);
716         }
717
718         spin_lock(&dentry->d_lock);
719         di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
720         wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
721         spin_unlock(&dentry->d_lock);
722
723         return ret;
724 }
725
726 /*
727  * Do a lookup + open with a single request.  If we get a non-existent
728  * file or symlink, return 1 so the VFS can retry.
729  */
730 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
731                      struct file *file, unsigned flags, umode_t mode)
732 {
733         struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
734         struct ceph_mds_client *mdsc = fsc->mdsc;
735         struct ceph_mds_request *req;
736         struct inode *new_inode = NULL;
737         struct dentry *dn;
738         struct ceph_acl_sec_ctx as_ctx = {};
739         bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
740         int mask;
741         int err;
742
743         dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
744              dir, dentry, dentry,
745              d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
746
747         if (dentry->d_name.len > NAME_MAX)
748                 return -ENAMETOOLONG;
749
750         err = ceph_wait_on_conflict_unlink(dentry);
751         if (err)
752                 return err;
753         /*
754          * Do not truncate the file, since atomic_open is called before the
755          * permission check. The caller will do the truncation afterward.
756          */
757         flags &= ~O_TRUNC;
758
759 retry:
760         if (flags & O_CREAT) {
761                 if (ceph_quota_is_max_files_exceeded(dir))
762                         return -EDQUOT;
763
764                 new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
765                 if (IS_ERR(new_inode)) {
766                         err = PTR_ERR(new_inode);
767                         goto out_ctx;
768                 }
769                 /* Async create can't handle more than a page of xattrs */
770                 if (as_ctx.pagelist &&
771                     !list_is_singular(&as_ctx.pagelist->head))
772                         try_async = false;
773         } else if (!d_in_lookup(dentry)) {
774                 /* If it's not being looked up, it's negative */
775                 return -ENOENT;
776         }
777
778         /* do the open */
779         req = prepare_open_request(dir->i_sb, flags, mode);
780         if (IS_ERR(req)) {
781                 err = PTR_ERR(req);
782                 goto out_ctx;
783         }
784         req->r_dentry = dget(dentry);
785         req->r_num_caps = 2;
786         mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
787         if (ceph_security_xattr_wanted(dir))
788                 mask |= CEPH_CAP_XATTR_SHARED;
789         req->r_args.open.mask = cpu_to_le32(mask);
790         req->r_parent = dir;
791         ihold(dir);
792         if (IS_ENCRYPTED(dir)) {
793                 set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
794                 if (!fscrypt_has_encryption_key(dir)) {
795                         spin_lock(&dentry->d_lock);
796                         dentry->d_flags |= DCACHE_NOKEY_NAME;
797                         spin_unlock(&dentry->d_lock);
798                 }
799         }
800
801         if (flags & O_CREAT) {
802                 struct ceph_file_layout lo;
803
804                 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
805                                      CEPH_CAP_XATTR_EXCL;
806                 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
807
808                 ceph_as_ctx_to_req(req, &as_ctx);
809
810                 if (try_async && (req->r_dir_caps =
811                                   try_prep_async_create(dir, dentry, &lo,
812                                                         &req->r_deleg_ino))) {
813                         struct ceph_vino vino = { .ino = req->r_deleg_ino,
814                                                   .snap = CEPH_NOSNAP };
815                         struct ceph_dentry_info *di = ceph_dentry(dentry);
816
817                         set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
818                         req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
819                         req->r_callback = ceph_async_create_cb;
820
821                         /* Hash inode before RPC */
822                         new_inode = ceph_get_inode(dir->i_sb, vino, new_inode);
823                         if (IS_ERR(new_inode)) {
824                                 err = PTR_ERR(new_inode);
825                                 new_inode = NULL;
826                                 goto out_req;
827                         }
828                         WARN_ON_ONCE(!(new_inode->i_state & I_NEW));
829
830                         spin_lock(&dentry->d_lock);
831                         di->flags |= CEPH_DENTRY_ASYNC_CREATE;
832                         spin_unlock(&dentry->d_lock);
833
834                         err = ceph_mdsc_submit_request(mdsc, dir, req);
835                         if (!err) {
836                                 err = ceph_finish_async_create(dir, new_inode,
837                                                                dentry, file,
838                                                                mode, req,
839                                                                &as_ctx, &lo);
840                                 new_inode = NULL;
841                         } else if (err == -EJUKEBOX) {
842                                 restore_deleg_ino(dir, req->r_deleg_ino);
843                                 ceph_mdsc_put_request(req);
844                                 discard_new_inode(new_inode);
845                                 ceph_release_acl_sec_ctx(&as_ctx);
846                                 memset(&as_ctx, 0, sizeof(as_ctx));
847                                 new_inode = NULL;
848                                 try_async = false;
849                                 ceph_put_string(rcu_dereference_raw(lo.pool_ns));
850                                 goto retry;
851                         }
852                         ceph_put_string(rcu_dereference_raw(lo.pool_ns));
853                         goto out_req;
854                 }
855         }
856
857         set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
858         req->r_new_inode = new_inode;
859         new_inode = NULL;
860         err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
861         if (err == -ENOENT) {
862                 dentry = ceph_handle_snapdir(req, dentry);
863                 if (IS_ERR(dentry)) {
864                         err = PTR_ERR(dentry);
865                         goto out_req;
866                 }
867                 err = 0;
868         }
869
870         if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
871                 err = ceph_handle_notrace_create(dir, dentry);
872
873         if (d_in_lookup(dentry)) {
874                 dn = ceph_finish_lookup(req, dentry, err);
875                 if (IS_ERR(dn))
876                         err = PTR_ERR(dn);
877         } else {
878                 /* we were given a hashed negative dentry */
879                 dn = NULL;
880         }
881         if (err)
882                 goto out_req;
883         if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
884                 /* make vfs retry on splice, ENOENT, or symlink */
885                 dout("atomic_open finish_no_open on dn %p\n", dn);
886                 err = finish_no_open(file, dn);
887         } else {
888                 if (IS_ENCRYPTED(dir) &&
889                     !fscrypt_has_permitted_context(dir, d_inode(dentry))) {
890                         pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
891                                 ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
892                         goto out_req;
893                 }
894
895                 dout("atomic_open finish_open on dn %p\n", dn);
896                 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
897                         struct inode *newino = d_inode(dentry);
898
899                         cache_file_layout(dir, newino);
900                         ceph_init_inode_acls(newino, &as_ctx);
901                         file->f_mode |= FMODE_CREATED;
902                 }
903                 err = finish_open(file, dentry, ceph_open);
904         }
905 out_req:
906         ceph_mdsc_put_request(req);
907         iput(new_inode);
908 out_ctx:
909         ceph_release_acl_sec_ctx(&as_ctx);
910         dout("atomic_open result=%d\n", err);
911         return err;
912 }
913
914 int ceph_release(struct inode *inode, struct file *file)
915 {
916         struct ceph_inode_info *ci = ceph_inode(inode);
917
918         if (S_ISDIR(inode->i_mode)) {
919                 struct ceph_dir_file_info *dfi = file->private_data;
920                 dout("release inode %p dir file %p\n", inode, file);
921                 WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
922
923                 ceph_put_fmode(ci, dfi->file_info.fmode, 1);
924
925                 if (dfi->last_readdir)
926                         ceph_mdsc_put_request(dfi->last_readdir);
927                 kfree(dfi->last_name);
928                 kfree(dfi->dir_info);
929                 kmem_cache_free(ceph_dir_file_cachep, dfi);
930         } else {
931                 struct ceph_file_info *fi = file->private_data;
932                 dout("release inode %p regular file %p\n", inode, file);
933                 WARN_ON(!list_empty(&fi->rw_contexts));
934
935                 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
936                 ceph_put_fmode(ci, fi->fmode, 1);
937
938                 kmem_cache_free(ceph_file_cachep, fi);
939         }
940
941         /* wake up anyone waiting for caps on this inode */
942         wake_up_all(&ci->i_cap_wq);
943         return 0;
944 }
945
946 enum {
947         HAVE_RETRIED = 1,
948         CHECK_EOF =    2,
949         READ_INLINE =  3,
950 };
951
952 /*
953  * Completely synchronous read and write methods.  Direct from __user
954  * buffer to osd, or directly to user pages (if O_DIRECT).
955  *
956  * If the read spans object boundary, just do multiple reads.  (That's not
957  * atomic, but good enough for now.)
958  *
959  * If we get a short result from the OSD, check against i_size; we need to
960  * only return a short read to the caller if we hit EOF.
961  */
962 ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
963                          struct iov_iter *to, int *retry_op,
964                          u64 *last_objver)
965 {
966         struct ceph_inode_info *ci = ceph_inode(inode);
967         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
968         struct ceph_osd_client *osdc = &fsc->client->osdc;
969         ssize_t ret;
970         u64 off = *ki_pos;
971         u64 len = iov_iter_count(to);
972         u64 i_size = i_size_read(inode);
973         bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
974         u64 objver = 0;
975
976         dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len);
977
978         if (ceph_inode_is_shutdown(inode))
979                 return -EIO;
980
981         if (!len)
982                 return 0;
983         /*
984          * flush any page cache pages in this range.  this
985          * will make concurrent normal and sync io slow,
986          * but it will at least behave sensibly when they are
987          * in sequence.
988          */
989         ret = filemap_write_and_wait_range(inode->i_mapping,
990                                            off, off + len - 1);
991         if (ret < 0)
992                 return ret;
993
994         ret = 0;
995         while ((len = iov_iter_count(to)) > 0) {
996                 struct ceph_osd_request *req;
997                 struct page **pages;
998                 int num_pages;
999                 size_t page_off;
1000                 bool more;
1001                 int idx;
1002                 size_t left;
1003                 struct ceph_osd_req_op *op;
1004                 u64 read_off = off;
1005                 u64 read_len = len;
1006
1007                 /* determine new offset/length if encrypted */
1008                 ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
1009
1010                 dout("sync_read orig %llu~%llu reading %llu~%llu\n",
1011                      off, len, read_off, read_len);
1012
1013                 req = ceph_osdc_new_request(osdc, &ci->i_layout,
1014                                         ci->i_vino, read_off, &read_len, 0, 1,
1015                                         sparse ? CEPH_OSD_OP_SPARSE_READ :
1016                                                  CEPH_OSD_OP_READ,
1017                                         CEPH_OSD_FLAG_READ,
1018                                         NULL, ci->i_truncate_seq,
1019                                         ci->i_truncate_size, false);
1020                 if (IS_ERR(req)) {
1021                         ret = PTR_ERR(req);
1022                         break;
1023                 }
1024
1025                 /* adjust len downward if the request truncated the len */
1026                 if (off + len > read_off + read_len)
1027                         len = read_off + read_len - off;
1028                 more = len < iov_iter_count(to);
1029
1030                 num_pages = calc_pages_for(read_off, read_len);
1031                 page_off = offset_in_page(off);
1032                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1033                 if (IS_ERR(pages)) {
1034                         ceph_osdc_put_request(req);
1035                         ret = PTR_ERR(pages);
1036                         break;
1037                 }
1038
1039                 osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
1040                                                  offset_in_page(read_off),
1041                                                  false, false);
1042
1043                 op = &req->r_ops[0];
1044                 if (sparse) {
1045                         ret = ceph_alloc_sparse_ext_map(op);
1046                         if (ret) {
1047                                 ceph_osdc_put_request(req);
1048                                 break;
1049                         }
1050                 }
1051
1052                 ceph_osdc_start_request(osdc, req);
1053                 ret = ceph_osdc_wait_request(osdc, req);
1054
1055                 ceph_update_read_metrics(&fsc->mdsc->metric,
1056                                          req->r_start_latency,
1057                                          req->r_end_latency,
1058                                          read_len, ret);
1059
1060                 if (ret > 0)
1061                         objver = req->r_version;
1062
1063                 i_size = i_size_read(inode);
1064                 dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
1065                      off, len, ret, i_size, (more ? " MORE" : ""));
1066
1067                 /* Fix it to go to end of extent map */
1068                 if (sparse && ret >= 0)
1069                         ret = ceph_sparse_ext_map_end(op);
1070                 else if (ret == -ENOENT)
1071                         ret = 0;
1072
1073                 if (ret > 0 && IS_ENCRYPTED(inode)) {
1074                         int fret;
1075
1076                         fret = ceph_fscrypt_decrypt_extents(inode, pages,
1077                                         read_off, op->extent.sparse_ext,
1078                                         op->extent.sparse_ext_cnt);
1079                         if (fret < 0) {
1080                                 ret = fret;
1081                                 ceph_osdc_put_request(req);
1082                                 break;
1083                         }
1084
1085                         /* account for any partial block at the beginning */
1086                         fret -= (off - read_off);
1087
1088                         /*
1089                          * Short read after big offset adjustment?
1090                          * Nothing is usable, just call it a zero
1091                          * len read.
1092                          */
1093                         fret = max(fret, 0);
1094
1095                         /* account for partial block at the end */
1096                         ret = min_t(ssize_t, fret, len);
1097                 }
1098
1099                 ceph_osdc_put_request(req);
1100
1101                 /* Short read but not EOF? Zero out the remainder. */
1102                 if (ret >= 0 && ret < len && (off + ret < i_size)) {
1103                         int zlen = min(len - ret, i_size - off - ret);
1104                         int zoff = page_off + ret;
1105
1106                         dout("sync_read zero gap %llu~%llu\n",
1107                                 off + ret, off + ret + zlen);
1108                         ceph_zero_page_vector_range(zoff, zlen, pages);
1109                         ret += zlen;
1110                 }
1111
1112                 idx = 0;
1113                 left = ret > 0 ? ret : 0;
1114                 while (left > 0) {
1115                         size_t plen, copied;
1116
1117                         plen = min_t(size_t, left, PAGE_SIZE - page_off);
1118                         SetPageUptodate(pages[idx]);
1119                         copied = copy_page_to_iter(pages[idx++],
1120                                                    page_off, plen, to);
1121                         off += copied;
1122                         left -= copied;
1123                         page_off = 0;
1124                         if (copied < plen) {
1125                                 ret = -EFAULT;
1126                                 break;
1127                         }
1128                 }
1129                 ceph_release_page_vector(pages, num_pages);
1130
1131                 if (ret < 0) {
1132                         if (ret == -EBLOCKLISTED)
1133                                 fsc->blocklisted = true;
1134                         break;
1135                 }
1136
1137                 if (off >= i_size || !more)
1138                         break;
1139         }
1140
1141         if (ret > 0) {
1142                 if (off > *ki_pos) {
1143                         if (off >= i_size) {
1144                                 *retry_op = CHECK_EOF;
1145                                 ret = i_size - *ki_pos;
1146                                 *ki_pos = i_size;
1147                         } else {
1148                                 ret = off - *ki_pos;
1149                                 *ki_pos = off;
1150                         }
1151                 }
1152
1153                 if (last_objver)
1154                         *last_objver = objver;
1155         }
1156         dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
1157         return ret;
1158 }
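
The object-boundary splitting described in the comment above
__ceph_sync_read() comes from ceph_osdc_new_request(), which truncates
read_len at an object boundary via the striper (striper.h is included at the
top of this file).  A hypothetical example with the default layout, 4 MB
objects and stripe_count 1:

	u64 objno, objoff;
	u32 xlen;

	/* a 2 MB read starting 1 MB before the first object boundary */
	ceph_calc_file_object_mapping(&ci->i_layout, 3 * SZ_1M, 2 * SZ_1M,
				      &objno, &objoff, &xlen);
	/* yields objno 0, objoff 3 MB, xlen 1 MB: the request stops at the
	 * boundary, and the loop issues a second request for the last 1 MB */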
1159
1160 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
1161                               int *retry_op)
1162 {
1163         struct file *file = iocb->ki_filp;
1164         struct inode *inode = file_inode(file);
1165
1166         dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos,
1167              iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
1168
1169         return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
1170 }
1171
1172 struct ceph_aio_request {
1173         struct kiocb *iocb;
1174         size_t total_len;
1175         bool write;
1176         bool should_dirty;
1177         int error;
1178         struct list_head osd_reqs;
1179         unsigned num_reqs;
1180         atomic_t pending_reqs;
1181         struct timespec64 mtime;
1182         struct ceph_cap_flush *prealloc_cf;
1183 };
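
One OSD request is allocated per chunk a direct AIO touches; pending_reqs
counts the in-flight ones and the last completion finishes the iocb (see
ceph_aio_complete() below).  The pattern in outline, with the submission side
simplified:

	/* at submission, once per OSD request: */
	atomic_inc(&aio_req->pending_reqs);
	/* in each request's r_callback: */
	ceph_aio_complete_req(req);	/* ends in ceph_aio_complete(), which
					 * atomic_dec_and_test()s pending_reqs
					 * and completes the iocb on the last drop */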
1184
1185 struct ceph_aio_work {
1186         struct work_struct work;
1187         struct ceph_osd_request *req;
1188 };
1189
1190 static void ceph_aio_retry_work(struct work_struct *work);
1191
1192 static void ceph_aio_complete(struct inode *inode,
1193                               struct ceph_aio_request *aio_req)
1194 {
1195         struct ceph_inode_info *ci = ceph_inode(inode);
1196         int ret;
1197
1198         if (!atomic_dec_and_test(&aio_req->pending_reqs))
1199                 return;
1200
1201         if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1202                 inode_dio_end(inode);
1203
1204         ret = aio_req->error;
1205         if (!ret)
1206                 ret = aio_req->total_len;
1207
1208         dout("ceph_aio_complete %p rc %d\n", inode, ret);
1209
1210         if (ret >= 0 && aio_req->write) {
1211                 int dirty;
1212
1213                 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1214                 if (endoff > i_size_read(inode)) {
1215                         if (ceph_inode_set_size(inode, endoff))
1216                                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
1217                 }
1218
1219                 spin_lock(&ci->i_ceph_lock);
1220                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1221                                                &aio_req->prealloc_cf);
1222                 spin_unlock(&ci->i_ceph_lock);
1223                 if (dirty)
1224                         __mark_inode_dirty(inode, dirty);
1225
1226         }
1227
1228         ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1229                                                 CEPH_CAP_FILE_RD));
1230
1231         aio_req->iocb->ki_complete(aio_req->iocb, ret);
1232
1233         ceph_free_cap_flush(aio_req->prealloc_cf);
1234         kfree(aio_req);
1235 }
1236
1237 static void ceph_aio_complete_req(struct ceph_osd_request *req)
1238 {
1239         int rc = req->r_result;
1240         struct inode *inode = req->r_inode;
1241         struct ceph_aio_request *aio_req = req->r_priv;
1242         struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1243         struct ceph_osd_req_op *op = &req->r_ops[0];
1244         struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
1245         unsigned int len = osd_data->bvec_pos.iter.bi_size;
1246         bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
1247
1248         BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1249         BUG_ON(!osd_data->num_bvecs);
1250
1251         dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
1252
1253         if (rc == -EOLDSNAPC) {
1254                 struct ceph_aio_work *aio_work;
1255                 BUG_ON(!aio_req->write);
1256
1257                 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1258                 if (aio_work) {
1259                         INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1260                         aio_work->req = req;
1261                         queue_work(ceph_inode_to_client(inode)->inode_wq,
1262                                    &aio_work->work);
1263                         return;
1264                 }
1265                 rc = -ENOMEM;
1266         } else if (!aio_req->write) {
1267                 if (sparse && rc >= 0)
1268                         rc = ceph_sparse_ext_map_end(op);
1269                 if (rc == -ENOENT)
1270                         rc = 0;
1271                 if (rc >= 0 && len > rc) {
1272                         struct iov_iter i;
1273                         int zlen = len - rc;
1274
1275                         /*
1276                          * If the read is satisfied by a single OSD
1277                          * request, it may extend past EOF. Otherwise the
1278                          * read is within i_size.
1279                          */
1280                         if (aio_req->num_reqs == 1) {
1281                                 loff_t i_size = i_size_read(inode);
1282                                 loff_t endoff = aio_req->iocb->ki_pos + rc;
1283                                 if (endoff < i_size)
1284                                         zlen = min_t(size_t, zlen,
1285                                                      i_size - endoff);
1286                                 aio_req->total_len = rc + zlen;
1287                         }
1288
1289                         iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
1290                                       osd_data->num_bvecs, len);
1291                         iov_iter_advance(&i, rc);
1292                         iov_iter_zero(zlen, &i);
1293                 }
1294         }
1295
1296         /* r_start_latency == 0 means the request was not submitted */
1297         if (req->r_start_latency) {
1298                 if (aio_req->write)
1299                         ceph_update_write_metrics(metric, req->r_start_latency,
1300                                                   req->r_end_latency, len, rc);
1301                 else
1302                         ceph_update_read_metrics(metric, req->r_start_latency,
1303                                                  req->r_end_latency, len, rc);
1304         }
1305
1306         put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1307                   aio_req->should_dirty);
1308         ceph_osdc_put_request(req);
1309
1310         if (rc < 0)
1311                 cmpxchg(&aio_req->error, 0, rc);
1312
1313         ceph_aio_complete(inode, aio_req);
1314         return;
1315 }
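
A worked instance of the short-read zero-fill above: say a 4 MB chunk
(len = 4 MB) of a multi-request read returns rc = 1 MB because the object is
sparse past that point.  The read is within i_size, so the missing 3 MB
(zlen = len - rc) must read back as zeroes:

	iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
		      osd_data->num_bvecs, len);	/* window over the 4 MB */
	iov_iter_advance(&i, rc);			/* skip the 1 MB returned */
	iov_iter_zero(zlen, &i);			/* hole reads as zeroes */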
1316
1317 static void ceph_aio_retry_work(struct work_struct *work)
1318 {
1319         struct ceph_aio_work *aio_work =
1320                 container_of(work, struct ceph_aio_work, work);
1321         struct ceph_osd_request *orig_req = aio_work->req;
1322         struct ceph_aio_request *aio_req = orig_req->r_priv;
1323         struct inode *inode = orig_req->r_inode;
1324         struct ceph_inode_info *ci = ceph_inode(inode);
1325         struct ceph_snap_context *snapc;
1326         struct ceph_osd_request *req;
1327         int ret;
1328
1329         spin_lock(&ci->i_ceph_lock);
1330         if (__ceph_have_pending_cap_snap(ci)) {
1331                 struct ceph_cap_snap *capsnap =
1332                         list_last_entry(&ci->i_cap_snaps,
1333                                         struct ceph_cap_snap,
1334                                         ci_item);
1335                 snapc = ceph_get_snap_context(capsnap->context);
1336         } else {
1337                 BUG_ON(!ci->i_head_snapc);
1338                 snapc = ceph_get_snap_context(ci->i_head_snapc);
1339         }
1340         spin_unlock(&ci->i_ceph_lock);
1341
1342         req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1343                         false, GFP_NOFS);
1344         if (!req) {
1345                 ret = -ENOMEM;
1346                 req = orig_req;
1347                 goto out;
1348         }
1349
1350         req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1351         ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1352         ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1353
1354         req->r_ops[0] = orig_req->r_ops[0];
1355
1356         req->r_mtime = aio_req->mtime;
1357         req->r_data_offset = req->r_ops[0].extent.offset;
1358
1359         ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1360         if (ret) {
1361                 ceph_osdc_put_request(req);
1362                 req = orig_req;
1363                 goto out;
1364         }
1365
1366         ceph_osdc_put_request(orig_req);
1367
1368         req->r_callback = ceph_aio_complete_req;
1369         req->r_inode = inode;
1370         req->r_priv = aio_req;
1371
1372         ceph_osdc_start_request(req->r_osdc, req);
1373 out:
1374         if (ret < 0) {
1375                 req->r_result = ret;
1376                 ceph_aio_complete_req(req);
1377         }
1378
1379         ceph_put_snap_context(snapc);
1380         kfree(aio_work);
1381 }
1382
1383 static ssize_t
1384 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1385                        struct ceph_snap_context *snapc,
1386                        struct ceph_cap_flush **pcf)
1387 {
1388         struct file *file = iocb->ki_filp;
1389         struct inode *inode = file_inode(file);
1390         struct ceph_inode_info *ci = ceph_inode(inode);
1391         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1392         struct ceph_client_metric *metric = &fsc->mdsc->metric;
1393         struct ceph_vino vino;
1394         struct ceph_osd_request *req;
1395         struct bio_vec *bvecs;
1396         struct ceph_aio_request *aio_req = NULL;
1397         int num_pages = 0;
1398         int flags;
1399         int ret = 0;
1400         struct timespec64 mtime = current_time(inode);
1401         size_t count = iov_iter_count(iter);
1402         loff_t pos = iocb->ki_pos;
1403         bool write = iov_iter_rw(iter) == WRITE;
1404         bool should_dirty = !write && user_backed_iter(iter);
1405         bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);
1406
1407         if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1408                 return -EROFS;
1409
1410         dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1411              (write ? "write" : "read"), file, pos, (unsigned)count,
1412              snapc, snapc ? snapc->seq : 0);
1413
1414         if (write) {
1415                 int ret2;
1416
1417                 ceph_fscache_invalidate(inode, true);
1418
1419                 ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1420                                         pos >> PAGE_SHIFT,
1421                                         (pos + count - 1) >> PAGE_SHIFT);
1422                 if (ret2 < 0)
1423                         dout("invalidate_inode_pages2_range returned %d\n", ret2);
1424
1425                 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1426         } else {
1427                 flags = CEPH_OSD_FLAG_READ;
1428         }
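        /*
         * Note: the pre-write invalidation above is best-effort (a failure
         * is only logged); each chunk's range is purged again with
         * truncate_inode_pages_range() before its OSD write is issued.
         */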
1429
1430         while (iov_iter_count(iter) > 0) {
1431                 u64 size = iov_iter_count(iter);
1432                 ssize_t len;
1433                 struct ceph_osd_req_op *op;
1434                 int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;
1435
1436                 if (write)
1437                         size = min_t(u64, size, fsc->mount_options->wsize);
1438                 else
1439                         size = min_t(u64, size, fsc->mount_options->rsize);
1440
1441                 vino = ceph_vino(inode);
1442                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1443                                             vino, pos, &size, 0,
1444                                             1,
1445                                             write ? CEPH_OSD_OP_WRITE : readop,
1446                                             flags, snapc,
1447                                             ci->i_truncate_seq,
1448                                             ci->i_truncate_size,
1449                                             false);
1450                 if (IS_ERR(req)) {
1451                         ret = PTR_ERR(req);
1452                         break;
1453                 }
1454
1455                 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1456                 if (len < 0) {
1457                         ceph_osdc_put_request(req);
1458                         ret = len;
1459                         break;
1460                 }
1461                 if (len != size)
1462                         osd_req_op_extent_update(req, 0, len);
1463
1464                 /*
1465                  * To simplify error handling, allow AIO only when the IO is
1466                  * within i_size or can be satisfied by a single OSD request.
1467                  */
1468                 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1469                     (len == count || pos + count <= i_size_read(inode))) {
1470                         aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1471                         if (aio_req) {
1472                                 aio_req->iocb = iocb;
1473                                 aio_req->write = write;
1474                                 aio_req->should_dirty = should_dirty;
1475                                 INIT_LIST_HEAD(&aio_req->osd_reqs);
1476                                 if (write) {
1477                                         aio_req->mtime = mtime;
1478                                         swap(aio_req->prealloc_cf, *pcf);
1479                                 }
1480                         }
1481                         /* ignore allocation failure; fall back to sync IO */
1482                 }
1483
1484                 if (write) {
1485                         /*
1486                          * Throw out any page cache pages in this range;
1487                          * this may block.
1488                          */
1489                         truncate_inode_pages_range(inode->i_mapping, pos,
1490                                                    PAGE_ALIGN(pos + len) - 1);
1491
1492                         req->r_mtime = mtime;
1493                 }
1494
1495                 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1496                 op = &req->r_ops[0];
1497                 if (sparse) {
1498                         ret = ceph_alloc_sparse_ext_map(op);
1499                         if (ret) {
1500                                 ceph_osdc_put_request(req);
1501                                 break;
1502                         }
1503                 }
1504
1505                 if (aio_req) {
1506                         aio_req->total_len += len;
1507                         aio_req->num_reqs++;
1508                         atomic_inc(&aio_req->pending_reqs);
1509
1510                         req->r_callback = ceph_aio_complete_req;
1511                         req->r_inode = inode;
1512                         req->r_priv = aio_req;
1513                         list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1514
1515                         pos += len;
1516                         continue;
1517                 }
1518
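                /*
                 * Synchronous (non-AIO) path: issue the request and wait
                 * for its completion inline, then record the latency in
                 * the client metrics.
                 */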
1519                 ceph_osdc_start_request(req->r_osdc, req);
1520                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1521
1522                 if (write)
1523                         ceph_update_write_metrics(metric, req->r_start_latency,
1524                                                   req->r_end_latency, len, ret);
1525                 else
1526                         ceph_update_read_metrics(metric, req->r_start_latency,
1527                                                  req->r_end_latency, len, ret);
1528
1529                 size = i_size_read(inode);
1530                 if (!write) {
1531                         if (sparse && ret >= 0)
1532                                 ret = ceph_sparse_ext_map_end(op);
1533                         else if (ret == -ENOENT)
1534                                 ret = 0;
1535
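                        /*
                         * Short read within i_size: zero-fill the rest of
                         * the user buffer. E.g. (illustrative) if len is
                         * 8192 but only 4096 bytes came back and i_size
                         * covers the range, zlen = 4096 and ret is bumped
                         * back up to 8192.
                         */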
1536                         if (ret >= 0 && ret < len && pos + ret < size) {
1537                                 struct iov_iter i;
1538                                 int zlen = min_t(size_t, len - ret,
1539                                                  size - pos - ret);
1540
1541                                 iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
1542                                 iov_iter_advance(&i, ret);
1543                                 iov_iter_zero(zlen, &i);
1544                                 ret += zlen;
1545                         }
1546                         if (ret >= 0)
1547                                 len = ret;
1548                 }
1549
1550                 put_bvecs(bvecs, num_pages, should_dirty);
1551                 ceph_osdc_put_request(req);
1552                 if (ret < 0)
1553                         break;
1554
1555                 pos += len;
1556                 if (!write && pos >= size)
1557                         break;
1558
1559                 if (write && pos > size) {
1560                         if (ceph_inode_set_size(inode, pos))
1561                                 ceph_check_caps(ceph_inode(inode),
1562                                                 CHECK_CAPS_AUTHONLY);
1563                 }
1564         }
1565
1566         if (aio_req) {
1567                 LIST_HEAD(osd_reqs);
1568
1569                 if (aio_req->num_reqs == 0) {
1570                         kfree(aio_req);
1571                         return ret;
1572                 }
1573
1574                 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1575                                               CEPH_CAP_FILE_RD);
1576
1577                 list_splice(&aio_req->osd_reqs, &osd_reqs);
1578                 inode_dio_begin(inode);
1579                 while (!list_empty(&osd_reqs)) {
1580                         req = list_first_entry(&osd_reqs,
1581                                                struct ceph_osd_request,
1582                                                r_private_item);
1583                         list_del_init(&req->r_private_item);
1584                         if (ret >= 0)
1585                                 ceph_osdc_start_request(req->r_osdc, req);
1586                         if (ret < 0) {
1587                                 req->r_result = ret;
1588                                 ceph_aio_complete_req(req);
1589                         }
1590                 }
1591                 return -EIOCBQUEUED;
1592         }
1593
1594         if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1595                 ret = pos - iocb->ki_pos;
1596                 iocb->ki_pos = pos;
1597         }
1598         return ret;
1599 }
1600
1601 /*
1602  * Synchronous write, straight from __user pointer or user pages.
1603  *
1604  * If write spans object boundary, just do multiple writes.  (For a
1605  * correct atomic write, we should e.g. take write locks on all
1606  * objects, rollback on failure, etc.)
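 *
 * E.g. (illustrative), with a 4 MB object size and default striping, a
 * 2 MB write at offset 3 MB becomes two OSD writes: 1 MB to the end of
 * the first object, then 1 MB to the start of the next.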
1607  */
1608 static ssize_t
1609 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1610                 struct ceph_snap_context *snapc)
1611 {
1612         struct file *file = iocb->ki_filp;
1613         struct inode *inode = file_inode(file);
1614         struct ceph_inode_info *ci = ceph_inode(inode);
1615         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1616         struct ceph_osd_client *osdc = &fsc->client->osdc;
1617         struct ceph_osd_request *req;
1618         struct page **pages;
1619         u64 len;
1620         int num_pages;
1621         int written = 0;
1622         int ret;
1623         bool check_caps = false;
1624         struct timespec64 mtime = current_time(inode);
1625         size_t count = iov_iter_count(from);
1626
1627         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1628                 return -EROFS;
1629
1630         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1631              file, pos, (unsigned)count, snapc, snapc->seq);
1632
1633         ret = filemap_write_and_wait_range(inode->i_mapping,
1634                                            pos, pos + count - 1);
1635         if (ret < 0)
1636                 return ret;
1637
1638         ceph_fscache_invalidate(inode, false);
1639
1640         while ((len = iov_iter_count(from)) > 0) {
1641                 size_t left;
1642                 int n;
1643                 u64 write_pos = pos;
1644                 u64 write_len = len;
1645                 u64 objnum, objoff;
1646                 u32 xlen;
1647                 u64 assert_ver = 0;
1648                 bool rmw;
1649                 bool first, last;
1650                 struct iov_iter saved_iter = *from;
1651                 size_t off;
1652
1653                 ceph_fscrypt_adjust_off_and_len(inode, &write_pos, &write_len);
1654
1655                 /* clamp the length to the end of first object */
1656                 ceph_calc_file_object_mapping(&ci->i_layout, write_pos,
1657                                               write_len, &objnum, &objoff,
1658                                               &xlen);
1659                 write_len = xlen;
1660
1661                 /* adjust len downward if it goes beyond current object */
1662                 if (pos + len > write_pos + write_len)
1663                         len = write_pos + write_len - pos;
1664
1665                 /*
1666                  * If we had to adjust the length or position to align with a
1667                  * crypto block, then we must do a read/modify/write cycle. We
1668                  * use a version assertion to redrive the thing if something
1669                  * changes in between.
1670                  */
1671                 first = pos != write_pos;
1672                 last = (pos + len) != (write_pos + write_len);
1673                 rmw = first || last;
1674
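                /*
                 * E.g. (illustrative), with a 4K fscrypt block: a write of
                 * 1000 bytes at pos 500 expands to write_pos 0 and
                 * write_len 4096, so first and last are both true and the
                 * partially-overwritten block must be read in first.
                 */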
1675                 dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
1676                      ci->i_vino.ino, pos, len, write_pos, write_len,
1677                      rmw ? "" : "no ");
1678
1679                 /*
1680                  * The data is laid out in the page vector exactly as it
1681                  * would be in an array of pagecache pages.
1682                  */
1683                 num_pages = calc_pages_for(write_pos, write_len);
1684                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1685                 if (IS_ERR(pages)) {
1686                         ret = PTR_ERR(pages);
1687                         break;
1688                 }
1689
1690                 /* Do we need to preload the pages? */
1691                 if (rmw) {
1692                         u64 first_pos = write_pos;
1693                         u64 last_pos = (write_pos + write_len) - CEPH_FSCRYPT_BLOCK_SIZE;
1694                         u64 read_len = CEPH_FSCRYPT_BLOCK_SIZE;
1695                         struct ceph_osd_req_op *op;
1696
1697                         /* We should only need to do this for encrypted inodes */
1698                         WARN_ON_ONCE(!IS_ENCRYPTED(inode));
1699
1700                         /* No need to do two reads if first and last blocks are same */
1701                         if (first && last_pos == first_pos)
1702                                 last = false;
1703
1704                         /*
1705                          * Allocate a read request for one or two extents,
1706                          * depending on how the request was aligned.
1707                          */
1708                         req = ceph_osdc_new_request(osdc, &ci->i_layout,
1709                                         ci->i_vino, first ? first_pos : last_pos,
1710                                         &read_len, 0, (first && last) ? 2 : 1,
1711                                         CEPH_OSD_OP_SPARSE_READ, CEPH_OSD_FLAG_READ,
1712                                         NULL, ci->i_truncate_seq,
1713                                         ci->i_truncate_size, false);
1714                         if (IS_ERR(req)) {
1715                                 ceph_release_page_vector(pages, num_pages);
1716                                 ret = PTR_ERR(req);
1717                                 break;
1718                         }
1719
1720                         /* Something is misaligned! */
1721                         if (read_len != CEPH_FSCRYPT_BLOCK_SIZE) {
1722                                 ceph_osdc_put_request(req);
1723                                 ceph_release_page_vector(pages, num_pages);
1724                                 ret = -EIO;
1725                                 break;
1726                         }
1727
1728                         /* Add extent for first block? */
1729                         op = &req->r_ops[0];
1730
1731                         if (first) {
1732                                 osd_req_op_extent_osd_data_pages(req, 0, pages,
1733                                                          CEPH_FSCRYPT_BLOCK_SIZE,
1734                                                          offset_in_page(first_pos),
1735                                                          false, false);
1736                                 /* We only expect a single extent here */
1737                                 ret = __ceph_alloc_sparse_ext_map(op, 1);
1738                                 if (ret) {
1739                                         ceph_osdc_put_request(req);
1740                                         ceph_release_page_vector(pages, num_pages);
1741                                         break;
1742                                 }
1743                         }
1744
1745                         /* Add extent for last block */
1746                         if (last) {
1747                                 /* Init the other extent if first extent has been used */
1748                                 if (first) {
1749                                         op = &req->r_ops[1];
1750                                         osd_req_op_extent_init(req, 1,
1751                                                         CEPH_OSD_OP_SPARSE_READ,
1752                                                         last_pos, CEPH_FSCRYPT_BLOCK_SIZE,
1753                                                         ci->i_truncate_size,
1754                                                         ci->i_truncate_seq);
1755                                 }
1756
1757                                 ret = __ceph_alloc_sparse_ext_map(op, 1);
1758                                 if (ret) {
1759                                         ceph_osdc_put_request(req);
1760                                         ceph_release_page_vector(pages, num_pages);
1761                                         break;
1762                                 }
1763
1764                                 osd_req_op_extent_osd_data_pages(req, first ? 1 : 0,
1765                                                         &pages[num_pages - 1],
1766                                                         CEPH_FSCRYPT_BLOCK_SIZE,
1767                                                         offset_in_page(last_pos),
1768                                                         false, false);
1769                         }
1770
1771                         ceph_osdc_start_request(osdc, req);
1772                         ret = ceph_osdc_wait_request(osdc, req);
1773
1774                         /* FIXME: length field is wrong if there are 2 extents */
1775                         ceph_update_read_metrics(&fsc->mdsc->metric,
1776                                                  req->r_start_latency,
1777                                                  req->r_end_latency,
1778                                                  read_len, ret);
1779
1780                         /* Ok if object is not already present */
1781                         if (ret == -ENOENT) {
1782                                 /*
1783                                  * If there is no object, then we can't assert
1784                                  * on its version. Set it to 0, and we'll use an
1785                                  * exclusive create instead.
1786                                  */
1787                                 ceph_osdc_put_request(req);
1788                                 ret = 0;
1789
1790                                 /*
1791                                  * Zero out the parts of the first and last
1792                                  * pages that the copy below won't overwrite.
1793                                  */
1794                                 if (first)
1795                                         zero_user_segment(pages[0], 0,
1796                                                           offset_in_page(first_pos));
1797                                 if (last)
1798                                         zero_user_segment(pages[num_pages - 1],
1799                                                           offset_in_page(last_pos),
1800                                                           PAGE_SIZE);
1801                         } else {
1802                                 if (ret < 0) {
1803                                         ceph_osdc_put_request(req);
1804                                         ceph_release_page_vector(pages, num_pages);
1805                                         break;
1806                                 }
1807
1808                                 op = &req->r_ops[0];
1809                                 if (op->extent.sparse_ext_cnt == 0) {
1810                                         if (first)
1811                                                 zero_user_segment(pages[0], 0,
1812                                                                   offset_in_page(first_pos));
1813                                         else
1814                                                 zero_user_segment(pages[num_pages - 1],
1815                                                                   offset_in_page(last_pos),
1816                                                                   PAGE_SIZE);
1817                                 } else if (op->extent.sparse_ext_cnt != 1 ||
1818                                            ceph_sparse_ext_map_end(op) !=
1819                                                 CEPH_FSCRYPT_BLOCK_SIZE) {
1820                                         ret = -EIO;
1821                                         ceph_osdc_put_request(req);
1822                                         ceph_release_page_vector(pages, num_pages);
1823                                         break;
1824                                 }
1825
1826                                 if (first && last) {
1827                                         op = &req->r_ops[1];
1828                                         if (op->extent.sparse_ext_cnt == 0) {
1829                                                 zero_user_segment(pages[num_pages - 1],
1830                                                                   offset_in_page(last_pos),
1831                                                                   PAGE_SIZE);
1832                                         } else if (op->extent.sparse_ext_cnt != 1 ||
1833                                                    ceph_sparse_ext_map_end(op) !=
1834                                                         CEPH_FSCRYPT_BLOCK_SIZE) {
1835                                                 ret = -EIO;
1836                                                 ceph_osdc_put_request(req);
1837                                                 ceph_release_page_vector(pages, num_pages);
1838                                                 break;
1839                                         }
1840                                 }
1841
1842                                 /* Grab assert version. It must be non-zero. */
1843                                 assert_ver = req->r_version;
1844                                 WARN_ON_ONCE(ret > 0 && assert_ver == 0);
1845
1846                                 ceph_osdc_put_request(req);
1847                                 if (first) {
1848                                         ret = ceph_fscrypt_decrypt_block_inplace(inode,
1849                                                         pages[0], CEPH_FSCRYPT_BLOCK_SIZE,
1850                                                         offset_in_page(first_pos),
1851                                                         first_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
1852                                         if (ret < 0) {
1853                                                 ceph_release_page_vector(pages, num_pages);
1854                                                 break;
1855                                         }
1856                                 }
1857                                 if (last) {
1858                                         ret = ceph_fscrypt_decrypt_block_inplace(inode,
1859                                                         pages[num_pages - 1],
1860                                                         CEPH_FSCRYPT_BLOCK_SIZE,
1861                                                         offset_in_page(last_pos),
1862                                                         last_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
1863                                         if (ret < 0) {
1864                                                 ceph_release_page_vector(pages, num_pages);
1865                                                 break;
1866                                         }
1867                                 }
1868                         }
1869                 }
1870
1871                 left = len;
1872                 off = offset_in_page(pos);
1873                 for (n = 0; n < num_pages; n++) {
1874                         size_t plen = min_t(size_t, left, PAGE_SIZE - off);
1875
1876                         /* copy the data */
1877                         ret = copy_page_from_iter(pages[n], off, plen, from);
1878                         if (ret != plen) {
1879                                 ret = -EFAULT;
1880                                 break;
1881                         }
1882                         off = 0;
1883                         left -= ret;
1884                 }
1885                 if (ret < 0) {
1886                         dout("sync_write write failed with %d\n", ret);
1887                         ceph_release_page_vector(pages, num_pages);
1888                         break;
1889                 }
1890
1891                 if (IS_ENCRYPTED(inode)) {
1892                         ret = ceph_fscrypt_encrypt_pages(inode, pages,
1893                                                          write_pos, write_len,
1894                                                          GFP_KERNEL);
1895                         if (ret < 0) {
1896                                 dout("encryption failed with %d\n", ret);
1897                                 ceph_release_page_vector(pages, num_pages);
1898                                 break;
1899                         }
1900                 }
1901
1902                 req = ceph_osdc_new_request(osdc, &ci->i_layout,
1903                                             ci->i_vino, write_pos, &write_len,
1904                                             rmw ? 1 : 0, rmw ? 2 : 1,
1905                                             CEPH_OSD_OP_WRITE,
1906                                             CEPH_OSD_FLAG_WRITE,
1907                                             snapc, ci->i_truncate_seq,
1908                                             ci->i_truncate_size, false);
1909                 if (IS_ERR(req)) {
1910                         ret = PTR_ERR(req);
1911                         ceph_release_page_vector(pages, num_pages);
1912                         break;
1913                 }
1914
1915                 dout("sync_write write op %lld~%llu\n", write_pos, write_len);
1916                 osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
1917                                                  offset_in_page(write_pos), false,
1918                                                  true);
1919                 req->r_inode = inode;
1920                 req->r_mtime = mtime;
1921
1922                 /* Set up the assertion */
1923                 if (rmw) {
1924                         /*
1925                          * Set up the assertion. If we don't have a version
1926                          * number, then the object doesn't exist yet. Use an
1927                          * exclusive create instead of a version assertion in
1928                          * that case.
1929                          */
1930                         if (assert_ver) {
1931                                 osd_req_op_init(req, 0, CEPH_OSD_OP_ASSERT_VER, 0);
1932                                 req->r_ops[0].assert_ver.ver = assert_ver;
1933                         } else {
1934                                 osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE,
1935                                                 CEPH_OSD_OP_FLAG_EXCL);
1936                         }
1937                 }
1938
1939                 ceph_osdc_start_request(osdc, req);
1940                 ret = ceph_osdc_wait_request(osdc, req);
1941
1942                 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1943                                           req->r_end_latency, len, ret);
1944                 ceph_osdc_put_request(req);
1945                 if (ret != 0) {
1946                         dout("sync_write osd write returned %d\n", ret);
1947                         /* Version changed! Must re-do the rmw cycle */
1948                         if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
1949                             (!assert_ver && ret == -EEXIST)) {
1950                                 /* We should only ever see this on a rmw */
1951                                 WARN_ON_ONCE(!rmw);
1952
1953                                 /* The version should never go backward */
1954                                 WARN_ON_ONCE(ret == -EOVERFLOW);
1955
1956                                 *from = saved_iter;
1957
1958                                 /* FIXME: limit number of times we loop? */
1959                                 continue;
1960                         }
1961                         ceph_set_error_write(ci);
1962                         break;
1963                 }
1964
1965                 ceph_clear_error_write(ci);
1966
1967                 /*
1968                  * We successfully wrote to a range of the file. Declare
1969                  * that region of the pagecache invalid.
1970                  */
1971                 ret = invalidate_inode_pages2_range(
1972                                 inode->i_mapping,
1973                                 pos >> PAGE_SHIFT,
1974                                 (pos + len - 1) >> PAGE_SHIFT);
1975                 if (ret < 0) {
1976                         dout("invalidate_inode_pages2_range returned %d\n",
1977                              ret);
1978                         ret = 0;
1979                 }
1980                 pos += len;
1981                 written += len;
1982                 dout("sync_write written %d\n", written);
1983                 if (pos > i_size_read(inode)) {
1984                         check_caps = ceph_inode_set_size(inode, pos);
1985                         if (check_caps)
1986                                 ceph_check_caps(ceph_inode(inode),
1987                                                 CHECK_CAPS_AUTHONLY);
1988                 }
1989
1990         }
1991
1992         if (ret != -EOLDSNAPC && written > 0) {
1993                 ret = written;
1994                 iocb->ki_pos = pos;
1995         }
1996         dout("sync_write returning %d\n", ret);
1997         return ret;
1998 }
1999
2000 /*
2001  * Wrap generic_file_aio_read with checks for cap bits on the inode.
2002  * Atomically grab references, so that those bits are not released
2003  * back to the MDS mid-read.
2004  *
2005  * Hmm, the sync read case isn't actually async... should it be?
2006  */
2007 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
2008 {
2009         struct file *filp = iocb->ki_filp;
2010         struct ceph_file_info *fi = filp->private_data;
2011         size_t len = iov_iter_count(to);
2012         struct inode *inode = file_inode(filp);
2013         struct ceph_inode_info *ci = ceph_inode(inode);
2014         bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
2015         ssize_t ret;
2016         int want = 0, got = 0;
2017         int retry_op = 0, read = 0;
2018
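        /*
         * retry_op tracks why this read may need another pass: READ_INLINE
         * copies out inline data fetched from the MDS, CHECK_EOF re-reads
         * when a short read may have hit a hole below i_size, and
         * HAVE_RETRIED marks that second pass.
         */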
2019 again:
2020         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
2021              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
2022
2023         if (ceph_inode_is_shutdown(inode))
2024                 return -ESTALE;
2025
2026         if (direct_lock)
2027                 ceph_start_io_direct(inode);
2028         else
2029                 ceph_start_io_read(inode);
2030
2031         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
2032                 want |= CEPH_CAP_FILE_CACHE;
2033         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2034                 want |= CEPH_CAP_FILE_LAZYIO;
2035
2036         ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
2037         if (ret < 0) {
2038                 if (direct_lock)
2039                         ceph_end_io_direct(inode);
2040                 else
2041                         ceph_end_io_read(inode);
2042                 return ret;
2043         }
2044
2045         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
2046             (iocb->ki_flags & IOCB_DIRECT) ||
2047             (fi->flags & CEPH_F_SYNC)) {
2048
2049                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
2050                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
2051                      ceph_cap_string(got));
2052
2053                 if (!ceph_has_inline_data(ci)) {
2054                         if (!retry_op &&
2055                             (iocb->ki_flags & IOCB_DIRECT) &&
2056                             !IS_ENCRYPTED(inode)) {
2057                                 ret = ceph_direct_read_write(iocb, to,
2058                                                              NULL, NULL);
2059                                 if (ret >= 0 && ret < len)
2060                                         retry_op = CHECK_EOF;
2061                         } else {
2062                                 ret = ceph_sync_read(iocb, to, &retry_op);
2063                         }
2064                 } else {
2065                         retry_op = READ_INLINE;
2066                 }
2067         } else {
2068                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
2069                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
2070                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
2071                      ceph_cap_string(got));
2072                 ceph_add_rw_context(fi, &rw_ctx);
2073                 ret = generic_file_read_iter(iocb, to);
2074                 ceph_del_rw_context(fi, &rw_ctx);
2075         }
2076
2077         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
2078              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
2079         ceph_put_cap_refs(ci, got);
2080
2081         if (direct_lock)
2082                 ceph_end_io_direct(inode);
2083         else
2084                 ceph_end_io_read(inode);
2085
2086         if (retry_op > HAVE_RETRIED && ret >= 0) {
2087                 int statret;
2088                 struct page *page = NULL;
2089                 loff_t i_size;
2090                 if (retry_op == READ_INLINE) {
2091                         page = __page_cache_alloc(GFP_KERNEL);
2092                         if (!page)
2093                                 return -ENOMEM;
2094                 }
2095
2096                 statret = __ceph_do_getattr(inode, page,
2097                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
2098                 if (statret < 0) {
2099                         if (page)
2100                                 __free_page(page);
2101                         if (statret == -ENODATA) {
2102                                 BUG_ON(retry_op != READ_INLINE);
2103                                 goto again;
2104                         }
2105                         return statret;
2106                 }
2107
2108                 i_size = i_size_read(inode);
2109                 if (retry_op == READ_INLINE) {
2110                         BUG_ON(ret > 0 || read > 0);
2111                         if (iocb->ki_pos < i_size &&
2112                             iocb->ki_pos < PAGE_SIZE) {
2113                                 loff_t end = min_t(loff_t, i_size,
2114                                                    iocb->ki_pos + len);
2115                                 end = min_t(loff_t, end, PAGE_SIZE);
2116                                 if (statret < end)
2117                                         zero_user_segment(page, statret, end);
2118                                 ret = copy_page_to_iter(page,
2119                                                 iocb->ki_pos & ~PAGE_MASK,
2120                                                 end - iocb->ki_pos, to);
2121                                 iocb->ki_pos += ret;
2122                                 read += ret;
2123                         }
2124                         if (iocb->ki_pos < i_size && read < len) {
2125                                 size_t zlen = min_t(size_t, len - read,
2126                                                     i_size - iocb->ki_pos);
2127                                 ret = iov_iter_zero(zlen, to);
2128                                 iocb->ki_pos += ret;
2129                                 read += ret;
2130                         }
2131                         __free_pages(page, 0);
2132                         return read;
2133                 }
2134
2135                 /* hit EOF or hole? */
2136                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
2137                     ret < len) {
2138                         dout("sync_read hit hole, ppos %lld < size %lld"
2139                              ", reading more\n", iocb->ki_pos, i_size);
2140
2141                         read += ret;
2142                         len -= ret;
2143                         retry_op = HAVE_RETRIED;
2144                         goto again;
2145                 }
2146         }
2147
2148         if (ret >= 0)
2149                 ret += read;
2150
2151         return ret;
2152 }
2153
2154 /*
2155  * Wrap filemap_splice_read with checks for cap bits on the inode.
2156  * Atomically grab references, so that those bits are not released
2157  * back to the MDS mid-read.
2158  */
2159 static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
2160                                 struct pipe_inode_info *pipe,
2161                                 size_t len, unsigned int flags)
2162 {
2163         struct ceph_file_info *fi = in->private_data;
2164         struct inode *inode = file_inode(in);
2165         struct ceph_inode_info *ci = ceph_inode(inode);
2166         ssize_t ret;
2167         int want = 0, got = 0;
2168         CEPH_DEFINE_RW_CONTEXT(rw_ctx, 0);
2169
2170         dout("splice_read %p %llx.%llx %llu~%zu trying to get caps on %p\n",
2171              inode, ceph_vinop(inode), *ppos, len, inode);
2172
2173         if (ceph_inode_is_shutdown(inode))
2174                 return -ESTALE;
2175
2176         if (ceph_has_inline_data(ci) ||
2177             (fi->flags & CEPH_F_SYNC))
2178                 return copy_splice_read(in, ppos, pipe, len, flags);
2179
2180         ceph_start_io_read(inode);
2181
2182         want = CEPH_CAP_FILE_CACHE;
2183         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2184                 want |= CEPH_CAP_FILE_LAZYIO;
2185
2186         ret = ceph_get_caps(in, CEPH_CAP_FILE_RD, want, -1, &got);
2187         if (ret < 0)
2188                 goto out_end;
2189
2190         if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) == 0) {
2191                 dout("splice_read/sync %p %llx.%llx %llu~%zu got cap refs on %s\n",
2192                      inode, ceph_vinop(inode), *ppos, len,
2193                      ceph_cap_string(got));
2194
2195                 ceph_put_cap_refs(ci, got);
2196                 ceph_end_io_read(inode);
2197                 return copy_splice_read(in, ppos, pipe, len, flags);
2198         }
2199
2200         dout("splice_read %p %llx.%llx %llu~%zu got cap refs on %s\n",
2201              inode, ceph_vinop(inode), *ppos, len, ceph_cap_string(got));
2202
2203         rw_ctx.caps = got;
2204         ceph_add_rw_context(fi, &rw_ctx);
2205         ret = filemap_splice_read(in, ppos, pipe, len, flags);
2206         ceph_del_rw_context(fi, &rw_ctx);
2207
2208         dout("splice_read %p %llx.%llx dropping cap refs on %s = %zd\n",
2209              inode, ceph_vinop(inode), ceph_cap_string(got), ret);
2210
2211         ceph_put_cap_refs(ci, got);
2212 out_end:
2213         ceph_end_io_read(inode);
2214         return ret;
2215 }
2216
2217 /*
2218  * Take cap references to avoid releasing caps to MDS mid-write.
2219  *
2220  * If we are synchronous, and write with an old snap context, the OSD
2221  * may return EOLDSNAPC.  In that case, retry the write _after_
2222  * dropping our cap refs and allowing the pending snap to logically
2223  * complete _before_ this write occurs.
2224  *
2225  * If we are near ENOSPC, write synchronously.
2226  */
2227 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
2228 {
2229         struct file *file = iocb->ki_filp;
2230         struct ceph_file_info *fi = file->private_data;
2231         struct inode *inode = file_inode(file);
2232         struct ceph_inode_info *ci = ceph_inode(inode);
2233         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2234         struct ceph_osd_client *osdc = &fsc->client->osdc;
2235         struct ceph_cap_flush *prealloc_cf;
2236         ssize_t count, written = 0;
2237         int err, want = 0, got;
2238         bool direct_lock = false;
2239         u32 map_flags;
2240         u64 pool_flags;
2241         loff_t pos;
2242         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
2243
2244         if (ceph_inode_is_shutdown(inode))
2245                 return -ESTALE;
2246
2247         if (ceph_snap(inode) != CEPH_NOSNAP)
2248                 return -EROFS;
2249
2250         prealloc_cf = ceph_alloc_cap_flush();
2251         if (!prealloc_cf)
2252                 return -ENOMEM;
2253
2254         if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
2255                 direct_lock = true;
2256
2257 retry_snap:
2258         if (direct_lock)
2259                 ceph_start_io_direct(inode);
2260         else
2261                 ceph_start_io_write(inode);
2262
2263         if (iocb->ki_flags & IOCB_APPEND) {
2264                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
2265                 if (err < 0)
2266                         goto out;
2267         }
2268
2269         err = generic_write_checks(iocb, from);
2270         if (err <= 0)
2271                 goto out;
2272
2273         pos = iocb->ki_pos;
2274         if (unlikely(pos >= limit)) {
2275                 err = -EFBIG;
2276                 goto out;
2277         } else {
2278                 iov_iter_truncate(from, limit - pos);
2279         }
2280
2281         count = iov_iter_count(from);
2282         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
2283                 err = -EDQUOT;
2284                 goto out;
2285         }
2286
2287         down_read(&osdc->lock);
2288         map_flags = osdc->osdmap->flags;
2289         pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
2290         up_read(&osdc->lock);
2291         if ((map_flags & CEPH_OSDMAP_FULL) ||
2292             (pool_flags & CEPH_POOL_FLAG_FULL)) {
2293                 err = -ENOSPC;
2294                 goto out;
2295         }
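        /*
         * Note: map_flags and pool_flags are sampled here and consulted
         * again after the write completes; a NEARFULL condition makes the
         * write behave as if IOCB_DSYNC were set (see generic_write_sync()
         * below).
         */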
2296
2297         err = file_remove_privs(file);
2298         if (err)
2299                 goto out;
2300
2301         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
2302              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
2303         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
2304                 want |= CEPH_CAP_FILE_BUFFER;
2305         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2306                 want |= CEPH_CAP_FILE_LAZYIO;
2307         got = 0;
2308         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
2309         if (err < 0)
2310                 goto out;
2311
2312         err = file_update_time(file);
2313         if (err)
2314                 goto out_caps;
2315
2316         inode_inc_iversion_raw(inode);
2317
2318         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
2319              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
2320
2321         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
2322             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
2323             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
2324                 struct ceph_snap_context *snapc;
2325                 struct iov_iter data;
2326
2327                 spin_lock(&ci->i_ceph_lock);
2328                 if (__ceph_have_pending_cap_snap(ci)) {
2329                         struct ceph_cap_snap *capsnap =
2330                                         list_last_entry(&ci->i_cap_snaps,
2331                                                         struct ceph_cap_snap,
2332                                                         ci_item);
2333                         snapc = ceph_get_snap_context(capsnap->context);
2334                 } else {
2335                         BUG_ON(!ci->i_head_snapc);
2336                         snapc = ceph_get_snap_context(ci->i_head_snapc);
2337                 }
2338                 spin_unlock(&ci->i_ceph_lock);
2339
2340                 /* we may need to revert to this point later */
2341                 data = *from;
2342                 if ((iocb->ki_flags & IOCB_DIRECT) && !IS_ENCRYPTED(inode))
2343                         written = ceph_direct_read_write(iocb, &data, snapc,
2344                                                          &prealloc_cf);
2345                 else
2346                         written = ceph_sync_write(iocb, &data, pos, snapc);
2347                 if (direct_lock)
2348                         ceph_end_io_direct(inode);
2349                 else
2350                         ceph_end_io_write(inode);
2351                 if (written > 0)
2352                         iov_iter_advance(from, written);
2353                 ceph_put_snap_context(snapc);
2354         } else {
2355                 /*
2356                  * No need to acquire the i_truncate_mutex: the MDS
2357                  * revokes Fwb caps before sending a truncate message
2358                  * to us, and we can't hold the Fwb cap while there is
2359                  * a pending vmtruncate, so a write and a vmtruncate
2360                  * cannot run at the same time.
2361                  */
2362                 written = generic_perform_write(iocb, from);
2363                 ceph_end_io_write(inode);
2364         }
2365
2366         if (written >= 0) {
2367                 int dirty;
2368
2369                 spin_lock(&ci->i_ceph_lock);
2370                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2371                                                &prealloc_cf);
2372                 spin_unlock(&ci->i_ceph_lock);
2373                 if (dirty)
2374                         __mark_inode_dirty(inode, dirty);
2375                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
2376                         ceph_check_caps(ci, CHECK_CAPS_FLUSH);
2377         }
2378
2379         dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
2380              inode, ceph_vinop(inode), pos, (unsigned)count,
2381              ceph_cap_string(got));
2382         ceph_put_cap_refs(ci, got);
2383
2384         if (written == -EOLDSNAPC) {
2385                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
2386                      inode, ceph_vinop(inode), pos, (unsigned)count);
2387                 goto retry_snap;
2388         }
2389
2390         if (written >= 0) {
2391                 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
2392                     (pool_flags & CEPH_POOL_FLAG_NEARFULL))
2393                         iocb->ki_flags |= IOCB_DSYNC;
2394                 written = generic_write_sync(iocb, written);
2395         }
2396
2397         goto out_unlocked;
2398 out_caps:
2399         ceph_put_cap_refs(ci, got);
2400 out:
2401         if (direct_lock)
2402                 ceph_end_io_direct(inode);
2403         else
2404                 ceph_end_io_write(inode);
2405 out_unlocked:
2406         ceph_free_cap_flush(prealloc_cf);
2407         return written ? written : err;
2408 }
2409
2410 /*
2411  * llseek.  be sure to verify file size on SEEK_END.
2412  */
2413 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
2414 {
2415         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
2416                 struct inode *inode = file_inode(file);
2417                 int ret;
2418
2419                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
2420                 if (ret < 0)
2421                         return ret;
2422         }
2423         return generic_file_llseek(file, offset, whence);
2424 }
2425
2426 static inline void ceph_zero_partial_page(
2427         struct inode *inode, loff_t offset, unsigned size)
2428 {
2429         struct page *page;
2430         pgoff_t index = offset >> PAGE_SHIFT;
2431
2432         page = find_lock_page(inode->i_mapping, index);
2433         if (page) {
2434                 wait_on_page_writeback(page);
2435                 zero_user(page, offset & (PAGE_SIZE - 1), size);
2436                 unlock_page(page);
2437                 put_page(page);
2438         }
2439 }
2440
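/*
 * Zero the pagecache over [offset, offset+length): partial pages at
 * either end are zeroed in place; whole pages in between are dropped.
 * E.g. (illustrative, 4K pages) offset 1000, length 10000: bytes
 * 1000-4095 are zeroed in the first page, the page covering 4096-8191
 * is truncated away, and bytes 8192-10999 are zeroed in the last page.
 */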
2441 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
2442                                       loff_t length)
2443 {
2444         loff_t nearly = round_up(offset, PAGE_SIZE);
2445         if (offset < nearly) {
2446                 loff_t size = nearly - offset;
2447                 if (length < size)
2448                         size = length;
2449                 ceph_zero_partial_page(inode, offset, size);
2450                 offset += size;
2451                 length -= size;
2452         }
2453         if (length >= PAGE_SIZE) {
2454                 loff_t size = round_down(length, PAGE_SIZE);
2455                 truncate_pagecache_range(inode, offset, offset + size - 1);
2456                 offset += size;
2457                 length -= size;
2458         }
2459         if (length)
2460                 ceph_zero_partial_page(inode, offset, length);
2461 }
2462
2463 static int ceph_zero_partial_object(struct inode *inode,
2464                                     loff_t offset, loff_t *length)
2465 {
2466         struct ceph_inode_info *ci = ceph_inode(inode);
2467         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2468         struct ceph_osd_request *req;
2469         int ret = 0;
2470         loff_t zero = 0;
2471         int op;
2472
2473         if (ceph_inode_is_shutdown(inode))
2474                 return -EIO;
2475
2476         if (!length) {
2477                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2478                 length = &zero;
2479         } else {
2480                 op = CEPH_OSD_OP_ZERO;
2481         }
2482
2483         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2484                                         ceph_vino(inode),
2485                                         offset, length,
2486                                         0, 1, op,
2487                                         CEPH_OSD_FLAG_WRITE,
2488                                         NULL, 0, 0, false);
2489         if (IS_ERR(req)) {
2490                 ret = PTR_ERR(req);
2491                 goto out;
2492         }
2493
2494         req->r_mtime = inode->i_mtime;
2495         ceph_osdc_start_request(&fsc->client->osdc, req);
2496         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2497         if (ret == -ENOENT)
2498                 ret = 0;
2499         ceph_osdc_put_request(req);
2500
2501 out:
2502         return ret;
2503 }
2504
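/*
 * Zero the backing objects over [offset, offset+length). The range is
 * split on "object set" (stripe period) boundaries, where
 * object_set_size = object_size * stripe_count: partial spans at either
 * end are zeroed object by object, while each full period is handled by
 * truncating or deleting one object per stripe. E.g. (illustrative)
 * with a 4 MB object size and stripe_count 3, the period is 12 MB, so a
 * 30 MB punch starting on a period boundary drops 2 x 3 whole objects
 * and zeroes the remaining 6 MB piecewise.
 */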
2505 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2506 {
2507         int ret = 0;
2508         struct ceph_inode_info *ci = ceph_inode(inode);
2509         s32 stripe_unit = ci->i_layout.stripe_unit;
2510         s32 stripe_count = ci->i_layout.stripe_count;
2511         s32 object_size = ci->i_layout.object_size;
2512         u64 object_set_size = object_size * stripe_count;
2513         u64 nearly, t;
2514
2515         /* round offset up to next period boundary */
2516         nearly = offset + object_set_size - 1;
2517         t = nearly;
2518         nearly -= do_div(t, object_set_size);
2519
2520         while (length && offset < nearly) {
2521                 loff_t size = length;
2522                 ret = ceph_zero_partial_object(inode, offset, &size);
2523                 if (ret < 0)
2524                         return ret;
2525                 offset += size;
2526                 length -= size;
2527         }
2528         while (length >= object_set_size) {
2529                 int i;
2530                 loff_t pos = offset;
2531                 for (i = 0; i < stripe_count; ++i) {
2532                         ret = ceph_zero_partial_object(inode, pos, NULL);
2533                         if (ret < 0)
2534                                 return ret;
2535                         pos += stripe_unit;
2536                 }
2537                 offset += object_set_size;
2538                 length -= object_set_size;
2539         }
2540         while (length) {
2541                 loff_t size = length;
2542                 ret = ceph_zero_partial_object(inode, offset, &size);
2543                 if (ret < 0)
2544                         return ret;
2545                 offset += size;
2546                 length -= size;
2547         }
2548         return ret;
2549 }
2550
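/*
 * fallocate() here supports only hole punching (FALLOC_FL_PUNCH_HOLE |
 * FALLOC_FL_KEEP_SIZE) on regular, unencrypted files: the affected
 * pagecache range is zeroed locally and the backing objects are zeroed,
 * truncated or deleted on the OSDs.
 */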
2551 static long ceph_fallocate(struct file *file, int mode,
2552                                 loff_t offset, loff_t length)
2553 {
2554         struct ceph_file_info *fi = file->private_data;
2555         struct inode *inode = file_inode(file);
2556         struct ceph_inode_info *ci = ceph_inode(inode);
2557         struct ceph_cap_flush *prealloc_cf;
2558         int want, got = 0;
2559         int dirty;
2560         int ret = 0;
2561         loff_t endoff = 0;
2562         loff_t size;
2563
2564         dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
2565              inode, ceph_vinop(inode), mode, offset, length);
2566
2567         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2568                 return -EOPNOTSUPP;
2569
2570         if (!S_ISREG(inode->i_mode))
2571                 return -EOPNOTSUPP;
2572
2573         if (IS_ENCRYPTED(inode))
2574                 return -EOPNOTSUPP;
2575
2576         prealloc_cf = ceph_alloc_cap_flush();
2577         if (!prealloc_cf)
2578                 return -ENOMEM;
2579
2580         inode_lock(inode);
2581
2582         if (ceph_snap(inode) != CEPH_NOSNAP) {
2583                 ret = -EROFS;
2584                 goto unlock;
2585         }
2586
2587         size = i_size_read(inode);
2588
2589         /* Are we punching a hole beyond EOF? */
2590         if (offset >= size)
2591                 goto unlock;
2592         if ((offset + length) > size)
2593                 length = size - offset;
2594
2595         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2596                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2597         else
2598                 want = CEPH_CAP_FILE_BUFFER;
2599
2600         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2601         if (ret < 0)
2602                 goto unlock;
2603
2604         ret = file_modified(file);
2605         if (ret)
2606                 goto put_caps;
2607
2608         filemap_invalidate_lock(inode->i_mapping);
2609         ceph_fscache_invalidate(inode, false);
2610         ceph_zero_pagecache_range(inode, offset, length);
2611         ret = ceph_zero_objects(inode, offset, length);
2612
2613         if (!ret) {
2614                 spin_lock(&ci->i_ceph_lock);
2615                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2616                                                &prealloc_cf);
2617                 spin_unlock(&ci->i_ceph_lock);
2618                 if (dirty)
2619                         __mark_inode_dirty(inode, dirty);
2620         }
2621         filemap_invalidate_unlock(inode->i_mapping);
2622
2623 put_caps:
2624         ceph_put_cap_refs(ci, got);
2625 unlock:
2626         inode_unlock(inode);
2627         ceph_free_cap_flush(prealloc_cf);
2628         return ret;
2629 }
2630
2631 /*
2632  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2633  * src_ci.  Two attempts are made to obtain both caps, and an error is
2634  * returned if this fails; zero is returned on success.
2635  */
2636 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2637                           struct file *dst_filp,
2638                           loff_t dst_endoff, int *dst_got)
2639 {
2640         int ret = 0;
2641         bool retrying = false;
2642
2643 retry_caps:
2644         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2645                             dst_endoff, dst_got);
2646         if (ret < 0)
2647                 return ret;
2648
2649         /*
2650          * Since we're already holding the FILE_WR capability for the dst file,
2651          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2652          * retry dance instead to try to get both capabilities.
2653          */
2654         ret = ceph_try_get_caps(file_inode(src_filp),
2655                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2656                                 false, src_got);
2657         if (ret <= 0) {
2658                 /* Start by dropping dst_ci caps and getting src_ci caps */
2659                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2660                 if (retrying) {
2661                         if (!ret)
2662                                 /* ceph_try_get_caps masks EAGAIN */
2663                                 ret = -EAGAIN;
2664                         return ret;
2665                 }
2666                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2667                                     CEPH_CAP_FILE_SHARED, -1, src_got);
2668                 if (ret < 0)
2669                         return ret;
2670                 /* ... drop src_ci caps too, and retry */
2671                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2672                 retrying = true;
2673                 goto retry_caps;
2674         }
2675         return ret;
2676 }
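
/*
 * Recap of the retry dance above (descriptive only):
 *
 *   1. take Fw on the destination, blocking if necessary;
 *   2. try Fr on the source without blocking, since blocking while already
 *      holding Fw could deadlock;
 *   3. on failure, drop the dst caps, take Fr on the source blocking
 *      (forcing conflicting caps to be revoked), drop it again, and retry
 *      from step 1;
 *   4. if the second attempt also fails, give up with -EAGAIN (or the
 *      original error).
 */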
2677
2678 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2679                            struct ceph_inode_info *dst_ci, int dst_got)
2680 {
2681         ceph_put_cap_refs(src_ci, src_got);
2682         ceph_put_cap_refs(dst_ci, dst_got);
2683 }
2684
2685 /*
2686  * This function does several size-related checks, returning an error if:
2687  *  - source file is smaller than off+len
2688  *  - destination file size is not OK (inode_newsize_ok())
2689  *  - the max bytes quota is exceeded
2690  */
2691 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2692                            loff_t src_off, loff_t dst_off, size_t len)
2693 {
2694         loff_t size, endoff;
2695
2696         size = i_size_read(src_inode);
2697         /*
2698          * Don't copy beyond source file EOF.  Instead of simply setting length
2699          * to (size - src_off), just drop to VFS default implementation, as the
2700          * local i_size may be stale due to other clients writing to the source
2701          * inode.
2702          */
2703         if (src_off + len > size) {
2704                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2705                      src_off, len, size);
2706                 return -EOPNOTSUPP;
2707         }
2708         size = i_size_read(dst_inode);
2709
2710         endoff = dst_off + len;
2711         if (inode_newsize_ok(dst_inode, endoff))
2712                 return -EOPNOTSUPP;
2713
2714         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2715                 return -EDQUOT;
2716
2717         return 0;
2718 }
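
/*
 * Worked example (illustrative): with a 100-byte source file, src_off = 64
 * and len = 64, src_off + len = 128 > 100, so we return -EOPNOTSUPP and let
 * the VFS default implementation handle the copy.  Clamping len locally
 * would risk acting on a stale i_size, hence the fallback.
 */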
2719
2720 static struct ceph_osd_request *
2721 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2722                             u64 src_snapid,
2723                             struct ceph_object_id *src_oid,
2724                             struct ceph_object_locator *src_oloc,
2725                             struct ceph_object_id *dst_oid,
2726                             struct ceph_object_locator *dst_oloc,
2727                             u32 truncate_seq, u64 truncate_size)
2728 {
2729         struct ceph_osd_request *req;
2730         int ret;
2731         u32 src_fadvise_flags =
2732                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2733                 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2734         u32 dst_fadvise_flags =
2735                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2736                 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2737
2738         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2739         if (!req)
2740                 return ERR_PTR(-ENOMEM);
2741
2742         req->r_flags = CEPH_OSD_FLAG_WRITE;
2743
2744         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2745         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2746
2747         ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2748                                         src_oid, src_oloc,
2749                                         src_fadvise_flags,
2750                                         dst_fadvise_flags,
2751                                         truncate_seq,
2752                                         truncate_size,
2753                                         CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2754         if (ret)
2755                 goto out;
2756
2757         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2758         if (ret)
2759                 goto out;
2760
2761         return req;
2762
2763 out:
2764         ceph_osdc_put_request(req);
2765         return ERR_PTR(ret);
2766 }
2767
2768 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2769                                     struct ceph_inode_info *dst_ci, u64 *dst_off,
2770                                     struct ceph_fs_client *fsc,
2771                                     size_t len, unsigned int flags)
2772 {
2773         struct ceph_object_locator src_oloc, dst_oloc;
2774         struct ceph_object_id src_oid, dst_oid;
2775         struct ceph_osd_client *osdc;
2776         struct ceph_osd_request *req;
2777         size_t bytes = 0;
2778         u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2779         u32 src_objlen, dst_objlen;
2780         u32 object_size = src_ci->i_layout.object_size;
2781         int ret;
2782
2783         src_oloc.pool = src_ci->i_layout.pool_id;
2784         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2785         dst_oloc.pool = dst_ci->i_layout.pool_id;
2786         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2787         osdc = &fsc->client->osdc;
2788
2789         while (len >= object_size) {
2790                 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2791                                               object_size, &src_objnum,
2792                                               &src_objoff, &src_objlen);
2793                 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2794                                               object_size, &dst_objnum,
2795                                               &dst_objoff, &dst_objlen);
2796                 ceph_oid_init(&src_oid);
2797                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2798                                 src_ci->i_vino.ino, src_objnum);
2799                 ceph_oid_init(&dst_oid);
2800                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2801                                 dst_ci->i_vino.ino, dst_objnum);
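                /*
                 * Illustrative: the names above follow the usual RADOS
                 * object naming for file data, "<ino>.<objnum>" in hex;
                 * e.g. inode 0x10000000abc, object 2 becomes
                 * "10000000abc.00000002".
                 */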
2802                 /* Do a remote object copy */
2803                 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2804                                                   &src_oid, &src_oloc,
2805                                                   &dst_oid, &dst_oloc,
2806                                                   dst_ci->i_truncate_seq,
2807                                                   dst_ci->i_truncate_size);
2808                 if (IS_ERR(req))
2809                         ret = PTR_ERR(req);
2810                 else {
2811                         ceph_osdc_start_request(osdc, req);
2812                         ret = ceph_osdc_wait_request(osdc, req);
2813                         ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2814                                                      req->r_start_latency,
2815                                                      req->r_end_latency,
2816                                                      object_size, ret);
2817                         ceph_osdc_put_request(req);
2818                 }
2819                 if (ret) {
2820                         if (ret == -EOPNOTSUPP) {
2821                                 fsc->have_copy_from2 = false;
2822                                 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2823                         }
2824                         dout("copy-from2 request returned %d\n", ret);
2825                         if (!bytes)
2826                                 bytes = ret;
2827                         goto out;
2828                 }
2829                 len -= object_size;
2830                 bytes += object_size;
2831                 *src_off += object_size;
2832                 *dst_off += object_size;
2833         }
2834
2835 out:
2836         ceph_oloc_destroy(&src_oloc);
2837         ceph_oloc_destroy(&dst_oloc);
2838         return bytes;
2839 }
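
/*
 * Worked example (illustrative): with object_size = 4 MiB and an
 * object-aligned len of 10 MiB, the loop above issues two full-object
 * copy-from2 requests, advances *src_off and *dst_off by 8 MiB, and
 * returns bytes = 8 MiB; the remaining 2 MiB (< object_size) is left for
 * the caller to copy with do_splice_direct().
 */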
2840
2841 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2842                                       struct file *dst_file, loff_t dst_off,
2843                                       size_t len, unsigned int flags)
2844 {
2845         struct inode *src_inode = file_inode(src_file);
2846         struct inode *dst_inode = file_inode(dst_file);
2847         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2848         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2849         struct ceph_cap_flush *prealloc_cf;
2850         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2851         loff_t size;
2852         ssize_t ret = -EIO, bytes;
2853         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2854         u32 src_objlen, dst_objlen;
2855         int src_got = 0, dst_got = 0, err, dirty;
2856
2857         if (src_inode->i_sb != dst_inode->i_sb) {
2858                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2859
2860                 if (ceph_fsid_compare(&src_fsc->client->fsid,
2861                                       &dst_fsc->client->fsid)) {
2862                         dout("Copying files across clusters: src: %pU dst: %pU\n",
2863                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
2864                         return -EXDEV;
2865                 }
2866         }
2867         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2868                 return -EROFS;
2869
2870         /*
2871          * Some of the checks below will return -EOPNOTSUPP, which will force a
2872          * fallback to the default VFS copy_file_range implementation.  This is
2873  * desirable in several cases (e.g. when 'len' is smaller than the
2874  * object size, or when a local copy would be more efficient).
2876          */
2877
2878         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2879                 return -EOPNOTSUPP;
2880
2881         if (!src_fsc->have_copy_from2)
2882                 return -EOPNOTSUPP;
2883
2884         /*
2885          * Striped file layouts require that we copy partial objects, but the
2886          * OSD copy-from operation only supports full-object copies.  Limit
2887          * this to non-striped file layouts for now.
2888          */
2889         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2890             (src_ci->i_layout.stripe_count != 1) ||
2891             (dst_ci->i_layout.stripe_count != 1) ||
2892             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2893                 dout("Invalid src/dst files layout\n");
2894                 return -EOPNOTSUPP;
2895         }
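
        /*
         * Illustrative: the default layout (stripe_unit = object_size =
         * 4 MiB, stripe_count = 1) passes this check; any layout with
         * stripe_count > 1, or mismatched src/dst stripe_unit or
         * object_size, falls back to the VFS implementation.
         */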
2896
2897         /* Every encrypted inode gets its own key, so we can't offload them */
2898         if (IS_ENCRYPTED(src_inode) || IS_ENCRYPTED(dst_inode))
2899                 return -EOPNOTSUPP;
2900
2901         if (len < src_ci->i_layout.object_size)
2902                 return -EOPNOTSUPP; /* no remote copy will be done */
2903
2904         prealloc_cf = ceph_alloc_cap_flush();
2905         if (!prealloc_cf)
2906                 return -ENOMEM;
2907
2908         /* Start by sync'ing the source and destination files */
2909         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2910         if (ret < 0) {
2911                 dout("failed to write src file (%zd)\n", ret);
2912                 goto out;
2913         }
2914         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2915         if (ret < 0) {
2916                 dout("failed to write dst file (%zd)\n", ret);
2917                 goto out;
2918         }
2919
2920         /*
2921          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2922          * clients may have dirty data in their caches.  And OSDs know nothing
2923          * about caps, so they can't safely do the remote object copies.
2924          */
2925         err = get_rd_wr_caps(src_file, &src_got,
2926                              dst_file, (dst_off + len), &dst_got);
2927         if (err < 0) {
2928                 dout("get_rd_wr_caps returned %d\n", err);
2929                 ret = -EOPNOTSUPP;
2930                 goto out;
2931         }
2932
2933         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2934         if (ret < 0)
2935                 goto out_caps;
2936
2937         /* Drop dst file cached pages */
2938         ceph_fscache_invalidate(dst_inode, false);
2939         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2940                                             dst_off >> PAGE_SHIFT,
2941                                             (dst_off + len) >> PAGE_SHIFT);
2942         if (ret < 0) {
2943                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2944                 ret = 0; /* XXX */
2945         }
2946         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2947                                       src_ci->i_layout.object_size,
2948                                       &src_objnum, &src_objoff, &src_objlen);
2949         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2950                                       dst_ci->i_layout.object_size,
2951                                       &dst_objnum, &dst_objoff, &dst_objlen);
2952         /* object-level offsets need to be the same */
2953         if (src_objoff != dst_objoff) {
2954                 ret = -EOPNOTSUPP;
2955                 goto out_caps;
2956         }
2957
2958         /*
2959          * Do a manual copy if the object offset isn't object aligned.
2960          * 'src_objlen' contains the bytes left until the end of the object,
2961  * starting at src_off.
2962          */
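        /*
         * Worked example (illustrative): with 4 MiB objects and
         * src_off == dst_off == 5 MiB, src_objoff is 1 MiB and src_objlen
         * is 3 MiB, so the do_splice_direct() call below copies 3 MiB to
         * reach the next object boundary before full-object copies start.
         */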
2963         if (src_objoff) {
2964                 dout("Initial partial copy of %u bytes\n", src_objlen);
2965
2966                 /*
2967                  * we need to temporarily drop all caps as we'll be calling
2968                  * {read,write}_iter, which will get caps again.
2969                  */
2970                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2971                 ret = do_splice_direct(src_file, &src_off, dst_file,
2972                                        &dst_off, src_objlen, flags);
2973                 /* Abort on short copies or on error */
2974                 if (ret < src_objlen) {
2975                         dout("Failed partial copy (%zd)\n", ret);
2976                         goto out;
2977                 }
2978                 len -= ret;
2979                 err = get_rd_wr_caps(src_file, &src_got,
2980                                      dst_file, (dst_off + len), &dst_got);
2981                 if (err < 0)
2982                         goto out;
2983                 err = is_file_size_ok(src_inode, dst_inode,
2984                                       src_off, dst_off, len);
2985                 if (err < 0)
2986                         goto out_caps;
2987         }
2988
2989         size = i_size_read(dst_inode);
2990         bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2991                                      src_fsc, len, flags);
2992         if (bytes <= 0) {
2993                 if (!ret)
2994                         ret = bytes;
2995                 goto out_caps;
2996         }
2997         dout("Copied %zu bytes out of %zu\n", bytes, len);
2998         len -= bytes;
2999         ret += bytes;
3000
3001         file_update_time(dst_file);
3002         inode_inc_iversion_raw(dst_inode);
3003
3004         if (dst_off > size) {
3005                 /* Let the MDS know about dst file size change */
3006                 if (ceph_inode_set_size(dst_inode, dst_off) ||
3007                     ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
3008                         ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
3009         }
3010         /* Mark Fw dirty */
3011         spin_lock(&dst_ci->i_ceph_lock);
3012         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
3013         spin_unlock(&dst_ci->i_ceph_lock);
3014         if (dirty)
3015                 __mark_inode_dirty(dst_inode, dirty);
3016
3017 out_caps:
3018         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
3019
3020         /*
3021          * Do the final manual copy if we still have some bytes left, unless
3022          * there were errors in remote object copies (len >= object_size).
3023          */
3024         if (len && (len < src_ci->i_layout.object_size)) {
3025                 dout("Final partial copy of %zu bytes\n", len);
3026                 bytes = do_splice_direct(src_file, &src_off, dst_file,
3027                                          &dst_off, len, flags);
3028                 if (bytes > 0)
3029                         ret += bytes;
3030                 else
3031                         dout("Failed partial copy (%zd)\n", bytes);
3032         }
3033
3034 out:
3035         ceph_free_cap_flush(prealloc_cf);
3036
3037         return ret;
3038 }
3039
3040 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
3041                                     struct file *dst_file, loff_t dst_off,
3042                                     size_t len, unsigned int flags)
3043 {
3044         ssize_t ret;
3045
3046         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
3047                                      len, flags);
3048
3049         if (ret == -EOPNOTSUPP || ret == -EXDEV)
3050                 ret = generic_copy_file_range(src_file, src_off, dst_file,
3051                                               dst_off, len, flags);
3052         return ret;
3053 }
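
/*
 * Illustration (hypothetical userspace sketch): callers need no special
 * handling for the offload path, e.g.
 *
 *     // copy 8 MiB between two cephfs files; full-object ranges are
 *     // copied OSD-side, everything else falls back to
 *     // generic_copy_file_range()
 *     copy_file_range(src_fd, NULL, dst_fd, NULL, 8 << 20, 0);
 */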
3054
3055 const struct file_operations ceph_file_fops = {
3056         .open = ceph_open,
3057         .release = ceph_release,
3058         .llseek = ceph_llseek,
3059         .read_iter = ceph_read_iter,
3060         .write_iter = ceph_write_iter,
3061         .mmap = ceph_mmap,
3062         .fsync = ceph_fsync,
3063         .lock = ceph_lock,
3064         .setlease = simple_nosetlease,
3065         .flock = ceph_flock,
3066         .splice_read = ceph_splice_read,
3067         .splice_write = iter_file_splice_write,
3068         .unlocked_ioctl = ceph_ioctl,
3069         .compat_ioctl = compat_ptr_ioctl,
3070         .fallocate      = ceph_fallocate,
3071         .copy_file_range = ceph_copy_file_range,
3072 };