// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

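/*
 * Worked example (illustrative only): an open with O_RDWR|O_CREAT|O_TRUNC
 * maps to CEPH_O_RDWR via the O_ACCMODE switch above, then the
 * ceph_sys2wire() expansions OR in CEPH_O_CREAT and CEPH_O_TRUNC.  Any
 * leftover bits (e.g. O_NOATIME, which has no CEPH_O_* equivalent here)
 * are only reported via dout() and never hit the wire.
 */
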
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_read_iter/generic_perform_write
 *    helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

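/*
 * Roughly, the three modes map onto the helpers below: buffered I/O
 * goes through generic_file_read_iter()/generic_perform_write(),
 * synchronous I/O through ceph_sync_read()/ceph_sync_write(), and
 * O_DIRECT through ceph_direct_read_write().  ceph_read_iter() and
 * ceph_write_iter() pick a path based on which caps were granted.
 */
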
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

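/*
 * Sizing note (assuming 8-byte pointers): the on-stack buffer in
 * __iter_get_bvecs() is 64 * sizeof(struct page *) = 512 bytes per
 * call, which keeps stack usage bounded while still batching the
 * page-pinning work.
 */
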
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
					    ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			int len = min_t(int, bytes, PAGE_SIZE - start);

			bvec_set_page(&bvecs[bvec_idx], pages[idx], len, start);
			bytes -= len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
	} else {
		*bvecs = bv;
		*num_bvecs = npages;
	}
	return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

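/*
 * For example, an open with O_WRONLY|O_CREAT becomes a CEPH_MDS_OP_CREATE
 * request routed with USE_AUTH_MDS, while a plain O_RDONLY open is a
 * CEPH_MDS_OP_OPEN that any MDS may serve.
 */
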
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mount_options *opt =
		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
	struct ceph_file_info *fi;
	int ret;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
			fi->flags |= CEPH_F_SYNC;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
		ret = ceph_uninline_data(file);
		if (ret < 0)
			goto error;
	}

	return 0;

error:
	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
	ceph_put_fmode(ci, fi->fmode, 1);
	kmem_cache_free(ceph_file_cachep, fi);
	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return ret;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
					  S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode)) {
		flags = O_DIRECTORY;  /* mds likes to know */
	} else if (S_ISREG(inode->i_mode)) {
		err = fscrypt_file_open(inode, file);
		if (err)
			return err;
	}

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, and inode number,
 * and either a lease on the dentry or complete dir info. If any of those
 * criteria are not satisfied, then return false and the caller can go
 * synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

static void wake_async_create_waiters(struct inode *inode,
				      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool check_cap = false;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);

		if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
			check_cap = true;
		}
	}
	ceph_kick_flushing_inode_caps(session, ci);
	spin_unlock(&ci->i_ceph_lock);

	if (check_cap)
		ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	struct dentry *dentry = req->r_dentry;
	struct inode *dinode = d_inode(dentry);
	struct inode *tinode = req->r_target_inode;
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	WARN_ON_ONCE(dinode && tinode && dinode != tinode);

	/* MDS changed -- caller must resubmit */
	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		pr_warn("async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		if (dinode) {
			mapping_set_error(dinode->i_mapping, result);
			ceph_inode_shutdown(dinode);
			wake_async_create_waiters(dinode, req->r_session);
		}
	}

	if (tinode) {
		u64 ino = ceph_vino(tinode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);

		mapping_set_error(tinode->i_mapping, result);
		wake_async_create_waiters(tinode, req->r_session);
	} else if (!result) {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
				    struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct timespec64 now;
	struct ceph_string *pool_ns;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	if (req->r_pagelist) {
		iinfo.xattr_len = req->r_pagelist->length;
		iinfo.xattr_data = req->r_pagelist->mapped_tail;
	} else {
		/* fake it */
		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
		iinfo.xattr_data = xattr_buf;
		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
	}

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	if (dir->i_mode & S_ISGID) {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

		/* Directories always inherit the setgid bit. */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
	}
	in.mode = cpu_to_le32((u32)mode);

	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);
	/* lo is private, so pool_ns can't change */
	pool_ns = rcu_dereference_raw(lo->pool_ns);
	if (pool_ns) {
		iinfo.pool_ns_len = pool_ns->len;
		iinfo.pool_ns_data = pool_ns->str;
	}

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
			vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}

	spin_lock(&dentry->d_lock);
	di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
	wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
	spin_unlock(&dentry->d_lock);

	return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct inode *new_inode = NULL;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_wait_on_conflict_unlink(dentry);
	if (err)
		return err;
	/*
	 * Do not truncate the file, since atomic_open is called before the
	 * permission check. The caller will do the truncation afterward.
	 */
	flags &= ~O_TRUNC;

retry:
	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;

		new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
		if (IS_ERR(new_inode)) {
			err = PTR_ERR(new_inode);
			goto out_ctx;
		}
		/* Async create can't handle more than a page of xattrs */
		if (as_ctx.pagelist &&
		    !list_is_singular(&as_ctx.pagelist->head))
			try_async = false;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}

	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);
	if (IS_ENCRYPTED(dir)) {
		set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
		if (!fscrypt_has_encryption_key(dir)) {
			spin_lock(&dentry->d_lock);
			dentry->d_flags |= DCACHE_NOKEY_NAME;
			spin_unlock(&dentry->d_lock);
		}
	}

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
				     CEPH_CAP_XATTR_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;

		ceph_as_ctx_to_req(req, &as_ctx);

		if (try_async && (req->r_dir_caps =
				  try_prep_async_create(dir, dentry, &lo,
							&req->r_deleg_ino))) {
			struct ceph_vino vino = { .ino = req->r_deleg_ino,
						  .snap = CEPH_NOSNAP };
			struct ceph_dentry_info *di = ceph_dentry(dentry);

			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;

			/* Hash inode before RPC */
			new_inode = ceph_get_inode(dir->i_sb, vino, new_inode);
			if (IS_ERR(new_inode)) {
				err = PTR_ERR(new_inode);
				new_inode = NULL;
				goto out_req;
			}
			WARN_ON_ONCE(!(new_inode->i_state & I_NEW));

			spin_lock(&dentry->d_lock);
			di->flags |= CEPH_DENTRY_ASYNC_CREATE;
			spin_unlock(&dentry->d_lock);

			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, new_inode,
							       dentry, file,
							       mode, req,
							       &as_ctx, &lo);
				new_inode = NULL;
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				discard_new_inode(new_inode);
				ceph_release_acl_sec_ctx(&as_ctx);
				memset(&as_ctx, 0, sizeof(as_ctx));
				new_inode = NULL;
				try_async = false;
				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
				goto retry;
			}
			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_new_inode = new_inode;
	new_inode = NULL;
	err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		if (IS_ENCRYPTED(dir) &&
		    !fscrypt_has_permitted_context(dir, d_inode(dentry))) {
			pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
				ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
			goto out_req;
		}

		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
	iput(new_inode);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */

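/*
 * Illustrative numbers: with i_size = 100 and a 4096-byte read at
 * offset 0, the OSD may return only the bytes it actually stores (say
 * 50).  The loop below then zero-fills bytes 50..99 (the gap up to
 * i_size) and returns 100, so the caller only sees a short read at
 * EOF.
 */
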
ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
			 struct iov_iter *to, int *retry_op,
			 u64 *last_objver)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = *ki_pos;
	u64 len = iov_iter_count(to);
	u64 i_size = i_size_read(inode);
	bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
	u64 objver = 0;

	dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len);

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		bool more;
		int idx;
		size_t left;
		struct ceph_osd_req_op *op;
		u64 read_off = off;
		u64 read_len = len;

		/* determine new offset/length if encrypted */
		ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);

		dout("sync_read orig %llu~%llu reading %llu~%llu",
		     off, len, read_off, read_len);

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, read_off, &read_len, 0, 1,
					sparse ? CEPH_OSD_OP_SPARSE_READ :
						 CEPH_OSD_OP_READ,
					CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/* adjust len downward if the request truncated the len */
		if (off + len > read_off + read_len)
			len = read_off + read_len - off;
		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(read_off, read_len);
		page_off = offset_in_page(off);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
						 offset_in_page(read_off),
						 false, false);

		op = &req->r_ops[0];
		if (sparse) {
			ret = ceph_alloc_sparse_ext_map(op);
			if (ret) {
				ceph_osdc_put_request(req);
				break;
			}
		}

		ceph_osdc_start_request(osdc, req);
		ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 read_len, ret);

		if (ret > 0)
			objver = req->r_version;

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		/* Fix it to go to end of extent map */
		if (sparse && ret >= 0)
			ret = ceph_sparse_ext_map_end(op);
		else if (ret == -ENOENT)
			ret = 0;

		if (ret > 0 && IS_ENCRYPTED(inode)) {
			int fret;

			fret = ceph_fscrypt_decrypt_extents(inode, pages,
					read_off, op->extent.sparse_ext,
					op->extent.sparse_ext_cnt);
			if (fret < 0) {
				ret = fret;
				ceph_osdc_put_request(req);
				break;
			}

			/* account for any partial block at the beginning */
			fret -= (off - read_off);

			/*
			 * Short read after big offset adjustment?
			 * Nothing is usable, just call it a zero
			 * len read.
			 */
			fret = max(fret, 0);

			/* account for partial block at the end */
			ret = min_t(ssize_t, fret, len);
		}

		ceph_osdc_put_request(req);

		/* Short read but not EOF? Zero out the remainder. */
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;

			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t plen, copied;

			plen = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, plen, to);
			off += copied;
			left -= copied;
			page_off = 0;
			if (copied < plen) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > *ki_pos) {
		if (off >= i_size) {
			*retry_op = CHECK_EOF;
			ret = i_size - *ki_pos;
			*ki_pos = i_size;
		} else {
			ret = off - *ki_pos;
			*ki_pos = off;
		}

		if (last_objver && ret > 0)
			*last_objver = objver;
	}
	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);

	dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos,
	     iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

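/*
 * Lifecycle sketch: one ceph_aio_request fans out into one OSD request
 * per object touched.  pending_reqs counts them; each completion calls
 * ceph_aio_complete(), and the last one to drop the count delivers the
 * result via ->ki_complete().  The first error is latched into ->error
 * with cmpxchg() so concurrent completions don't clobber each other.
 */
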
static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
		}

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_osd_req_op *op = &req->r_ops[0];
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;
	bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (sparse && rc >= 0)
			rc = ceph_sparse_ext_map_end(op);
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ceph_osdc_start_request(req->r_osdc, req);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && user_backed_iter(iter);
	bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2;

		ceph_fscache_invalidate(inode, true);

		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;
		struct ceph_osd_req_op *op;
		int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE : readop,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
		op = &req->r_ops[0];
		if (sparse) {
			ret = ceph_alloc_sparse_ext_map(op);
			if (ret) {
				ceph_osdc_put_request(req);
				break;
			}
		}

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ceph_osdc_start_request(req->r_osdc, req);
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (sparse && ret >= 0)
				ret = ceph_sparse_ext_map_end(op);
			else if (ret == -ENOENT)
				ret = 0;

			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ceph_osdc_start_request(req->r_osdc, req);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
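/*
 * Worked example for the alignment logic below, assuming
 * CEPH_FSCRYPT_BLOCK_SIZE is 4096: an encrypted write of 200 bytes at
 * pos 4000 expands to write_pos 0, write_len 8192.  first and last are
 * both true, so the two boundary crypto blocks are read back, decrypted
 * in place, overwritten with the new bytes, re-encrypted, and written
 * under an object version assertion (or an exclusive create if the
 * object didn't exist yet).
 */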
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ceph_fscache_invalidate(inode, false);

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;
		u64 write_pos = pos;
		u64 write_len = len;
		u64 objnum, objoff;
		u32 xlen;
		u64 assert_ver = 0;
		bool rmw;
		bool first, last;
		struct iov_iter saved_iter = *from;
		size_t off;

		ceph_fscrypt_adjust_off_and_len(inode, &write_pos, &write_len);

		/* clamp the length to the end of first object */
		ceph_calc_file_object_mapping(&ci->i_layout, write_pos,
					      write_len, &objnum, &objoff,
					      &xlen);
		write_len = xlen;

		/* adjust len downward if it goes beyond current object */
		if (pos + len > write_pos + write_len)
			len = write_pos + write_len - pos;

		/*
		 * If we had to adjust the length or position to align with a
		 * crypto block, then we must do a read/modify/write cycle. We
		 * use a version assertion to redrive the thing if something
		 * changes in between.
		 */
		first = pos != write_pos;
		last = (pos + len) != (write_pos + write_len);
		rmw = first || last;

		dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
		     ci->i_vino.ino, pos, len, write_pos, write_len,
		     rmw ? "" : "no ");

		/*
		 * The data is emplaced into the page as it would be if it were
		 * in an array of pagecache pages.
		 */
		num_pages = calc_pages_for(write_pos, write_len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			break;
		}

		/* Do we need to preload the pages? */
		if (rmw) {
			u64 first_pos = write_pos;
			u64 last_pos = (write_pos + write_len) - CEPH_FSCRYPT_BLOCK_SIZE;
			u64 read_len = CEPH_FSCRYPT_BLOCK_SIZE;
			struct ceph_osd_req_op *op;

			/* We should only need to do this for encrypted inodes */
			WARN_ON_ONCE(!IS_ENCRYPTED(inode));

			/* No need to do two reads if first and last blocks are same */
			if (first && last_pos == first_pos)
				last = false;

			/*
			 * Allocate a read request for one or two extents,
			 * depending on how the request was aligned.
			 */
			req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, first ? first_pos : last_pos,
					&read_len, 0, (first && last) ? 2 : 1,
					CEPH_OSD_OP_SPARSE_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
			if (IS_ERR(req)) {
				ceph_release_page_vector(pages, num_pages);
				ret = PTR_ERR(req);
				break;
			}

			/* Something is misaligned! */
			if (read_len != CEPH_FSCRYPT_BLOCK_SIZE) {
				ceph_osdc_put_request(req);
				ceph_release_page_vector(pages, num_pages);
				ret = -EIO;
				break;
			}

			/* Add extent for first block? */
			op = &req->r_ops[0];

			if (first) {
				osd_req_op_extent_osd_data_pages(req, 0, pages,
							 CEPH_FSCRYPT_BLOCK_SIZE,
							 offset_in_page(first_pos),
							 false, false);
				/* We only expect a single extent here */
				ret = __ceph_alloc_sparse_ext_map(op, 1);
				if (ret) {
					ceph_osdc_put_request(req);
					ceph_release_page_vector(pages, num_pages);
					break;
				}
			}

			/* Add extent for last block */
			if (last) {
				/* Init the other extent if first extent has been used */
				if (first) {
					op = &req->r_ops[1];
					osd_req_op_extent_init(req, 1,
							CEPH_OSD_OP_SPARSE_READ,
							last_pos, CEPH_FSCRYPT_BLOCK_SIZE,
							ci->i_truncate_size,
							ci->i_truncate_seq);
				}

				ret = __ceph_alloc_sparse_ext_map(op, 1);
				if (ret) {
					ceph_osdc_put_request(req);
					ceph_release_page_vector(pages, num_pages);
					break;
				}

				osd_req_op_extent_osd_data_pages(req, first ? 1 : 0,
							&pages[num_pages - 1],
							CEPH_FSCRYPT_BLOCK_SIZE,
							offset_in_page(last_pos),
							false, false);
			}

			ceph_osdc_start_request(osdc, req);
			ret = ceph_osdc_wait_request(osdc, req);

			/* FIXME: length field is wrong if there are 2 extents */
			ceph_update_read_metrics(&fsc->mdsc->metric,
						 req->r_start_latency,
						 req->r_end_latency,
						 read_len, ret);

			/* Ok if object is not already present */
			if (ret == -ENOENT) {
				/*
				 * If there is no object, then we can't assert
				 * on its version. Set it to 0, and we'll use an
				 * exclusive create instead.
				 */
				ceph_osdc_put_request(req);
				ret = 0;

				/*
				 * zero out the soon-to-be uncopied parts of the
				 * first and last pages.
				 */
				if (first)
					zero_user_segment(pages[0], 0,
							  offset_in_page(first_pos));
				if (last)
					zero_user_segment(pages[num_pages - 1],
							  offset_in_page(last_pos),
							  PAGE_SIZE);
			} else {
				if (ret < 0) {
					ceph_osdc_put_request(req);
					ceph_release_page_vector(pages, num_pages);
					break;
				}

				op = &req->r_ops[0];
				if (op->extent.sparse_ext_cnt == 0) {
					if (first)
						zero_user_segment(pages[0], 0,
								  offset_in_page(first_pos));
					else
						zero_user_segment(pages[num_pages - 1],
								  offset_in_page(last_pos),
								  PAGE_SIZE);
				} else if (op->extent.sparse_ext_cnt != 1 ||
					   ceph_sparse_ext_map_end(op) !=
						CEPH_FSCRYPT_BLOCK_SIZE) {
					ret = -EIO; /* shouldn't happen */
					ceph_osdc_put_request(req);
					ceph_release_page_vector(pages, num_pages);
					break;
				}

				if (first && last) {
					op = &req->r_ops[1];
					if (op->extent.sparse_ext_cnt == 0) {
						zero_user_segment(pages[num_pages - 1],
								  offset_in_page(last_pos),
								  PAGE_SIZE);
					} else if (op->extent.sparse_ext_cnt != 1 ||
						   ceph_sparse_ext_map_end(op) !=
							CEPH_FSCRYPT_BLOCK_SIZE) {
						ret = -EIO; /* shouldn't happen */
						ceph_osdc_put_request(req);
						ceph_release_page_vector(pages, num_pages);
						break;
					}
				}

				/* Grab assert version. It must be non-zero. */
				assert_ver = req->r_version;
				WARN_ON_ONCE(ret > 0 && assert_ver == 0);

				ceph_osdc_put_request(req);
				if (first) {
					ret = ceph_fscrypt_decrypt_block_inplace(inode,
							pages[0], CEPH_FSCRYPT_BLOCK_SIZE,
							offset_in_page(first_pos),
							first_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
					if (ret < 0) {
						ceph_release_page_vector(pages, num_pages);
						break;
					}
				}
				if (last) {
					ret = ceph_fscrypt_decrypt_block_inplace(inode,
							pages[num_pages - 1],
							CEPH_FSCRYPT_BLOCK_SIZE,
							offset_in_page(last_pos),
							last_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
					if (ret < 0) {
						ceph_release_page_vector(pages, num_pages);
						break;
					}
				}
			}
		}

		left = len;
		off = offset_in_page(pos);
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE - off);

			/* copy the data */
			ret = copy_page_from_iter(pages[n], off, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			off = 0;
			left -= ret;
		}
		if (ret < 0) {
			dout("sync_write write failed with %d\n", ret);
			ceph_release_page_vector(pages, num_pages);
			break;
		}

		if (IS_ENCRYPTED(inode)) {
			ret = ceph_fscrypt_encrypt_pages(inode, pages,
							 write_pos, write_len,
							 GFP_KERNEL);
			if (ret < 0) {
				dout("encryption failed with %d\n", ret);
				ceph_release_page_vector(pages, num_pages);
				break;
			}
		}

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					    ci->i_vino, write_pos, &write_len,
					    rmw ? 1 : 0, rmw ? 2 : 1,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE,
					    snapc, ci->i_truncate_seq,
					    ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			ceph_release_page_vector(pages, num_pages);
			break;
		}

		dout("sync_write write op %lld~%llu\n", write_pos, write_len);
		osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
						 offset_in_page(write_pos), false,
						 true);
		req->r_inode = inode;
		req->r_mtime = mtime;

		if (rmw) {
			/*
			 * Set up the assertion. If we don't have a version
			 * number, then the object doesn't exist yet. Use an
			 * exclusive create instead of a version assertion in
			 * that case.
			 */
			if (assert_ver) {
				osd_req_op_init(req, 0, CEPH_OSD_OP_ASSERT_VER, 0);
				req->r_ops[0].assert_ver.ver = assert_ver;
			} else {
				osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE,
						CEPH_OSD_OP_FLAG_EXCL);
			}
		}

		ceph_osdc_start_request(osdc, req);
		ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
					  req->r_end_latency, len, ret);
		ceph_osdc_put_request(req);
		if (ret != 0) {
			dout("sync_write osd write returned %d\n", ret);
			/* Version changed! Must re-do the rmw cycle */
			if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
			    (!assert_ver && ret == -EEXIST)) {
				/* We should only ever see this on a rmw */
				WARN_ON_ONCE(!rmw);

				/* The version should never go backward */
				WARN_ON_ONCE(ret == -EOVERFLOW);

				*from = saved_iter;

				/* FIXME: limit number of times we loop? */
				continue;
			}
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);

		/*
		 * We successfully wrote to a range of the file. Declare
		 * that region of the pagecache invalid.
		 */
		ret = invalidate_inode_pages2_range(
				inode->i_mapping,
				pos >> PAGE_SHIFT,
				(pos + len - 1) >> PAGE_SHIFT);
		if (ret < 0) {
			dout("invalidate_inode_pages2_range returned %d\n",
			     ret);
			ret = 0;
		}
		pos += len;
		written += len;
		dout("sync_write written %d\n", written);
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	dout("sync_write returning %d\n", ret);
	return ret;
}

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
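/*
 * Dispatch sketch: if the client holds Fc (FILE_CACHE) caps and neither
 * O_DIRECT nor CEPH_F_SYNC is in effect, the read is served from the
 * page cache via generic_file_read_iter(); otherwise it falls through
 * to ceph_sync_read()/ceph_direct_read_write(), and inline data takes
 * the READ_INLINE retry path.
 */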
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;
	int want = 0, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_CACHE;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0) {
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (!ceph_has_inline_data(ci)) {
			if (!retry_op &&
			    (iocb->ki_flags & IOCB_DIRECT) &&
			    !IS_ENCRYPTED(inode)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;

		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Wrap filemap_splice_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 */
static ssize_t ceph_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe,
				size_t len, unsigned int flags)
{
	struct ceph_file_info *fi = in->private_data;
	struct inode *inode = file_inode(in);
	struct ceph_inode_info *ci = ceph_inode(inode);
	ssize_t ret;
	int want = 0, got = 0;
	CEPH_DEFINE_RW_CONTEXT(rw_ctx, 0);

	dout("splice_read %p %llx.%llx %llu~%zu trying to get caps on %p\n",
	     inode, ceph_vinop(inode), *ppos, len, inode);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (ceph_has_inline_data(ci) ||
	    (fi->flags & CEPH_F_SYNC))
		return copy_splice_read(in, ppos, pipe, len, flags);

	ceph_start_io_read(inode);

	want = CEPH_CAP_FILE_CACHE;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	ret = ceph_get_caps(in, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0)
		goto out_end;

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) == 0) {
		dout("splice_read/sync %p %llx.%llx %llu~%zu got cap refs on %s\n",
		     inode, ceph_vinop(inode), *ppos, len,
		     ceph_cap_string(got));

		ceph_put_cap_refs(ci, got);
		ceph_end_io_read(inode);
		return copy_splice_read(in, ppos, pipe, len, flags);
	}

	dout("splice_read %p %llx.%llx %llu~%zu got cap refs on %s\n",
	     inode, ceph_vinop(inode), *ppos, len, ceph_cap_string(got));

	rw_ctx.caps = got;
	ceph_add_rw_context(fi, &rw_ctx);
	ret = filemap_splice_read(in, ppos, pipe, len, flags);
	ceph_del_rw_context(fi, &rw_ctx);

	dout("splice_read %p %llx.%llx dropping cap refs on %s = %zd\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), ret);

	ceph_put_cap_refs(ci, got);
out_end:
	ceph_end_io_read(inode);
	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want = 0, got;
	bool direct_lock = false;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
		direct_lock = true;

retry_snap:
	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_BUFFER;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;
	got = 0;
	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
	if (err < 0)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out_caps;

	inode_inc_iversion_raw(inode);

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if ((iocb->ki_flags & IOCB_DIRECT) && !IS_ENCRYPTED(inode))
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_write(inode);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(iocb, from);
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_FLUSH);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out_caps:
	ceph_put_cap_refs(ci, got);
out:
	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_write(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *inode = file_inode(file);
		int ret;

		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			return ret;
	}
	return generic_file_llseek(file, offset, whence);
}

static inline void ceph_zero_partial_page(struct inode *inode,
					  loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}
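/*
 * Zero a byte range in the page cache: partial pages at either end are
 * zeroed in place, while page-aligned ranges in the middle are simply
 * dropped with truncate_pagecache_range().
 */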
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
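/*
 * Zero (part of) a single RADOS object.  With a NULL length the whole
 * object is dropped: DELETE for objects past the start of the file,
 * TRUNCATE for the object at offset zero.  Otherwise an OSD ZERO
 * operation is issued for the given extent.
 */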
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ceph_osdc_start_request(&fsc->client->osdc, req);
	ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
	if (ret == -ENOENT)
		ret = 0;
	ceph_osdc_put_request(req);

out:
	return ret;
}
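/*
 * Zero a file range at the object level: zero leading and trailing
 * partial objects in place, and drop every object set (period) that
 * the range fully covers, one stripe unit at a time.
 */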
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;

		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
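/*
 * fallocate(): only hole punching (FALLOC_FL_PUNCH_HOLE together with
 * FALLOC_FL_KEEP_SIZE) is supported.  The hole is punched by zeroing
 * the page cache and then the backing objects while holding Fw caps.
 */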
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
	     inode, ceph_vinop(inode), mode, offset, length);

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	if (IS_ENCRYPTED(inode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
	if (ret < 0)
		goto unlock;

	ret = file_modified(file);
	if (ret)
		goto put_caps;

	filemap_invalidate_lock(inode->i_mapping);
	ceph_fscache_invalidate(inode, false);
	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
	filemap_invalidate_unlock(inode->i_mapping);

put_caps:
	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci.  Two attempts are made to obtain both caps, and an error is
 * returned if this fails; zero is returned on success.
 */
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got);
		if (ret < 0)
			return ret;
		/* ... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}
static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}
/*
 * This function does several size-related checks, returning an error if:
 *  - source file is smaller than off+len
 *  - destination file size is not OK (inode_newsize_ok())
 *  - max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF.  Instead of simply setting length
	 * to (size - src_off), just drop to VFS default implementation, as the
	 * local i_size may be stale due to other clients writing to the source
	 * inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}
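/*
 * Allocate a copy-from2 OSD request for a single remote object copy.
 * The fadvise flags hint that the source object is read sequentially
 * and shouldn't be cached, and that the destination won't be needed
 * again soon.
 */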
static struct ceph_osd_request *
ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
			    u64 src_snapid,
			    struct ceph_object_id *src_oid,
			    struct ceph_object_locator *src_oloc,
			    struct ceph_object_id *dst_oid,
			    struct ceph_object_locator *dst_oloc,
			    u32 truncate_seq, u64 truncate_size)
{
	struct ceph_osd_request *req;
	int ret;
	u32 src_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
	u32 dst_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
					src_oid, src_oloc,
					src_fadvise_flags,
					dst_fadvise_flags,
					truncate_seq,
					truncate_size,
					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	return req;

out:
	ceph_osdc_put_request(req);
	return ERR_PTR(ret);
}
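/*
 * Copy as many whole objects as possible with OSD copy-from2
 * operations, advancing *src_off and *dst_off as objects are copied.
 * Returns the number of bytes copied, or a negative error if nothing
 * could be copied.
 */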
static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
				    struct ceph_inode_info *dst_ci, u64 *dst_off,
				    struct ceph_fs_client *fsc,
				    size_t len, unsigned int flags)
{
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *req;
	size_t bytes = 0;
	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
	u32 src_objlen, dst_objlen;
	u32 object_size = src_ci->i_layout.object_size;
	int ret;

	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
	osdc = &fsc->client->osdc;

	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
						  &src_oid, &src_oloc,
						  &dst_oid, &dst_oloc,
						  dst_ci->i_truncate_seq,
						  dst_ci->i_truncate_size);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
		} else {
			ceph_osdc_start_request(osdc, req);
			ret = ceph_osdc_wait_request(osdc, req);
			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
						     req->r_start_latency,
						     req->r_end_latency,
						     object_size, ret);
			ceph_osdc_put_request(req);
		}
		if (ret) {
			if (ret == -EOPNOTSUPP) {
				fsc->have_copy_from2 = false;
				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
			}
			dout("ceph_osdc_copy_from returned %d\n", ret);
			if (!bytes)
				bytes = ret;
			goto out;
		}
		len -= object_size;
		bytes += object_size;
		*src_off += object_size;
		*dst_off += object_size;
	}

out:
	ceph_oloc_destroy(&src_oloc);
	ceph_oloc_destroy(&dst_oloc);
	return bytes;
}
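/*
 * Attempt an offloaded copy_file_range().  Unaligned heads and tails
 * are copied through do_splice_direct(), while full objects are copied
 * remotely by the OSDs; -EOPNOTSUPP is returned whenever falling back
 * to the generic VFS implementation is the better option.
 */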
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	loff_t size;
	ssize_t ret = -EIO, bytes;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen;
	int src_got = 0, dst_got = 0, err, dirty;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation.  This is
	 * desirable in several cases (for ex, the 'len' is smaller than the
	 * size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	if (!src_fsc->have_copy_from2)
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies.  Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	/* Every encrypted inode gets its own key, so we can't offload them */
	if (IS_ENCRYPTED(src_inode) || IS_ENCRYPTED(dst_inode))
		return -EOPNOTSUPP;

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches.  And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	/* Drop dst file cached pages */
	ceph_fscache_invalidate(dst_inode, false);
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    (dst_off + len) >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at the src_off
	 */
	if (src_objoff) {
		dout("Initial partial copy of %u bytes\n", src_objlen);

		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		/* Abort on short copies or on error */
		if (ret < (long)src_objlen) {
			dout("Failed partial copy (%zd)\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}

	size = i_size_read(dst_inode);
	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
				     src_fsc, len, flags);
	if (bytes <= 0) {
		if (!ret)
			ret = bytes;
		goto out_caps;
	}
	dout("Copied %zu bytes out of %zu\n", bytes, len);
	len -= bytes;
	ret += bytes;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (dst_off > size) {
		/* Let the MDS know about dst file size change */
		if (ceph_inode_set_size(dst_inode, dst_off) ||
		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
	}
	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	/*
	 * Do the final manual copy if we still have some bytes left, unless
	 * there were errors in remote object copies (len >= object_size).
	 */
	if (len && (len < src_ci->i_layout.object_size)) {
		dout("Final partial copy of %zu bytes\n", len);
		bytes = do_splice_direct(src_file, &src_off, dst_file,
					 &dst_off, len, flags);
		if (bytes > 0)
			ret += bytes;
		else
			dout("Failed partial copy (%zd)\n", bytes);
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}
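/*
 * copy_file_range entry point: try the offloaded copy first and fall
 * back to the generic implementation if the copy cannot be offloaded
 * (-EOPNOTSUPP) or would cross clusters (-EXDEV).
 */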
static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}
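/* File operations for regular CephFS files */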
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.setlease = simple_nosetlease,
	.flock = ceph_flock,
	.splice_read = ceph_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fallocate = ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};