#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */
static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);
/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
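/*
 * Illustrative sketch, not part of the original file: a typical caller
 * builds a ceph_vino from MDS reply fields and lets ceph_get_inode()
 * find or create the matching VFS inode.  "example_ino" is a
 * hypothetical value.
 */
#if 0
static struct inode *example_lookup_live_inode(struct super_block *sb,
					       u64 example_ino)
{
	struct ceph_vino vino = {
		.ino = example_ino,
		.snap = CEPH_NOSNAP,	/* the live (non-snapshot) namespace */
	};

	/* may return ERR_PTR(-ENOMEM); callers must check with IS_ERR() */
	return ceph_get_inode(sb, vino);
}
#endif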
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	return inode;
}
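/*
 * Illustrative usage, not part of the original file: the virtual
 * ".snap" dir of any directory shares the parent's inode number but
 * lives in the CEPH_SNAPDIR snap namespace, so it can be materialized
 * on demand:
 *
 *	struct inode *snapdir = ceph_get_snapdir(dir);
 */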
const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};
/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
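/*
 * Worked example (illustrative, not from the original file): frag ids
 * name subsets of the dentry-name hash space.  The root frag
 * ceph_frag_make(0, 0) covers everything; a directory split one way
 * (split_by = 1) is covered by the two children
 * ceph_frag_make_child(root, 1, 0) and ceph_frag_make_child(root, 1, 1),
 * each holding half of the hash space.  ceph_choose_frag() below simply
 * repeats that descent until it reaches a leaf frag.
 */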
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}
/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}
/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
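/*
 * Illustrative sketch, not part of the original file: mapping a name
 * hash to the frag that holds it, and noting whether delegation info
 * was found.  "example_ci" and "hash" are hypothetical.
 */
#if 0
static u32 example_frag_for_hash(struct ceph_inode_info *example_ci, u32 hash)
{
	struct ceph_inode_frag frag;
	int found;
	u32 t;

	t = ceph_choose_frag(example_ci, hash, &frag, &found);
	if (found)
		dout("hash %x -> frag %x delegated to mds%d\n",
		     hash, t, frag.mds);
	return t;
}
#endif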
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}
static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1 << 9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmapped.
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
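/*
 * Worked example (illustrative, not from the original file): if we know
 * truncate_seq 2 and the MDS reports truncate_seq 3 with a smaller
 * size, the new size is accepted and, when the file is open or mmapped,
 * i_truncate_pending is bumped so the caller queues an async truncate.
 * If the MDS instead reports the same truncate_seq 2 with a larger
 * size, only i_size grows (an ordinary append seen from another
 * client).
 */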
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	int updating_inode = 0;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	updating_inode = 1;
	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&ci->i_ceph_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&ci->i_ceph_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    updating_inode &&                 /* didn't jump to no_change */
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !ceph_dir_test_complete(inode)) {
		dout(" marking %p complete (empty)\n", inode);
		ceph_dir_set_complete(inode);
		ci->i_max_offset = 2;
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}
/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
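/*
 * Worked example (illustrative, not from the original file): with
 * HZ=250 and an MDS lease of duration_ms=3000, the lease expires at
 * ttl = from_time + 750 jiffies and renewal is considered from the
 * halfway point, half_ttl = from_time + 375 jiffies.
 */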
/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_inode_info *ci;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	ci = ceph_inode(inode);
	di = ceph_dentry(dn);

	spin_lock(&ci->i_ceph_lock);
	if (!ceph_dir_test_complete(inode)) {
		spin_unlock(&ci->i_ceph_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&ci->i_ceph_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook :)
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when holding
			 * D_COMPLETE.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
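/*
 * Worked example (illustrative, not from the original file): with
 * max_size=4M granted by the MDS and i_reported_size=1M, extending the
 * file to 2.5M gives (size << 1) = 5M >= max_size while
 * (i_reported_size << 1) = 2M < max_size, so the function returns 1 and
 * the caller reports the new size before write headroom runs out.
 */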
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}
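/*
 * The queue/work pairs here and below share one pattern: pin the inode
 * with ihold() before queueing, and drop the reference with iput()
 * either in the work function or immediately if the work was already
 * queued.  A minimal sketch of the pattern (illustrative only; "wq" and
 * "work" stand in for the specific workqueue and work item):
 */
#if 0
	ihold(inode);			/* keep inode alive across async work */
	if (queue_work(wq, work)) {
		/* queued; the worker is now responsible for iput(inode) */
	} else {
		iput(inode);		/* already pending; drop our extra ref */
	}
#endif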
/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&ci->i_ceph_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(&inode->i_data, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}
/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);
	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}
/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}
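/*
 * Worked example (illustrative, not from the original file): if a cap
 * message lowers i_truncate_size from 4096 to 0 while
 * truncate_inode_pages() is already running for 4096, the recheck under
 * i_ceph_lock sees to != i_truncate_size, finish stays 0, and we loop
 * so the newer, smaller truncation is also applied before returning.
 */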
/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&ci->i_ceph_lock);
	ceph_mdsc_put_request(req);
	return err;
}
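/*
 * Illustrative sketch, not part of the original file: a chmod() that
 * reaches ceph_setattr() is applied purely locally when we hold
 * CEPH_CAP_AUTH_EXCL (the mode is dirtied and flushed later);
 * otherwise it is packed into r_args.setattr and executed
 * synchronously on the MDS:
 */
#if 0
	struct iattr attr = {
		.ia_valid = ATTR_MODE,
		.ia_mode  = S_IFREG | 0644,	/* hypothetical new mode */
	};
	err = ceph_setattr(dentry, &attr);
#endif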
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
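/*
 * Illustrative usage, not part of the original file: callers request
 * only the caps covering the fields they are about to trust, e.g.
 *
 *	err = ceph_do_getattr(inode, CEPH_STAT_CAP_NLINK);
 *
 * which is satisfied locally whenever the matching caps are already
 * issued, and otherwise becomes a getattr to the MDS.
 */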
/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}
/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}