#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"
/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */
/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
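/*
 * For example (an illustration, not code in this file): a getattr
 * resulting from fstat(2) names just the base ino with an empty path,
 * while a lookup of "foo" in a directory names (dir ino, "foo") -- a
 * single path component relative to a base inode.
 */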
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;
/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		dentry->d_op = &ceph_dentry_ops;
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		dentry->d_op = &ceph_snapdir_dentry_ops;
	else
		dentry->d_op = &ceph_snap_dentry_ops;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_fsdata = di;
	dentry->d_time = jiffies;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
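/*
 * Worked example (illustrative only): ceph_make_fpos() in super.h
 * packs the frag into the high 32 bits of f_pos, so for frag
 * 0x2a000000 and offset 7:
 *
 *	p = ceph_make_fpos(0x2a000000, 7);   p == 0x2a00000000000007
 *	fpos_frag(p) == 0x2a000000;          fpos_off(p) == 7
 */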
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&dcache_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->at_end = 1;
			goto out_unlock;
		}
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	atomic_inc(&dentry->d_count);
	spin_unlock(&dcache_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      dentry->d_inode->i_ino,
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&dcache_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&dcache_lock);
out:
	if (last)
		dput(last);
	return err;
}
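/*
 * Note: a -EAGAIN return from __dcache_readdir() means we lost
 * I_COMPLETE mid-scan; ceph_readdir() below treats that as the cue to
 * fall back to a normal MDS readdir.
 */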
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    inode->i_ino, inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    filp->f_dentry->d_parent->d_inode->i_ino,
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&inode->i_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&inode->i_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = igrab(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;
		fi->frag = frag;
		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		u64 ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos, ino, ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		ci->i_ceph_flags |= CEPH_I_COMPLETE;
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}
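/*
 * Discard buffered readdir state: the cached MDS reply, the last
 * entry name, and our saved position, so the next readdir starts
 * from scratch.
 */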
static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->at_end = 0;
}
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}
/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
			} else {
				d_add(dentry, NULL);
			}
		}
	}

	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
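/*
 * Is this the hidden ".ceph" dentry at the root of the mount?  (Used
 * in ceph_lookup() below to avoid concluding ENOENT for it locally.)
 */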
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
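/*
 * Note that the local ENOENT short-circuit above relies on the same
 * invariant as __dcache_readdir(): I_COMPLETE is only defined while
 * we hold CEPH_CAP_FILE_SHARED, so a name absent from a complete,
 * share-capped directory cannot exist on the MDS either.
 */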
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}
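/*
 * ceph_mknod(), ceph_symlink(), and ceph_mkdir() below all rely on
 * this after ceph_mdsc_do_request() when the MDS reply carries no
 * dentry trace.
 */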
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err)
		d_drop(dentry);
	else if (!req->r_reply_info.head->is_dentry)
		d_instantiate(dentry, igrab(old_dentry->d_inode));
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}
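/*
 * Worked example (illustrative): unlinking an open file with
 * i_nlink == 1 and __ceph_caps_wanted() == CEPH_CAP_FILE_WR yields a
 * drop mask covering every cap except CEPH_CAP_PIN and
 * CEPH_CAP_FILE_WR; the LINK caps are always included in the mask.
 */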
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
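/*
 * Note that the renewal state (session, dir, seq) is captured under
 * d_lock above, but the CEPH_MDS_LEASE_RENEW message itself is only
 * sent after the lock has been dropped.
 */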
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}
/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen or if this is in the snapshot namespace.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *parent_inode = NULL;
	u64 snapid = CEPH_NOSNAP;

	if (!IS_ROOT(dentry)) {
		parent_inode = dentry->d_parent->d_inode;
		if (parent_inode)
			snapid = ceph_snap(parent_inode);
	}
	dout("dentry_release %p parent %p\n", dentry, parent_inode);
	if (parent_inode && snapid != CEPH_SNAPDIR) {
		struct ceph_inode_info *ci = ceph_inode(parent_inode);

		spin_lock(&parent_inode->i_lock);
		if (ci->i_shared_gen == di->lease_shared_gen ||
		    snapid <= CEPH_MAXSNAP) {
			dout(" clearing %p complete (d_release)\n",
			     parent_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		spin_unlock(&parent_inode->i_lock);
	}
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}
/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(1024, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			sprintf(cf->dir_info,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
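/*
 * Example usage (illustrative), on a mount with -o dirstat:
 *
 *	$ cat /mnt/ceph/somedir
 *	entries:   ...
 *
 * i.e. read(2) on the directory itself returns the formatted stats
 * built above.
 */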
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}
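/*
 * Note: only requests still on i_unsafe_dirops (not yet committed by
 * the MDS) are waited on; r_safe_completion fires once the MDS
 * commits the request.
 */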
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_dentry_release,
};