/* Options that take no arguments */
Opt_nodevmap,
/* Cache options */
- Opt_cache_loose, Opt_fscache,
+ Opt_cache_loose, Opt_fscache, Opt_mmap,
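+ /* cache=mmap: keep read/write direct, use the page cache only for mmap */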
/* Access options */
Opt_access, Opt_posixacl,
/* Error token */
{Opt_cache, "cache=%s"},
{Opt_cache_loose, "loose"},
{Opt_fscache, "fscache"},
+ {Opt_mmap, "mmap"},
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
} else if (!strcmp(s, "fscache")) {
version = CACHE_FSCACHE;
p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n");
+ } else if (!strcmp(s, "mmap")) {
+ version = CACHE_MMAP;
+ p9_debug(P9_DEBUG_9P, "Cache mode: mmap\n");
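+ /*
+ * Illustrative mount invocation for this mode (example only):
+ * mount -t 9p -o trans=virtio,cache=mmap <mount_tag> /mnt
+ */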
} else if (!strcmp(s, "none")) {
version = CACHE_NONE;
p9_debug(P9_DEBUG_9P, "Cache mode: none\n");
case Opt_fscache:
v9ses->cache = CACHE_FSCACHE;
break;
+ case Opt_mmap:
+ v9ses->cache = CACHE_MMAP;
+ break;
case Opt_cachetag:
#ifdef CONFIG_9P_FSCACHE
v9ses->cachetag = match_strdup(&args[0]);
#include "cache.h"
static const struct vm_operations_struct v9fs_file_vm_ops;
+static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
/**
* v9fs_file_open - open a file (or directory)
file->private_data = fid;
mutex_lock(&v9inode->v_mutex);
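+ /*
+ * Only the fully cached modes need a writeback fid at open time;
+ * cache=mmap clones one lazily in v9fs_mmap_file_mmap() instead.
+ */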
- if (v9ses->cache && !v9inode->writeback_fid &&
+ if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
+ !v9inode->writeback_fid &&
((file->f_flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
v9inode->writeback_fid = (void *) fid;
}
mutex_unlock(&v9inode->v_mutex);
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
return 0;
out_error:
}
static int
-v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
int retval;
- retval = generic_file_mmap(file, vma);
+
+ retval = generic_file_mmap(filp, vma);
if (!retval)
vma->vm_ops = &v9fs_file_vm_ops;

return retval;
}
static int
+v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int retval;
+ struct inode *inode;
+ struct v9fs_inode *v9inode;
+ struct p9_fid *fid;
+
+ inode = file_inode(filp);
+ v9inode = V9FS_I(inode);
+ mutex_lock(&v9inode->v_mutex);
+ if (!v9inode->writeback_fid &&
+ (vma->vm_flags & VM_WRITE)) {
+ /*
+ * Clone a fid and add it to writeback_fid.
+ * We do this at mmap time instead of at page-dirty time
+ * (via write_begin/page_mkwrite) because we want the
+ * write-after-unlink use case to work.
+ */
+ fid = v9fs_writeback_fid(filp->f_path.dentry);
+ if (IS_ERR(fid)) {
+ retval = PTR_ERR(fid);
+ mutex_unlock(&v9inode->v_mutex);
+ return retval;
+ }
+ v9inode->writeback_fid = (void *) fid;
+ }
+ mutex_unlock(&v9inode->v_mutex);
+
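+ /* set up the mapping, then switch to vm_ops that flush dirty pages on close */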
+ retval = generic_file_mmap(filp, vma);
+ if (!retval)
+ vma->vm_ops = &v9fs_mmap_file_vm_ops;
+
+ return retval;
+}
+
+static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
return do_sync_read(filp, data, count, offset);
}
+/**
+ * v9fs_mmap_file_read - read from a file
+ * @filp: file pointer to read
+ * @data: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+static ssize_t
+v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
+ loff_t *offset)
+{
+ /* TODO: Check if there are dirty pages */
+ return v9fs_file_read(filp, data, count, offset);
+}
+
static ssize_t
v9fs_direct_write(struct file *filp, const char __user * data,
size_t count, loff_t *offsetp)
return do_sync_write(filp, data, count, offset);
}
+
+/**
+ * v9fs_mmap_file_write - write to a file
+ * @filp: file pointer to write
+ * @data: data buffer to write data from
+ * @count: size of buffer
+ * @offset: offset at which to write data
+ *
+ */
+static ssize_t
+v9fs_mmap_file_write(struct file *filp, const char __user *data,
+ size_t count, loff_t *offset)
+{
+ /*
+ * TODO: invalidate mmaps on filp's inode between
+ * offset and offset+count
+ */
+ return v9fs_file_write(filp, data, count, offset);
+}
+
+static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ struct writeback_control wbc = {
+ .nr_to_write = LONG_MAX,
+ .sync_mode = WB_SYNC_ALL,
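+ /* write back only the byte range this VMA covers */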
+ .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
+ /* absolute end, byte at end included */
+ .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
+ (vma->vm_end - vma->vm_start - 1),
+ };
+
+ p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing\n", vma);
+
+ inode = file_inode(vma->vm_file);
+
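+ /* skip data writeback if the backing device cannot hold dirty pages */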
+ if (!mapping_cap_writeback_dirty(inode->i_mapping))
+ wbc.nr_to_write = 0;
+
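+ /* flush synchronously: the pages must be clean before the mapping is gone */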
+ might_sleep();
+ sync_inode(inode, &wbc);
+}
+
static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = v9fs_vm_page_mkwrite,
.remap_pages = generic_file_remap_pages,
};
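+
+/*
+ * Identical to v9fs_file_vm_ops except for ->close: with cache=mmap,
+ * writes reach the page cache only through a mapping, so the dirty
+ * range is written back synchronously when the VMA is torn down.
+ */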
+static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
+ .close = v9fs_mmap_vm_close,
+ .fault = filemap_fault,
+ .page_mkwrite = v9fs_vm_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
+};
+
const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.mmap = generic_file_readonly_mmap,
.fsync = v9fs_file_fsync_dotl,
};
+
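+/*
+ * For cache=mmap the read/write file operations stay on the direct
+ * (uncached) paths; only ->mmap goes through the page cache. The
+ * TODOs in v9fs_mmap_file_read/write mark the remaining coherence
+ * gaps between the two paths.
+ */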
+const struct file_operations v9fs_mmap_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = v9fs_mmap_file_read,
+ .write = v9fs_mmap_file_write,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+ .lock = v9fs_file_lock,
+ .mmap = v9fs_mmap_file_mmap,
+ .fsync = v9fs_file_fsync,
+};
+
+const struct file_operations v9fs_mmap_file_operations_dotl = {
+ .llseek = generic_file_llseek,
+ .read = v9fs_mmap_file_read,
+ .write = v9fs_mmap_file_write,
+ .open = v9fs_file_open,
+ .release = v9fs_dir_release,
+ .lock = v9fs_file_lock_dotl,
+ .flock = v9fs_file_flock_dotl,
+ .mmap = v9fs_mmap_file_mmap,
+ .fsync = v9fs_file_fsync_dotl,
+};
case S_IFREG:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE ||
+ v9ses->cache == CACHE_FSCACHE)
inode->i_fop =
&v9fs_cached_file_operations_dotl;
+ else if (v9ses->cache == CACHE_MMAP)
+ inode->i_fop = &v9fs_mmap_file_operations_dotl;
else
inode->i_fop = &v9fs_file_operations_dotl;
} else {
inode->i_op = &v9fs_file_inode_operations;
- if (v9ses->cache)
- inode->i_fop = &v9fs_cached_file_operations;
+ if (v9ses->cache == CACHE_LOOSE ||
+ v9ses->cache == CACHE_FSCACHE)
+ inode->i_fop =
+ &v9fs_cached_file_operations;
+ else if (v9ses->cache == CACHE_MMAP)
+ inode->i_fop = &v9fs_mmap_file_operations;
else
inode->i_fop = &v9fs_file_operations;
}
* unlink. For cached mode create calls request for new
* inode. But with cache disabled, lookup should do this.
*/
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
else
inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
v9fs_invalidate_inode_attr(dir);
v9inode = V9FS_I(dentry->d_inode);
mutex_lock(&v9inode->v_mutex);
- if (v9ses->cache && !v9inode->writeback_fid &&
+ if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
+ !v9inode->writeback_fid &&
((flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
goto error;
file->private_data = fid;
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(dentry->d_inode, file);
*opened |= FILE_CREATED;
*/
i_size = inode->i_size;
v9fs_stat2inode(st, inode, inode->i_sb);
- if (v9ses->cache)
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
out: