// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"
/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);
int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
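
/*
 * Usage sketch (not part of this file): callers are expected to bracket any
 * use of a dax_device with the SRCU read lock so that kill_dax() can wait
 * for them to drain.  The names "dax_dev", "pgoff", "kaddr" and "pfn" below
 * are illustrative only:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		... call dax_direct_access(), dax_copy_from_iter(), etc ...
 *
 *	dax_read_unlock(id);
 */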
static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}
/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
static struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
	phys_addr_t phys_off = (start_sect + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
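
/*
 * Usage sketch (illustrative caller-side fragment): converting a
 * partition-relative sector into a dax_device page offset before calling
 * dax_direct_access().  "bdev", "sector" and "size" are hypothetical names:
 *
 *	pgoff_t pgoff;
 *	int rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
 *
 *	if (rc)
 *		return rc;	// partition start or size not page aligned
 */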
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;
	return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
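
/*
 * Usage sketch: a filesystem that wants fsdax typically resolves its
 * dax_device once at mount time.  This is an illustrative fragment, not a
 * verbatim copy of any particular filesystem:
 *
 *	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 *
 *	if (!dax_dev)
 *		... fall back to the non-dax I/O paths ...
 *	// drop the reference with fs_put_dax() at unmount
 */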
bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
		return false;
	}

	if (!dax_dev) {
		pr_debug("%pg: error: dax unsupported by block device\n", bdev);
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_info("%pg: error: unaligned partition for dax\n", bdev);
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

	if (len < 1 || len2 < 1) {
		pr_info("%pg: error: dax access failed (%ld)\n",
				bdev, len < 1 ? len : len2);
		dax_read_unlock(id);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}
	dax_read_unlock(id);

	if (!dax_enabled) {
		pr_info("%pg: error: dax support not enabled\n", bdev);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(generic_fsdax_supported);
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	bool ret = false;
	int id;

	if (!dax_dev)
		return false;

	id = dax_read_lock();
	if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
		ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
						  start, len);
	dax_read_unlock(id);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_supported);
#endif /* CONFIG_FS_DAX */
#endif /* CONFIG_BLOCK */
enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};
static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}
static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
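
/*
 * Usage sketch: a dax-capable provider can expose the "dax" sysfs group by
 * listing it in its device attribute groups.  Illustrative only;
 * "example_attribute_groups" is a hypothetical name:
 *
 *	static const struct attribute_group *example_attribute_groups[] = {
 *		&dax_attribute_group,
 *		NULL,
 *	};
 */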
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return -EINVAL;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
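
/*
 * Usage sketch for dax_direct_access(), assuming the caller already holds
 * dax_read_lock() and has translated its offset to "pgoff" (illustrative
 * names only):
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (nr < 0)
 *		return nr;	// e.g. -ENXIO after kill_dax()
 *	// "kaddr" now maps at least "nr" pages of device memory
 */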
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (!dax_dev->ops->zero_page_range || nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
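
/*
 * Usage sketch: zeroing one page worth of device memory, for example to
 * clear poison before reuse.  Illustrative fragment under the SRCU read
 * lock:
 *
 *	rc = dax_zero_page_range(dax_dev, pgoff, 1);
 *	if (rc)
 *		... the provider could not zero the range ...
 */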
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
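
/*
 * Usage sketch: a provider that knows whether its media sits behind a
 * volatile write cache records that here, and dax_flush() then becomes a
 * nop when no flushing is needed.  "has_volatile_cache" is a hypothetical
 * driver-side flag:
 *
 *	dax_write_cache(dax_dev, has_volatile_cache);
 *	...
 *	dax_flush(dax_dev, kaddr, size);	// no-op when the flag is clear
 */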
bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);
bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
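
/*
 * Teardown ordering sketch for a provider driver (illustrative): first
 * revoke the device so in-flight operations drain, then drop the final
 * reference:
 *
 *	kill_dax(dax_dev);	// waits out SRCU readers, unhashes the host
 *	put_dax(dax_dev);	// final iput() of the backing inode
 */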
void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}
static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, iminor(inode));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}
static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};

static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}
static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}
static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}
struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax"
			 " operation zero_page_range()\n",
			 __host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
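
/*
 * Registration sketch for a provider driver (illustrative; "example_ops",
 * "drv" and "disk" are hypothetical names):
 *
 *	struct dax_device *dax_dev;
 *
 *	dax_dev = alloc_dax(drv, disk->disk_name, &example_ops, DAXDEV_F_SYNC);
 *	if (IS_ERR(dax_dev))
 *		return PTR_ERR(dax_dev);
 *	// later, on teardown: kill_dax(dax_dev); put_dax(dax_dev);
 */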
void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);
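
/*
 * Usage sketch: a "device dax" style character device can resolve its
 * dax_device from the inode it is opened through.  Illustrative open()
 * fragment, not the actual device-dax implementation:
 *
 *	static int example_open(struct inode *inode, struct file *filp)
 *	{
 *		struct dax_device *dax_dev = inode_dax(inode);
 *
 *		filp->private_data = dax_get_private(dax_dev);
 *		return 0;
 *	}
 */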
struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}
static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}
static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	dax_bus_exit();
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);