/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>

#include "dax-private.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
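
/*
 * A minimal usage sketch (illustrative, not part of the upstream file):
 * callers bracket dax_operations access with the SRCU read lock so that
 * kill_dax() can synchronize against in-flight operations:
 *
 *	int id = dax_read_lock();
 *
 *	if (dax_alive(dax_dev))
 *		... invoke dax_direct_access(), dax_copy_from_iter(), etc ...
 *	dax_read_unlock(id);
 */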

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
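
/*
 * Illustrative only (assumed local variable names): a filesystem translates
 * a block-device-relative sector to a dax_device pgoff before calling
 * dax_direct_access():
 *
 *	pgoff_t pgoff;
 *
 *	if (bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff) == 0)
 *		len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 */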

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_queue))
		return NULL;
	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	bool dax_enabled = false;
	struct request_queue *q;
	pgoff_t pgoff;
	int err, id;
	pfn_t pfn;
	long len;
	char buf[BDEVNAME_SIZE];

	if (blocksize != PAGE_SIZE) {
		pr_debug("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
	if (err) {
		pr_debug("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
	dax_read_unlock(id);

	put_dax(dax_dev);

	if (len < 1) {
		pr_debug("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn)) {
		struct dev_pagemap *pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
			dax_enabled = true;
		put_dev_pagemap(pgmap);
	}

	if (!dax_enabled) {
		pr_debug("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif /* CONFIG_BLOCK */
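
/*
 * Illustrative sketch of the intended caller, a filesystem validating a
 * "-o dax" mount via the bdev_dax_supported() wrapper from
 * include/linux/dax.h (the sbi/MOUNT_DAX names are hypothetical):
 *
 *	if (sbi->options & MOUNT_DAX &&
 *	    !bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
 *		return -EINVAL;
 */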

enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
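
/*
 * A minimal, illustrative caller (assumed local names): map the first page
 * of the device and read through the returned kernel address, holding the
 * SRCU read lock and a prior dax_get_by_host()/fs_dax_get_by_bdev()
 * reference for the duration:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long len;
 *	int id;
 *
 *	id = dax_read_lock();
 *	len = dax_direct_access(dax_dev, 0, 1, &kaddr, &pfn);
 *	if (len >= 1)
 *		memcpy(buf, kaddr, PAGE_SIZE);
 *	dax_read_unlock(id);
 */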

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
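
/*
 * Illustrative teardown ordering for a dax driver: first revoke new
 * operations and wait out in-flight ones, then drop the final reference
 * so the backing inode can be freed:
 *
 *	kill_dax(dax_dev);
 *	put_dax(dax_dev);
 */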

void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct dax_device *dax_dev = to_dax_dev(inode);

	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);

	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return NULL;

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);
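
/*
 * Illustrative driver-side setup (my_* names are hypothetical): register a
 * dax_device backed by this driver's dax_operations; tear it down later
 * with kill_dax()/put_dax() when the backing device goes away:
 *
 *	static const struct dax_operations my_dax_ops = {
 *		.direct_access = my_direct_access,
 *		.copy_from_iter = my_copy_from_iter,
 *		.copy_to_iter = my_copy_to_iter,
 *	};
 *
 *	dax_dev = alloc_dax(my_priv, disk->disk_name, &my_dax_ops);
 *	if (!dax_dev)
 *		return -ENOMEM;
 */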

void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);
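
/*
 * Illustrative lookup/release pairing: a successful lookup takes an inode
 * reference (via igrab() above) that the caller must drop with put_dax():
 *
 *	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
 *	if (dax_dev) {
 *		... use dax_dev under dax_read_lock() ...
 *		put_dax(dax_dev);
 *	}
 */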

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);

static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}

static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_fs_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_fs_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_fs_type);
	kmem_cache_destroy(dax_cache);
}

static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

 err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
 err_chrdev:
	dax_fs_exit();
	return rc;
}

static void __exit dax_core_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);