From 3bc52c45bac26bf7ed1dc8d287ad1aeaed1250b6 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Sun, 24 Jul 2016 21:55:45 -0700
Subject: dax: define a unified inode/address_space for device-dax mappings

In support of enabling resize / truncate of device-dax instances, define
a pseudo-fs to provide a unified inode/address space for vm operations.

Cc: Al Viro
Signed-off-by: Dan Williams
---
 drivers/dax/dax.c | 150 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 146 insertions(+), 4 deletions(-)

(limited to 'drivers/dax')

diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 17715773c097..e8b9319aeadb 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -13,7 +13,9 @@
 #include 
 #include 
 #include 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -26,6 +28,9 @@ static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
 module_param(nr_dax, int, S_IRUGO);
+static struct vfsmount *dax_mnt;
+static struct kmem_cache *dax_cache __read_mostly;
+static struct super_block *dax_superblock __read_mostly;
 MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
 
 /**
@@ -61,6 +66,7 @@ struct dax_region {
  */
 struct dax_dev {
 	struct dax_region *region;
+	struct inode *inode;
 	struct device dev;
 	struct cdev cdev;
 	bool alive;
@@ -69,6 +75,117 @@ struct dax_dev {
 	struct resource res[0];
 };
 
+static struct inode *dax_alloc_inode(struct super_block *sb)
+{
+	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
+}
+
+static void dax_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	kmem_cache_free(dax_cache, inode);
+}
+
+static void dax_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, dax_i_callback);
+}
+
+static const struct super_operations dax_sops = {
+	.statfs = simple_statfs,
+	.alloc_inode = dax_alloc_inode,
+	.destroy_inode = dax_destroy_inode,
+	.drop_inode = generic_delete_inode,
+};
+
+static struct dentry *dax_mount(struct file_system_type *fs_type,
+		int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
+}
+
+static struct file_system_type dax_type = {
+	.name = "dax",
+	.mount = dax_mount,
+	.kill_sb = kill_anon_super,
+};
+
+static int dax_test(struct inode *inode, void *data)
+{
+	return inode->i_cdev == data;
+}
+
+static int dax_set(struct inode *inode, void *data)
+{
+	inode->i_cdev = data;
+	return 0;
+}
+
+static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
+{
+	struct inode *inode;
+
+	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
+			dax_test, dax_set, cdev);
+
+	if (!inode)
+		return NULL;
+
+	if (inode->i_state & I_NEW) {
+		inode->i_mode = S_IFCHR;
+		inode->i_flags = S_DAX;
+		inode->i_rdev = devt;
+		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+		unlock_new_inode(inode);
+	}
+	return inode;
+}
+
+static void init_once(void *inode)
+{
+	inode_init_once(inode);
+}
+
+static int dax_inode_init(void)
+{
+	int rc;
+
+	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
+			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+			init_once);
+	if (!dax_cache)
+		return -ENOMEM;
+
+	rc = register_filesystem(&dax_type);
+	if (rc)
+		goto err_register_fs;
+
+	dax_mnt = kern_mount(&dax_type);
+	if (IS_ERR(dax_mnt)) {
+		rc = PTR_ERR(dax_mnt);
+		goto err_mount;
+	}
+	dax_superblock = dax_mnt->mnt_sb;
+
+	return 0;
+
+ err_mount:
+	unregister_filesystem(&dax_type);
+ err_register_fs:
+	kmem_cache_destroy(dax_cache);
+
+	return rc;
+}
+
+static void dax_inode_exit(void)
+{
+	kern_unmount(dax_mnt);
+	unregister_filesystem(&dax_type);
+	kmem_cache_destroy(dax_cache);
+}
+
 static void dax_region_free(struct kref *kref)
 {
 	struct dax_region *dax_region;
@@ -379,6 +496,9 @@ static int dax_open(struct inode *inode, struct file *filp)
 
 	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
 	dev_dbg(&dax_dev->dev, "%s\n", __func__);
+	inode->i_mapping = dax_dev->inode->i_mapping;
+	inode->i_mapping->host = dax_dev->inode;
+	filp->f_mapping = inode->i_mapping;
 	filp->private_data = dax_dev;
 	inode->i_flags = S_DAX;
 
@@ -410,6 +530,7 @@ static void dax_dev_release(struct device *dev)
 	ida_simple_remove(&dax_region->ida, dax_dev->id);
 	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
 	dax_region_put(dax_region);
+	iput(dax_dev->inode);
 	kfree(dax_dev);
 }
 
@@ -459,6 +580,12 @@ int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
 		goto err_minor;
 	}
 
+	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
+	if (!dax_dev->inode) {
+		rc = -ENOMEM;
+		goto err_inode;
+	}
+
 	/* device_initialize() so cdev can reference kobj parent */
 	dev_t = MKDEV(MAJOR(dax_devt), minor);
 	dev = &dax_dev->dev;
@@ -494,6 +621,8 @@ int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
 	return devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
 
  err_cdev:
+	iput(dax_dev->inode);
+ err_inode:
 	ida_simple_remove(&dax_minor_ida, minor);
  err_minor:
 	ida_simple_remove(&dax_region->ida, dax_dev->id);
@@ -508,16 +637,28 @@ static int __init dax_init(void)
 {
 	int rc;
 
+	rc = dax_inode_init();
+	if (rc)
+		return rc;
+
 	nr_dax = max(nr_dax, 256);
 	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
 	if (rc)
-		return rc;
+		goto err_chrdev;
 
 	dax_class = class_create(THIS_MODULE, "dax");
-	if (IS_ERR(dax_class))
-		unregister_chrdev_region(dax_devt, nr_dax);
+	if (IS_ERR(dax_class)) {
+		rc = PTR_ERR(dax_class);
+		goto err_class;
+	}
 
-	return PTR_ERR_OR_ZERO(dax_class);
+	return 0;
+
+ err_class:
+	unregister_chrdev_region(dax_devt, nr_dax);
+ err_chrdev:
+	dax_inode_exit();
+	return rc;
 }
 
 static void __exit dax_exit(void)
@@ -525,6 +666,7 @@ static void __exit dax_exit(void)
 	class_destroy(dax_class);
 	unregister_chrdev_region(dax_devt, nr_dax);
 	ida_destroy(&dax_minor_ida);
+	dax_inode_exit();
 }
 
 MODULE_AUTHOR("Intel Corporation");
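
The point of hanging every device-dax instance off this pseudo-fs is that each open of the character device now shares the single dax_dev->inode->i_mapping, so a later resize / truncate path can invalidate all userspace mappings through that one address_space. A minimal sketch of how a follow-on change might use it, assuming it sits in drivers/dax/dax.c below the struct dax_dev definition (the helper name is hypothetical and not part of this commit):

#include <linux/mm.h>	/* unmap_mapping_range() */

/*
 * Hypothetical sketch, not in this patch: dax_open() points each
 * file's f_mapping at the shared dax_dev->inode->i_mapping, so
 * zapping that one address_space tears down every user VMA of the
 * device in a single call.
 */
static void dax_dev_zap_mappings(struct dax_dev *dax_dev)
{
	/* holebegin = 0, holelen = 0: unmap to end of file; even_cows = 1 */
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
}

This mirrors the block layer, which gives every block device an inode on its internal bdev pseudo-fs for the same reason; the dax pseudo-fs exists only so these inodes have a superblock to live on.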