From 74d71a01abef37f71d914f2105a4cb8712a2beb8 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 6 May 2017 06:14:43 +0200 Subject: device-dax: Tell kbuild DEV_DAX_PMEM depends on DEV_DAX ERROR: "devm_create_dev_dax" [drivers/dax/dax_pmem.ko] undefined! ERROR: "alloc_dax_region" [drivers/dax/dax_pmem.ko] undefined! ERROR: "dax_region_put" [drivers/dax/dax_pmem.ko] undefined! Signed-off-by: Mike Galbraith Signed-off-by: Dan Williams --- drivers/dax/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index b7053eafd88e..6b7e20eae16c 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -19,7 +19,7 @@ config DEV_DAX config DEV_DAX_PMEM tristate "PMEM DAX: direct access to persistent memory" - depends on LIBNVDIMM && NVDIMM_DAX + depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX default DEV_DAX help Support raw access to persistent memory. Note that this -- cgit v1.2.3-59-g8ed1b From ef51042472f55b325fd7f2b26a2e29fd89757234 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 8 May 2017 10:55:27 -0700 Subject: block, dax: move "select DAX" from BLOCK to FS_DAX For configurations that do not enable DAX filesystems or drivers, do not require the DAX core to be built. Given that the 'direct_access' method has been removed from 'block_device_operations', we can also go ahead and remove the block-related dax helper functions from fs/block_dev.c to drivers/dax/super.c. This keeps dax details out of the block layer and lets the DAX core be built as a module in the FS_DAX=n case. Filesystems need to include dax.h to call bdev_dax_supported(). Cc: linux-xfs@vger.kernel.org Cc: Jens Axboe Cc: "Theodore Ts'o" Cc: Matthew Wilcox Cc: Alexander Viro Cc: "Darrick J. Wong" Cc: Ross Zwisler Reviewed-by: Jan Kara Reported-by: Geert Uytterhoeven Signed-off-by: Dan Williams --- block/Kconfig | 1 - drivers/dax/super.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/Kconfig | 1 + fs/block_dev.c | 66 ----------------------------------------------- fs/ext2/super.c | 1 + fs/ext4/super.c | 1 + fs/xfs/xfs_super.c | 1 + include/linux/blkdev.h | 2 -- include/linux/dax.h | 30 ++++++++++++++++++++-- 9 files changed, 102 insertions(+), 71 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index 93da7fc3f254..e9f780f815f5 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -6,7 +6,6 @@ menuconfig BLOCK default y select SBITMAP select SRCU - select DAX help Provide block layer support for the kernel. diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 465dcd7317d5..e8998d15f3cd 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -47,6 +48,75 @@ void dax_read_unlock(int id) } EXPORT_SYMBOL_GPL(dax_read_unlock); +int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, + pgoff_t *pgoff) +{ + phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512; + + if (pgoff) + *pgoff = PHYS_PFN(phys_off); + if (phys_off % PAGE_SIZE || size % PAGE_SIZE) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL(bdev_dax_pgoff); + +/** + * __bdev_dax_supported() - Check if the device supports dax for filesystem + * @sb: The superblock of the device + * @blocksize: The block size of the device + * + * This is a library function for filesystems to check if the block device + * can be mounted with dax option. + * + * Return: negative errno if unsupported, 0 if supported. 
+ */ +int __bdev_dax_supported(struct super_block *sb, int blocksize) +{ + struct block_device *bdev = sb->s_bdev; + struct dax_device *dax_dev; + pgoff_t pgoff; + int err, id; + void *kaddr; + pfn_t pfn; + long len; + + if (blocksize != PAGE_SIZE) { + pr_err("VFS (%s): error: unsupported blocksize for dax\n", + sb->s_id); + return -EINVAL; + } + + err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); + if (err) { + pr_err("VFS (%s): error: unaligned partition for dax\n", + sb->s_id); + return err; + } + + dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); + if (!dax_dev) { + pr_err("VFS (%s): error: device does not support dax\n", + sb->s_id); + return -EOPNOTSUPP; + } + + id = dax_read_lock(); + len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); + dax_read_unlock(id); + + put_dax(dax_dev); + + if (len < 1) { + pr_err("VFS (%s): error: dax access failed (%ld)", + sb->s_id, len); + return len < 0 ? len : -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(__bdev_dax_supported); + /** * struct dax_device - anchor object for dax services * @inode: core vfs diff --git a/fs/Kconfig b/fs/Kconfig index 83eab52fb3f6..b0e42b6a96b9 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -39,6 +39,7 @@ config FS_DAX depends on MMU depends on !(ARM || MIPS || SPARC) select FS_IOMAP + select DAX help Direct Access (DAX) can be used on memory-backed block devices. If the block device supports DAX and the filesystem supports DAX, diff --git a/fs/block_dev.c b/fs/block_dev.c index 666367e13711..3096ecd48304 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -718,72 +718,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, } EXPORT_SYMBOL_GPL(bdev_write_page); -int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, - pgoff_t *pgoff) -{ - phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512; - - if (pgoff) - *pgoff = PHYS_PFN(phys_off); - if (phys_off % PAGE_SIZE || size % PAGE_SIZE) - return -EINVAL; - return 0; -} -EXPORT_SYMBOL(bdev_dax_pgoff); - -/** - * bdev_dax_supported() - Check if the device supports dax for filesystem - * @sb: The superblock of the device - * @blocksize: The block size of the device - * - * This is a library function for filesystems to check if the block device - * can be mounted with dax option. - * - * Return: negative errno if unsupported, 0 if supported. - */ -int bdev_dax_supported(struct super_block *sb, int blocksize) -{ - struct block_device *bdev = sb->s_bdev; - struct dax_device *dax_dev; - pgoff_t pgoff; - int err, id; - void *kaddr; - pfn_t pfn; - long len; - - if (blocksize != PAGE_SIZE) { - vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax"); - return -EINVAL; - } - - err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); - if (err) { - vfs_msg(sb, KERN_ERR, "error: unaligned partition for dax"); - return err; - } - - dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); - if (!dax_dev) { - vfs_msg(sb, KERN_ERR, "error: device does not support dax"); - return -EOPNOTSUPP; - } - - id = dax_read_lock(); - len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); - dax_read_unlock(id); - - put_dax(dax_dev); - - if (len < 1) { - vfs_msg(sb, KERN_ERR, - "error: dax access failed (%ld)", len); - return len < 0 ? 
len : -EIO; - } - - return 0; -} -EXPORT_SYMBOL_GPL(bdev_dax_supported); - /* * pseudo-fs */ diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9e25a71fe1a2..d07773b81da9 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "ext2.h" #include "xattr.h" #include "acl.h" diff --git a/fs/ext4/super.c b/fs/ext4/super.c index a9448db1cf7e..bf6bb8997124 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 685c042a120f..f5c58d6dcafb 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -52,6 +52,7 @@ #include "xfs_reflink.h" #include +#include #include #include #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 848f87eb1905..e4d9899755a7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1940,8 +1940,6 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); -extern int bdev_dax_supported(struct super_block *, int); -int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #else /* CONFIG_BLOCK */ struct block_device; diff --git a/include/linux/dax.h b/include/linux/dax.h index d3158e74a59e..7fdf1d710042 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -18,12 +18,38 @@ struct dax_operations { void **, pfn_t *); }; +int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); +#if IS_ENABLED(CONFIG_FS_DAX) +int __bdev_dax_supported(struct super_block *sb, int blocksize); +static inline int bdev_dax_supported(struct super_block *sb, int blocksize) +{ + return __bdev_dax_supported(sb, blocksize); +} +#else +static inline int bdev_dax_supported(struct super_block *sb, int blocksize) +{ + return -EOPNOTSUPP; +} +#endif + +#if IS_ENABLED(CONFIG_DAX) +struct dax_device *dax_get_by_host(const char *host); +void put_dax(struct dax_device *dax_dev); +#else +static inline struct dax_device *dax_get_by_host(const char *host) +{ + return NULL; +} + +static inline void put_dax(struct dax_device *dax_dev) +{ +} +#endif + int dax_read_lock(void); void dax_read_unlock(int id); -struct dax_device *dax_get_by_host(const char *host); struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops); -void put_dax(struct dax_device *dax_dev); bool dax_alive(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); -- cgit v1.2.3-59-g8ed1b From cf1e22891bee39f50e058bee0827086fd75a8717 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 8 May 2017 12:33:53 -0700 Subject: device-dax: kill NR_DEV_DAX There is no point to ask how many device-dax instances the kernel should support. Since we are already using a dynamic major number, just allow the max number of minors by default and be done. This also fixes the fact that the proposed max for the NR_DEV_DAX range was larger than what could be supported by alloc_chrdev_region(). 
Fixes: ba09c01d2fa8 ("dax: convert to the cdev api") Reported-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Signed-off-by: Dan Williams --- drivers/dax/Kconfig | 5 ----- drivers/dax/super.c | 11 +++-------- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index 6b7e20eae16c..b79aa8f7a497 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -28,9 +28,4 @@ config DEV_DAX_PMEM Say Y if unsure -config NR_DEV_DAX - int "Maximum number of Device-DAX instances" - default 32768 - range 256 2147483647 - endif diff --git a/drivers/dax/super.c b/drivers/dax/super.c index e8998d15f3cd..ebf43f531ada 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -21,10 +21,6 @@ #include #include -static int nr_dax = CONFIG_NR_DEV_DAX; -module_param(nr_dax, int, S_IRUGO); -MODULE_PARM_DESC(nr_dax, "max number of dax device instances"); - static dev_t dax_devt; DEFINE_STATIC_SRCU(dax_srcu); static struct vfsmount *dax_mnt; @@ -331,7 +327,7 @@ struct dax_device *alloc_dax(void *private, const char *__host, if (__host && !host) return NULL; - minor = ida_simple_get(&dax_minor_ida, 0, nr_dax, GFP_KERNEL); + minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL); if (minor < 0) goto err_minor; @@ -475,8 +471,7 @@ static int __init dax_fs_init(void) if (rc) return rc; - nr_dax = max(nr_dax, 256); - rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax"); + rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax"); if (rc) __dax_fs_exit(); return rc; @@ -484,7 +479,7 @@ static int __init dax_fs_init(void) static void __exit dax_fs_exit(void) { - unregister_chrdev_region(dax_devt, nr_dax); + unregister_chrdev_region(dax_devt, MINORMASK+1); ida_destroy(&dax_minor_ida); __dax_fs_exit(); } -- cgit v1.2.3-59-g8ed1b From 8376efd31d3d7c44bd05be337adde023cc531fa1 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 9 May 2017 18:00:43 +0100 Subject: x86, pmem: Fix cache flushing for iovec write < 8 bytes Commit 11e63f6d920d added cache flushing for unaligned writes from an iovec, covering the first and last cache line of a >= 8 byte write and the first cache line of a < 8 byte write. But an unaligned write of 2-7 bytes can still cover two cache lines, so make sure we flush both in that case. Cc: Fixes: 11e63f6d920d ("x86, pmem: fix broken __copy_user_nocache ...") Signed-off-by: Ben Hutchings Signed-off-by: Dan Williams --- arch/x86/include/asm/pmem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h index d5a22bac9988..0ff8fe71b255 100644 --- a/arch/x86/include/asm/pmem.h +++ b/arch/x86/include/asm/pmem.h @@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes, if (bytes < 8) { if (!IS_ALIGNED(dest, 4) || (bytes != 4)) - arch_wb_cache_pmem(addr, 1); + arch_wb_cache_pmem(addr, bytes); } else { if (!IS_ALIGNED(dest, 8)) { dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); -- cgit v1.2.3-59-g8ed1b From 3ae3d67ba705c754a3c91ac009f9ce73a0e7286a Mon Sep 17 00:00:00 2001 From: Vishal Verma Date: Wed, 10 May 2017 15:01:30 -0600 Subject: libnvdimm: add an atomic vs process context flag to rw_bytes nsio_rw_bytes can clear media errors, but this cannot be done while we are in an atomic context due to locking within ACPI. From the BTT, ->rw_bytes may be called either from atomic or process context depending on whether the calls happen during initialization or during IO. 
During init, we want to ensure error clearing happens, and the flag marking process context allows nsio_rw_bytes to do that. When called during IO, we're in atomic context, and error clearing can be skipped. Cc: Dan Williams Signed-off-by: Vishal Verma Signed-off-by: Dan Williams --- drivers/nvdimm/blk.c | 3 ++- drivers/nvdimm/btt.c | 67 +++++++++++++++++++++++++---------------------- drivers/nvdimm/btt_devs.c | 2 +- drivers/nvdimm/claim.c | 6 +++-- drivers/nvdimm/nd.h | 1 + drivers/nvdimm/pfn_devs.c | 4 +-- include/linux/nd.h | 12 +++++---- 7 files changed, 53 insertions(+), 42 deletions(-) diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 9faaa9694d87..822198a75e96 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -218,7 +218,8 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) } static int nsblk_rw_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *iobuf, size_t n, int rw) + resource_size_t offset, void *iobuf, size_t n, int rw, + unsigned long flags) { struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev); struct nd_blk_region *ndbr = to_ndbr(nsblk); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 368795aad5c9..aa977cd4869d 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -32,25 +32,25 @@ enum log_ent_request { }; static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, - void *buf, size_t n) + void *buf, size_t n, unsigned long flags) { struct nd_btt *nd_btt = arena->nd_btt; struct nd_namespace_common *ndns = nd_btt->ndns; /* arena offsets are 4K from the base of the device */ offset += SZ_4K; - return nvdimm_read_bytes(ndns, offset, buf, n); + return nvdimm_read_bytes(ndns, offset, buf, n, flags); } static int arena_write_bytes(struct arena_info *arena, resource_size_t offset, - void *buf, size_t n) + void *buf, size_t n, unsigned long flags) { struct nd_btt *nd_btt = arena->nd_btt; struct nd_namespace_common *ndns = nd_btt->ndns; /* arena offsets are 4K from the base of the device */ offset += SZ_4K; - return nvdimm_write_bytes(ndns, offset, buf, n); + return nvdimm_write_bytes(ndns, offset, buf, n, flags); } static int btt_info_write(struct arena_info *arena, struct btt_sb *super) @@ -58,19 +58,19 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super) int ret; ret = arena_write_bytes(arena, arena->info2off, super, - sizeof(struct btt_sb)); + sizeof(struct btt_sb), 0); if (ret) return ret; return arena_write_bytes(arena, arena->infooff, super, - sizeof(struct btt_sb)); + sizeof(struct btt_sb), 0); } static int btt_info_read(struct arena_info *arena, struct btt_sb *super) { WARN_ON(!super); return arena_read_bytes(arena, arena->infooff, super, - sizeof(struct btt_sb)); + sizeof(struct btt_sb), 0); } /* @@ -79,16 +79,17 @@ static int btt_info_read(struct arena_info *arena, struct btt_sb *super) * mapping is in little-endian * mapping contains 'E' and 'Z' flags as desired */ -static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping) +static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping, + unsigned long flags) { u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); WARN_ON(lba >= arena->external_nlba); - return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE); + return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags); } static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, - u32 z_flag, u32 e_flag) + u32 z_flag, u32 e_flag, unsigned long 
rwb_flags) { u32 ze; __le32 mapping_le; @@ -127,11 +128,11 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, } mapping_le = cpu_to_le32(mapping); - return __btt_map_write(arena, lba, mapping_le); + return __btt_map_write(arena, lba, mapping_le, rwb_flags); } static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, - int *trim, int *error) + int *trim, int *error, unsigned long rwb_flags) { int ret; __le32 in; @@ -140,7 +141,7 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, WARN_ON(lba >= arena->external_nlba); - ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE); + ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags); if (ret) return ret; @@ -189,7 +190,7 @@ static int btt_log_read_pair(struct arena_info *arena, u32 lane, WARN_ON(!ent); return arena_read_bytes(arena, arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, - 2 * LOG_ENT_SIZE); + 2 * LOG_ENT_SIZE, 0); } static struct dentry *debugfs_root; @@ -335,7 +336,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, * btt_flog_write is the wrapper for updating the freelist elements */ static int __btt_log_write(struct arena_info *arena, u32 lane, - u32 sub, struct log_entry *ent) + u32 sub, struct log_entry *ent, unsigned long flags) { int ret; /* @@ -350,13 +351,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane, void *src = ent; /* split the 16B write into atomic, durable halves */ - ret = arena_write_bytes(arena, ns_off, src, log_half); + ret = arena_write_bytes(arena, ns_off, src, log_half, flags); if (ret) return ret; ns_off += log_half; src += log_half; - return arena_write_bytes(arena, ns_off, src, log_half); + return arena_write_bytes(arena, ns_off, src, log_half, flags); } static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, @@ -364,7 +365,7 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, { int ret; - ret = __btt_log_write(arena, lane, sub, ent); + ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC); if (ret) return ret; @@ -397,7 +398,7 @@ static int btt_map_init(struct arena_info *arena) size_t size = min(mapsize, chunk_size); ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, - size); + size, 0); if (ret) goto free; @@ -428,10 +429,10 @@ static int btt_log_init(struct arena_info *arena) log.old_map = cpu_to_le32(arena->external_nlba + i); log.new_map = cpu_to_le32(arena->external_nlba + i); log.seq = cpu_to_le32(LOG_SEQ_INIT); - ret = __btt_log_write(arena, i, 0, &log); + ret = __btt_log_write(arena, i, 0, &log, 0); if (ret) return ret; - ret = __btt_log_write(arena, i, 1, &zerolog); + ret = __btt_log_write(arena, i, 1, &zerolog, 0); if (ret) return ret; } @@ -470,7 +471,7 @@ static int btt_freelist_init(struct arena_info *arena) /* Check if map recovery is needed */ ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry, - NULL, NULL); + NULL, NULL, 0); if (ret) return ret; if ((le32_to_cpu(log_new.new_map) != map_entry) && @@ -480,7 +481,7 @@ static int btt_freelist_init(struct arena_info *arena) * to complete the map write. So fix up the map. 
*/ ret = btt_map_write(arena, le32_to_cpu(log_new.lba), - le32_to_cpu(log_new.new_map), 0, 0); + le32_to_cpu(log_new.new_map), 0, 0, 0); if (ret) return ret; } @@ -875,7 +876,7 @@ static int btt_data_read(struct arena_info *arena, struct page *page, u64 nsoff = to_namespace_offset(arena, lba); void *mem = kmap_atomic(page); - ret = arena_read_bytes(arena, nsoff, mem + off, len); + ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC); kunmap_atomic(mem); return ret; @@ -888,7 +889,7 @@ static int btt_data_write(struct arena_info *arena, u32 lba, u64 nsoff = to_namespace_offset(arena, lba); void *mem = kmap_atomic(page); - ret = arena_write_bytes(arena, nsoff, mem + off, len); + ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC); kunmap_atomic(mem); return ret; @@ -931,10 +932,12 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip, mem = kmap_atomic(bv.bv_page); if (rw) ret = arena_write_bytes(arena, meta_nsoff, - mem + bv.bv_offset, cur_len); + mem + bv.bv_offset, cur_len, + NVDIMM_IO_ATOMIC); else ret = arena_read_bytes(arena, meta_nsoff, - mem + bv.bv_offset, cur_len); + mem + bv.bv_offset, cur_len, + NVDIMM_IO_ATOMIC); kunmap_atomic(mem); if (ret) @@ -976,7 +979,8 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, cur_len = min(btt->sector_size, len); - ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag); + ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag, + NVDIMM_IO_ATOMIC); if (ret) goto out_lane; @@ -1006,7 +1010,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, barrier(); ret = btt_map_read(arena, premap, &new_map, &t_flag, - &e_flag); + &e_flag, NVDIMM_IO_ATOMIC); if (ret) goto out_rtt; @@ -1093,7 +1097,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, } lock_map(arena, premap); - ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL); + ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL, + NVDIMM_IO_ATOMIC); if (ret) goto out_map; if (old_postmap >= arena->internal_nlba) { @@ -1110,7 +1115,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, if (ret) goto out_map; - ret = btt_map_write(arena, premap, new_postmap, 0, 0); + ret = btt_map_write(arena, premap, new_postmap, 0, 0, 0); if (ret) goto out_map; diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 4b76af2b8715..ae00dc0d9791 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -273,7 +273,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt, if (!btt_sb || !ndns || !nd_btt) return -ENODEV; - if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb))) + if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0)) return -ENXIO; if (nvdimm_namespace_capacity(ndns) < SZ_16M) diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 93d128da1c92..7ceb5fa4f2a1 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -228,7 +228,8 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb) EXPORT_SYMBOL(nd_sb_checksum); static int nsio_rw_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size, int rw) + resource_size_t offset, void *buf, size_t size, int rw, + unsigned long flags) { struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512); @@ -259,7 +260,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, * work around this collision. 
*/ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) - && (!ndns->claim || !is_nd_btt(ndns->claim))) { + && !(flags & NVDIMM_IO_ATOMIC) + && !ndns->claim) { long cleared; cleared = nvdimm_clear_poison(&ndns->dev, diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 77d032192bf7..03852d738eec 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -31,6 +31,7 @@ enum { ND_MAX_LANES = 256, SECTOR_SHIFT = 9, INT_LBASIZE_ALIGNMENT = 64, + NVDIMM_IO_ATOMIC = 1, }; struct nd_poison { diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 335c8175410b..a6c403600d19 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -357,7 +357,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) if (!is_nd_pmem(nd_pfn->dev.parent)) return -ENODEV; - if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb))) + if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0)) return -ENXIO; if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0) @@ -662,7 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); pfn_sb->checksum = cpu_to_le64(checksum); - return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)); + return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0); } /* diff --git a/include/linux/nd.h b/include/linux/nd.h index fa66aeed441a..194b8e002ea7 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -48,7 +48,7 @@ struct nd_namespace_common { struct device dev; struct device *claim; int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset, - void *buf, size_t size, int rw); + void *buf, size_t size, int rw, unsigned long flags); }; static inline struct nd_namespace_common *to_ndns(struct device *dev) @@ -134,9 +134,10 @@ static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device * * @buf is up-to-date upon return from this routine. */ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size) + resource_size_t offset, void *buf, size_t size, + unsigned long flags) { - return ndns->rw_bytes(ndns, offset, buf, size, READ); + return ndns->rw_bytes(ndns, offset, buf, size, READ, flags); } /** @@ -152,9 +153,10 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns, * to media is handled internal to the @ndns driver, if at all. */ static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns, - resource_size_t offset, void *buf, size_t size) + resource_size_t offset, void *buf, size_t size, + unsigned long flags) { - return ndns->rw_bytes(ndns, offset, buf, size, WRITE); + return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags); } #define MODULE_ALIAS_ND_DEVICE(type) \ -- cgit v1.2.3-59-g8ed1b From b177fe85dd27de1ee4c29f59c4e82b3ea3b78784 Mon Sep 17 00:00:00 2001 From: Vishal Verma Date: Wed, 10 May 2017 15:01:31 -0600 Subject: libnvdimm, btt: ensure that initializing metadata clears poison If we had badblocks/poison in the metadata area of a BTT, recreating the BTT would not clear the poison in all cases, notably the flog area. This is because rw_bytes will only clear errors if the request being sent down is 512B aligned and sized. Make sure that when writing the map and info blocks, the rw_bytes being sent are of the correct size/alignment. For the flog, instead of doing the smaller log_entry writes only, first do a 'wipe' of the entire area by writing zeroes in large enough chunks so that errors get cleared. 
Cc: Andy Rudoff Cc: Dan Williams Signed-off-by: Vishal Verma Signed-off-by: Dan Williams --- drivers/nvdimm/btt.c | 54 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 7 deletions(-) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index aa977cd4869d..983718b8fd9b 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -57,6 +57,14 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super) { int ret; + /* + * infooff and info2off should always be at least 512B aligned. + * We rely on that to make sure rw_bytes does error clearing + * correctly, so make sure that is the case. + */ + WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512)); + WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512)); + ret = arena_write_bytes(arena, arena->info2off, super, sizeof(struct btt_sb), 0); if (ret) @@ -394,9 +402,17 @@ static int btt_map_init(struct arena_info *arena) if (!zerobuf) return -ENOMEM; + /* + * mapoff should always be at least 512B aligned. We rely on that to + * make sure rw_bytes does error clearing correctly, so make sure that + * is the case. + */ + WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512)); + while (mapsize) { size_t size = min(mapsize, chunk_size); + WARN_ON_ONCE(size < 512); ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, size, 0); if (ret) @@ -418,11 +434,36 @@ static int btt_map_init(struct arena_info *arena) */ static int btt_log_init(struct arena_info *arena) { + size_t logsize = arena->info2off - arena->logoff; + size_t chunk_size = SZ_4K, offset = 0; + struct log_entry log; + void *zerobuf; int ret; u32 i; - struct log_entry log, zerolog; - memset(&zerolog, 0, sizeof(zerolog)); + zerobuf = kzalloc(chunk_size, GFP_KERNEL); + if (!zerobuf) + return -ENOMEM; + /* + * logoff should always be at least 512B aligned. We rely on that to + * make sure rw_bytes does error clearing correctly, so make sure that + * is the case. + */ + WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512)); + + while (logsize) { + size_t size = min(logsize, chunk_size); + + WARN_ON_ONCE(size < 512); + ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf, + size, 0); + if (ret) + goto free; + + offset += size; + logsize -= size; + cond_resched(); + } for (i = 0; i < arena->nfree; i++) { log.lba = cpu_to_le32(i); @@ -431,13 +472,12 @@ static int btt_log_init(struct arena_info *arena) log.seq = cpu_to_le32(LOG_SEQ_INIT); ret = __btt_log_write(arena, i, 0, &log, 0); if (ret) - return ret; - ret = __btt_log_write(arena, i, 1, &zerolog, 0); - if (ret) - return ret; + goto free; } - return 0; + free: + kfree(zerobuf); + return ret; } static int btt_freelist_init(struct arena_info *arena) -- cgit v1.2.3-59-g8ed1b From e84b83b9ee2187817cf895471675f1ccdf64cd53 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 10 May 2017 19:38:13 -0700 Subject: filesystem-dax: fix broken __dax_zero_page_range() conversion The conversion of __dax_zero_page_range() to 'struct dax_operations' caused it to frequently fail. The mistake was treating the @size parameter as a dax mapping length rather than just a length of the clear_pmem() operation. The dax mapping length is assumed to be hard coded as PAGE_SIZE. Without this fix any page unaligned zeroing request will trigger a -EINVAL return from bdev_dax_pgoff(). 
Cc: Jan Kara Cc: Christoph Hellwig Reported-by: Ross Zwisler Tested-by: Ross Zwisler Fixes: cccbce671582 ("filesystem-dax: convert to dax_direct_access()") Signed-off-by: Dan Williams --- fs/dax.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index ce9dc9c3e829..5ee1d212d81f 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -971,12 +971,12 @@ int __dax_zero_page_range(struct block_device *bdev, void *kaddr; pfn_t pfn; - rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); + rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); if (rc) return rc; id = dax_read_lock(); - rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, + rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); if (rc < 0) { dax_read_unlock(id); -- cgit v1.2.3-59-g8ed1b
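
A note on "block, dax: move "select DAX" from BLOCK to FS_DAX": after this
series a DAX-capable filesystem includes <linux/dax.h> and probes the backing
device at mount time via bdev_dax_supported(), which returns 0 when the device
can service the "-o dax" option and a negative errno otherwise. The helper
below is only an illustrative sketch of that calling convention;
example_check_dax() is a hypothetical wrapper, not something added by these
patches:

#include <linux/dax.h>
#include <linux/fs.h>

/*
 * Hypothetical mount-time check, sketching how ext2/ext4/xfs consume the
 * relocated API: DAX requires the filesystem block size to be PAGE_SIZE,
 * and a non-zero return means the mount should fall back to buffered I/O.
 */
static int example_check_dax(struct super_block *sb)
{
        int err;

        err = bdev_dax_supported(sb, sb->s_blocksize);
        if (err)
                pr_warn("%s: DAX not supported by block device (%d)\n",
                        sb->s_id, err);
        return err;
}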
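
A note on "x86, pmem: Fix cache flushing for iovec write < 8 bytes": the point
of the fix is that a 2-7 byte copy can still straddle a 64-byte cache-line
boundary, so the write-back must cover 'bytes' rather than a single byte. A
small user-space sketch of the arithmetic (the 64-byte line size and the sample
addresses are assumptions for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64   /* assumed clflush line size */

/* How many cache lines does a write of 'bytes' at 'dest' touch? */
static unsigned int lines_touched(uintptr_t dest, size_t bytes)
{
        uintptr_t first = dest / CACHE_LINE;
        uintptr_t last = (dest + bytes - 1) / CACHE_LINE;

        return (unsigned int)(last - first + 1);
}

int main(void)
{
        /* 3 bytes written at offset 0x3e end at 0x40: two lines are dirty,
         * so flushing only the first line misses one of them. */
        printf("%u\n", lines_touched(0x3e, 3)); /* prints 2 */
        printf("%u\n", lines_touched(0x40, 4)); /* prints 1 */
        return 0;
}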