From fa0d3fce7ceff0aae62f3e19678713bc5a7f3377 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:31:14 +1100 Subject: dax: remove buffer_size_valid() Now that ext4 properly sets bh.b_size when we call get_block() for a hole, rely on that value and remove the buffer_size_valid() sanity check. Signed-off-by: Ross Zwisler Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner --- fs/dax.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 014defd2e744..b09817ac6f87 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -123,19 +123,6 @@ static bool buffer_written(struct buffer_head *bh) return buffer_mapped(bh) && !buffer_unwritten(bh); } -/* - * When ext4 encounters a hole, it returns without modifying the buffer_head - * which means that we can't trust b_size. To cope with this, we set b_state - * to 0 before calling get_block and, if any bit is set, we know we can trust - * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is - * and would save us time calling get_block repeatedly. - */ -static bool buffer_size_valid(struct buffer_head *bh) -{ - return bh->b_state != 0; -} - - static sector_t to_sector(const struct buffer_head *bh, const struct inode *inode) { @@ -177,8 +164,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, rc = get_block(inode, block, bh, rw == WRITE); if (rc) break; - if (!buffer_size_valid(bh)) - bh->b_size = 1 << blkbits; bh_max = pos - first + bh->b_size; bdev = bh->b_bdev; /* @@ -1012,12 +997,7 @@ int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, bdev = bh.b_bdev; - /* - * If the filesystem isn't willing to tell us the length of a hole, - * just fall back to PTEs. Calling get_block 512 times in a loop - * would be silly. - */ - if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) { + if (bh.b_size < PMD_SIZE) { dax_pmd_dbg(&bh, address, "allocated block too small"); return VM_FAULT_FALLBACK; } -- cgit v1.2.3-59-g8ed1b From ce95ab0fa669698805ff8841a756733a7702b5c9 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:31:44 +1100 Subject: dax: make 'wait_table' global variable static The global 'wait_table' variable is only used within fs/dax.c, and generates the following sparse warning: fs/dax.c:39:19: warning: symbol 'wait_table' was not declared. Should it be static? Make it static so it has scope local to fs/dax.c, and to make sparse happy. Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index b09817ac6f87..e52e7543c351 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -52,7 +52,7 @@ #define DAX_WAIT_TABLE_BITS 12 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) -wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; +static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; static int __init init_dax_wait_table(void) { -- cgit v1.2.3-59-g8ed1b From aada54f9800497cc8249c99ffcb1431c6d8e98e7 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:32:00 +1100 Subject: dax: remove the last BUG_ON() from fs/dax.c Don't take down the kernel if we get an invalid 'from' and 'length' argument pair. Just warn once and return an error. 
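For illustration only, a sketch of how a filesystem caller might handle the new failure mode (the dax_zero_page_range() signature is the one in include/linux/dax.h; the surrounding caller and its 'get_block' callback are hypothetical):

	/*
	 * With the BUG_ON() replaced by WARN_ON_ONCE(), a bad offset/length
	 * pair is logged once and surfaces as -EINVAL that the filesystem
	 * can propagate, instead of halting the machine.
	 */
	error = dax_zero_page_range(inode, from, length, get_block);
	if (error)
		return error;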
Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index e52e7543c351..219fa2bd676a 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1194,7 +1194,8 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, /* Block boundary? Nothing to do */ if (!length) return 0; - BUG_ON((offset + length) > PAGE_SIZE); + if (WARN_ON_ONCE((offset + length) > PAGE_SIZE)) + return -EINVAL; memset(&bh, 0, sizeof(bh)); bh.b_bdev = inode->i_sb->s_bdev; -- cgit v1.2.3-59-g8ed1b From e3ad61c64abceeb5cc122f0bde3700e6e17ca60a Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:32:12 +1100 Subject: dax: consistent variable naming for DAX entries No functional change. Consistently use the variable name 'entry' instead of 'ret' for DAX radix tree entries. This was already happening in most of the code, so update get_unlocked_mapping_entry(), grab_mapping_entry() and dax_unlock_mapping_entry(). Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 219fa2bd676a..835e7f082cff 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -357,7 +357,7 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot) static void *get_unlocked_mapping_entry(struct address_space *mapping, pgoff_t index, void ***slotp) { - void *ret, **slot; + void *entry, **slot; struct wait_exceptional_entry_queue ewait; wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index); @@ -367,13 +367,13 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping, ewait.key.index = index; for (;;) { - ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, + entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot); - if (!ret || !radix_tree_exceptional_entry(ret) || + if (!entry || !radix_tree_exceptional_entry(entry) || !slot_locked(mapping, slot)) { if (slotp) *slotp = slot; - return ret; + return entry; } prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); @@ -396,13 +396,13 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping, */ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index) { - void *ret, **slot; + void *entry, **slot; restart: spin_lock_irq(&mapping->tree_lock); - ret = get_unlocked_mapping_entry(mapping, index, &slot); + entry = get_unlocked_mapping_entry(mapping, index, &slot); /* No entry for given index? Make sure radix tree is big enough. */ - if (!ret) { + if (!entry) { int err; spin_unlock_irq(&mapping->tree_lock); @@ -410,10 +410,10 @@ restart: mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); if (err) return ERR_PTR(err); - ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | + entry = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_ENTRY_LOCK); spin_lock_irq(&mapping->tree_lock); - err = radix_tree_insert(&mapping->page_tree, index, ret); + err = radix_tree_insert(&mapping->page_tree, index, entry); radix_tree_preload_end(); if (err) { spin_unlock_irq(&mapping->tree_lock); @@ -425,11 +425,11 @@ restart: /* Good, we have inserted empty locked entry into the tree. */ mapping->nrexceptional++; spin_unlock_irq(&mapping->tree_lock); - return ret; + return entry; } /* Normal page in radix tree? 
*/ - if (!radix_tree_exceptional_entry(ret)) { - struct page *page = ret; + if (!radix_tree_exceptional_entry(entry)) { + struct page *page = entry; get_page(page); spin_unlock_irq(&mapping->tree_lock); @@ -442,9 +442,9 @@ restart: } return page; } - ret = lock_slot(mapping, slot); + entry = lock_slot(mapping, slot); spin_unlock_irq(&mapping->tree_lock); - return ret; + return entry; } void dax_wake_mapping_entry_waiter(struct address_space *mapping, @@ -469,11 +469,11 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping, void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index) { - void *ret, **slot; + void *entry, **slot; spin_lock_irq(&mapping->tree_lock); - ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot); - if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) || + entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot); + if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) || !slot_locked(mapping, slot))) { spin_unlock_irq(&mapping->tree_lock); return; -- cgit v1.2.3-59-g8ed1b From 63e95b5c4f16e156b98adcf2f7d820ba941c82a3 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:32:20 +1100 Subject: dax: coordinate locking for offsets in PMD range DAX radix tree locking currently locks entries based on the unique combination of the 'mapping' pointer and the pgoff_t 'index' for the entry. This works for PTEs, but as we move to PMDs we will need to have all the offsets within the range covered by the PMD to map to the same bit lock. To accomplish this, for ranges covered by a PMD entry we will instead lock based on the page offset of the beginning of the PMD entry. The 'mapping' pointer is still used in the same way. Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 65 +++++++++++++++++++++++++++++++++-------------------- include/linux/dax.h | 2 +- mm/filemap.c | 2 +- 3 files changed, 43 insertions(+), 26 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 835e7f082cff..72387023545e 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -64,14 +64,6 @@ static int __init init_dax_wait_table(void) } fs_initcall(init_dax_wait_table); -static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, - pgoff_t index) -{ - unsigned long hash = hash_long((unsigned long)mapping ^ index, - DAX_WAIT_TABLE_BITS); - return wait_table + hash; -} - static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax) { struct request_queue *q = bdev->bd_queue; @@ -285,7 +277,7 @@ EXPORT_SYMBOL_GPL(dax_do_io); */ struct exceptional_entry_key { struct address_space *mapping; - unsigned long index; + pgoff_t entry_start; }; struct wait_exceptional_entry_queue { @@ -293,6 +285,26 @@ struct wait_exceptional_entry_queue { struct exceptional_entry_key key; }; +static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, + pgoff_t index, void *entry, struct exceptional_entry_key *key) +{ + unsigned long hash; + + /* + * If 'entry' is a PMD, align the 'index' that we use for the wait + * queue to the start of that PMD. This ensures that all offsets in + * the range covered by the PMD map to the same bit lock. 
+ */ + if (RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) + index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1); + + key->mapping = mapping; + key->entry_start = index; + + hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); + return wait_table + hash; +} + static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode, int sync, void *keyp) { @@ -301,7 +313,7 @@ static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode, container_of(wait, struct wait_exceptional_entry_queue, wait); if (key->mapping != ewait->key.mapping || - key->index != ewait->key.index) + key->entry_start != ewait->key.entry_start) return 0; return autoremove_wake_function(wait, mode, sync, NULL); } @@ -359,12 +371,10 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping, { void *entry, **slot; struct wait_exceptional_entry_queue ewait; - wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index); + wait_queue_head_t *wq; init_wait(&ewait.wait); ewait.wait.func = wake_exceptional_entry_func; - ewait.key.mapping = mapping; - ewait.key.index = index; for (;;) { entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, @@ -375,6 +385,8 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping, *slotp = slot; return entry; } + + wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&mapping->tree_lock); @@ -447,10 +459,20 @@ restart: return entry; } +/* + * We do not necessarily hold the mapping->tree_lock when we call this + * function so it is possible that 'entry' is no longer a valid item in the + * radix tree. This is okay, though, because all we really need to do is to + * find the correct waitqueue where tasks might be sleeping waiting for that + * old 'entry' and wake them. + */ void dax_wake_mapping_entry_waiter(struct address_space *mapping, - pgoff_t index, bool wake_all) + pgoff_t index, void *entry, bool wake_all) { - wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index); + struct exceptional_entry_key key; + wait_queue_head_t *wq; + + wq = dax_entry_waitqueue(mapping, index, entry, &key); /* * Checking for locked entry and prepare_to_wait_exclusive() happens @@ -458,13 +480,8 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping, * So at this point all tasks that could have seen our entry locked * must be in the waitqueue and the following check will see them. */ - if (waitqueue_active(wq)) { - struct exceptional_entry_key key; - - key.mapping = mapping; - key.index = index; + if (waitqueue_active(wq)) __wake_up(wq, TASK_NORMAL, wake_all ? 
0 : 1, &key); - } } void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index) @@ -480,7 +497,7 @@ void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index) } unlock_slot(mapping, slot); spin_unlock_irq(&mapping->tree_lock); - dax_wake_mapping_entry_waiter(mapping, index, false); + dax_wake_mapping_entry_waiter(mapping, index, entry, false); } static void put_locked_mapping_entry(struct address_space *mapping, @@ -505,7 +522,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping, return; /* We have to wake up next waiter for the radix tree entry lock */ - dax_wake_mapping_entry_waiter(mapping, index, false); + dax_wake_mapping_entry_waiter(mapping, index, entry, false); } /* @@ -532,7 +549,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) radix_tree_delete(&mapping->page_tree, index); mapping->nrexceptional--; spin_unlock_irq(&mapping->tree_lock); - dax_wake_mapping_entry_waiter(mapping, index, true); + dax_wake_mapping_entry_waiter(mapping, index, entry, true); return 1; } diff --git a/include/linux/dax.h b/include/linux/dax.h index add6c4bc568f..a41a747d6112 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -22,7 +22,7 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, - pgoff_t index, bool wake_all); + pgoff_t index, void *entry, bool wake_all); #ifdef CONFIG_FS_DAX struct page *read_dax_sector(struct block_device *bdev, sector_t n); diff --git a/mm/filemap.c b/mm/filemap.c index 849f459ad078..1ffb7dcd1b5d 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -143,7 +143,7 @@ static int page_cache_tree_insert(struct address_space *mapping, if (node) workingset_node_pages_dec(node); /* Wakeup waiters for exceptional entry lock */ - dax_wake_mapping_entry_waiter(mapping, page->index, + dax_wake_mapping_entry_waiter(mapping, page->index, p, false); } } -- cgit v1.2.3-59-g8ed1b From b9fde0462e34a05b25c3d68d344971865659abae Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:32:35 +1100 Subject: dax: remove dax_pmd_fault() dax_pmd_fault() is the old struct buffer_head + get_block_t based 2 MiB DAX fault handler. This fault handler has been disabled for several kernel releases, and support for PMDs will be reintroduced using the struct iomap interface instead. Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 213 ---------------------------------------------------- include/linux/dax.h | 6 +- 2 files changed, 1 insertion(+), 218 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 72387023545e..3d0b1032c555 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -915,219 +915,6 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, } EXPORT_SYMBOL_GPL(dax_fault); -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) -/* - * The 'colour' (ie low bits) within a PMD of a page offset. This comes up - * more often than one might expect in the below function. 
- */ -#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) - -static void __dax_dbg(struct buffer_head *bh, unsigned long address, - const char *reason, const char *fn) -{ - if (bh) { - char bname[BDEVNAME_SIZE]; - bdevname(bh->b_bdev, bname); - pr_debug("%s: %s addr: %lx dev %s state %lx start %lld " - "length %zd fallback: %s\n", fn, current->comm, - address, bname, bh->b_state, (u64)bh->b_blocknr, - bh->b_size, reason); - } else { - pr_debug("%s: %s addr: %lx fallback: %s\n", fn, - current->comm, address, reason); - } -} - -#define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd") - -/** - * dax_pmd_fault - handle a PMD fault on a DAX file - * @vma: The virtual memory area where the fault occurred - * @vmf: The description of the fault - * @get_block: The filesystem method used to translate file offsets to blocks - * - * When a page fault occurs, filesystems may call this helper in their - * pmd_fault handler for DAX files. - */ -int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmd, unsigned int flags, get_block_t get_block) -{ - struct file *file = vma->vm_file; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - struct buffer_head bh; - unsigned blkbits = inode->i_blkbits; - unsigned long pmd_addr = address & PMD_MASK; - bool write = flags & FAULT_FLAG_WRITE; - struct block_device *bdev; - pgoff_t size, pgoff; - sector_t block; - int result = 0; - bool alloc = false; - - /* dax pmd mappings require pfn_t_devmap() */ - if (!IS_ENABLED(CONFIG_FS_DAX_PMD)) - return VM_FAULT_FALLBACK; - - /* Fall back to PTEs if we're going to COW */ - if (write && !(vma->vm_flags & VM_SHARED)) { - split_huge_pmd(vma, pmd, address); - dax_pmd_dbg(NULL, address, "cow write"); - return VM_FAULT_FALLBACK; - } - /* If the PMD would extend outside the VMA */ - if (pmd_addr < vma->vm_start) { - dax_pmd_dbg(NULL, address, "vma start unaligned"); - return VM_FAULT_FALLBACK; - } - if ((pmd_addr + PMD_SIZE) > vma->vm_end) { - dax_pmd_dbg(NULL, address, "vma end unaligned"); - return VM_FAULT_FALLBACK; - } - - pgoff = linear_page_index(vma, pmd_addr); - size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (pgoff >= size) - return VM_FAULT_SIGBUS; - /* If the PMD would cover blocks out of the file */ - if ((pgoff | PG_PMD_COLOUR) >= size) { - dax_pmd_dbg(NULL, address, - "offset + huge page size > file size"); - return VM_FAULT_FALLBACK; - } - - memset(&bh, 0, sizeof(bh)); - bh.b_bdev = inode->i_sb->s_bdev; - block = (sector_t)pgoff << (PAGE_SHIFT - blkbits); - - bh.b_size = PMD_SIZE; - - if (get_block(inode, block, &bh, 0) != 0) - return VM_FAULT_SIGBUS; - - if (!buffer_mapped(&bh) && write) { - if (get_block(inode, block, &bh, 1) != 0) - return VM_FAULT_SIGBUS; - alloc = true; - WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh)); - } - - bdev = bh.b_bdev; - - if (bh.b_size < PMD_SIZE) { - dax_pmd_dbg(&bh, address, "allocated block too small"); - return VM_FAULT_FALLBACK; - } - - /* - * If we allocated new storage, make sure no process has any - * zero pages covering this hole - */ - if (alloc) { - loff_t lstart = pgoff << PAGE_SHIFT; - loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */ - - truncate_pagecache_range(inode, lstart, lend); - } - - if (!write && !buffer_mapped(&bh)) { - spinlock_t *ptl; - pmd_t entry; - struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm); - - if (unlikely(!zero_page)) { - dax_pmd_dbg(&bh, address, "no zero page"); - goto fallback; - } - - ptl = pmd_lock(vma->vm_mm, pmd); 
- if (!pmd_none(*pmd)) { - spin_unlock(ptl); - dax_pmd_dbg(&bh, address, "pmd already present"); - goto fallback; - } - - dev_dbg(part_to_dev(bdev->bd_part), - "%s: %s addr: %lx pfn: sect: %llx\n", - __func__, current->comm, address, - (unsigned long long) to_sector(&bh, inode)); - - entry = mk_pmd(zero_page, vma->vm_page_prot); - entry = pmd_mkhuge(entry); - set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry); - result = VM_FAULT_NOPAGE; - spin_unlock(ptl); - } else { - struct blk_dax_ctl dax = { - .sector = to_sector(&bh, inode), - .size = PMD_SIZE, - }; - long length = dax_map_atomic(bdev, &dax); - - if (length < 0) { - dax_pmd_dbg(&bh, address, "dax-error fallback"); - goto fallback; - } - if (length < PMD_SIZE) { - dax_pmd_dbg(&bh, address, "dax-length too small"); - dax_unmap_atomic(bdev, &dax); - goto fallback; - } - if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) { - dax_pmd_dbg(&bh, address, "pfn unaligned"); - dax_unmap_atomic(bdev, &dax); - goto fallback; - } - - if (!pfn_t_devmap(dax.pfn)) { - dax_unmap_atomic(bdev, &dax); - dax_pmd_dbg(&bh, address, "pfn not in memmap"); - goto fallback; - } - dax_unmap_atomic(bdev, &dax); - - /* - * For PTE faults we insert a radix tree entry for reads, and - * leave it clean. Then on the first write we dirty the radix - * tree entry via the dax_pfn_mkwrite() path. This sequence - * allows the dax_pfn_mkwrite() call to be simpler and avoid a - * call into get_block() to translate the pgoff to a sector in - * order to be able to create a new radix tree entry. - * - * The PMD path doesn't have an equivalent to - * dax_pfn_mkwrite(), though, so for a read followed by a - * write we traverse all the way through dax_pmd_fault() - * twice. This means we can just skip inserting a radix tree - * entry completely on the initial read and just wait until - * the write to insert a dirty entry. - */ - if (write) { - /* - * We should insert radix-tree entry and dirty it here. - * For now this is broken... 
- */ - } - - dev_dbg(part_to_dev(bdev->bd_part), - "%s: %s addr: %lx pfn: %lx sect: %llx\n", - __func__, current->comm, address, - pfn_t_to_pfn(dax.pfn), - (unsigned long long) dax.sector); - result |= vmf_insert_pfn_pmd(vma, address, pmd, - dax.pfn, write); - } - - out: - return result; - - fallback: - count_vm_event(THP_FAULT_FALLBACK); - result = VM_FAULT_FALLBACK; - goto out; -} -EXPORT_SYMBOL_GPL(dax_pmd_fault); -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - /** * dax_pfn_mkwrite - handle first write to DAX page * @vma: The virtual memory area where the fault occurred diff --git a/include/linux/dax.h b/include/linux/dax.h index a41a747d6112..0f74866edae6 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -48,16 +48,12 @@ static inline int __dax_zero_page_range(struct block_device *bdev, } #endif -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) -int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t); -#else static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, get_block_t gb) { return VM_FAULT_FALLBACK; } -#endif + int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) -- cgit v1.2.3-59-g8ed1b From 11c59c92f44d9272db7655a462608658a6d95013 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:32:46 +1100 Subject: dax: correct dax iomap code namespace The recently added DAX functions that use the new struct iomap data structure were named iomap_dax_rw(), iomap_dax_fault() and iomap_dax_actor(). These are actually defined in fs/dax.c, though, so should be part of the "dax" namespace and not the "iomap" namespace. Rename them to dax_iomap_rw(), dax_iomap_fault() and dax_iomap_actor() respectively. Signed-off-by: Ross Zwisler Suggested-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 16 ++++++++-------- fs/ext2/file.c | 6 +++--- fs/xfs/xfs_file.c | 8 ++++---- include/linux/dax.h | 4 ++-- 4 files changed, 17 insertions(+), 17 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 3d0b1032c555..fdbd7a1ec6cf 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1031,7 +1031,7 @@ EXPORT_SYMBOL_GPL(dax_truncate_page); #ifdef CONFIG_FS_IOMAP static loff_t -iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data, +dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct iomap *iomap) { struct iov_iter *iter = data; @@ -1088,7 +1088,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data, } /** - * iomap_dax_rw - Perform I/O to a DAX file + * dax_iomap_rw - Perform I/O to a DAX file * @iocb: The control block for this I/O * @iter: The addresses to do I/O from or to * @ops: iomap ops passed from the file system @@ -1098,7 +1098,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data, * and evicting any page cache pages in the region under I/O. 
*/ ssize_t -iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, +dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops) { struct address_space *mapping = iocb->ki_filp->f_mapping; @@ -1128,7 +1128,7 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, while (iov_iter_count(iter)) { ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, - iter, iomap_dax_actor); + iter, dax_iomap_actor); if (ret <= 0) break; pos += ret; @@ -1138,10 +1138,10 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, iocb->ki_pos += done; return done ? done : ret; } -EXPORT_SYMBOL_GPL(iomap_dax_rw); +EXPORT_SYMBOL_GPL(dax_iomap_rw); /** - * iomap_dax_fault - handle a page fault on a DAX file + * dax_iomap_fault - handle a page fault on a DAX file * @vma: The virtual memory area where the fault occurred * @vmf: The description of the fault * @ops: iomap ops passed from the file system @@ -1150,7 +1150,7 @@ EXPORT_SYMBOL_GPL(iomap_dax_rw); * or mkwrite handler for DAX files. Assumes the caller has done all the * necessary locking for the page fault to proceed successfully. */ -int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, +int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap_ops *ops) { struct address_space *mapping = vma->vm_file->f_mapping; @@ -1252,5 +1252,5 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_SIGBUS | major; return VM_FAULT_NOPAGE | major; } -EXPORT_SYMBOL_GPL(iomap_dax_fault); +EXPORT_SYMBOL_GPL(dax_iomap_fault); #endif /* CONFIG_FS_IOMAP */ diff --git a/fs/ext2/file.c b/fs/ext2/file.c index fb88b51ca947..b0f241528a30 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) return 0; /* skip atime */ inode_lock_shared(inode); - ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops); + ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops); inode_unlock_shared(inode); file_accessed(iocb->ki_filp); @@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) if (ret) goto out_unlock; - ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops); + ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops); if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { i_size_write(inode, iocb->ki_pos); mark_inode_dirty(inode); @@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } down_read(&ei->dax_sem); - ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops); + ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops); up_read(&ei->dax_sem); if (vmf->flags & FAULT_FLAG_WRITE) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index a314fc7b56fa..e7f35d548cfc 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -344,7 +344,7 @@ xfs_file_dax_read( return 0; /* skip atime */ xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); - ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops); + ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops); xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); file_accessed(iocb->ki_filp); @@ -691,7 +691,7 @@ xfs_file_dax_write( trace_xfs_file_dax_write(ip, count, pos); - ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops); + ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops); if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { i_size_write(inode, iocb->ki_pos); error = xfs_setfilesize(ip, pos, ret); @@ -1640,7 +1640,7 @@ xfs_filemap_page_mkwrite( xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); if (IS_DAX(inode)) { - ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops); + ret = 
dax_iomap_fault(vma, vmf, &xfs_iomap_ops); } else { ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops); ret = block_page_mkwrite_return(ret); @@ -1674,7 +1674,7 @@ xfs_filemap_fault( * changes to xfs_get_blocks_direct() to map unwritten extent * ioend for conversion on read-only mappings. */ - ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops); + ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops); } else ret = filemap_fault(vma, vmf); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); diff --git a/include/linux/dax.h b/include/linux/dax.h index 0f74866edae6..a3dfee4cb03f 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -11,13 +11,13 @@ struct iomap_ops; /* We use lowest available exceptional entry bit for locking */ #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) -ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, +ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops); ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, get_block_t, dio_iodone_t, int flags); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); -int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, +int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap_ops *ops); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); -- cgit v1.2.3-59-g8ed1b From 333ccc978e1e09af2690e459b6f97d8e91cc01fa Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:33:09 +1100 Subject: dax: add dax_iomap_sector() helper function To be able to correctly calculate the sector from a file position and a struct iomap there is a complex little bit of logic that currently happens in both dax_iomap_actor() and dax_iomap_fault(). This will need to be repeated yet again in the DAX PMD fault handler when it is added, so break it out into a helper function. 
Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index fdbd7a1ec6cf..77379546433e 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1030,6 +1030,11 @@ int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) EXPORT_SYMBOL_GPL(dax_truncate_page); #ifdef CONFIG_FS_IOMAP +static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) +{ + return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); +} + static loff_t dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct iomap *iomap) @@ -1055,8 +1060,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, struct blk_dax_ctl dax = { 0 }; ssize_t map_len; - dax.sector = iomap->blkno + - (((pos & PAGE_MASK) - iomap->offset) >> 9); + dax.sector = dax_iomap_sector(iomap, pos); dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; map_len = dax_map_atomic(iomap->bdev, &dax); if (map_len < 0) { @@ -1193,7 +1197,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, goto unlock_entry; } - sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9); + sector = dax_iomap_sector(&iomap, pos); if (vmf->cow_page) { switch (iomap.type) { -- cgit v1.2.3-59-g8ed1b From 1550290b08012637e8d741a6a298ec6320dadda2 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:33:26 +1100 Subject: dax: dax_iomap_fault() needs to call iomap_end() Currently iomap_end() doesn't do anything for DAX page faults for both ext2 and XFS. ext2_iomap_end() just checks for a write underrun, and xfs_file_iomap_end() checks to see if it needs to finish a delayed allocation. However, in the future iomap_end() calls might be needed to make sure we have balanced allocations, locks, etc. So, add calls to iomap_end() with appropriate error handling to dax_iomap_fault(). Signed-off-by: Ross Zwisler Suggested-by: Jan Kara Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 77379546433e..6edd89b3b69c 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1165,6 +1165,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap iomap = { 0 }; unsigned flags = 0; int error, major = 0; + int locked_status = 0; void *entry; /* @@ -1194,7 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, goto unlock_entry; if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { error = -EIO; /* fs corruption? 
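As a worked example of the arithmetic being factored out (values chosen purely for illustration): with iomap->blkno == 1000 (in 512-byte sectors), iomap->offset == 2 MiB and a fault at pos == 2 MiB + 12 KiB, the page-aligned byte offset into the extent is 12288, i.e. 24 sectors, so the helper returns 1000 + 24 == 1024:

	sector = dax_iomap_sector(&iomap, pos);	/* 1024 in the example above */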
*/ - goto unlock_entry; + goto finish_iomap; } sector = dax_iomap_sector(&iomap, pos); @@ -1216,13 +1217,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, } if (error) - goto unlock_entry; + goto finish_iomap; if (!radix_tree_exceptional_entry(entry)) { vmf->page = entry; - return VM_FAULT_LOCKED; + locked_status = VM_FAULT_LOCKED; + } else { + vmf->entry = entry; + locked_status = VM_FAULT_DAX_LOCKED; } - vmf->entry = entry; - return VM_FAULT_DAX_LOCKED; + goto finish_iomap; } switch (iomap.type) { @@ -1237,8 +1240,10 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, break; case IOMAP_UNWRITTEN: case IOMAP_HOLE: - if (!(vmf->flags & FAULT_FLAG_WRITE)) - return dax_load_hole(mapping, entry, vmf); + if (!(vmf->flags & FAULT_FLAG_WRITE)) { + locked_status = dax_load_hole(mapping, entry, vmf); + break; + } /*FALLTHRU*/ default: WARN_ON_ONCE(1); @@ -1246,14 +1251,30 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, break; } + finish_iomap: + if (ops->iomap_end) { + if (error) { + /* keep previous error */ + ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags, + &iomap); + } else { + error = ops->iomap_end(inode, pos, PAGE_SIZE, + PAGE_SIZE, flags, &iomap); + } + } unlock_entry: - put_locked_mapping_entry(mapping, vmf->pgoff, entry); + if (!locked_status || error) + put_locked_mapping_entry(mapping, vmf->pgoff, entry); out: if (error == -ENOMEM) return VM_FAULT_OOM | major; /* -EBUSY is fine, somebody else faulted on the same PTE */ if (error < 0 && error != -EBUSY) return VM_FAULT_SIGBUS | major; + if (locked_status) { + WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */ + return locked_status; + } return VM_FAULT_NOPAGE | major; } EXPORT_SYMBOL_GPL(dax_iomap_fault); -- cgit v1.2.3-59-g8ed1b From fa28f7296a7ce38ed15dc06bd2149e04c8db9d4b Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:33:35 +1100 Subject: dax: move RADIX_DAX_* defines to dax.h The RADIX_DAX_* defines currently mostly live in fs/dax.c, with just RADIX_DAX_ENTRY_LOCK being in include/linux/dax.h so it can be used in mm/filemap.c. When we add PMD support, though, mm/filemap.c will also need access to the RADIX_DAX_PTE type so it can properly construct a 4k sized empty entry. Instead of shifting the defines between dax.c and dax.h as they are individually used in other code, just move them wholesale to dax.h so they'll be available when we need them. Signed-off-by: Ross Zwisler Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 14 -------------- include/linux/dax.h | 15 ++++++++++++++- 2 files changed, 14 insertions(+), 15 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 6edd89b3b69c..c45cc4d8e996 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -34,20 +34,6 @@ #include #include "internal.h" -/* - * We use lowest available bit in exceptional entry for locking, other two - * bits to determine entry type. In total 3 special bits. - */ -#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3) -#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) -#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) -#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD) -#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK) -#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) -#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ - RADIX_DAX_SHIFT | (pmd ? 
RADIX_DAX_PMD : RADIX_DAX_PTE) | \ - RADIX_TREE_EXCEPTIONAL_ENTRY)) - /* We choose 4096 entries - same as per-zone page wait tables */ #define DAX_WAIT_TABLE_BITS 12 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) diff --git a/include/linux/dax.h b/include/linux/dax.h index a3dfee4cb03f..e9ea78c1cf98 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -8,8 +8,21 @@ struct iomap_ops; -/* We use lowest available exceptional entry bit for locking */ +/* + * We use lowest available bit in exceptional entry for locking, other two + * bits to determine entry type. In total 3 special bits. + */ +#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3) #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) +#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) +#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) +#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD) +#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK) +#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) +#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ + RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \ + RADIX_TREE_EXCEPTIONAL_ENTRY)) + ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops); -- cgit v1.2.3-59-g8ed1b From 422476c4641ec65906406f3d266b69a91dd3170c Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:33:44 +1100 Subject: dax: move put_(un)locked_mapping_entry() in dax.c No functional change. The static functions put_locked_mapping_entry() and put_unlocked_mapping_entry() will soon be used in error cases in grab_mapping_entry(), so move their definitions above this function. Signed-off-by: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index c45cc4d8e996..0582c7c2ae40 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -382,6 +382,31 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping, } } +static void put_locked_mapping_entry(struct address_space *mapping, + pgoff_t index, void *entry) +{ + if (!radix_tree_exceptional_entry(entry)) { + unlock_page(entry); + put_page(entry); + } else { + dax_unlock_mapping_entry(mapping, index); + } +} + +/* + * Called when we are done with radix tree entry we looked up via + * get_unlocked_mapping_entry() and which we didn't lock in the end. + */ +static void put_unlocked_mapping_entry(struct address_space *mapping, + pgoff_t index, void *entry) +{ + if (!radix_tree_exceptional_entry(entry)) + return; + + /* We have to wake up next waiter for the radix tree entry lock */ + dax_wake_mapping_entry_waiter(mapping, index, entry, false); +} + /* * Find radix tree entry at given index. If it points to a page, return with * the page locked. 
If it points to the exceptional entry, return with the @@ -486,31 +511,6 @@ void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index) dax_wake_mapping_entry_waiter(mapping, index, entry, false); } -static void put_locked_mapping_entry(struct address_space *mapping, - pgoff_t index, void *entry) -{ - if (!radix_tree_exceptional_entry(entry)) { - unlock_page(entry); - put_page(entry); - } else { - dax_unlock_mapping_entry(mapping, index); - } -} - -/* - * Called when we are done with radix tree entry we looked up via - * get_unlocked_mapping_entry() and which we didn't lock in the end. - */ -static void put_unlocked_mapping_entry(struct address_space *mapping, - pgoff_t index, void *entry) -{ - if (!radix_tree_exceptional_entry(entry)) - return; - - /* We have to wake up next waiter for the radix tree entry lock */ - dax_wake_mapping_entry_waiter(mapping, index, entry, false); -} - /* * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree * entry to get unlocked before deleting it. -- cgit v1.2.3-59-g8ed1b From 642261ac995e01d7837db1f4b90181496f7e6835 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 8 Nov 2016 11:34:45 +1100 Subject: dax: add struct iomap based DAX PMD support DAX PMDs have been disabled since Jan Kara introduced DAX radix tree based locking. This patch allows DAX PMDs to participate in the DAX radix tree based locking scheme so that they can be re-enabled using the new struct iomap based fault handlers. There are currently three types of DAX 4k entries: 4k zero pages, 4k DAX mappings that have an associated block allocation, and 4k DAX empty entries. The empty entries exist to provide locking for the duration of a given page fault. This patch adds three equivalent 2MiB DAX entries: Huge Zero Page (HZP) entries, PMD DAX entries that have associated block allocations, and 2 MiB DAX empty entries. Unlike the 4k case where we insert a struct page* into the radix tree for 4k zero pages, for HZP we insert a DAX exceptional entry with the new RADIX_DAX_HZP flag set. This is because we use a single 2 MiB zero page in every 2MiB hole mapping, and it doesn't make sense to have that same struct page* with multiple entries in multiple trees. This would cause contention on the single page lock for the one Huge Zero Page, and it would break the page->index and page->mapping associations that are assumed to be valid in many other places in the kernel. One difficult use case is when one thread is trying to use 4k entries in radix tree for a given offset, and another thread is using 2 MiB entries for that same offset. The current code handles this by making the 2 MiB user fall back to 4k entries for most cases. This was done because it is the simplest solution, and because the use of 2MiB pages is already opportunistic. If we were to try to upgrade from 4k pages to 2MiB pages for a given range, we run into the problem of how we lock out 4k page faults for the entire 2MiB range while we clean out the radix tree so we can insert the 2MiB entry. We can solve this problem if we need to, but I think that the cases where both 2MiB entries and 4K entries are being used for the same range will be rare enough and the gain small enough that it probably won't be worth the complexity. 
Signed-off-by: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Dave Chinner --- fs/dax.c | 378 +++++++++++++++++++++++++++++++++++++++++++++++----- include/linux/dax.h | 55 ++++++-- mm/filemap.c | 3 +- 3 files changed, 386 insertions(+), 50 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 0582c7c2ae40..281e91a63367 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -76,6 +76,26 @@ static void dax_unmap_atomic(struct block_device *bdev, blk_queue_exit(bdev->bd_queue); } +static int dax_is_pmd_entry(void *entry) +{ + return (unsigned long)entry & RADIX_DAX_PMD; +} + +static int dax_is_pte_entry(void *entry) +{ + return !((unsigned long)entry & RADIX_DAX_PMD); +} + +static int dax_is_zero_entry(void *entry) +{ + return (unsigned long)entry & RADIX_DAX_HZP; +} + +static int dax_is_empty_entry(void *entry) +{ + return (unsigned long)entry & RADIX_DAX_EMPTY; +} + struct page *read_dax_sector(struct block_device *bdev, sector_t n) { struct page *page = alloc_pages(GFP_KERNEL, 0); @@ -281,7 +301,7 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, * queue to the start of that PMD. This ensures that all offsets in * the range covered by the PMD map to the same bit lock. */ - if (RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) + if (dax_is_pmd_entry(entry)) index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1); key->mapping = mapping; @@ -413,36 +433,116 @@ static void put_unlocked_mapping_entry(struct address_space *mapping, * radix tree entry locked. If the radix tree doesn't contain given index, * create empty exceptional entry for the index and return with it locked. * + * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will + * either return that locked entry or will return an error. This error will + * happen if there are any 4k entries (either zero pages or DAX entries) + * within the 2MiB range that we are requesting. + * + * We always favor 4k entries over 2MiB entries. There isn't a flow where we + * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB + * insertion will fail if it finds any 4k entries already in the tree, and a + * 4k insertion will cause an existing 2MiB entry to be unmapped and + * downgraded to 4k entries. This happens for both 2MiB huge zero pages as + * well as 2MiB empty entries. + * + * The exception to this downgrade path is for 2MiB DAX PMD entries that have + * real storage backing them. We will leave these real 2MiB DAX entries in + * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry. + * * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For * persistent memory the benefit is doubtful. We can add that later if we can * show it helps. */ -static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index) +static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index, + unsigned long size_flag) { + bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? 
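To make the new entry encoding concrete, a short sketch using the helpers this patch introduces (the variable names here are illustrative only, not part of the patch):

	/* 4k entry with a real block allocation */
	void *pte_block = dax_radix_locked_entry(sector, 0);
	/* 2 MiB entry with a real block allocation */
	void *pmd_block = dax_radix_locked_entry(sector, RADIX_DAX_PMD);
	/* 2 MiB huge zero page (HZP) entry */
	void *pmd_hzp = dax_radix_locked_entry(0, RADIX_DAX_PMD | RADIX_DAX_HZP);
	/* 2 MiB empty entry, used only for locking during a fault */
	void *pmd_empty = dax_radix_locked_entry(0, RADIX_DAX_PMD | RADIX_DAX_EMPTY);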
*/ void *entry, **slot; restart: spin_lock_irq(&mapping->tree_lock); entry = get_unlocked_mapping_entry(mapping, index, &slot); + + if (entry) { + if (size_flag & RADIX_DAX_PMD) { + if (!radix_tree_exceptional_entry(entry) || + dax_is_pte_entry(entry)) { + put_unlocked_mapping_entry(mapping, index, + entry); + entry = ERR_PTR(-EEXIST); + goto out_unlock; + } + } else { /* trying to grab a PTE entry */ + if (radix_tree_exceptional_entry(entry) && + dax_is_pmd_entry(entry) && + (dax_is_zero_entry(entry) || + dax_is_empty_entry(entry))) { + pmd_downgrade = true; + } + } + } + /* No entry for given index? Make sure radix tree is big enough. */ - if (!entry) { + if (!entry || pmd_downgrade) { int err; + if (pmd_downgrade) { + /* + * Make sure 'entry' remains valid while we drop + * mapping->tree_lock. + */ + entry = lock_slot(mapping, slot); + } + spin_unlock_irq(&mapping->tree_lock); err = radix_tree_preload( mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); - if (err) + if (err) { + if (pmd_downgrade) + put_locked_mapping_entry(mapping, index, entry); return ERR_PTR(err); - entry = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | - RADIX_DAX_ENTRY_LOCK); + } + + /* + * Besides huge zero pages the only other thing that gets + * downgraded are empty entries which don't need to be + * unmapped. + */ + if (pmd_downgrade && dax_is_zero_entry(entry)) + unmap_mapping_range(mapping, + (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0); + spin_lock_irq(&mapping->tree_lock); - err = radix_tree_insert(&mapping->page_tree, index, entry); + + if (pmd_downgrade) { + radix_tree_delete(&mapping->page_tree, index); + mapping->nrexceptional--; + dax_wake_mapping_entry_waiter(mapping, index, entry, + true); + } + + entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY); + + err = __radix_tree_insert(&mapping->page_tree, index, + dax_radix_order(entry), entry); radix_tree_preload_end(); if (err) { spin_unlock_irq(&mapping->tree_lock); - /* Someone already created the entry? */ - if (err == -EEXIST) + /* + * Someone already created the entry? This is a + * normal failure when inserting PMDs in a range + * that already contains PTEs. In that case we want + * to return -EEXIST immediately. + */ + if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD)) goto restart; + /* + * Our insertion of a DAX PMD entry failed, most + * likely because it collided with a PTE sized entry + * at a different index in the PMD range. We haven't + * inserted anything into the radix tree and have no + * waiters to wake. + */ return ERR_PTR(err); } /* Good, we have inserted empty locked entry into the tree. */ @@ -466,6 +566,7 @@ restart: return page; } entry = lock_slot(mapping, slot); + out_unlock: spin_unlock_irq(&mapping->tree_lock); return entry; } @@ -473,9 +574,9 @@ restart: /* * We do not necessarily hold the mapping->tree_lock when we call this * function so it is possible that 'entry' is no longer a valid item in the - * radix tree. This is okay, though, because all we really need to do is to - * find the correct waitqueue where tasks might be sleeping waiting for that - * old 'entry' and wake them. + * radix tree. This is okay because all we really need to do is to find the + * correct waitqueue where tasks might be waiting for that old 'entry' and + * wake them. 
*/ void dax_wake_mapping_entry_waiter(struct address_space *mapping, pgoff_t index, void *entry, bool wake_all) @@ -588,11 +689,17 @@ static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size return 0; } -#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT)) - +/* + * By this point grab_mapping_entry() has ensured that we have a locked entry + * of the appropriate size so we don't have to worry about downgrading PMDs to + * PTEs. If we happen to be trying to insert a PTE and there is a PMD + * already in the tree, we will skip the insertion and just dirty the PMD as + * appropriate. + */ static void *dax_insert_mapping_entry(struct address_space *mapping, struct vm_fault *vmf, - void *entry, sector_t sector) + void *entry, sector_t sector, + unsigned long flags) { struct radix_tree_root *page_tree = &mapping->page_tree; int error = 0; @@ -615,22 +722,35 @@ static void *dax_insert_mapping_entry(struct address_space *mapping, error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM); if (error) return ERR_PTR(error); + } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) { + /* replacing huge zero page with PMD block mapping */ + unmap_mapping_range(mapping, + (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0); } spin_lock_irq(&mapping->tree_lock); - new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) | - RADIX_DAX_ENTRY_LOCK); + new_entry = dax_radix_locked_entry(sector, flags); + if (hole_fill) { __delete_from_page_cache(entry, NULL); /* Drop pagecache reference */ put_page(entry); - error = radix_tree_insert(page_tree, index, new_entry); + error = __radix_tree_insert(page_tree, index, + dax_radix_order(new_entry), new_entry); if (error) { new_entry = ERR_PTR(error); goto unlock; } mapping->nrexceptional++; - } else { + } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { + /* + * Only swap our new entry into the radix tree if the current + * entry is a zero page or an empty entry. If a normal PTE or + * PMD entry is already in the tree, we leave it alone. This + * means that if we are trying to insert a PTE and the + * existing entry is a PMD, we will just leave the PMD in the + * tree and dirty it if necessary. + */ void **slot; void *ret; @@ -660,7 +780,6 @@ static int dax_writeback_one(struct block_device *bdev, struct address_space *mapping, pgoff_t index, void *entry) { struct radix_tree_root *page_tree = &mapping->page_tree; - int type = RADIX_DAX_TYPE(entry); struct radix_tree_node *node; struct blk_dax_ctl dax; void **slot; @@ -681,13 +800,21 @@ static int dax_writeback_one(struct block_device *bdev, if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) goto unlock; - if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) { + if (WARN_ON_ONCE(dax_is_empty_entry(entry) || + dax_is_zero_entry(entry))) { ret = -EIO; goto unlock; } - dax.sector = RADIX_DAX_SECTOR(entry); - dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE); + /* + * Even if dax_writeback_mapping_range() was given a wbc->range_start + * in the middle of a PMD, the 'index' we are given will be aligned to + * the start index of the PMD, as will the sector we pull from + * 'entry'. This allows us to flush for PMD_SIZE and not have to + * worry about partial PMD writebacks. 
+ */ + dax.sector = dax_radix_sector(entry); + dax.size = PAGE_SIZE << dax_radix_order(entry); spin_unlock_irq(&mapping->tree_lock); /* @@ -726,12 +853,11 @@ int dax_writeback_mapping_range(struct address_space *mapping, struct block_device *bdev, struct writeback_control *wbc) { struct inode *inode = mapping->host; - pgoff_t start_index, end_index, pmd_index; + pgoff_t start_index, end_index; pgoff_t indices[PAGEVEC_SIZE]; struct pagevec pvec; bool done = false; int i, ret = 0; - void *entry; if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) return -EIO; @@ -741,15 +867,6 @@ int dax_writeback_mapping_range(struct address_space *mapping, start_index = wbc->range_start >> PAGE_SHIFT; end_index = wbc->range_end >> PAGE_SHIFT; - pmd_index = DAX_PMD_INDEX(start_index); - - rcu_read_lock(); - entry = radix_tree_lookup(&mapping->page_tree, pmd_index); - rcu_read_unlock(); - - /* see if the start of our range is covered by a PMD entry */ - if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) - start_index = pmd_index; tag_pages_for_writeback(mapping, start_index, end_index); @@ -794,7 +911,7 @@ static int dax_insert_mapping(struct address_space *mapping, return PTR_ERR(dax.addr); dax_unmap_atomic(bdev, &dax); - ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector); + ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0); if (IS_ERR(ret)) return PTR_ERR(ret); *entryp = ret; @@ -841,7 +958,7 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, bh.b_bdev = inode->i_sb->s_bdev; bh.b_size = PAGE_SIZE; - entry = grab_mapping_entry(mapping, vmf->pgoff); + entry = grab_mapping_entry(mapping, vmf->pgoff, 0); if (IS_ERR(entry)) { error = PTR_ERR(entry); goto out; @@ -1162,7 +1279,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (pos >= i_size_read(inode)) return VM_FAULT_SIGBUS; - entry = grab_mapping_entry(mapping, vmf->pgoff); + entry = grab_mapping_entry(mapping, vmf->pgoff, 0); if (IS_ERR(entry)) { error = PTR_ERR(entry); goto out; @@ -1264,4 +1381,191 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_NOPAGE | major; } EXPORT_SYMBOL_GPL(dax_iomap_fault); + +#ifdef CONFIG_FS_DAX_PMD +/* + * The 'colour' (ie low bits) within a PMD of a page offset. This comes up + * more often than one might expect in the below functions. 
+ */ +#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) + +static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd, + struct vm_fault *vmf, unsigned long address, + struct iomap *iomap, loff_t pos, bool write, void **entryp) +{ + struct address_space *mapping = vma->vm_file->f_mapping; + struct block_device *bdev = iomap->bdev; + struct blk_dax_ctl dax = { + .sector = dax_iomap_sector(iomap, pos), + .size = PMD_SIZE, + }; + long length = dax_map_atomic(bdev, &dax); + void *ret; + + if (length < 0) /* dax_map_atomic() failed */ + return VM_FAULT_FALLBACK; + if (length < PMD_SIZE) + goto unmap_fallback; + if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) + goto unmap_fallback; + if (!pfn_t_devmap(dax.pfn)) + goto unmap_fallback; + + dax_unmap_atomic(bdev, &dax); + + ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector, + RADIX_DAX_PMD); + if (IS_ERR(ret)) + return VM_FAULT_FALLBACK; + *entryp = ret; + + return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write); + + unmap_fallback: + dax_unmap_atomic(bdev, &dax); + return VM_FAULT_FALLBACK; +} + +static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd, + struct vm_fault *vmf, unsigned long address, + struct iomap *iomap, void **entryp) +{ + struct address_space *mapping = vma->vm_file->f_mapping; + unsigned long pmd_addr = address & PMD_MASK; + struct page *zero_page; + spinlock_t *ptl; + pmd_t pmd_entry; + void *ret; + + zero_page = mm_get_huge_zero_page(vma->vm_mm); + + if (unlikely(!zero_page)) + return VM_FAULT_FALLBACK; + + ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0, + RADIX_DAX_PMD | RADIX_DAX_HZP); + if (IS_ERR(ret)) + return VM_FAULT_FALLBACK; + *entryp = ret; + + ptl = pmd_lock(vma->vm_mm, pmd); + if (!pmd_none(*pmd)) { + spin_unlock(ptl); + return VM_FAULT_FALLBACK; + } + + pmd_entry = mk_pmd(zero_page, vma->vm_page_prot); + pmd_entry = pmd_mkhuge(pmd_entry); + set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry); + spin_unlock(ptl); + return VM_FAULT_NOPAGE; +} + +int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmd, unsigned int flags, struct iomap_ops *ops) +{ + struct address_space *mapping = vma->vm_file->f_mapping; + unsigned long pmd_addr = address & PMD_MASK; + bool write = flags & FAULT_FLAG_WRITE; + unsigned int iomap_flags = write ? IOMAP_WRITE : 0; + struct inode *inode = mapping->host; + int result = VM_FAULT_FALLBACK; + struct iomap iomap = { 0 }; + pgoff_t max_pgoff, pgoff; + struct vm_fault vmf; + void *entry; + loff_t pos; + int error; + + /* Fall back to PTEs if we're going to COW */ + if (write && !(vma->vm_flags & VM_SHARED)) + goto fallback; + + /* If the PMD would extend outside the VMA */ + if (pmd_addr < vma->vm_start) + goto fallback; + if ((pmd_addr + PMD_SIZE) > vma->vm_end) + goto fallback; + + /* + * Check whether offset isn't beyond end of file now. Caller is + * supposed to hold locks serializing us with truncate / punch hole so + * this is a reliable test. + */ + pgoff = linear_page_index(vma, pmd_addr); + max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT; + + if (pgoff > max_pgoff) + return VM_FAULT_SIGBUS; + + /* If the PMD would extend beyond the file size */ + if ((pgoff | PG_PMD_COLOUR) > max_pgoff) + goto fallback; + + /* + * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX + * PMD or a HZP entry. If it can't (because a 4k page is already in + * the tree, for instance), it will return -EEXIST and we just fall + * back to 4k entries. 
+ */ + entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); + if (IS_ERR(entry)) + goto fallback; + + /* + * Note that we don't use iomap_apply here. We aren't doing I/O, only + * setting up a mapping, so really we're using iomap_begin() as a way + * to look up our filesystem block. + */ + pos = (loff_t)pgoff << PAGE_SHIFT; + error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); + if (error) + goto unlock_entry; + if (iomap.offset + iomap.length < pos + PMD_SIZE) + goto finish_iomap; + + vmf.pgoff = pgoff; + vmf.flags = flags; + vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO; + + switch (iomap.type) { + case IOMAP_MAPPED: + result = dax_pmd_insert_mapping(vma, pmd, &vmf, address, + &iomap, pos, write, &entry); + break; + case IOMAP_UNWRITTEN: + case IOMAP_HOLE: + if (WARN_ON_ONCE(write)) + goto finish_iomap; + result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap, + &entry); + break; + default: + WARN_ON_ONCE(1); + break; + } + + finish_iomap: + if (ops->iomap_end) { + if (result == VM_FAULT_FALLBACK) { + ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags, + &iomap); + } else { + error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE, + iomap_flags, &iomap); + if (error) + result = VM_FAULT_FALLBACK; + } + } + unlock_entry: + put_locked_mapping_entry(mapping, pgoff, entry); + fallback: + if (result == VM_FAULT_FALLBACK) { + split_huge_pmd(vma, pmd, address); + count_vm_event(THP_FAULT_FALLBACK); + } + return result; +} +EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); +#endif /* CONFIG_FS_DAX_PMD */ #endif /* CONFIG_FS_IOMAP */ diff --git a/include/linux/dax.h b/include/linux/dax.h index e9ea78c1cf98..8d1a5c47945f 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -9,20 +9,32 @@ struct iomap_ops; /* - * We use lowest available bit in exceptional entry for locking, other two - * bits to determine entry type. In total 3 special bits. + * We use lowest available bit in exceptional entry for locking, one bit for + * the entry size (PMD) and two more to tell us if the entry is a huge zero + * page (HZP) or an empty entry that is just used for locking. In total four + * special bits. + * + * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and + * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem + * block allocation. */ -#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3) +#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4) #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) -#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) -#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) -#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD) -#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK) -#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) -#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ - RADIX_DAX_SHIFT | (pmd ? 
RADIX_DAX_PMD : RADIX_DAX_PTE) | \ - RADIX_TREE_EXCEPTIONAL_ENTRY)) +#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1)) +#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2)) +#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)) +static inline unsigned long dax_radix_sector(void *entry) +{ + return (unsigned long)entry >> RADIX_DAX_SHIFT; +} + +static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) +{ + return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags | + ((unsigned long)sector << RADIX_DAX_SHIFT) | + RADIX_DAX_ENTRY_LOCK); +} ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops); @@ -67,6 +79,27 @@ static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, return VM_FAULT_FALLBACK; } +#ifdef CONFIG_FS_DAX_PMD +static inline unsigned int dax_radix_order(void *entry) +{ + if ((unsigned long)entry & RADIX_DAX_PMD) + return PMD_SHIFT - PAGE_SHIFT; + return 0; +} +int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmd, unsigned int flags, struct iomap_ops *ops); +#else +static inline unsigned int dax_radix_order(void *entry) +{ + return 0; +} +static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, unsigned int flags, + struct iomap_ops *ops) +{ + return VM_FAULT_FALLBACK; +} +#endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) diff --git a/mm/filemap.c b/mm/filemap.c index 1ffb7dcd1b5d..00ab94a882de 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -137,8 +137,7 @@ static int page_cache_tree_insert(struct address_space *mapping, } else { /* DAX can replace empty locked entry with a hole */ WARN_ON_ONCE(p != - (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | - RADIX_DAX_ENTRY_LOCK)); + dax_radix_locked_entry(0, RADIX_DAX_EMPTY)); /* DAX accounts exceptional entries as normal pages */ if (node) workingset_node_pages_dec(node); -- cgit v1.2.3-59-g8ed1b From 9484ab1bf4464faae695321dd4fa66365beda74e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 10 Nov 2016 10:26:50 +1100 Subject: dax: Introduce IOMAP_FAULT flag Introduce a flag telling iomap operations whether they are handling a fault or other IO. That may influence behavior wrt inode size and similar things. Signed-off-by: Jan Kara Reviewed-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner --- fs/dax.c | 4 ++-- fs/iomap.c | 5 +++-- include/linux/iomap.h | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index 281e91a63367..28af41b9da3a 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1266,7 +1266,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; sector_t sector; struct iomap iomap = { 0 }; - unsigned flags = 0; + unsigned flags = IOMAP_FAULT; int error, major = 0; int locked_status = 0; void *entry; @@ -1467,7 +1467,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, struct address_space *mapping = vma->vm_file->f_mapping; unsigned long pmd_addr = address & PMD_MASK; bool write = flags & FAULT_FLAG_WRITE; - unsigned int iomap_flags = write ? IOMAP_WRITE : 0; + unsigned int iomap_flags = (write ? 
IOMAP_WRITE : 0) | IOMAP_FAULT; struct inode *inode = mapping->host; int result = VM_FAULT_FALLBACK; struct iomap iomap = { 0 }; diff --git a/fs/iomap.c b/fs/iomap.c index 013d1d36fbbf..51a02573405e 100644 --- a/fs/iomap.c +++ b/fs/iomap.c @@ -468,8 +468,9 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, offset = page_offset(page); while (length > 0) { - ret = iomap_apply(inode, offset, length, IOMAP_WRITE, - ops, page, iomap_page_mkwrite_actor); + ret = iomap_apply(inode, offset, length, + IOMAP_WRITE | IOMAP_FAULT, ops, page, + iomap_page_mkwrite_actor); if (unlikely(ret <= 0)) goto out_unlock; offset += ret; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index e63e288dee83..b9e7b8ec8c1d 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -44,6 +44,7 @@ struct iomap { */ #define IOMAP_WRITE (1 << 0) #define IOMAP_ZERO (1 << 1) +#define IOMAP_FAULT (1 << 3) /* mapping for page fault */ struct iomap_ops { /* -- cgit v1.2.3-59-g8ed1b From dd936e4313fa3f60abd6e67abb3cb66fc9a018d1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sun, 20 Nov 2016 20:48:36 -0500 Subject: dax: rip out get_block based IO support No one uses functions using the get_block callback anymore. Rip them out and update documentation. Reviewed-by: Ross Zwisler Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- Documentation/filesystems/dax.txt | 22 +-- fs/dax.c | 315 -------------------------------------- include/linux/dax.h | 12 -- 3 files changed, 11 insertions(+), 338 deletions(-) (limited to 'fs/dax.c') diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index 23d18b8a49d5..a7e6e14aeb08 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -58,22 +58,22 @@ Implementation Tips for Filesystem Writers Filesystem support consists of - adding support to mark inodes as being DAX by setting the S_DAX flag in i_flags -- implementing the direct_IO address space operation, and calling - dax_do_io() instead of blockdev_direct_IO() if S_DAX is set +- implementing ->read_iter and ->write_iter operations which use dax_iomap_rw() + when inode has S_DAX flag set - implementing an mmap file operation for DAX files which sets the VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to - include handlers for fault, pmd_fault and page_mkwrite (which should - probably call dax_fault(), dax_pmd_fault() and dax_mkwrite(), passing the - appropriate get_block() callback) -- calling dax_truncate_page() instead of block_truncate_page() for DAX files -- calling dax_zero_page_range() instead of zero_user() for DAX files + include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These + handlers should probably call dax_iomap_fault() (for fault and page_mkwrite + handlers), dax_iomap_pmd_fault(), dax_pfn_mkwrite() passing the appropriate + iomap operations. +- calling iomap_zero_range() passing appropriate iomap operations instead of + block_truncate_page() for DAX files - ensuring that there is sufficient locking between reads, writes, truncates and page faults -The get_block() callback passed to the DAX functions may return -uninitialised extents. If it does, it must ensure that simultaneous -calls to get_block() (for example by a page-fault racing with a read() -or a write()) work correctly. +The iomap handlers for allocating blocks must make sure that allocated blocks +are zeroed out and converted to written extents before being returned to avoid +exposure of uninitialized data through mmap. 
These filesystems may be used for inspiration: - ext2: see Documentation/filesystems/ext2.txt diff --git a/fs/dax.c b/fs/dax.c index 28af41b9da3a..ad131cd2605d 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -116,168 +116,6 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n) return page; } -static bool buffer_written(struct buffer_head *bh) -{ - return buffer_mapped(bh) && !buffer_unwritten(bh); -} - -static sector_t to_sector(const struct buffer_head *bh, - const struct inode *inode) -{ - sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); - - return sector; -} - -static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, - loff_t start, loff_t end, get_block_t get_block, - struct buffer_head *bh) -{ - loff_t pos = start, max = start, bh_max = start; - bool hole = false; - struct block_device *bdev = NULL; - int rw = iov_iter_rw(iter), rc; - long map_len = 0; - struct blk_dax_ctl dax = { - .addr = ERR_PTR(-EIO), - }; - unsigned blkbits = inode->i_blkbits; - sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1) - >> blkbits; - - if (rw == READ) - end = min(end, i_size_read(inode)); - - while (pos < end) { - size_t len; - if (pos == max) { - long page = pos >> PAGE_SHIFT; - sector_t block = page << (PAGE_SHIFT - blkbits); - unsigned first = pos - (block << blkbits); - long size; - - if (pos == bh_max) { - bh->b_size = PAGE_ALIGN(end - pos); - bh->b_state = 0; - rc = get_block(inode, block, bh, rw == WRITE); - if (rc) - break; - bh_max = pos - first + bh->b_size; - bdev = bh->b_bdev; - /* - * We allow uninitialized buffers for writes - * beyond EOF as those cannot race with faults - */ - WARN_ON_ONCE( - (buffer_new(bh) && block < file_blks) || - (rw == WRITE && buffer_unwritten(bh))); - } else { - unsigned done = bh->b_size - - (bh_max - (pos - first)); - bh->b_blocknr += done >> blkbits; - bh->b_size -= done; - } - - hole = rw == READ && !buffer_written(bh); - if (hole) { - size = bh->b_size - first; - } else { - dax_unmap_atomic(bdev, &dax); - dax.sector = to_sector(bh, inode); - dax.size = bh->b_size; - map_len = dax_map_atomic(bdev, &dax); - if (map_len < 0) { - rc = map_len; - break; - } - dax.addr += first; - size = map_len - first; - } - /* - * pos + size is one past the last offset for IO, - * so pos + size can overflow loff_t at extreme offsets. - * Cast to u64 to catch this and get the true minimum. - */ - max = min_t(u64, pos + size, end); - } - - if (iov_iter_rw(iter) == WRITE) { - len = copy_from_iter_pmem(dax.addr, max - pos, iter); - } else if (!hole) - len = copy_to_iter((void __force *) dax.addr, max - pos, - iter); - else - len = iov_iter_zero(max - pos, iter); - - if (!len) { - rc = -EFAULT; - break; - } - - pos += len; - if (!IS_ERR(dax.addr)) - dax.addr += len; - } - - dax_unmap_atomic(bdev, &dax); - - return (pos == start) ? rc : pos - start; -} - -/** - * dax_do_io - Perform I/O to a DAX file - * @iocb: The control block for this I/O - * @inode: The file which the I/O is directed at - * @iter: The addresses to do I/O from or to - * @get_block: The filesystem method used to translate file offsets to blocks - * @end_io: A filesystem callback for I/O completion - * @flags: See below - * - * This function uses the same locking scheme as do_blockdev_direct_IO: - * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the - * caller for writes. For reads, we take and release the i_mutex ourselves. - * If DIO_LOCKING is not set, the filesystem takes care of its own locking. 
- * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O - * is in progress. - */ -ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, - struct iov_iter *iter, get_block_t get_block, - dio_iodone_t end_io, int flags) -{ - struct buffer_head bh; - ssize_t retval = -EINVAL; - loff_t pos = iocb->ki_pos; - loff_t end = pos + iov_iter_count(iter); - - memset(&bh, 0, sizeof(bh)); - bh.b_bdev = inode->i_sb->s_bdev; - - if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) - inode_lock(inode); - - /* Protects against truncate */ - if (!(flags & DIO_SKIP_DIO_COUNT)) - inode_dio_begin(inode); - - retval = dax_io(inode, iter, pos, end, get_block, &bh); - - if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) - inode_unlock(inode); - - if (end_io) { - int err; - - err = end_io(iocb, pos, retval, bh.b_private); - if (err) - retval = err; - } - - if (!(flags & DIO_SKIP_DIO_COUNT)) - inode_dio_end(inode); - return retval; -} -EXPORT_SYMBOL_GPL(dax_do_io); - /* * DAX radix tree locking */ @@ -919,105 +757,6 @@ static int dax_insert_mapping(struct address_space *mapping, return vm_insert_mixed(vma, vaddr, dax.pfn); } -/** - * dax_fault - handle a page fault on a DAX file - * @vma: The virtual memory area where the fault occurred - * @vmf: The description of the fault - * @get_block: The filesystem method used to translate file offsets to blocks - * - * When a page fault occurs, filesystems may call this helper in their - * fault handler for DAX files. dax_fault() assumes the caller has done all - * the necessary locking for the page fault to proceed successfully. - */ -int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, - get_block_t get_block) -{ - struct file *file = vma->vm_file; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - void *entry; - struct buffer_head bh; - unsigned long vaddr = (unsigned long)vmf->virtual_address; - unsigned blkbits = inode->i_blkbits; - sector_t block; - pgoff_t size; - int error; - int major = 0; - - /* - * Check whether offset isn't beyond end of file now. Caller is supposed - * to hold locks serializing us with truncate / punch hole so this is - * a reliable test. - */ - size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (vmf->pgoff >= size) - return VM_FAULT_SIGBUS; - - memset(&bh, 0, sizeof(bh)); - block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits); - bh.b_bdev = inode->i_sb->s_bdev; - bh.b_size = PAGE_SIZE; - - entry = grab_mapping_entry(mapping, vmf->pgoff, 0); - if (IS_ERR(entry)) { - error = PTR_ERR(entry); - goto out; - } - - error = get_block(inode, block, &bh, 0); - if (!error && (bh.b_size < PAGE_SIZE)) - error = -EIO; /* fs corruption? 
*/ - if (error) - goto unlock_entry; - - if (vmf->cow_page) { - struct page *new_page = vmf->cow_page; - if (buffer_written(&bh)) - error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode), - bh.b_size, new_page, vaddr); - else - clear_user_highpage(new_page, vaddr); - if (error) - goto unlock_entry; - if (!radix_tree_exceptional_entry(entry)) { - vmf->page = entry; - return VM_FAULT_LOCKED; - } - vmf->entry = entry; - return VM_FAULT_DAX_LOCKED; - } - - if (!buffer_mapped(&bh)) { - if (vmf->flags & FAULT_FLAG_WRITE) { - error = get_block(inode, block, &bh, 1); - count_vm_event(PGMAJFAULT); - mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); - major = VM_FAULT_MAJOR; - if (!error && (bh.b_size < PAGE_SIZE)) - error = -EIO; - if (error) - goto unlock_entry; - } else { - return dax_load_hole(mapping, entry, vmf); - } - } - - /* Filesystem should not return unwritten buffers to us! */ - WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh)); - error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode), - bh.b_size, &entry, vma, vmf); - unlock_entry: - put_locked_mapping_entry(mapping, vmf->pgoff, entry); - out: - if (error == -ENOMEM) - return VM_FAULT_OOM | major; - /* -EBUSY is fine, somebody else faulted on the same PTE */ - if ((error < 0) && (error != -EBUSY)) - return VM_FAULT_SIGBUS | major; - return VM_FAULT_NOPAGE | major; -} -EXPORT_SYMBOL_GPL(dax_fault); - /** * dax_pfn_mkwrite - handle first write to DAX page * @vma: The virtual memory area where the fault occurred @@ -1078,60 +817,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector, } EXPORT_SYMBOL_GPL(__dax_zero_page_range); -/** - * dax_zero_page_range - zero a range within a page of a DAX file - * @inode: The file being truncated - * @from: The file offset that is being truncated to - * @length: The number of bytes to zero - * @get_block: The filesystem method used to translate file offsets to blocks - * - * This function can be called by a filesystem when it is zeroing part of a - * page in a DAX file. This is intended for hole-punch operations. If - * you are truncating a file, the helper function dax_truncate_page() may be - * more convenient. - */ -int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, - get_block_t get_block) -{ - struct buffer_head bh; - pgoff_t index = from >> PAGE_SHIFT; - unsigned offset = from & (PAGE_SIZE-1); - int err; - - /* Block boundary? Nothing to do */ - if (!length) - return 0; - if (WARN_ON_ONCE((offset + length) > PAGE_SIZE)) - return -EINVAL; - - memset(&bh, 0, sizeof(bh)); - bh.b_bdev = inode->i_sb->s_bdev; - bh.b_size = PAGE_SIZE; - err = get_block(inode, index, &bh, 0); - if (err < 0 || !buffer_written(&bh)) - return err; - - return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode), - offset, length); -} -EXPORT_SYMBOL_GPL(dax_zero_page_range); - -/** - * dax_truncate_page - handle a partial page being truncated in a DAX file - * @inode: The file being truncated - * @from: The file offset that is being truncated to - * @get_block: The filesystem method used to translate file offsets to blocks - * - * Similar to block_truncate_page(), this function can be called by a - * filesystem when it is truncating a DAX file to handle the partial page. 
- */ -int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) -{ - unsigned length = PAGE_ALIGN(from) - from; - return dax_zero_page_range(inode, from, length, get_block); -} -EXPORT_SYMBOL_GPL(dax_truncate_page); - #ifdef CONFIG_FS_IOMAP static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) { diff --git a/include/linux/dax.h b/include/linux/dax.h index 8d1a5c47945f..0afade8bd3d7 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -38,13 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags) ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops); -ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, - get_block_t, dio_iodone_t, int flags); -int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); -int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap_ops *ops); -int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, pgoff_t index, void *entry, bool wake_all); @@ -73,12 +68,6 @@ static inline int __dax_zero_page_range(struct block_device *bdev, } #endif -static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags, get_block_t gb) -{ - return VM_FAULT_FALLBACK; -} - #ifdef CONFIG_FS_DAX_PMD static inline unsigned int dax_radix_order(void *entry) { @@ -101,7 +90,6 @@ static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, } #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); -#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { -- cgit v1.2.3-59-g8ed1b From 0cb80b4847553582830a59da2c022c37a1f4a119 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 12 Dec 2016 21:34:12 -0500 Subject: dax: Fix sleep in atomic contex in grab_mapping_entry() Commit 642261ac995e: "dax: add struct iomap based DAX PMD support" has introduced unmapping of page tables if huge page needs to be split in grab_mapping_entry(). However the unmapping happens after radix_tree_preload() call which disables preemption and thus unmap_mapping_range() tries to acquire i_mmap_lock in atomic context which is a bug. Fix the problem by moving unmapping before radix_tree_preload() call. 
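Roughly, the ordering this patch establishes is (sketch only; the downgrade checks and surrounding context are elided, see the hunk below):

	/* can sleep on i_mmap_lock, so it must run while preemption is enabled */
	unmap_mapping_range(mapping, (index << PAGE_SHIFT) & PMD_MASK,
			PMD_SIZE, 0);

	/* returns with preemption disabled on success */
	err = radix_tree_preload(mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
	if (err) {
		if (pmd_downgrade)
			put_locked_mapping_entry(mapping, index, entry);
		return ERR_PTR(err);
	}

	/* re-take the tree lock for the radix tree update */
	spin_lock_irq(&mapping->tree_lock);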
Fixes: 642261ac995e01d7837db1f4b90181496f7e6835 Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o --- fs/dax.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs/dax.c') diff --git a/fs/dax.c b/fs/dax.c index ad131cd2605d..5bfd27b4a69c 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -333,14 +333,6 @@ restart: } spin_unlock_irq(&mapping->tree_lock); - err = radix_tree_preload( - mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); - if (err) { - if (pmd_downgrade) - put_locked_mapping_entry(mapping, index, entry); - return ERR_PTR(err); - } - /* * Besides huge zero pages the only other thing that gets * downgraded are empty entries which don't need to be @@ -350,6 +342,13 @@ restart: unmap_mapping_range(mapping, (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0); + err = radix_tree_preload( + mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); + if (err) { + if (pmd_downgrade) + put_locked_mapping_entry(mapping, index, entry); + return ERR_PTR(err); + } spin_lock_irq(&mapping->tree_lock); if (pmd_downgrade) { -- cgit v1.2.3-59-g8ed1b
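For reference, the entry encoding helpers added to include/linux/dax.h above round-trip as follows (illustrative sketch only; 'sector' stands for any suitably aligned sector number, and the lock bit set by dax_radix_locked_entry() is cleared again when the entry is unlocked):

	/* encode: sector plus size flag, returned in the locked state */
	void *entry = dax_radix_locked_entry(sector, RADIX_DAX_PMD);

	/* decode: recover the starting sector and the entry order */
	sector_t start = dax_radix_sector(entry);	/* == sector */
	unsigned int order = dax_radix_order(entry);	/* PMD_SHIFT - PAGE_SHIFT */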