author		Matthew Wilcox <willy@infradead.org>	2018-03-28 11:01:43 -0400
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:46:43 -0400
commit		ec4907ff69fb16161d9d9370260303a73dd5acde (patch)
tree		e5c1157777be9c99e21800d13b377627cb08f841 /fs
parent		dax: Rename some functions (diff)
download	linux-dev-ec4907ff69fb16161d9d9370260303a73dd5acde.tar.xz
		linux-dev-ec4907ff69fb16161d9d9370260303a73dd5acde.zip
dax: Hash on XArray instead of mapping
Since the XArray is embedded in the struct address_space, its address contains exactly as much entropy as the address of the mapping. This patch is purely preparatory for later patches which will simplify the wait/wake interfaces.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
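The point of the commit message is that &mapping->i_pages sits at a fixed, compile-time offset inside struct address_space, so the XArray pointer is just the mapping pointer plus a constant and feeds the wait-table hash equally well. Below is a minimal user-space sketch of that idea; the struct layout, hash constant, and DAX_WAIT_TABLE_BITS value are illustrative stand-ins, not the real kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DAX_WAIT_TABLE_BITS 12          /* assumed size, for illustration only */

struct xarray { void *head; };          /* stand-in for the kernel type */
struct address_space {
	long nrpages;                   /* placeholder fields before i_pages */
	struct xarray i_pages;
};

/* Simplified stand-in for the kernel's hash_long() (64-bit variant). */
static unsigned long hash_ptr_index(const void *ptr, unsigned long index,
				    unsigned int bits)
{
	uint64_t val = (uint64_t)(uintptr_t)ptr ^ index;
	return (unsigned long)((val * 0x61C8864680B583EBULL) >> (64 - bits));
}

int main(void)
{
	struct address_space mapping;
	unsigned long index = 42;

	/* Old scheme: hash on the address_space pointer. */
	unsigned long h_old = hash_ptr_index(&mapping, index,
					     DAX_WAIT_TABLE_BITS);
	/* New scheme: hash on the embedded XArray pointer instead. */
	unsigned long h_new = hash_ptr_index(&mapping.i_pages, index,
					     DAX_WAIT_TABLE_BITS);

	/*
	 * The two pointers differ only by a compile-time constant offset,
	 * so the new hash is distributed across the wait table exactly as
	 * well as the old one.
	 */
	printf("offset=%zu old=%lu new=%lu\n",
	       offsetof(struct address_space, i_pages), h_old, h_new);
	return 0;
}

Hashing on the XArray address also lets the later patches express the wait/wake helpers purely in terms of the xarray, which is the simplification the commit message refers to.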
Diffstat (limited to 'fs')
-rw-r--r--	fs/dax.c	29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index acdb996efad2..fd111ea1da3b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -116,7 +116,7 @@ static int dax_is_empty_entry(void *entry)
* DAX page cache entry locking
*/
struct exceptional_entry_key {
- struct address_space *mapping;
+ struct xarray *xa;
pgoff_t entry_start;
};
@@ -125,7 +125,7 @@ struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
};
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+static wait_queue_head_t *dax_entry_waitqueue(struct xarray *xa,
pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
unsigned long hash;
@@ -138,21 +138,21 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
if (dax_is_pmd_entry(entry))
index &= ~PG_PMD_COLOUR;
- key->mapping = mapping;
+ key->xa = xa;
key->entry_start = index;
- hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+ hash = hash_long((unsigned long)xa ^ index, DAX_WAIT_TABLE_BITS);
return wait_table + hash;
}
-static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
- int sync, void *keyp)
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
+ unsigned int mode, int sync, void *keyp)
{
struct exceptional_entry_key *key = keyp;
struct wait_exceptional_entry_queue *ewait =
container_of(wait, struct wait_exceptional_entry_queue, wait);
- if (key->mapping != ewait->key.mapping ||
+ if (key->xa != ewait->key.xa ||
key->entry_start != ewait->key.entry_start)
return 0;
return autoremove_wake_function(wait, mode, sync, NULL);
@@ -163,13 +163,13 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
* The important information it's conveying is whether the entry at
* this index used to be a PMD entry.
*/
-static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+static void dax_wake_mapping_entry_waiter(struct xarray *xa,
pgoff_t index, void *entry, bool wake_all)
{
struct exceptional_entry_key key;
wait_queue_head_t *wq;
- wq = dax_entry_waitqueue(mapping, index, entry, &key);
+ wq = dax_entry_waitqueue(xa, index, entry, &key);
/*
* Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -248,7 +248,8 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
return entry;
}
- wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+ wq = dax_entry_waitqueue(&mapping->i_pages, index, entry,
+ &ewait.key);
prepare_to_wait_exclusive(wq, &ewait.wait,
TASK_UNINTERRUPTIBLE);
xa_unlock_irq(&mapping->i_pages);
@@ -289,7 +290,7 @@ static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
}
unlock_slot(mapping, slot);
xa_unlock_irq(&mapping->i_pages);
- dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+ dax_wake_mapping_entry_waiter(&mapping->i_pages, index, entry, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
@@ -309,7 +310,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
return;
/* We have to wake up next waiter for the page cache entry lock */
- dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+ dax_wake_mapping_entry_waiter(&mapping->i_pages, index, entry, false);
}
static unsigned long dax_entry_size(void *entry)
@@ -578,8 +579,8 @@ restart:
dax_disassociate_entry(entry, mapping, false);
radix_tree_delete(&mapping->i_pages, index);
mapping->nrexceptional--;
- dax_wake_mapping_entry_waiter(mapping, index, entry,
- true);
+ dax_wake_mapping_entry_waiter(&mapping->i_pages,
+ index, entry, true);
}
entry = dax_make_locked(0, size_flag | DAX_EMPTY);