author		2025-02-18 10:39:37 -0500
committer	2025-03-10 09:11:08 -0400
commit		fbfdc9fc0f66e7877a6d1a6e44c6acac46c5b811 (patch)
tree		c583d008199ac460b3a28f2629d743f7729d107f /fs/nfsd/filecache.c
parent		nfsd: filecache: don't repeatedly add/remove files on the lru list (diff)
nfsd: filecache: drop the list_lru lock during long gc scans
Under a high NFSv3 load with lots of different files being accessed,
the LRU list of garbage-collectable files can become quite long.
Asking list_lru_walk_node() to scan the whole list can result in a long
period during which a spinlock is held, blocking the addition of new LRU
items.
So ask list_lru_walk_node() to scan only a few entries at a time, and
repeat until the whole list has been scanned.
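As a rough illustration of the batching idea, here is a minimal
user-space C sketch, assuming POSIX threads; lru_scan_batch(), GC_BATCH
and the list layout are hypothetical stand-ins for the kernel's
list_lru machinery, not its real API. The point is that the lock is
released between bounded batches, so writers are never blocked for
longer than one batch:

/* Minimal user-space sketch of batched LRU scanning; assumes POSIX
 * threads. lru_scan_batch() and GC_BATCH are hypothetical stand-ins
 * for the kernel's list_lru machinery, not its real API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define GC_BATCH 16UL

struct node {
	struct node *next;
};

static struct node *lru_head;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Dispose of at most *nr entries, decrementing *nr per entry visited,
 * the way list_lru_walk_node() consumes its nr_to_walk counter. The
 * lock is held only for the duration of one bounded batch. */
static unsigned long lru_scan_batch(unsigned long *nr)
{
	unsigned long freed = 0;

	pthread_mutex_lock(&lru_lock);
	while (*nr && lru_head) {
		struct node *victim = lru_head;

		lru_head = victim->next;
		free(victim);
		(*nr)--;
		freed++;
	}
	pthread_mutex_unlock(&lru_lock);   /* writers may slip in here */
	return freed;
}

int main(void)
{
	unsigned long remaining = 0, freed = 0;

	for (int i = 0; i < 1000; i++) {   /* build a long LRU list */
		struct node *n = malloc(sizeof(*n));

		n->next = lru_head;
		lru_head = n;
		remaining++;
	}

	while (remaining > 0) {
		unsigned long nr = remaining < GC_BATCH ? remaining : GC_BATCH;

		remaining -= nr;
		freed += lru_scan_batch(&nr);
		if (nr)                    /* list ran dry mid-batch */
			remaining = 0;
	}
	printf("freed %lu entries\n", freed);
	return 0;
}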
If the shrinker runs between two consecutive calls to
list_lru_walk_node(), it could invalidate the "remaining" counter,
which could lead to premature freeing. So add a spinlock to avoid that.
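The exclusion between the two scanners can be sketched the same way:
the gc path takes the lock unconditionally, while the shrinker path
only trylocks and backs off (in the kernel, by returning SHRINK_STOP)
when gc already holds it. This is a hedged user-space analogue, again
assuming POSIX threads, with do_scan() as a stand-in for the real LRU
walk:

/* User-space analogue of the gc/shrinker exclusion. do_scan() is a
 * stand-in for the real LRU walk; SHRINK_STOP mirrors the kernel's
 * "back off and retry later" return value. */
#include <pthread.h>

#define SHRINK_STOP	(~0UL)

static pthread_mutex_t nfsd_gc_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long do_scan(void)
{
	return 0;	/* the actual walk is elided */
}

/* gc path: may wait for the lock, so its "remaining" bookkeeping is
 * never disturbed by a concurrent shrinker. */
static unsigned long gc_scan(void)
{
	unsigned long ret;

	pthread_mutex_lock(&nfsd_gc_lock);
	ret = do_scan();
	pthread_mutex_unlock(&nfsd_gc_lock);
	return ret;
}

/* Shrinker path: gives up immediately when gc owns the lock instead
 * of spinning for the duration of a full gc pass. */
static unsigned long shrinker_scan(void)
{
	unsigned long ret;

	if (pthread_mutex_trylock(&nfsd_gc_lock) != 0)
		return SHRINK_STOP;
	ret = do_scan();
	pthread_mutex_unlock(&nfsd_gc_lock);
	return ret;
}

int main(void)
{
	(void)gc_scan();			/* uncontended: takes the lock */
	return shrinker_scan() == SHRINK_STOP;	/* 0: lock was free again */
}

The trylock is the key design choice: a shrinker running from memory
reclaim must not stall behind a potentially long gc pass, so losing the
race simply defers shrinking rather than blocking it.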
Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Diffstat (limited to 'fs/nfsd/filecache.c')
-rw-r--r--	fs/nfsd/filecache.c	27
1 file changed, 24 insertions, 3 deletions
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 2289065ee5e8..ab85e6a2454f 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -544,6 +544,13 @@ nfsd_file_gc_cb(struct list_head *item, struct list_lru_one *lru,
 	return nfsd_file_lru_cb(item, lru, arg);
 }
 
+/* If the shrinker runs between calls to list_lru_walk_node() in
+ * nfsd_file_gc(), the "remaining" count will be wrong. This could
+ * result in premature freeing of some files. This may not matter much
+ * but is easy to fix with this spinlock which temporarily disables
+ * the shrinker.
+ */
+static DEFINE_SPINLOCK(nfsd_gc_lock);
 static void
 nfsd_file_gc(void)
 {
@@ -551,12 +558,22 @@ nfsd_file_gc(void)
 	LIST_HEAD(dispose);
 	int nid;
 
+	spin_lock(&nfsd_gc_lock);
 	for_each_node_state(nid, N_NORMAL_MEMORY) {
-		unsigned long nr = list_lru_count_node(&nfsd_file_lru, nid);
+		unsigned long remaining = list_lru_count_node(&nfsd_file_lru, nid);
+
+		while (remaining > 0) {
+			unsigned long nr = min(remaining, NFSD_FILE_GC_BATCH);
 
-		ret += list_lru_walk_node(&nfsd_file_lru, nid, nfsd_file_gc_cb,
-					  &dispose, &nr);
+			remaining -= nr;
+			ret += list_lru_walk_node(&nfsd_file_lru, nid, nfsd_file_gc_cb,
+						  &dispose, &nr);
+			if (nr)
+				/* walk aborted early */
+				remaining = 0;
+		}
 	}
+	spin_unlock(&nfsd_gc_lock);
 	trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
 	nfsd_file_dispose_list_delayed(&dispose);
 }
@@ -581,8 +598,12 @@ nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
 	LIST_HEAD(dispose);
 	unsigned long ret;
 
+	if (!spin_trylock(&nfsd_gc_lock))
+		return SHRINK_STOP;
+
 	ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
 				   nfsd_file_lru_cb, &dispose);
+	spin_unlock(&nfsd_gc_lock);
 	trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
 	nfsd_file_dispose_list_delayed(&dispose);
 	return ret;