author    Linus Torvalds <torvalds@linux-foundation.org>  2021-11-09 10:11:53 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-11-09 10:11:53 -0800
commit    59a2ceeef6d6bb8f68550fdbd84246b74a99f06b (patch)
tree      d8302a240dfe56defb8d56df555bb046a5a7bb5c /mm
parent    Merge tag 'backlight-next-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/backlight (diff)
parent    ipc/ipc_sysctl.c: remove fallback for !CONFIG_PROC_SYSCTL (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "87 patches.

  Subsystems affected by this patch series: mm (pagecache and hugetlb),
  procfs, misc, MAINTAINERS, lib, checkpatch, binfmt, kallsyms, ramfs,
  init, codafs, nilfs2, hfs, crash_dump, signals, seq_file, fork,
  sysvfs, kcov, gdb, resource, selftests, and ipc"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (87 commits)
  ipc/ipc_sysctl.c: remove fallback for !CONFIG_PROC_SYSCTL
  ipc: check checkpoint_restore_ns_capable() to modify C/R proc files
  selftests/kselftest/runner/run_one(): allow running non-executable files
  virtio-mem: disallow mapping virtio-mem memory via /dev/mem
  kernel/resource: disallow access to exclusive system RAM regions
  kernel/resource: clean up and optimize iomem_is_exclusive()
  scripts/gdb: handle split debug for vmlinux
  kcov: replace local_irq_save() with a local_lock_t
  kcov: avoid enable+disable interrupts if !in_task()
  kcov: allocate per-CPU memory on the relevant node
  Documentation/kcov: define `ip' in the example
  Documentation/kcov: include types.h in the example
  sysv: use BUILD_BUG_ON instead of runtime check
  kernel/fork.c: unshare(): use swap() to make code cleaner
  seq_file: fix passing wrong private data
  seq_file: move seq_escape() to a header
  signal: remove duplicate include in signal.h
  crash_dump: remove duplicate include in crash_dump.h
  crash_dump: fix boolreturn.cocci warning
  hfs/hfsplus: use WARN_ON for sanity check
  ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig        |  7
-rw-r--r--  mm/filemap.c      |  8
-rw-r--r--  mm/kasan/report.c | 17
-rw-r--r--  mm/memfd.c        |  4
-rw-r--r--  mm/mmap.c         |  3
-rw-r--r--  mm/page_owner.c   | 18
-rw-r--r--  mm/truncate.c     | 19
-rw-r--r--  mm/vmscan.c       |  7
-rw-r--r--  mm/workingset.c   | 10
9 files changed, 60 insertions(+), 33 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index ae1f151c2924..068ce591a13a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -109,6 +109,13 @@ config NUMA_KEEP_MEMINFO
config MEMORY_ISOLATION
bool
+# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
+# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
+# /dev/mem.
+config EXCLUSIVE_SYSTEM_RAM
+ def_bool y
+ depends on !DEVMEM || STRICT_DEVMEM
+
#
# Only be set on architectures that have completely implemented memory hotplug
# feature. If you are not sure, don't touch it.
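The new EXCLUSIVE_SYSTEM_RAM option backs the "kernel/resource: disallow access to exclusive system RAM regions" and virtio-mem patches in this merge: System RAM regions flagged IORESOURCE_EXCLUSIVE become off-limits to /dev/mem. A minimal hedged sketch of how a driver might claim RAM that way, using the existing request_mem_region_exclusive() helper; the range and device name below are illustrative only:

#include <linux/ioport.h>

/* Sketch: claim a System RAM range exclusively so that, with
 * CONFIG_EXCLUSIVE_SYSTEM_RAM=y, user space cannot map it via
 * /dev/mem. request_mem_region_exclusive() sets IORESOURCE_EXCLUSIVE
 * on the claimed region. */
static int example_claim_ram_exclusive(resource_size_t start,
				       resource_size_t size)
{
	if (!request_mem_region_exclusive(start, size, "example-driver"))
		return -EBUSY;	/* overlaps an existing claim */
	return 0;
}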
diff --git a/mm/filemap.c b/mm/filemap.c
index 615512caa0b5..daa0e23a6ee6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -261,9 +261,13 @@ void delete_from_page_cache(struct page *page)
struct address_space *mapping = page_mapping(page);
BUG_ON(!PageLocked(page));
+ spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
__delete_from_page_cache(page, NULL);
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
page_cache_free_page(mapping, page);
}
@@ -339,6 +343,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
if (!pagevec_count(pvec))
return;
+ spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
for (i = 0; i < pagevec_count(pvec); i++) {
trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
@@ -347,6 +352,9 @@ void delete_from_page_cache_batch(struct address_space *mapping,
}
page_cache_delete_batch(mapping, pvec);
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
for (i = 0; i < pagevec_count(pvec); i++)
page_cache_free_page(mapping, pvec->pages[i]);
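Both filemap hunks follow one pattern from the pagecache patches in this merge: take the host inode's i_lock outside the i_pages lock, and once the deletion leaves the mapping shrinkable, put the inode back on the superblock LRU via inode_add_lru() so the inode shrinker can see it again. A hedged sketch of what the new mapping_shrinkable() predicate checks, assuming the mm/internal.h helper added by this series; the real code may differ in detail:

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Sketch of mapping_shrinkable(): an inode becomes interesting to the
 * shrinker once its page cache holds nothing that pins it. */
static inline bool mapping_shrinkable_sketch(struct address_space *mapping)
{
	void *head;

	/* Highmem systems can see lowmem pressure from inodes alone,
	 * so keep inodes shrinkable regardless of cache state. */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Completely empty cache: nothing pins the inode. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/* A lone offset-0 shadow entry sits directly in the head
	 * pointer and escapes the shadow shrinker's node list; let the
	 * inode shrinker reap it instead. */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}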
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 9da071ad930c..0bc10f452f7e 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -132,20 +132,11 @@ static void end_report(unsigned long *flags, unsigned long addr)
kasan_enable_current();
}
-static void print_stack(depot_stack_handle_t stack)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(stack, &entries);
- stack_trace_print(entries, nr_entries, 0);
-}
-
static void print_track(struct kasan_track *track, const char *prefix)
{
pr_err("%s by task %u:\n", prefix, track->pid);
if (track->stack) {
- print_stack(track->stack);
+ stack_depot_print(track->stack);
} else {
pr_err("(stack is not available)\n");
}
@@ -214,12 +205,12 @@ static void describe_object_stacks(struct kmem_cache *cache, void *object,
return;
if (alloc_meta->aux_stack[0]) {
pr_err("Last potentially related work creation:\n");
- print_stack(alloc_meta->aux_stack[0]);
+ stack_depot_print(alloc_meta->aux_stack[0]);
pr_err("\n");
}
if (alloc_meta->aux_stack[1]) {
pr_err("Second to last potentially related work creation:\n");
- print_stack(alloc_meta->aux_stack[1]);
+ stack_depot_print(alloc_meta->aux_stack[1]);
pr_err("\n");
}
#endif
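These KASAN hunks replace the local print_stack() wrapper with stack_depot_print(), one of the stackdepot helpers consolidated by this series. Judging from the removed wrapper, the new helper is essentially the same fetch-and-print; a sketch, not necessarily the exact lib/stackdepot code:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Hedged sketch of stack_depot_print(), inferred from the removed
 * print_stack() wrapper above; the real implementation may differ. */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}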
@@ -235,7 +226,7 @@ static void describe_object(struct kmem_cache *cache, void *object,
static inline bool kernel_or_module_addr(const void *addr)
{
- if (addr >= (void *)_stext && addr < (void *)_end)
+ if (is_kernel((unsigned long)addr))
return true;
if (is_module_address((unsigned long)addr))
return true;
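The kernel_or_module_addr() change swaps an open-coded section check for the is_kernel() helper consolidated by the kallsyms patches in this series. Based on the line it replaces, the helper is roughly the following; the real one may additionally cover the gate area:

#include <asm/sections.h>

/* Hedged sketch of is_kernel(), equivalent to the open-coded
 * _stext/_end range check removed above. */
static inline bool is_kernel_sketch(unsigned long addr)
{
	return addr >= (unsigned long)_stext && addr < (unsigned long)_end;
}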
diff --git a/mm/memfd.c b/mm/memfd.c
index 081dd33e6a61..9f80f162791a 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -297,9 +297,7 @@ SYSCALL_DEFINE2(memfd_create,
}
if (flags & MFD_HUGETLB) {
- struct ucounts *ucounts = NULL;
-
- file = hugetlb_file_setup(name, 0, VM_NORESERVE, &ucounts,
+ file = hugetlb_file_setup(name, 0, VM_NORESERVE,
HUGETLB_ANONHUGE_INODE,
(flags >> MFD_HUGE_SHIFT) &
MFD_HUGE_MASK);
diff --git a/mm/mmap.c b/mm/mmap.c
index b22a07f5e761..bfb0ea164a90 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1599,7 +1599,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
goto out_fput;
}
} else if (flags & MAP_HUGETLB) {
- struct ucounts *ucounts = NULL;
struct hstate *hs;
hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
@@ -1615,7 +1614,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
*/
file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
VM_NORESERVE,
- &ucounts, HUGETLB_ANONHUGE_INODE,
+ HUGETLB_ANONHUGE_INODE,
(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
if (IS_ERR(file))
return PTR_ERR(file);
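The memfd.c and mmap.c hunks both drop the ucounts out-parameter, so hugetlb_file_setup() now handles that bookkeeping internally. Inferred from the two updated call sites, the new prototype is roughly the one below; treat it as a reading aid, and see include/linux/hugetlb.h for the authoritative declaration:

/* Prototype of hugetlb_file_setup() as inferred from the updated
 * callers above; parameter names are guesses. */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acct, int creat_flags,
				int page_size_log);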
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 07b61cdf3d0c..79936db59859 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -329,8 +329,6 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
depot_stack_handle_t handle)
{
int ret, pageblock_mt, page_mt;
- unsigned long *entries;
- unsigned int nr_entries;
char *kbuf;
count = min_t(size_t, count, PAGE_SIZE);
@@ -361,8 +359,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
if (ret >= count)
goto err;
- nr_entries = stack_depot_fetch(handle, &entries);
- ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
+ ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
if (ret >= count)
goto err;
@@ -394,8 +391,6 @@ void __dump_page_owner(const struct page *page)
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;
depot_stack_handle_t handle;
- unsigned long *entries;
- unsigned int nr_entries;
gfp_t gfp_mask;
int mt;
@@ -423,20 +418,17 @@ void __dump_page_owner(const struct page *page)
page_owner->pid, page_owner->ts_nsec, page_owner->free_ts_nsec);
handle = READ_ONCE(page_owner->handle);
- if (!handle) {
+ if (!handle)
pr_alert("page_owner allocation stack trace missing\n");
- } else {
- nr_entries = stack_depot_fetch(handle, &entries);
- stack_trace_print(entries, nr_entries, 0);
- }
+ else
+ stack_depot_print(handle);
handle = READ_ONCE(page_owner->free_handle);
if (!handle) {
pr_alert("page_owner free stack trace missing\n");
} else {
- nr_entries = stack_depot_fetch(handle, &entries);
pr_alert("page last free stack trace:\n");
- stack_trace_print(entries, nr_entries, 0);
+ stack_depot_print(handle);
}
if (page_owner->last_migrate_reason != -1)
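page_owner gets the same stackdepot treatment: each open-coded stack_depot_fetch() followed by stack_trace_print()/stack_trace_snprint() collapses into a single helper call. A hedged sketch of stack_depot_snprint() consistent with the sequence it replaces; the real lib/stackdepot helper may differ:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Sketch of stack_depot_snprint(), mirroring the fetch + snprint pair
 * removed above. Returns the number of characters written. */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries,
						nr_entries, spaces) : 0;
}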
diff --git a/mm/truncate.c b/mm/truncate.c
index 714eaf19821d..cc83a3f7c1ad 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -45,9 +45,13 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
void *entry)
{
+ spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
__clear_shadow_entry(mapping, index, entry);
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
}
/*
@@ -73,8 +77,10 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
return;
dax = dax_mapping(mapping);
- if (!dax)
+ if (!dax) {
+ spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
+ }
for (i = j; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
@@ -93,8 +99,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
__clear_shadow_entry(mapping, index, page);
}
- if (!dax)
+ if (!dax) {
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
+ }
pvec->nr = j;
}
@@ -567,6 +577,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
return 0;
+ spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
if (PageDirty(page))
goto failed;
@@ -574,6 +585,9 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
BUG_ON(page_has_private(page));
__delete_from_page_cache(page, NULL);
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
if (mapping->a_ops->freepage)
mapping->a_ops->freepage(page);
@@ -582,6 +596,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
return 1;
failed:
xa_unlock_irq(&mapping->i_pages);
+ spin_unlock(&mapping->host->i_lock);
return 0;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ef4a6dc7f000..fb9584641ac7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1205,6 +1205,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
BUG_ON(!PageLocked(page));
BUG_ON(mapping != page_mapping(page));
+ if (!PageSwapCache(page))
+ spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
/*
* The non racy check for a busy page.
@@ -1273,6 +1275,9 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
shadow = workingset_eviction(page, target_memcg);
__delete_from_page_cache(page, shadow);
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
if (freepage != NULL)
freepage(page);
@@ -1282,6 +1287,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
cannot_free:
xa_unlock_irq(&mapping->i_pages);
+ if (!PageSwapCache(page))
+ spin_unlock(&mapping->host->i_lock);
return 0;
}
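In __remove_mapping() the inode lock is only taken for file pages: swap cache pages belong to the swapper address spaces, whose mapping->host is NULL. A condensed, hypothetical sketch of the resulting locking shape, with all the actual reclaim checks elided:

#include <linux/pagemap.h>

/* Condensed sketch of the locking added to __remove_mapping() above;
 * the reclaim logic between the locks is elided. Swap-cache mappings
 * have no host inode, hence the PageSwapCache() guard. */
static void remove_mapping_locking_sketch(struct address_space *mapping,
					  struct page *page)
{
	bool file = !PageSwapCache(page);

	if (file)
		spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);

	/* ... busy-page checks, __delete_from_page_cache() ... */

	xa_unlock_irq(&mapping->i_pages);
	if (file) {
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
}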
diff --git a/mm/workingset.c b/mm/workingset.c
index 109ab978251a..8c03afe1d67c 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -543,6 +543,13 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
goto out;
}
+ if (!spin_trylock(&mapping->host->i_lock)) {
+ xa_unlock(&mapping->i_pages);
+ spin_unlock_irq(lru_lock);
+ ret = LRU_RETRY;
+ goto out;
+ }
+
list_lru_isolate(lru, item);
__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
@@ -562,6 +569,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
out_invalid:
xa_unlock_irq(&mapping->i_pages);
+ if (mapping_shrinkable(mapping))
+ inode_add_lru(mapping->host);
+ spin_unlock(&mapping->host->i_lock);
ret = LRU_REMOVED_RETRY;
out:
cond_resched();
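The shadow shrinker reaches this code already holding the LRU lock and the i_pages lock, i.e. in the opposite order from the page cache paths above, so it cannot block on i_lock without inverting the lock order; instead it trylocks and asks the list_lru walker to retry. A generic sketch of that idiom, with hypothetical lock names:

#include <linux/list_lru.h>
#include <linux/spinlock.h>

/* Generic, hypothetical sketch of the trylock-and-retry idiom used
 * above when a lock must be acquired against the established nesting
 * order while two inner locks are already held. */
static enum lru_status isolate_with_lock_inversion(spinlock_t *outer,
						   spinlock_t *inner,
						   spinlock_t *lru_lock)
{
	if (!spin_trylock(outer)) {
		/* Back out and let the walker come around again rather
		 * than deadlocking on the inverted order. */
		spin_unlock(inner);
		spin_unlock_irq(lru_lock);
		return LRU_RETRY;
	}
	/* ... isolate the item, then release in reverse order ... */
	spin_unlock(outer);
	return LRU_REMOVED_RETRY;
}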