author	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-05 16:32:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-05 16:32:45 -0700
commit	6614a3c3164a5df2b54abb0b3559f51041cf705b (patch)
tree	1c25c23d9efed988705287fc2ccb78e0e76e311d /arch
parent	Merge tag 'mtd/for-5.20' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux (diff)
parent	tools/testing/selftests/vm/hmm-tests.c: fix build (diff)
Merge tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
 "Most of the MM queue. A few things are still pending.

  Liam's maple tree rework didn't make it. This has resulted in a few
  other minor patch series being held over for next time.

  Multi-gen LRU still isn't merged as we were waiting for mapletree to
  stabilize. The current plan is to merge MGLRU into -mm soon and to
  later reintroduce mapletree, with a view to hopefully getting both
  into 6.1-rc1.

  Summary:

   - The usual batches of cleanups from Baoquan He, Muchun Song, Miaohe
     Lin, Yang Shi, Anshuman Khandual and Mike Rapoport

   - Some kmemleak fixes from Patrick Wang and Waiman Long

   - DAMON updates from SeongJae Park

   - memcg debug/visibility work from Roman Gushchin

   - vmalloc speedup from Uladzislau Rezki

   - more folio conversion work from Matthew Wilcox

   - enhancements for coherent device memory mapping from Alex Sierra

   - addition of shared pages tracking and CoW support for fsdax, from
     Shiyang Ruan

   - hugetlb optimizations from Mike Kravetz

   - Mel Gorman has contributed some pagealloc changes to improve
     latency and realtime behaviour.

   - mprotect soft-dirty checking has been improved by Peter Xu

   - Many other singleton patches all over the place"

[ XFS merge from hell as per Darrick Wong in
  https://lore.kernel.org/all/YshKnxb4VwXycPO8@magnolia/ ]

* tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (282 commits)
  tools/testing/selftests/vm/hmm-tests.c: fix build
  mm: Kconfig: fix typo
  mm: memory-failure: convert to pr_fmt()
  mm: use is_zone_movable_page() helper
  hugetlbfs: fix inaccurate comment in hugetlbfs_statfs()
  hugetlbfs: cleanup some comments in inode.c
  hugetlbfs: remove unneeded header file
  hugetlbfs: remove unneeded hugetlbfs_ops forward declaration
  hugetlbfs: use helper macro SZ_1{K,M}
  mm: cleanup is_highmem()
  mm/hmm: add a test for cross device private faults
  selftests: add soft-dirty into run_vmtests.sh
  selftests: soft-dirty: add test for mprotect
  mm/mprotect: fix soft-dirty check in can_change_pte_writable()
  mm: memcontrol: fix potential oom_lock recursion deadlock
  mm/gup.c: fix formatting in check_and_migrate_movable_page()
  xfs: fail dax mount if reflink is enabled on a partition
  mm/memcontrol.c: remove the redundant updating of stats_flush_threshold
  userfaultfd: don't fail on unrecognized features
  hugetlb_cgroup: fix wrong hugetlb cgroup numa stat
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/pgtable.h | 17
-rw-r--r--  arch/alpha/mm/fault.c | 4
-rw-r--r--  arch/alpha/mm/init.c | 22
-rw-r--r--  arch/arc/include/asm/pgtable-bits-arcv2.h | 18
-rw-r--r--  arch/arc/mm/fault.c | 4
-rw-r--r--  arch/arc/mm/mmap.c | 20
-rw-r--r--  arch/arm/include/asm/pgtable.h | 17
-rw-r--r--  arch/arm/kernel/head.S | 34
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 4
-rw-r--r--  arch/arm/mm/mmu.c | 20
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 3
-rw-r--r--  arch/arm64/include/asm/memory.h | 9
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 18
-rw-r--r--  arch/arm64/mm/fault.c | 4
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 56
-rw-r--r--  arch/arm64/mm/mmap.c | 21
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 2
-rw-r--r--  arch/csky/include/asm/pgtable.h | 24
-rw-r--r--  arch/csky/mm/fault.c | 4
-rw-r--r--  arch/csky/mm/init.c | 20
-rw-r--r--  arch/hexagon/include/asm/pgtable.h | 27
-rw-r--r--  arch/hexagon/mm/init.c | 42
-rw-r--r--  arch/hexagon/mm/vm_fault.c | 4
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 18
-rw-r--r--  arch/ia64/mm/fault.c | 4
-rw-r--r--  arch/ia64/mm/init.c | 28
-rw-r--r--  arch/loongarch/Kconfig | 2
-rw-r--r--  arch/loongarch/include/asm/pgalloc.h | 6
-rw-r--r--  arch/loongarch/include/asm/pgtable-bits.h | 19
-rw-r--r--  arch/loongarch/include/asm/pgtable.h | 27
-rw-r--r--  arch/loongarch/kernel/asm-offsets.c | 6
-rw-r--r--  arch/loongarch/mm/cache.c | 46
-rw-r--r--  arch/loongarch/mm/pgtable.c | 2
-rw-r--r--  arch/loongarch/mm/tlbex.S | 6
-rw-r--r--  arch/m68k/include/asm/mcf_pgtable.h | 59
-rw-r--r--  arch/m68k/include/asm/motorola_pgtable.h | 29
-rw-r--r--  arch/m68k/include/asm/sun3_pgtable.h | 23
-rw-r--r--  arch/m68k/mm/fault.c | 4
-rw-r--r--  arch/m68k/mm/mcfmmu.c | 55
-rw-r--r--  arch/m68k/mm/motorola.c | 29
-rw-r--r--  arch/m68k/mm/sun3mmu.c | 20
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 17
-rw-r--r--  arch/microblaze/mm/fault.c | 4
-rw-r--r--  arch/microblaze/mm/init.c | 20
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 8
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 19
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 61
-rw-r--r--  arch/mips/include/asm/pgtable.h | 22
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 5
-rw-r--r--  arch/mips/kernel/mips-mt.c | 4
-rw-r--r--  arch/mips/kvm/mmu.c | 2
-rw-r--r--  arch/mips/mm/cache.c | 3
-rw-r--r--  arch/mips/mm/fault.c | 4
-rw-r--r--  arch/mips/mm/pgtable.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 14
-rw-r--r--  arch/nios2/include/asm/pgtable.h | 23
-rw-r--r--  arch/nios2/mm/fault.c | 4
-rw-r--r--  arch/nios2/mm/init.c | 25
-rw-r--r--  arch/nios2/mm/pgtable.c | 2
-rw-r--r--  arch/openrisc/include/asm/pgtable.h | 18
-rw-r--r--  arch/openrisc/mm/fault.c | 4
-rw-r--r--  arch/openrisc/mm/init.c | 20
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 6
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 26
-rw-r--r--  arch/parisc/mm/fault.c | 4
-rw-r--r--  arch/parisc/mm/init.c | 20
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 20
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 5
-rw-r--r--  arch/powerpc/mm/fault.c | 5
-rw-r--r--  arch/powerpc/mm/pgtable.c | 24
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 20
-rw-r--r--  arch/riscv/mm/fault.c | 4
-rw-r--r--  arch/riscv/mm/init.c | 20
-rw-r--r--  arch/s390/include/asm/pgtable.h | 17
-rw-r--r--  arch/s390/mm/fault.c | 12
-rw-r--r--  arch/s390/mm/mmap.c | 20
-rw-r--r--  arch/sh/include/asm/pgtable.h | 17
-rw-r--r--  arch/sh/mm/fault.c | 4
-rw-r--r--  arch/sh/mm/mmap.c | 20
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h | 19
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 19
-rw-r--r--  arch/sparc/mm/fault_32.c | 4
-rw-r--r--  arch/sparc/mm/fault_64.c | 5
-rw-r--r--  arch/sparc/mm/init_32.c | 20
-rw-r--r--  arch/sparc/mm/init_64.c | 3
-rw-r--r--  arch/um/include/asm/pgtable.h | 17
-rw-r--r--  arch/um/kernel/mem.c | 20
-rw-r--r--  arch/um/kernel/trap.c | 4
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 19
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 2
-rw-r--r--  arch/x86/mm/fault.c | 4
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 39
-rw-r--r--  arch/x86/mm/mem_encrypt_amd.c | 6
-rw-r--r--  arch/x86/mm/pgprot.c | 28
-rw-r--r--  arch/x86/um/mem_32.c | 2
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 2
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 19
-rw-r--r--  arch/xtensa/mm/fault.c | 4
-rw-r--r--  arch/xtensa/mm/init.c | 22
106 files changed, 846 insertions(+), 724 deletions(-)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 170451fde043..3ea9661c09ff 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -116,23 +116,6 @@ struct vm_area_struct;
* arch/alpha/mm/fault.c)
*/
/* xwr */
-#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
-#define __P010 _PAGE_P(_PAGE_FOE)
-#define __P011 _PAGE_P(_PAGE_FOE)
-#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
-#define __P101 _PAGE_P(_PAGE_FOW)
-#define __P110 _PAGE_P(0)
-#define __P111 _PAGE_P(0)
-
-#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
-#define __S010 _PAGE_S(_PAGE_FOE)
-#define __S011 _PAGE_S(_PAGE_FOE)
-#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
-#define __S101 _PAGE_S(_PAGE_FOW)
-#define __S110 _PAGE_S(0)
-#define __S111 _PAGE_S(0)
/*
* pgprot_noncached() is only for infiniband pci support, and a real
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index ec20c1004abf..ef427a6bdd1a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -155,6 +155,10 @@ retry:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
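The three added lines above are the same hunk repeated in every per-arch fault handler touched by this merge. VM_FAULT_COMPLETED means the core fault handler already finished all work, including releasing mmap_lock, so the arch handler must return before reaching its error and retry paths. A rough sketch of the resulting check order, condensed from the alpha handler above (the handle_error label is a stand-in for each arch's OOM/SIGBUS handling, not actual code):

	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;				/* core mm already dropped the lock */

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR))
		goto handle_error;		/* OOM / SIGBUS / SIGSEGV */

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;			/* lock dropped by core mm before retry */
	}

	mmap_read_unlock(mm);			/* only here is the lock still held */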
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 7511723b7669..a155180d7a83 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -280,3 +280,25 @@ mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW |
+ _PAGE_FOR),
+ [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW),
+ [VM_WRITE] = _PAGE_P(_PAGE_FOE),
+ [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE),
+ [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR),
+ [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW),
+ [VM_EXEC | VM_WRITE] = _PAGE_P(0),
+ [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0),
+ [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW |
+ _PAGE_FOR),
+ [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW),
+ [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE),
+ [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE),
+ [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR),
+ [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0)
+};
+DECLARE_VM_GET_PAGE_PROT
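This alpha hunk is the template for most of the rest of this diff: the sixteen __P000..__S111 macros leave the arch headers, each architecture instead publishes a protection_map[16] table indexed by VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations, and DECLARE_VM_GET_PAGE_PROT emits the default accessor. The generic macro added by this series in include/linux/pgtable.h expands to roughly:

	#define DECLARE_VM_GET_PAGE_PROT				\
	pgprot_t vm_get_page_prot(unsigned long vm_flags)		\
	{								\
		return protection_map[vm_flags &			\
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
	}								\
	EXPORT_SYMBOL(vm_get_page_prot);

The masked value works directly as the array index because VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy bits 0-3, giving exactly the 0..15 range the table covers.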
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index 183d23bc1e00..b23be557403e 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -72,24 +72,6 @@
* This is to enable COW mechanism
*/
/* xwr */
-#define __P000 PAGE_U_NONE
-#define __P001 PAGE_U_R
-#define __P010 PAGE_U_R /* Pvt-W => !W */
-#define __P011 PAGE_U_R /* Pvt-W => !W */
-#define __P100 PAGE_U_X_R /* X => R */
-#define __P101 PAGE_U_X_R
-#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
-#define __P111 PAGE_U_X_R /* Pvt-W => !W */
-
-#define __S000 PAGE_U_NONE
-#define __S001 PAGE_U_R
-#define __S010 PAGE_U_W_R /* W => R */
-#define __S011 PAGE_U_W_R
-#define __S100 PAGE_U_X_R /* X => R */
-#define __S101 PAGE_U_X_R
-#define __S110 PAGE_U_X_W_R /* X => R */
-#define __S111 PAGE_U_X_W_R
-
#ifndef __ASSEMBLY__
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index dad27e4d69ff..5ca59a482632 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -146,6 +146,10 @@ retry:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
/*
* Fault retry nuances, mmap_lock already relinquished by core mm
*/
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 722d26b94307..fce5fa2b4f52 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -74,3 +74,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_U_NONE,
+ [VM_READ] = PAGE_U_R,
+ [VM_WRITE] = PAGE_U_R,
+ [VM_WRITE | VM_READ] = PAGE_U_R,
+ [VM_EXEC] = PAGE_U_X_R,
+ [VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED] = PAGE_U_NONE,
+ [VM_SHARED | VM_READ] = PAGE_U_R,
+ [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
+ [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index cd1f84bb40ae..78a532068fec 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -137,23 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 500612d3da2e..29e2900178a1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -38,10 +38,10 @@
#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE 0x5000
-#define PMD_ORDER 3
+#define PMD_ENTRY_ORDER 3 /* PMD entry size is 2^PMD_ENTRY_ORDER */
#else
#define PG_DIR_SIZE 0x4000
-#define PMD_ORDER 2
+#define PMD_ENTRY_ORDER 2
#endif
.globl swapper_pg_dir
@@ -240,7 +240,7 @@ __create_page_tables:
mov r6, r6, lsr #SECTION_SHIFT
1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base
- str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping
+ str r3, [r4, r5, lsl #PMD_ENTRY_ORDER] @ identity mapping
cmp r5, r6
addlo r5, r5, #1 @ next section
blo 1b
@@ -250,7 +250,7 @@ __create_page_tables:
* set two variables to indicate the physical start and end of the
* kernel.
*/
- add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
ldr r6, =(_end - 1)
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
@@ -259,8 +259,8 @@ __create_page_tables:
str r8, [r5] @ Save physical start of kernel (LE)
#endif
orr r3, r8, r7 @ Add the MMU flags
- add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1: str r3, [r0], #1 << PMD_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
+1: str r3, [r0], #1 << PMD_ENTRY_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
@@ -280,14 +280,14 @@ __create_page_tables:
mov r3, pc
mov r3, r3, lsr #SECTION_SHIFT
orr r3, r7, r3, lsl #SECTION_SHIFT
- add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
- str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+ add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+ str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
ldr r6, =(_edata_loc - 1)
- add r0, r0, #1 << PMD_ORDER
- add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
+ add r0, r0, #1 << PMD_ENTRY_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1: cmp r0, r6
add r3, r3, #1 << SECTION_SHIFT
- strls r3, [r0], #1 << PMD_ORDER
+ strls r3, [r0], #1 << PMD_ENTRY_ORDER
bls 1b
#endif
@@ -297,10 +297,10 @@ __create_page_tables:
*/
mov r0, r2, lsr #SECTION_SHIFT
cmp r2, #0
- ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
+ ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
addne r3, r3, r4
orrne r6, r7, r0, lsl #SECTION_SHIFT
- strne r6, [r3], #1 << PMD_ORDER
+ strne r6, [r3], #1 << PMD_ENTRY_ORDER
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
@@ -319,7 +319,7 @@ __create_page_tables:
addruart r7, r3, r0
mov r3, r3, lsr #SECTION_SHIFT
- mov r3, r3, lsl #PMD_ORDER
+ mov r3, r3, lsl #PMD_ENTRY_ORDER
add r0, r4, r3
mov r3, r7, lsr #SECTION_SHIFT
@@ -349,7 +349,7 @@ __create_page_tables:
* If we're using the NetWinder or CATS, we also need to map
* in the 16550-type serial port for the debug messages
*/
- add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
@@ -359,10 +359,10 @@ __create_page_tables:
* Similar reasons here - for debug. This is
* only for Acorn RiscPC architectures.
*/
- add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
orr r3, r7, #0x02000000
str r3, [r0]
- add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
str r3, [r0]
#endif
#endif
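The head.S change is a pure rename: PMD_ORDER here was the log2 of the size of one PMD entry in bytes (2 for classic 4-byte entries, 3 for 8-byte LPAE entries, per the #defines at the top of the hunk), which collided with the page-allocator sense of "order". The arithmetic is untouched; for example, the identity-mapping store

	str	r3, [r4, r5, lsl #PMD_ENTRY_ORDER]

still writes to the address r4 + r5 * (1 << PMD_ENTRY_ORDER), i.e. PMD entry number r5 of the table based at r4.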
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index c30b689bec2e..14eecaaf295f 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -237,7 +237,7 @@ static int __init test_size_treshold(void)
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
- user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+ user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
if (!user_ptr)
goto no_vmap;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a062e07516dd..46cccd6bf705 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -322,6 +322,10 @@ retry:
return 0;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (!(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cd17e324aa51..a49f0b9c0f75 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -412,6 +412,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = __PAGE_NONE,
+ [VM_READ] = __PAGE_READONLY,
+ [VM_WRITE] = __PAGE_COPY,
+ [VM_WRITE | VM_READ] = __PAGE_COPY,
+ [VM_EXEC] = __PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC,
+ [VM_SHARED] = __PAGE_NONE,
+ [VM_SHARED | VM_READ] = __PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = __PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
/*
* Adjust the PMD section entries according to the CPU in use.
*/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6bb50124a7e9..571cc234d0b3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -45,7 +45,6 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1fd2846dbefe..d20f5da2d76f 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -46,9 +46,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET
extern pte_t huge_ptep_get(pte_t *ptep);
-extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz);
-#define set_huge_swap_pte_at set_huge_swap_pte_at
void __init arm64_hugetlb_cma_reserve(void);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 4e4c171f070b..9dd08cd339c3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -381,6 +381,15 @@ static inline bool defer_reserve_crashkernel(void)
# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif
+/*
+ * Memory regions marked with the MEMBLOCK_NOMAP flag (for example, memory
+ * of the EFI_UNUSABLE_MEMORY type) may divide a contiguous memory block into
+ * multiple parts. As a result, the number of memory regions can be large.
+ */
+#ifdef CONFIG_EFI
+#define INIT_MEMBLOCK_MEMORY_REGIONS (INIT_MEMBLOCK_REGIONS * 8)
+#endif
+
#include <asm-generic/memory_model.h>
#endif /* __ASM_MEMORY_H */
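The 8x multiplier reserves more static slots for boot-time memory regions on EFI systems, where NOMAP carve-outs can shatter one physical bank into many memblock entries. It only takes effect through the companion memblock change in this series, which, roughly sketched from mm/memblock.c, sizes the static memory-region array with an overridable constant:

	#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
	#define INIT_MEMBLOCK_MEMORY_REGIONS	INIT_MEMBLOCK_REGIONS
	#endif

	static struct memblock_region
		memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;

Architectures that do not define the override keep the old array size.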
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 62e0ebeed720..9b165117a454 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -89,24 +89,6 @@ extern bool arm64_use_ng_mappings;
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY
-#define __P011 PAGE_READONLY
-#define __P100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_READONLY_EXEC
-#define __P111 PAGE_READONLY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PGTABLE_PROT_H */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cdf3ffa0c223..c33f1fad2745 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -608,6 +608,10 @@ retry:
return 0;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (fault & VM_FAULT_RETRY) {
mm_flags |= FAULT_FLAG_TRIED;
goto retry;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 5307ffdefb8d..0795028f017c 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -241,6 +241,13 @@ static void clear_flush(struct mm_struct *mm,
flush_tlb_range(&vma, saddr, addr);
}
+static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
+{
+ VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
+
+ return page_folio(pfn_to_page(swp_offset(entry)));
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -250,11 +257,16 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
unsigned long pfn, dpfn;
pgprot_t hugeprot;
- /*
- * Code needs to be expanded to handle huge swap and migration
- * entries. Needed for HUGETLB and MEMORY_FAILURE.
- */
- WARN_ON(!pte_present(pte));
+ if (!pte_present(pte)) {
+ struct folio *folio;
+
+ folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
+ ncontig = num_contig_ptes(folio_size(folio), &pgsize);
+
+ for (i = 0; i < ncontig; i++, ptep++)
+ set_pte_at(mm, addr, ptep, pte);
+ return;
+ }
if (!pte_cont(pte)) {
set_pte_at(mm, addr, ptep, pte);
@@ -272,18 +284,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
-void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- int i, ncontig;
- size_t pgsize;
-
- ncontig = num_contig_ptes(sz, &pgsize);
-
- for (i = 0; i < ncontig; i++, ptep++)
- set_pte(ptep, pte);
-}
-
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
@@ -371,6 +371,28 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
}
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+ unsigned long hp_size = huge_page_size(h);
+
+ switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ return PGDIR_SIZE - PUD_SIZE;
+#endif
+ case CONT_PMD_SIZE:
+ return PUD_SIZE - CONT_PMD_SIZE;
+ case PMD_SIZE:
+ return PUD_SIZE - PMD_SIZE;
+ case CONT_PTE_SIZE:
+ return PMD_SIZE - CONT_PTE_SIZE;
+ default:
+ break;
+ }
+
+ return 0UL;
+}
+
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
size_t pagesize = 1UL << shift;
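hugetlb_mask_last_page() is new infrastructure from the hugetlb optimization series: for each supported huge page size it reports how much address space a single missing page-table entry accounts for, so walkers can skip the whole hole rather than probing one huge page at a time. The caller pattern in the generic hugetlb code looks roughly like this (a sketch, with per-call details elided):

	unsigned long last_addr_mask = hugetlb_mask_last_page(h);
	unsigned long sz = huge_page_size(h);

	for (; address < end; address += sz) {
		pte_t *ptep = huge_pte_offset(mm, address, sz);
		if (!ptep) {
			/*
			 * No table at this level: jump to the last huge
			 * page covered by the missing entry; the loop
			 * increment then lands on the next boundary.
			 */
			address |= last_addr_mask;
			continue;
		}
		/* ... process the present huge PTE ... */
	}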
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 78e9490f748d..8f5b7ce857ed 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -13,6 +13,27 @@
#include <asm/cpufeature.h>
#include <asm/page.h>
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ /* PAGE_EXECONLY if Enhanced PAN */
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
* away in the future.
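Note what is absent here compared with the other architectures: no DECLARE_VM_GET_PAGE_PROT. arm64 keeps its own vm_get_page_prot() in this file (to fold in attributes such as BTI's PTE_GP), and it also patches the table at boot, which is why protection_map is __ro_after_init rather than const. The boot-time fixup behind the two "PAGE_EXECONLY if Enhanced PAN" comments is roughly:

	static int __init adjust_protection_map(void)
	{
		/*
		 * With Enhanced PAN, execute-only permissions can be
		 * honoured: there is no PAN override for such mappings.
		 */
		if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
			protection_map[VM_EXEC] = PAGE_EXECONLY;
			protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
		}
		return 0;
	}
	arch_initcall(adjust_protection_map);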
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bbbd0698b397..7d57e5da0914 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_t *ret;
pgd_t *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long *)ret);
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index bbe245117777..c3d9b92cbe61 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -18,12 +18,10 @@
/*
* C-SKY is two-level paging structure:
*/
-#define PGD_ORDER 0
-#define PTE_ORDER 0
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PMD 1
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -77,24 +75,6 @@
#define MAX_SWAPFILES_CHECK() \
BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_READ
-#define __P011 PAGE_READ
-#define __P100 PAGE_READ
-#define __P101 PAGE_READ
-#define __P110 PAGE_READ
-#define __P111 PAGE_READ
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_WRITE
-#define __S011 PAGE_WRITE
-#define __S100 PAGE_READ
-#define __S101 PAGE_READ
-#define __S110 PAGE_WRITE
-#define __S111 PAGE_WRITE
-
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 7215a46b6b8e..e15f736cca4b 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -285,6 +285,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bf2004aa811a..bde7cabd23df 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -197,3 +197,23 @@ void __init fixaddr_init(void)
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READ,
+ [VM_WRITE] = PAGE_READ,
+ [VM_WRITE | VM_READ] = PAGE_READ,
+ [VM_EXEC] = PAGE_READ,
+ [VM_EXEC | VM_READ] = PAGE_READ,
+ [VM_EXEC | VM_WRITE] = PAGE_READ,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITE,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE,
+ [VM_SHARED | VM_EXEC] = PAGE_READ,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 0610724d6a28..f7048c18b6f9 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -126,33 +126,6 @@ extern unsigned long _dflt_cache_att;
*/
#define CACHEDEF (CACHE_DEFAULT << 6)
-/* Private (copy-on-write) page protections. */
-#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
-#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
-#define __P010 __P000 /* Write-only copy-on-write */
-#define __P011 __P001 /* Read/Write copy-on-write */
-#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_EXECUTE | CACHEDEF)
-#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
- _PAGE_READ | CACHEDEF)
-#define __P110 __P100 /* Write/execute copy-on-write */
-#define __P111 __P101 /* Read/Write/Execute, copy-on-write */
-
-/* Shared page protections. */
-#define __S000 __P000
-#define __S001 __P001
-#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_WRITE | CACHEDEF)
-#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
- _PAGE_WRITE | CACHEDEF)
-#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_EXECUTE | CACHEDEF)
-#define __S101 __P101
-#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
- _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
/* HUGETLB not working currently */
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 3167a3b5c97b..146115c9de61 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -234,3 +234,45 @@ void __init setup_arch_memory(void)
* which is called by start_kernel() later on in the process
*/
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ CACHEDEF),
+ [VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | CACHEDEF),
+ [VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_WRITE | CACHEDEF),
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | _PAGE_WRITE |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_READ |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_EXECUTE | _PAGE_WRITE |
+ CACHEDEF),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
+ _PAGE_READ | _PAGE_EXECUTE |
+ _PAGE_WRITE | CACHEDEF)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 4fac4b9eb316..f73c7cbfe326 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -96,6 +96,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
/* The most common case -- we are done. */
if (likely(!(fault & VM_FAULT_ERROR))) {
if (fault & VM_FAULT_RETRY) {
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7aa8f2330fb1..6925e28ae61d 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -161,24 +161,6 @@
* attempts to write to the page.
*/
/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
-#define __P011 PAGE_READONLY /* ditto */
-#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
-#define __S011 PAGE_SHARED
-#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 07379d1a227f..ef78c2d66cdd 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -139,6 +139,10 @@ retry:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 855d949d81df..fc4e4217e87f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
}
@@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ [VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_X_RX),
+ [VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RX),
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_X_RX),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RX),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RWX),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RWX)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index fc2465892a60..ae15610a9427 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -407,7 +407,7 @@ config ARCH_SPARSEMEM_ENABLE
Say Y to support efficient handling of sparse physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
+ See <file:Documentation/mm/numa.rst> for more.
config ARCH_ENABLE_THP_MIGRATION
def_bool y
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index b0a57b25c131..4bfeb3c9c9ac 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -66,12 +66,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
pmd_t *pmd;
struct page *pg;
- pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ pg = alloc_page(GFP_KERNEL_ACCOUNT);
if (!pg)
return NULL;
if (!pgtable_pmd_page_ctor(pg)) {
- __free_pages(pg, PMD_ORDER);
+ __free_page(pg);
return NULL;
}
@@ -90,7 +90,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
- pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ pud = (pud_t *) __get_free_page(GFP_KERNEL);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 3badd112d9ab..9ca147a29bab 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -83,25 +83,6 @@
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC)
#define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC)
-
-#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
-#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-
-#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ)
-#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC)
-#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
-#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE)
-#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT)
-#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
-#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE)
-
#ifndef __ASSEMBLY__
#define pgprot_noncached pgprot_noncached
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index d9e86cfa53e2..e03443abaf7d 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -21,41 +21,36 @@
#include <asm-generic/pgtable-nop4d.h>
#endif
-#define PGD_ORDER 0
-#define PUD_ORDER 0
-#define PMD_ORDER 0
-#define PTE_ORDER 0
-
#if CONFIG_PGTABLE_LEVELS == 2
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3)
+#define PTRS_PER_PGD (PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
-#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3)
+#define PTRS_PER_PUD (PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3)
+#define PTRS_PER_PMD (PAGE_SIZE >> 3)
#endif
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) >> 3)
+#define PTRS_PER_PTE (PAGE_SIZE >> 3)
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
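All of LoongArch's *_ORDER constants were hardwired to 0 (every table level is exactly one page), so dropping them changes no values, only the formulas. As a worked check with the platform's default 16KB pages (PAGE_SHIFT = 14) and 8-byte entries, the simplified 3-level definitions above evaluate to:

	PTRS_PER_PTE = PAGE_SIZE >> 3;			/* 16384 / 8 = 2048 entries   */
	PMD_SHIFT    = PAGE_SHIFT + (PAGE_SHIFT - 3);	/* 14 + 11 = 25               */
	PMD_SIZE     = 1UL << PMD_SHIFT;		/* 32 MiB mapped per PMD entry */
	PGDIR_SHIFT  = PMD_SHIFT + (PAGE_SHIFT - 3);	/* 25 + 11 = 36               */
	VA_BITS      = PGDIR_SHIFT + (PAGE_SHIFT - 3);	/* 36 + 11 = 47               */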
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index 20cd9e16a95a..d256b81c397a 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -189,12 +189,6 @@ void output_mm_defines(void)
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
- DEFINE(_PGD_ORDER, PGD_ORDER);
-#ifndef __PAGETABLE_PMD_FOLDED
- DEFINE(_PMD_ORDER, PMD_ORDER);
-#endif
- DEFINE(_PTE_ORDER, PTE_ORDER);
- BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
index 9e5ce5aa73f7..e8c68dcf6ab2 100644
--- a/arch/loongarch/mm/cache.c
+++ b/arch/loongarch/mm/cache.c
@@ -139,3 +139,49 @@ void cpu_cache_init(void)
shm_align_mask = PAGE_SIZE - 1;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER |
+ _PAGE_PROTNONE | _PAGE_NO_EXEC |
+ _PAGE_NO_READ),
+ [VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER |
+ _PAGE_PROTNONE | _PAGE_NO_EXEC |
+ _PAGE_NO_READ),
+ [VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC | _PAGE_WRITE),
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC | _PAGE_WRITE),
+ [VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_WRITE),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_WRITE)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 0569647152e9..ee179ccd3e3f 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -13,7 +13,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long)ret);
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index de19fa2d7f0d..39743337999e 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -83,7 +83,7 @@ vmalloc_done_load:
bnez t0, tlb_huge_update_load
csrrd t0, LOONGARCH_CSR_BADV
- srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ srli.d t0, t0, PAGE_SHIFT
andi t0, t0, (PTRS_PER_PTE - 1)
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
@@ -247,7 +247,7 @@ vmalloc_done_store:
bnez t0, tlb_huge_update_store
csrrd t0, LOONGARCH_CSR_BADV
- srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ srli.d t0, t0, PAGE_SHIFT
andi t0, t0, (PTRS_PER_PTE - 1)
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
@@ -414,7 +414,7 @@ vmalloc_done_modify:
bnez t0, tlb_huge_update_modify
csrrd t0, LOONGARCH_CSR_BADV
- srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER)
+ srli.d t0, t0, PAGE_SHIFT
andi t0, t0, (PTRS_PER_PTE - 1)
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 94f38d76e278..b619b22823f8 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -86,65 +86,6 @@
| CF_PAGE_READABLE \
| CF_PAGE_DIRTY)
-/*
- * Page protections for initialising protection_map. See mm/mmap.c
- * for use. In general, the bit positions are xwr, and P-items are
- * private, the S-items are shared.
- */
-#define __P000 PAGE_NONE
-#define __P001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __P010 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE)
-#define __P011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE)
-#define __P100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __P101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __P110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-#define __P111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-
-#define __S000 PAGE_NONE
-#define __S001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __S010 PAGE_SHARED
-#define __S011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE)
-#define __S100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __S101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __S110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_EXEC)
-#define __S111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-
#define PTE_MASK PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 7c9b56e2a750..7ac3d64c6b33 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -76,35 +76,6 @@ extern unsigned long mm_cachebits;
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)
-/* Alternate definitions that are compile time constants, for
- initializing protection_map. The cachebits are fixed later. */
-#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
-#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
-
-/*
- * The m68k can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
- */
-#define __P000 PAGE_NONE_C
-#define __P001 PAGE_READONLY_C
-#define __P010 PAGE_COPY_C
-#define __P011 PAGE_COPY_C
-#define __P100 PAGE_READONLY_C
-#define __P101 PAGE_READONLY_C
-#define __P110 PAGE_COPY_C
-#define __P111 PAGE_COPY_C
-
-#define __S000 PAGE_NONE_C
-#define __S001 PAGE_READONLY_C
-#define __S010 PAGE_SHARED_C
-#define __S011 PAGE_SHARED_C
-#define __S100 PAGE_READONLY_C
-#define __S101 PAGE_READONLY_C
-#define __S110 PAGE_SHARED_C
-#define __S111 PAGE_SHARED_C
-
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
/*
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 5e4e753f0d24..90d57e537eb1 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -66,29 +66,6 @@
| SUN3_PAGE_SYSTEM \
| SUN3_PAGE_NOCACHE)
-/*
- * Page protections for initialising protection_map. The sun3 has only two
- * protection settings, valid (implying read and execute) and writeable. These
- * are as close as we can get...
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
/* Use these fake page-protections on PMDs. */
#define SUN3_PMD_VALID (0x00000001)
#define SUN3_PMD_MASK (0x0000003F)
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 71aa9f6315dc..4d2837eb3e2a 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -141,6 +141,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return 0;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 6f1f25125294..70aa0979e027 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -234,3 +234,58 @@ void steal_context(void)
destroy_context(mm);
}
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE),
+ [VM_WRITE] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_WRITABLE),
+ [VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_WRITABLE),
+ [VM_EXEC] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_EXEC),
+ [VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_EXEC),
+ [VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_WRITABLE |
+ CF_PAGE_EXEC),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_WRITABLE |
+ CF_PAGE_EXEC),
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE),
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_SHARED),
+ [VM_SHARED | VM_EXEC] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_EXEC),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_EXEC),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_SHARED |
+ CF_PAGE_EXEC),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID |
+ CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE |
+ CF_PAGE_SHARED |
+ CF_PAGE_EXEC)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index df7f797c908a..2a375637e007 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -383,6 +383,35 @@ static void __init map_node(int node)
}
/*
+ * Alternate definitions that are compile time constants, for
+ * initializing protection_map. The cachebits are fixed later.
+ */
+#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE_C,
+ [VM_READ] = PAGE_READONLY_C,
+ [VM_WRITE] = PAGE_COPY_C,
+ [VM_WRITE | VM_READ] = PAGE_COPY_C,
+ [VM_EXEC] = PAGE_READONLY_C,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_C,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_C,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_C,
+ [VM_SHARED] = PAGE_NONE_C,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_C,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED_C,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_C,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_C,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_C,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_C,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_C
+};
+DECLARE_VM_GET_PAGE_PROT
+
+/*
* paging_init() continues the virtual memory environment setup which
* was begun by the code in arch/head.S.
*/
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index dad494224497..b619d0d4319c 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -95,3 +95,23 @@ void __init paging_init(void)
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 0c72646370e1..ba348e997dbb 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -204,23 +204,6 @@ extern pte_t *va_to_pte(unsigned long address);
* We consider execute permission the same as read.
* Also, write permissions imply read permissions.
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY_X
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY_X
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY_X
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED_X
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED_X
#ifndef __ASSEMBLY__
/*
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index a9626e6a68af..5c40c3ebe52f 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -222,6 +222,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index f4e503461d24..353fabdfcbc5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -285,3 +285,23 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
return p;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY_X,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 867e9c3db76e..796035784c73 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -51,7 +51,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGD_ORDER);
+ free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
}
#define __pte_free_tlb(tlb,pte,address) \
@@ -67,12 +67,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
pmd_t *pmd;
struct page *pg;
- pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER);
+ pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_TABLE_ORDER);
if (!pg)
return NULL;
if (!pgtable_pmd_page_ctor(pg)) {
- __free_pages(pg, PMD_ORDER);
+ __free_pages(pg, PMD_TABLE_ORDER);
return NULL;
}
@@ -91,7 +91,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
- pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_TABLE_ORDER);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 95df9c293d8d..495c603c1a30 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -62,9 +62,9 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2 - 1)
#else
-# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2)
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
@@ -75,21 +75,20 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
* we don't really have any PUD/PMD directory physically.
*/
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
+# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
-# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif
-#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER aieeee_attempt_to_allocate_pmd
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER (__PGD_TABLE_ORDER >= 0 ? __PGD_TABLE_ORDER : 0)
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd
#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
-# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
+# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t) / 2)
#else
-# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 41921acdc9d8..a259ca4d1272 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -42,24 +42,24 @@
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
-#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
+#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#else
/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
# ifdef __PAGETABLE_PUD_FOLDED
-# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
# endif
#endif
#ifndef __PAGETABLE_PUD_FOLDED
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_TABLE_ORDER - 3))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
@@ -85,56 +85,51 @@
*/
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
-# define PGD_ORDER 0
-# define PUD_ORDER 0
+# define PGD_TABLE_ORDER 0
+# define PUD_TABLE_ORDER 0
# else
-# define PGD_ORDER 1
-# define PUD_ORDER aieeee_attempt_to_allocate_pud
+# define PGD_TABLE_ORDER 1
+# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
# endif
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
-#define PGD_ORDER 0
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
-#define PGD_ORDER 1
+#define PGD_TABLE_ORDER 1
#else
-#define PGD_ORDER 0
+#define PGD_TABLE_ORDER 0
#endif
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
-#define PGD_ORDER 0
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
-#define PTE_ORDER 0
+#define PGD_TABLE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
+#define PMD_TABLE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
-#define PGD_ORDER 0
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#define PGD_TABLE_ORDER 0
+#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
-#define PMD_ORDER 0
+#define PMD_TABLE_ORDER 0
#else
-#define PMD_ORDER aieeee_attempt_to_allocate_pmd
+#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd
#endif
-#define PTE_ORDER 0
#endif
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD ((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
-#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
-#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
+#define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))
#endif
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
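
Two mechanical cleanups run through the MIPS headers above: PTE_ORDER, which
could only ever be defined as 0, is dropped, so every PAGE_SIZE << PTE_ORDER
collapses to plain PAGE_SIZE; and the surviving PGD/PUD/PMD order macros gain
a TABLE infix to make clear they are allocation orders for page-table pages,
not huge-page orders. Worked through for CONFIG_PAGE_SIZE_4KB with 8-byte
table entries (illustrative numbers, not taken from a build):

	PTRS_PER_PTE = PAGE_SIZE / sizeof(pte_t)
	             = 4096 / 8 = 512
	PTRS_PER_PGD = (PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t)
	             = (4096 << 1) / 8 = 1024	(when PGD_TABLE_ORDER == 1)
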
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 374c6322775d..6caec386ad2f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -41,28 +41,6 @@ struct vm_area_struct;
* by reasonable means..
*/
-/*
- * Dummy values to fill the table in mmap.c
- * The real values will be generated at runtime
- */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 04ca75278f02..c4501897b870 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -196,11 +196,6 @@ void output_mm_defines(void)
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
- DEFINE(_PGD_ORDER, PGD_ORDER);
-#ifndef __PAGETABLE_PMD_FOLDED
- DEFINE(_PMD_ORDER, PMD_ORDER);
-#endif
- DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index d5f7362e8c24..dc023a979803 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -230,7 +230,7 @@ void mips_mt_set_cpuoptions(void)
struct class *mt_class;
-static int __init mt_init(void)
+static int __init mips_mt_init(void)
{
struct class *mtc;
@@ -243,4 +243,4 @@ static int __init mt_init(void)
return 0;
}
-subsys_initcall(mt_init);
+subsys_initcall(mips_mt_init);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 1bfd1b501d82..db17e870bdff 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -80,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret)
kvm_pgd_init(ret);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7be7240f7703..11b3e7ddafd5 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -159,6 +159,9 @@ EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
+static pgprot_t protection_map[16] __ro_after_init;
+DECLARE_VM_GET_PAGE_PROT
+
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index b08bc556d30d..a27045f5a556 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -162,6 +162,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
index 05560b042d82..3b7590660a04 100644
--- a/arch/mips/mm/pgtable.c
+++ b/arch/mips/mm/pgtable.c
@@ -12,7 +12,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init((unsigned long)ret);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 8dbbd99fc7e8..a57519ae96b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -818,7 +818,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* everything but the lower xuseg addresses goes down
* the module_alloc/vmalloc path.
*/
- uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, ptr, label_vmalloc);
} else {
uasm_il_bltz(p, r, tmp, label_vmalloc);
@@ -1127,7 +1127,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
uasm_i_dsrl_safe(p, scratch, tmp,
- PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_bnez(p, r, scratch, label_vmalloc);
if (pgd_reg == -1) {
@@ -1493,12 +1493,12 @@ static void setup_pw(void)
#endif
pgd_i = PGDIR_SHIFT; /* 1st level PGD */
#ifndef __PAGETABLE_PMD_FOLDED
- pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
pmd_i = PMD_SHIFT; /* 2nd level PMD */
pmd_w = PMD_SHIFT - PAGE_SHIFT;
#else
- pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+ pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
#endif
pt_i = PAGE_SHIFT; /* 3rd level PTE */
@@ -1536,7 +1536,7 @@ static void build_loongson3_tlb_refill_handler(void)
if (check_for_high_segbits) {
uasm_i_dmfc0(&p, K0, C0_BADVADDR);
- uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
uasm_il_beqz(&p, &r, K1, label_vmalloc);
uasm_i_nop(&p);
@@ -2065,7 +2065,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
UASM_i_LW(p, wr.r2, 0, wr.r2);
- UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
@@ -2611,7 +2611,7 @@ void build_tlb_refill_handler(void)
check_pabits();
#ifdef CONFIG_64BIT
- check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
#endif
if (cpu_has_3kex) {
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 262d0609268c..b3d45e815295 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -40,24 +40,8 @@ struct mm_struct;
*/
/* Remove W bit on private pages for COW support */
-#define __P000 MKP(0, 0, 0)
-#define __P001 MKP(0, 0, 1)
-#define __P010 MKP(0, 0, 0) /* COW */
-#define __P011 MKP(0, 0, 1) /* COW */
-#define __P100 MKP(1, 0, 0)
-#define __P101 MKP(1, 0, 1)
-#define __P110 MKP(1, 0, 0) /* COW */
-#define __P111 MKP(1, 0, 1) /* COW */
/* Shared pages can have exact HW mapping */
-#define __S000 MKP(0, 0, 0)
-#define __S001 MKP(0, 0, 1)
-#define __S010 MKP(0, 1, 0)
-#define __S011 MKP(0, 1, 1)
-#define __S100 MKP(1, 0, 0)
-#define __S101 MKP(1, 0, 1)
-#define __S110 MKP(1, 1, 0)
-#define __S111 MKP(1, 1, 1)
/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
@@ -68,11 +52,8 @@ struct mm_struct;
#define PAGE_COPY MKP(0, 0, 1)
-#define PGD_ORDER 0
-#define PTE_ORDER 0
-
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
#define USER_PTRS_PER_PGD \
(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index a32f14cd72f2..edaca0a6c1c1 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -139,6 +139,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 613fcaa5988a..7bc82ee889c9 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -78,9 +78,8 @@ void __init mmu_init(void)
flush_tlb_all();
}
-#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
static struct page *kuser_page[1];
static int alloc_kuser_page(void)
@@ -124,3 +123,23 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = MKP(0, 0, 0),
+ [VM_READ] = MKP(0, 0, 1),
+ [VM_WRITE] = MKP(0, 0, 0),
+ [VM_WRITE | VM_READ] = MKP(0, 0, 1),
+ [VM_EXEC] = MKP(1, 0, 0),
+ [VM_EXEC | VM_READ] = MKP(1, 0, 1),
+ [VM_EXEC | VM_WRITE] = MKP(1, 0, 0),
+ [VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 0, 1),
+ [VM_SHARED] = MKP(0, 0, 0),
+ [VM_SHARED | VM_READ] = MKP(0, 0, 1),
+ [VM_SHARED | VM_WRITE] = MKP(0, 1, 0),
+ [VM_SHARED | VM_WRITE | VM_READ] = MKP(0, 1, 1),
+ [VM_SHARED | VM_EXEC] = MKP(1, 0, 0),
+ [VM_SHARED | VM_EXEC | VM_READ] = MKP(1, 0, 1),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = MKP(1, 1, 0),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 9b587fd592dd..7c76e8a7447a 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -54,7 +54,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index c3abbf71e09f..dcae8aea132f 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -176,24 +176,6 @@ extern void paging_init(void);
__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
| _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY_X
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY_X
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY_X
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED_X
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED_X
-
/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page[2048];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 53b760af3bb7..b4762d66e9ef 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -165,6 +165,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 3a021ab6f1ae..d531ab82be12 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -208,3 +208,23 @@ void __init mem_init(void)
mem_init_done = 1;
return;
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY_X,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 54b63374579b..e3e142b1c5c5 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -20,18 +20,18 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+ pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (unlikely(pgd == NULL))
return NULL;
- memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+ memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER);
return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGD_ORDER);
+ free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
}
#if CONFIG_PGTABLE_LEVELS == 3
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 69765a6dbe89..df7b931865d2 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -118,9 +118,9 @@ extern void __update_cache(pte_t pte);
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_TABLE_ORDER 1
-#define PGD_ORDER 0
+#define PGD_TABLE_ORDER 0
#else
-#define PGD_ORDER 1
+#define PGD_TABLE_ORDER 1
#endif
/* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -144,10 +144,10 @@ extern void __update_cache(pte_t pte);
/* Definitions for 1st level */
#define PGDIR_SHIFT (PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
-#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
+#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
#else
-#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
+#define BITS_PER_PGD (PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -271,24 +271,6 @@ extern void __update_cache(pte_t pte);
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 __P000 /* copy on write */
-#define __P011 __P001 /* copy on write */
-#define __P100 PAGE_EXECREAD
-#define __P101 PAGE_EXECREAD
-#define __P110 __P100 /* copy on write */
-#define __P111 __P101 /* copy on write */
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_WRITEONLY
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECREAD
-#define __S101 PAGE_EXECREAD
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
-
extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c9d48bcdc933..869204e97ec9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -311,6 +311,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We hit a shared mapping outside of the file, or some
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 8e2f7b8ceb70..b0c43f3b0a5f 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -871,3 +871,23 @@ void flush_tlb_all(void)
spin_unlock(&sid_lock);
}
#endif
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_NONE,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ [VM_EXEC] = PAGE_EXECREAD,
+ [VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 88a738117312..af612d4c4bcc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,7 +140,6 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb9d5fd39d7f..392ff48f77df 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1273,7 +1273,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
* should return true.
* We should not call this on a hugetlb entry. We should check for HugeTLB
* entry using vma->vm_flags
- * The page table walk rule is explained in Documentation/vm/transhuge.rst
+ * The page table walk rule is explained in Documentation/mm/transhuge.rst
*/
static inline int pmd_trans_huge(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d564d0ecd4cd..33f4bf8d22b0 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,25 +20,6 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
-/* Note due to the way vm flags are laid out, the bits are XWR */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
#ifndef __ASSEMBLY__
#ifndef MAX_PTRS_PER_PGD
@@ -79,6 +60,7 @@ extern void paging_init(void);
void poking_init(void);
extern unsigned long ioremap_bot;
+extern const pgprot_t protection_map[16];
/*
* kern_addr_valid is intended to indicate whether an address is a valid
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index c1cb21a00884..7c507fb48182 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -65,6 +65,11 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
ret = 0;
*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (*flt & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d53fed4eccbd..014005428687 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -511,6 +511,10 @@ retry:
if (fault_signal_pending(fault, regs))
return user_mode(regs) ? 0 : SIGBUS;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto out;
+
/*
* Handle the retry right now, the mmap_lock has been released in that
* case.
@@ -525,6 +529,7 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);
+out:
/*
* Major/minor page fault accounting.
*/
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index e6166b71d36d..cb2dcdb18f8e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -472,3 +472,27 @@ out:
return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
+
+/* Note due to the way vm flags are laid out, the bits are XWR */
+const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_X,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+
+#ifndef CONFIG_PPC_BOOK3S_64
+DECLARE_VM_GET_PAGE_PROT
+#endif
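
powerpc is the one architecture in this series that cannot take the generic
accessor unconditionally: Book3S-64 folds extra bits (protection keys, SAO)
into the base protection, so it keeps its own vm_get_page_prot() and the
table is exported via the extern declaration added to asm/pgtable.h above,
rather than being static. A simplified sketch of why, under the assumption
that the Book3S-64 accessor looks roughly like this (helper names
approximate, not the exact upstream body):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		unsigned long prot = pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]);

		if (vm_flags & VM_SAO)
			prot |= _PAGE_SAO;	/* strong access ordering */

		/* pkey bits folded in on top of the shared table (assumed helper) */
		return __pgprot(prot | vmflag_to_pte_pkey_bits(vm_flags));
	}
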
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 5dbd6610729b..7ec936910a96 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -186,26 +186,6 @@ extern struct pt_alloc_ops pt_ops __initdata;
extern pgd_t swapper_pg_dir[];
-/* MAP_PRIVATE permissions: xwr (copy-on-write) */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXEC
-#define __P101 PAGE_READ_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_READ_EXEC
-
-/* MAP_SHARED permissions: xwr */
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXEC
-#define __S101 PAGE_READ_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 40694f0cab9e..f2fbd1400b7c 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -326,6 +326,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index d466ec670e1f..a88b7dc31a68 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -288,6 +288,26 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READ,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READ_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READ,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long addr = __fix_to_virt(idx);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index cf81acf3879c..f019df19884d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr)
* implies read permission.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RX
-#define __P101 PAGE_RX
-#define __P110 PAGE_RX
-#define __P111 PAGE_RX
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RX
-#define __S101 PAGE_RX
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
/*
* Segment entry (large page) protection definitions.
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ee7871f770fb..13449941516c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -433,6 +433,17 @@ retry:
goto out_up;
goto out;
}
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED) {
+ if (gmap) {
+ mmap_read_lock(mm);
+ goto out_gmap;
+ }
+ fault = 0;
+ goto out;
+ }
+
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
@@ -452,6 +463,7 @@ retry:
mmap_read_lock(mm);
goto retry;
}
+out_gmap:
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d545f5c39f7e..5980ce348832 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -188,3 +188,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_RO,
+ [VM_WRITE] = PAGE_RO,
+ [VM_WRITE | VM_READ] = PAGE_RO,
+ [VM_EXEC] = PAGE_RX,
+ [VM_EXEC | VM_READ] = PAGE_RX,
+ [VM_EXEC | VM_WRITE] = PAGE_RX,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_RO,
+ [VM_SHARED | VM_WRITE] = PAGE_RW,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW,
+ [VM_SHARED | VM_EXEC] = PAGE_RX,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index d7ddb1ec86a0..6fb9ec54cf9b 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -89,23 +89,6 @@ static inline unsigned long phys_addr_mask(void)
* completely separate permission bits for user and kernel space.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXECREAD
-#define __P101 PAGE_EXECREAD
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_WRITEONLY
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECREAD
-#define __S101 PAGE_EXECREAD
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
typedef pte_t *pte_addr_t;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index e175667b1363..acd2f5e50bfc 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -485,6 +485,10 @@ good_area:
if (mm_fault_error(regs, error_code, address, fault))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6a1a1297baae..b82199878b45 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -19,6 +19,26 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_EXECREAD,
+ [VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
/*
* To avoid cache aliases, we map the shared page with same color.
*/
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 02f0a6084f04..1c852bb530ec 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,7 +86,6 @@ config SPARC64
select PERF_USE_VMALLOC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
- select ARCH_HAS_VM_GET_PAGE_PROT
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 4866625da314..8ff549004fac 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -64,25 +64,6 @@ void paging_init(void);
extern unsigned long ptr_in_current_pgd;
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
-
/* First physical page can be anywhere, the following is needed so that
* va-->pa and vice versa conversions work properly without performance
* hit for all __pa()/__va() operations.
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 4679e45c8348..a779418ceba9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -187,25 +187,6 @@ bool kern_addr_valid(unsigned long addr);
#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
-/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
#ifndef __ASSEMBLY__
pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index ad569d9bd124..91259f291c54 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -190,6 +190,10 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 253e07043298..4acc12eafbf5 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -427,6 +427,10 @@ good_area:
if (fault_signal_pending(fault, regs))
goto exit_exception;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto lock_released;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -449,6 +453,7 @@ good_area:
}
mmap_read_unlock(mm);
+lock_released:
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 1e9f577f084d..d88e774c8eb4 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -302,3 +302,23 @@ void sparc_flush_page_to_ram(struct page *page)
__flush_page_to_ram(vaddr);
}
EXPORT_SYMBOL(sparc_flush_page_to_ram);
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f6174df2d5af..d6faee23c77d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2634,6 +2634,9 @@ void vmemmap_free(unsigned long start, unsigned long end,
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+static pgprot_t protection_map[16] __ro_after_init;
+
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 167e236d9bb8..66bc3f99d9be 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -68,23 +68,6 @@ extern unsigned long end_iomem;
* Also, write permissions imply read permissions. This is the closest we can
* get..
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
/*
* ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 276a1f0b91f1..38d5a71a579b 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -216,3 +216,23 @@ void *uml_kmalloc(int size, int flags)
{
return kmalloc(size, flags);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY,
+ [VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index d1d5d0be0308..d3ce21c4ca32 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -76,6 +76,10 @@ good_area:
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
goto out_nosemaphore;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cca4c0ac39cc..f9920f1341c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -94,7 +94,6 @@ config X86
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 88ceaf3648b3..72ca90552b6a 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -89,6 +89,8 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void add_encrypt_protection_map(void);
+
/*
* The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
* writing to or comparing values from the cr3 register. Having the
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index bdaf8391e2e0..aa174fed3a71 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -230,25 +230,6 @@ enum page_cache_mode {
#endif /* __ASSEMBLY__ */
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
/*
* early identity mapping pte attrib macros.
*/
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4236a28b9be5..06ac8c7cef67 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6740,7 +6740,7 @@ int kvm_mmu_vendor_module_init(void)
if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
goto out;
- ret = register_shrinker(&mmu_shrinker);
+ ret = register_shrinker(&mmu_shrinker, "x86-mmu");
if (ret)
goto out;
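
The register_shrinker() change is a dependency picked up from the shrinker
debugfs work elsewhere in this merge: the function now takes a name, so
callers such as the KVM MMU shrinker grow a second argument. The name is what
identifies the shrinker in the new debugfs interface; a caller-side sketch
(hypothetical shrinker, real two-argument signature):

	static struct shrinker my_shrinker = {
		.count_objects = my_shrink_count,
		.scan_objects  = my_shrink_scan,
		.seeks         = DEFAULT_SEEKS,
	};

	ret = register_shrinker(&my_shrinker, "my-subsystem");
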
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 971977c438fc..fa71a5d12e87 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1408,6 +1408,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
/*
* If we need to retry the mmap_lock has already been released,
* and if there is a fatal signal pending there is no guarantee
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index a0d023cb4292..509408da0da1 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -19,44 +19,6 @@
#include <asm/tlbflush.h>
#include <asm/elf.h>
-#if 0 /* This is just for testing */
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
- unsigned long start = address;
- int length = 1;
- int nr;
- struct page *page;
- struct vm_area_struct *vma;
-
- vma = find_vma(mm, addr);
- if (!vma || !is_vm_hugetlb_page(vma))
- return ERR_PTR(-EINVAL);
-
- pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-
- /* hugetlb should be locked, and hence, prefaulted */
- WARN_ON(!pte || pte_none(*pte));
-
- page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
- WARN_ON(!PageHead(page));
-
- return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
-
-int pud_huge(pud_t pud)
-{
- return 0;
-}
-
-#else
-
/*
* pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
* hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
@@ -72,7 +34,6 @@ int pud_huge(pud_t pud)
{
return !!(pud_val(pud) & _PAGE_PSE);
}
-#endif
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
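
The #if 0 block deleted above is a textbook case of why such blocks rot: it
passed addr where the parameter is address, and used pte and vpfn without
declaring either, so it could not have compiled even if re-enabled. Removing
it, together with the #else/#endif around the live pmd_huge()/pud_huge()
definitions, changes nothing in built code.
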
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 97452688f99f..9c4d8dbcb129 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -26,6 +26,7 @@
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
+#include <asm/mem_encrypt.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
@@ -486,8 +487,6 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo
void __init sme_early_init(void)
{
- unsigned int i;
-
if (!sme_me_mask)
return;
@@ -496,8 +495,7 @@ void __init sme_early_init(void)
__supported_pte_mask = __sme_set(__supported_pte_mask);
/* Update the protection map with memory encryption mask */
- for (i = 0; i < ARRAY_SIZE(protection_map); i++)
- protection_map[i] = pgprot_encrypted(protection_map[i]);
+ add_encrypt_protection_map();
x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
index 763742782286..c84bd9540b16 100644
--- a/arch/x86/mm/pgprot.c
+++ b/arch/x86/mm/pgprot.c
@@ -3,6 +3,34 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
+#include <asm/mem_encrypt.h>
+
+static pgprot_t protection_map[16] __ro_after_init = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+
+void add_encrypt_protection_map(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
+}
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
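
Moving the x86 table out of the header and into arch/x86/mm/pgprot.c is what
lets SME keep patching it: sme_early_init() now calls
add_encrypt_protection_map(), which ORs the memory-encryption mask into all
16 entries, so the table is __ro_after_init rather than const. The transform
relies on pgprot_encrypted(), which (assuming the usual definition) is just:

	#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))

i.e. set sme_me_mask in the pgprot value when memory encryption is active.
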
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 19c5dbd46770..cafd01f730da 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -17,7 +17,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = PAGE_READONLY;
return 0;
}
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index eeb2de3a89e5..7fc0f9126dd3 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
- return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+ return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline void ptes_clear(pte_t *ptep)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 0a91376131c5..54f577c13afa 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -57,7 +57,6 @@
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
-#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
@@ -200,24 +199,6 @@
* What follows is the closest we can get by reasonable means..
* See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/
-#define __P000 PAGE_NONE /* private --- */
-#define __P001 PAGE_READONLY /* private --r */
-#define __P010 PAGE_COPY /* private -w- */
-#define __P011 PAGE_COPY /* private -wr */
-#define __P100 PAGE_READONLY_EXEC /* private x-- */
-#define __P101 PAGE_READONLY_EXEC /* private x-r */
-#define __P110 PAGE_COPY_EXEC /* private xw- */
-#define __P111 PAGE_COPY_EXEC /* private xwr */
-
-#define __S000 PAGE_NONE /* shared --- */
-#define __S001 PAGE_READONLY /* shared --r */
-#define __S010 PAGE_SHARED /* shared -w- */
-#define __S011 PAGE_SHARED /* shared -wr */
-#define __S100 PAGE_READONLY_EXEC /* shared x-- */
-#define __S101 PAGE_READONLY_EXEC /* shared x-r */
-#define __S110 PAGE_SHARED_EXEC /* shared xw- */
-#define __S111 PAGE_SHARED_EXEC /* shared xwr */
-
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 16f0a5ff5799..8c781b05c0bd 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -172,6 +172,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6a32b2cf2718..b2587a1a7c46 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -216,3 +216,25 @@ static int __init parse_memmap_opt(char *str)
return 0;
}
early_param("memmap", parse_memmap_opt);
+
+#ifdef CONFIG_MMU
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+#endif