author	Bibo Mao <maobibo@loongson.cn>	2020-05-27 10:25:18 +0800
committer	Thomas Bogendoerfer <tsbogend@alpha.franken.de>	2020-05-27 13:06:09 +0200
commit	7df676974359f927056b882e10a5b24d2033169b (patch)
tree	d8f0fc569f5d1f3003d35dd9788ab58bb8380553 /arch/mips/include
parent	MIPS: Do not flush tlb page when updating PTE entry (diff)
mm/memory.c: Update local TLB if PTE entry exists
If two threads concurrently fault at the same page, the thread that won the race updates the PTE and its local TLB. For now, the other thread gives up, simply does nothing, and continues.

It could happen that this second thread triggers another fault, whereby it only updates its local TLB while handling the fault. Instead of triggering another fault, let's directly update the local TLB of the second thread. The function update_mmu_tlb is used here to update the local TLB on the second thread; on other architectures it is defined as empty.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
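The generic side of this change is not part of the diffstat shown below, which is limited to arch/mips/include. As a hedged sketch only, the fallback implied by "defined as empty on other arches" could look roughly like the following; the guard name matches the one added for MIPS further down, but the exact location and form of the generic definition are assumptions here, not the commit's own hunk:

/*
 * Sketch of the implied generic stub: architectures that do not provide
 * their own update_mmu_tlb() get an empty inline, so common code can
 * call it unconditionally.  Illustrative only.
 */
#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
static inline void update_mmu_tlb(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
}
#endif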
Diffstat (limited to 'arch/mips/include')
-rw-r--r--	arch/mips/include/asm/pgtable.h	23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index f8f48fc361d5..6f40612b3152 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -483,6 +483,26 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
 {
 }
 
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pte_t *ptep,
+					pte_t entry, int dirty)
+{
+	if (!pte_same(*ptep, entry))
+		set_pte_at(vma->vm_mm, address, ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
+}
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -526,6 +546,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	__update_tlb(vma, address, pte);
 }
 
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb	update_mmu_cache
+
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 	unsigned long address, pmd_t *pmdp)
 {
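
For context, a hedged sketch of how the fault path in mm/memory.c (also outside this diffstat) is described to use the new hook: when the pte_same() check shows that another thread already installed the PTE, the handler refreshes its own TLB instead of returning and faulting again. The field names below follow struct vm_fault, but the surrounding function, the locking details, and the construction of 'entry' are elided/assumed, so treat this as illustrative rather than the commit's actual mm/memory.c hunk:

	/* Sketch only: the losing thread updates its local TLB instead of refaulting. */
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
		/* We won the race: install the new PTE as usual. */
		set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
		update_mmu_cache(vma, vmf->address, vmf->pte);
	} else {
		/* The PTE changed under us: refresh this CPU's local TLB. */
		update_mmu_tlb(vma, vmf->address, vmf->pte);
	}
	pte_unmap_unlock(vmf->pte, vmf->ptl);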