author     Sven Schnelle <svens@stackframe.org>  2021-10-09 20:24:36 +0200
committer  Helge Deller <deller@gmx.de>          2021-10-30 23:11:01 +0200
commit     4f1938673994caa85d2da34f9e63f77d837e3b50
tree       e84e4094d17b7a2ae53c93e4d83c95277a8ef3f0  /arch/parisc/kernel/cache.c
parent     parisc: disable preemption during local tlb flush
parisc: deduplicate code in flush_cache_mm() and flush_cache_range()
Parts of both functions are the same, so deduplicate them. No functional change.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Helge Deller <deller@gmx.de>
Diffstat (limited to 'arch/parisc/kernel/cache.c')
-rw-r--r--  arch/parisc/kernel/cache.c | 81
1 file changed, 30 insertions(+), 51 deletions(-)
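For orientation, here is a consolidated sketch of the two helpers this patch introduces, assembled from the + lines in the diff below. It assumes the surrounding kernel context (get_ptep(), pte_pfn(), flush_cache_page(), the flush_user_*_range_asm primitives and flush_tlb_range() already available in arch/parisc/kernel/cache.c and the parisc headers); it is an illustrative view, not a substitute for the patch itself.

/*
 * Walk [start, end) page by page and flush each page that has a
 * valid PTE. flush_cache_page() internally handles the TLB flush
 * and the flush-vs-purge distinction the old open-coded loops had.
 */
static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ptep = get_ptep(mm->pgd, addr);
		if (ptep) {
			pfn = pte_pfn(*ptep);
			flush_cache_page(vma, addr, pfn);
		}
	}
}

/*
 * Fast path for the current address space: flush the user d-cache,
 * the i-cache too for executable mappings, then the TLB range.
 */
static void flush_user_cache_tlb(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	flush_user_dcache_range_asm(start, end);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(start, end);
	flush_tlb_range(vma, start, end);
}

With these helpers, flush_cache_mm() reduces to one call per VMA and flush_cache_range() to a single call for its range, which is where the 51 deleted lines in the hunks below come from.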
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a1a7e2b0812f..c61827e4928a 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -543,10 +543,33 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
return ptep;
}
+static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ unsigned long addr, pfn;
+ pte_t *ptep;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ ptep = get_ptep(mm->pgd, addr);
+ if (ptep) {
+ pfn = pte_pfn(*ptep);
+ flush_cache_page(vma, addr, pfn);
+ }
+ }
+}
+
+static void flush_user_cache_tlb(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ flush_tlb_range(vma, start, end);
+}
+
void flush_cache_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
- pgd_t *pgd;
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
@@ -560,46 +583,20 @@ void flush_cache_mm(struct mm_struct *mm)
preempt_disable();
if (mm->context == mfsp(3)) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
- flush_tlb_range(vma, vma->vm_start, vma->vm_end);
- }
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
preempt_enable();
return;
}
- pgd = mm->pgd;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- unsigned long addr;
-
- for (addr = vma->vm_start; addr < vma->vm_end;
- addr += PAGE_SIZE) {
- unsigned long pfn;
- pte_t *ptep = get_ptep(pgd, addr);
- if (!ptep)
- continue;
- pfn = pte_pfn(*ptep);
- if (!pfn_valid(pfn))
- continue;
- if (unlikely(mm->context)) {
- flush_tlb_page(vma, addr);
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- } else {
- __purge_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
preempt_enable();
}
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- pgd_t *pgd;
- unsigned long addr;
-
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
end - start >= parisc_cache_flush_threshold) {
if (vma->vm_mm->context)
@@ -610,30 +607,12 @@ void flush_cache_range(struct vm_area_struct *vma,
preempt_disable();
if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- flush_tlb_range(vma, start, end);
+ flush_user_cache_tlb(vma, start, end);
preempt_enable();
return;
}
- pgd = vma->vm_mm->pgd;
- for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
- unsigned long pfn;
- pte_t *ptep = get_ptep(pgd, addr);
- if (!ptep)
- continue;
- pfn = pte_pfn(*ptep);
- if (pfn_valid(pfn)) {
- if (unlikely(vma->vm_mm->context)) {
- flush_tlb_page(vma, addr);
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- } else {
- __purge_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
preempt_enable();
}