From 58de77545e53b94cd6c816776197dade598632c5 Mon Sep 17 00:00:00 2001
From: Gary Guo
Date: Wed, 27 Mar 2019 00:41:25 +0000
Subject: riscv: move flush_icache_{all,mm} to cacheflush.c

Currently, flush_icache_all is macro-expanded into an SBI call, yet
asm/cacheflush.h does not include asm/sbi.h. Move it to mm/cacheflush.c
instead (the SBI call will dominate performance-wise, so there is no
concern about it not being inlined).

Currently, flush_icache_mm lives in kernel/smp.c, which looks like a
hack to prevent it from being compiled when CONFIG_SMP=n. Move it to
mm/cacheflush.c as well.

Signed-off-by: Gary Guo
Signed-off-by: Christoph Hellwig
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/cacheflush.h |  2 +-
 arch/riscv/kernel/smp.c             | 49 -----------------------------
 arch/riscv/mm/cacheflush.c          | 61 +++++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 50 deletions(-)

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
 void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 9253de5d91b6..b2537ffa855c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -205,52 +205,3 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-/*
- * Performs an icache flush for the given MM context. RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
-
-	preempt_disable();
-
-	/* Mark every hart's icache as needing a flush for this MM. */
-	mask = &mm->context.icache_stale_mask;
-	cpumask_setall(mask);
-	/* Flush this hart's I$ now, and mark it as flushed. */
-	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mask);
-	local_flush_icache_all();
-
-	/*
-	 * Flush the I$ of other harts concurrently executing, and mark them as
-	 * flushed.
-	 */
-	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		cpumask_clear(&hmask);
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
-		/*
-		 * It's assumed that at least one strongly ordered operation is
-		 * performed on this hart between setting a hart's cpumask bit
-		 * and scheduling this MM context on that hart. Sending an SBI
-		 * remote message will do this, but in the case where no
-		 * messages are sent we still need to order this hart's writes
-		 * with flush_icache_deferred().
-		 */
-		smp_mb();
-	}
-
-	preempt_enable();
-}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+	sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, hmask, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart. Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
-- 
cgit v1.2.3-59-g8ed1b