Diffstat (limited to 'arch/riscv/mm/cacheflush.c')
-rw-r--r--  arch/riscv/mm/cacheflush.c  213
1 file changed, 207 insertions, 6 deletions
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 6cb7d96ad9c7..4ca5aafce22e 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -3,6 +3,10 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/prctl.h>
+#include <asm/acpi.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
@@ -18,7 +22,22 @@ void flush_icache_all(void)
{
local_flush_icache_all();
- if (IS_ENABLED(CONFIG_RISCV_SBI))
+ if (num_online_cpus() < 2)
+ return;
+
+ /*
+ * Make sure all previous writes to the D$ are ordered before sending
+ * the IPI. The RISC-V spec states that a hart must execute a data fence
+ * before triggering a remote fence.i in order to make the modification
+ * visible to remote harts.
+ *
+ * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
+ * S-IMSIC, so the fence ensures previous data writes "happen before"
+ * the MMIO.
+ */
+ RISCV_FENCE(w, o);
+
+ if (riscv_use_sbi_for_rfence())
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
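For illustration, a minimal sketch of the ordering the new comment describes, seen from a hypothetical caller inside the kernel (example_publish_insn, insn_slot and new_insn are made-up names, not part of this patch): the caller stores the new instruction, and the fence w,o inside flush_icache_all() then orders that store before the MMIO write that raises the IPI, after which the remote harts execute fence.i.

/* Hypothetical caller sketch; not part of this patch. */
static void example_publish_insn(u32 *insn_slot, u32 new_insn)
{
	WRITE_ONCE(*insn_slot, new_insn);	/* data write of the new instruction */
	flush_icache_all();			/* local fence.i, fence w,o, then remote fence.i via SBI or IPI */
}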
@@ -66,7 +85,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
* with flush_icache_deferred().
*/
smp_mb();
- } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+ } else if (riscv_use_sbi_for_rfence()) {
sbi_remote_fence_i(&others);
} else {
on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
@@ -78,11 +97,193 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#endif /* CONFIG_SMP */
#ifdef CONFIG_MMU
-void flush_icache_pte(pte_t pte)
+void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
- struct page *page = pte_page(pte);
+ struct folio *folio = page_folio(pte_page(pte));
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- flush_icache_all();
+ if (!test_bit(PG_dcache_clean, &folio->flags)) {
+ flush_icache_mm(mm, false);
+ set_bit(PG_dcache_clean, &folio->flags);
+ }
}
#endif /* CONFIG_MMU */
+
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+unsigned int riscv_cboz_block_size;
+EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
+
+unsigned int riscv_cbop_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbop_block_size);
+
+static void __init cbo_get_block_size(struct device_node *node,
+ const char *name, u32 *block_size,
+ unsigned long *first_hartid)
+{
+ unsigned long hartid;
+ u32 val;
+
+ if (riscv_of_processor_hartid(node, &hartid))
+ return;
+
+ if (of_property_read_u32(node, name, &val))
+ return;
+
+ if (!*block_size) {
+ *block_size = val;
+ *first_hartid = hartid;
+ } else if (*block_size != val) {
+ pr_warn("%s mismatched between harts %lu and %lu\n",
+ name, *first_hartid, hartid);
+ }
+}
+
+void __init riscv_init_cbo_blocksizes(void)
+{
+ unsigned long cbom_hartid, cboz_hartid, cbop_hartid;
+ u32 cbom_block_size = 0, cboz_block_size = 0, cbop_block_size = 0;
+ struct device_node *node;
+ struct acpi_table_header *rhct;
+ acpi_status status;
+
+ if (acpi_disabled) {
+ for_each_of_cpu_node(node) {
+ /* set block-size for cbom, cboz and/or cbop extension if available */
+ cbo_get_block_size(node, "riscv,cbom-block-size",
+ &cbom_block_size, &cbom_hartid);
+ cbo_get_block_size(node, "riscv,cboz-block-size",
+ &cboz_block_size, &cboz_hartid);
+ cbo_get_block_size(node, "riscv,cbop-block-size",
+ &cbop_block_size, &cbop_hartid);
+ }
+ } else {
+ status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+ if (ACPI_FAILURE(status))
+ return;
+
+ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, &cbop_block_size);
+ acpi_put_table((struct acpi_table_header *)rhct);
+ }
+
+ if (cbom_block_size)
+ riscv_cbom_block_size = cbom_block_size;
+
+ if (cboz_block_size)
+ riscv_cboz_block_size = cboz_block_size;
+
+ if (cbop_block_size)
+ riscv_cbop_block_size = cbop_block_size;
+}
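As a hedged sketch of how the exported block sizes are typically consumed (the helper below is made up for illustration and not part of this patch), code doing cache maintenance on non-coherent buffers would round lengths up to the CBOM block size so that an operation never covers a partially owned block:

/* Hypothetical consumer sketch; not part of this patch. */
static size_t example_cmo_aligned_len(size_t len)
{
	/* riscv_cbom_block_size stays 0 if neither DT nor ACPI reported it. */
	if (!riscv_cbom_block_size)
		return len;
	return ALIGN(len, riscv_cbom_block_size);	/* round up to a full CBOM block */
}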
+
+#ifdef CONFIG_SMP
+static void set_icache_stale_mask(void)
+{
+ int cpu = get_cpu();
+ cpumask_t *mask;
+ bool stale_cpu;
+
+ /*
+ * Mark every other hart's icache as needing a flush for
+ * this MM. Maintain the previous value of the current
+ * cpu to handle the case when this function is called
+ * concurrently on different harts.
+ */
+ mask = &current->mm->context.icache_stale_mask;
+ stale_cpu = cpumask_test_cpu(cpu, mask);
+
+ cpumask_setall(mask);
+ __assign_cpu(cpu, mask, stale_cpu);
+ put_cpu();
+}
+#endif
+
+/**
+ * riscv_set_icache_flush_ctx() - Enable/disable icache flushing instructions in
+ * userspace.
+ * @ctx: Set the type of icache flushing instructions permitted/prohibited in
+ * userspace. Supported values described below.
+ *
+ * Supported values for ctx:
+ *
+ * * %PR_RISCV_CTX_SW_FENCEI_ON: Allow fence.i in user space.
+ *
+ * * %PR_RISCV_CTX_SW_FENCEI_OFF: Disallow fence.i in user space. All threads in
+ * a process will be affected when ``scope == PR_RISCV_SCOPE_PER_PROCESS``.
+ * Therefore, caution must be taken; use this flag only when you can guarantee
+ * that no thread in the process will emit fence.i from this point onward.
+ *
+ * @scope: Set scope of where icache flushing instructions are allowed to be
+ * emitted. Supported values described below.
+ *
+ * Supported values for scope:
+ *
+ * * %PR_RISCV_SCOPE_PER_PROCESS: Ensure the icache of any thread in this process
+ * is coherent with instruction storage upon
+ * migration.
+ *
+ * * %PR_RISCV_SCOPE_PER_THREAD: Ensure the icache of the current thread is
+ * coherent with instruction storage upon
+ * migration.
+ *
+ * When ``scope == PR_RISCV_SCOPE_PER_PROCESS``, all threads in the process are
+ * permitted to emit icache flushing instructions. Whenever any thread in the
+ * process is migrated, the corresponding hart's icache will be guaranteed to be
+ * consistent with instruction storage. This does not enforce any guarantees
+ * outside of migration. If a thread modifies an instruction that another thread
+ * may attempt to execute, the other thread must still emit an icache flushing
+ * instruction before attempting to execute the potentially modified
+ * instruction. This must be performed by the user-space program.
+ *
+ * In per-thread context (i.e. ``scope == PR_RISCV_SCOPE_PER_THREAD``) only the
+ * thread calling this function is permitted to emit icache flushing
+ * instructions. When the thread is migrated, the corresponding hart's icache
+ * will be guaranteed to be consistent with instruction storage.
+ *
+ * On kernels configured without SMP, this function is a nop as migrations
+ * across harts will not occur.
+ */
+int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
+{
+#ifdef CONFIG_SMP
+ switch (ctx) {
+ case PR_RISCV_CTX_SW_FENCEI_ON:
+ switch (scope) {
+ case PR_RISCV_SCOPE_PER_PROCESS:
+ current->mm->context.force_icache_flush = true;
+ break;
+ case PR_RISCV_SCOPE_PER_THREAD:
+ current->thread.force_icache_flush = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case PR_RISCV_CTX_SW_FENCEI_OFF:
+ switch (scope) {
+ case PR_RISCV_SCOPE_PER_PROCESS:
+ set_icache_stale_mask();
+ current->mm->context.force_icache_flush = false;
+ break;
+ case PR_RISCV_SCOPE_PER_THREAD:
+ set_icache_stale_mask();
+ current->thread.force_icache_flush = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+#else
+ switch (ctx) {
+ case PR_RISCV_CTX_SW_FENCEI_ON:
+ case PR_RISCV_CTX_SW_FENCEI_OFF:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+#endif
+}
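A minimal userspace sketch of the contract documented above, assuming the corresponding prctl option is exposed as PR_RISCV_SET_ICACHE_FLUSH_CTX and using made-up buffer names: the process opts in once before any thread relies on fence.i, writes its new instructions, and issues a local fence.i; coherence across hart migration is then the kernel's responsibility.

/* Userspace sketch; prctl option name assumed, buffers hypothetical. */
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

static int example_enable_user_fencei(void)
{
	/* Per-process scope: every thread may now emit fence.i safely across migration. */
	return prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
		     PR_RISCV_SCOPE_PER_PROCESS);
}

static void example_publish_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);				/* write the new instructions */
	__asm__ volatile ("fence.i" ::: "memory");	/* sync this hart's icache */
	/* Other threads that may execute dst must still emit fence.i themselves. */
}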