author    Alexandre Ghiti <alexghiti@rivosinc.com>    2025-04-22 12:43:31 +0000
committer Palmer Dabbelt <palmer@dabbelt.com>         2025-06-05 11:09:41 -0700
commit    881dadf0792c02d7d156e0665dc5eaf18701cd5a (patch)
tree      4d3bc487806319da5477729bdf23c833c26028bc /arch
parent    riscv: Add support for PUD THP (diff)
parent    riscv: Documentation: add a description about dynamic ftrace (diff)
Merge patch series "riscv: ftrace: atomic patching and preempt improvements"
Andy Chiu <andybnac@gmail.com> says:

This series makes atomic code patching in ftrace possible and eliminates the
need for the stop_machine dance. The major difference in this version is that
we merge the CALL_OPS support from Puranjay [1] and make direct calls
available for practical uses such as BPF. Thanks for the time spent reviewing
the series and for the suggestions; we hope this version gets a step closer to
landing upstream. Please see the link to v3 below for a more introductory view
of the implementation [2].

Added patches: 2, 4, 10, 11, 12
Modified patches: 5, 6
Unchanged patches: 1, 3, 7, 8, 9 (1 and 8 have their commit messages modified)

Special thanks to Björn for his efforts on testing and guiding the series!

[1]: https://lore.kernel.org/lkml/20240306165904.108141-1-puranjay12@gmail.com/
[2]: https://lore.kernel.org/linux-riscv/20241127172908.17149-1-andybnac@gmail.com/

* patches from https://lore.kernel.org/r/20250407180838.42877-1-andybnac@gmail.com:
  riscv: Documentation: add a description about dynamic ftrace
  riscv: ftrace: support direct call using call_ops
  riscv: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
  riscv: ftrace: support PREEMPT
  riscv: add a data fence for CMODX in the kernel mode
  riscv: vector: Support calling schedule() for preemptible Vector
  riscv: ftrace: do not use stop_machine to update code
  riscv: ftrace: prepare ftrace for atomic code patching
  kernel: ftrace: export ftrace_sync_ipi
  riscv: ftrace: align patchable functions to 4 Byte boundary
  riscv: ftrace factor out code defined by !WITH_ARG
  riscv: ftrace: support fastcc in Clang for WITH_ARGS

Link: https://lore.kernel.org/r/20250407180838.42877-1-andybnac@gmail.com
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
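To see why this is atomic where the stop_machine scheme was not: with the
auipc left in place permanently, enabling or disabling a callsite rewrites
exactly one naturally aligned 4-byte word (jalr <-> nop). A standalone C
sketch, not code from the series; the encodings mirror JALR_T0 and the
canonical RISC-V nop (addi x0, x0, 0):

    #include <stdint.h>
    #include <stdio.h>

    #define JALR_T0  0x000282e7u  /* jalr t0, 0(t0) */
    #define NOP4     0x00000013u  /* addi x0, x0, 0 */

    static uint32_t to_jalr_t0(uint32_t offset)
    {
            /* low 12 bits of the callsite-to-trampoline offset -> imm */
            return ((offset & 0xfffu) << 20) | JALR_T0;
    }

    int main(void)
    {
            uint32_t enable  = to_jalr_t0(0x2a8);  /* example offset bits */
            uint32_t disable = NOP4;

            /* Only this one aligned word is ever rewritten at runtime. */
            printf("enable=%08x disable=%08x\n", enable, disable);
            return 0;
    }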
Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/Kconfig                  |   8
-rw-r--r--  arch/riscv/Makefile                 |   4
-rw-r--r--  arch/riscv/include/asm/ftrace.h     |  62
-rw-r--r--  arch/riscv/include/asm/processor.h  |   5
-rw-r--r--  arch/riscv/include/asm/vector.h     |  22
-rw-r--r--  arch/riscv/kernel/asm-offsets.c     |  13
-rw-r--r--  arch/riscv/kernel/ftrace.c          | 242
-rw-r--r--  arch/riscv/kernel/mcount-dyn.S      | 117
-rw-r--r--  arch/riscv/mm/cacheflush.c          |  15
9 files changed, 289 insertions, 199 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 63ef4aa03506..674cf6ff7188 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -99,6 +99,7 @@ config RISCV
select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
+ select FUNCTION_ALIGNMENT_8B if DYNAMIC_FTRACE_WITH_CALL_OPS
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -151,13 +152,15 @@ config RISCV
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
- select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ select FUNCTION_ALIGNMENT_4B if HAVE_DYNAMIC_FTRACE && RISCV_ISA_C
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS if HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
+ select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG)
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_GRAPH_FUNC
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_FUNCTION_GRAPH_FREGS
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_EBPF_JIT if MMU
select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_ARG_ACCESS_API
@@ -237,6 +240,7 @@ config CLANG_SUPPORTS_DYNAMIC_FTRACE
config GCC_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=8)
+ depends on CC_HAS_MIN_FUNCTION_ALIGNMENT || !RISCV_ISA_C
config HAVE_SHADOW_CALL_STACK
def_bool $(cc-option,-fsanitize=shadow-call-stack)
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 539d2aef5cab..df57654a615e 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -15,9 +15,9 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
LDFLAGS_vmlinux += --no-relax
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
ifeq ($(CONFIG_RISCV_ISA_C),y)
- CC_FLAGS_FTRACE := -fpatchable-function-entry=4
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=8,4
else
- CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
endif
endif
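A note on the values above: -fpatchable-function-entry=M,N makes the compiler
emit M nops per function, N of them before the entry symbol. Assuming
CONFIG_RISCV_ISA_C=y, where each compiler-emitted nop is a 2-byte c.nop, a
hypothetical foo() built with =8,4 lays out as:

    /* foo-8: 4 x c.nop  8 bytes before the symbol; with CALL_OPS this    */
    /*                   slot later holds the struct ftrace_ops * literal */
    /* foo+0: 4 x c.nop  8 bytes at the entry; later patched to the       */
    /*                   auipc+jalr pair                                  */
    /* foo+8: first real instruction of foo                               */

Without RVC, nops are 4 bytes each and =4,2 yields the same 8+8 byte split.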
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index d627f63ee289..22ebea3c2b26 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -20,10 +20,9 @@ extern void *return_address(unsigned int level);
#define ftrace_return_address(n) return_address(n)
void _mcount(void);
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
- return addr;
-}
+unsigned long ftrace_call_adjust(unsigned long addr);
+unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip);
+#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)
/*
* Let's do like x86/arm64 and ignore the compat syscalls.
@@ -57,12 +56,21 @@ struct dyn_arch_ftrace {
* 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
* return address (original pc + 4)
*
+ * The first 2 instructions of each traceable function are compiled to 2 nop
+ * instructions. The kernel then initializes the first instruction to auipc at
+ * boot time (<ftrace disable>). The second instruction is patched to jalr to
+ * start tracing.
+ *
+ *<Image>:
+ * 0: nop
+ * 4: nop
+ *
*<ftrace enable>:
- * 0: auipc t0/ra, 0x?
- * 4: jalr t0/ra, ?(t0/ra)
+ * 0: auipc t0, 0x?
+ * 4: jalr t0, ?(t0)
*
*<ftrace disable>:
- * 0: nop
+ * 0: auipc t0, 0x?
* 4: nop
*
* Dynamic ftrace generates probes to call sites, so we must deal with
@@ -75,10 +83,9 @@ struct dyn_arch_ftrace {
#define AUIPC_OFFSET_MASK (0xfffff000)
#define AUIPC_PAD (0x00001000)
#define JALR_SHIFT 20
-#define JALR_RA (0x000080e7)
-#define AUIPC_RA (0x00000097)
#define JALR_T0 (0x000282e7)
#define AUIPC_T0 (0x00000297)
+#define JALR_RANGE (JALR_SIGN_MASK - 1)
#define to_jalr_t0(offset) \
(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
@@ -96,26 +103,14 @@ do { \
call[1] = to_jalr_t0(offset); \
} while (0)
-#define to_jalr_ra(offset) \
- (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
-
-#define to_auipc_ra(offset) \
- ((offset & JALR_SIGN_MASK) ? \
- (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \
- ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
-
-#define make_call_ra(caller, callee, call) \
-do { \
- unsigned int offset = \
- (unsigned long) (callee) - (unsigned long) (caller); \
- call[0] = to_auipc_ra(offset); \
- call[1] = to_jalr_ra(offset); \
-} while (0)
-
/*
- * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ * Only the jalr insn in the auipc+jalr is patched, so we make it 4
+ * bytes here.
*/
-#define MCOUNT_INSN_SIZE 8
+#define MCOUNT_INSN_SIZE 4
+#define MCOUNT_AUIPC_SIZE 4
+#define MCOUNT_JALR_SIZE 4
+#define MCOUNT_NOP4_SIZE 4
#ifndef __ASSEMBLY__
struct dyn_ftrace;
@@ -135,6 +130,9 @@ struct __arch_ftrace_regs {
unsigned long sp;
unsigned long s0;
unsigned long t1;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_tramp;
+#endif
union {
unsigned long args[8];
struct {
@@ -146,6 +144,13 @@ struct __arch_ftrace_regs {
unsigned long a5;
unsigned long a6;
unsigned long a7;
+#ifdef CONFIG_CC_IS_CLANG
+ unsigned long t2;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+#endif
};
};
};
@@ -221,10 +226,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
arch_ftrace_regs(fregs)->t1 = addr;
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* __ASSEMBLY__ */
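The auipc+jalr offset split above can be exercised standalone. A sketch
mirroring to_auipc_t0()/to_jalr_t0(), with constants copied from this header
(JALR_SIGN_MASK is 0x800 there):

    #include <stdint.h>
    #include <stdio.h>

    #define JALR_SIGN_MASK    0x00000800u
    #define JALR_OFFSET_MASK  0x00000fffu
    #define AUIPC_OFFSET_MASK 0xfffff000u
    #define AUIPC_PAD         0x00001000u
    #define AUIPC_T0          0x00000297u  /* auipc t0, 0 */
    #define JALR_T0           0x000282e7u  /* jalr t0, 0(t0) */

    static void make_call_t0(uint32_t offset, uint32_t call[2])
    {
            /* jalr sign-extends its 12-bit immediate: when bit 11 is set
             * it subtracts up to 0x800, so pad the auipc upper part by
             * one page to compensate. */
            call[0] = (offset & JALR_SIGN_MASK) ?
                      (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) :
                      ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0);
            call[1] = ((offset & JALR_OFFSET_MASK) << 20) | JALR_T0;
    }

    int main(void)
    {
            uint32_t call[2];

            make_call_t0(0x00345876u, call);  /* arbitrary example offset */
            printf("auipc=%08x jalr=%08x\n", call[0], call[1]);
            return 0;
    }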
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 5f56eb9d114a..9c1cc716b891 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -79,6 +79,10 @@ struct pt_regs;
* Thus, the task does not own preempt_v. Any use of Vector will have to
* save preempt_v, if dirty, and fallback to non-preemptible kernel-mode
* Vector.
+ * - bit 29: The thread voluntarily calls schedule() while holding an active
+ * preempt_v. All preempt_v context should be dropped in that case because
+ * V-regs are caller-saved. Only sstatus.VS=ON is persisted across a
+ * schedule() call.
 * - bit 30: The in-kernel preempt_v context is saved, and requires to be
* restored when returning to the context that owns the preempt_v.
* - bit 31: The in-kernel preempt_v context is dirty, as signaled by the
@@ -93,6 +97,7 @@ struct pt_regs;
#define RISCV_PREEMPT_V 0x00000100
#define RISCV_PREEMPT_V_DIRTY 0x80000000
#define RISCV_PREEMPT_V_NEED_RESTORE 0x40000000
+#define RISCV_PREEMPT_V_IN_SCHEDULE 0x20000000
/* CPU-specific state of a task */
struct thread_struct {
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index e8a83f55be2b..45c9b426fcc5 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -120,6 +120,11 @@ static __always_inline void riscv_v_disable(void)
csr_clear(CSR_SSTATUS, SR_VS);
}
+static __always_inline bool riscv_v_is_on(void)
+{
+ return !!(csr_read(CSR_SSTATUS) & SR_VS);
+}
+
static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
{
asm volatile (
@@ -366,6 +371,11 @@ static inline void __switch_to_vector(struct task_struct *prev,
struct pt_regs *regs;
if (riscv_preempt_v_started(prev)) {
+ if (riscv_v_is_on()) {
+ WARN_ON(prev->thread.riscv_v_flags & RISCV_V_CTX_DEPTH_MASK);
+ riscv_v_disable();
+ prev->thread.riscv_v_flags |= RISCV_PREEMPT_V_IN_SCHEDULE;
+ }
if (riscv_preempt_v_dirty(prev)) {
__riscv_v_vstate_save(&prev->thread.kernel_vstate,
prev->thread.kernel_vstate.datap);
@@ -376,10 +386,16 @@ static inline void __switch_to_vector(struct task_struct *prev,
riscv_v_vstate_save(&prev->thread.vstate, regs);
}
- if (riscv_preempt_v_started(next))
- riscv_preempt_v_set_restore(next);
- else
+ if (riscv_preempt_v_started(next)) {
+ if (next->thread.riscv_v_flags & RISCV_PREEMPT_V_IN_SCHEDULE) {
+ next->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_IN_SCHEDULE;
+ riscv_v_enable();
+ } else {
+ riscv_preempt_v_set_restore(next);
+ }
+ } else {
riscv_v_vstate_set_restore(next, task_pt_regs(next));
+ }
}
void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
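The net effect of the hunk above on the new flag, as simplified pseudocode (a
sketch, not a drop-in implementation):

    /*
     * schedule() called voluntarily with preempt_v active and sstatus.VS on:
     *     riscv_v_disable();
     *     flags |= RISCV_PREEMPT_V_IN_SCHEDULE;    // remember VS was on
     *
     * switching back to that task:
     *     flags &= ~RISCV_PREEMPT_V_IN_SCHEDULE;
     *     riscv_v_enable();                        // restore VS only: V regs
     *                                              // are caller-saved across
     *                                              // the call, so no register
     *                                              // restore is needed
     */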
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 16490755304e..b26334075697 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -493,6 +493,12 @@ void asm_offsets(void)
DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
OFFSET(STACKFRAME_FP, stackframe, fp);
OFFSET(STACKFRAME_RA, stackframe, ra);
+#ifdef CONFIG_FUNCTION_TRACER
+ DEFINE(FTRACE_OPS_FUNC, offsetof(struct ftrace_ops, func));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ DEFINE(FTRACE_OPS_DIRECT_CALL, offsetof(struct ftrace_ops, direct_call));
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
DEFINE(FREGS_SIZE_ON_STACK, ALIGN(sizeof(struct __arch_ftrace_regs), STACK_ALIGN));
@@ -501,6 +507,13 @@ void asm_offsets(void)
DEFINE(FREGS_SP, offsetof(struct __arch_ftrace_regs, sp));
DEFINE(FREGS_S0, offsetof(struct __arch_ftrace_regs, s0));
DEFINE(FREGS_T1, offsetof(struct __arch_ftrace_regs, t1));
+#ifdef CONFIG_CC_IS_CLANG
+ DEFINE(FREGS_T2, offsetof(struct __arch_ftrace_regs, t2));
+ DEFINE(FREGS_T3, offsetof(struct __arch_ftrace_regs, t3));
+ DEFINE(FREGS_T4, offsetof(struct __arch_ftrace_regs, t4));
+ DEFINE(FREGS_T5, offsetof(struct __arch_ftrace_regs, t5));
+ DEFINE(FREGS_T6, offsetof(struct __arch_ftrace_regs, t6));
+#endif
DEFINE(FREGS_A0, offsetof(struct __arch_ftrace_regs, a0));
DEFINE(FREGS_A1, offsetof(struct __arch_ftrace_regs, a1));
DEFINE(FREGS_A2, offsetof(struct __arch_ftrace_regs, a2));
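For readers unfamiliar with asm-offsets: DEFINE()/OFFSET() expand to assembler
markers that the build turns into #define lines in generated/asm-offsets.h,
which is how mcount-dyn.S below can refer to FREGS_T2 and friends. The
mechanism in miniature (standalone sketch; the example struct and symbol are
made up, the DEFINE() body matches include/linux/kbuild.h):

    #include <stddef.h>

    struct example_regs { unsigned long epc, ra, t2; };

    /* Emits "->EX_T2 <value> ..." into the .s output; a build script
     * rewrites such lines into "#define EX_T2 <value>". */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    void asm_offsets_sketch(void)
    {
            DEFINE(EX_T2, offsetof(struct example_regs, t2));
    }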
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 674dcdfae7a1..4c6c24380cfd 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -8,98 +8,129 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
+#include <linux/irqflags.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
-void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+unsigned long ftrace_call_adjust(unsigned long addr)
{
- mutex_lock(&text_mutex);
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return addr + 8 + MCOUNT_AUIPC_SIZE;
- /*
- * The code sequences we use for ftrace can't be patched while the
- * kernel is running, so we need to use stop_machine() to modify them
- * for now. This doesn't play nice with text_mutex, we use this flag
- * to elide the check.
- */
- riscv_patch_in_stop_machine = true;
+ return addr + MCOUNT_AUIPC_SIZE;
+}
+
+unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
+{
+ return fentry_ip - MCOUNT_AUIPC_SIZE;
}
-void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+void arch_ftrace_update_code(int command)
{
- riscv_patch_in_stop_machine = false;
+ mutex_lock(&text_mutex);
+ command |= FTRACE_MAY_SLEEP;
+ ftrace_modify_all_code(command);
mutex_unlock(&text_mutex);
+ flush_icache_all();
}
-static int ftrace_check_current_call(unsigned long hook_pos,
- unsigned int *expected)
+static int __ftrace_modify_call(unsigned long source, unsigned long target, bool validate)
{
+ unsigned int call[2], offset;
unsigned int replaced[2];
- unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
- /* we expect nops at the hook position */
- if (!expected)
- expected = nops;
+ offset = target - source;
+ call[1] = to_jalr_t0(offset);
- /*
- * Read the text we want to modify;
- * return must be -EFAULT on read error
- */
- if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
- MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /*
- * Make sure it is what we expect it to be;
- * return must be -EINVAL on failed comparison
- */
- if (memcmp(expected, replaced, sizeof(replaced))) {
- pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
- (void *)hook_pos, expected[0], expected[1], replaced[0],
- replaced[1]);
- return -EINVAL;
+ if (validate) {
+ call[0] = to_auipc_t0(offset);
+ /*
+ * Read the text we want to modify;
+ * return must be -EFAULT on read error
+ */
+ if (copy_from_kernel_nofault(replaced, (void *)source, 2 * MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (replaced[0] != call[0]) {
+ pr_err("%p: expected (%08x) but got (%08x)\n",
+ (void *)source, call[0], replaced[0]);
+ return -EINVAL;
+ }
}
+ /* Replace the jalr at once. Return -EPERM on write error. */
+ if (patch_insn_write((void *)(source + MCOUNT_AUIPC_SIZE), call + 1, MCOUNT_JALR_SIZE))
+ return -EPERM;
+
return 0;
}
-static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
- bool enable, bool ra)
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+static const struct ftrace_ops *riscv64_rec_get_ops(struct dyn_ftrace *rec)
{
- unsigned int call[2];
- unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
+ const struct ftrace_ops *ops = NULL;
- if (ra)
- make_call_ra(hook_pos, target, call);
- else
- make_call_t0(hook_pos, target, call);
+ if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+ ops = ftrace_find_unique_ops(rec);
+ WARN_ON_ONCE(!ops);
+ }
- /* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
- if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
- return -EPERM;
+ if (!ops)
+ ops = &ftrace_list_ops;
- return 0;
+ return ops;
+}
+
+static int ftrace_rec_set_ops(const struct dyn_ftrace *rec, const struct ftrace_ops *ops)
+{
+ unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);
+
+ return patch_text_nosync((void *)literal, &ops, sizeof(ops));
+}
+
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
+}
+
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, riscv64_rec_get_ops(rec));
}
+#else
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
+#endif
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int call[2];
+ unsigned long distance, orig_addr, pc = rec->ip - MCOUNT_AUIPC_SIZE;
+ int ret;
- make_call_t0(rec->ip, addr, call);
+ ret = ftrace_rec_update_ops(rec);
+ if (ret)
+ return ret;
- if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
- return -EPERM;
+ orig_addr = (unsigned long)&ftrace_caller;
+ distance = addr > orig_addr ? addr - orig_addr : orig_addr - addr;
+ if (distance > JALR_RANGE)
+ addr = FTRACE_ADDR;
- return 0;
+ return __ftrace_modify_call(pc, addr, false);
}
-int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
- unsigned long addr)
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
+ u32 nop4 = RISCV_INSN_NOP4;
+ int ret;
- if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+ ret = ftrace_rec_set_nop_ops(rec);
+ if (ret)
+ return ret;
+
+ if (patch_insn_write((void *)rec->ip, &nop4, MCOUNT_NOP4_SIZE))
return -EPERM;
return 0;
@@ -114,75 +145,71 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
*/
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
- int out;
+ unsigned long pc = rec->ip - MCOUNT_AUIPC_SIZE;
+ unsigned int nops[2], offset;
+ int ret;
- mutex_lock(&text_mutex);
- out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
- mutex_unlock(&text_mutex);
+ ret = ftrace_rec_set_nop_ops(rec);
+ if (ret)
+ return ret;
- return out;
-}
+ offset = (unsigned long) &ftrace_caller - pc;
+ nops[0] = to_auipc_t0(offset);
+ nops[1] = RISCV_INSN_NOP4;
-int ftrace_update_ftrace_func(ftrace_func_t func)
-{
- int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
- (unsigned long)func, true, true);
+ mutex_lock(&text_mutex);
+ ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);
+ mutex_unlock(&text_mutex);
return ret;
}
-struct ftrace_modify_param {
- int command;
- atomic_t cpu_count;
-};
-
-static int __ftrace_modify_code(void *data)
+ftrace_func_t ftrace_call_dest = ftrace_stub;
+int ftrace_update_ftrace_func(ftrace_func_t func)
{
- struct ftrace_modify_param *param = data;
-
- if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
- ftrace_modify_all_code(param->command);
- /*
- * Make sure the patching store is effective *before* we
- * increment the counter which releases all waiting CPUs
- * by using the release variant of atomic increment. The
- * release pairs with the call to local_flush_icache_all()
- * on the waiting CPU.
- */
- atomic_inc_return_release(&param->cpu_count);
- } else {
- while (atomic_read(&param->cpu_count) <= num_online_cpus())
- cpu_relax();
-
- local_flush_icache_all();
- }
+ /*
+ * When using CALL_OPS, the function to call is associated with the
+ * call site, and we don't have a global function pointer to update.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return 0;
+ WRITE_ONCE(ftrace_call_dest, func);
+ /*
+ * The data fence ensures that the update to ftrace_call_dest happens
+ * before the write to function_trace_op later in the generic ftrace
+ * code. If the ordering is not enforced, an old ftrace_call_dest may
+ * race with loading a new function_trace_op set in ftrace_modify_all_code.
+ */
+ smp_wmb();
+ /*
+ * Updating ftrace does not take the stop_machine path, so irqs should
+ * not be disabled.
+ */
+ WARN_ON(irqs_disabled());
+ smp_call_function(ftrace_sync_ipi, NULL, 1);
return 0;
}
-void arch_ftrace_update_code(int command)
+#else /* CONFIG_DYNAMIC_FTRACE */
+unsigned long ftrace_call_adjust(unsigned long addr)
{
- struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
-
- stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
+ return addr;
}
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
- unsigned int call[2];
- unsigned long caller = rec->ip;
+ unsigned long caller = rec->ip - MCOUNT_AUIPC_SIZE;
int ret;
- make_call_t0(caller, old_addr, call);
- ret = ftrace_check_current_call(caller, call);
-
+ ret = ftrace_rec_update_ops(rec);
if (ret)
return ret;
- return __ftrace_modify_call(caller, addr, true, false);
+ return __ftrace_modify_call(caller, FTRACE_ADDR, true);
}
#endif
@@ -210,7 +237,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
}
#ifdef CONFIG_DYNAMIC_FTRACE
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
@@ -231,19 +257,5 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
if (!function_graph_enter_regs(old, ip, frame_pointer, parent, fregs))
*parent = return_hooker;
}
-#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-extern void ftrace_graph_call(void);
-int ftrace_enable_ftrace_graph_caller(void)
-{
- return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
- (unsigned long)&prepare_ftrace_return, true, true);
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
- (unsigned long)&prepare_ftrace_return, false, true);
-}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
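The address arithmetic above is easy to check by hand. A standalone sketch for
the CALL_OPS case (0x1008 is an arbitrary example entry address; 64-bit):

    #include <stdio.h>

    #define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1UL))
    #define MCOUNT_AUIPC_SIZE  4UL

    int main(void)
    {
            unsigned long sym   = 0x1008;  /* function entry symbol */
            unsigned long patch = sym - 8; /* first patchable nop: the 8-byte
                                            * ops literal sits just below the
                                            * symbol */
            unsigned long ip    = patch + 8 + MCOUNT_AUIPC_SIZE; /* rec->ip */

            printf("jalr slot:   %#lx (sym + 4)\n", ip);
            printf("auipc slot:  %#lx (sym + 0)\n", ip - MCOUNT_AUIPC_SIZE);
            printf("ops literal: %#lx (sym - 8)\n", ALIGN_DOWN(ip - 12, 8));
            return 0;
    }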
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 745dd4c4a69c..48f6c4f7dca0 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -13,7 +13,6 @@
.text
-#define FENTRY_RA_OFFSET 8
#define ABI_SIZE_ON_STACK 80
#define ABI_A0 0
#define ABI_A1 8
@@ -56,16 +55,13 @@
addi sp, sp, ABI_SIZE_ON_STACK
.endm
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
-
/**
* SAVE_ABI_REGS - save regs against the ftrace_regs struct
*
* After the stack is established,
*
* 0(sp) stores the PC of the traced function which can be accessed
-* by &(fregs)->epc in tracing function. Note that the real
-* function entry address should be computed with -FENTRY_RA_OFFSET.
+* by &(fregs)->epc in tracing function.
*
* 8(sp) stores the function return address (i.e. parent IP) that
* can be accessed by &(fregs)->ra in tracing function.
@@ -86,17 +82,20 @@
* +++++++++
**/
.macro SAVE_ABI_REGS
- mv t4, sp // Save original SP in T4
addi sp, sp, -FREGS_SIZE_ON_STACK
-
REG_S t0, FREGS_EPC(sp)
REG_S x1, FREGS_RA(sp)
- REG_S t4, FREGS_SP(sp) // Put original SP on stack
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
REG_S x8, FREGS_S0(sp)
#endif
REG_S x6, FREGS_T1(sp)
-
+#ifdef CONFIG_CC_IS_CLANG
+ REG_S x7, FREGS_T2(sp)
+ REG_S x28, FREGS_T3(sp)
+ REG_S x29, FREGS_T4(sp)
+ REG_S x30, FREGS_T5(sp)
+ REG_S x31, FREGS_T6(sp)
+#endif
// save the arguments
REG_S x10, FREGS_A0(sp)
REG_S x11, FREGS_A1(sp)
@@ -106,16 +105,25 @@
REG_S x15, FREGS_A5(sp)
REG_S x16, FREGS_A6(sp)
REG_S x17, FREGS_A7(sp)
+ mv a0, sp
+ addi a0, a0, FREGS_SIZE_ON_STACK
+ REG_S a0, FREGS_SP(sp) // Put original SP on stack
.endm
- .macro RESTORE_ABI_REGS, all=0
+ .macro RESTORE_ABI_REGS
REG_L t0, FREGS_EPC(sp)
REG_L x1, FREGS_RA(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
REG_L x8, FREGS_S0(sp)
#endif
REG_L x6, FREGS_T1(sp)
-
+#ifdef CONFIG_CC_IS_CLANG
+ REG_L x7, FREGS_T2(sp)
+ REG_L x28, FREGS_T3(sp)
+ REG_L x29, FREGS_T4(sp)
+ REG_L x30, FREGS_T5(sp)
+ REG_L x31, FREGS_T6(sp)
+#endif
// restore the arguments
REG_L x10, FREGS_A0(sp)
REG_L x11, FREGS_A1(sp)
@@ -130,60 +138,71 @@
.endm
.macro PREPARE_ARGS
- addi a0, t0, -FENTRY_RA_OFFSET
+ addi a0, t0, -MCOUNT_JALR_SIZE // ip (callsite's jalr insn)
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ mv a1, ra // parent_ip
+ REG_L a2, -16(t0) // op
+ REG_L ra, FTRACE_OPS_FUNC(a2) // op->func
+#else
la a1, function_trace_op
- REG_L a2, 0(a1)
- mv a1, ra
- mv a3, sp
+ REG_L a2, 0(a1) // op
+ mv a1, ra // parent_ip
+#endif
+ mv a3, sp // regs
.endm
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-
-#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
SYM_FUNC_START(ftrace_caller)
- SAVE_ABI
-
- addi a0, t0, -FENTRY_RA_OFFSET
- la a1, function_trace_op
- REG_L a2, 0(a1)
- mv a1, ra
- mv a3, sp
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ /*
+ * When CALL_OPS is enabled, 2 or 4 nops (8 bytes in total) are placed
+ * before the function entry; these are later overwritten with the pointer
+ * to the
+ * associated struct ftrace_ops.
+ *
+ * -8: &ftrace_ops of the associated tracer function.
+ *<ftrace enable>:
+ * 0: auipc t0/ra, 0x?
+ * 4: jalr t0/ra, ?(t0/ra)
+ *
+ * -8: &ftrace_nop_ops
+ *<ftrace disable>:
+ * 0: nop
+ * 4: nop
+ *
+ * t0 is set to ip+8 after the jalr is executed at the callsite,
+ * so we find the associated op at t0-16.
+ */
+ REG_L t1, -16(t0) // op (should use SZ_REG instead of hard-coded 16)
-SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
- call ftrace_stub
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- addi a0, sp, ABI_RA
- REG_L a1, ABI_T0(sp)
- addi a1, a1, -FENTRY_RA_OFFSET
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- mv a2, s0
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ /*
+ * If the op has a direct call, handle it immediately without
+ * saving/restoring registers.
+ */
+ REG_L t1, FTRACE_OPS_DIRECT_CALL(t1)
+ bnez t1, ftrace_caller_direct
#endif
-SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
- call ftrace_stub
#endif
- RESTORE_ABI
- jr t0
-SYM_FUNC_END(ftrace_caller)
-
-#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-SYM_FUNC_START(ftrace_caller)
- mv t1, zero
SAVE_ABI_REGS
PREPARE_ARGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ jalr ra
+#else
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
- call ftrace_stub
-
+ REG_L ra, ftrace_call_dest
+ jalr ra, 0(ra)
+#endif
RESTORE_ABI_REGS
- bnez t1, .Ldirect
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ bnez t1, ftrace_caller_direct
+#endif
jr t0
-.Ldirect:
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_INNER_LABEL(ftrace_caller_direct, SYM_L_LOCAL)
jr t1
+#endif
SYM_FUNC_END(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_CODE_START(ftrace_stub_direct_tramp)
jr t0
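In C terms, the CALL_OPS fast path of ftrace_caller above behaves roughly like
this sketch (the real code runs in assembly over the saved fregs frame; struct
and helper names here are illustrative only):

    struct ftrace_ops;
    struct ftrace_regs;
    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op,
                                  struct ftrace_regs *fregs);

    struct ftrace_ops_sketch {
            ftrace_func_t func;           /* FTRACE_OPS_FUNC        */
            void (*direct_call)(void);    /* FTRACE_OPS_DIRECT_CALL */
    };

    static void ftrace_caller_sketch(unsigned long t0, unsigned long ra,
                                     struct ftrace_regs *fregs)
    {
            /* t0 = entry + 8 after the callsite jalr, so the ops literal
             * written at entry - 8 is found at t0 - 16 */
            struct ftrace_ops_sketch *op =
                    *(struct ftrace_ops_sketch **)(t0 - 16);

            if (op->direct_call) {
                    op->direct_call();    /* skip fregs save/restore */
                    return;
            }
            /* ip = the jalr slot = t0 - MCOUNT_JALR_SIZE */
            op->func(t0 - 4, ra, (struct ftrace_ops *)op, fregs);
    }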
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index b81672729887..b2e4b81763f8 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -24,7 +24,20 @@ void flush_icache_all(void)
if (num_online_cpus() < 2)
return;
- else if (riscv_use_sbi_for_rfence())
+
+ /*
+ * Make sure all previous writes to the D$ are ordered before sending
+ * the IPI. The RISC-V spec states that a hart must execute a data fence
+ * before triggering a remote fence.i in order to make the modification
+ * visible to remote harts.
+ *
+ * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
+ * S-IMSIC, so the fence ensures previous data writes "happen before"
+ * the MMIO.
+ */
+ RISCV_FENCE(w, o);
+
+ if (riscv_use_sbi_for_rfence())
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
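Putting this change in context, the complete cross-modifying-code publish
sequence in this series is (pseudocode using the helpers referenced above):

    /*
     * patch_insn_write(addr, insn, len);         // 1. store the new
     *                                            //    instructions (data side)
     * RISCV_FENCE(w, o);                         // 2. order those stores
     *                                            //    before the MMIO write
     *                                            //    that raises the IPI
     * on_each_cpu(ipi_remote_fence_i, NULL, 1);  // 3. every hart executes
     *                                            //    fence.i, making the new
     *                                            //    code fetchable
     */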