author		David S. Miller <davem@davemloft.net>	2021-03-09 18:07:05 -0800
committer	David S. Miller <davem@davemloft.net>	2021-03-09 18:07:05 -0800
commit		c1acda9807e2bbe1d2026b44f37d959d6d8266c8 (patch)
tree		6af2137ad95c0303f9b59d11fe7866e8ebfbcd07 /tools/testing/selftests/bpf/prog_tests/attach_probe.c
parent		Merge git://git.kernel.org:/pub/scm/linux/kernel/git/netdev/net (diff)
parent		Merge branch 'bpf-xdp-redirect' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-03-09

The following pull-request contains BPF updates for your *net-next* tree.

We've added 90 non-merge commits during the last 17 day(s) which contain
a total of 114 files changed, 5158 insertions(+), 1288 deletions(-).

The main changes are:

1) Faster bpf_redirect_map(), from Björn.

2) skmsg cleanup, from Cong.

3) Support for floating point types in BTF, from Ilya.

4) Documentation for sys_bpf commands, from Joe.

5) Support for sk_lookup in bpf_prog_test_run, from Lorenz.

6) Enable task local storage for tracing programs, from Song.

7) bpf_for_each_map_elem() helper, from Yonghong.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
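As a rough illustration of item 7), here is a minimal BPF-side sketch of the new bpf_for_each_map_elem() helper. The map, callback, and program names (values, sum_elem, sum_values) are hypothetical and not taken from this series; the point is the calling convention: the callback runs once per map element and returns 0 to keep iterating or 1 to stop early.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example, not part of this merge: sum the values of an
 * array map with bpf_for_each_map_elem().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} values SEC(".maps");

struct cb_ctx {
	__u64 sum;
};

/* Invoked once per map element; return 0 to continue, 1 to stop. */
static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
		      struct cb_ctx *ctx)
{
	ctx->sum += *val;
	return 0;
}

SEC("classifier")
int sum_values(struct __sk_buff *skb)
{
	struct cb_ctx ctx = { .sum = 0 };

	bpf_for_each_map_elem(&values, sum_elem, &ctx, 0);
	bpf_printk("sum = %llu\n", ctx.sum);
	return 0;
}

char _license[] SEC("license") = "GPL";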
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/attach_probe.c')
-rw-r--r--	tools/testing/selftests/bpf/prog_tests/attach_probe.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index a0ee87c8e1ea..9dc4e3dfbcf3 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -2,6 +2,44 @@
#include <test_progs.h>
#include "test_attach_probe.skel.h"
+#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2
+
+#define OP_RT_RA_MASK 0xffff0000UL
+#define LIS_R2 0x3c400000UL
+#define ADDIS_R2_R12 0x3c4c0000UL
+#define ADDI_R2_R2 0x38420000UL
+
+static ssize_t get_offset(ssize_t addr, ssize_t base)
+{
+	u32 *insn = (u32 *) addr;
+
+	/*
+	 * A PPC64 ABIv2 function may have a local and a global entry
+	 * point. We need to use the local entry point when patching
+	 * functions, so identify and step over the global entry point
+	 * sequence.
+	 *
+	 * The global entry point sequence is always of the form:
+	 *
+	 * addis r2,r12,XXXX
+	 * addi  r2,r2,XXXX
+	 *
+	 * A linker optimisation may convert the addis to lis:
+	 *
+	 * lis   r2,XXXX
+	 * addi  r2,r2,XXXX
+	 */
+	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+	    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+		return (ssize_t)(insn + 2) - base;
+	else
+		return addr - base;
+}
+#else
+#define get_offset(addr, base) (addr - base)
+#endif
+
ssize_t get_base_addr() {
	size_t start, offset;
	char buf[256];
@@ -36,7 +74,7 @@ void test_attach_probe(void)
	if (CHECK(base_addr < 0, "get_base_addr",
		  "failed to find base addr: %zd", base_addr))
		return;
-	uprobe_offset = (size_t)&get_base_addr - base_addr;
+	uprobe_offset = get_offset((size_t)&get_base_addr, base_addr);

	skel = test_attach_probe__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
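For context (not part of this diff): test_attach_probe() feeds uprobe_offset into libbpf's bpf_program__attach_uprobe() against /proc/self/exe, which is why the offset must point at the ppc64le ELFv2 local entry point. Below is a minimal sketch of that attach step, assuming it sits in the same file as the patch above so get_offset() is visible; attach_self_uprobe and prog are hypothetical names.

#include <sys/types.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: attach a uprobe to a function inside this test
 * binary. get_offset() (added by the patch above) skips the ppc64le
 * ELFv2 global entry point so the probe fires on the local entry point
 * that in-binary callers actually reach.
 */
static struct bpf_link *attach_self_uprobe(struct bpf_program *prog,
					    ssize_t func_addr, ssize_t base_addr)
{
	size_t uprobe_offset = get_offset(func_addr, base_addr);

	/* pid 0 = this process only; probe our own executable */
	return bpf_program__attach_uprobe(prog, false /* not a retprobe */,
					  0, "/proc/self/exe", uprobe_offset);
}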