Diffstat (limited to 'tools/testing/selftests/powerpc')
-rw-r--r--  tools/testing/selftests/powerpc/Makefile | 5
-rw-r--r--  tools/testing/selftests/powerpc/alignment/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/alignment/alignment_handler.c | 166
-rw-r--r--  tools/testing/selftests/powerpc/benchmarks/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/benchmarks/Makefile | 2
-rw-r--r--  tools/testing/selftests/powerpc/benchmarks/context_switch.c | 27
-rw-r--r--  tools/testing/selftests/powerpc/benchmarks/gettimeofday.c | 6
-rw-r--r--  tools/testing/selftests/powerpc/benchmarks/null_syscall.c | 3
-rw-r--r--  tools/testing/selftests/powerpc/benchmarks/settings | 1
-rw-r--r--  tools/testing/selftests/powerpc/cache_shape/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/copyloops/.gitignore | 4
-rw-r--r--  tools/testing/selftests/powerpc/copyloops/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h | 1
l---------  tools/testing/selftests/powerpc/copyloops/copy_mc_64.S | 1
l---------  tools/testing/selftests/powerpc/copyloops/mem_64.S | 1
l---------  tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S | 1
-rw-r--r--  tools/testing/selftests/powerpc/copyloops/memcpy_stubs.S | 8
-rw-r--r--  tools/testing/selftests/powerpc/copyloops/memmove_validate.c | 58
-rw-r--r--  tools/testing/selftests/powerpc/dscr/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/dscr/Makefile | 4
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_default_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/dscr_user_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/dscr/settings | 1
-rw-r--r--  tools/testing/selftests/powerpc/eeh/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/powerpc/eeh/eeh-basic.sh | 35
-rw-r--r-- [-rwxr-xr-x]  tools/testing/selftests/powerpc/eeh/eeh-functions.sh | 179
-rwxr-xr-x  tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh | 45
-rwxr-xr-x  tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh | 35
-rw-r--r--  tools/testing/selftests/powerpc/eeh/settings | 1
-rw-r--r--  tools/testing/selftests/powerpc/include/basic_asm.h | 63
-rw-r--r--  tools/testing/selftests/powerpc/include/instructions.h | 77
-rw-r--r--  tools/testing/selftests/powerpc/include/pkeys.h | 136
-rw-r--r--  tools/testing/selftests/powerpc/include/reg.h | 83
-rw-r--r--  tools/testing/selftests/powerpc/include/utils.h | 51
-rw-r--r--  tools/testing/selftests/powerpc/lib/reg.S | 107
-rw-r--r--  tools/testing/selftests/powerpc/math/.gitignore | 3
-rw-r--r--  tools/testing/selftests/powerpc/math/Makefile | 12
-rw-r--r--  tools/testing/selftests/powerpc/math/fpu_denormal.c | 38
-rw-r--r--  tools/testing/selftests/powerpc/math/mma.S | 36
-rw-r--r--  tools/testing/selftests/powerpc/math/mma.c | 48
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_preempt.c | 3
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_signal.c | 3
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_syscall.c | 7
-rw-r--r--  tools/testing/selftests/powerpc/math/vsx_preempt.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/mce/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/mce/Makefile | 7
-rw-r--r--  tools/testing/selftests/powerpc/mce/inject-ra-err.c | 65
l---------  tools/testing/selftests/powerpc/mce/vas-api.h | 1
-rw-r--r--  tools/testing/selftests/powerpc/mm/.gitignore | 7
-rw-r--r--  tools/testing/selftests/powerpc/mm/Makefile | 18
-rw-r--r--  tools/testing/selftests/powerpc/mm/bad_accesses.c | 31
-rw-r--r--  tools/testing/selftests/powerpc/mm/exec_prot.c | 231
-rw-r--r--  tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c | 158
-rw-r--r--  tools/testing/selftests/powerpc/mm/pkey_exec_prot.c | 294
-rw-r--r--  tools/testing/selftests/powerpc/mm/pkey_siginfo.c | 333
-rw-r--r--  tools/testing/selftests/powerpc/mm/prot_sao.c | 10
-rw-r--r--  tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c | 202
-rw-r--r--  tools/testing/selftests/powerpc/mm/stack_expansion_signal.c | 118
-rwxr-xr-x  tools/testing/selftests/powerpc/mm/stress_code_patching.sh | 49
-rw-r--r--  tools/testing/selftests/powerpc/mm/tlbie_test.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/.gitignore | 3
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules | 1
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/Makefile | 8
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/README | 45
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/gunz_test.c | 1028
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c | 446
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c | 316
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h | 56
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/include/crb.h | 155
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/include/nx.h | 38
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h | 95
-rw-r--r--  tools/testing/selftests/powerpc/nx-gzip/include/nxu.h | 650
l---------  tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h | 1
-rwxr-xr-x  tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh | 46
-rw-r--r--  tools/testing/selftests/powerpc/papr_attributes/.gitignore | 2
-rw-r--r--  tools/testing/selftests/powerpc/papr_attributes/Makefile | 7
-rw-r--r--  tools/testing/selftests/powerpc/papr_attributes/attr_test.c | 113
-rw-r--r--  tools/testing/selftests/powerpc/pmu/.gitignore | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/Makefile | 26
-rw-r--r--  tools/testing/selftests/powerpc/pmu/branch_loops.S | 28
-rw-r--r--  tools/testing/selftests/powerpc/pmu/count_stcx_fail.c | 164
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/.gitignore | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/Makefile | 5
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c | 3
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/ebb.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/ebb.h | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S | 43
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c | 7
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c | 63
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/trace.h | 4
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event.c | 19
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event.h | 6
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore | 20
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c | 132
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c | 109
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c | 116
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c | 130
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c | 60
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c | 64
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c | 54
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c | 63
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c | 70
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c | 56
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c | 56
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c | 96
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c | 64
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c | 63
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c | 74
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c | 64
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c | 88
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c | 67
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c | 77
-rw-r--r--  tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c | 44
-rw-r--r--  tools/testing/selftests/powerpc/pmu/l3_bank_test.c | 3
-rw-r--r--  tools/testing/selftests/powerpc/pmu/lib.h | 1
-rw-r--r--  tools/testing/selftests/powerpc/pmu/loop.S | 35
-rw-r--r--  tools/testing/selftests/powerpc/pmu/per_event_excludes.c | 5
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore | 21
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile | 15
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c | 114
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c | 59
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c | 57
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c | 537
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h | 234
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c | 59
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c | 59
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c | 59
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c | 58
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c | 59
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c | 58
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c | 66
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c | 77
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c | 85
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c | 74
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c | 67
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c | 65
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c | 69
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c | 64
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c | 66
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c | 69
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c | 74
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c | 80
-rw-r--r--  tools/testing/selftests/powerpc/primitives/.gitignore | 1
l---------  tools/testing/selftests/powerpc/primitives/asm/extable.h | 1
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/.gitignore | 2
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/Makefile | 40
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/core-pkey.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c | 635
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S | 52
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c | 125
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h | 14
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c | 127
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c | 659
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c | 57
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tar.c | 3
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c | 21
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c | 23
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace.h | 79
-rw-r--r--  tools/testing/selftests/powerpc/security/.gitignore | 4
-rw-r--r--  tools/testing/selftests/powerpc/security/Makefile | 7
-rw-r--r--  tools/testing/selftests/powerpc/security/entry_flush.c | 139
-rw-r--r--  tools/testing/selftests/powerpc/security/flush_utils.c | 83
-rw-r--r--  tools/testing/selftests/powerpc/security/flush_utils.h | 24
-rwxr-xr-x  tools/testing/selftests/powerpc/security/mitigation-patching.sh | 78
-rw-r--r--  tools/testing/selftests/powerpc/security/rfi_flush.c | 66
-rw-r--r--  tools/testing/selftests/powerpc/security/spectre_v2.c | 29
-rw-r--r--  tools/testing/selftests/powerpc/security/uaccess_flush.c | 158
-rw-r--r--  tools/testing/selftests/powerpc/signal/.gitignore | 5
-rw-r--r--  tools/testing/selftests/powerpc/signal/Makefile | 6
-rw-r--r--  tools/testing/selftests/powerpc/signal/settings | 1
-rw-r--r--  tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c | 174
-rw-r--r--  tools/testing/selftests/powerpc/signal/signal_tm.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/signal/sigreturn_kernel.c | 132
-rw-r--r--  tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c | 43
-rw-r--r--  tools/testing/selftests/powerpc/signal/sigreturn_vdso.c | 127
-rw-r--r--  tools/testing/selftests/powerpc/stringloops/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/stringloops/Makefile | 2
-rw-r--r--  tools/testing/selftests/powerpc/stringloops/memcmp.c | 46
-rw-r--r--  tools/testing/selftests/powerpc/switch_endian/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S | 23
-rw-r--r--  tools/testing/selftests/powerpc/syscalls/.gitignore | 2
-rw-r--r--  tools/testing/selftests/powerpc/syscalls/Makefile | 2
-rw-r--r--  tools/testing/selftests/powerpc/syscalls/rtas_filter.c | 285
-rw-r--r--  tools/testing/selftests/powerpc/tm/.gitignore | 2
-rw-r--r--  tools/testing/selftests/powerpc/tm/Makefile | 6
-rw-r--r--  tools/testing/selftests/powerpc/tm/settings | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-exec.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-fork.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-poison.c | 13
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-resched-dscr.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c | 74
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c | 285
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-signal-stack.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-sigreturn.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-syscall-asm.S | 37
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-syscall.c | 38
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-tar.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-tmspr.c | 11
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-trap.c | 15
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-unavailable.c | 10
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm-vmxcopy.c | 1
-rw-r--r--  tools/testing/selftests/powerpc/tm/tm.h | 39
-rw-r--r--  tools/testing/selftests/powerpc/utils.c | 90
-rw-r--r--  tools/testing/selftests/powerpc/vphn/.gitignore | 1
231 files changed, 13352 insertions, 659 deletions
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 644770c3b754..6ba95cd19e42 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -19,6 +19,7 @@ SUB_DIRS = alignment \
copyloops \
dscr \
mm \
+ nx-gzip \
pmu \
signal \
primitives \
@@ -29,8 +30,10 @@ SUB_DIRS = alignment \
eeh \
vphn \
math \
+ papr_attributes \
ptrace \
- security
+ security \
+ mce
endif
diff --git a/tools/testing/selftests/powerpc/alignment/.gitignore b/tools/testing/selftests/powerpc/alignment/.gitignore
index 6d4fd014511c..28bc6ca13cc6 100644
--- a/tools/testing/selftests/powerpc/alignment/.gitignore
+++ b/tools/testing/selftests/powerpc/alignment/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
copy_first_unaligned
alignment_handler
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 0453c50c949c..33ee34fc0828 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -9,7 +9,8 @@
* This selftest exercises the powerpc alignment fault handler.
*
* We create two sets of source and destination buffers, one in regular memory,
- * the other cache-inhibited (we use /dev/fb0 for this).
+ * the other cache-inhibited (by default we use /dev/fb0 for this, but an
+ * alterative path for cache-inhibited memory may be provided, e.g. memtrace).
*
* We initialise the source buffers, then use whichever set of load/store
* instructions is under test to copy bytes from the source buffers to the
@@ -45,14 +46,16 @@
#include <setjmp.h>
#include <signal.h>
-#include <asm/cputable.h>
-
#include "utils.h"
+#include "instructions.h"
int bufsize;
int debug;
int testing;
volatile int gotsig;
+bool prefixes_enabled;
+char *cipath = "/dev/fb0";
+long cioffset;
void sighandler(int sig, siginfo_t *info, void *ctx)
{
@@ -64,7 +67,12 @@ void sighandler(int sig, siginfo_t *info, void *ctx)
}
gotsig = sig;
#ifdef __powerpc64__
- ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
+ if (prefixes_enabled) {
+ u32 inst = *(u32 *)ucp->uc_mcontext.gp_regs[PT_NIP];
+ ucp->uc_mcontext.gp_regs[PT_NIP] += ((inst >> 26 == 1) ? 8 : 4);
+ } else {
+ ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
+ }
#else
ucp->uc_mcontext.uc_regs->gregs[PT_NIP] += 4;
#endif
@@ -84,6 +92,17 @@ void sighandler(int sig, siginfo_t *info, void *ctx)
} \
rc |= do_test(#name, test_##name)
+#define TESTP(name, ld_op, st_op, ld_reg, st_reg) \
+ void test_##name(char *s, char *d) \
+ { \
+ asm volatile( \
+ ld_op(ld_reg, %0, 0, 0) \
+ st_op(st_reg, %1, 0, 0) \
+ :: "r"(s), "r"(d), "r"(0) \
+ : "memory", "vs0", "vs32", "r31"); \
+ } \
+ rc |= do_test(#name, test_##name)
+
#define LOAD_VSX_XFORM_TEST(op) TEST(op, op, stxvd2x, XFORM, 32, 32)
#define STORE_VSX_XFORM_TEST(op) TEST(op, lxvd2x, op, XFORM, 32, 32)
#define LOAD_VSX_DFORM_TEST(op) TEST(op, op, stxv, DFORM, 32, 32)
@@ -103,6 +122,17 @@ void sighandler(int sig, siginfo_t *info, void *ctx)
#define LOAD_FLOAT_XFORM_TEST(op) TEST(op, op, stfdx, XFORM, 0, 0)
#define STORE_FLOAT_XFORM_TEST(op) TEST(op, lfdx, op, XFORM, 0, 0)
+#define LOAD_MLS_PREFIX_TEST(op) TESTP(op, op, PSTD, 31, 31)
+#define STORE_MLS_PREFIX_TEST(op) TESTP(op, PLD, op, 31, 31)
+
+#define LOAD_8LS_PREFIX_TEST(op) TESTP(op, op, PSTD, 31, 31)
+#define STORE_8LS_PREFIX_TEST(op) TESTP(op, PLD, op, 31, 31)
+
+#define LOAD_FLOAT_MLS_PREFIX_TEST(op) TESTP(op, op, PSTFD, 0, 0)
+#define STORE_FLOAT_MLS_PREFIX_TEST(op) TESTP(op, PLFD, op, 0, 0)
+
+#define LOAD_VSX_8LS_PREFIX_TEST(op, tail) TESTP(op, op, PSTXV ## tail, 0, 32)
+#define STORE_VSX_8LS_PREFIX_TEST(op, tail) TESTP(op, PLXV ## tail, op, 32, 0)
/* FIXME: Unimplemented tests: */
// STORE_DFORM_TEST(stq) /* FIXME: need two registers for quad */
@@ -195,17 +225,18 @@ int do_test(char *test_name, void (*test_func)(char *, char *))
printf("\tDoing %s:\t", test_name);
- fd = open("/dev/fb0", O_RDWR);
+ fd = open(cipath, O_RDWR);
if (fd < 0) {
printf("\n");
- perror("Can't open /dev/fb0 now?");
+ perror("Can't open ci file now?");
return 1;
}
- ci0 = mmap(NULL, bufsize, PROT_WRITE, MAP_SHARED,
- fd, 0x0);
- ci1 = mmap(NULL, bufsize, PROT_WRITE, MAP_SHARED,
- fd, bufsize);
+ ci0 = mmap(NULL, bufsize, PROT_WRITE | PROT_READ, MAP_SHARED,
+ fd, cioffset);
+ ci1 = mmap(NULL, bufsize, PROT_WRITE | PROT_READ, MAP_SHARED,
+ fd, cioffset + bufsize);
+
if ((ci0 == MAP_FAILED) || (ci1 == MAP_FAILED)) {
printf("\n");
perror("mmap failed");
@@ -226,8 +257,12 @@ int do_test(char *test_name, void (*test_func)(char *, char *))
}
rc = 0;
- /* offset = 0 no alignment fault, so skip */
- for (offset = 1; offset < 16; offset++) {
+ /*
+ * offset = 0 is aligned but tests the workaround for the P9N
+ * DD2.1 vector CI load issue (see 5080332c2c89 "powerpc/64s:
+ * Add workaround for P9 vector CI load issue")
+ */
+ for (offset = 0; offset < 16; offset++) {
width = 16; /* vsx == 16 bytes */
r = 0;
@@ -270,11 +305,11 @@ int do_test(char *test_name, void (*test_func)(char *, char *))
return rc;
}
-static bool can_open_fb0(void)
+static bool can_open_cifile(void)
{
int fd;
- fd = open("/dev/fb0", O_RDWR);
+ fd = open(cipath, O_RDWR);
if (fd < 0)
return false;
@@ -286,7 +321,7 @@ int test_alignment_handler_vsx_206(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
printf("VSX: 2.06B\n");
@@ -304,7 +339,7 @@ int test_alignment_handler_vsx_207(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
printf("VSX: 2.07B\n");
@@ -320,7 +355,7 @@ int test_alignment_handler_vsx_300(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
printf("VSX: 3.00B\n");
@@ -348,11 +383,30 @@ int test_alignment_handler_vsx_300(void)
return rc;
}
+int test_alignment_handler_vsx_prefix(void)
+{
+ int rc = 0;
+
+ SKIP_IF(!can_open_cifile());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ printf("VSX: PREFIX\n");
+ LOAD_VSX_8LS_PREFIX_TEST(PLXSD, 0);
+ LOAD_VSX_8LS_PREFIX_TEST(PLXSSP, 0);
+ LOAD_VSX_8LS_PREFIX_TEST(PLXV0, 0);
+ LOAD_VSX_8LS_PREFIX_TEST(PLXV1, 1);
+ STORE_VSX_8LS_PREFIX_TEST(PSTXSD, 0);
+ STORE_VSX_8LS_PREFIX_TEST(PSTXSSP, 0);
+ STORE_VSX_8LS_PREFIX_TEST(PSTXV0, 0);
+ STORE_VSX_8LS_PREFIX_TEST(PSTXV1, 1);
+ return rc;
+}
+
int test_alignment_handler_integer(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
printf("Integer\n");
LOAD_DFORM_TEST(lbz);
@@ -380,7 +434,6 @@ int test_alignment_handler_integer(void)
LOAD_DFORM_TEST(ldu);
LOAD_XFORM_TEST(ldx);
LOAD_XFORM_TEST(ldux);
- LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stb);
STORE_XFORM_TEST(stbx);
STORE_DFORM_TEST(stbu);
@@ -399,7 +452,11 @@ int test_alignment_handler_integer(void)
STORE_XFORM_TEST(stdx);
STORE_DFORM_TEST(stdu);
STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+ LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stmw);
+#endif
return rc;
}
@@ -408,7 +465,7 @@ int test_alignment_handler_integer_206(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
printf("Integer: 2.06\n");
@@ -419,11 +476,32 @@ int test_alignment_handler_integer_206(void)
return rc;
}
+int test_alignment_handler_integer_prefix(void)
+{
+ int rc = 0;
+
+ SKIP_IF(!can_open_cifile());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ printf("Integer: PREFIX\n");
+ LOAD_MLS_PREFIX_TEST(PLBZ);
+ LOAD_MLS_PREFIX_TEST(PLHZ);
+ LOAD_MLS_PREFIX_TEST(PLHA);
+ LOAD_MLS_PREFIX_TEST(PLWZ);
+ LOAD_8LS_PREFIX_TEST(PLWA);
+ LOAD_8LS_PREFIX_TEST(PLD);
+ STORE_MLS_PREFIX_TEST(PSTB);
+ STORE_MLS_PREFIX_TEST(PSTH);
+ STORE_MLS_PREFIX_TEST(PSTW);
+ STORE_8LS_PREFIX_TEST(PSTD);
+ return rc;
+}
+
int test_alignment_handler_vmx(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC));
printf("VMX\n");
@@ -451,7 +529,7 @@ int test_alignment_handler_fp(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
printf("Floating point\n");
LOAD_FLOAT_DFORM_TEST(lfd);
@@ -479,7 +557,7 @@ int test_alignment_handler_fp_205(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_05));
printf("Floating point: 2.05\n");
@@ -497,7 +575,7 @@ int test_alignment_handler_fp_206(void)
{
int rc = 0;
- SKIP_IF(!can_open_fb0());
+ SKIP_IF(!can_open_cifile());
SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
printf("Floating point: 2.06\n");
@@ -507,13 +585,32 @@ int test_alignment_handler_fp_206(void)
return rc;
}
+
+int test_alignment_handler_fp_prefix(void)
+{
+ int rc = 0;
+
+ SKIP_IF(!can_open_cifile());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ printf("Floating point: PREFIX\n");
+ LOAD_FLOAT_DFORM_TEST(lfs);
+ LOAD_FLOAT_MLS_PREFIX_TEST(PLFS);
+ LOAD_FLOAT_MLS_PREFIX_TEST(PLFD);
+ STORE_FLOAT_MLS_PREFIX_TEST(PSTFS);
+ STORE_FLOAT_MLS_PREFIX_TEST(PSTFD);
+ return rc;
+}
+
void usage(char *prog)
{
- printf("Usage: %s [options]\n", prog);
+ printf("Usage: %s [options] [path [offset]]\n", prog);
printf(" -d Enable debug error output\n");
printf("\n");
- printf("This test requires a POWER8 or POWER9 CPU and a usable ");
- printf("framebuffer at /dev/fb0.\n");
+ printf("This test requires a POWER8, POWER9 or POWER10 CPU ");
+ printf("and either a usable framebuffer at /dev/fb0 or ");
+ printf("the path to usable cache inhibited memory and optional ");
+ printf("offset to be provided\n");
}
int main(int argc, char *argv[])
@@ -533,6 +630,13 @@ int main(int argc, char *argv[])
exit(1);
}
}
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ cipath = argv[0];
+ if (argc > 1)
+ cioffset = strtol(argv[1], 0, 0x10);
bufsize = getpagesize();
@@ -546,16 +650,22 @@ int main(int argc, char *argv[])
exit(1);
}
+ prefixes_enabled = have_hwcap2(PPC_FEATURE2_ARCH_3_1);
+
rc |= test_harness(test_alignment_handler_vsx_206,
"test_alignment_handler_vsx_206");
rc |= test_harness(test_alignment_handler_vsx_207,
"test_alignment_handler_vsx_207");
rc |= test_harness(test_alignment_handler_vsx_300,
"test_alignment_handler_vsx_300");
+ rc |= test_harness(test_alignment_handler_vsx_prefix,
+ "test_alignment_handler_vsx_prefix");
rc |= test_harness(test_alignment_handler_integer,
"test_alignment_handler_integer");
rc |= test_harness(test_alignment_handler_integer_206,
"test_alignment_handler_integer_206");
+ rc |= test_harness(test_alignment_handler_integer_prefix,
+ "test_alignment_handler_integer_prefix");
rc |= test_harness(test_alignment_handler_vmx,
"test_alignment_handler_vmx");
rc |= test_harness(test_alignment_handler_fp,
@@ -564,5 +674,7 @@ int main(int argc, char *argv[])
"test_alignment_handler_fp_205");
rc |= test_harness(test_alignment_handler_fp_206,
"test_alignment_handler_fp_206");
+ rc |= test_harness(test_alignment_handler_fp_prefix,
+ "test_alignment_handler_fp_prefix");
return rc;
}
diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore
index 9161679b1e1a..c9ce13983c99 100644
--- a/tools/testing/selftests/powerpc/benchmarks/.gitignore
+++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
gettimeofday
context_switch
fork
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index d40300a65b42..a32a6ab89914 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -2,6 +2,8 @@
TEST_GEN_PROGS := gettimeofday context_switch fork mmap_bench futex_bench null_syscall
TEST_GEN_FILES := exec_target
+TEST_FILES := settings
+
CFLAGS += -O2
top_srcdir = ../../../../..
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index a2e8c9da7fa5..96554e2794d1 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -19,6 +19,7 @@
#include <limits.h>
#include <sys/time.h>
#include <sys/syscall.h>
+#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/shm.h>
#include <linux/futex.h>
@@ -104,8 +105,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
{
- int pid;
- cpu_set_t cpuset;
+ int pid, ncpus;
+ cpu_set_t *cpuset;
+ size_t size;
pid = fork();
if (pid == -1) {
@@ -116,14 +118,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
if (pid)
return;
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
+ ncpus = get_nprocs();
+ size = CPU_ALLOC_SIZE(ncpus);
+ cpuset = CPU_ALLOC(ncpus);
+ if (!cpuset) {
+ perror("malloc");
+ exit(1);
+ }
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(cpu, size, cpuset);
- if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
+ if (sched_setaffinity(0, size, cpuset)) {
perror("sched_setaffinity");
+ CPU_FREE(cpuset);
exit(1);
}
+ CPU_FREE(cpuset);
fn(arg);
exit(0);
@@ -470,6 +481,12 @@ int main(int argc, char *argv[])
else
printf("futex");
+ if (!have_hwcap(PPC_FEATURE_HAS_ALTIVEC))
+ touch_altivec = 0;
+
+ if (!have_hwcap(PPC_FEATURE_HAS_VSX))
+ touch_vector = 0;
+
printf(" on cpus %d/%d touching FP:%s altivec:%s vector:%s vdso:%s\n",
cpu1, cpu2, touch_fp ? "yes" : "no", touch_altivec ? "yes" : "no",
touch_vector ? "yes" : "no", touch_vdso ? "yes" : "no");
diff --git a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
index 6b415683357b..580fcac0a09f 100644
--- a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
+++ b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
@@ -12,7 +12,7 @@ static int test_gettimeofday(void)
{
int i;
- struct timeval tv_start, tv_end;
+ struct timeval tv_start, tv_end, tv_diff;
gettimeofday(&tv_start, NULL);
@@ -20,7 +20,9 @@ static int test_gettimeofday(void)
gettimeofday(&tv_end, NULL);
}
- printf("time = %.6f\n", tv_end.tv_sec - tv_start.tv_sec + (tv_end.tv_usec - tv_start.tv_usec) * 1e-6);
+ timersub(&tv_start, &tv_end, &tv_diff);
+
+ printf("time = %.6f\n", tv_diff.tv_sec + (tv_diff.tv_usec) * 1e-6);
return 0;
}
diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
index 579f0215c6e7..9836838a529f 100644
--- a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
+++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
@@ -14,6 +14,7 @@
#include <time.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/syscall.h>
#include <signal.h>
static volatile int soak_done;
@@ -121,7 +122,7 @@ static void do_null_syscall(unsigned long nr)
unsigned long i;
for (i = 0; i < nr; i++)
- getppid();
+ syscall(__NR_gettid);
}
#define TIME(A, STR) \
diff --git a/tools/testing/selftests/powerpc/benchmarks/settings b/tools/testing/selftests/powerpc/benchmarks/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/cache_shape/.gitignore b/tools/testing/selftests/powerpc/cache_shape/.gitignore
index ec1848434be5..b385eee3012c 100644
--- a/tools/testing/selftests/powerpc/cache_shape/.gitignore
+++ b/tools/testing/selftests/powerpc/cache_shape/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
cache_shape
diff --git a/tools/testing/selftests/powerpc/copyloops/.gitignore b/tools/testing/selftests/powerpc/copyloops/.gitignore
index 12ef5b031974..7283e8b07b75 100644
--- a/tools/testing/selftests/powerpc/copyloops/.gitignore
+++ b/tools/testing/selftests/powerpc/copyloops/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
copyuser_64_t0
copyuser_64_t1
copyuser_64_t2
@@ -11,4 +12,5 @@ memcpy_p7_t1
copyuser_64_exc_t0
copyuser_64_exc_t1
copyuser_64_exc_t2
-memcpy_mcsafe_64
+copy_mc_64
+memmove_64
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index 0917983a1c78..77594e697f2f 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -12,8 +12,9 @@ ASFLAGS = $(CFLAGS) -Wa,-mpower4
TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
copyuser_p7_t0 copyuser_p7_t1 \
memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 \
- memcpy_p7_t0 memcpy_p7_t1 memcpy_mcsafe_64 \
- copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2
+ memcpy_p7_t0 memcpy_p7_t1 copy_mc_64 \
+ copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2 \
+ memmove_64
EXTRA_SOURCES := validate.c ../harness.c stubs.S
@@ -45,9 +46,9 @@ $(OUTPUT)/memcpy_p7_t%: memcpy_power7.S $(EXTRA_SOURCES)
-D SELFTEST_CASE=$(subst memcpy_p7_t,,$(notdir $@)) \
-o $@ $^
-$(OUTPUT)/memcpy_mcsafe_64: memcpy_mcsafe_64.S $(EXTRA_SOURCES)
+$(OUTPUT)/copy_mc_64: copy_mc_64.S $(EXTRA_SOURCES)
$(CC) $(CPPFLAGS) $(CFLAGS) \
- -D COPY_LOOP=test_memcpy_mcsafe \
+ -D COPY_LOOP=test_copy_mc_generic \
-o $@ $^
$(OUTPUT)/copyuser_64_exc_t%: copyuser_64.S exc_validate.c ../harness.c \
@@ -56,3 +57,9 @@ $(OUTPUT)/copyuser_64_exc_t%: copyuser_64.S exc_validate.c ../harness.c \
-D COPY_LOOP=test___copy_tofrom_user_base \
-D SELFTEST_CASE=$(subst copyuser_64_exc_t,,$(notdir $@)) \
-o $@ $^
+
+$(OUTPUT)/memmove_64: mem_64.S memcpy_64.S memmove_validate.c ../harness.c \
+ memcpy_stubs.S
+ $(CC) $(CPPFLAGS) $(CFLAGS) \
+ -D TEST_MEMMOVE=test_memmove \
+ -o $@ $^
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
index 58c1cef3e399..003e1b3d9300 100644
--- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
+++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
@@ -26,6 +26,7 @@
#define _GLOBAL(A) FUNC_START(test_ ## A)
#define _GLOBAL_TOC(A) _GLOBAL(A)
#define _GLOBAL_TOC_KASAN(A) _GLOBAL(A)
+#define _GLOBAL_KASAN(A) _GLOBAL(A)
#define PPC_MTOCRF(A, B) mtocrf A, B
diff --git a/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
new file mode 120000
index 000000000000..dcbe06d500fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
@@ -0,0 +1 @@
+../../../../../arch/powerpc/lib/copy_mc_64.S \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/copyloops/mem_64.S b/tools/testing/selftests/powerpc/copyloops/mem_64.S
new file mode 120000
index 000000000000..db254c9a5f5c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/mem_64.S
@@ -0,0 +1 @@
+../../../../../arch/powerpc/lib/mem_64.S \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S b/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S
deleted file mode 120000
index f0feef3062f6..000000000000
--- a/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../arch/powerpc/lib/memcpy_mcsafe_64.S \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/copyloops/memcpy_stubs.S b/tools/testing/selftests/powerpc/copyloops/memcpy_stubs.S
new file mode 100644
index 000000000000..d9baa832fa49
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/memcpy_stubs.S
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/ppc_asm.h>
+
+FUNC_START(memcpy)
+ b test_memcpy
+
+FUNC_START(backwards_memcpy)
+ b test_backwards_memcpy
diff --git a/tools/testing/selftests/powerpc/copyloops/memmove_validate.c b/tools/testing/selftests/powerpc/copyloops/memmove_validate.c
new file mode 100644
index 000000000000..1a23218b5757
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/memmove_validate.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include "utils.h"
+
+void *TEST_MEMMOVE(const void *s1, const void *s2, size_t n);
+
+#define BUF_LEN 65536
+#define MAX_OFFSET 512
+
+size_t max(size_t a, size_t b)
+{
+ if (a >= b)
+ return a;
+ return b;
+}
+
+static int testcase_run(void)
+{
+ size_t i, src_off, dst_off, len;
+
+ char *usermap = memalign(BUF_LEN, BUF_LEN);
+ char *kernelmap = memalign(BUF_LEN, BUF_LEN);
+
+ assert(usermap != NULL);
+ assert(kernelmap != NULL);
+
+ memset(usermap, 0, BUF_LEN);
+ memset(kernelmap, 0, BUF_LEN);
+
+ for (i = 0; i < BUF_LEN; i++) {
+ usermap[i] = i & 0xff;
+ kernelmap[i] = i & 0xff;
+ }
+
+ for (src_off = 0; src_off < MAX_OFFSET; src_off++) {
+ for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) {
+ for (len = 1; len < MAX_OFFSET - max(src_off, dst_off); len++) {
+
+ memmove(usermap + dst_off, usermap + src_off, len);
+ TEST_MEMMOVE(kernelmap + dst_off, kernelmap + src_off, len);
+ if (memcmp(usermap, kernelmap, MAX_OFFSET) != 0) {
+ printf("memmove failed at %ld %ld %ld\n",
+ src_off, dst_off, len);
+ abort();
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(testcase_run, "memmove");
+}
diff --git a/tools/testing/selftests/powerpc/dscr/.gitignore b/tools/testing/selftests/powerpc/dscr/.gitignore
index b585c6c1564a..1d08b15af697 100644
--- a/tools/testing/selftests/powerpc/dscr/.gitignore
+++ b/tools/testing/selftests/powerpc/dscr/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
dscr_default_test
dscr_explicit_test
dscr_inherit_exec_test
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile
index 5df476364b4d..845db6273a1b 100644
--- a/tools/testing/selftests/powerpc/dscr/Makefile
+++ b/tools/testing/selftests/powerpc/dscr/Makefile
@@ -3,9 +3,11 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \
dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \
dscr_sysfs_thread_test
+TEST_FILES := settings
+
top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
-$(TEST_GEN_PROGS): ../harness.c
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
index 288a4e2ad156..e76611e608af 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
@@ -63,6 +63,8 @@ int dscr_default(void)
unsigned long i, *status[THREADS];
unsigned long orig_dscr_default;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
orig_dscr_default = get_default_dscr();
/* Initial DSCR default */
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c b/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
index aefcd8d8759b..32fcf2b324b1 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
@@ -21,6 +21,8 @@ int dscr_explicit(void)
{
unsigned long i, dscr = 0;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
srand(getpid());
set_dscr(dscr);
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
index 7c1cb46397c6..c6a81b2d6b91 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
@@ -44,6 +44,8 @@ int dscr_inherit_exec(void)
unsigned long i, dscr = 0;
pid_t pid;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
for (i = 0; i < COUNT; i++) {
dscr++;
if (dscr > DSCR_MAX)
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
index 04297a69ab59..f9dfd3d3c2d5 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
@@ -22,6 +22,8 @@ int dscr_inherit(void)
unsigned long i, dscr = 0;
pid_t pid;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
srand(getpid());
set_dscr(dscr);
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
index 02f6b4efde14..fbbdffdb2e5d 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
@@ -77,6 +77,8 @@ int dscr_sysfs(void)
unsigned long orig_dscr_default;
int i, j;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
orig_dscr_default = get_default_dscr();
for (i = 0; i < COUNT; i++) {
for (j = 0; j < DSCR_MAX; j++) {
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
index 37be2c25f277..191ed126f118 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
@@ -56,6 +56,8 @@ int dscr_sysfs_thread(void)
unsigned long orig_dscr_default;
int i, j;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
orig_dscr_default = get_default_dscr();
for (i = 0; i < COUNT; i++) {
for (j = 0; j < DSCR_MAX; j++) {
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_user_test.c b/tools/testing/selftests/powerpc/dscr/dscr_user_test.c
index eaf785d11eed..e09072446dd3 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_user_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_user_test.c
@@ -36,6 +36,8 @@ int dscr_user(void)
{
int i;
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_DSCR));
+
check_dscr("");
for (i = 0; i < COUNT; i++) {
diff --git a/tools/testing/selftests/powerpc/dscr/settings b/tools/testing/selftests/powerpc/dscr/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/dscr/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/eeh/Makefile b/tools/testing/selftests/powerpc/eeh/Makefile
index b397babd569b..ae963eb2dc5b 100644
--- a/tools/testing/selftests/powerpc/eeh/Makefile
+++ b/tools/testing/selftests/powerpc/eeh/Makefile
@@ -3,7 +3,7 @@ noarg:
$(MAKE) -C ../
TEST_PROGS := eeh-basic.sh
-TEST_FILES := eeh-functions.sh
+TEST_FILES := eeh-functions.sh settings
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
index f988d2f42e8f..442b666ccdb5 100755
--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
@@ -3,24 +3,11 @@
. ./eeh-functions.sh
-if ! eeh_supported ; then
- echo "EEH not supported on this system, skipping"
- exit 0;
-fi
-
-if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
- [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
- echo "debugfs EEH testing files are missing. Is debugfs mounted?"
- exit 1;
-fi
+eeh_test_prep # NB: may exit
pre_lspci=`mktemp`
lspci > $pre_lspci
-# Bump the max freeze count to something absurd so we don't
-# trip over it while breaking things.
-echo 5000 > /sys/kernel/debug/powerpc/eeh_max_freezes
-
# record the devices that we break in here. Assuming everything
# goes to plan we should get them back once the recover process
# is finished.
@@ -28,29 +15,16 @@ devices=""
# Build up a list of candidate devices.
for dev in `ls -1 /sys/bus/pci/devices/ | grep '\.0$'` ; do
- # skip bridges since we can't recover them (yet...)
- if [ -e "/sys/bus/pci/devices/$dev/pci_bus" ] ; then
- echo "$dev, Skipped: bridge"
+ if ! eeh_can_break $dev ; then
continue;
fi
- # Skip VFs for now since we don't have a reliable way
- # to break them.
+ # Skip VFs for now since we don't have a reliable way to break them.
if [ -e "/sys/bus/pci/devices/$dev/physfn" ] ; then
echo "$dev, Skipped: virtfn"
continue;
fi
- # Don't inject errosr into an already-frozen PE. This happens with
- # PEs that contain multiple PCI devices (e.g. multi-function cards)
- # and injecting new errors during the recovery process will probably
- # result in the recovery failing and the device being marked as
- # failed.
- if ! pe_ok $dev ; then
- echo "$dev, Skipped: Bad initial PE state"
- continue;
- fi
-
echo "$dev, Added"
# Add to this list of device to check
@@ -79,4 +53,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
lspci | diff -u $pre_lspci -
rm -f $pre_lspci
-exit $failed
+test "$failed" -eq 0
+exit $?
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
index f52ed92b53e7..70daa3925dcb 100755..100644
--- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
@@ -1,16 +1,27 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
+export KSELFTESTS_SKIP=4
+
+log() {
+ echo >/dev/stderr $*
+}
+
pe_ok() {
local dev="$1"
local path="/sys/bus/pci/devices/$dev/eeh_pe_state"
- if ! [ -e "$path" ] ; then
+ # if a driver doesn't support the error handling callbacks then the
+ # device is recovered by removing and re-probing it. This causes the
+ # sysfs directory to disappear so read the PE state once and squash
+ # any potential error messages
+ local eeh_state="$(cat $path 2>/dev/null)"
+ if [ -z "$eeh_state" ]; then
return 1;
fi
- local fw_state="$(cut -d' ' -f1 < $path)"
- local sw_state="$(cut -d' ' -f2 < $path)"
+ local fw_state="$(echo $eeh_state | cut -d' ' -f1)"
+ local sw_state="$(echo $eeh_state | cut -d' ' -f2)"
# If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an
# error state or being recovered. Either way, not ok.
@@ -34,6 +45,52 @@ eeh_supported() {
grep -q 'EEH Subsystem is enabled' /proc/powerpc/eeh
}
+eeh_test_prep() {
+ if ! eeh_supported ; then
+ echo "EEH not supported on this system, skipping"
+ exit $KSELFTESTS_SKIP;
+ fi
+
+ if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
+ [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
+ log "debugfs EEH testing files are missing. Is debugfs mounted?"
+ exit $KSELFTESTS_SKIP;
+ fi
+
+ # Bump the max freeze count to something absurd so we don't
+ # trip over it while breaking things.
+ echo 5000 > /sys/kernel/debug/powerpc/eeh_max_freezes
+}
+
+eeh_can_break() {
+ # skip bridges since we can't recover them (yet...)
+ if [ -e "/sys/bus/pci/devices/$dev/pci_bus" ] ; then
+ log "$dev, Skipped: bridge"
+ return 1;
+ fi
+
+ # The ahci driver doesn't support error recovery. If the ahci device
+ # happens to be hosting the root filesystem, and then we go and break
+ # it the system will generally go down. We should probably fix that
+ # at some point
+ if [ "ahci" = "$(basename $(realpath /sys/bus/pci/devices/$dev/driver))" ] ; then
+ log "$dev, Skipped: ahci doesn't support recovery"
+ return 1;
+ fi
+
+ # Don't inject errosr into an already-frozen PE. This happens with
+ # PEs that contain multiple PCI devices (e.g. multi-function cards)
+ # and injecting new errors during the recovery process will probably
+ # result in the recovery failing and the device being marked as
+ # failed.
+ if ! pe_ok $dev ; then
+ log "$dev, Skipped: Bad initial PE state"
+ return 1;
+ fi
+
+ return 0
+}
+
eeh_one_dev() {
local dev="$1"
@@ -41,7 +98,7 @@ eeh_one_dev() {
# testing so check that the argument is a well-formed sysfs device
# name.
if ! test -e /sys/bus/pci/devices/$dev/ ; then
- echo "Error: '$dev' must be a sysfs device name (DDDD:BB:DD.F)"
+ log "Error: '$dev' must be a sysfs device name (DDDD:BB:DD.F)"
return 1;
fi
@@ -65,16 +122,124 @@ eeh_one_dev() {
if pe_ok $dev ; then
break;
fi
- echo "$dev, waited $i/${max_wait}"
+ log "$dev, waited $i/${max_wait}"
sleep 1
done
if ! pe_ok $dev ; then
- echo "$dev, Failed to recover!"
+ log "$dev, Failed to recover!"
return 1;
fi
- echo "$dev, Recovered after $i seconds"
+ log "$dev, Recovered after $i seconds"
return 0;
}
+eeh_has_driver() {
+ test -e /sys/bus/pci/devices/$1/driver;
+ return $?
+}
+
+eeh_can_recover() {
+ # we'll get an IO error if the device's current driver doesn't support
+ # error recovery
+ echo $1 > '/sys/kernel/debug/powerpc/eeh_dev_can_recover' 2>/dev/null
+
+ return $?
+}
+
+eeh_find_all_pfs() {
+ devices=""
+
+ # SR-IOV on pseries requires hypervisor support, so check for that
+ is_pseries=""
+ if grep -q pSeries /proc/cpuinfo ; then
+ if [ ! -f /proc/device-tree/rtas/ibm,open-sriov-allow-unfreeze ] ||
+ [ ! -f /proc/device-tree/rtas/ibm,open-sriov-map-pe-number ] ; then
+ return 1;
+ fi
+
+ is_pseries="true"
+ fi
+
+ for dev in `ls -1 /sys/bus/pci/devices/` ; do
+ sysfs="/sys/bus/pci/devices/$dev"
+ if [ ! -e "$sysfs/sriov_numvfs" ] ; then
+ continue
+ fi
+
+ # skip unsupported PFs on pseries
+ if [ -z "$is_pseries" ] &&
+ [ ! -f "$sysfs/of_node/ibm,is-open-sriov-pf" ] &&
+ [ ! -f "$sysfs/of_node/ibm,open-sriov-vf-bar-info" ] ; then
+ continue;
+ fi
+
+ # no driver, no vfs
+ if ! eeh_has_driver $dev ; then
+ continue
+ fi
+
+ devices="$devices $dev"
+ done
+
+ if [ -z "$devices" ] ; then
+ return 1;
+ fi
+
+ echo $devices
+ return 0;
+}
+
+# attempts to enable one VF on each PF so we can do VF specific tests.
+# stdout: list of enabled VFs, one per line
+# return code: 0 if vfs are found, 1 otherwise
+eeh_enable_vfs() {
+ pf_list="$(eeh_find_all_pfs)"
+
+ vfs=0
+ for dev in $pf_list ; do
+ pf_sysfs="/sys/bus/pci/devices/$dev"
+
+ # make sure we have a single VF
+ echo 0 > "$pf_sysfs/sriov_numvfs"
+ echo 1 > "$pf_sysfs/sriov_numvfs"
+ if [ "$?" != 0 ] ; then
+ log "Unable to enable VFs on $pf, skipping"
+ continue;
+ fi
+
+ vf="$(basename $(realpath "$pf_sysfs/virtfn0"))"
+ if [ $? != 0 ] ; then
+ log "unable to find enabled vf on $pf"
+ echo 0 > "$pf_sysfs/sriov_numvfs"
+ continue;
+ fi
+
+ if ! eeh_can_break $vf ; then
+ log "skipping "
+
+ echo 0 > "$pf_sysfs/sriov_numvfs"
+ continue;
+ fi
+
+ vfs="$((vfs + 1))"
+ echo $vf
+ done
+
+ test "$vfs" != 0
+ return $?
+}
+
+eeh_disable_vfs() {
+ pf_list="$(eeh_find_all_pfs)"
+ if [ -z "$pf_list" ] ; then
+ return 1;
+ fi
+
+ for dev in $pf_list ; do
+ echo 0 > "/sys/bus/pci/devices/$dev/sriov_numvfs"
+ done
+
+ return 0;
+}
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh b/tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh
new file mode 100755
index 000000000000..874c11953bb6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+. ./eeh-functions.sh
+
+eeh_test_prep # NB: may exit
+
+vf_list="$(eeh_enable_vfs)";
+if $? != 0 ; then
+ log "No usable VFs found. Skipping EEH unaware VF test"
+ exit $KSELFTESTS_SKIP;
+fi
+
+log "Enabled VFs: $vf_list"
+
+tested=0
+passed=0
+for vf in $vf_list ; do
+ log "Testing $vf"
+
+ if ! eeh_can_recover $vf ; then
+ log "Driver for $vf doesn't support error recovery, skipping"
+ continue;
+ fi
+
+ tested="$((tested + 1))"
+
+ log "Breaking $vf..."
+ if ! eeh_one_dev $vf ; then
+ log "$vf failed to recover"
+ continue;
+ fi
+
+ passed="$((passed + 1))"
+done
+
+eeh_disable_vfs
+
+if [ "$tested" == 0 ] ; then
+ echo "No VFs with EEH aware drivers found, skipping"
+ exit $KSELFTESTS_SKIP
+fi
+
+test "$failed" != 0
+exit $?;
diff --git a/tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh b/tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh
new file mode 100755
index 000000000000..8a4c147b9d43
--- /dev/null
+++ b/tools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+. ./eeh-functions.sh
+
+eeh_test_prep # NB: may exit
+
+vf_list="$(eeh_enable_vfs)";
+if $? != 0 ; then
+ log "No usable VFs found. Skipping EEH unaware VF test"
+ exit $KSELFTESTS_SKIP;
+fi
+
+log "Enabled VFs: $vf_list"
+
+failed=0
+for vf in $vf_list ; do
+ log "Testing $vf"
+
+ if eeh_can_recover $vf ; then
+ log "Driver for $vf supports error recovery. Unbinding..."
+ echo "$vf" > /sys/bus/pci/devices/$vf/driver/unbind
+ fi
+
+ log "Breaking $vf..."
+ if ! eeh_one_dev $vf ; then
+ log "$vf failed to recover"
+ failed="$((failed + 1))"
+ fi
+done
+
+eeh_disable_vfs
+
+test "$failed" != 0
+exit $?;
diff --git a/tools/testing/selftests/powerpc/eeh/settings b/tools/testing/selftests/powerpc/eeh/settings
new file mode 100644
index 000000000000..694d70710ff0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/eeh/settings
@@ -0,0 +1 @@
+timeout=300
diff --git a/tools/testing/selftests/powerpc/include/basic_asm.h b/tools/testing/selftests/powerpc/include/basic_asm.h
index 886dc026fe7a..26cde8ea1f49 100644
--- a/tools/testing/selftests/powerpc/include/basic_asm.h
+++ b/tools/testing/selftests/powerpc/include/basic_asm.h
@@ -5,6 +5,16 @@
#include <ppc-asm.h>
#include <asm/unistd.h>
+#ifdef __powerpc64__
+#define PPC_LL ld
+#define PPC_STL std
+#define PPC_STLU stdu
+#else
+#define PPC_LL lwz
+#define PPC_STL stw
+#define PPC_STLU stwu
+#endif
+
#define LOAD_REG_IMMEDIATE(reg, expr) \
lis reg, (expr)@highest; \
ori reg, reg, (expr)@higher; \
@@ -14,16 +24,20 @@
/*
* Note: These macros assume that variables being stored on the stack are
- * doublewords, while this is usually the case it may not always be the
+ * sizeof(long), while this is usually the case it may not always be the
* case for each use case.
*/
+#ifdef __powerpc64__
+
+// ABIv2
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define STACK_FRAME_MIN_SIZE 32
#define STACK_FRAME_TOC_POS 24
#define __STACK_FRAME_PARAM(_param) (32 + ((_param)*8))
#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
-#else
+
+#else // ABIv1 below
#define STACK_FRAME_MIN_SIZE 112
#define STACK_FRAME_TOC_POS 40
#define __STACK_FRAME_PARAM(i) (48 + ((i)*8))
@@ -34,7 +48,24 @@
*/
#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
(112 + ((_var_num)*8))
-#endif
+
+
+#endif // ABIv2
+
+// Common 64-bit
+#define STACK_FRAME_LR_POS 16
+#define STACK_FRAME_CR_POS 8
+
+#else // 32-bit below
+
+#define STACK_FRAME_MIN_SIZE 16
+#define STACK_FRAME_LR_POS 4
+
+#define __STACK_FRAME_PARAM(_param) (STACK_FRAME_MIN_SIZE + ((_param)*4))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*4))
+
+#endif // __powerpc64__
/* Parameter x saved to the stack */
#define STACK_FRAME_PARAM(var) __STACK_FRAME_PARAM(var)
@@ -42,8 +73,6 @@
/* Local variable x saved to the stack after x parameters */
#define STACK_FRAME_LOCAL(num_params, var) \
__STACK_FRAME_LOCAL(num_params, var)
-#define STACK_FRAME_LR_POS 16
-#define STACK_FRAME_CR_POS 8
/*
* It is very important to note here that _extra is the extra amount of
@@ -56,19 +85,21 @@
* preprocessed incorrectly, hence r0.
*/
#define PUSH_BASIC_STACK(_extra) \
- mflr r0; \
- std r0, STACK_FRAME_LR_POS(%r1); \
- stdu %r1, -(_extra + STACK_FRAME_MIN_SIZE)(%r1); \
- mfcr r0; \
- stw r0, STACK_FRAME_CR_POS(%r1); \
- std %r2, STACK_FRAME_TOC_POS(%r1);
+ mflr r0; \
+ PPC_STL r0, STACK_FRAME_LR_POS(%r1); \
+ PPC_STLU %r1, -(((_extra + 15) & ~15) + STACK_FRAME_MIN_SIZE)(%r1);
#define POP_BASIC_STACK(_extra) \
- ld %r2, STACK_FRAME_TOC_POS(%r1); \
- lwz r0, STACK_FRAME_CR_POS(%r1); \
- mtcr r0; \
- addi %r1, %r1, (_extra + STACK_FRAME_MIN_SIZE); \
- ld r0, STACK_FRAME_LR_POS(%r1); \
+ addi %r1, %r1, (((_extra + 15) & ~15) + STACK_FRAME_MIN_SIZE); \
+ PPC_LL r0, STACK_FRAME_LR_POS(%r1); \
mtlr r0;
+.macro OP_REGS op, reg_width, start_reg, end_reg, base_reg, base_reg_offset=0, skip=0
+ .set i, \start_reg
+ .rept (\end_reg - \start_reg + 1)
+ \op i, (\reg_width * (i - \skip) + \base_reg_offset)(\base_reg)
+ .set i, i + 1
+ .endr
+.endm
+
#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */
diff --git a/tools/testing/selftests/powerpc/include/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h
index f36061eb6f0f..4efa6314bd96 100644
--- a/tools/testing/selftests/powerpc/include/instructions.h
+++ b/tools/testing/selftests/powerpc/include/instructions.h
@@ -66,4 +66,81 @@ static inline int paste_last(void *i)
#define PPC_INST_PASTE __PASTE(0, 0, 0, 0)
#define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1)
+/* This defines the prefixed load/store instructions */
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+#else
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+#endif
+
+#define __PPC_RA(a) (((a) & 0x1f) << 16)
+#define __PPC_RS(s) (((s) & 0x1f) << 21)
+#define __PPC_RT(t) __PPC_RS(t)
+#define __PPC_PREFIX_R(r) (((r) & 0x1) << 20)
+
+#define PPC_PREFIX_MLS 0x06000000
+#define PPC_PREFIX_8LS 0x04000000
+
+#define PPC_INST_LBZ 0x88000000
+#define PPC_INST_LHZ 0xa0000000
+#define PPC_INST_LHA 0xa8000000
+#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_STB 0x98000000
+#define PPC_INST_STH 0xb0000000
+#define PPC_INST_STW 0x90000000
+#define PPC_INST_STD 0xf8000000
+#define PPC_INST_LFS 0xc0000000
+#define PPC_INST_LFD 0xc8000000
+#define PPC_INST_STFS 0xd0000000
+#define PPC_INST_STFD 0xd8000000
+
+#define PREFIX_MLS(instr, t, a, r, d) stringify_in_c(.balign 64, , 4;) \
+ stringify_in_c(.long PPC_PREFIX_MLS | \
+ __PPC_PREFIX_R(r) | \
+ (((d) >> 16) & 0x3ffff);) \
+ stringify_in_c(.long (instr) | \
+ __PPC_RT(t) | \
+ __PPC_RA(a) | \
+ ((d) & 0xffff);\n)
+
+#define PREFIX_8LS(instr, t, a, r, d) stringify_in_c(.balign 64, , 4;) \
+ stringify_in_c(.long PPC_PREFIX_8LS | \
+ __PPC_PREFIX_R(r) | \
+ (((d) >> 16) & 0x3ffff);) \
+ stringify_in_c(.long (instr) | \
+ __PPC_RT(t) | \
+ __PPC_RA(a) | \
+ ((d) & 0xffff);\n)
+
+/* Prefixed Integer Load/Store instructions */
+#define PLBZ(t, a, r, d) PREFIX_MLS(PPC_INST_LBZ, t, a, r, d)
+#define PLHZ(t, a, r, d) PREFIX_MLS(PPC_INST_LHZ, t, a, r, d)
+#define PLHA(t, a, r, d) PREFIX_MLS(PPC_INST_LHA, t, a, r, d)
+#define PLWZ(t, a, r, d) PREFIX_MLS(PPC_INST_LWZ, t, a, r, d)
+#define PLWA(t, a, r, d) PREFIX_8LS(0xa4000000, t, a, r, d)
+#define PLD(t, a, r, d) PREFIX_8LS(0xe4000000, t, a, r, d)
+#define PLQ(t, a, r, d) PREFIX_8LS(0xe0000000, t, a, r, d)
+#define PSTB(s, a, r, d) PREFIX_MLS(PPC_INST_STB, s, a, r, d)
+#define PSTH(s, a, r, d) PREFIX_MLS(PPC_INST_STH, s, a, r, d)
+#define PSTW(s, a, r, d) PREFIX_MLS(PPC_INST_STW, s, a, r, d)
+#define PSTD(s, a, r, d) PREFIX_8LS(0xf4000000, s, a, r, d)
+#define PSTQ(s, a, r, d) PREFIX_8LS(0xf0000000, s, a, r, d)
+
+/* Prefixed Floating-Point Load/Store Instructions */
+#define PLFS(frt, a, r, d) PREFIX_MLS(PPC_INST_LFS, frt, a, r, d)
+#define PLFD(frt, a, r, d) PREFIX_MLS(PPC_INST_LFD, frt, a, r, d)
+#define PSTFS(frs, a, r, d) PREFIX_MLS(PPC_INST_STFS, frs, a, r, d)
+#define PSTFD(frs, a, r, d) PREFIX_MLS(PPC_INST_STFD, frs, a, r, d)
+
+/* Prefixed VSX Load/Store Instructions */
+#define PLXSD(vrt, a, r, d) PREFIX_8LS(0xa8000000, vrt, a, r, d)
+#define PLXSSP(vrt, a, r, d) PREFIX_8LS(0xac000000, vrt, a, r, d)
+#define PLXV0(s, a, r, d) PREFIX_8LS(0xc8000000, s, a, r, d)
+#define PLXV1(s, a, r, d) PREFIX_8LS(0xcc000000, s, a, r, d)
+#define PSTXSD(vrs, a, r, d) PREFIX_8LS(0xb8000000, vrs, a, r, d)
+#define PSTXSSP(vrs, a, r, d) PREFIX_8LS(0xbc000000, vrs, a, r, d)
+#define PSTXV0(s, a, r, d) PREFIX_8LS(0xd8000000, s, a, r, d)
+#define PSTXV1(s, a, r, d) PREFIX_8LS(0xdc000000, s, a, r, d)
+
#endif /* _SELFTESTS_POWERPC_INSTRUCTIONS_H */
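For illustration, the prefixed load/store macros above expand, on the C side, into assembler strings that can be embedded in inline asm. The helper below is only a minimal sketch: the function name and register choices are hypothetical, and it assumes an ISA v3.1 (POWER10) CPU, since older CPUs take SIGILL on prefixed instructions.

#include "instructions.h"

/* Hypothetical sketch: prefixed load word (plwz) of *addr into r4 via r3. */
static inline unsigned int plwz_demo(unsigned int *addr)
{
	unsigned long val;

	asm volatile(
		"mr 3, %1 ;"		/* base address into r3 */
		PLWZ(4, 3, 0, 0)	/* plwz r4, 0(r3), prefix R=0 */
		"mr %0, 4 ;"		/* copy the result out of r4 */
		: "=r"(val)
		: "r"(addr)
		: "r3", "r4", "memory");

	return (unsigned int)val;
}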
diff --git a/tools/testing/selftests/powerpc/include/pkeys.h b/tools/testing/selftests/powerpc/include/pkeys.h
new file mode 100644
index 000000000000..3312cb1b058d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/pkeys.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020, Sandipan Das, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_PKEYS_H
+#define _SELFTESTS_POWERPC_PKEYS_H
+
+#include <sys/mman.h>
+
+#include "reg.h"
+#include "utils.h"
+
+/*
+ * Older versions of libc use the Intel-specific access rights.
+ * Hence, override the definitions as they might be incorrect.
+ */
+#undef PKEY_DISABLE_ACCESS
+#define PKEY_DISABLE_ACCESS 0x3
+
+#undef PKEY_DISABLE_WRITE
+#define PKEY_DISABLE_WRITE 0x2
+
+#undef PKEY_DISABLE_EXECUTE
+#define PKEY_DISABLE_EXECUTE 0x4
+
+/* Older versions of libc do not define this */
+#ifndef SEGV_PKUERR
+#define SEGV_PKUERR 4
+#endif
+
+#define SI_PKEY_OFFSET 0x20
+
+#define __NR_pkey_mprotect 386
+#define __NR_pkey_alloc 384
+#define __NR_pkey_free 385
+
+#define PKEY_BITS_PER_PKEY 2
+#define NR_PKEYS 32
+#define PKEY_BITS_MASK ((1UL << PKEY_BITS_PER_PKEY) - 1)
+
+static inline unsigned long pkeyreg_get(void)
+{
+ return mfspr(SPRN_AMR);
+}
+
+static inline void pkeyreg_set(unsigned long amr)
+{
+ set_amr(amr);
+}
+
+void pkey_set_rights(int pkey, unsigned long rights)
+{
+ unsigned long amr, shift;
+
+ shift = (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY;
+ amr = pkeyreg_get();
+ amr &= ~(PKEY_BITS_MASK << shift);
+ amr |= (rights & PKEY_BITS_MASK) << shift;
+ pkeyreg_set(amr);
+}
+
+int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey)
+{
+ return syscall(__NR_pkey_mprotect, addr, len, prot, pkey);
+}
+
+int sys_pkey_alloc(unsigned long flags, unsigned long rights)
+{
+ return syscall(__NR_pkey_alloc, flags, rights);
+}
+
+int sys_pkey_free(int pkey)
+{
+ return syscall(__NR_pkey_free, pkey);
+}
+
+int pkeys_unsupported(void)
+{
+ bool hash_mmu = false;
+ int pkey;
+
+ /* Protection keys are currently supported on Hash MMU only */
+ FAIL_IF(using_hash_mmu(&hash_mmu));
+ SKIP_IF(!hash_mmu);
+
+ /* Check if the system call is supported */
+ pkey = sys_pkey_alloc(0, 0);
+ SKIP_IF(pkey < 0);
+ sys_pkey_free(pkey);
+
+ return 0;
+}
+
+int siginfo_pkey(siginfo_t *si)
+{
+ /*
+ * In older versions of libc, siginfo_t does not have si_pkey as
+ * a member.
+ */
+#ifdef si_pkey
+ return si->si_pkey;
+#else
+ return *((int *)(((char *) si) + SI_PKEY_OFFSET));
+#endif
+}
+
+#define pkey_rights(r) ({ \
+ static char buf[4] = "rwx"; \
+ unsigned int amr_bits; \
+ if ((r) & PKEY_DISABLE_EXECUTE) \
+ buf[2] = '-'; \
+ amr_bits = (r) & PKEY_BITS_MASK; \
+ if (amr_bits & PKEY_DISABLE_WRITE) \
+ buf[1] = '-'; \
+ if (amr_bits & PKEY_DISABLE_ACCESS & ~PKEY_DISABLE_WRITE) \
+ buf[0] = '-'; \
+ buf; \
+})
+
+unsigned long next_pkey_rights(unsigned long rights)
+{
+ if (rights == PKEY_DISABLE_ACCESS)
+ return PKEY_DISABLE_EXECUTE;
+ else if (rights == (PKEY_DISABLE_ACCESS | PKEY_DISABLE_EXECUTE))
+ return 0;
+
+ if ((rights & PKEY_BITS_MASK) == 0)
+ rights |= PKEY_DISABLE_WRITE;
+ else if ((rights & PKEY_BITS_MASK) == PKEY_DISABLE_WRITE)
+ rights |= PKEY_DISABLE_ACCESS;
+
+ return rights;
+}
+
+#endif /* _SELFTESTS_POWERPC_PKEYS_H */
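To show how these helpers compose, here is a minimal usage sketch (hypothetical, not part of the patch, with error handling trimmed): allocate a key with writes disabled, attach it to a page, then clear its AMR bits again so a write succeeds.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

#include "pkeys.h"

static int pkey_demo(void)
{
	size_t pgsize = getpagesize();
	char *p;
	int pkey;

	if (pkeys_unsupported())	/* Radix MMU or missing pkey syscalls */
		return 1;

	p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	pkey = sys_pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0)
		return 1;

	/* Attach the key; writes to p would now raise SEGV_PKUERR */
	sys_pkey_mprotect(p, pgsize, PROT_READ | PROT_WRITE, pkey);
	printf("pkey %d rights: %s\n", pkey, pkey_rights(PKEY_DISABLE_WRITE));

	/* Clear the AMR bits for this key so the write below succeeds */
	pkey_set_rights(pkey, 0);
	p[0] = 1;

	sys_pkey_free(pkey);
	munmap(p, pgsize);
	return 0;
}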
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
index 022c5076b2c5..d5a547f72669 100644
--- a/tools/testing/selftests/powerpc/include/reg.h
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -52,11 +52,24 @@
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_TAR 0x32f /* Target Address Register */
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF)
+#define SPRN_PVR 0x11F
+
+#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
+#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
+#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
+
#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */
#define SPRN_DSCR 0x03 /* Data Stream Control Register */
#define SPRN_PPR 896 /* Program Priority Register */
#define SPRN_AMR 13 /* Authority Mask Register - problem state */
+#define set_amr(v) asm volatile("isync;" \
+ "mtspr " __stringify(SPRN_AMR) ",%0;" \
+ "isync" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+
/* TEXASR register bits */
#define TEXASR_FC 0xFE00000000000000
#define TEXASR_FP 0x0100000000000000
@@ -78,6 +91,7 @@
#define TEXASR_ROT 0x0000000002000000
/* MSR register bits */
+#define MSR_HV (1ul << 60) /* Hypervisor state */
#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
#define MSR_TS_T_LG 34 /* Trans Mem state: Active */
@@ -113,45 +127,44 @@
"li 30, %[" #_asm_symbol_name_immed "];" \
"li 31, %[" #_asm_symbol_name_immed "];"
-#define ASM_LOAD_FPR_SINGLE_PRECISION(_asm_symbol_name_addr) \
- "lfs 0, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 1, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 2, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 3, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 4, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 5, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 6, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 7, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 8, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 9, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 10, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 11, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 12, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 13, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 14, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 15, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 16, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 17, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 18, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 19, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 20, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 21, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 22, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 23, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 24, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 25, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 26, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 27, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 28, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 29, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 30, 0(%[" #_asm_symbol_name_addr "]);" \
- "lfs 31, 0(%[" #_asm_symbol_name_addr "]);"
+#define ASM_LOAD_FPR(_asm_symbol_name_addr) \
+ "lfd 0, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 1, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 2, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 3, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 4, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 5, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 6, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 7, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 8, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 9, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 10, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 11, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 12, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 13, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 14, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 15, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 16, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 17, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 18, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 19, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 20, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 21, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 22, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 23, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 24, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 25, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 26, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 27, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 28, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 29, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 30, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 31, 0(%[" #_asm_symbol_name_addr "]);"
#ifndef __ASSEMBLER__
void store_gpr(unsigned long *addr);
void load_gpr(unsigned long *addr);
-void load_fpr_single_precision(float *addr);
-void store_fpr_single_precision(float *addr);
+void store_fpr(double *addr);
#endif /* end of __ASSEMBLER__ */
#endif /* _SELFTESTS_POWERPC_REG_H */
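A short hypothetical snippet showing how the new PVR macros and set_amr() might be used from a test. It assumes mfspr() is provided elsewhere in reg.h, and reading the PVR from problem state relies on the kernel emulating the mfspr.

#include <stdio.h>

#include "reg.h"

static void show_cpu_and_open_amr(void)
{
	unsigned long pvr = mfspr(SPRN_PVR);

	printf("PVR 0x%lx: version 0x%lx, major %lu, minor %lu\n",
	       pvr, PVR_VER(pvr), PVR_MAJ(pvr), PVR_MIN(pvr));

	/* Drop all AMR-based read/write restrictions for every pkey */
	set_amr(0);
}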
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index e089a0c30d9a..e222a5858450 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -12,6 +12,7 @@
#include <stdbool.h>
#include <linux/auxvec.h>
#include <linux/perf_event.h>
+#include <asm/cputable.h>
#include "reg.h"
/* Avoid headaches with PRI?64 - just use %ll? always */
@@ -35,13 +36,27 @@ int pick_online_cpu(void);
int read_debugfs_file(char *debugfs_file, int *result);
int write_debugfs_file(char *debugfs_file, int result);
int read_sysfs_file(char *debugfs_file, char *result, size_t result_size);
-void set_dscr(unsigned long val);
int perf_event_open_counter(unsigned int type,
unsigned long config, int group_fd);
int perf_event_enable(int fd);
int perf_event_disable(int fd);
int perf_event_reset(int fd);
+struct perf_event_read {
+ __u64 nr;
+ __u64 l1d_misses;
+};
+
+#if !defined(__GLIBC_PREREQ) || !__GLIBC_PREREQ(2, 30)
+#include <unistd.h>
+#include <sys/syscall.h>
+
+static inline pid_t gettid(void)
+{
+ return syscall(SYS_gettid);
+}
+#endif
+
static inline bool have_hwcap(unsigned long ftr)
{
return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr;
@@ -59,7 +74,18 @@ static inline bool have_hwcap2(unsigned long ftr2)
}
#endif
+static inline char *auxv_base_platform(void)
+{
+ return ((char *)get_auxv_entry(AT_BASE_PLATFORM));
+}
+
+static inline char *auxv_platform(void)
+{
+ return ((char *)get_auxv_entry(AT_PLATFORM));
+}
+
bool is_ppc64le(void);
+int using_hash_mmu(bool *using_hash);
/* Yes, this is evil */
#define FAIL_IF(x) \
@@ -71,6 +97,15 @@ do { \
} \
} while (0)
+#define FAIL_IF_EXIT(x) \
+do { \
+ if ((x)) { \
+ fprintf(stderr, \
+ "[FAIL] Test FAILED on line %d\n", __LINE__); \
+ _exit(1); \
+ } \
+} while (0)
+
/* The test harness uses this, yes it's gross */
#define MAGIC_SKIP_RETURN_VALUE 99
@@ -96,11 +131,25 @@ do { \
#define _str(s) #s
#define str(s) _str(s)
+#define sigsafe_err(msg) ({ \
+ ssize_t nbytes __attribute__((unused)); \
+ nbytes = write(STDERR_FILENO, msg, strlen(msg)); })
+
/* POWER9 feature */
#ifndef PPC_FEATURE2_ARCH_3_00
#define PPC_FEATURE2_ARCH_3_00 0x00800000
#endif
+/* POWER10 feature */
+#ifndef PPC_FEATURE2_ARCH_3_1
+#define PPC_FEATURE2_ARCH_3_1 0x00040000
+#endif
+
+/* POWER10 features */
+#ifndef PPC_FEATURE2_MMA
+#define PPC_FEATURE2_MMA 0x00020000
+#endif
+
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR]
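A minimal, hypothetical test (names invented) illustrating how the new utils.h pieces combine with the existing harness: the POWER10 hwcap bits, the Hash/Radix MMU query and the gettid() fallback.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

#include "utils.h"

static int report_platform(void)
{
	bool hash = false;

	/* Needs a POWER10 / ISA v3.1 CPU, otherwise skip */
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));

	FAIL_IF(using_hash_mmu(&hash));

	printf("tid %d on %s (base %s), %s MMU\n", gettid(),
	       auxv_platform(), auxv_base_platform(),
	       hash ? "Hash" : "Radix");

	return 0;
}

int main(void)
{
	return test_harness(report_platform, "report_platform");
}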
diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S
index 9304ea7d59b9..6d1af4a9a6b4 100644
--- a/tools/testing/selftests/powerpc/lib/reg.S
+++ b/tools/testing/selftests/powerpc/lib/reg.S
@@ -53,79 +53,42 @@ FUNC_START(store_gpr)
blr
FUNC_END(store_gpr)
-/* Single Precision Float - float buf[32] */
-FUNC_START(load_fpr_single_precision)
- lfs 0, 0*4(3)
- lfs 1, 1*4(3)
- lfs 2, 2*4(3)
- lfs 3, 3*4(3)
- lfs 4, 4*4(3)
- lfs 5, 5*4(3)
- lfs 6, 6*4(3)
- lfs 7, 7*4(3)
- lfs 8, 8*4(3)
- lfs 9, 9*4(3)
- lfs 10, 10*4(3)
- lfs 11, 11*4(3)
- lfs 12, 12*4(3)
- lfs 13, 13*4(3)
- lfs 14, 14*4(3)
- lfs 15, 15*4(3)
- lfs 16, 16*4(3)
- lfs 17, 17*4(3)
- lfs 18, 18*4(3)
- lfs 19, 19*4(3)
- lfs 20, 20*4(3)
- lfs 21, 21*4(3)
- lfs 22, 22*4(3)
- lfs 23, 23*4(3)
- lfs 24, 24*4(3)
- lfs 25, 25*4(3)
- lfs 26, 26*4(3)
- lfs 27, 27*4(3)
- lfs 28, 28*4(3)
- lfs 29, 29*4(3)
- lfs 30, 30*4(3)
- lfs 31, 31*4(3)
+/* Double Precision Float - double buf[32] */
+FUNC_START(store_fpr)
+ stfd 0, 0*8(3)
+ stfd 1, 1*8(3)
+ stfd 2, 2*8(3)
+ stfd 3, 3*8(3)
+ stfd 4, 4*8(3)
+ stfd 5, 5*8(3)
+ stfd 6, 6*8(3)
+ stfd 7, 7*8(3)
+ stfd 8, 8*8(3)
+ stfd 9, 9*8(3)
+ stfd 10, 10*8(3)
+ stfd 11, 11*8(3)
+ stfd 12, 12*8(3)
+ stfd 13, 13*8(3)
+ stfd 14, 14*8(3)
+ stfd 15, 15*8(3)
+ stfd 16, 16*8(3)
+ stfd 17, 17*8(3)
+ stfd 18, 18*8(3)
+ stfd 19, 19*8(3)
+ stfd 20, 20*8(3)
+ stfd 21, 21*8(3)
+ stfd 22, 22*8(3)
+ stfd 23, 23*8(3)
+ stfd 24, 24*8(3)
+ stfd 25, 25*8(3)
+ stfd 26, 26*8(3)
+ stfd 27, 27*8(3)
+ stfd 28, 28*8(3)
+ stfd 29, 29*8(3)
+ stfd 30, 30*8(3)
+ stfd 31, 31*8(3)
blr
-FUNC_END(load_fpr_single_precision)
-
-/* Single Precision Float - float buf[32] */
-FUNC_START(store_fpr_single_precision)
- stfs 0, 0*4(3)
- stfs 1, 1*4(3)
- stfs 2, 2*4(3)
- stfs 3, 3*4(3)
- stfs 4, 4*4(3)
- stfs 5, 5*4(3)
- stfs 6, 6*4(3)
- stfs 7, 7*4(3)
- stfs 8, 8*4(3)
- stfs 9, 9*4(3)
- stfs 10, 10*4(3)
- stfs 11, 11*4(3)
- stfs 12, 12*4(3)
- stfs 13, 13*4(3)
- stfs 14, 14*4(3)
- stfs 15, 15*4(3)
- stfs 16, 16*4(3)
- stfs 17, 17*4(3)
- stfs 18, 18*4(3)
- stfs 19, 19*4(3)
- stfs 20, 20*4(3)
- stfs 21, 21*4(3)
- stfs 22, 22*4(3)
- stfs 23, 23*4(3)
- stfs 24, 24*4(3)
- stfs 25, 25*4(3)
- stfs 26, 26*4(3)
- stfs 27, 27*4(3)
- stfs 28, 28*4(3)
- stfs 29, 29*4(3)
- stfs 30, 30*4(3)
- stfs 31, 31*4(3)
- blr
-FUNC_END(store_fpr_single_precision)
+FUNC_END(store_fpr)
/* VMX/VSX registers - unsigned long buf[128] */
FUNC_START(loadvsx)
diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore
index 50ded63e25b7..07b4893ef7af 100644
--- a/tools/testing/selftests/powerpc/math/.gitignore
+++ b/tools/testing/selftests/powerpc/math/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
fpu_syscall
vmx_syscall
fpu_preempt
@@ -5,3 +6,5 @@ vmx_preempt
fpu_signal
vmx_signal
vsx_preempt
+fpu_denormal
+mma
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index 11a10d7a2bbd..3948f7c510aa 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
+TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt mma
top_srcdir = ../../../../..
include ../../lib.mk
@@ -11,9 +11,11 @@ $(OUTPUT)/fpu_syscall: fpu_asm.S
$(OUTPUT)/fpu_preempt: fpu_asm.S
$(OUTPUT)/fpu_signal: fpu_asm.S
-$(OUTPUT)/vmx_syscall: vmx_asm.S
-$(OUTPUT)/vmx_preempt: vmx_asm.S
-$(OUTPUT)/vmx_signal: vmx_asm.S
+$(OUTPUT)/vmx_syscall: vmx_asm.S ../utils.c
+$(OUTPUT)/vmx_preempt: vmx_asm.S ../utils.c
+$(OUTPUT)/vmx_signal: vmx_asm.S ../utils.c
$(OUTPUT)/vsx_preempt: CFLAGS += -mvsx
-$(OUTPUT)/vsx_preempt: vsx_asm.S
+$(OUTPUT)/vsx_preempt: vsx_asm.S ../utils.c
+
+$(OUTPUT)/mma: mma.c mma.S ../utils.c
diff --git a/tools/testing/selftests/powerpc/math/fpu_denormal.c b/tools/testing/selftests/powerpc/math/fpu_denormal.c
new file mode 100644
index 000000000000..5f96682abaa8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/fpu_denormal.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * This test attempts to cause an FP denormal exception on POWER8 CPUs.
+ * Unfortunately, if the denormal handler is not configured or working properly,
+ * this can cause a bad crash in kernel mode when the kernel tries to save the
+ * FP registers as the process exits.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "utils.h"
+
+static int test_denormal_fpu(void)
+{
+ unsigned int m32;
+ unsigned long m64;
+ volatile float f;
+ volatile double d;
+
+ /* try to induce lfs <denormal> ; stfd */
+
+ m32 = 0x00715fcf; /* random denormal */
+ memcpy((float *)&f, &m32, sizeof(f));
+ d = f;
+ memcpy(&m64, (double *)&d, sizeof(d));
+
+ FAIL_IF((long)(m64 != 0x380c57f3c0000000)); /* renormalised value */
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test_denormal_fpu, "fpu_denormal");
+}
diff --git a/tools/testing/selftests/powerpc/math/mma.S b/tools/testing/selftests/powerpc/math/mma.S
new file mode 100644
index 000000000000..61cc88b1b26b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/mma.S
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+ .global test_mma
+test_mma:
+ /* Load accumulator via VSX registers from image passed in r3 */
+ lxvh8x 4,0,3
+ lxvh8x 5,0,4
+
+ /* Clear and prime the accumulator (xxsetaccz) */
+ .long 0x7c030162
+
+ /* Prime the accumulator with MMA VSX move to accumulator
+ * X-form (xxmtacc) (not needed due to above zeroing) */
+ //.long 0x7c010162
+
+ /* xvi16ger2s */
+ .long 0xec042958
+
+ /* Deprime the accumulator - xxmfacc 0 */
+ .long 0x7c000162
+
+ /* Store result in image passed in r5 */
+ stxvw4x 0,0,5
+ addi 5,5,16
+ stxvw4x 1,0,5
+ addi 5,5,16
+ stxvw4x 2,0,5
+ addi 5,5,16
+ stxvw4x 3,0,5
+ addi 5,5,16
+
+ blr
diff --git a/tools/testing/selftests/powerpc/math/mma.c b/tools/testing/selftests/powerpc/math/mma.c
new file mode 100644
index 000000000000..3a71808c993f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/mma.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+#include <stdio.h>
+#include <stdint.h>
+
+#include "utils.h"
+
+extern void test_mma(uint16_t (*)[8], uint16_t (*)[8], uint32_t (*)[4*4]);
+
+static int mma(void)
+{
+ int i;
+ int rc = 0;
+ uint16_t x[] = {1, 0, 2, 0, 3, 0, 4, 0};
+ uint16_t y[] = {1, 0, 2, 0, 3, 0, 4, 0};
+ uint32_t z[4*4];
+ uint32_t exp[4*4] = {1, 2, 3, 4,
+ 2, 4, 6, 8,
+ 3, 6, 9, 12,
+ 4, 8, 12, 16};
+
+ SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_3_1), "Need ISAv3.1");
+ SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_MMA), "Need MMA");
+
+ test_mma(&x, &y, &z);
+
+ for (i = 0; i < 16; i++) {
+ printf("MMA[%d] = %d ", i, z[i]);
+
+ if (z[i] == exp[i]) {
+ printf(" (Correct)\n");
+ } else {
+ printf(" (Incorrect)\n");
+ rc = 1;
+ }
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(mma, "mma");
+}
diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
index 2e059f154e77..6761d6ce30ec 100644
--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
@@ -57,6 +57,9 @@ int test_preempt_vmx(void)
int i, rc, threads;
pthread_t *tids;
+ // vcmpequd used in vmx_asm.S is v2.07
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
tids = malloc(threads * sizeof(pthread_t));
FAIL_IF(!tids);
diff --git a/tools/testing/selftests/powerpc/math/vmx_signal.c b/tools/testing/selftests/powerpc/math/vmx_signal.c
index 785a48e0976f..b340a5c4e79d 100644
--- a/tools/testing/selftests/powerpc/math/vmx_signal.c
+++ b/tools/testing/selftests/powerpc/math/vmx_signal.c
@@ -96,6 +96,9 @@ int test_signal_vmx(void)
void *rc_p;
pthread_t *tids;
+ // vcmpequd used in vmx_asm.S is v2.07
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
tids = malloc(threads * sizeof(pthread_t));
FAIL_IF(!tids);
diff --git a/tools/testing/selftests/powerpc/math/vmx_syscall.c b/tools/testing/selftests/powerpc/math/vmx_syscall.c
index 9ee293cc868e..03c78dfe3444 100644
--- a/tools/testing/selftests/powerpc/math/vmx_syscall.c
+++ b/tools/testing/selftests/powerpc/math/vmx_syscall.c
@@ -49,9 +49,14 @@ int test_vmx_syscall(void)
* Setup an environment with much context switching
*/
pid_t pid2;
- pid_t pid = fork();
+ pid_t pid;
int ret;
int child_ret;
+
+ // vcmpequd used in vmx_asm.S is v2.07
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
+ pid = fork();
FAIL_IF(pid == -1);
pid2 = fork();
diff --git a/tools/testing/selftests/powerpc/math/vsx_preempt.c b/tools/testing/selftests/powerpc/math/vsx_preempt.c
index 63de9c6e2cd3..d1601bb889d4 100644
--- a/tools/testing/selftests/powerpc/math/vsx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vsx_preempt.c
@@ -92,6 +92,8 @@ int test_preempt_vsx(void)
int i, rc, threads;
pthread_t *tids;
+ SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_VSX));
+
threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR;
tids = malloc(threads * sizeof(pthread_t));
FAIL_IF(!tids);
diff --git a/tools/testing/selftests/powerpc/mce/.gitignore b/tools/testing/selftests/powerpc/mce/.gitignore
new file mode 100644
index 000000000000..f5921462a495
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mce/.gitignore
@@ -0,0 +1 @@
+inject-ra-err
diff --git a/tools/testing/selftests/powerpc/mce/Makefile b/tools/testing/selftests/powerpc/mce/Makefile
new file mode 100644
index 000000000000..2424513982d9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mce/Makefile
@@ -0,0 +1,7 @@
+#SPDX-License-Identifier: GPL-2.0-or-later
+
+TEST_GEN_PROGS := inject-ra-err
+
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/mce/inject-ra-err.c b/tools/testing/selftests/powerpc/mce/inject-ra-err.c
new file mode 100644
index 000000000000..94323c34d9a6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mce/inject-ra-err.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "vas-api.h"
+#include "utils.h"
+
+static bool faulted;
+
+static void sigbus_handler(int n, siginfo_t *info, void *ctxt_v)
+{
+ ucontext_t *ctxt = (ucontext_t *)ctxt_v;
+ struct pt_regs *regs = ctxt->uc_mcontext.regs;
+
+ faulted = true;
+ regs->nip += 4;
+}
+
+static int test_ra_error(void)
+{
+ struct vas_tx_win_open_attr attr;
+ int fd, *paste_addr;
+ char *devname = "/dev/crypto/nx-gzip";
+ struct sigaction act = {
+ .sa_sigaction = sigbus_handler,
+ .sa_flags = SA_SIGINFO,
+ };
+
+ memset(&attr, 0, sizeof(attr));
+ attr.version = 1;
+ attr.vas_id = 0;
+
+ SKIP_IF(access(devname, F_OK));
+
+ fd = open(devname, O_RDWR);
+ FAIL_IF(fd < 0);
+ FAIL_IF(ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0);
+ FAIL_IF(sigaction(SIGBUS, &act, NULL) != 0);
+
+ paste_addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0ULL);
+
+ /* The following assignment triggers exception */
+ mb();
+ *paste_addr = 1;
+ mb();
+
+ FAIL_IF(!faulted);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_ra_error, "inject-ra-err");
+}
+
diff --git a/tools/testing/selftests/powerpc/mce/vas-api.h b/tools/testing/selftests/powerpc/mce/vas-api.h
new file mode 120000
index 000000000000..1455c1bcd351
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mce/vas-api.h
@@ -0,0 +1 @@
+../../../../../arch/powerpc/include/uapi/asm/vas-api.h \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index 0ebeaea22641..4e1a294eec35 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
hugetlb_vs_thp_test
subpage_prot
tempfile
@@ -6,3 +7,9 @@ segv_errors
wild_bctr
large_vm_fork_separation
bad_accesses
+tlbie_test
+pkey_exec_prot
+pkey_siginfo
+stack_expansion_ldst
+stack_expansion_signal
+large_vm_gpr_corruption
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index b9103c4bb414..19dd0b2ea397 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -3,22 +3,36 @@ noarg:
$(MAKE) -C ../
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
- large_vm_fork_separation bad_accesses
+ large_vm_fork_separation bad_accesses exec_prot pkey_exec_prot \
+ pkey_siginfo stack_expansion_signal stack_expansion_ldst \
+ large_vm_gpr_corruption
+TEST_PROGS := stress_code_patching.sh
+
TEST_GEN_PROGS_EXTENDED := tlbie_test
TEST_GEN_FILES := tempfile
top_srcdir = ../../../../..
include ../../lib.mk
-$(TEST_GEN_PROGS): ../harness.c
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
$(OUTPUT)/prot_sao: ../utils.c
$(OUTPUT)/wild_bctr: CFLAGS += -m64
$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+$(OUTPUT)/large_vm_gpr_corruption: CFLAGS += -m64
$(OUTPUT)/bad_accesses: CFLAGS += -m64
+$(OUTPUT)/exec_prot: CFLAGS += -m64
+$(OUTPUT)/pkey_exec_prot: CFLAGS += -m64
+$(OUTPUT)/pkey_siginfo: CFLAGS += -m64
+
+$(OUTPUT)/stack_expansion_signal: ../utils.c ../pmu/lib.c
+
+$(OUTPUT)/stack_expansion_ldst: CFLAGS += -fno-stack-protector
+$(OUTPUT)/stack_expansion_ldst: ../utils.c
$(OUTPUT)/tempfile:
dd if=/dev/zero of=$@ bs=64k count=1
$(OUTPUT)/tlbie_test: LDLIBS += -lpthread
+$(OUTPUT)/pkey_siginfo: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/powerpc/mm/bad_accesses.c b/tools/testing/selftests/powerpc/mm/bad_accesses.c
index adc465f499ef..65d2148b05dc 100644
--- a/tools/testing/selftests/powerpc/mm/bad_accesses.c
+++ b/tools/testing/selftests/powerpc/mm/bad_accesses.c
@@ -38,7 +38,7 @@ static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
int bad_access(char *p, bool write)
{
- char x;
+ char x = 0;
fault_code = 0;
fault_addr = 0;
@@ -64,34 +64,6 @@ int bad_access(char *p, bool write)
return 0;
}
-static int using_hash_mmu(bool *using_hash)
-{
- char line[128];
- FILE *f;
- int rc;
-
- f = fopen("/proc/cpuinfo", "r");
- FAIL_IF(!f);
-
- rc = 0;
- while (fgets(line, sizeof(line), f) != NULL) {
- if (strcmp(line, "MMU : Hash\n") == 0) {
- *using_hash = true;
- goto out;
- }
-
- if (strcmp(line, "MMU : Radix\n") == 0) {
- *using_hash = false;
- goto out;
- }
- }
-
- rc = -1;
-out:
- fclose(f);
- return rc;
-}
-
static int test(void)
{
unsigned long i, j, addr, region_shift, page_shift, page_size;
@@ -167,5 +139,6 @@ static int test(void)
int main(void)
{
+ test_harness_set_timeout(300);
return test_harness(test, "bad_accesses");
}
diff --git a/tools/testing/selftests/powerpc/mm/exec_prot.c b/tools/testing/selftests/powerpc/mm/exec_prot.c
new file mode 100644
index 000000000000..db75b2225de1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/exec_prot.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2022, Nicholas Miehlbradt, IBM Corporation
+ * based on pkey_exec_prot.c
+ *
+ * Test if applying execute protection on pages works as expected.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include "pkeys.h"
+
+
+#define PPC_INST_NOP 0x60000000
+#define PPC_INST_TRAP 0x7fe00008
+#define PPC_INST_BLR 0x4e800020
+
+static volatile sig_atomic_t fault_code;
+static volatile sig_atomic_t remaining_faults;
+static volatile unsigned int *fault_addr;
+static unsigned long pgsize, numinsns;
+static unsigned int *insns;
+static bool pkeys_supported;
+
+static bool is_fault_expected(int fault_code)
+{
+ if (fault_code == SEGV_ACCERR)
+ return true;
+
+	/* Assume any pkey error is fine since the pkey_exec_prot test covers those cases */
+ if (fault_code == SEGV_PKUERR && pkeys_supported)
+ return true;
+
+ return false;
+}
+
+static void trap_handler(int signum, siginfo_t *sinfo, void *ctx)
+{
+ /* Check if this fault originated from the expected address */
+ if (sinfo->si_addr != (void *)fault_addr)
+ sigsafe_err("got a fault for an unexpected address\n");
+
+ _exit(1);
+}
+
+static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
+{
+ fault_code = sinfo->si_code;
+
+ /* Check if this fault originated from the expected address */
+ if (sinfo->si_addr != (void *)fault_addr) {
+ sigsafe_err("got a fault for an unexpected address\n");
+ _exit(1);
+ }
+
+ /* Check if too many faults have occurred for a single test case */
+ if (!remaining_faults) {
+ sigsafe_err("got too many faults for the same address\n");
+ _exit(1);
+ }
+
+
+ /* Restore permissions in order to continue */
+ if (is_fault_expected(fault_code)) {
+ if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE | PROT_EXEC)) {
+ sigsafe_err("failed to set access permissions\n");
+ _exit(1);
+ }
+ } else {
+ sigsafe_err("got a fault with an unexpected code\n");
+ _exit(1);
+ }
+
+ remaining_faults--;
+}
+
+static int check_exec_fault(int rights)
+{
+ /*
+ * Jump to the executable region.
+ *
+ * The first iteration also checks if the overwrite of the
+ * first instruction word from a trap to a no-op succeeded.
+ */
+ fault_code = -1;
+ remaining_faults = 0;
+ if (!(rights & PROT_EXEC))
+ remaining_faults = 1;
+
+ FAIL_IF(mprotect(insns, pgsize, rights) != 0);
+ asm volatile("mtctr %0; bctrl" : : "r"(insns));
+
+ FAIL_IF(remaining_faults != 0);
+ if (!(rights & PROT_EXEC))
+ FAIL_IF(!is_fault_expected(fault_code));
+
+ return 0;
+}
+
+static int test(void)
+{
+ struct sigaction segv_act, trap_act;
+ int i;
+
+ /* Skip the test if the CPU doesn't support Radix */
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
+
+ /* Check if pkeys are supported */
+ pkeys_supported = pkeys_unsupported() == 0;
+
+ /* Setup SIGSEGV handler */
+ segv_act.sa_handler = 0;
+ segv_act.sa_sigaction = segv_handler;
+ FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0);
+ segv_act.sa_flags = SA_SIGINFO;
+ segv_act.sa_restorer = 0;
+ FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0);
+
+ /* Setup SIGTRAP handler */
+ trap_act.sa_handler = 0;
+ trap_act.sa_sigaction = trap_handler;
+ FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0);
+ trap_act.sa_flags = SA_SIGINFO;
+ trap_act.sa_restorer = 0;
+ FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0);
+
+ /* Setup executable region */
+ pgsize = getpagesize();
+ numinsns = pgsize / sizeof(unsigned int);
+ insns = (unsigned int *)mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ FAIL_IF(insns == MAP_FAILED);
+
+ /* Write the instruction words */
+ for (i = 1; i < numinsns - 1; i++)
+ insns[i] = PPC_INST_NOP;
+
+ /*
+ * Set the first instruction as an unconditional trap. If
+ * the last write to this address succeeds, this should
+ * get overwritten by a no-op.
+ */
+ insns[0] = PPC_INST_TRAP;
+
+ /*
+ * Later, to jump to the executable region, we use a branch
+ * and link instruction (bctrl) which sets the return address
+ * automatically in LR. Use that to return back.
+ */
+ insns[numinsns - 1] = PPC_INST_BLR;
+
+ /*
+ * Pick the first instruction's address from the executable
+ * region.
+ */
+ fault_addr = insns;
+
+ /*
+ * Read an instruction word from the address when the page
+ * is execute only. This should generate an access fault.
+ */
+ fault_code = -1;
+ remaining_faults = 1;
+ printf("Testing read on --x, should fault...");
+ FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);
+ i = *fault_addr;
+ FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code));
+ printf("ok!\n");
+
+ /*
+ * Write an instruction word to the address when the page
+ * execute only. This should also generate an access fault.
+ */
+ fault_code = -1;
+ remaining_faults = 1;
+ printf("Testing write on --x, should fault...");
+ FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0);
+ *fault_addr = PPC_INST_NOP;
+ FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code));
+ printf("ok!\n");
+
+ printf("Testing exec on ---, should fault...");
+ FAIL_IF(check_exec_fault(PROT_NONE));
+ printf("ok!\n");
+
+ printf("Testing exec on r--, should fault...");
+ FAIL_IF(check_exec_fault(PROT_READ));
+ printf("ok!\n");
+
+ printf("Testing exec on -w-, should fault...");
+ FAIL_IF(check_exec_fault(PROT_WRITE));
+ printf("ok!\n");
+
+ printf("Testing exec on rw-, should fault...");
+ FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE));
+ printf("ok!\n");
+
+ printf("Testing exec on --x, should succeed...");
+ FAIL_IF(check_exec_fault(PROT_EXEC));
+ printf("ok!\n");
+
+ printf("Testing exec on r-x, should succeed...");
+ FAIL_IF(check_exec_fault(PROT_READ | PROT_EXEC));
+ printf("ok!\n");
+
+ printf("Testing exec on -wx, should succeed...");
+ FAIL_IF(check_exec_fault(PROT_WRITE | PROT_EXEC));
+ printf("ok!\n");
+
+ printf("Testing exec on rwx, should succeed...");
+ FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE | PROT_EXEC));
+ printf("ok!\n");
+
+ /* Cleanup */
+ FAIL_IF(munmap((void *)insns, pgsize));
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "exec_prot");
+}
diff --git a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
new file mode 100644
index 000000000000..7da515f1da72
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2022, Michael Ellerman, IBM Corp.
+//
+// Test that the 4PB address space SLB handling doesn't corrupt userspace registers
+// (r9-r13) due to an SLB fault while saving the PPR.
+//
+// The bug was introduced in f384796c4 ("powerpc/mm: Add support for handling > 512TB
+// address in SLB miss") and fixed in 4c2de74cc869 ("powerpc/64: Interrupts save PPR on
+// stack rather than thread_struct").
+//
+// To hit the bug requires the task struct and kernel stack to be in different segments.
+// Usually that requires more than 1TB of RAM, or if that's not practical, boot the kernel
+// with "disable_1tb_segments".
+//
+// The test works by creating mappings above 512TB, to trigger the large address space
+// support. It creates 64 mappings, double the size of the SLB, to cause SLB faults on
+// each access (assuming naive replacement). It then loops over those mappings touching
+// each, and checks that r9-r13 aren't corrupted.
+//
+// It then forks another child and tries again, because a new child process will get a new
+// kernel stack and thread struct allocated, which may be more optimally placed to trigger
+// the bug. It would probably be better to leave the previous child processes hanging
+// around, so that kernel stack & thread struct allocations are not reused, but that would
+// amount to a 30 second fork bomb. The current design reliably triggers the bug on
+// unpatched kernels.
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB
+#endif
+
+#define BASE_ADDRESS (1ul << 50) // 1PB
+#define STRIDE (2ul << 40) // 2TB
+#define SLB_SIZE 32
+#define NR_MAPPINGS (SLB_SIZE * 2)
+
+static volatile sig_atomic_t signaled;
+
+static void signal_handler(int sig)
+{
+ signaled = 1;
+}
+
+#define CHECK_REG(_reg) \
+ if (_reg != _reg##_orig) { \
+ printf(str(_reg) " corrupted! Expected 0x%lx != 0x%lx\n", _reg##_orig, \
+ _reg); \
+ _exit(1); \
+ }
+
+static int touch_mappings(void)
+{
+ unsigned long r9_orig, r10_orig, r11_orig, r12_orig, r13_orig;
+ unsigned long r9, r10, r11, r12, r13;
+ unsigned long addr, *p;
+ int i;
+
+ for (i = 0; i < NR_MAPPINGS; i++) {
+ addr = BASE_ADDRESS + (i * STRIDE);
+ p = (unsigned long *)addr;
+
+ asm volatile("mr %0, %%r9 ;" // Read original GPR values
+ "mr %1, %%r10 ;"
+ "mr %2, %%r11 ;"
+ "mr %3, %%r12 ;"
+ "mr %4, %%r13 ;"
+ "std %10, 0(%11) ;" // Trigger SLB fault
+ "mr %5, %%r9 ;" // Save possibly corrupted values
+ "mr %6, %%r10 ;"
+ "mr %7, %%r11 ;"
+ "mr %8, %%r12 ;"
+ "mr %9, %%r13 ;"
+ "mr %%r9, %0 ;" // Restore original values
+ "mr %%r10, %1 ;"
+ "mr %%r11, %2 ;"
+ "mr %%r12, %3 ;"
+ "mr %%r13, %4 ;"
+ : "=&b"(r9_orig), "=&b"(r10_orig), "=&b"(r11_orig),
+ "=&b"(r12_orig), "=&b"(r13_orig), "=&b"(r9), "=&b"(r10),
+ "=&b"(r11), "=&b"(r12), "=&b"(r13)
+ : "b"(i), "b"(p)
+ : "r9", "r10", "r11", "r12", "r13");
+
+ CHECK_REG(r9);
+ CHECK_REG(r10);
+ CHECK_REG(r11);
+ CHECK_REG(r12);
+ CHECK_REG(r13);
+ }
+
+ return 0;
+}
+
+static int test(void)
+{
+ unsigned long page_size, addr, *p;
+ struct sigaction action;
+ bool hash_mmu;
+ int i, status;
+ pid_t pid;
+
+ // This tests a hash MMU specific bug.
+ FAIL_IF(using_hash_mmu(&hash_mmu));
+ SKIP_IF(!hash_mmu);
+ // 4K kernels don't support 4PB address space
+ SKIP_IF(sysconf(_SC_PAGESIZE) < 65536);
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ for (i = 0; i < NR_MAPPINGS; i++) {
+ addr = BASE_ADDRESS + (i * STRIDE);
+
+ p = mmap((void *)addr, page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ printf("Error: couldn't mmap(), confirm kernel has 4PB support?\n");
+ return 1;
+ }
+ }
+
+ action.sa_handler = signal_handler;
+ action.sa_flags = SA_RESTART;
+ FAIL_IF(sigaction(SIGALRM, &action, NULL) < 0);
+
+ // Seen to always crash in under ~10s on affected kernels.
+ alarm(30);
+
+ while (!signaled) {
+ // Fork new processes, to increase the chance that we hit the case where
+ // the kernel stack and task struct are in different segments.
+ pid = fork();
+ if (pid == 0)
+ exit(touch_mappings());
+
+ FAIL_IF(waitpid(-1, &status, 0) == -1);
+ FAIL_IF(WIFSIGNALED(status));
+ FAIL_IF(!WIFEXITED(status));
+ FAIL_IF(WEXITSTATUS(status));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "large_vm_gpr_corruption");
+}
diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
new file mode 100644
index 000000000000..0af4f02669a1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2020, Sandipan Das, IBM Corp.
+ *
+ * Test if applying execute protection on pages using memory
+ * protection keys works as expected.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+
+#include <unistd.h>
+
+#include "pkeys.h"
+
+#define PPC_INST_NOP 0x60000000
+#define PPC_INST_TRAP 0x7fe00008
+#define PPC_INST_BLR 0x4e800020
+
+static volatile sig_atomic_t fault_pkey, fault_code, fault_type;
+static volatile sig_atomic_t remaining_faults;
+static volatile unsigned int *fault_addr;
+static unsigned long pgsize, numinsns;
+static unsigned int *insns;
+
+static void trap_handler(int signum, siginfo_t *sinfo, void *ctx)
+{
+ /* Check if this fault originated from the expected address */
+ if (sinfo->si_addr != (void *) fault_addr)
+ sigsafe_err("got a fault for an unexpected address\n");
+
+ _exit(1);
+}
+
+static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
+{
+ int signal_pkey;
+
+ signal_pkey = siginfo_pkey(sinfo);
+ fault_code = sinfo->si_code;
+
+ /* Check if this fault originated from the expected address */
+ if (sinfo->si_addr != (void *) fault_addr) {
+ sigsafe_err("got a fault for an unexpected address\n");
+ _exit(1);
+ }
+
+ /* Check if too many faults have occurred for a single test case */
+ if (!remaining_faults) {
+ sigsafe_err("got too many faults for the same address\n");
+ _exit(1);
+ }
+
+
+ /* Restore permissions in order to continue */
+ switch (fault_code) {
+ case SEGV_ACCERR:
+ if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE)) {
+ sigsafe_err("failed to set access permissions\n");
+ _exit(1);
+ }
+ break;
+ case SEGV_PKUERR:
+ if (signal_pkey != fault_pkey) {
+ sigsafe_err("got a fault for an unexpected pkey\n");
+ _exit(1);
+ }
+
+ switch (fault_type) {
+ case PKEY_DISABLE_ACCESS:
+ pkey_set_rights(fault_pkey, 0);
+ break;
+ case PKEY_DISABLE_EXECUTE:
+ /*
+ * Reassociate the exec-only pkey with the region
+ * to be able to continue. Unlike AMR, we cannot
+ * set IAMR directly from userspace to restore the
+ * permissions.
+ */
+ if (mprotect(insns, pgsize, PROT_EXEC)) {
+ sigsafe_err("failed to set execute permissions\n");
+ _exit(1);
+ }
+ break;
+ default:
+ sigsafe_err("got a fault with an unexpected type\n");
+ _exit(1);
+ }
+ break;
+ default:
+ sigsafe_err("got a fault with an unexpected code\n");
+ _exit(1);
+ }
+
+ remaining_faults--;
+}
+
+static int test(void)
+{
+ struct sigaction segv_act, trap_act;
+ unsigned long rights;
+ int pkey, ret, i;
+
+ ret = pkeys_unsupported();
+ if (ret)
+ return ret;
+
+ /* Setup SIGSEGV handler */
+ segv_act.sa_handler = 0;
+ segv_act.sa_sigaction = segv_handler;
+ FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0);
+ segv_act.sa_flags = SA_SIGINFO;
+ segv_act.sa_restorer = 0;
+ FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0);
+
+ /* Setup SIGTRAP handler */
+ trap_act.sa_handler = 0;
+ trap_act.sa_sigaction = trap_handler;
+ FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0);
+ trap_act.sa_flags = SA_SIGINFO;
+ trap_act.sa_restorer = 0;
+ FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0);
+
+ /* Setup executable region */
+ pgsize = getpagesize();
+ numinsns = pgsize / sizeof(unsigned int);
+ insns = (unsigned int *) mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ FAIL_IF(insns == MAP_FAILED);
+
+ /* Write the instruction words */
+ for (i = 1; i < numinsns - 1; i++)
+ insns[i] = PPC_INST_NOP;
+
+ /*
+ * Set the first instruction as an unconditional trap. If
+ * the last write to this address succeeds, this should
+ * get overwritten by a no-op.
+ */
+ insns[0] = PPC_INST_TRAP;
+
+ /*
+ * Later, to jump to the executable region, we use a branch
+ * and link instruction (bctrl) which sets the return address
+ * automatically in LR. Use that to return back.
+ */
+ insns[numinsns - 1] = PPC_INST_BLR;
+
+ /* Allocate a pkey that restricts execution */
+ rights = PKEY_DISABLE_EXECUTE;
+ pkey = sys_pkey_alloc(0, rights);
+ FAIL_IF(pkey < 0);
+
+ /*
+ * Pick the first instruction's address from the executable
+ * region.
+ */
+ fault_addr = insns;
+
+ /* The following two cases will avoid SEGV_PKUERR */
+ fault_type = -1;
+ fault_pkey = -1;
+
+ /*
+ * Read an instruction word from the address when AMR bits
+ * are not set i.e. the pkey permits both read and write
+ * access.
+ *
+ * This should not generate a fault as having PROT_EXEC
+ * implies PROT_READ on GNU systems. The pkey currently
+ * restricts execution only based on the IAMR bits. The
+ * AMR bits are cleared.
+ */
+ remaining_faults = 0;
+ FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
+ printf("read from %p, pkey permissions are %s\n", fault_addr,
+ pkey_rights(rights));
+ i = *fault_addr;
+ FAIL_IF(remaining_faults != 0);
+
+ /*
+ * Write an instruction word to the address when AMR bits
+ * are not set i.e. the pkey permits both read and write
+ * access.
+ *
+ * This should generate an access fault as having just
+ * PROT_EXEC also restricts writes. The pkey currently
+ * restricts execution only based on the IAMR bits. The
+ * AMR bits are cleared.
+ */
+ remaining_faults = 1;
+ FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
+ printf("write to %p, pkey permissions are %s\n", fault_addr,
+ pkey_rights(rights));
+ *fault_addr = PPC_INST_TRAP;
+ FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR);
+
+ /* The following three cases will generate SEGV_PKUERR */
+ rights |= PKEY_DISABLE_ACCESS;
+ fault_type = PKEY_DISABLE_ACCESS;
+ fault_pkey = pkey;
+
+ /*
+ * Read an instruction word from the address when AMR bits
+ * are set i.e. the pkey permits neither read nor write
+ * access.
+ *
+ * This should generate a pkey fault based on AMR bits only
+ * as having PROT_EXEC implicitly allows reads.
+ */
+ remaining_faults = 1;
+ FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
+ pkey_set_rights(pkey, rights);
+ printf("read from %p, pkey permissions are %s\n", fault_addr,
+ pkey_rights(rights));
+ i = *fault_addr;
+ FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR);
+
+ /*
+ * Write an instruction word to the address when AMR bits
+ * are set i.e. the pkey permits neither read nor write
+ * access.
+ *
+ * This should generate two faults. First, a pkey fault
+ * based on AMR bits and then an access fault since
+ * PROT_EXEC does not allow writes.
+ */
+ remaining_faults = 2;
+ FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
+ pkey_set_rights(pkey, rights);
+ printf("write to %p, pkey permissions are %s\n", fault_addr,
+ pkey_rights(rights));
+ *fault_addr = PPC_INST_NOP;
+ FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR);
+
+ /* Free the current pkey */
+ sys_pkey_free(pkey);
+
+ rights = 0;
+ do {
+ /*
+ * Allocate pkeys with all valid combinations of read,
+ * write and execute restrictions.
+ */
+ pkey = sys_pkey_alloc(0, rights);
+ FAIL_IF(pkey < 0);
+
+ /*
+ * Jump to the executable region. AMR bits may or may not
+ * be set but they should not affect execution.
+ *
+ * This should generate pkey faults based on IAMR bits which
+ * may be set to restrict execution.
+ *
+ * The first iteration also checks if the overwrite of the
+ * first instruction word from a trap to a no-op succeeded.
+ */
+ fault_pkey = pkey;
+ fault_type = -1;
+ remaining_faults = 0;
+ if (rights & PKEY_DISABLE_EXECUTE) {
+ fault_type = PKEY_DISABLE_EXECUTE;
+ remaining_faults = 1;
+ }
+
+ FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0);
+ printf("execute at %p, pkey permissions are %s\n", fault_addr,
+ pkey_rights(rights));
+ asm volatile("mtctr %0; bctrl" : : "r"(insns));
+ FAIL_IF(remaining_faults != 0);
+ if (rights & PKEY_DISABLE_EXECUTE)
+ FAIL_IF(fault_code != SEGV_PKUERR);
+
+ /* Free the current pkey */
+ sys_pkey_free(pkey);
+
+ /* Find next valid combination of pkey rights */
+ rights = next_pkey_rights(rights);
+ } while (rights);
+
+ /* Cleanup */
+ munmap((void *) insns, pgsize);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "pkey_exec_prot");
+}
diff --git a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
new file mode 100644
index 000000000000..2db76e56d4cb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020, Sandipan Das, IBM Corp.
+ *
+ * Test if the signal information reports the correct memory protection
+ * key upon getting a key access violation fault for a page that was
+ * attempted to be protected by two different keys from two competing
+ * threads at the same time.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+
+#include "pkeys.h"
+
+#define PPC_INST_NOP 0x60000000
+#define PPC_INST_BLR 0x4e800020
+#define PROT_RWX (PROT_READ | PROT_WRITE | PROT_EXEC)
+
+#define NUM_ITERATIONS 1000000
+
+static volatile sig_atomic_t perm_pkey, rest_pkey;
+static volatile sig_atomic_t rights, fault_count;
+static volatile unsigned int *volatile fault_addr;
+static pthread_barrier_t iteration_barrier;
+
+static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
+{
+ void *pgstart;
+ size_t pgsize;
+ int pkey;
+
+ pkey = siginfo_pkey(sinfo);
+
+ /* Check if this fault originated from a pkey access violation */
+ if (sinfo->si_code != SEGV_PKUERR) {
+ sigsafe_err("got a fault for an unexpected reason\n");
+ _exit(1);
+ }
+
+ /* Check if this fault originated from the expected address */
+ if (sinfo->si_addr != (void *) fault_addr) {
+ sigsafe_err("got a fault for an unexpected address\n");
+ _exit(1);
+ }
+
+ /* Check if this fault originated from the restrictive pkey */
+ if (pkey != rest_pkey) {
+ sigsafe_err("got a fault for an unexpected pkey\n");
+ _exit(1);
+ }
+
+ /* Check if too many faults have occurred for the same iteration */
+ if (fault_count > 0) {
+ sigsafe_err("got too many faults for the same address\n");
+ _exit(1);
+ }
+
+ pgsize = getpagesize();
+ pgstart = (void *) ((unsigned long) fault_addr & ~(pgsize - 1));
+
+ /*
+ * If the current fault occurred due to lack of execute rights,
+ * reassociate the page with the exec-only pkey since execute
+ * rights cannot be changed directly for the faulting pkey as
+ * IAMR is inaccessible from userspace.
+ *
+ * Otherwise, if the current fault occurred due to lack of
+ * read-write rights, change the AMR permission bits for the
+ * pkey.
+ *
+ * This will let the test continue.
+ */
+ if (rights == PKEY_DISABLE_EXECUTE &&
+ mprotect(pgstart, pgsize, PROT_EXEC))
+ _exit(1);
+ else
+ pkey_set_rights(pkey, 0);
+
+ fault_count++;
+}
+
+struct region {
+ unsigned long rights;
+ unsigned int *base;
+ size_t size;
+};
+
+static void *protect(void *p)
+{
+ unsigned long rights;
+ unsigned int *base;
+ size_t size;
+ int tid, i;
+
+ tid = gettid();
+ base = ((struct region *) p)->base;
+ size = ((struct region *) p)->size;
+ FAIL_IF_EXIT(!base);
+
+ /* No read, write and execute restrictions */
+ rights = 0;
+
+ printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights));
+
+ /* Allocate the permissive pkey */
+ perm_pkey = sys_pkey_alloc(0, rights);
+ FAIL_IF_EXIT(perm_pkey < 0);
+
+ /*
+ * Repeatedly try to protect the common region with a permissive
+ * pkey
+ */
+ for (i = 0; i < NUM_ITERATIONS; i++) {
+ /*
+ * Wait until the other thread has finished allocating the
+ * restrictive pkey or until the next iteration has begun
+ */
+ pthread_barrier_wait(&iteration_barrier);
+
+ /* Try to associate the permissive pkey with the region */
+ FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX,
+ perm_pkey));
+ }
+
+ /* Free the permissive pkey */
+ sys_pkey_free(perm_pkey);
+
+ return NULL;
+}
+
+static void *protect_access(void *p)
+{
+ size_t size, numinsns;
+ unsigned int *base;
+ int tid, i;
+
+ tid = gettid();
+ base = ((struct region *) p)->base;
+ size = ((struct region *) p)->size;
+ rights = ((struct region *) p)->rights;
+ numinsns = size / sizeof(base[0]);
+ FAIL_IF_EXIT(!base);
+
+ /* Allocate the restrictive pkey */
+ rest_pkey = sys_pkey_alloc(0, rights);
+ FAIL_IF_EXIT(rest_pkey < 0);
+
+ printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights));
+ printf("tid %d, %s randomly in range [%p, %p]\n", tid,
+ (rights == PKEY_DISABLE_EXECUTE) ? "execute" :
+ (rights == PKEY_DISABLE_WRITE) ? "write" : "read",
+ base, base + numinsns);
+
+ /*
+ * Repeatedly try to protect the common region with a restrictive
+ * pkey and read, write or execute from it
+ */
+ for (i = 0; i < NUM_ITERATIONS; i++) {
+ /*
+ * Wait until the other thread has finished allocating the
+ * permissive pkey or until the next iteration has begun
+ */
+ pthread_barrier_wait(&iteration_barrier);
+
+ /* Try to associate the restrictive pkey with the region */
+ FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX,
+ rest_pkey));
+
+ /* Choose a random instruction word address from the region */
+ fault_addr = base + (rand() % numinsns);
+ fault_count = 0;
+
+ switch (rights) {
+ /* Read protection test */
+ case PKEY_DISABLE_ACCESS:
+ /*
+ * Read an instruction word from the region and
+ * verify if it has not been overwritten to
+ * something unexpected
+ */
+ FAIL_IF_EXIT(*fault_addr != PPC_INST_NOP &&
+ *fault_addr != PPC_INST_BLR);
+ break;
+
+ /* Write protection test */
+ case PKEY_DISABLE_WRITE:
+ /*
+ * Write an instruction word to the region and
+ * verify if the overwrite has succeeded
+ */
+ *fault_addr = PPC_INST_BLR;
+ FAIL_IF_EXIT(*fault_addr != PPC_INST_BLR);
+ break;
+
+ /* Execute protection test */
+ case PKEY_DISABLE_EXECUTE:
+ /* Jump to the region and execute instructions */
+ asm volatile(
+ "mtctr %0; bctrl"
+ : : "r"(fault_addr) : "ctr", "lr");
+ break;
+ }
+
+ /*
+ * Restore the restrictions originally imposed by the
+ * restrictive pkey as the signal handler would have
+ * cleared out the corresponding AMR bits
+ */
+ pkey_set_rights(rest_pkey, rights);
+ }
+
+ /* Free restrictive pkey */
+ sys_pkey_free(rest_pkey);
+
+ return NULL;
+}
+
+static void reset_pkeys(unsigned long rights)
+{
+ int pkeys[NR_PKEYS], i;
+
+ /* Exhaustively allocate all available pkeys */
+ for (i = 0; i < NR_PKEYS; i++)
+ pkeys[i] = sys_pkey_alloc(0, rights);
+
+ /* Free all allocated pkeys */
+ for (i = 0; i < NR_PKEYS; i++)
+ sys_pkey_free(pkeys[i]);
+}
+
+static int test(void)
+{
+ pthread_t prot_thread, pacc_thread;
+ struct sigaction act;
+ pthread_attr_t attr;
+ size_t numinsns;
+ struct region r;
+ int ret, i;
+
+ srand(time(NULL));
+ ret = pkeys_unsupported();
+ if (ret)
+ return ret;
+
+ /* Allocate the region */
+ r.size = getpagesize();
+ r.base = mmap(NULL, r.size, PROT_RWX,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ FAIL_IF(r.base == MAP_FAILED);
+
+ /*
+ * Fill the region with no-ops with a branch at the end
+ * for returning to the caller
+ */
+ numinsns = r.size / sizeof(r.base[0]);
+ for (i = 0; i < numinsns - 1; i++)
+ r.base[i] = PPC_INST_NOP;
+ r.base[i] = PPC_INST_BLR;
+
+ /* Setup SIGSEGV handler */
+ act.sa_handler = 0;
+ act.sa_sigaction = segv_handler;
+ FAIL_IF(sigprocmask(SIG_SETMASK, 0, &act.sa_mask) != 0);
+ act.sa_flags = SA_SIGINFO;
+ act.sa_restorer = 0;
+ FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0);
+
+ /*
+ * For these tests, the parent process should clear all bits of
+ * AMR and IAMR, i.e. impose no restrictions, for all available
+ * pkeys. This will be the base for the initial AMR and IAMR
+ * values for all the test thread pairs.
+ *
+ * If the AMR and IAMR bits of all available pkeys are cleared
+ * before running the tests and a fault is generated when
+ * attempting to read, write or execute instructions from a
+ * pkey protected region, the pkey responsible for this must be
+ * the one from the protect-and-access thread since the other
+ * one is fully permissive. Despite that, if the pkey reported
+ * by siginfo is not the restrictive pkey, then there must be a
+ * kernel bug.
+ */
+ reset_pkeys(0);
+
+ /* Setup barrier for protect and protect-and-access threads */
+ FAIL_IF(pthread_attr_init(&attr) != 0);
+ FAIL_IF(pthread_barrier_init(&iteration_barrier, NULL, 2) != 0);
+
+ /* Setup and start protect and protect-and-read threads */
+ puts("starting thread pair (protect, protect-and-read)");
+ r.rights = PKEY_DISABLE_ACCESS;
+ FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0);
+ FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0);
+ FAIL_IF(pthread_join(prot_thread, NULL) != 0);
+ FAIL_IF(pthread_join(pacc_thread, NULL) != 0);
+
+ /* Setup and start protect and protect-and-write threads */
+ puts("starting thread pair (protect, protect-and-write)");
+ r.rights = PKEY_DISABLE_WRITE;
+ FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0);
+ FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0);
+ FAIL_IF(pthread_join(prot_thread, NULL) != 0);
+ FAIL_IF(pthread_join(pacc_thread, NULL) != 0);
+
+ /* Setup and start protect and protect-and-execute threads */
+ puts("starting thread pair (protect, protect-and-execute)");
+ r.rights = PKEY_DISABLE_EXECUTE;
+ FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0);
+ FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0);
+ FAIL_IF(pthread_join(prot_thread, NULL) != 0);
+ FAIL_IF(pthread_join(pacc_thread, NULL) != 0);
+
+ /* Cleanup */
+ FAIL_IF(pthread_attr_destroy(&attr) != 0);
+ FAIL_IF(pthread_barrier_destroy(&iteration_barrier) != 0);
+ munmap(r.base, r.size);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "pkey_siginfo");
+}
diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c
index e2eed65b7735..30b71b1d78d5 100644
--- a/tools/testing/selftests/powerpc/mm/prot_sao.c
+++ b/tools/testing/selftests/powerpc/mm/prot_sao.c
@@ -7,6 +7,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <unistd.h>
#include <asm/cputable.h>
@@ -18,8 +19,13 @@ int test_prot_sao(void)
{
char *p;
- /* 2.06 or later should support SAO */
- SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
+ /*
+ * SAO was introduced in 2.06 and removed in 3.1. It's disabled in
+ * guests/LPARs by default, so also skip if we are running in a guest.
+ */
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
+ have_hwcap2(PPC_FEATURE2_ARCH_3_1) ||
+ access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0);
/*
* Ensure we can ask for PROT_SAO.
diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c
new file mode 100644
index 000000000000..ed9143990888
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that loads/stores expand the stack segment, or trigger a SEGV, in
+ * various conditions.
+ *
+ * Based on test code by Tom Lane.
+ */
+
+#undef NDEBUG
+#include <assert.h>
+
+#include <err.h>
+#include <errno.h>
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#define _KB (1024)
+#define _MB (1024 * 1024)
+
+volatile char *stack_top_ptr;
+volatile unsigned long stack_top_sp;
+volatile char c;
+
+enum access_type {
+ LOAD,
+ STORE,
+};
+
+/*
+ * Consume stack until the stack pointer is below @target_sp, then do an access
+ * (load or store) at offset @delta from @stack_high, the base (highest
+ * address) of the stack segment.
+ */
+__attribute__ ((noinline))
+int consume_stack(unsigned long target_sp, unsigned long stack_high, int delta, enum access_type type)
+{
+ unsigned long target;
+ char stack_cur;
+
+ if ((unsigned long)&stack_cur > target_sp)
+ return consume_stack(target_sp, stack_high, delta, type);
+ else {
+ // We don't really need this, but without it GCC might not
+ // generate a recursive call above.
+ stack_top_ptr = &stack_cur;
+
+#ifdef __powerpc__
+ asm volatile ("mr %[sp], %%r1" : [sp] "=r" (stack_top_sp));
+#else
+ asm volatile ("mov %%rsp, %[sp]" : [sp] "=r" (stack_top_sp));
+#endif
+ target = stack_high - delta + 1;
+ volatile char *p = (char *)target;
+
+ if (type == STORE)
+ *p = c;
+ else
+ c = *p;
+
+ // Do something to prevent the stack frame being popped prior to
+ // our access above.
+ getpid();
+ }
+
+ return 0;
+}
+
+static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high)
+{
+ unsigned long start, end;
+ static char buf[4096];
+ char name[128];
+ FILE *f;
+ int rc;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f) {
+ perror("fopen");
+ return -1;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n",
+ &start, &end, name);
+ if (rc == 2)
+ continue;
+
+ if (rc != 3) {
+ printf("sscanf errored\n");
+ rc = -1;
+ break;
+ }
+
+ if (strstr(name, needle)) {
+ *low = start;
+ *high = end - 1;
+ rc = 0;
+ break;
+ }
+ }
+
+ fclose(f);
+
+ return rc;
+}
+
+int child(unsigned int stack_used, int delta, enum access_type type)
+{
+ unsigned long low, stack_high;
+
+ assert(search_proc_maps("[stack]", &low, &stack_high) == 0);
+
+ assert(consume_stack(stack_high - stack_used, stack_high, delta, type) == 0);
+
+ printf("Access OK: %s delta %-7d used size 0x%06x stack high 0x%lx top_ptr %p top sp 0x%lx actual used 0x%lx\n",
+ type == LOAD ? "load" : "store", delta, stack_used, stack_high,
+ stack_top_ptr, stack_top_sp, stack_high - stack_top_sp + 1);
+
+ return 0;
+}
+
+static int test_one(unsigned int stack_used, int delta, enum access_type type)
+{
+ pid_t pid;
+ int rc;
+
+ pid = fork();
+ if (pid == 0)
+ exit(child(stack_used, delta, type));
+
+ assert(waitpid(pid, &rc, 0) != -1);
+
+ if (WIFEXITED(rc) && WEXITSTATUS(rc) == 0)
+ return 0;
+
+ // We don't expect a non-zero exit that's not a signal
+ assert(!WIFEXITED(rc));
+
+ printf("Faulted: %s delta %-7d used size 0x%06x signal %d\n",
+ type == LOAD ? "load" : "store", delta, stack_used,
+ WTERMSIG(rc));
+
+ return 1;
+}
+
+// This is fairly arbitrary but is well below any of the targets below,
+// so that the delta between the stack pointer and the target is large.
+#define DEFAULT_SIZE (32 * _KB)
+
+static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur)
+{
+ unsigned long delta;
+
+ // We should be able to access anywhere within the rlimit
+ for (delta = page_size; delta <= rlim_cur; delta += page_size)
+ assert(test_one(DEFAULT_SIZE, delta, type) == 0);
+
+ assert(test_one(DEFAULT_SIZE, rlim_cur, type) == 0);
+
+ // But if we go past the rlimit it should fail
+ assert(test_one(DEFAULT_SIZE, rlim_cur + 1, type) != 0);
+}
+
+static int test(void)
+{
+ unsigned long page_size;
+ struct rlimit rlimit;
+
+ page_size = getpagesize();
+ getrlimit(RLIMIT_STACK, &rlimit);
+ printf("Stack rlimit is 0x%lx\n", rlimit.rlim_cur);
+
+ printf("Testing loads ...\n");
+ test_one_type(LOAD, page_size, rlimit.rlim_cur);
+ printf("Testing stores ...\n");
+ test_one_type(STORE, page_size, rlimit.rlim_cur);
+
+ printf("All OK\n");
+
+ return 0;
+}
+
+#ifdef __powerpc__
+#include "utils.h"
+
+int main(void)
+{
+ return test_harness(test, "stack_expansion_ldst");
+}
+#else
+int main(void)
+{
+ return test();
+}
+#endif
diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c b/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c
new file mode 100644
index 000000000000..c8b32a29e274
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that signal delivery is able to expand the stack segment without
+ * triggering a SEGV.
+ *
+ * Based on test code by Tom Lane.
+ */
+
+#include <err.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "../pmu/lib.h"
+#include "utils.h"
+
+#define _KB (1024)
+#define _MB (1024 * 1024)
+
+static char *stack_base_ptr;
+static char *stack_top_ptr;
+
+static volatile sig_atomic_t sig_occurred = 0;
+
+static void sigusr1_handler(int signal_arg)
+{
+ sig_occurred = 1;
+}
+
+static int consume_stack(unsigned int stack_size, union pipe write_pipe)
+{
+ char stack_cur;
+
+ if ((stack_base_ptr - &stack_cur) < stack_size)
+ return consume_stack(stack_size, write_pipe);
+ else {
+ stack_top_ptr = &stack_cur;
+
+ FAIL_IF(notify_parent(write_pipe));
+
+ while (!sig_occurred)
+ barrier();
+ }
+
+ return 0;
+}
+
+static int child(unsigned int stack_size, union pipe write_pipe)
+{
+ struct sigaction act;
+ char stack_base;
+
+ act.sa_handler = sigusr1_handler;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = 0;
+ if (sigaction(SIGUSR1, &act, NULL) < 0)
+ err(1, "sigaction");
+
+ stack_base_ptr = (char *) (((size_t) &stack_base + 65535) & ~65535UL);
+
+ FAIL_IF(consume_stack(stack_size, write_pipe));
+
+ printf("size 0x%06x: OK, stack base %p top %p (%zx used)\n",
+ stack_size, stack_base_ptr, stack_top_ptr,
+ stack_base_ptr - stack_top_ptr);
+
+ return 0;
+}
+
+static int test_one_size(unsigned int stack_size)
+{
+ union pipe read_pipe, write_pipe;
+ pid_t pid;
+
+ FAIL_IF(pipe(read_pipe.fds) == -1);
+ FAIL_IF(pipe(write_pipe.fds) == -1);
+
+ pid = fork();
+ if (pid == 0) {
+ close(read_pipe.read_fd);
+ close(write_pipe.write_fd);
+ exit(child(stack_size, read_pipe));
+ }
+
+ close(read_pipe.write_fd);
+ close(write_pipe.read_fd);
+ FAIL_IF(sync_with_child(read_pipe, write_pipe));
+
+ kill(pid, SIGUSR1);
+
+ FAIL_IF(wait_for_child(pid));
+
+ close(read_pipe.read_fd);
+ close(write_pipe.write_fd);
+
+ return 0;
+}
+
+int test(void)
+{
+ unsigned int i, size;
+
+ // Test with used stack from 1MB - 64K to 1MB + 64K
+ // Increment by 64 to get more coverage of odd sizes
+ for (i = 0; i < (128 * _KB); i += 64) {
+ size = i + (1 * _MB) - (64 * _KB);
+ FAIL_IF(test_one_size(size));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "stack_expansion_signal");
+}
diff --git a/tools/testing/selftests/powerpc/mm/stress_code_patching.sh b/tools/testing/selftests/powerpc/mm/stress_code_patching.sh
new file mode 100755
index 000000000000..e454509659f6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/stress_code_patching.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+TIMEOUT=30
+
+DEBUGFS_DIR=`cat /proc/mounts | grep debugfs | awk '{print $2}'`
+if [ ! -e "$DEBUGFS_DIR" ]
+then
+ echo "debugfs not found, skipping" 1>&2
+ exit 4
+fi
+
+if [ ! -e "$DEBUGFS_DIR/tracing/current_tracer" ]
+then
+ echo "Tracing files not found, skipping" 1>&2
+ exit 4
+fi
+
+
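+# Toggle the function tracer on and off repeatedly for $TIMEOUT seconds.
+# Each switch patches every ftrace call site in the kernel, so a bug in
+# kernel code patching should show up either as an 'ftrace bug' splat in
+# dmesg or as a dead ("CORRUPTED") function tracer.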
+echo "Testing for spurious faults when mapping kernel memory..."
+
+if grep -q "FUNCTION TRACING IS CORRUPTED" "$DEBUGFS_DIR/tracing/trace"
+then
+ echo "FAILED: Ftrace already dead. Probably due to a spurious fault" 1>&2
+ exit 1
+fi
+
+dmesg -C
+START_TIME=`date +%s`
+END_TIME=`expr $START_TIME + $TIMEOUT`
+while [ `date +%s` -lt $END_TIME ]
+do
+ echo function > $DEBUGFS_DIR/tracing/current_tracer
+ echo nop > $DEBUGFS_DIR/tracing/current_tracer
+ if dmesg | grep -q 'ftrace bug'
+ then
+ break
+ fi
+done
+
+echo nop > $DEBUGFS_DIR/tracing/current_tracer
+if dmesg | grep -q 'ftrace bug'
+then
+ echo "FAILED: Mapping kernel memory causes spurious faults" 1>&2
+ exit 1
+else
+ echo "OK: Mapping kernel memory does not cause spurious faults"
+ exit 0
+fi
diff --git a/tools/testing/selftests/powerpc/mm/tlbie_test.c b/tools/testing/selftests/powerpc/mm/tlbie_test.c
index f85a0938ab25..48344a74b212 100644
--- a/tools/testing/selftests/powerpc/mm/tlbie_test.c
+++ b/tools/testing/selftests/powerpc/mm/tlbie_test.c
@@ -33,7 +33,6 @@
#include <sched.h>
#include <time.h>
#include <stdarg.h>
-#include <sched.h>
#include <pthread.h>
#include <signal.h>
#include <sys/prctl.h>
diff --git a/tools/testing/selftests/powerpc/nx-gzip/.gitignore b/tools/testing/selftests/powerpc/nx-gzip/.gitignore
new file mode 100644
index 000000000000..886d522d52df
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+gunz_test
+gzfht_test
diff --git a/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules b/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
new file mode 100644
index 000000000000..5a7118495cb3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
@@ -0,0 +1 @@
+SUBSYSTEM=="nxgzip", KERNEL=="nx-gzip", MODE="0666"
diff --git a/tools/testing/selftests/powerpc/nx-gzip/Makefile b/tools/testing/selftests/powerpc/nx-gzip/Makefile
new file mode 100644
index 000000000000..0785c2e99d40
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/Makefile
@@ -0,0 +1,8 @@
+CFLAGS = -O3 -m64 -I./include -I../include
+
+TEST_GEN_FILES := gzfht_test gunz_test
+TEST_PROGS := nx-gzip-test.sh
+
+include ../../lib.mk
+
+$(TEST_GEN_FILES): gzip_vas.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/nx-gzip/README b/tools/testing/selftests/powerpc/nx-gzip/README
new file mode 100644
index 000000000000..9809dbaa1905
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/README
@@ -0,0 +1,45 @@
+Test the nx-gzip function:
+==========================
+
+Verify that the following device exists:
+ /dev/crypto/nx-gzip
+If you get a permission error, run as sudo or set the device permissions:
+ sudo chmod go+rw /dev/crypto/nx-gzip
+However, chmod may not survive across boots. You may create a udev file such
+as:
+ /etc/udev/rules.d/99-nx-gzip.rules
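+One way to do that (assuming a udev-based distribution; adjust paths as
+needed) is to install the rule shipped with these tests and reload udev:
+ sudo cp 99-nx-gzip.rules /etc/udev/rules.d/
+ sudo udevadm control --reload-rules && sudo udevadm trigger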
+
+
+To manually build and run:
+$ gcc -O3 -I./include -o gzfht_test gzfht_test.c gzip_vas.c
+$ gcc -O3 -I./include -o gunz_test gunz_test.c gzip_vas.c
+
+
+Compress any file using Fixed Huffman mode. Output will have a .nx.gz suffix:
+$ ./gzfht_test gzip_vas.c
+file gzip_vas.c read, 6413 bytes
+compressed 6413 to 3124 bytes total, crc32 checksum = abd15e8a
+
+
+Uncompress the previous output. Output will have a .nx.gunzip suffix:
+$ ./gunz_test gzip_vas.c.nx.gz
+gzHeader FLG 0
+00 00 00 00 04 03
+gzHeader MTIME, XFL, OS ignored
+computed checksum abd15e8a isize 0000190d
+stored checksum abd15e8a isize 0000190d
+decomp is complete: fclose
+
+
+Compare two files:
+$ sha1sum gzip_vas.c.nx.gz.nx.gunzip gzip_vas.c
+bf43e3c0c3651f5f22b6f9784cd9b1eeab4120b6 gzip_vas.c.nx.gz.nx.gunzip
+bf43e3c0c3651f5f22b6f9784cd9b1eeab4120b6 gzip_vas.c
+
+
+Note that the code here is intended for testing the nx-gzip hardware function;
+it is not intended for demonstrating performance or compression ratio.
+For simplicity, these selftests allocate the entire set of source and target
+pages in memory, so enough free memory is needed for them to work.
+For more information and source code, see:
+https://github.com/libnxz/power-gzip
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
new file mode 100644
index 000000000000..7c23d3dd7d6d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* P9 gunzip sample code for demonstrating the P9 NX hardware
+ * interface. Not intended for production use or for performance or
+ * compression ratio measurements. Note also that /dev/crypto/nx-gzip,
+ * VAS and skiboot support are required.
+ *
+ * Copyright 2020 IBM Corp.
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ * https://github.com/libnxz/power-gzip for zlib api and other utils
+ * Definitions of acronyms used here. See
+ * P9 NX Gzip Accelerator User's Manual for details:
+ * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
+ *
+ * adler/crc: 32 bit checksums appended to stream tail
+ * ce: completion extension
+ * cpb: coprocessor parameter block (metadata)
+ * crb: coprocessor request block (command)
+ * csb: coprocessor status block (status)
+ * dht: dynamic huffman table
+ * dde: data descriptor element (address, length)
+ * ddl: list of ddes
+ * dh/fh: dynamic and fixed huffman types
+ * fc: coprocessor function code
+ * histlen: history/dictionary length
+ * history: sliding window of up to 32KB of data
+ * lzcount: Deflate LZ symbol counts
+ * rembytecnt: remaining byte count
+ * sfbt: source final block type; last block's type during decomp
+ * spbc: source processed byte count
+ * subc: source unprocessed bit count
+ * tebc: target ending bit count; valid bits in the last byte
+ * tpbc: target processed byte count
+ * vas: virtual accelerator switch; the user mode interface
+ */
+
+#define _ISOC11_SOURCE // For aligned_alloc()
+#define _DEFAULT_SOURCE // For endian.h
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <endian.h>
+#include <bits/endian.h>
+#include <sys/ioctl.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include "nxu.h"
+#include "nx.h"
+#include "crb.h"
+
+int nx_dbg;
+FILE *nx_gzip_log;
+
+#define NX_MIN(X, Y) (((X) < (Y))?(X):(Y))
+#define NX_MAX(X, Y) (((X) > (Y))?(X):(Y))
+
+#define GETINPC(X) fgetc(X)
+#define FNAME_MAX 1024
+
+/* fifo queue management */
+#define fifo_used_bytes(used) (used)
+#define fifo_free_bytes(used, len) ((len)-(used))
+/* amount of free bytes in the first and last parts */
+#define fifo_free_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? (len)-((cur)+(used)) : 0)
+#define fifo_free_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? (cur) : (len)-(used))
+/* amount of used bytes in the first and last parts */
+#define fifo_used_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? (used) : (len)-(cur))
+#define fifo_used_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? 0 : ((used)+(cur))-(len))
+/* first and last free parts start here */
+#define fifo_free_first_offset(cur, used) ((cur)+(used))
+#define fifo_free_last_offset(cur, used, len) \
+ fifo_used_last_bytes(cur, used, len)
+/* first and last used parts start here */
+#define fifo_used_first_offset(cur) (cur)
+#define fifo_used_last_offset(cur) (0)
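+/*
+ * Worked example (illustrative numbers): with len = 16, cur = 12 and
+ * used = 8 the used data wraps around: offsets 12..15 are the "first"
+ * used part (4 bytes) and offsets 0..3 the "last" used part (4 bytes),
+ * while the free space is the single run at offsets 4..11 (first free
+ * = 0 bytes, last free = 8 bytes starting at offset 4).
+ */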
+
+const int fifo_in_len = 1<<24;
+const int fifo_out_len = 1<<24;
+const int page_sz = 1<<16;
+const int line_sz = 1<<7;
+const int window_max = 1<<15;
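+/* i.e. 16 MB input/output fifos, 64 KB pages, 128 B lines, 32 KB window */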
+
+/*
+ * Adds an (address, len) pair to the list of ddes (ddl) and updates
+ * the base dde. ddl[0] is the only dde in a direct dde which
+ * contains a single (addr,len) pair. For more pairs, ddl[0] becomes
+ * the indirect (base) dde that points to a list of direct ddes.
+ * See Section 6.4 of the NX-gzip user manual for DDE description.
+ * Addr=NULL, len=0 clears the ddl[0]. Returns the total number of
+ * bytes in ddl. Caller is responsible for allocating the array of
+ * nx_dde_t *ddl. If N addresses are required in the scatter-gather
+ * list, the ddl array must have N+1 entries minimum.
+ */
+static inline uint32_t nx_append_dde(struct nx_dde_t *ddl, void *addr,
+ uint32_t len)
+{
+ uint32_t ddecnt;
+ uint32_t bytes;
+
+ if (addr == NULL && len == 0) {
+ clearp_dde(ddl);
+ return 0;
+ }
+
+ NXPRT(fprintf(stderr, "%d: %s addr %p len %x\n", __LINE__, __func__,
+ addr, len));
+
+ /* Number of ddes in the dde list; == 0 when it is a direct dde */
+ ddecnt = getpnn(ddl, dde_count);
+ bytes = getp32(ddl, ddebc);
+
+ if (ddecnt == 0 && bytes == 0) {
+ /* First dde is unused; make it a direct dde */
+ bytes = len;
+ putp32(ddl, ddebc, bytes);
+ putp64(ddl, ddead, (uint64_t) addr);
+ } else if (ddecnt == 0) {
+ /* Converting direct to indirect dde
+ * ddl[0] becomes head dde of ddl
+ * copy direct to indirect first.
+ */
+ ddl[1] = ddl[0];
+
+ /* Add the new dde next */
+ clear_dde(ddl[2]);
+ put32(ddl[2], ddebc, len);
+ put64(ddl[2], ddead, (uint64_t) addr);
+
+ /* Ddl head points to 2 direct ddes */
+ ddecnt = 2;
+ putpnn(ddl, dde_count, ddecnt);
+ bytes = bytes + len;
+ putp32(ddl, ddebc, bytes);
+ /* Pointer to the first direct dde */
+ putp64(ddl, ddead, (uint64_t) &ddl[1]);
+ } else {
+ /* Append a dde to an existing indirect ddl */
+ ++ddecnt;
+ clear_dde(ddl[ddecnt]);
+ put64(ddl[ddecnt], ddead, (uint64_t) addr);
+ put32(ddl[ddecnt], ddebc, len);
+
+ putpnn(ddl, dde_count, ddecnt);
+ bytes = bytes + len;
+ putp32(ddl, ddebc, bytes); /* byte sum of all dde */
+ }
+ return bytes;
+}
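+/*
+ * For example, after three successive nx_append_dde() calls with buffers
+ * A, B and C, ddl[0] is the indirect head dde (dde_count = 3, ddebc =
+ * lenA + lenB + lenC, ddead pointing at ddl[1]) and ddl[1..3] hold the
+ * individual (address, length) pairs for A, B and C.
+ */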
+
+/*
+ * Touch the specified number of pages, expressed as a number of bytes,
+ * beginning from the first buffer in a dde list.
+ * Do not touch pages beyond the page containing the buf_sz-th byte.
+ *
+ * Set buf_sz = 0 to touch all pages described by the ddep.
+ */
+static int nx_touch_pages_dde(struct nx_dde_t *ddep, long buf_sz, long page_sz,
+ int wr)
+{
+ uint32_t indirect_count;
+ uint32_t buf_len;
+ long total;
+ uint64_t buf_addr;
+ struct nx_dde_t *dde_list;
+ int i;
+
+ assert(!!ddep);
+
+ indirect_count = getpnn(ddep, dde_count);
+
+ NXPRT(fprintf(stderr, "%s dde_count %d request len ", __func__,
+ indirect_count));
+ NXPRT(fprintf(stderr, "0x%lx\n", buf_sz));
+
+ if (indirect_count == 0) {
+ /* Direct dde */
+ buf_len = getp32(ddep, ddebc);
+ buf_addr = getp64(ddep, ddead);
+
+ NXPRT(fprintf(stderr, "touch direct ddebc 0x%x ddead %p\n",
+ buf_len, (void *)buf_addr));
+
+ if (buf_sz == 0)
+ nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+ else
+ nxu_touch_pages((void *)buf_addr, NX_MIN(buf_len,
+ buf_sz), page_sz, wr);
+
+ return ERR_NX_OK;
+ }
+
+ /* Indirect dde */
+ if (indirect_count > MAX_DDE_COUNT)
+ return ERR_NX_EXCESSIVE_DDE;
+
+ /* First address of the list */
+ dde_list = (struct nx_dde_t *) getp64(ddep, ddead);
+
+ if (buf_sz == 0)
+ buf_sz = getp32(ddep, ddebc);
+
+ total = 0;
+ for (i = 0; i < indirect_count; i++) {
+ buf_len = get32(dde_list[i], ddebc);
+ buf_addr = get64(dde_list[i], ddead);
+ total += buf_len;
+
+ NXPRT(fprintf(stderr, "touch loop len 0x%x ddead %p total ",
+ buf_len, (void *)buf_addr));
+ NXPRT(fprintf(stderr, "0x%lx\n", total));
+
+ /* Touching fewer pages than encoded in the ddebc */
+ if (total > buf_sz) {
+ buf_len = NX_MIN(buf_len, total - buf_sz);
+ nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+ NXPRT(fprintf(stderr, "touch loop break len 0x%x ",
+ buf_len));
+ NXPRT(fprintf(stderr, "ddead %p\n", (void *)buf_addr));
+ break;
+ }
+ nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+ }
+ return ERR_NX_OK;
+}
+
+/*
+ * Src and dst buffers are supplied in scatter gather lists.
+ * NX function code and other parameters supplied in cmdp.
+ */
+static int nx_submit_job(struct nx_dde_t *src, struct nx_dde_t *dst,
+ struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ uint64_t csbaddr;
+
+ memset((void *)&cmdp->crb.csb, 0, sizeof(cmdp->crb.csb));
+
+ cmdp->crb.source_dde = *src;
+ cmdp->crb.target_dde = *dst;
+
+ /* Status, output byte count in tpbc */
+ csbaddr = ((uint64_t) &cmdp->crb.csb) & csb_address_mask;
+ put64(cmdp->crb, csb_address, csbaddr);
+
+ /* NX reports input bytes in spbc; cleared */
+ cmdp->cpb.out_spbc_comp_wrap = 0;
+ cmdp->cpb.out_spbc_comp_with_count = 0;
+ cmdp->cpb.out_spbc_decomp = 0;
+
+ /* Clear output */
+ put32(cmdp->cpb, out_crc, INIT_CRC);
+ put32(cmdp->cpb, out_adler, INIT_ADLER);
+
+ /* Submit the crb, the job descriptor, to the accelerator. */
+ return nxu_submit_job(cmdp, handle);
+}
+
+int decompress_file(int argc, char **argv, void *devhandle)
+{
+ FILE *inpf = NULL;
+ FILE *outf = NULL;
+
+ int c, expect, i, cc, rc = 0;
+ char gzfname[FNAME_MAX];
+
+ /* Queuing, file ops, byte counting */
+ char *fifo_in, *fifo_out;
+ int used_in, cur_in, used_out, cur_out, read_sz, n;
+ int first_free, last_free, first_used, last_used;
+ int first_offset, last_offset;
+ int write_sz, free_space, source_sz;
+ int source_sz_estimate, target_sz_estimate;
+ uint64_t last_comp_ratio = 0; /* 1000 max */
+ uint64_t total_out = 0;
+ int is_final, is_eof;
+
+ /* nx hardware */
+ int sfbt, subc, spbc, tpbc, nx_ce, fc, resuming = 0;
+ int history_len = 0;
+ struct nx_gzip_crb_cpb_t cmd, *cmdp;
+ struct nx_dde_t *ddl_in;
+ struct nx_dde_t dde_in[6] __aligned(128);
+ struct nx_dde_t *ddl_out;
+ struct nx_dde_t dde_out[6] __aligned(128);
+ int pgfault_retries;
+
+ /* when using mmap'ed files */
+ off_t input_file_offset;
+
+ if (argc > 2) {
+ fprintf(stderr, "usage: %s <fname> or stdin\n", argv[0]);
+ fprintf(stderr, " writes to stdout or <fname>.nx.gunzip\n");
+ return -1;
+ }
+
+ if (argc == 1) {
+ inpf = stdin;
+ outf = stdout;
+ } else if (argc == 2) {
+ char w[1024];
+ char *wp;
+
+ inpf = fopen(argv[1], "r");
+ if (inpf == NULL) {
+ perror(argv[1]);
+ return -1;
+ }
+
+ /* Make a new file name to write to. Ignoring '.gz' */
+ wp = (NULL != (wp = strrchr(argv[1], '/'))) ? (wp+1) : argv[1];
+ strcpy(w, wp);
+ strcat(w, ".nx.gunzip");
+
+ outf = fopen(w, "w");
+ if (outf == NULL) {
+ perror(w);
+ return -1;
+ }
+ }
+
+ /* Decode the gzip header */
+ c = GETINPC(inpf); expect = 0x1f; /* ID1 */
+ if (c != expect)
+ goto err1;
+
+ c = GETINPC(inpf); expect = 0x8b; /* ID2 */
+ if (c != expect)
+ goto err1;
+
+ c = GETINPC(inpf); expect = 0x08; /* CM */
+ if (c != expect)
+ goto err1;
+
+ int flg = GETINPC(inpf); /* FLG */
+
+ if (flg & 0xE0 || flg & 0x4 || flg == EOF)
+ goto err2;
+
+ fprintf(stderr, "gzHeader FLG %x\n", flg);
+
+ /* Read 6 bytes; ignoring the MTIME, XFL, OS fields in this
+ * sample code.
+ */
+ for (i = 0; i < 6; i++) {
+ char tmp[10];
+
+ tmp[i] = GETINPC(inpf);
+ if (tmp[i] == EOF)
+ goto err3;
+ fprintf(stderr, "%02x ", tmp[i]);
+ if (i == 5)
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "gzHeader MTIME, XFL, OS ignored\n");
+
+ /* FNAME */
+ if (flg & 0x8) {
+ int k = 0;
+
+ do {
+ c = GETINPC(inpf);
+ if (c == EOF || k >= FNAME_MAX)
+ goto err3;
+ gzfname[k++] = c;
+ } while (c);
+ fprintf(stderr, "gzHeader FNAME: %s\n", gzfname);
+ }
+
+ /* FHCRC */
+ if (flg & 0x2) {
+ c = GETINPC(inpf);
+ if (c == EOF)
+ goto err3;
+ c = GETINPC(inpf);
+ if (c == EOF)
+ goto err3;
+ fprintf(stderr, "gzHeader FHCRC: ignored\n");
+ }
+
+ used_in = cur_in = used_out = cur_out = 0;
+ is_final = is_eof = 0;
+
+ /* Allocate one page larger to prevent page faults due to NX
+ * overfetching.
+ * Either use the (char *)(uintptr_t)aligned_alloc cast as done here or
+ * build with the -std=c11 flag to make the int-to-pointer warning go
+ * away.
+ */
+ assert((fifo_in = (char *)(uintptr_t)aligned_alloc(line_sz,
+ fifo_in_len + page_sz)) != NULL);
+ assert((fifo_out = (char *)(uintptr_t)aligned_alloc(line_sz,
+ fifo_out_len + page_sz + line_sz)) != NULL);
+ /* Leave unused space due to history rounding rules */
+ fifo_out = fifo_out + line_sz;
+ nxu_touch_pages(fifo_out, fifo_out_len, page_sz, 1);
+
+ ddl_in = &dde_in[0];
+ ddl_out = &dde_out[0];
+ cmdp = &cmd;
+ memset(&cmdp->crb, 0, sizeof(cmdp->crb));
+
+read_state:
+
+ /* Read from .gz file */
+
+ NXPRT(fprintf(stderr, "read_state:\n"));
+
+ if (is_eof != 0)
+ goto write_state;
+
+ /* We read into fifo_in in two steps: first, read into the free space
+ * from offset cur_in + used_in to the end of the buffer; then, if the
+ * free space wrapped around, read from offset 0 up to offset cur_in.
+ */
+
+ /* Reset fifo head to reduce unnecessary wrap arounds */
+ cur_in = (used_in == 0) ? 0 : cur_in;
+
+ /* Free space total is reduced by a gap */
+ free_space = NX_MAX(0, fifo_free_bytes(used_in, fifo_in_len)
+ - line_sz);
+
+ /* Free space may wrap around as first and last */
+ first_free = fifo_free_first_bytes(cur_in, used_in, fifo_in_len);
+ last_free = fifo_free_last_bytes(cur_in, used_in, fifo_in_len);
+
+ /* Start offsets of the free memory */
+ first_offset = fifo_free_first_offset(cur_in, used_in);
+ last_offset = fifo_free_last_offset(cur_in, used_in, fifo_in_len);
+
+ /* Reduce read_sz because of the line_sz gap */
+ read_sz = NX_MIN(free_space, first_free);
+ n = 0;
+ if (read_sz > 0) {
+ /* Read in to offset cur_in + used_in */
+ n = fread(fifo_in + first_offset, 1, read_sz, inpf);
+ used_in = used_in + n;
+ free_space = free_space - n;
+ assert(n <= read_sz);
+ if (n != read_sz) {
+ /* Either EOF or error; exit the read loop */
+ is_eof = 1;
+ goto write_state;
+ }
+ }
+
+ /* If free space wrapped around */
+ if (last_free > 0) {
+ /* Reduce read_sz because of the line_sz gap */
+ read_sz = NX_MIN(free_space, last_free);
+ n = 0;
+ if (read_sz > 0) {
+ n = fread(fifo_in + last_offset, 1, read_sz, inpf);
+ used_in = used_in + n; /* Increase used space */
+ free_space = free_space - n; /* Decrease free space */
+ assert(n <= read_sz);
+ if (n != read_sz) {
+ /* Either EOF or error; exit the read loop */
+ is_eof = 1;
+ goto write_state;
+ }
+ }
+ }
+
+ /* At this point we have used_in bytes in fifo_in with the
+ * data head starting at cur_in and possibly wrapping around.
+ */
+
+write_state:
+
+ /* Write decompressed data to output file */
+
+ NXPRT(fprintf(stderr, "write_state:\n"));
+
+ if (used_out == 0)
+ goto decomp_state;
+
+ /* If fifo_out has data waiting, write it out to the file to
+ * make free target space for the accelerator. The used bytes may
+ * sit in the first and last parts of fifo_out.
+ */
+
+ first_used = fifo_used_first_bytes(cur_out, used_out, fifo_out_len);
+ last_used = fifo_used_last_bytes(cur_out, used_out, fifo_out_len);
+
+ write_sz = first_used;
+
+ n = 0;
+ if (write_sz > 0) {
+ n = fwrite(fifo_out + cur_out, 1, write_sz, outf);
+ used_out = used_out - n;
+ /* Move head of the fifo */
+ cur_out = (cur_out + n) % fifo_out_len;
+ assert(n <= write_sz);
+ if (n != write_sz) {
+ fprintf(stderr, "error: write\n");
+ rc = -1;
+ goto err5;
+ }
+ }
+
+ if (last_used > 0) { /* If more data available in the last part */
+ write_sz = last_used; /* Keep it here for later */
+ n = 0;
+ if (write_sz > 0) {
+ n = fwrite(fifo_out, 1, write_sz, outf);
+ used_out = used_out - n;
+ cur_out = (cur_out + n) % fifo_out_len;
+ assert(n <= write_sz);
+ if (n != write_sz) {
+ fprintf(stderr, "error: write\n");
+ rc = -1;
+ goto err5;
+ }
+ }
+ }
+
+decomp_state:
+
+ /* NX decompresses input data */
+
+ NXPRT(fprintf(stderr, "decomp_state:\n"));
+
+ if (is_final)
+ goto finish_state;
+
+ /* Address/len lists */
+ clearp_dde(ddl_in);
+ clearp_dde(ddl_out);
+
+ /* FC, CRC, HistLen, Table 6-6 */
+ if (resuming) {
+ /* Resuming a partially decompressed input.
+ * The key to resume is supplying the 32KB
+ * dictionary (history) to NX, which is basically
+ * the last 32KB of output produced.
+ */
+ fc = GZIP_FC_DECOMPRESS_RESUME;
+
+ cmdp->cpb.in_crc = cmdp->cpb.out_crc;
+ cmdp->cpb.in_adler = cmdp->cpb.out_adler;
+
+ /* Round up the history size to quadword. Section 2.10 */
+ history_len = (history_len + 15) / 16;
+ putnn(cmdp->cpb, in_histlen, history_len);
+ history_len = history_len * 16; /* bytes */
+
+ if (history_len > 0) {
+ /* Chain in the history buffer to the DDE list */
+ if (cur_out >= history_len) {
+ nx_append_dde(ddl_in, fifo_out
+ + (cur_out - history_len),
+ history_len);
+ } else {
+ nx_append_dde(ddl_in, fifo_out
+ + ((fifo_out_len + cur_out)
+ - history_len),
+ history_len - cur_out);
+ /* Up to 32KB history wraps around fifo_out */
+ nx_append_dde(ddl_in, fifo_out, cur_out);
+ }
+
+ }
+ } else {
+ /* First decompress job */
+ fc = GZIP_FC_DECOMPRESS;
+
+ history_len = 0;
+ /* Writing 0 clears out subc as well */
+ cmdp->cpb.in_histlen = 0;
+ total_out = 0;
+
+ put32(cmdp->cpb, in_crc, INIT_CRC);
+ put32(cmdp->cpb, in_adler, INIT_ADLER);
+ put32(cmdp->cpb, out_crc, INIT_CRC);
+ put32(cmdp->cpb, out_adler, INIT_ADLER);
+
+ /* Assuming 10% compression ratio initially; use the
+ * most recently measured compression ratio as a
+ * heuristic to estimate the input and output
+ * sizes. If we give too much input, the target buffer
+ * overflows and NX cycles are wasted, and then we
+ * must retry with smaller input size. 1000 is 100%.
+ */
+ last_comp_ratio = 100UL;
+ }
+ cmdp->crb.gzip_fc = 0;
+ putnn(cmdp->crb, gzip_fc, fc);
+
+ /*
+ * NX source buffers
+ */
+ first_used = fifo_used_first_bytes(cur_in, used_in, fifo_in_len);
+ last_used = fifo_used_last_bytes(cur_in, used_in, fifo_in_len);
+
+ if (first_used > 0)
+ nx_append_dde(ddl_in, fifo_in + cur_in, first_used);
+
+ if (last_used > 0)
+ nx_append_dde(ddl_in, fifo_in, last_used);
+
+ /*
+ * NX target buffers
+ */
+ first_free = fifo_free_first_bytes(cur_out, used_out, fifo_out_len);
+ last_free = fifo_free_last_bytes(cur_out, used_out, fifo_out_len);
+
+ /* Reduce the output free space amount so as not to overwrite the history */
+ int target_max = NX_MAX(0, fifo_free_bytes(used_out, fifo_out_len)
+ - (1<<16));
+
+ NXPRT(fprintf(stderr, "target_max %d (0x%x)\n", target_max,
+ target_max));
+
+ first_free = NX_MIN(target_max, first_free);
+ if (first_free > 0) {
+ first_offset = fifo_free_first_offset(cur_out, used_out);
+ nx_append_dde(ddl_out, fifo_out + first_offset, first_free);
+ }
+
+ if (last_free > 0) {
+ last_free = NX_MIN(target_max - first_free, last_free);
+ if (last_free > 0) {
+ last_offset = fifo_free_last_offset(cur_out, used_out,
+ fifo_out_len);
+ nx_append_dde(ddl_out, fifo_out + last_offset,
+ last_free);
+ }
+ }
+
+ /* Target buffer size is used to limit the source data size
+ * based on previous measurements of compression ratio.
+ */
+
+ /* source_sz includes history */
+ source_sz = getp32(ddl_in, ddebc);
+ assert(source_sz > history_len);
+ source_sz = source_sz - history_len;
+
+ /* Estimating how much source is needed to 3/4 fill a
+ * target_max size target buffer. If we overshoot, then NX
+ * must repeat the job with smaller input and we waste
+ * bandwidth. If we undershoot then we use more NX calls than
+ * necessary.
+ */
+
+ source_sz_estimate = ((uint64_t)target_max * last_comp_ratio * 3UL)
+ / 4000;
+
+ if (source_sz_estimate < source_sz) {
+ /* Target might be small, therefore limiting the
+ * source data.
+ */
+ source_sz = source_sz_estimate;
+ target_sz_estimate = target_max;
+ } else {
+ /* Source file might be small, therefore limiting target
+ * touch pages to a smaller value to save processor cycles.
+ */
+ target_sz_estimate = ((uint64_t)source_sz * 1000UL)
+ / (last_comp_ratio + 1);
+ target_sz_estimate = NX_MIN(2 * target_sz_estimate,
+ target_max);
+ }
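+ /* Worked example with illustrative numbers: if target_max is 1 MB and
+ * last_comp_ratio is 200 (the previous job expanded the data roughly
+ * 5x), then source_sz_estimate = 1 MB * 200 * 3 / 4000, i.e. about
+ * 150 KB of compressed input for the next job.
+ */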
+
+ source_sz = source_sz + history_len;
+
+ /* Some NX condition codes require submitting the NX job again.
+ * The kernel doesn't handle NX page faults; it expects user code
+ * to touch the pages.
+ */
+ pgfault_retries = NX_MAX_FAULTS;
+
+restart_nx:
+
+ putp32(ddl_in, ddebc, source_sz);
+
+ /* Fault in pages */
+ nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), page_sz, 1);
+ nx_touch_pages_dde(ddl_in, 0, page_sz, 0);
+ nx_touch_pages_dde(ddl_out, target_sz_estimate, page_sz, 1);
+
+ /* Send job to NX */
+ cc = nx_submit_job(ddl_in, ddl_out, cmdp, devhandle);
+
+ switch (cc) {
+
+ case ERR_NX_AT_FAULT:
+
+ /* We touched the pages ahead of time. In the most common case
+ * we shouldn't be here. But maybe some pages were paged out.
+ * The kernel should have placed the faulting address in fsaddr.
+ */
+ NXPRT(fprintf(stderr, "ERR_NX_AT_FAULT %p\n",
+ (void *)cmdp->crb.csb.fsaddr));
+
+ if (pgfault_retries == NX_MAX_FAULTS) {
+ /* Try once with exact number of pages */
+ --pgfault_retries;
+ goto restart_nx;
+ } else if (pgfault_retries > 0) {
+ /* If still faulting try fewer input pages
+ * assuming memory outage
+ */
+ if (source_sz > page_sz)
+ source_sz = NX_MAX(source_sz / 2, page_sz);
+ --pgfault_retries;
+ goto restart_nx;
+ } else {
+ fprintf(stderr, "cannot make progress; too many ");
+ fprintf(stderr, "page fault retries cc= %d\n", cc);
+ rc = -1;
+ goto err5;
+ }
+
+ case ERR_NX_DATA_LENGTH:
+
+ NXPRT(fprintf(stderr, "ERR_NX_DATA_LENGTH; "));
+ NXPRT(fprintf(stderr, "stream may have trailing data\n"));
+
+ /* Not an error in the most common case; it just says
+ * there is trailing data that we must examine.
+ *
+ * CC=3 CE(1)=0 CE(0)=1 indicates partial completion
+ * Fig.6-7 and Table 6-8.
+ */
+ nx_ce = get_csb_ce_ms3b(cmdp->crb.csb);
+
+ if (!csb_ce_termination(nx_ce) &&
+ csb_ce_partial_completion(nx_ce)) {
+ /* Check CPB for more information
+ * spbc and tpbc are valid
+ */
+ sfbt = getnn(cmdp->cpb, out_sfbt); /* Table 6-4 */
+ subc = getnn(cmdp->cpb, out_subc); /* Table 6-4 */
+ spbc = get32(cmdp->cpb, out_spbc_decomp);
+ tpbc = get32(cmdp->crb.csb, tpbc);
+ assert(target_max >= tpbc);
+
+ goto ok_cc3; /* not an error */
+ } else {
+ /* History length error when CE(1)=1 CE(0)=0. */
+ rc = -1;
+ fprintf(stderr, "history length error cc= %d\n", cc);
+ goto err5;
+ }
+
+ case ERR_NX_TARGET_SPACE:
+
+ /* Target buffer not large enough; retry smaller input
+ * data; give at least 1 byte. SPBC/TPBC are not valid.
+ */
+ assert(source_sz > history_len);
+ source_sz = ((source_sz - history_len + 2) / 2) + history_len;
+ NXPRT(fprintf(stderr, "ERR_NX_TARGET_SPACE; retry with "));
+ NXPRT(fprintf(stderr, "smaller input data src %d hist %d\n",
+ source_sz, history_len));
+ goto restart_nx;
+
+ case ERR_NX_OK:
+
+ /* This should not happen for gzip formatted data;
+ * we need trailing crc and isize
+ */
+ fprintf(stderr, "ERR_NX_OK\n");
+ spbc = get32(cmdp->cpb, out_spbc_decomp);
+ tpbc = get32(cmdp->crb.csb, tpbc);
+ assert(target_max >= tpbc);
+ assert(spbc >= history_len);
+ source_sz = spbc - history_len;
+ goto offsets_state;
+
+ default:
+ fprintf(stderr, "error: cc= %d\n", cc);
+ rc = -1;
+ goto err5;
+ }
+
+ok_cc3:
+
+ NXPRT(fprintf(stderr, "cc3: sfbt: %x\n", sfbt));
+
+ assert(spbc > history_len);
+ source_sz = spbc - history_len;
+
+ /* Table 6-4: Source Final Block Type (SFBT) describes the
+ * last processed deflate block and clues the software how to
+ * resume the next job. SUBC indicates how many input bits NX
+ * consumed but did not process. SPBC indicates how many
+ * bytes of source were given to the accelerator including
+ * history bytes.
+ */
+
+ switch (sfbt) {
+ int dhtlen;
+
+ case 0x0: /* Deflate final EOB received */
+
+ /* Calculating the checksum start position. */
+
+ source_sz = source_sz - subc / 8;
+ is_final = 1;
+ break;
+
+ /* Resume decompression cases are below. Basically
+ * indicates where NX has suspended and how to resume
+ * the input stream.
+ */
+
+ case 0x8: /* Within a literal block; use rembytecount */
+ case 0x9: /* Within a literal block; use rembytecount; bfinal=1 */
+
+ /* Supply the partially processed source byte again */
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ * SUBC least-significant 3 bits: number of bits in the first
+ * source byte that need to be processed.
+ * 000 means all 8 bits; Table 6-3
+ * Clear subc, histlen, sfbt, rembytecnt, dhtlen
+ */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+ putnn(cmdp->cpb, in_rembytecnt, getnn(cmdp->cpb,
+ out_rembytecnt));
+ break;
+
+ case 0xA: /* Within a FH block; */
+ case 0xB: /* Within a FH block; bfinal=1 */
+
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+ break;
+
+ case 0xC: /* Within a DH block; */
+ case 0xD: /* Within a DH block; bfinal=1 */
+
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+
+ dhtlen = getnn(cmdp->cpb, out_dhtlen);
+ putnn(cmdp->cpb, in_dhtlen, dhtlen);
+ assert(dhtlen >= 42);
+
+ /* Round up to a qword */
+ dhtlen = (dhtlen + 127) / 128;
+
+ while (dhtlen > 0) { /* Copy dht from cpb.out to cpb.in */
+ --dhtlen;
+ cmdp->cpb.in_dht[dhtlen] = cmdp->cpb.out_dht[dhtlen];
+ }
+ break;
+
+ case 0xE: /* Within a block header; bfinal=0; */
+ /* Also given if source data exactly ends (SUBC=0) with
+ * EOB code with BFINAL=0. Means the next byte will
+ * contain a block header.
+ */
+ case 0xF: /* within a block header with BFINAL=1. */
+
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+
+ /* Engine did not process any data */
+ if (is_eof && (source_sz == 0))
+ is_final = 1;
+ }
+
+offsets_state:
+
+ /* Adjust the source and target buffer offsets and lengths */
+
+ NXPRT(fprintf(stderr, "offsets_state:\n"));
+
+ /* Delete input data from fifo_in */
+ used_in = used_in - source_sz;
+ cur_in = (cur_in + source_sz) % fifo_in_len;
+ input_file_offset = input_file_offset + source_sz;
+
+ /* Add output data to fifo_out */
+ used_out = used_out + tpbc;
+
+ assert(used_out <= fifo_out_len);
+
+ total_out = total_out + tpbc;
+
+ /* Deflate history is 32KB max. No need to supply more
+ * than 32KB on a resume.
+ */
+ history_len = (total_out > window_max) ? window_max : total_out;
+
+ /* To estimate expected expansion in the next NX job; 500 means 50%.
+ * Deflate best case is around 1 to 1000.
+ */
+ last_comp_ratio = (1000UL * ((uint64_t)source_sz + 1))
+ / ((uint64_t)tpbc + 1);
+ last_comp_ratio = NX_MAX(NX_MIN(1000UL, last_comp_ratio), 1);
+ NXPRT(fprintf(stderr, "comp_ratio %ld source_sz %d spbc %d tpbc %d\n",
+ last_comp_ratio, source_sz, spbc, tpbc));
+
+ resuming = 1;
+
+finish_state:
+
+ NXPRT(fprintf(stderr, "finish_state:\n"));
+
+ if (is_final) {
+ if (used_out)
+ goto write_state; /* More data to write out */
+ else if (used_in < 8) {
+ /* Need at least 8 more bytes containing gzip crc
+ * and isize.
+ */
+ rc = -1;
+ goto err4;
+ } else {
+ /* Compare checksums and exit */
+ int i;
+ unsigned char tail[8];
+ uint32_t cksum, isize;
+
+ for (i = 0; i < 8; i++)
+ tail[i] = fifo_in[(cur_in + i) % fifo_in_len];
+ fprintf(stderr, "computed checksum %08x isize %08x\n",
+ cmdp->cpb.out_crc, (uint32_t) (total_out
+ % (1ULL<<32)));
+ cksum = ((uint32_t) tail[0] | (uint32_t) tail[1]<<8
+ | (uint32_t) tail[2]<<16
+ | (uint32_t) tail[3]<<24);
+ isize = ((uint32_t) tail[4] | (uint32_t) tail[5]<<8
+ | (uint32_t) tail[6]<<16
+ | (uint32_t) tail[7]<<24);
+ fprintf(stderr, "stored checksum %08x isize %08x\n",
+ cksum, isize);
+
+ if (cksum == cmdp->cpb.out_crc && isize == (uint32_t)
+ (total_out % (1ULL<<32))) {
+ rc = 0; goto ok1;
+ } else {
+ rc = -1; goto err4;
+ }
+ }
+ } else
+ goto read_state;
+
+ return -1;
+
+err1:
+ fprintf(stderr, "error: not a gzip file, expect %x, read %x\n",
+ expect, c);
+ return -1;
+
+err2:
+ fprintf(stderr, "error: the FLG byte is wrong or not being handled\n");
+ return -1;
+
+err3:
+ fprintf(stderr, "error: gzip header\n");
+ return -1;
+
+err4:
+ fprintf(stderr, "error: checksum missing or mismatch\n");
+
+err5:
+ok1:
+ fprintf(stderr, "decomp is complete: fclose\n");
+ fclose(outf);
+
+ return rc;
+}
+
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct sigaction act;
+ void *handle;
+
+ nx_dbg = 0;
+ nx_gzip_log = NULL;
+ act.sa_handler = 0;
+ act.sa_sigaction = nxu_sigsegv_handler;
+ act.sa_flags = SA_SIGINFO;
+ act.sa_restorer = 0;
+ sigemptyset(&act.sa_mask);
+ sigaction(SIGSEGV, &act, NULL);
+
+ handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
+ if (!handle) {
+ fprintf(stderr, "Unable to init NX, errno %d\n", errno);
+ exit(-1);
+ }
+
+ rc = decompress_file(argc, argv, handle);
+
+ nx_function_end(handle);
+
+ return rc;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
new file mode 100644
index 000000000000..095195a25687
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* P9 gzip sample code for demonstrating the P9 NX hardware interface.
+ * Not intended for production use or for performance or compression
+ * ratio measurements. For simplicity of demonstration, this sample
+ * code compresses into fixed Huffman blocks only (Deflate btype=1)
+ * and has very simple memory management. Dynamic Huffman blocks
+ * (Deflate btype=2) are more involved as detailed in the user guide.
+ * Note also that /dev/crypto/nx-gzip, VAS and skiboot support are
+ * required.
+ *
+ * Copyright 2020 IBM Corp.
+ *
+ * https://github.com/libnxz/power-gzip for zlib api and other utils
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ * Definitions of acronyms used here. See
+ * P9 NX Gzip Accelerator User's Manual for details:
+ * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
+ *
+ * adler/crc: 32 bit checksums appended to stream tail
+ * ce: completion extension
+ * cpb: coprocessor parameter block (metadata)
+ * crb: coprocessor request block (command)
+ * csb: coprocessor status block (status)
+ * dht: dynamic huffman table
+ * dde: data descriptor element (address, length)
+ * ddl: list of ddes
+ * dh/fh: dynamic and fixed huffman types
+ * fc: coprocessor function code
+ * histlen: history/dictionary length
+ * history: sliding window of up to 32KB of data
+ * lzcount: Deflate LZ symbol counts
+ * rembytecnt: remaining byte count
+ * sfbt: source final block type; last block's type during decomp
+ * spbc: source processed byte count
+ * subc: source unprocessed bit count
+ * tebc: target ending bit count; valid bits in the last byte
+ * tpbc: target processed byte count
+ * vas: virtual accelerator switch; the user mode interface
+ */
+
+#define _ISOC11_SOURCE // For aligned_alloc()
+#define _DEFAULT_SOURCE // For endian.h
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <endian.h>
+#include <bits/endian.h>
+#include <sys/ioctl.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include "utils.h"
+#include "nxu.h"
+#include "nx.h"
+
+int nx_dbg;
+FILE *nx_gzip_log;
+
+#define NX_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
+#define FNAME_MAX 1024
+#define FEXT ".nx.gz"
+
+#define SYSFS_MAX_REQ_BUF_PATH "devices/vio/ibm,compression-v1/nx_gzip_caps/req_max_processed_len"
+
+/*
+ * LZ counts returned in the user supplied nx_gzip_crb_cpb_t structure.
+ */
+static int compress_fht_sample(char *src, uint32_t srclen, char *dst,
+ uint32_t dstlen, int with_count,
+ struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ uint32_t fc;
+
+ assert(!!cmdp);
+
+ put32(cmdp->crb, gzip_fc, 0); /* clear */
+ fc = (with_count) ? GZIP_FC_COMPRESS_RESUME_FHT_COUNT :
+ GZIP_FC_COMPRESS_RESUME_FHT;
+ putnn(cmdp->crb, gzip_fc, fc);
+ putnn(cmdp->cpb, in_histlen, 0); /* resuming with no history */
+ memset((void *) &cmdp->crb.csb, 0, sizeof(cmdp->crb.csb));
+
+ /* Section 6.6 programming notes; spbc may be in two different
+ * places depending on FC.
+ */
+ if (!with_count)
+ put32(cmdp->cpb, out_spbc_comp, 0);
+ else
+ put32(cmdp->cpb, out_spbc_comp_with_count, 0);
+
+ /* Figure 6-3 6-4; CSB location */
+ put64(cmdp->crb, csb_address, 0);
+ put64(cmdp->crb, csb_address,
+ (uint64_t) &cmdp->crb.csb & csb_address_mask);
+
+ /* Source direct dde (scatter-gather list) */
+ clear_dde(cmdp->crb.source_dde);
+ putnn(cmdp->crb.source_dde, dde_count, 0);
+ put32(cmdp->crb.source_dde, ddebc, srclen);
+ put64(cmdp->crb.source_dde, ddead, (uint64_t) src);
+
+ /* Target direct dde (scatter-gather list) */
+ clear_dde(cmdp->crb.target_dde);
+ putnn(cmdp->crb.target_dde, dde_count, 0);
+ put32(cmdp->crb.target_dde, ddebc, dstlen);
+ put64(cmdp->crb.target_dde, ddead, (uint64_t) dst);
+
+ /* Submit the crb, the job descriptor, to the accelerator */
+ return nxu_submit_job(cmdp, handle);
+}
+
+/*
+ * Prepares a blank gzip header (no filename, no timestamp) and returns
+ * the number of bytes written to buf.
+ * Gzip specification at https://tools.ietf.org/html/rfc1952
+ */
+int gzip_header_blank(char *buf)
+{
+ int i = 0;
+
+ buf[i++] = 0x1f; /* ID1 */
+ buf[i++] = 0x8b; /* ID2 */
+ buf[i++] = 0x08; /* CM */
+ buf[i++] = 0x00; /* FLG */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x04; /* XFL 4=fastest */
+ buf[i++] = 0x03; /* OS UNIX */
+
+ return i;
+}
+
+/* Caller must free the allocated buffer. Returns nonzero on error. */
+int read_alloc_input_file(char *fname, char **buf, size_t *bufsize)
+{
+ struct stat statbuf;
+ FILE *fp;
+ char *p;
+ size_t num_bytes;
+
+ if (stat(fname, &statbuf)) {
+ perror(fname);
+ return(-1);
+ }
+ fp = fopen(fname, "r");
+ if (fp == NULL) {
+ perror(fname);
+ return(-1);
+ }
+ assert(NULL != (p = (char *) malloc(statbuf.st_size)));
+ num_bytes = fread(p, 1, statbuf.st_size, fp);
+ if (ferror(fp) || (num_bytes != statbuf.st_size)) {
+ perror(fname);
+ return(-1);
+ }
+ *buf = p;
+ *bufsize = num_bytes;
+ return 0;
+}
+
+/* Returns nonzero on error */
+int write_output_file(char *fname, char *buf, size_t bufsize)
+{
+ FILE *fp;
+ size_t num_bytes;
+
+ fp = fopen(fname, "w");
+ if (fp == NULL) {
+ perror(fname);
+ return(-1);
+ }
+ num_bytes = fwrite(buf, 1, bufsize, fp);
+ if (ferror(fp) || (num_bytes != bufsize)) {
+ perror(fname);
+ return(-1);
+ }
+ fclose(fp);
+ return 0;
+}
+
+/*
+ * Z_SYNC_FLUSH as described in zlib.h.
+ * Returns number of appended bytes
+ */
+int append_sync_flush(char *buf, int tebc, int final)
+{
+ uint64_t flush;
+ int shift = (tebc & 0x7);
+
+ if (tebc > 0) {
+ /* Last byte is partially full */
+ buf = buf - 1;
+ *buf = *buf & (unsigned char) ((1<<tebc)-1);
+ } else
+ *buf = 0;
+ flush = ((0x1ULL & final) << shift) | *buf;
+ shift = shift + 3; /* BFINAL and BTYPE written */
+ shift = (shift <= 8) ? 8 : 16;
+ flush |= (0xFFFF0000ULL) << shift; /* Zero length block */
+ shift = shift + 32;
+ while (shift > 0) {
+ *buf++ = (unsigned char) (flush & 0xffULL);
+ flush = flush >> 8;
+ shift = shift - 8;
+ }
+ return(((tebc > 5) || (tebc == 0)) ? 5 : 4);
+}
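+/*
+ * For example, calling append_sync_flush() with tebc = 3 and final = 0
+ * fits the BFINAL/BTYPE bits into the existing partial byte, so the empty
+ * stored block 00 00 FF FF is the only new data and 4 bytes are appended
+ * (hence the 4-byte return value for 0 < tebc <= 5).
+ */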
+
+/*
+ * Set the final deflate block (BFINAL) bit. This call assumes the block
+ * beginning is byte aligned.
+ */
+static void set_bfinal(void *buf, int bfinal)
+{
+ char *b = buf;
+
+ if (bfinal)
+ *b = *b | (unsigned char) 0x01;
+ else
+ *b = *b & (unsigned char) 0xfe;
+}
+
+int compress_file(int argc, char **argv, void *handle)
+{
+ char *inbuf, *outbuf, *srcbuf, *dstbuf;
+ char outname[FNAME_MAX];
+ uint32_t srclen, dstlen;
+ uint32_t flushlen, chunk;
+ size_t inlen, outlen, dsttotlen, srctotlen;
+ uint32_t crc, spbc, tpbc, tebc;
+ int lzcounts = 0;
+ int cc;
+ int num_hdr_bytes;
+ struct nx_gzip_crb_cpb_t *cmdp;
+ uint32_t pagelen = 65536;
+ int fault_tries = NX_MAX_FAULTS;
+ char buf[32];
+
+ cmdp = (void *)(uintptr_t)
+ aligned_alloc(sizeof(struct nx_gzip_crb_cpb_t),
+ sizeof(struct nx_gzip_crb_cpb_t));
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s <fname>\n", argv[0]);
+ exit(-1);
+ }
+ if (read_alloc_input_file(argv[1], &inbuf, &inlen))
+ exit(-1);
+ fprintf(stderr, "file %s read, %ld bytes\n", argv[1], inlen);
+
+ /* Generous output buffer for header/trailer */
+ outlen = 2 * inlen + 1024;
+
+ assert(NULL != (outbuf = (char *)malloc(outlen)));
+ nxu_touch_pages(outbuf, outlen, pagelen, 1);
+
+ /*
+ * On PowerVM, the hypervisor defines the maximum request buffer
+ * size, and this value is available via sysfs.
+ */
+ if (!read_sysfs_file(SYSFS_MAX_REQ_BUF_PATH, buf, sizeof(buf))) {
+ chunk = atoi(buf);
+ } else {
+ /* sysfs entry is not available on PowerNV */
+ /* Compress piecemeal in smallish chunks */
+ chunk = 1<<22;
+ }
+
+ /* Write the gzip header to the stream */
+ num_hdr_bytes = gzip_header_blank(outbuf);
+ dstbuf = outbuf + num_hdr_bytes;
+ outlen = outlen - num_hdr_bytes;
+ dsttotlen = num_hdr_bytes;
+
+ srcbuf = inbuf;
+ srctotlen = 0;
+
+ /* Init the CRB, the coprocessor request block */
+ memset(&cmdp->crb, 0, sizeof(cmdp->crb));
+
+ /* Initial gzip crc32 */
+ put32(cmdp->cpb, in_crc, 0);
+
+ while (inlen > 0) {
+
+ /* Submit chunk size source data per job */
+ srclen = NX_MIN(chunk, inlen);
+ /* Supply large target in case data expands */
+ dstlen = NX_MIN(2*srclen, outlen);
+
+ /* Page faults are handled by the user code */
+
+ /* Fault-in pages; improved code wouldn't touch so
+ * many pages but would try to estimate the
+ * compression ratio and adjust both the src and dst
+ * touch amounts.
+ */
+ nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), pagelen,
+ 1);
+ nxu_touch_pages(srcbuf, srclen, pagelen, 0);
+ nxu_touch_pages(dstbuf, dstlen, pagelen, 1);
+
+ cc = compress_fht_sample(
+ srcbuf, srclen,
+ dstbuf, dstlen,
+ lzcounts, cmdp, handle);
+
+ if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC &&
+ cc != ERR_NX_AT_FAULT) {
+ fprintf(stderr, "nx error: cc= %d\n", cc);
+ exit(-1);
+ }
+
+ /* Page faults are handled by the user code */
+ if (cc == ERR_NX_AT_FAULT) {
+ NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc));
+ NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n",
+ fault_tries,
+ (unsigned long long) cmdp->crb.csb.fsaddr));
+ fault_tries--;
+ if (fault_tries > 0) {
+ continue;
+ } else {
+ fprintf(stderr, "error: cannot progress; ");
+ fprintf(stderr, "too many faults\n");
+ exit(-1);
+ }
+ }
+
+ fault_tries = NX_MAX_FAULTS; /* Reset for the next chunk */
+
+ inlen = inlen - srclen;
+ srcbuf = srcbuf + srclen;
+ srctotlen = srctotlen + srclen;
+
+ /* Two possible locations for spbc depending on the function
+ * code.
+ */
+ spbc = (!lzcounts) ? get32(cmdp->cpb, out_spbc_comp) :
+ get32(cmdp->cpb, out_spbc_comp_with_count);
+ assert(spbc == srclen);
+
+ /* Target byte count */
+ tpbc = get32(cmdp->crb.csb, tpbc);
+ /* Target ending bit count */
+ tebc = getnn(cmdp->cpb, out_tebc);
+ NXPRT(fprintf(stderr, "compressed chunk %d ", spbc));
+ NXPRT(fprintf(stderr, "to %d bytes, tebc= %d\n", tpbc, tebc));
+
+ if (inlen > 0) { /* More chunks to go */
+ set_bfinal(dstbuf, 0);
+ dstbuf = dstbuf + tpbc;
+ dsttotlen = dsttotlen + tpbc;
+ outlen = outlen - tpbc;
+ /* Round up to the next byte with a flush
+ * block; do not set the BFINAL bit.
+ */
+ flushlen = append_sync_flush(dstbuf, tebc, 0);
+ dsttotlen = dsttotlen + flushlen;
+ outlen = outlen - flushlen;
+ dstbuf = dstbuf + flushlen;
+ NXPRT(fprintf(stderr, "added sync_flush %d bytes\n",
+ flushlen));
+ } else { /* Done */
+ /* Set the BFINAL bit of the last block per Deflate
+ * specification.
+ */
+ set_bfinal(dstbuf, 1);
+ dstbuf = dstbuf + tpbc;
+ dsttotlen = dsttotlen + tpbc;
+ outlen = outlen - tpbc;
+ }
+
+ /* Resuming crc32 for the next chunk */
+ crc = get32(cmdp->cpb, out_crc);
+ put32(cmdp->cpb, in_crc, crc);
+ crc = be32toh(crc);
+ }
+
+ /* Append crc32 and ISIZE to the end */
+ memcpy(dstbuf, &crc, 4);
+ memcpy(dstbuf+4, &srctotlen, 4);
+ dsttotlen = dsttotlen + 8;
+ outlen = outlen - 8;
+
+ assert(FNAME_MAX > (strlen(argv[1]) + strlen(FEXT)));
+ strcpy(outname, argv[1]);
+ strcat(outname, FEXT);
+ if (write_output_file(outname, outbuf, dsttotlen)) {
+ fprintf(stderr, "write error: %s\n", outname);
+ exit(-1);
+ }
+
+ fprintf(stderr, "compressed %ld to %ld bytes total, ", srctotlen,
+ dsttotlen);
+ fprintf(stderr, "crc32 checksum = %08x\n", crc);
+
+ if (inbuf != NULL)
+ free(inbuf);
+
+ if (outbuf != NULL)
+ free(outbuf);
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct sigaction act;
+ void *handle;
+
+ nx_dbg = 0;
+ nx_gzip_log = NULL;
+ act.sa_handler = 0;
+ act.sa_sigaction = nxu_sigsegv_handler;
+ act.sa_flags = SA_SIGINFO;
+ act.sa_restorer = 0;
+ sigemptyset(&act.sa_mask);
+ sigaction(SIGSEGV, &act, NULL);
+
+ handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
+ if (!handle) {
+ fprintf(stderr, "Unable to init NX, errno %d\n", errno);
+ exit(-1);
+ }
+
+ rc = compress_file(argc, argv, handle);
+
+ nx_function_end(handle);
+
+ return rc;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c b/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c
new file mode 100644
index 000000000000..c055885da40a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Copyright 2020 IBM Corp.
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <endian.h>
+#include <bits/endian.h>
+#include <sys/ioctl.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include "vas-api.h"
+#include "nx.h"
+#include "copy-paste.h"
+#include "nxu.h"
+#include "nx_dbg.h"
+#include <sys/platform/ppc.h>
+
+#define barrier()
+#define hwsync() ({ asm volatile("sync" ::: "memory"); })
+
+#ifndef NX_NO_CPU_PRI
+#define cpu_pri_default() ({ asm volatile ("or 2, 2, 2"); })
+#define cpu_pri_low() ({ asm volatile ("or 31, 31, 31"); })
+#else
+#define cpu_pri_default()
+#define cpu_pri_low()
+#endif
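+/*
+ * The nop-like instructions above are PowerPC SMT thread-priority hints:
+ * "or 31,31,31" drops this hardware thread to very low priority while
+ * polling and "or 2,2,2" restores the default medium priority.
+ */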
+
+void *nx_fault_storage_address;
+
+struct nx_handle {
+ int fd;
+ int function;
+ void *paste_addr;
+};
+
+static int open_device_nodes(char *devname, int pri, struct nx_handle *handle)
+{
+ int rc, fd;
+ void *addr;
+ struct vas_tx_win_open_attr txattr;
+
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, " open device name %s\n", devname);
+ return -errno;
+ }
+
+ memset(&txattr, 0, sizeof(txattr));
+ txattr.version = 1;
+ txattr.vas_id = pri;
+ rc = ioctl(fd, VAS_TX_WIN_OPEN, (unsigned long)&txattr);
+ if (rc < 0) {
+ fprintf(stderr, "ioctl() n %d, error %d\n", rc, errno);
+ rc = -errno;
+ goto out;
+ }
+
+ addr = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0ULL);
+ if (addr == MAP_FAILED) {
+ fprintf(stderr, "mmap() failed, errno %d\n", errno);
+ rc = -errno;
+ goto out;
+ }
+ handle->fd = fd;
+ handle->paste_addr = (void *)((char *)addr + 0x400);
+
+ rc = 0;
+out:
+ close(fd);
+ return rc;
+}
+
+void *nx_function_begin(int function, int pri)
+{
+ int rc;
+ char *devname = "/dev/crypto/nx-gzip";
+ struct nx_handle *nxhandle;
+
+ if (function != NX_FUNC_COMP_GZIP) {
+ errno = EINVAL;
+ fprintf(stderr, " NX_FUNC_COMP_GZIP not found\n");
+ return NULL;
+ }
+
+
+ nxhandle = malloc(sizeof(*nxhandle));
+ if (!nxhandle) {
+ errno = ENOMEM;
+ fprintf(stderr, " No memory\n");
+ return NULL;
+ }
+
+ nxhandle->function = function;
+ rc = open_device_nodes(devname, pri, nxhandle);
+ if (rc < 0) {
+ errno = -rc;
+ fprintf(stderr, " open_device_nodes failed\n");
+ return NULL;
+ }
+
+ return nxhandle;
+}
+
+int nx_function_end(void *handle)
+{
+ int rc = 0;
+ struct nx_handle *nxhandle = handle;
+
+ rc = munmap(nxhandle->paste_addr - 0x400, 4096);
+ if (rc < 0) {
+ fprintf(stderr, "munmap() failed, errno %d\n", errno);
+ return rc;
+ }
+ close(nxhandle->fd);
+ free(nxhandle);
+
+ return rc;
+}
+
+static int nx_wait_for_csb(struct nx_gzip_crb_cpb_t *cmdp)
+{
+ long poll = 0;
+ uint64_t t;
+
+	/* Save power and let other threads use the h/w. top may show
+	 * 100%, but only because the OS doesn't know we slowed down this
+	 * h/w thread while polling. We're letting other threads have
+	 * higher throughput on the core.
+	 */
+ cpu_pri_low();
+
+#define CSB_MAX_POLL 200000000UL
+#define USLEEP_TH 300000UL
+
+ t = __ppc_get_timebase();
+
+ while (getnn(cmdp->crb.csb, csb_v) == 0) {
+ ++poll;
+ hwsync();
+
+ cpu_pri_low();
+
+		/* usleep(0) takes around 29000 ticks, ~60 us.
+		 * USLEEP_TH (300000 ticks) means we spin for about
+		 * 600 us before we start sleeping.
+		 */
+ if ((__ppc_get_timebase() - t) > USLEEP_TH) {
+ cpu_pri_default();
+ usleep(1);
+ }
+
+ if (poll > CSB_MAX_POLL)
+ break;
+
+ /* Fault address from signal handler */
+ if (nx_fault_storage_address) {
+ cpu_pri_default();
+ return -EAGAIN;
+ }
+
+ }
+
+ cpu_pri_default();
+
+ /* hw has updated csb and output buffer */
+ hwsync();
+
+ /* Check CSB flags. */
+ if (getnn(cmdp->crb.csb, csb_v) == 0) {
+ fprintf(stderr, "CSB still not valid after %d polls.\n",
+ (int) poll);
+ prt_err("CSB still not valid after %d polls, giving up.\n",
+ (int) poll);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int nxu_run_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ int i, ret, retries;
+ struct nx_handle *nxhandle = handle;
+
+ assert(handle != NULL);
+ i = 0;
+ retries = 5000;
+ while (i++ < retries) {
+ hwsync();
+ vas_copy(&cmdp->crb, 0);
+ ret = vas_paste(nxhandle->paste_addr, 0);
+ hwsync();
+
+ NXPRT(fprintf(stderr, "Paste attempt %d/%d returns 0x%x\n",
+ i, retries, ret));
+
+ if ((ret == 2) || (ret == 3)) {
+
+ ret = nx_wait_for_csb(cmdp);
+ if (!ret) {
+ goto out;
+ } else if (ret == -EAGAIN) {
+ long x;
+
+ prt_err("Touching address %p, 0x%lx\n",
+ nx_fault_storage_address,
+ *(long *) nx_fault_storage_address);
+ x = *(long *) nx_fault_storage_address;
+ *(long *) nx_fault_storage_address = x;
+ nx_fault_storage_address = 0;
+ continue;
+ } else {
+ prt_err("wait_for_csb() returns %d\n", ret);
+ break;
+ }
+ } else {
+ if (i < 10) {
+				/* spin for a few ticks */
+#define SPIN_TH 500UL
+ uint64_t fail_spin;
+
+ fail_spin = __ppc_get_timebase();
+ while ((__ppc_get_timebase() - fail_spin) <
+ SPIN_TH)
+ ;
+ } else {
+ /* sleep */
+ unsigned int pr = 0;
+
+ if (pr++ % 100 == 0) {
+ prt_err("Paste attempt %d/", i);
+ prt_err("%d, failed pid= %d\n", retries,
+ getpid());
+ }
+ usleep(1);
+ }
+ continue;
+ }
+ }
+
+out:
+ cpu_pri_default();
+
+ return ret;
+}
+
+int nxu_submit_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ int cc;
+
+ cc = nxu_run_job(cmdp, handle);
+
+ if (!cc)
+ cc = getnn(cmdp->crb.csb, csb_cc); /* CC Table 6-8 */
+
+ return cc;
+}
+
+
+void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx)
+{
+ fprintf(stderr, "%d: Got signal %d si_code %d, si_addr %p\n", getpid(),
+ sig, info->si_code, info->si_addr);
+
+ nx_fault_storage_address = info->si_addr;
+}
+
+/*
+ * Fault in pages prior to NX job submission. wr=1 may be required to
+ * touch writeable pages: a read alone maps the system zero page and
+ * does not fault in the page as intended. Typically set wr=1 for NX
+ * target pages and wr=0 for NX source pages.
+ */
+int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr)
+{
+ char *begin = buf;
+ char *end = (char *) buf + buf_len - 1;
+ volatile char t;
+
+ assert(buf_len >= 0 && !!buf);
+
+ NXPRT(fprintf(stderr, "touch %p %p len 0x%lx wr=%d\n", buf,
+ (buf + buf_len), buf_len, wr));
+
+ if (buf_len <= 0 || buf == NULL)
+ return -1;
+
+ do {
+ t = *begin;
+ if (wr)
+ *begin = t;
+ begin = begin + page_len;
+ } while (begin < end);
+
+	/* Also touch the tail, in case buf_len is small or the buf tail
+	 * is in another page.
+	 */
+ t = *end;
+ if (wr)
+ *end = t;
+
+ return 0;
+}
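
As the comment above nxu_touch_pages() explains, callers are expected to pre-fault buffers before submitting a job: read-touch the source and write-touch the target so that more than the shared zero page gets mapped. A minimal usage sketch, with illustrative buffer names; prefault_buffers() is not part of the patch.

    #include <unistd.h>

    /* Pre-fault source and target buffers before nxu_submit_job(); sketch only. */
    static int prefault_buffers(void *src, long srclen, void *dst, long dstlen)
    {
            long pagesize = sysconf(_SC_PAGESIZE);
            int rc;

            rc = nxu_touch_pages(src, srclen, pagesize, 0);   /* NX source: read touch */
            if (rc)
                    return rc;
            return nxu_touch_pages(dst, dstlen, pagesize, 1); /* NX target: write touch */
    }
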
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h b/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
new file mode 100644
index 000000000000..0db2d6485037
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* From asm-compat.h */
+#define __stringify_in_c(...) #__VA_ARGS__
+#define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+
+/*
+ * Macros taken from arch/powerpc/include/asm/ppc-opcode.h and other
+ * header files.
+ */
+#define ___PPC_RA(a) (((a) & 0x1f) << 16)
+#define ___PPC_RB(b) (((b) & 0x1f) << 11)
+
+#define PPC_INST_COPY 0x7c20060c
+#define PPC_INST_PASTE 0x7c20070d
+
+#define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define CR0_SHIFT 28
+#define CR0_MASK 0xF
+/*
+ * Copy/paste instructions:
+ *
+ * copy RA,RB
+ * Copy contents of address (RA) + effective_address(RB)
+ * to internal copy-buffer.
+ *
+ * paste RA,RB
+ * Paste contents of internal copy-buffer to the address
+ * (RA) + effective_address(RB)
+ */
+static inline int vas_copy(void *crb, int offset)
+{
+ asm volatile(PPC_COPY(%0, %1)";"
+ :
+ : "b" (offset), "b" (crb)
+ : "memory");
+
+ return 0;
+}
+
+static inline int vas_paste(void *paste_address, int offset)
+{
+ __u32 cr;
+
+ cr = 0;
+ asm volatile(PPC_PASTE(%1, %2)";"
+ "mfocrf %0, 0x80;"
+ : "=r" (cr)
+ : "b" (offset), "b" (paste_address)
+ : "memory", "cr0");
+
+ return (cr >> CR0_SHIFT) & CR0_MASK;
+}
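
Together with gzip_vas.c above, the submission flow is: make the CRB visible with a sync, copy it into the core's copy buffer, paste it to the send window's paste address (the mmap'ed window plus 0x400, as set up in open_device_nodes()), then check CR0. The sketch below condenses one attempt of nxu_run_job(); hwsync() is the barrier macro from gzip_vas.c, and try_paste_crb() is illustrative, not part of the patch.

    /* One copy/paste submission attempt, condensed from nxu_run_job(). */
    static int try_paste_crb(void *paste_addr, struct nx_gzip_crb_cpb_t *cmdp)
    {
            int cr0;

            hwsync();                       /* make the CRB stores visible */
            vas_copy(&cmdp->crb, 0);        /* copy the CRB into the copy buffer */
            cr0 = vas_paste(paste_addr, 0); /* paste it to the receive FIFO */
            hwsync();

            /* gzip_vas.c treats CR0 values 2 and 3 as an accepted paste and
             * then polls the CSB valid bit; other values mean "retry".
             */
            return cr0;
    }
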
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/crb.h b/tools/testing/selftests/powerpc/nx-gzip/include/crb.h
new file mode 100644
index 000000000000..ab101085fa7e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/crb.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __CRB_H
+#define __CRB_H
+#include <linux/types.h>
+#include "nx.h"
+
+/* CCW 842 CI/FC masks
+ * NX P8 workbook, section 4.3.1, figure 4-6
+ * "CI/FC Boundary by NX CT type"
+ */
+#define CCW_CI_842 (0x00003ff8)
+#define CCW_FC_842 (0x00000007)
+
+/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
+
+#define CCB_VALUE (0x3fffffffffffffff)
+#define CCB_ADDRESS (0xfffffffffffffff8)
+#define CCB_CM (0x0000000000000007)
+#define CCB_CM0 (0x0000000000000004)
+#define CCB_CM12 (0x0000000000000003)
+
+#define CCB_CM0_ALL_COMPLETIONS (0x0)
+#define CCB_CM0_LAST_IN_CHAIN (0x4)
+#define CCB_CM12_STORE (0x0)
+#define CCB_CM12_INTERRUPT (0x1)
+
+#define CCB_SIZE (0x10)
+#define CCB_ALIGN CCB_SIZE
+
+struct coprocessor_completion_block {
+ __be64 value;
+ __be64 address;
+} __aligned(CCB_ALIGN);
+
+
+/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
+
+#define CSB_V (0x80)
+#define CSB_F (0x04)
+#define CSB_CH (0x03)
+#define CSB_CE_INCOMPLETE (0x80)
+#define CSB_CE_TERMINATION (0x40)
+#define CSB_CE_TPBC (0x20)
+
+#define CSB_CC_SUCCESS (0)
+#define CSB_CC_INVALID_ALIGN (1)
+#define CSB_CC_OPERAND_OVERLAP (2)
+#define CSB_CC_DATA_LENGTH (3)
+#define CSB_CC_TRANSLATION (5)
+#define CSB_CC_PROTECTION (6)
+#define CSB_CC_RD_EXTERNAL (7)
+#define CSB_CC_INVALID_OPERAND (8)
+#define CSB_CC_PRIVILEGE (9)
+#define CSB_CC_INTERNAL (10)
+#define CSB_CC_WR_EXTERNAL (12)
+#define CSB_CC_NOSPC (13)
+#define CSB_CC_EXCESSIVE_DDE (14)
+#define CSB_CC_WR_TRANSLATION (15)
+#define CSB_CC_WR_PROTECTION (16)
+#define CSB_CC_UNKNOWN_CODE (17)
+#define CSB_CC_ABORT (18)
+#define CSB_CC_TRANSPORT (20)
+#define CSB_CC_SEGMENTED_DDL (31)
+#define CSB_CC_PROGRESS_POINT (32)
+#define CSB_CC_DDE_OVERFLOW (33)
+#define CSB_CC_SESSION (34)
+#define CSB_CC_PROVISION (36)
+#define CSB_CC_CHAIN (37)
+#define CSB_CC_SEQUENCE (38)
+#define CSB_CC_HW (39)
+
+#define CSB_SIZE (0x10)
+#define CSB_ALIGN CSB_SIZE
+
+struct coprocessor_status_block {
+ __u8 flags;
+ __u8 cs;
+ __u8 cc;
+ __u8 ce;
+ __be32 count;
+ __be64 address;
+} __aligned(CSB_ALIGN);
+
+
+/* Chapter 6.5.10 Data-Descriptor List (DDL)
+ * each list contains one or more Data-Descriptor Entries (DDE)
+ */
+
+#define DDE_P (0x8000)
+
+#define DDE_SIZE (0x10)
+#define DDE_ALIGN DDE_SIZE
+
+struct data_descriptor_entry {
+ __be16 flags;
+ __u8 count;
+ __u8 index;
+ __be32 length;
+ __be64 address;
+} __aligned(DDE_ALIGN);
+
+
+/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
+
+#define CRB_SIZE (0x80)
+#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */
+
+
+/* Coprocessor Status Block field
+ * ADDRESS address of CSB
+ * C CCB is valid
+ * AT 0 = addrs are virtual, 1 = addrs are phys
+ * M enable perf monitor
+ */
+#define CRB_CSB_ADDRESS (0xfffffffffffffff0)
+#define CRB_CSB_C (0x0000000000000008)
+#define CRB_CSB_AT (0x0000000000000002)
+#define CRB_CSB_M (0x0000000000000001)
+
+struct coprocessor_request_block {
+ __be32 ccw;
+ __be32 flags;
+ __be64 csb_addr;
+
+ struct data_descriptor_entry source;
+ struct data_descriptor_entry target;
+
+ struct coprocessor_completion_block ccb;
+
+ __u8 reserved[48];
+
+ struct coprocessor_status_block csb;
+} __aligned(CRB_ALIGN);
+
+#define crb_csb_addr(c) __be64_to_cpu(c->csb_addr)
+#define crb_nx_fault_addr(c) __be64_to_cpu(c->stamp.nx.fault_storage_addr)
+#define crb_nx_flags(c) c->stamp.nx.flags
+#define crb_nx_fault_status(c) c->stamp.nx.fault_status
+#define crb_nx_pswid(c) c->stamp.nx.pswid
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1.1.1 RS
+ * Chapter 8.2.3 Coprocessor Directive
+ * Chapter 8.2.4 Execution
+ *
+ * The CCW must be converted to BE before passing to icswx()
+ */
+
+#define CCW_PS (0xff000000)
+#define CCW_CT (0x00ff0000)
+#define CCW_CD (0x0000ffff)
+#define CCW_CL (0x0000c000)
+
+#endif
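
A direct DDE simply describes one flat buffer: by analogy with the dde_count convention documented in nxu.h below, the count field of a direct descriptor stays zero, and the length and address are stored big-endian. A hedged sketch of filling the structure above; dde_set_buffer() is an illustrative helper, not part of the patch.

    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    /* Point a direct data-descriptor entry at a flat buffer; sketch only. */
    static void dde_set_buffer(struct data_descriptor_entry *dde,
                               void *buf, uint32_t len)
    {
            memset(dde, 0, sizeof(*dde));   /* flags, count, index all zero */
            dde->length = htobe32(len);     /* buffer length in bytes */
            dde->address = htobe64((uint64_t)(uintptr_t)buf);
    }
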
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nx.h b/tools/testing/selftests/powerpc/nx-gzip/include/nx.h
new file mode 100644
index 000000000000..1abe23fc29e8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nx.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020 IBM Corp.
+ *
+ */
+#ifndef _NX_H
+#define _NX_H
+
+#include <stdbool.h>
+
+#define NX_FUNC_COMP_842 1
+#define NX_FUNC_COMP_GZIP 2
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+struct nx842_func_args {
+ bool use_crc;
+ bool decompress; /* true decompress; false compress */
+ bool move_data;
+ int timeout; /* seconds */
+};
+
+struct nxbuf_t {
+ int len;
+ char *buf;
+};
+
+/* @function should be EFT (aka 842), GZIP etc */
+void *nx_function_begin(int function, int pri);
+
+int nx_function(void *handle, struct nxbuf_t *in, struct nxbuf_t *out,
+ void *arg);
+
+int nx_function_end(void *handle);
+
+#endif /* _NX_H */
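
nx.h is the thin user-space API the tests build on: open a send window for one of the engines, run jobs against the returned handle, then close it. The sketch below shows that lifecycle with error handling trimmed; run_one_gzip_job() is illustrative and assumes nxu.h (below) for nxu_submit_job().

    #include <errno.h>

    /* Open a GZIP send window, run one job, close the window; sketch only. */
    static int run_one_gzip_job(struct nx_gzip_crb_cpb_t *cmdp)
    {
            void *handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0 /* pri / vas_id */);
            int cc;

            if (!handle)
                    return -errno;

            cc = nxu_submit_job(cmdp, handle);  /* returns CSB.CC; 0 means success */

            nx_function_end(handle);
            return cc;
    }
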
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h b/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
new file mode 100644
index 000000000000..16464e19c47f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020 IBM Corporation
+ *
+ */
+
+#ifndef _NXU_DBG_H_
+#define _NXU_DBG_H_
+
+#include <sys/file.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <time.h>
+#include <pthread.h>
+
+extern FILE * nx_gzip_log;
+extern int nx_gzip_trace;
+extern unsigned int nx_gzip_inflate_impl;
+extern unsigned int nx_gzip_deflate_impl;
+extern unsigned int nx_gzip_inflate_flags;
+extern unsigned int nx_gzip_deflate_flags;
+
+extern int nx_dbg;
+pthread_mutex_t mutex_log;
+
+#define nx_gzip_trace_enabled() (nx_gzip_trace & 0x1)
+#define nx_gzip_hw_trace_enabled() (nx_gzip_trace & 0x2)
+#define nx_gzip_sw_trace_enabled() (nx_gzip_trace & 0x4)
+#define nx_gzip_gather_statistics() (nx_gzip_trace & 0x8)
+#define nx_gzip_per_stream_stat() (nx_gzip_trace & 0x10)
+
+#define prt(fmt, ...) do { \
+ pthread_mutex_lock(&mutex_log); \
+ flock(nx_gzip_log->_fileno, LOCK_EX); \
+ time_t t; struct tm *m; time(&t); m = localtime(&t); \
+ fprintf(nx_gzip_log, "[%04d/%02d/%02d %02d:%02d:%02d] " \
+ "pid %d: " fmt, \
+ (int)m->tm_year + 1900, (int)m->tm_mon+1, (int)m->tm_mday, \
+ (int)m->tm_hour, (int)m->tm_min, (int)m->tm_sec, \
+ (int)getpid(), ## __VA_ARGS__); \
+ fflush(nx_gzip_log); \
+ flock(nx_gzip_log->_fileno, LOCK_UN); \
+ pthread_mutex_unlock(&mutex_log); \
+} while (0)
+
+/* Use in case of an error */
+#define prt_err(fmt, ...) do { if (nx_dbg >= 0) { \
+ prt("%s:%u: Error: "fmt, \
+ __FILE__, __LINE__, ## __VA_ARGS__); \
+}} while (0)
+
+/* Use in case of a warning */
+#define prt_warn(fmt, ...) do { if (nx_dbg >= 1) { \
+ prt("%s:%u: Warning: "fmt, \
+ __FILE__, __LINE__, ## __VA_ARGS__); \
+}} while (0)
+
+/* Informational printouts */
+#define prt_info(fmt, ...) do { if (nx_dbg >= 2) { \
+ prt("Info: "fmt, ## __VA_ARGS__); \
+}} while (0)
+
+/* Trace zlib wrapper code */
+#define prt_trace(fmt, ...) do { if (nx_gzip_trace_enabled()) { \
+ prt("### "fmt, ## __VA_ARGS__); \
+}} while (0)
+
+/* Trace statistics */
+#define prt_stat(fmt, ...) do { if (nx_gzip_gather_statistics()) { \
+ prt("### "fmt, ## __VA_ARGS__); \
+}} while (0)
+
+/* Trace zlib hardware implementation */
+#define hw_trace(fmt, ...) do { \
+ if (nx_gzip_hw_trace_enabled()) \
+ fprintf(nx_gzip_log, "hhh " fmt, ## __VA_ARGS__); \
+ } while (0)
+
+/* Trace zlib software implementation */
+#define sw_trace(fmt, ...) do { \
+ if (nx_gzip_sw_trace_enabled()) \
+ fprintf(nx_gzip_log, "sss " fmt, ## __VA_ARGS__); \
+ } while (0)
+
+
+/**
+ * str_to_num - Convert a string into a number, handling suffixes like
+ * KiB for kilobyte
+ * MiB for megabyte
+ * GiB for gigabyte
+ */
+uint64_t str_to_num(char *str);
+void nx_lib_debug(int onoff);
+
+#endif /* _NXU_DBG_H_ */
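
The prt*() macros write through the nx_gzip_log stream under mutex_log and honour the nx_dbg level (>= 0 errors, >= 1 also warnings, >= 2 also info), so a program must point the log somewhere before using them. gzfht_test.c above initializes nx_gzip_log to NULL and nx_dbg to 0; a hedged sketch of routing the log to stderr instead, with nx_log_to_stderr() purely illustrative:

    #include <stdio.h>
    #include <pthread.h>
    #include "nx_dbg.h"

    /* Route the nx_dbg macros to stderr; sketch only. */
    static void nx_log_to_stderr(void)
    {
            pthread_mutex_init(&mutex_log, NULL);
            nx_gzip_log = stderr;   /* prt() fprintf()s and flock()s this stream */
            nx_dbg = 2;             /* 0: errors, 1: +warnings, 2: +info */
    }
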
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h b/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h
new file mode 100644
index 000000000000..20a4e883e0d3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h
@@ -0,0 +1,650 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Hardware interface of the NX-GZIP compression accelerator
+ *
+ * Copyright (C) IBM Corporation, 2020
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ */
+
+#ifndef _NXU_H
+#define _NXU_H
+
+#include <stdint.h>
+#include <endian.h>
+#include "nx.h"
+
+/* deflate */
+#define LLSZ 286
+#define DSZ 30
+
+/* nx */
+#define DHTSZ 18
+#define DHT_MAXSZ 288
+#define MAX_DDE_COUNT 256
+
+/* util */
+#ifdef NXDBG
+#define NXPRT(X) X
+#else
+#define NXPRT(X)
+#endif
+
+#ifdef NXTIMER
+#include <sys/platform/ppc.h>
+#define NX_CLK(X) X
+#define nx_get_time() __ppc_get_timebase()
+#define nx_get_freq() __ppc_get_timebase_freq()
+#else
+#define NX_CLK(X)
+#define nx_get_time() (-1)
+#define nx_get_freq() (-1)
+#endif
+
+#define NX_MAX_FAULTS 500
+
+/*
+ * Definitions of acronyms used here. See
+ * P9 NX Gzip Accelerator User's Manual for details:
+ * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
+ *
+ * adler/crc: 32 bit checksums appended to stream tail
+ * ce: completion extension
+ * cpb: coprocessor parameter block (metadata)
+ * crb: coprocessor request block (command)
+ * csb: coprocessor status block (status)
+ * dht: dynamic huffman table
+ * dde: data descriptor element (address, length)
+ * ddl: list of ddes
+ * dh/fh: dynamic and fixed huffman types
+ * fc: coprocessor function code
+ * histlen: history/dictionary length
+ * history: sliding window of up to 32KB of data
+ * lzcount: Deflate LZ symbol counts
+ * rembytecnt: remaining byte count
+ * sfbt: source final block type; last block's type during decomp
+ * spbc: source processed byte count
+ * subc: source unprocessed bit count
+ * tebc: target ending bit count; valid bits in the last byte
+ * tpbc: target processed byte count
+ * vas: virtual accelerator switch; the user mode interface
+ */
+
+union nx_qw_t {
+ uint32_t word[4];
+ uint64_t dword[2];
+} __aligned(16);
+
+/*
+ * Note: NX registers with fewer than 32 bits are declared by
+ * convention as uint32_t variables in unions. If *_offset and *_mask
+ * are defined for a variable, then use the get_/put_ macros to
+ * access the register fields conveniently, with the endian
+ * conversions handled for you.
+ */
+
+struct nx_dde_t {
+ /* Data Descriptor Element, Section 6.4 */
+ union {
+ uint32_t dde_count;
+		/* When dde_count == 0, ddead is a pointer to a data buffer
+		 * and ddebc is the buffer length in bytes.
+		 * When dde_count > 0, this is an indirect dde: ddead is a
+		 * pointer to a contiguous list of direct ddes and ddebc is
+		 * the total length of all data pointed to by the list of
+		 * direct ddes. Note that only one level of indirection is
+		 * permitted. See Section 6.4 of the user manual for
+		 * additional details.
+		 */
+ };
+ uint32_t ddebc; /* dde byte count */
+ uint64_t ddead; /* dde address */
+} __aligned(16);
+
+struct nx_csb_t {
+ /* Coprocessor Status Block, Section 6.6 */
+ union {
+ uint32_t csb_v;
+ /* Valid bit. v must be set to 0 by the program
+ * before submitting the coprocessor command.
+		 * Software can then poll the v bit for completion.
+ */
+
+ uint32_t csb_f;
+ /* 16B CSB size. Written to 0 by DMA when it writes the CPB */
+
+ uint32_t csb_cs;
+ /* cs completion sequence; unused */
+
+ uint32_t csb_cc;
+ /* cc completion code; cc != 0 exception occurred */
+
+ uint32_t csb_ce;
+ /* ce completion extension */
+
+ };
+ uint32_t tpbc;
+ /* target processed byte count TPBC */
+
+ uint64_t fsaddr;
+ /* Section 6.12.1 CSB NonZero error summary. FSA Failing storage
+ * address. Address where error occurred. When available, written
+ * to A field of CSB
+ */
+} __aligned(16);
+
+struct nx_ccb_t {
+ /* Coprocessor Completion Block, Section 6.7 */
+
+ uint32_t reserved[3];
+ union {
+ /* When crb.c==0 (no ccb defined) it is reserved;
+ * When crb.c==1 (ccb defined) it is cm
+ */
+
+ uint32_t ccb_cm;
+		/* Signal interrupt if crb.c==1 and cm==1 */
+
+ uint32_t word;
+ /* generic access to the 32bit word */
+ };
+} __aligned(16);
+
+struct vas_stamped_crb_t {
+ /*
+	 * The CRB operand of the paste coprocessor instruction is stamped
+	 * in quadword 4 with the information shown here as it is written
+	 * into the receive FIFO of the coprocessor.
+ */
+
+ union {
+ uint32_t vas_buf_num;
+ /* Verification only vas buffer number which correlates to
+ * the low order bits of the atag in the paste command
+ */
+
+ uint32_t send_wc_id;
+ /* Pointer to Send Window Context that provides for NX address
+ * translation information, such as MSR and LPCR bits, job
+ * completion interrupt RA, PSWID, and job utilization counter.
+ */
+
+ };
+ union {
+ uint32_t recv_wc_id;
+ /* Pointer to Receive Window Context. NX uses this to return
+ * credits to a Receive FIFO as entries are dequeued.
+ */
+
+ };
+ uint32_t reserved2;
+ union {
+ uint32_t vas_invalid;
+ /* Invalid bit. If this bit is 1 the CRB is discarded by
+ * NX upon fetching from the receive FIFO. If this bit is 0
+ * the CRB is processed normally. The bit is stamped to 0
+ * by VAS and may be written to 1 by hypervisor while
+ * the CRB is in the receive FIFO (in memory).
+ */
+
+ };
+};
+
+struct nx_stamped_fault_crb_t {
+ /*
+ * A CRB that has a translation fault is stamped by NX in quadword 4
+ * and pasted to the Fault Send Window in VAS.
+ */
+ uint64_t fsa;
+ union {
+ uint32_t nxsf_t;
+ uint32_t nxsf_fs;
+ };
+ uint32_t pswid;
+};
+
+union stamped_crb_t {
+ struct vas_stamped_crb_t vas;
+ struct nx_stamped_fault_crb_t nx;
+};
+
+struct nx_gzip_cpb_t {
+ /*
+	 * The Coprocessor Parameter Block In/Out areas are used to pass
+	 * metadata to/from the accelerator. Tables 6.5 and 6.6 of the
+	 * user manual.
+ */
+
+ /* CPBInput */
+
+ struct {
+ union {
+ union nx_qw_t qw0;
+ struct {
+ uint32_t in_adler; /* bits 0:31 */
+ uint32_t in_crc; /* bits 32:63 */
+ union {
+ uint32_t in_histlen; /* bits 64:75 */
+ uint32_t in_subc; /* bits 93:95 */
+ };
+ union {
+ /* bits 108:111 */
+ uint32_t in_sfbt;
+ /* bits 112:127 */
+ uint32_t in_rembytecnt;
+ /* bits 116:127 */
+ uint32_t in_dhtlen;
+ };
+ };
+ };
+ union {
+ union nx_qw_t in_dht[DHTSZ]; /* qw[1:18] */
+ char in_dht_char[DHT_MAXSZ]; /* byte access */
+ };
+ union nx_qw_t reserved[5]; /* qw[19:23] */
+ };
+
+ /* CPBOutput */
+
+ volatile struct {
+ union {
+ union nx_qw_t qw24;
+ struct {
+ uint32_t out_adler; /* bits 0:31 qw[24] */
+ uint32_t out_crc; /* bits 32:63 qw[24] */
+ union {
+ /* bits 77:79 qw[24] */
+ uint32_t out_tebc;
+ /* bits 80:95 qw[24] */
+ uint32_t out_subc;
+ };
+ union {
+ /* bits 108:111 qw[24] */
+ uint32_t out_sfbt;
+ /* bits 112:127 qw[24] */
+ uint32_t out_rembytecnt;
+ /* bits 116:127 qw[24] */
+ uint32_t out_dhtlen;
+ };
+ };
+ };
+ union {
+ union nx_qw_t qw25[79]; /* qw[25:103] */
+ /* qw[25] compress no lzcounts or wrap */
+ uint32_t out_spbc_comp_wrap;
+ uint32_t out_spbc_wrap; /* qw[25] wrap */
+ /* qw[25] compress no lzcounts */
+ uint32_t out_spbc_comp;
+ /* 286 LL and 30 D symbol counts */
+ uint32_t out_lzcount[LLSZ+DSZ];
+ struct {
+ union nx_qw_t out_dht[DHTSZ]; /* qw[25:42] */
+ /* qw[43] decompress */
+ uint32_t out_spbc_decomp;
+ };
+ };
+ /* qw[104] compress with lzcounts */
+ uint32_t out_spbc_comp_with_count;
+ };
+} __aligned(128);
+
+struct nx_gzip_crb_t {
+ union { /* byte[0:3] */
+ uint32_t gzip_fc; /* bits[24-31] */
+ };
+ uint32_t reserved1; /* byte[4:7] */
+ union {
+ uint64_t csb_address; /* byte[8:15] */
+ struct {
+ uint32_t reserved2;
+ union {
+ uint32_t crb_c;
+ /* c==0 no ccb defined */
+
+ uint32_t crb_at;
+ /* at==0 address type is ignored;
+ * all addrs effective assumed.
+ */
+
+ };
+ };
+ };
+ struct nx_dde_t source_dde; /* byte[16:31] */
+ struct nx_dde_t target_dde; /* byte[32:47] */
+ volatile struct nx_ccb_t ccb; /* byte[48:63] */
+ volatile union {
+		/* byte[64:239]: the csb is shifted 128 bytes out of the crb
+		 * (it used to be in the crb); JReilly says the csb is
+		 * written with a partial inject
+ */
+ union nx_qw_t reserved64[11];
+ union stamped_crb_t stamp; /* byte[64:79] */
+ };
+ volatile struct nx_csb_t csb;
+} __aligned(128);
+
+struct nx_gzip_crb_cpb_t {
+ struct nx_gzip_crb_t crb;
+ struct nx_gzip_cpb_t cpb;
+} __aligned(2048);
+
+
+/*
+ * The NX hardware convention numbers the msb, on the left, as bit 0.
+ * The defines below have *_offset defined as the rightmost bit
+ * position of a field. The x in size_mask(x) is the field width in bits.
+ */
+
+#define size_mask(x) ((1U<<(x))-1)
+
+/*
+ * Offsets and widths within the containing 32 bits of the various NX
+ * gzip hardware registers. Use the getnn/putnn macros to access
+ * these regs.
+ */
+
+#define dde_count_mask size_mask(8)
+#define dde_count_offset 23
+
+/* CSB */
+
+#define csb_v_mask size_mask(1)
+#define csb_v_offset 0
+#define csb_f_mask size_mask(1)
+#define csb_f_offset 6
+#define csb_cs_mask size_mask(8)
+#define csb_cs_offset 15
+#define csb_cc_mask size_mask(8)
+#define csb_cc_offset 23
+#define csb_ce_mask size_mask(8)
+#define csb_ce_offset 31
+
+/* CCB */
+
+#define ccb_cm_mask size_mask(3)
+#define ccb_cm_offset 31
+
+/* VAS stamped CRB fields */
+
+#define vas_buf_num_mask size_mask(6)
+#define vas_buf_num_offset 5
+#define send_wc_id_mask size_mask(16)
+#define send_wc_id_offset 31
+#define recv_wc_id_mask size_mask(16)
+#define recv_wc_id_offset 31
+#define vas_invalid_mask size_mask(1)
+#define vas_invalid_offset 31
+
+/* NX stamped fault CRB fields */
+
+#define nxsf_t_mask size_mask(1)
+#define nxsf_t_offset 23
+#define nxsf_fs_mask size_mask(8)
+#define nxsf_fs_offset 31
+
+/* CPB input */
+
+#define in_histlen_mask size_mask(12)
+#define in_histlen_offset 11
+#define in_dhtlen_mask size_mask(12)
+#define in_dhtlen_offset 31
+#define in_subc_mask size_mask(3)
+#define in_subc_offset 31
+#define in_sfbt_mask size_mask(4)
+#define in_sfbt_offset 15
+#define in_rembytecnt_mask size_mask(16)
+#define in_rembytecnt_offset 31
+
+/* CPB output */
+
+#define out_tebc_mask size_mask(3)
+#define out_tebc_offset 15
+#define out_subc_mask size_mask(16)
+#define out_subc_offset 31
+#define out_sfbt_mask size_mask(4)
+#define out_sfbt_offset 15
+#define out_rembytecnt_mask size_mask(16)
+#define out_rembytecnt_offset 31
+#define out_dhtlen_mask size_mask(12)
+#define out_dhtlen_offset 31
+
+/* CRB */
+
+#define gzip_fc_mask size_mask(8)
+#define gzip_fc_offset 31
+#define crb_c_mask size_mask(1)
+#define crb_c_offset 28
+#define crb_at_mask size_mask(1)
+#define crb_at_offset 30
+#define csb_address_mask ~(15UL) /* mask off bottom 4b */
+
+/*
+ * Access macros for the registers. Do not access registers directly,
+ * because of the endian conversion: the P9 processor may run either
+ * little or big endian, but the NX coprocessor regs are always big
+ * endian.
+ * Use the 32 and 64 bit macros to access registers of the respective
+ * sizes.
+ * Use the nn forms for register fields shorter than 32 bits.
+ */
+
+#define getnn(ST, REG) ((be32toh(ST.REG) >> (31-REG##_offset)) \
+ & REG##_mask)
+#define getpnn(ST, REG) ((be32toh((ST)->REG) >> (31-REG##_offset)) \
+ & REG##_mask)
+#define get32(ST, REG) (be32toh(ST.REG))
+#define getp32(ST, REG) (be32toh((ST)->REG))
+#define get64(ST, REG) (be64toh(ST.REG))
+#define getp64(ST, REG) (be64toh((ST)->REG))
+
+#define unget32(ST, REG) (get32(ST, REG) & ~((REG##_mask) \
+ << (31-REG##_offset)))
+/* get 32bits less the REG field */
+
+#define ungetp32(ST, REG) (getp32(ST, REG) & ~((REG##_mask) \
+ << (31-REG##_offset)))
+/* get 32bits less the REG field */
+
+#define clear_regs(ST) memset((void *)(&(ST)), 0, sizeof(ST))
+#define clear_dde(ST) do { ST.dde_count = ST.ddebc = 0; ST.ddead = 0; \
+ } while (0)
+#define clearp_dde(ST) do { (ST)->dde_count = (ST)->ddebc = 0; \
+ (ST)->ddead = 0; \
+ } while (0)
+#define clear_struct(ST) memset((void *)(&(ST)), 0, sizeof(ST))
+#define putnn(ST, REG, X) (ST.REG = htobe32(unget32(ST, REG) | (((X) \
+ & REG##_mask) << (31-REG##_offset))))
+#define putpnn(ST, REG, X) ((ST)->REG = htobe32(ungetp32(ST, REG) \
+ | (((X) & REG##_mask) << (31-REG##_offset))))
+
+#define put32(ST, REG, X) (ST.REG = htobe32(X))
+#define putp32(ST, REG, X) ((ST)->REG = htobe32(X))
+#define put64(ST, REG, X) (ST.REG = htobe64(X))
+#define putp64(ST, REG, X) ((ST)->REG = htobe64(X))
+
+/*
+ * Completion extension ce(0) ce(1) ce(2). Bits ce(3-7)
+ * unused. Section 6.6 Figure 6.7.
+ */
+
+#define get_csb_ce(ST) ((uint32_t)getnn(ST, csb_ce))
+#define get_csb_ce_ms3b(ST) (get_csb_ce(ST) >> 5)
+#define put_csb_ce_ms3b(ST, X) putnn(ST, csb_ce, ((uint32_t)(X) << 5))
+
+#define CSB_CE_PARTIAL 0x4
+#define CSB_CE_TERMINATE 0x2
+#define CSB_CE_TPBC_VALID 0x1
+
+#define csb_ce_termination(X) (!!((X) & CSB_CE_TERMINATE))
+/* termination, output buffers may be modified, SPBC/TPBC invalid Fig.6-7 */
+
+#define csb_ce_check_completion(X) (!csb_ce_termination(X))
+/* if not terminated then check full or partial completion */
+
+#define csb_ce_partial_completion(X) (!!((X) & CSB_CE_PARTIAL))
+#define csb_ce_full_completion(X) (!csb_ce_partial_completion(X))
+#define csb_ce_tpbc_valid(X) (!!((X) & CSB_CE_TPBC_VALID))
+/* TPBC indicates successfully stored data count */
+
+#define csb_ce_default_err(X) csb_ce_termination(X)
+/* most error CEs have CE(0)=0 and CE(1)=1 */
+
+#define csb_ce_cc3_partial(X) csb_ce_partial_completion(X)
+/* some CC=3 are partially completed, Table 6-8 */
+
+#define csb_ce_cc64(X) ((X)&(CSB_CE_PARTIAL \
+ | CSB_CE_TERMINATE) == 0)
+/* Compression: when TPBC>SPBC then CC=64 Table 6-8; target didn't
+ * compress smaller than source.
+ */
+
+/* Decompress SFBT combinations Tables 5-3, 6-4, 6-6 */
+
+#define SFBT_BFINAL 0x1
+#define SFBT_LIT 0x4
+#define SFBT_FHT 0x5
+#define SFBT_DHT 0x6
+#define SFBT_HDR 0x7
+
+/*
+ * NX gzip function codes. Table 6.2.
+ * Bits 0:4 are the FC. Bit 5 is used by the DMA controller to
+ * select one of the two Byte Count Limits.
+ */
+
+#define GZIP_FC_LIMIT_MASK 0x01
+#define GZIP_FC_COMPRESS_FHT 0x00
+#define GZIP_FC_COMPRESS_DHT 0x02
+#define GZIP_FC_COMPRESS_FHT_COUNT 0x04
+#define GZIP_FC_COMPRESS_DHT_COUNT 0x06
+#define GZIP_FC_COMPRESS_RESUME_FHT 0x08
+#define GZIP_FC_COMPRESS_RESUME_DHT 0x0a
+#define GZIP_FC_COMPRESS_RESUME_FHT_COUNT 0x0c
+#define GZIP_FC_COMPRESS_RESUME_DHT_COUNT 0x0e
+#define GZIP_FC_DECOMPRESS 0x10
+#define GZIP_FC_DECOMPRESS_SINGLE_BLK_N_SUSPEND 0x12
+#define GZIP_FC_DECOMPRESS_RESUME 0x14
+#define GZIP_FC_DECOMPRESS_RESUME_SINGLE_BLK_N_SUSPEND 0x16
+#define GZIP_FC_WRAP 0x1e
+
+#define fc_is_compress(fc) (((fc) & 0x10) == 0)
+#define fc_has_count(fc) (fc_is_compress(fc) && (((fc) & 0x4) != 0))
+
+/* CSB.CC Error codes */
+
+#define ERR_NX_OK 0
+#define ERR_NX_ALIGNMENT 1
+#define ERR_NX_OPOVERLAP 2
+#define ERR_NX_DATA_LENGTH 3
+#define ERR_NX_TRANSLATION 5
+#define ERR_NX_PROTECTION 6
+#define ERR_NX_EXTERNAL_UE7 7
+#define ERR_NX_INVALID_OP 8
+#define ERR_NX_PRIVILEGE 9
+#define ERR_NX_INTERNAL_UE 10
+#define ERR_NX_EXTERN_UE_WR 12
+#define ERR_NX_TARGET_SPACE 13
+#define ERR_NX_EXCESSIVE_DDE 14
+#define ERR_NX_TRANSL_WR 15
+#define ERR_NX_PROTECT_WR 16
+#define ERR_NX_SUBFUNCTION 17
+#define ERR_NX_FUNC_ABORT 18
+#define ERR_NX_BYTE_MAX 19
+#define ERR_NX_CORRUPT_CRB 20
+#define ERR_NX_INVALID_CRB 21
+#define ERR_NX_INVALID_DDE 30
+#define ERR_NX_SEGMENTED_DDL 31
+#define ERR_NX_DDE_OVERFLOW 33
+#define ERR_NX_TPBC_GT_SPBC 64
+#define ERR_NX_MISSING_CODE 66
+#define ERR_NX_INVALID_DIST 67
+#define ERR_NX_INVALID_DHT 68
+#define ERR_NX_EXTERNAL_UE90 90
+#define ERR_NX_WDOG_TIMER 224
+#define ERR_NX_AT_FAULT 250
+#define ERR_NX_INTR_SERVER 252
+#define ERR_NX_UE253 253
+#define ERR_NX_NO_HW 254
+#define ERR_NX_HUNG_OP 255
+#define ERR_NX_END 256
+
+/* initial values for non-resume operations */
+#define INIT_CRC 0 /* crc32(0L, Z_NULL, 0) */
+#define INIT_ADLER 1 /* adler32(0L, Z_NULL, 0) adler is initialized to 1 */
+
+/* prototypes */
+int nxu_submit_job(struct nx_gzip_crb_cpb_t *c, void *handle);
+
+extern void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx);
+extern int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr);
+
+/* caller supplies a print buffer 4*sizeof(crb) */
+
+char *nx_crb_str(struct nx_gzip_crb_t *crb, char *prbuf);
+char *nx_cpb_str(struct nx_gzip_cpb_t *cpb, char *prbuf);
+char *nx_prt_hex(void *cp, int sz, char *prbuf);
+char *nx_lzcount_str(struct nx_gzip_cpb_t *cpb, char *prbuf);
+char *nx_strerror(int e);
+
+#ifdef NX_SIM
+#include <stdio.h>
+int nx_sim_init(void *ctx);
+int nx_sim_end(void *ctx);
+int nxu_run_sim_job(struct nx_gzip_crb_cpb_t *c, void *ctx);
+#endif /* NX_SIM */
+
+/* Deflate stream manipulation */
+
+#define set_final_bit(x) (x |= (unsigned char)1)
+#define clr_final_bit(x) (x &= ~(unsigned char)1)
+
+#define append_empty_fh_blk(p, b) do { *(p) = (2 | (1&(b))); *((p)+1) = 0; \
+ } while (0)
+/* append 10 bits 0000001b 00...... ;
+ * assumes appending starts on a byte boundary; b is the final bit.
+ */
+
+
+#ifdef NX_842
+
+/* 842 Engine */
+
+struct nx_eft_crb_t {
+ union { /* byte[0:3] */
+ uint32_t eft_fc; /* bits[29-31] */
+ };
+ uint32_t reserved1; /* byte[4:7] */
+ union {
+ uint64_t csb_address; /* byte[8:15] */
+ struct {
+ uint32_t reserved2;
+ union {
+ uint32_t crb_c;
+ /* c==0 no ccb defined */
+
+ uint32_t crb_at;
+ /* at==0 address type is ignored;
+ * all addrs effective assumed.
+ */
+
+ };
+ };
+ };
+ struct nx_dde_t source_dde; /* byte[16:31] */
+ struct nx_dde_t target_dde; /* byte[32:47] */
+ struct nx_ccb_t ccb; /* byte[48:63] */
+ union {
+ union nx_qw_t reserved64[3]; /* byte[64:96] */
+ };
+ struct nx_csb_t csb;
+} __aligned(128);
+
+/* 842 CRB */
+
+#define EFT_FC_MASK size_mask(3)
+#define EFT_FC_OFFSET 31
+#define EFT_FC_COMPRESS 0x0
+#define EFT_FC_COMPRESS_WITH_CRC 0x1
+#define EFT_FC_DECOMPRESS 0x2
+#define EFT_FC_DECOMPRESS_WITH_CRC 0x3
+#define EFT_FC_BLK_DATA_MOVE 0x4
+#endif /* NX_842 */
+
+#endif /* _NXU_H */
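
In practice the getnn/putnn and get32/put32 families are how every field above is touched, so the big-endian register images never have to be hand-swapped. The sketch below loosely follows the per-chunk pattern of gzfht_test.c earlier in this patch: pick a function code, then after completion read the target processed byte count and carry the CRC into the next chunk. example_chunk_fields() is illustrative only, not part of the patch.

    /* Condensed CRB/CPB field access with the macros above; sketch only. */
    static void example_chunk_fields(struct nx_gzip_crb_cpb_t *cmdp,
                                     uint32_t *tpbc, uint32_t *crc)
    {
            /* Function code: compress with the fixed Huffman table */
            put32(cmdp->crb, gzip_fc, 0);                    /* clear the word */
            putnn(cmdp->crb, gzip_fc, GZIP_FC_COMPRESS_FHT);

            /* ... job submission and CSB polling happen here ... */

            /* How many output bytes the engine produced for this chunk */
            *tpbc = get32(cmdp->crb.csb, tpbc);

            /* Resume the CRC across chunks, as compress_file() above does */
            *crc = get32(cmdp->cpb, out_crc);
            put32(cmdp->cpb, in_crc, *crc);
    }
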
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h b/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h
new file mode 120000
index 000000000000..77fb4c7236d0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h
@@ -0,0 +1 @@
+../../../../../../arch/powerpc/include/uapi/asm/vas-api.h
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh
new file mode 100755
index 000000000000..c7b46c5fd7b3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+if [[ ! -w /dev/crypto/nx-gzip ]]; then
+ echo "Can't access /dev/crypto/nx-gzip, skipping"
+ echo "skip: $0"
+ exit 4
+fi
+
+set -e
+
+function cleanup
+{
+ rm -f nx-tempfile*
+}
+
+trap cleanup EXIT
+
+function test_sizes
+{
+ local n=$1
+ local fname="nx-tempfile.$n"
+
+ for size in 4K 64K 1M 64M
+ do
+ echo "Testing $size ($n) ..."
+ dd if=/dev/urandom of=$fname bs=$size count=1
+ ./gzfht_test $fname
+ ./gunz_test ${fname}.nx.gz
+ done
+}
+
+echo "Doing basic test of different sizes ..."
+test_sizes 0
+
+echo "Running tests in parallel ..."
+for i in {1..16}
+do
+ test_sizes $i &
+done
+
+wait
+
+echo "OK"
+
+exit 0
diff --git a/tools/testing/selftests/powerpc/papr_attributes/.gitignore b/tools/testing/selftests/powerpc/papr_attributes/.gitignore
new file mode 100644
index 000000000000..d5f42b6d9e99
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_attributes/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+attr_test
diff --git a/tools/testing/selftests/powerpc/papr_attributes/Makefile b/tools/testing/selftests/powerpc/papr_attributes/Makefile
new file mode 100644
index 000000000000..e899712d49db
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_attributes/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+TEST_GEN_PROGS := attr_test
+
+top_srcdir = ../../../../..
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/papr_attributes/attr_test.c b/tools/testing/selftests/powerpc/papr_attributes/attr_test.c
new file mode 100644
index 000000000000..9b655be641c9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/papr_attributes/attr_test.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PAPR Energy attributes sniff test
+ * This checks that the papr sysfs directories and their contents
+ * relating to the energy and frequency attributes are populated.
+ *
+ * Copyright 2022, Pratik Rajesh Sampat, IBM Corp.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include "utils.h"
+
+enum energy_freq_attrs {
+ POWER_PERFORMANCE_MODE = 1,
+ IDLE_POWER_SAVER_STATUS = 2,
+ MIN_FREQ = 3,
+ STAT_FREQ = 4,
+ MAX_FREQ = 6,
+ PROC_FOLDING_STATUS = 8
+};
+
+enum type {
+ INVALID,
+ STR_VAL,
+ NUM_VAL
+};
+
+static int value_type(int id)
+{
+ int val_type;
+
+ switch (id) {
+ case POWER_PERFORMANCE_MODE:
+ case IDLE_POWER_SAVER_STATUS:
+ val_type = STR_VAL;
+ break;
+ case MIN_FREQ:
+ case STAT_FREQ:
+ case MAX_FREQ:
+ case PROC_FOLDING_STATUS:
+ val_type = NUM_VAL;
+ break;
+ default:
+ val_type = INVALID;
+ }
+
+ return val_type;
+}
+
+static int verify_energy_info(void)
+{
+ const char *path = "/sys/firmware/papr/energy_scale_info";
+ struct dirent *entry;
+ struct stat s;
+ DIR *dirp;
+
+ errno = 0;
+ if (stat(path, &s)) {
+ SKIP_IF(errno == ENOENT);
+ FAIL_IF(errno);
+ }
+
+ FAIL_IF(!S_ISDIR(s.st_mode));
+
+ dirp = opendir(path);
+
+ while ((entry = readdir(dirp)) != NULL) {
+ char file_name[64];
+ int id, attr_type;
+ FILE *f;
+
+ if (strcmp(entry->d_name, ".") == 0 ||
+ strcmp(entry->d_name, "..") == 0)
+ continue;
+
+ id = atoi(entry->d_name);
+ attr_type = value_type(id);
+ FAIL_IF(attr_type == INVALID);
+
+ /* Check if the files exist and have data in them */
+ sprintf(file_name, "%s/%d/desc", path, id);
+ f = fopen(file_name, "r");
+ FAIL_IF(!f);
+ FAIL_IF(fgetc(f) == EOF);
+
+ sprintf(file_name, "%s/%d/value", path, id);
+ f = fopen(file_name, "r");
+ FAIL_IF(!f);
+ FAIL_IF(fgetc(f) == EOF);
+
+ if (attr_type == STR_VAL) {
+ sprintf(file_name, "%s/%d/value_desc", path, id);
+ f = fopen(file_name, "r");
+ FAIL_IF(!f);
+ FAIL_IF(fgetc(f) == EOF);
+ }
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(verify_energy_info, "papr_attributes");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/.gitignore b/tools/testing/selftests/powerpc/pmu/.gitignore
index e748f336eed3..f69b1e2641a1 100644
--- a/tools/testing/selftests/powerpc/pmu/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/.gitignore
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
count_instructions
l3_bank_test
per_event_excludes
+count_stcx_fail
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 19046db995fe..30803353bd7c 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -2,19 +2,23 @@
noarg:
$(MAKE) -C ../
-TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
+TEST_GEN_PROGS := count_instructions count_stcx_fail l3_bank_test per_event_excludes
EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
top_srcdir = ../../../../..
include ../../lib.mk
-all: $(TEST_GEN_PROGS) ebb
+all: $(TEST_GEN_PROGS) ebb sampling_tests event_code_tests
$(TEST_GEN_PROGS): $(EXTRA_SOURCES)
# loop.S can only be built 64-bit
+$(OUTPUT)/count_instructions: CFLAGS += -m64
$(OUTPUT)/count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
- $(CC) $(CFLAGS) -m64 -o $@ $^
+
+$(OUTPUT)/count_stcx_fail: CFLAGS += -m64
+$(OUTPUT)/count_stcx_fail: loop.S $(EXTRA_SOURCES)
+
$(OUTPUT)/per_event_excludes: ../utils.c
@@ -22,25 +26,39 @@ DEFAULT_RUN_TESTS := $(RUN_TESTS)
override define RUN_TESTS
$(DEFAULT_RUN_TESTS)
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+ TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
endef
DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
override define EMIT_TESTS
$(DEFAULT_EMIT_TESTS)
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+ TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
endef
DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
override define INSTALL_RULE
$(DEFAULT_INSTALL_RULE)
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
+ TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install
endef
clean:
$(RM) $(TEST_GEN_PROGS) $(OUTPUT)/loop.o
TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
+ TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
+ TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean
ebb:
TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
-.PHONY: all run_tests clean ebb
+sampling_tests:
+ TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
+
+event_code_tests:
+ TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
+
+.PHONY: all run_tests clean ebb sampling_tests event_code_tests
diff --git a/tools/testing/selftests/powerpc/pmu/branch_loops.S b/tools/testing/selftests/powerpc/pmu/branch_loops.S
new file mode 100644
index 000000000000..de758dd3cecf
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/branch_loops.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <ppc-asm.h>
+
+ .text
+
+#define ITER_SHIFT 31
+
+FUNC_START(indirect_branch_loop)
+ li r3, 1
+ sldi r3, r3, ITER_SHIFT
+
+1: cmpdi r3, 0
+ beqlr
+
+ addi r3, r3, -1
+
+ ld r4, 2f@got(%r2)
+ mtctr r4
+ bctr
+
+ .balign 32
+2: b 1b
+
+FUNC_END(indirect_branch_loop)
diff --git a/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c
new file mode 100644
index 000000000000..2070a1e2b3a5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/prctl.h>
+
+#include "event.h"
+#include "utils.h"
+#include "lib.h"
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+static void setup_event(struct event *e, u64 config, int type, char *name)
+{
+ event_init_opts(e, config, type, name);
+
+ e->attr.disabled = 1;
+ e->attr.exclude_kernel = 1;
+ e->attr.exclude_hv = 1;
+ e->attr.exclude_idle = 1;
+}
+
+static int do_count_loop(struct event *events, u64 instructions,
+ u64 overhead, bool report)
+{
+ s64 difference, expected;
+ double percentage;
+ u64 dummy;
+
+ prctl(PR_TASK_PERF_EVENTS_ENABLE);
+
+	/* Run the loop for the requested number of instructions */
+ thirty_two_instruction_loop_with_ll_sc(instructions >> 5, &dummy);
+
+ prctl(PR_TASK_PERF_EVENTS_DISABLE);
+
+ event_read(&events[0]);
+ event_read(&events[1]);
+ event_read(&events[2]);
+
+ expected = instructions + overhead + (events[2].result.value * 10);
+ difference = events[0].result.value - expected;
+ percentage = (double)difference / events[0].result.value * 100;
+
+ if (report) {
+ printf("-----\n");
+ event_report(&events[0]);
+ event_report(&events[1]);
+ event_report(&events[2]);
+
+ printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead);
+ printf("Expected %llu\n", expected);
+ printf("Actual %llu\n", events[0].result.value);
+ printf("Delta %lld, %f%%\n", difference, percentage);
+ }
+
+ event_reset(&events[0]);
+ event_reset(&events[1]);
+ event_reset(&events[2]);
+
+ if (difference < 0)
+ difference = -difference;
+
+ /* Tolerate a difference below 0.0001 % */
+ difference *= 10000 * 100;
+ if (difference / events[0].result.value)
+ return -1;
+
+ return 0;
+}
+
+/* Count how many instructions it takes to do a null loop */
+static u64 determine_overhead(struct event *events)
+{
+ u64 current, overhead;
+ int i;
+
+ do_count_loop(events, 0, 0, false);
+ overhead = events[0].result.value;
+
+ for (i = 0; i < 100; i++) {
+ do_count_loop(events, 0, 0, false);
+ current = events[0].result.value;
+ if (current < overhead) {
+ printf("Replacing overhead %llu with %llu\n", overhead, current);
+ overhead = current;
+ }
+ }
+
+ return overhead;
+}
+
+#define PM_MRK_STCX_FAIL 0x03e158
+#define PM_STCX_FAIL 0x01e058
+
+static int test_body(void)
+{
+ struct event events[3];
+ u64 overhead;
+
+ // The STCX_FAIL event we use works on Power8 or later
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
+ setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "instructions");
+ setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "cycles");
+ setup_event(&events[2], PM_STCX_FAIL, PERF_TYPE_RAW, "stcx_fail");
+
+ if (event_open(&events[0])) {
+ perror("perf_event_open");
+ return -1;
+ }
+
+ if (event_open_with_group(&events[1], events[0].fd)) {
+ perror("perf_event_open");
+ return -1;
+ }
+
+ if (event_open_with_group(&events[2], events[0].fd)) {
+ perror("perf_event_open");
+ return -1;
+ }
+
+ overhead = determine_overhead(events);
+ printf("Overhead of null loop: %llu instructions\n", overhead);
+
+ /* Run for 1Mi instructions */
+ FAIL_IF(do_count_loop(events, 1000000, overhead, true));
+
+ /* Run for 10Mi instructions */
+ FAIL_IF(do_count_loop(events, 10000000, overhead, true));
+
+ /* Run for 100Mi instructions */
+ FAIL_IF(do_count_loop(events, 100000000, overhead, true));
+
+ /* Run for 1Bi instructions */
+ FAIL_IF(do_count_loop(events, 1000000000, overhead, true));
+
+ /* Run for 16Bi instructions */
+ FAIL_IF(do_count_loop(events, 16000000000, overhead, true));
+
+ /* Run for 64Bi instructions */
+ FAIL_IF(do_count_loop(events, 64000000000, overhead, true));
+
+ event_close(&events[0]);
+ event_close(&events[1]);
+
+ return 0;
+}
+
+static int count_ll_sc(void)
+{
+ return eat_cpu(test_body);
+}
+
+int main(void)
+{
+ return test_harness(count_ll_sc, "count_ll_sc");
+}
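
The tolerance check in do_count_loop() is pure integer arithmetic: the absolute delta is scaled by 10000 * 100 = 10^6 and then integer-divided by the measured instruction count, so the quotient is non-zero exactly when |delta| / actual >= 10^-6, i.e. 0.0001 %. An equivalent formulation as a sketch; within_tolerance() is illustrative, not part of the patch.

    #include <stdbool.h>
    #include <stdint.h>

    /* True when |delta| is below 0.0001 % of the measured count; sketch only. */
    static bool within_tolerance(int64_t delta, uint64_t actual)
    {
            uint64_t mag = delta < 0 ? -(uint64_t)delta : (uint64_t)delta;

            /* Same test as do_count_loop(): scale by 1e6, integer divide. */
            return (mag * 1000000ULL) / actual == 0;
    }
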
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 42bddbed8b64..64d8dfdac74a 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
reg_access_test
event_attributes_test
cycles_test
@@ -20,3 +21,4 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
+regs_access_pmccext_test
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 417306353e07..010160690227 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-include ../../../../../../scripts/Kbuild.include
+include ../../../../../build/Build.include
noarg:
$(MAKE) -C ../../
@@ -7,6 +7,7 @@ noarg:
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
+TMPOUT = $(OUTPUT)/TMPDIR/
# Toolchains may build PIE by default which breaks the assembly
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
@@ -23,7 +24,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test
+ cycles_with_mmcr2_test regs_access_pmccext_test
top_srcdir = ../../../../../..
include ../../../lib.mk
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
index a2d7b0e3dca9..a26ac122c759 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
@@ -91,8 +91,6 @@ int back_to_back_ebbs(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
index bc893813483e..bb9f587fa76e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
@@ -42,8 +42,6 @@ int cycles(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
index dcd351d20328..9ae795ce314e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
@@ -99,8 +99,6 @@ int cycles_with_freeze(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
printf("EBBs while frozen %d\n", ebbs_while_frozen);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
index 94c99c12c0f2..fc32187d483d 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
@@ -50,6 +50,7 @@ int cycles_with_mmcr2(void)
expected[1] = MMCR2_EXPECTED_2;
i = 0;
bad_mmcr2 = false;
+ actual = 0;
/* Make sure we loop until we take at least one EBB */
while ((ebb_state.stats.ebb_count < 20 && !bad_mmcr2) ||
@@ -71,8 +72,6 @@ int cycles_with_mmcr2(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
index dfbc5c3ad52d..21537d6eb6b7 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
@@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
index b5bc2b616075..2c803b5b48d6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
@@ -55,8 +55,6 @@ void ebb_global_disable(void);
bool ebb_is_supported(void);
void ebb_freeze_pmcs(void);
void ebb_unfreeze_pmcs(void);
-void event_ebb_init(struct event *e);
-void event_leader_ebb_init(struct event *e);
int count_pmc(int pmc, uint32_t sample_period);
void dump_ebb_state(void);
void dump_summary_ebb_state(void);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
index ca2f7d729155..b208bf6ad58d 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
@@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
FAIL_IF(ebb_state.stats.ebb_count == 0);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
deleted file mode 100644
index 08a7b5f133b9..000000000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014, Michael Ellerman, IBM Corp.
- */
-
-#include <ppc-asm.h>
-
- .text
-
-FUNC_START(thirty_two_instruction_loop)
- cmpwi r3,0
- beqlr
- addi r4,r3,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1 # 28 addi's
- subi r3,r3,1
- b FUNC_NAME(thirty_two_instruction_loop)
-FUNC_END(thirty_two_instruction_loop)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
index ac3e6e182614..ba2681a12cc7 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
@@ -75,7 +75,6 @@ static int test_body(void)
ebb_freeze_pmcs();
ebb_global_disable();
- count_pmc(4, sample_period);
mtspr(SPRN_PMC4, 0xdead);
dump_summary_ebb_state();
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
index b8242e9d97d2..791d37ba327b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
@@ -70,13 +70,6 @@ int multi_counter(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
- count_pmc(2, sample_period);
- count_pmc(3, sample_period);
- count_pmc(4, sample_period);
- count_pmc(5, sample_period);
- count_pmc(6, sample_period);
-
dump_ebb_state();
for (i = 0; i < 6; i++)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
index a05c0e18ded6..9b0f70d59702 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
@@ -61,8 +61,6 @@ static int cycles_child(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_summary_ebb_state();
event_close(&event);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
index fc5bf4870d8e..01e827c31169 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
@@ -50,8 +50,6 @@ static int no_handler_test(void)
event_close(&event);
- dump_ebb_state();
-
/* The real test is that we never took an EBB at 0x0 */
return 0;
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
index 153ebc92234f..2904c741e04e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
@@ -82,8 +82,6 @@ static int test_body(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(1, sample_period);
-
dump_ebb_state();
if (mmcr0_mismatch)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index eadad75ed7e6..b29f8ba22d1e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -76,8 +76,6 @@ int pmc56_overflow(void)
ebb_global_disable();
ebb_freeze_pmcs();
- count_pmc(2, sample_period);
-
dump_ebb_state();
printf("PMC5/6 overflow %d\n", pmc56_overflowed);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c b/tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c
new file mode 100644
index 000000000000..1eda8e9932e8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+#include <signal.h>
+
+#include "ebb.h"
+
+
+/*
+ * Test that closing the EBB event clears MMCR0_PMCC and
+ * sets MMCR0_PMCCEXT preventing further read access to the
+ * group B PMU registers.
+ */
+
+static int regs_access_pmccext(void)
+{
+ struct event event;
+
+ SKIP_IF(!ebb_is_supported());
+
+ event_init_named(&event, 0x1001e, "cycles");
+ event_leader_ebb_init(&event);
+
+ FAIL_IF(event_open(&event));
+
+ ebb_enable_pmc_counting(1);
+ setup_ebb_handler(standard_ebb_callee);
+ ebb_global_enable();
+ FAIL_IF(ebb_event_enable(&event));
+
+ mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+
+ while (ebb_state.stats.ebb_count < 1)
+ FAIL_IF(core_busy_loop());
+
+ ebb_global_disable();
+ event_close(&event);
+
+ FAIL_IF(ebb_state.stats.ebb_count == 0);
+
+ /*
+ * For ISA v3.1, verify the test takes a SIGILL when reading
+ * PMU regs after the event is closed. With the control bit
+ * in MMCR0 (PMCCEXT) restricting access to group B PMU regs,
+	 * a SIGILL is expected.
+ */
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1))
+ FAIL_IF(catch_sigill(dump_ebb_state));
+ else
+ dump_ebb_state();
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(regs_access_pmccext, "regs_access_pmccext");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.h b/tools/testing/selftests/powerpc/pmu/ebb/trace.h
index 7c0fb5d2bdb1..da2a3be5441f 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/trace.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.h
@@ -18,7 +18,7 @@ struct trace_entry
{
u8 type;
u8 length;
- u8 data[0];
+ u8 data[];
};
struct trace_buffer
@@ -26,7 +26,7 @@ struct trace_buffer
u64 size;
bool overflow;
void *tail;
- u8 data[0];
+ u8 data[];
};
struct trace_buffer *trace_buffer_allocate(u64 size);
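The two hunks above only swap GNU-style zero-length arrays for C99 flexible array members; the allocation pattern stays the same. A minimal sketch of how such a structure is typically allocated (illustrative names, not the selftest's trace_buffer_allocate()):

#include <stdlib.h>
#include <string.h>

struct flex_buf {
	unsigned long size;
	unsigned char data[];		/* flexible array member */
};

static struct flex_buf *flex_buf_alloc(unsigned long size)
{
	/* sizeof(*b) does not include data[], so add the payload explicitly */
	struct flex_buf *b = malloc(sizeof(*b) + size);

	if (b) {
		b->size = size;
		memset(b->data, 0, size);
	}
	return b;
}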
diff --git a/tools/testing/selftests/powerpc/pmu/event.c b/tools/testing/selftests/powerpc/pmu/event.c
index 48e3a413b15d..0c1c1bdba081 100644
--- a/tools/testing/selftests/powerpc/pmu/event.c
+++ b/tools/testing/selftests/powerpc/pmu/event.c
@@ -8,6 +8,7 @@
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
+#include <stdbool.h>
#include <sys/ioctl.h>
#include "event.h"
@@ -20,7 +21,8 @@ int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
group_fd, flags);
}
-void event_init_opts(struct event *e, u64 config, int type, char *name)
+static void __event_init_opts(struct event *e, u64 config,
+ int type, char *name, bool sampling)
{
memset(e, 0, sizeof(*e));
@@ -32,6 +34,16 @@ void event_init_opts(struct event *e, u64 config, int type, char *name)
/* This has to match the structure layout in the header */
e->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | \
PERF_FORMAT_TOTAL_TIME_RUNNING;
+ if (sampling) {
+ e->attr.sample_period = 1000;
+ e->attr.sample_type = PERF_SAMPLE_REGS_INTR;
+ e->attr.disabled = 1;
+ }
+}
+
+void event_init_opts(struct event *e, u64 config, int type, char *name)
+{
+ __event_init_opts(e, config, type, name, false);
}
void event_init_named(struct event *e, u64 config, char *name)
@@ -44,6 +56,11 @@ void event_init(struct event *e, u64 config)
event_init_opts(e, config, PERF_TYPE_RAW, "event");
}
+void event_init_sampling(struct event *e, u64 config)
+{
+ __event_init_opts(e, config, PERF_TYPE_RAW, "event", true);
+}
+
#define PERF_CURRENT_PID 0
#define PERF_NO_PID -1
#define PERF_NO_CPU -1
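For reference, the sampling setup added by __event_init_opts() corresponds to the following perf_event_attr state (a sketch restating what the hunk above requests, using the standard perf_event_open(2) interface; the helper name is illustrative):

#include <string.h>
#include <linux/perf_event.h>

/* Sketch of the attr state event_init_sampling() ends up with. */
static void sampling_attr(struct perf_event_attr *attr, __u64 config)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_RAW;
	attr->size = sizeof(*attr);
	attr->config = config;
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;
	attr->sample_period = 1000;			/* overflow every 1000 events */
	attr->sample_type = PERF_SAMPLE_REGS_INTR;	/* record interrupt registers */
	attr->disabled = 1;				/* enable later via ioctl() */
}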
diff --git a/tools/testing/selftests/powerpc/pmu/event.h b/tools/testing/selftests/powerpc/pmu/event.h
index 302eaab51706..51aad0b6d9ad 100644
--- a/tools/testing/selftests/powerpc/pmu/event.h
+++ b/tools/testing/selftests/powerpc/pmu/event.h
@@ -22,11 +22,17 @@ struct event {
u64 running;
u64 enabled;
} result;
+ /*
+ * mmap buffer used while recording samples.
+ * Accessed as "struct perf_event_mmap_page"
+ */
+ void *mmap_buffer;
};
void event_init(struct event *e, u64 config);
void event_init_named(struct event *e, u64 config, char *name);
void event_init_opts(struct event *e, u64 config, int type, char *name);
+void event_init_sampling(struct event *e, u64 config);
int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
int event_open_with_group(struct event *e, int group_fd);
int event_open_with_pid(struct event *e, pid_t pid);
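The new mmap_buffer member is intended to hold the ring buffer mapped over the event fd. A sketch of the usual mapping, assuming the standard perf layout of one metadata page followed by a power-of-two number of data pages (the helper name is illustrative):

#include <unistd.h>
#include <sys/mman.h>

/* Map the perf ring buffer: 1 metadata page + 2^n data pages (sketch). */
static void *map_event_buffer(int fd, unsigned int data_pages)
{
	size_t len = (size_t)(1 + data_pages) * getpagesize();
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* The first page can be read as struct perf_event_mmap_page. */
	return buf == MAP_FAILED ? NULL : buf;
}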
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore b/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore
new file mode 100644
index 000000000000..5710683da525
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore
@@ -0,0 +1,20 @@
+blacklisted_events_test
+event_alternatives_tests_p10
+event_alternatives_tests_p9
+generic_events_valid_test
+group_constraint_cache_test
+group_constraint_l2l3_sel_test
+group_constraint_mmcra_sample_test
+group_constraint_pmc56_test
+group_constraint_pmc_count_test
+group_constraint_radix_scope_qual_test
+group_constraint_repeat_test
+group_constraint_thresh_cmp_test
+group_constraint_thresh_ctl_test
+group_constraint_thresh_sel_test
+group_constraint_unit_test
+group_pmc56_exclude_constraints_test
+hw_cache_event_type_test
+invalid_event_code_test
+reserved_bits_mmcra_sample_elig_mode_test
+reserved_bits_mmcra_thresh_ctl_test
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile b/tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile
new file mode 100644
index 000000000000..4e07d7046457
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -m64
+
+TEST_GEN_PROGS := group_constraint_pmc56_test group_pmc56_exclude_constraints_test group_constraint_pmc_count_test \
+ group_constraint_repeat_test group_constraint_radix_scope_qual_test reserved_bits_mmcra_sample_elig_mode_test \
+ group_constraint_mmcra_sample_test invalid_event_code_test reserved_bits_mmcra_thresh_ctl_test \
+ blacklisted_events_test event_alternatives_tests_p9 event_alternatives_tests_p10 generic_events_valid_test \
+ group_constraint_l2l3_sel_test group_constraint_cache_test group_constraint_thresh_cmp_test \
+ group_constraint_unit_test group_constraint_thresh_ctl_test group_constraint_thresh_sel_test \
+ hw_cache_event_type_test
+
+top_srcdir = ../../../../../..
+include ../../../lib.mk
+
+$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c ../sampling_tests/misc.h ../sampling_tests/misc.c
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c
new file mode 100644
index 000000000000..fafeff19cb34
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define PM_DTLB_MISS_16G 0x1c058
+#define PM_DERAT_MISS_2M 0x1c05a
+#define PM_DTLB_MISS_2M 0x1c05c
+#define PM_MRK_DTLB_MISS_1G 0x1d15c
+#define PM_DTLB_MISS_4K 0x2c056
+#define PM_DERAT_MISS_1G 0x2c05a
+#define PM_MRK_DERAT_MISS_2M 0x2d152
+#define PM_MRK_DTLB_MISS_4K 0x2d156
+#define PM_MRK_DTLB_MISS_16G 0x2d15e
+#define PM_DTLB_MISS_64K 0x3c056
+#define PM_MRK_DERAT_MISS_1G 0x3d152
+#define PM_MRK_DTLB_MISS_64K 0x3d156
+#define PM_DISP_HELD_SYNC_HOLD 0x4003c
+#define PM_DTLB_MISS_16M 0x4c056
+#define PM_DTLB_MISS_1G 0x4c05a
+#define PM_MRK_DTLB_MISS_16M 0x4c15e
+#define PM_MRK_ST_DONE_L2 0x10134
+#define PM_RADIX_PWC_L1_HIT 0x1f056
+#define PM_FLOP_CMPL 0x100f4
+#define PM_MRK_NTF_FIN 0x20112
+#define PM_RADIX_PWC_L2_HIT 0x2d024
+#define PM_IFETCH_THROTTLE 0x3405e
+#define PM_MRK_L2_TM_ST_ABORT_SISTER 0x3e15c
+#define PM_RADIX_PWC_L3_HIT 0x3f056
+#define PM_RUN_CYC_SMT2_MODE 0x3006c
+#define PM_TM_TX_PASS_RUN_INST 0x4e014
+
+#define PVR_POWER9_CUMULUS 0x00002000
+
+int blacklist_events_dd21[] = {
+ PM_MRK_ST_DONE_L2,
+ PM_RADIX_PWC_L1_HIT,
+ PM_FLOP_CMPL,
+ PM_MRK_NTF_FIN,
+ PM_RADIX_PWC_L2_HIT,
+ PM_IFETCH_THROTTLE,
+ PM_MRK_L2_TM_ST_ABORT_SISTER,
+ PM_RADIX_PWC_L3_HIT,
+ PM_RUN_CYC_SMT2_MODE,
+ PM_TM_TX_PASS_RUN_INST,
+ PM_DISP_HELD_SYNC_HOLD,
+};
+
+int blacklist_events_dd22[] = {
+ PM_DTLB_MISS_16G,
+ PM_DERAT_MISS_2M,
+ PM_DTLB_MISS_2M,
+ PM_MRK_DTLB_MISS_1G,
+ PM_DTLB_MISS_4K,
+ PM_DERAT_MISS_1G,
+ PM_MRK_DERAT_MISS_2M,
+ PM_MRK_DTLB_MISS_4K,
+ PM_MRK_DTLB_MISS_16G,
+ PM_DTLB_MISS_64K,
+ PM_MRK_DERAT_MISS_1G,
+ PM_MRK_DTLB_MISS_64K,
+ PM_DISP_HELD_SYNC_HOLD,
+ PM_DTLB_MISS_16M,
+ PM_DTLB_MISS_1G,
+ PM_MRK_DTLB_MISS_16M,
+};
+
+int pvr_min;
+
+/*
+ * Check for power9 DD2.1 and DD2.2 models,
+ * where the blacklist is applicable.
+ */
+int check_for_power9_version(void)
+{
+ pvr_min = PVR_MIN(mfspr(SPRN_PVR));
+
+ SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER9);
+ SKIP_IF(!(mfspr(SPRN_PVR) & PVR_POWER9_CUMULUS));
+
+ SKIP_IF(!(3 - pvr_min));
+
+ return 0;
+}
+
+/*
+ * Testcase to ensure that using a blacklisted event
+ * code causes event_open to fail on power9
+ */
+
+static int blacklisted_events(void)
+{
+ struct event event;
+ int i = 0;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Check for power9 DD2.1 and DD2.2 models,
+ * where the blacklist is applicable.
+ */
+ SKIP_IF(check_for_power9_version());
+
+ /* Skip for Generic compat mode */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ if (pvr_min == 1) {
+ for (i = 0; i < ARRAY_SIZE(blacklist_events_dd21); i++) {
+ event_init(&event, blacklist_events_dd21[i]);
+ FAIL_IF(!event_open(&event));
+ }
+ } else if (pvr_min == 2) {
+ for (i = 0; i < ARRAY_SIZE(blacklist_events_dd22); i++) {
+ event_init(&event, blacklist_events_dd22[i]);
+ FAIL_IF(!event_open(&event));
+ }
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(blacklisted_events, "blacklisted_events");
+}
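The DD2.1/DD2.2 selection above hinges on decoding the processor version register. A rough sketch of that decoding (the EX_* macros are illustrative; the masks assume the usual PowerPC PVR layout with the version in the upper halfword and the minor revision in the low nibble):

#include <stdio.h>
#include <stdint.h>

#define EX_PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* e.g. the POWER9 family id */
#define EX_PVR_MIN(pvr)	((pvr) & 0xF)			/* minor revision: DD2.1 -> 1 */

static void show_revision(uint32_t pvr)
{
	printf("PVR version 0x%04x, minor revision %u\n",
	       EX_PVR_VER(pvr), EX_PVR_MIN(pvr));
}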
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c
new file mode 100644
index 000000000000..8be7aada6523
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define PM_RUN_CYC_ALT 0x200f4
+#define PM_INST_DISP 0x200f2
+#define PM_BR_2PATH 0x20036
+#define PM_LD_MISS_L1 0x3e054
+#define PM_RUN_INST_CMPL_ALT 0x400fa
+
+#define EventCode_1 0x100fc
+#define EventCode_2 0x200fa
+#define EventCode_3 0x300fc
+#define EventCode_4 0x400fc
+
+/*
+ * Check for event alternatives.
+ */
+
+static int event_alternatives_tests_p10(void)
+{
+ struct event *e, events[5];
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * PVR check is used here since PMU specific data like
+ * alternative events is handled by respective PMU driver
+ * code and using PVR will work correctly for all cases
+ * including generic compat mode.
+ */
+ SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER10);
+
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * Test for event alternative for 0x0001e
+ * and 0x00002.
+ */
+ e = &events[0];
+ event_init(e, 0x0001e);
+
+ e = &events[1];
+ event_init(e, EventCode_1);
+
+ e = &events[2];
+ event_init(e, EventCode_2);
+
+ e = &events[3];
+ event_init(e, EventCode_3);
+
+ e = &events[4];
+ event_init(e, EventCode_4);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * Expected to pass since 0x0001e has alternative event
+ * 0x600f4 in PMC6. So it can go in with other events
+ * in PMC1 to PMC4.
+ */
+ for (i = 1; i < 5; i++)
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+
+ for (i = 0; i < 5; i++)
+ event_close(&events[i]);
+
+ e = &events[0];
+ event_init(e, 0x00002);
+
+ e = &events[1];
+ event_init(e, EventCode_1);
+
+ e = &events[2];
+ event_init(e, EventCode_2);
+
+ e = &events[3];
+ event_init(e, EventCode_3);
+
+ e = &events[4];
+ event_init(e, EventCode_4);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * Expected to pass since 0x00002 has alternative event
+ * 0x500fa in PMC5. So it can go in with other events
+ * in PMC1 to PMC4.
+ */
+ for (i = 1; i < 5; i++)
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+
+ for (i = 0; i < 5; i++)
+ event_close(&events[i]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(event_alternatives_tests_p10, "event_alternatives_tests_p10");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c
new file mode 100644
index 000000000000..f7dcf0e0447c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define PM_RUN_CYC_ALT 0x200f4
+#define PM_INST_DISP 0x200f2
+#define PM_BR_2PATH 0x20036
+#define PM_LD_MISS_L1 0x3e054
+#define PM_RUN_INST_CMPL_ALT 0x400fa
+
+#define EventCode_1 0x200fa
+#define EventCode_2 0x200fc
+#define EventCode_3 0x300fc
+#define EventCode_4 0x400fc
+
+/*
+ * Check for event alternatives.
+ */
+
+static int event_alternatives_tests_p9(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * PVR check is used here since PMU specific data like
+ * alternative events is handled by respective PMU driver
+ * code and using PVR will work correctly for all cases
+ * including generic compat mode.
+ */
+ SKIP_IF(PVR_VER(mfspr(SPRN_PVR)) != POWER9);
+
+ /* Skip for generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /* Init the event for PM_RUN_CYC_ALT */
+ event_init(&leader, PM_RUN_CYC_ALT);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_1);
+
+ /*
+ * Expected to pass since PM_RUN_CYC_ALT in PMC2 has alternative event
+ * 0x600f4. So it can go in with EventCode_1 which is using PMC2
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_INST_DISP);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+ /*
+ * Expected to pass since PM_INST_DISP in PMC2 has alternative event
+ * 0x300f2 in PMC3. So it can go in with EventCode_2 which is using PMC2
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_BR_2PATH);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+ /*
+ * Expected to pass since PM_BR_2PATH in PMC2 has alternative event
+ * 0x40036 in PMC4. So it can go in with EventCode_2 which is using PMC2
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_LD_MISS_L1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_3);
+ /*
+ * Expected to pass since PM_LD_MISS_L1 in PMC3 has alternative event
+ * 0x400f0 in PMC4. So it can go in with EventCode_3 which is using PMC3
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ event_init(&leader, PM_RUN_INST_CMPL_ALT);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_4);
+ /*
+ * Expected to pass since PM_RUN_INST_CMPL_ALT in PMC4 has alternative event
+ * 0x500fa in PMC5. So it can go in with EventCode_4 which is using PMC4
+ */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(event_alternatives_tests_p9, "event_alternatives_tests_p9");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c
new file mode 100644
index 000000000000..0d237c15d3f2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase to ensure that using an invalid generic
+ * event with PERF_TYPE_HARDWARE fails
+ */
+
+static int generic_events_valid_test(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* generic events are different in compat_mode */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * Invalid generic events in power10:
+ * - PERF_COUNT_HW_BUS_CYCLES
+ * - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ * - PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * - PERF_COUNT_HW_REF_CPU_CYCLES
+ */
+ if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
+ event_init_opts(&event, PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_INSTRUCTIONS,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_REFERENCES,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BUS_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_REF_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+ } else if (PVR_VER(mfspr(SPRN_PVR)) == POWER9) {
+ /*
+ * Invalid generic events in power9:
+ * - PERF_COUNT_HW_BUS_CYCLES
+ * - PERF_COUNT_HW_REF_CPU_CYCLES
+ */
+ event_init_opts(&event, PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_REFERENCES,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_CACHE_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BRANCH_MISSES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_BUS_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
+ PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init_opts(&event, PERF_COUNT_HW_REF_CPU_CYCLES, PERF_TYPE_HARDWARE, "event");
+ FAIL_IF(!event_open(&event));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(generic_events_valid_test, "generic_events_valid_test");
+}
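Each event_init_opts(..., PERF_TYPE_HARDWARE, ...) call above boils down to one perf_event_open() with a generic hardware config. A standalone sketch of that call using the standard syscall interface (not the selftest helper):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a generic hardware event on the calling task, any CPU (sketch). */
static int open_hw_event(__u64 config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = config;		/* e.g. PERF_COUNT_HW_CPU_CYCLES */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}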
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c
new file mode 100644
index 000000000000..f4be05aa3a3d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/* All L1 D cache load references counted at finish, gated by reject */
+#define EventCode_1 0x1100fc
+/* Load Missed L1 */
+#define EventCode_2 0x23e054
+/* Load Missed L1 */
+#define EventCode_3 0x13e054
+
+/*
+ * Testcase for group constraint check of the data and instruction
+ * cache qualifier bits, which are used to program the cache select field in
+ * Monitor Mode Control Register 1 (MMCR1: 16-17) for the l1 cache.
+ * All events in the group should match the cache select bits, otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_cache(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Init the events for the group constraint check for l1 cache select bits */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling event doesn't request same l1 cache select bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint l1 cache select test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling event requests same l1 cache select bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_cache, "group_constraint_cache");
+}
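All of the group-constraint tests in this directory follow the same pattern: open a leader, then open each sibling with the leader's fd as group_fd and check whether the kernel accepts the combination. A raw-syscall sketch of that pattern (illustrative only, not the event_open_with_group() helper):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* group_fd == -1 creates a group leader; a valid fd joins that group (sketch). */
static int open_raw_event(__u64 config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = config;

	/* A sibling that violates a group constraint makes this call fail. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}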
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c
new file mode 100644
index 000000000000..85a636886069
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/* All successful D-side store dispatches for this thread */
+#define EventCode_1 0x010000046080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define EventCode_2 0x26880
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define EventCode_3 0x010000026880
+
+/*
+ * Testcase for group constraint check of the l2l3_sel bits, which are
+ * used to program the l2l3 select field in Monitor Mode Control Register 0
+ * (MMCR0: 56-60).
+ * All events in the group should match the l2l3_sel bits, otherwise
+ * event_open for the group should fail.
+ */
+static int group_constraint_l2l3_sel(void)
+{
+ struct event event, leader;
+
+ /*
+ * Check for platform support for the test.
+ * This test is only applicable on power10
+ */
+ SKIP_IF(platform_check_for_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the events for the group constraint check for l2l3_sel bits */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling event doesn't request same l2l3_sel bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint l2l3_sel test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling event requests same l2l3_sel bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_l2l3_sel, "group_constraint_l2l3_sel");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c
new file mode 100644
index 000000000000..ff625b5d80eb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+#define EventCode_1 0x35340401e0
+#define EventCode_2 0x353c0101ec
+#define EventCode_3 0x35340101ec
+/*
+ * Test that using different sample bits in the
+ * event codes causes a scheduling failure for a
+ * group of events.
+ */
+
+static int group_constraint_mmcra_sample(void)
+{
+ struct event event, leader;
+
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Events with different "sample" field values
+ * in a group will fail to schedule.
+ * Use an event with load-only sampling mode as the
+ * group leader and an event with store-only sampling
+ * as the sibling event.
+ */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling event doesn't use same sampling bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_init(&event, EventCode_3);
+
+ /* Expected to pass as sibling event uses same sampling bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_mmcra_sample, "group_constraint_mmcra_sample");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c
new file mode 100644
index 000000000000..f5ee4796d46c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for the constraint checks on
+ * Performance Monitor Counter 5 (PMC5) and also
+ * Performance Monitor Counter 6 (PMC6). Events using
+ * PMC5/PMC6 shouldn't have other fields in event
+ * code like cache bits, thresholding or marked bit.
+ */
+
+static int group_constraint_pmc56(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Events using PMC5 and PMC6 with a cache bit
+ * set in the event code are expected to fail.
+ */
+ event_init(&event, 0x2500fa);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x2600f4);
+ FAIL_IF(!event_open(&event));
+
+ /*
+ * PMC5 and PMC6 only support base events,
+ * i.e. 0x500fa and 0x600f4. Other combinations
+ * should fail.
+ */
+ event_init(&event, 0x501e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x6001e);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x501fa);
+ FAIL_IF(!event_open(&event));
+
+ /*
+ * Events using PMC5 and PMC6 with random
+ * sampling bits set in event code should fail
+ * to schedule.
+ */
+ event_init(&event, 0x35340500fa);
+ FAIL_IF(!event_open(&event));
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_pmc56, "group_constraint_pmc56");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c
new file mode 100644
index 000000000000..af7c5c75101c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for number of counters in use.
+ * The number of programmable counters is from
+ * performance monitor counter 1 to performance
+ * monitor counter 4 (PMC1-PMC4). If number of
+ * counters in use exceeds the limit, next event
+ * should fail to schedule.
+ */
+
+static int group_constraint_pmc_count(void)
+{
+ struct event *e, events[5];
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Test for number of counters in use.
+ * Use PMC1 to PMC4 for the leader and 3 sibling
+ * events. Trying to open a fourth sibling event should
+ * fail here.
+ */
+ e = &events[0];
+ event_init(e, 0x1001a);
+
+ e = &events[1];
+ event_init(e, 0x200fc);
+
+ e = &events[2];
+ event_init(e, 0x30080);
+
+ e = &events[3];
+ event_init(e, 0x40054);
+
+ e = &events[4];
+ event_init(e, 0x0002c);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * The event_open will fail on event 4 if constraint
+ * check fails
+ */
+ for (i = 1; i < 5; i++) {
+ if (i == 4)
+ FAIL_IF(!event_open_with_group(&events[i], events[0].fd));
+ else
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+ }
+
+ for (i = 1; i < 4; i++)
+ event_close(&events[i]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_pmc_count, "group_constraint_pmc_count");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c
new file mode 100644
index 000000000000..9225618b846a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L2 */
+#define EventCode_1 0x14242
+/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L3 */
+#define EventCode_2 0x24242
+
+/*
+ * Testcase for group constraint check for the radix_scope_qual
+ * field, which is used to program Monitor Mode Control
+ * Register 1 (MMCR1) bit 18.
+ * All events in the group should match the radix_scope_qual
+ * bits, otherwise event_open for the group should fail.
+ */
+
+static int group_constraint_radix_scope_qual(void)
+{
+ struct event event, leader;
+
+ /*
+ * Check for platform support for the test.
+ * This test is applicable on power10 only.
+ */
+ SKIP_IF(platform_check_for_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the events for the group constraint check for radix_scope_qual bits */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, 0x200fc);
+
+ /* Expected to fail as sibling event doesn't request same radix_scope_qual bits as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_init(&event, EventCode_2);
+ /* Expected to pass as sibling event requests same radix_scope_qual bits as leader */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_radix_scope_qual,
+ "group_constraint_radix_scope_qual");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c
new file mode 100644
index 000000000000..371cd05bb3ed
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/* The processor's L1 data cache was reloaded */
+#define EventCode1 0x21C040
+#define EventCode2 0x22C040
+
+/*
+ * Testcase for group constraint check
+ * when using events with the same PMC.
+ * Multiple events in a group shouldn't ask for
+ * the same PMC; if they do, event_open should fail.
+ */
+
+static int group_constraint_repeat(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Two events in a group using the same PMC
+ * should fail to get scheduled. Use the same PMC2
+ * for the leader and sibling event, which is expected
+ * to fail.
+ */
+ event_init(&leader, EventCode1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode1);
+
+ /* Expected to fail since sibling event is requesting same PMC as leader */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_init(&event, EventCode2);
+
+ /* Expected to pass since sibling event is requesting different PMC */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_repeat, "group_constraint_repeat");
+}
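EventCode1 and EventCode2 above select the same event through different PMCs; only the PMC field of the raw code changes. A sketch of that relationship (the shift and mask are assumptions about the powerpc raw event layout, used here purely to show how the two codes relate):

/* Assumed: the PMC number occupies bits 16-19 of the raw event code. */
#define EX_EVENT_PMC_SHIFT	16
#define EX_EVENT_PMC_MASK	0xfUL

static unsigned long event_with_pmc(unsigned long code, unsigned int pmc)
{
	code &= ~(EX_EVENT_PMC_MASK << EX_EVENT_PMC_SHIFT);
	return code | ((unsigned long)pmc << EX_EVENT_PMC_SHIFT);
}

/* event_with_pmc(0x21C040, 2) == 0x22C040, i.e. EventCode2 above. */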
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c
new file mode 100644
index 000000000000..9f1197104e8c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Primary PMU events used here is PM_MRK_INST_CMPL (0x401e0) and
+ * PM_THRESH_MET (0x101ec)
+ * Threshold event selection used is issue to complete for cycles
+ * Sampling criteria is Load or Store only sampling
+ */
+#define p9_EventCode_1 0x13e35340401e0
+#define p9_EventCode_2 0x17d34340101ec
+#define p9_EventCode_3 0x13e35340101ec
+#define p10_EventCode_1 0x35340401e0
+#define p10_EventCode_2 0x35340101ec
+
+/*
+ * Testcase for group constraint check of the thresh_cmp bits, which are
+ * used to program the thresh compare field in Monitor Mode Control Register A
+ * (MMCRA: 9-18 bits for power9 and MMCRA: 8-18 bits for power10).
+ * All events in the group should match the thresh compare bits, otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_thresh_cmp(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
+ /* Init the events for the group constraint check for thresh_cmp bits */
+ event_init(&leader, p10_EventCode_1);
+
+ /* Add the thresh_cmp value for leader in config1 */
+ leader.attr.config1 = 1000;
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, p10_EventCode_2);
+
+ /* Add the different thresh_cmp value from the leader event in config1 */
+ event.attr.config1 = 2000;
+
+ /* Expected to fail as sibling and leader event request different thresh_cmp bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh compare test */
+ event_init(&event, p10_EventCode_2);
+
+ /* Add the same thresh_cmp value for leader and sibling event in config1 */
+ event.attr.config1 = 1000;
+
+ /* Expected to succeed as sibling and leader event request same thresh_cmp bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+ } else {
+ /* Init the events for the group constraint check for thresh_cmp bits */
+ event_init(&leader, p9_EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, p9_EventCode_2);
+
+ /* Expected to fail as sibling and leader event request different thresh_cmp bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh compare test */
+ event_init(&event, p9_EventCode_3);
+
+ /* Expected to succeed as sibling and leader event request same thresh_cmp bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_thresh_cmp, "group_constraint_thresh_cmp");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c
new file mode 100644
index 000000000000..e0852ebc1671
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Primary PMU events used here are PM_MRK_INST_CMPL (0x401e0) and
+ * PM_THRESH_MET (0x101ec).
+ * Threshold event selection used is issue to complete and issue to
+ * finished for cycles
+ * Sampling criteria is Load or Store only sampling
+ */
+#define EventCode_1 0x35340401e0
+#define EventCode_2 0x34340101ec
+#define EventCode_3 0x35340101ec
+
+/*
+ * Testcase for group constraint check of the thresh_ctl bits, which are
+ * used to program the threshold control field in Monitor Mode Control Register A
+ * (MMCRA: 48-55).
+ * All events in the group should match the thresh_ctl bits, otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_thresh_ctl(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Init the events for the group constraint thresh control test */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling and leader event request different thresh_ctl bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh control test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling and leader event request same thresh_ctl bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_thresh_ctl, "group_constraint_thresh_ctl");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c
new file mode 100644
index 000000000000..50a8cd843ce7
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Primary PMU events used here are PM_MRK_INST_CMPL (0x401e0) and
+ * PM_THRESH_MET (0x101ec).
+ * Threshold event selection used is issue to complete
+ * Sampling criteria is Load or Store only sampling
+ */
+#define EventCode_1 0x35340401e0
+#define EventCode_2 0x35540101ec
+#define EventCode_3 0x35340101ec
+
+/*
+ * Testcase for group constraint check of the thresh_sel bits, which are
+ * used to program the thresh select field in Monitor Mode Control Register A
+ * (MMCRA: 45-57).
+ * All events in the group should match the thresh select bits, otherwise
+ * event_open for the group will fail.
+ */
+static int group_constraint_thresh_sel(void)
+{
+ struct event event, leader;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Init the events for the group constraint thresh select test */
+ event_init(&leader, EventCode_1);
+ FAIL_IF(event_open(&leader));
+
+ event_init(&event, EventCode_2);
+
+ /* Expected to fail as sibling and leader event request different thresh_sel bits */
+ FAIL_IF(!event_open_with_group(&event, leader.fd));
+
+ event_close(&event);
+
+ /* Init the event for the group constraint thresh select test */
+ event_init(&event, EventCode_3);
+
+ /* Expected to succeed as sibling and leader event request same thresh_sel bits */
+ FAIL_IF(event_open_with_group(&event, leader.fd));
+
+ event_close(&leader);
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_thresh_sel, "group_constraint_thresh_sel");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c
new file mode 100644
index 000000000000..a2c18923dcec
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/* All successful D-side store dispatches for this thread with PMC 2 */
+#define EventCode_1 0x26080
+/* All successful D-side store dispatches for this thread with PMC 4 */
+#define EventCode_2 0x46080
+/* All successful D-side store dispatches for this thread that were L2 Miss with PMC 3 */
+#define EventCode_3 0x36880
+
+/*
+ * Testcase for group constraint check of the unit and pmc bits, which are
+ * used to program the corresponding unit and pmc fields in Monitor Mode
+ * Control Register 1 (MMCR1).
+ * One of the events in the group should use PMC 4 in case the unit field
+ * value is within 6 to 9, otherwise event_open for the group will fail.
+ */
+static int group_constraint_unit(void)
+{
+ struct event *e, events[3];
+
+ /*
+ * Check for platform support for the test.
+ * Constraint to use PMC4 with one of the event in group,
+ * when the unit is within 6 to 9 is only applicable on
+ * power9.
+ */
+ SKIP_IF(platform_check_for_tests());
+ SKIP_IF(have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the events for the group constraint check for unit bits */
+ e = &events[0];
+ event_init(e, EventCode_1);
+
+ /* Expected to fail as PMC 4 is not used with unit field value 6 to 9 */
+ FAIL_IF(!event_open(&events[0]));
+
+ /* Init the events for the group constraint check for unit bits */
+ e = &events[1];
+ event_init(e, EventCode_2);
+
+ /* Expected to pass as PMC 4 is used with unit field value 6 to 9 */
+ FAIL_IF(event_open(&events[1]));
+
+ /* Init the event for the group constraint unit test */
+ e = &events[2];
+ event_init(e, EventCode_3);
+
+ /* Expected to fail as PMC4 is not being used */
+ FAIL_IF(!event_open_with_group(&events[2], events[0].fd));
+
+ /* Expected to succeed as event using PMC4 */
+ FAIL_IF(event_open_with_group(&events[2], events[1].fd));
+
+ event_close(&events[0]);
+ event_close(&events[1]);
+ event_close(&events[2]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_constraint_unit, "group_constraint_unit");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c
new file mode 100644
index 000000000000..cff9ac170df6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for group constraint check for
+ * Performance Monitor Counter 5 (PMC5) and also
+ * Performance Monitor Counter 6 (PMC6).
+ * Test that pmc5/6 are excluded from the constraint
+ * check when scheduled along with a group of events.
+ */
+
+static int group_pmc56_exclude_constraints(void)
+{
+ struct event *e, events[3];
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * PMC5/6 are excluded from the constraint bit
+ * check along with a group of events. Use a
+ * group of events with PMC5, PMC6 and also an
+ * event with a cache bit (dc_ic) set. The test expects
+ * this set of events to be scheduled as a group.
+ */
+ e = &events[0];
+ event_init(e, 0x500fa);
+
+ e = &events[1];
+ event_init(e, 0x600f4);
+
+ e = &events[2];
+ event_init(e, 0x22C040);
+
+ FAIL_IF(event_open(&events[0]));
+
+ /*
+ * The event_open will fail if constraint check fails.
+ * Since we are asking for events in a group and since
+ * PMC5/PMC6 are excluded from group constraints, event_open
+ * should pass.
+ */
+ for (i = 1; i < 3; i++)
+ FAIL_IF(event_open_with_group(&events[i], events[0].fd));
+
+ for (i = 0; i < 3; i++)
+ event_close(&events[i]);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(group_pmc56_exclude_constraints, "group_pmc56_exclude_constraints");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c
new file mode 100644
index 000000000000..a45b1da5b568
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "utils.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Load Missed L1: for power9 it points to PM_LD_MISS_L1_FIN (0x2c04e) and
+ * for power10 it points to PM_LD_MISS_L1 (0x3e054)
+ *
+ * Hardware cache level : PERF_COUNT_HW_CACHE_L1D
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_READ
+ * Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_MISS
+ */
+#define EventCode_1 0x10000
+/*
+ * Hardware cache level : PERF_COUNT_HW_CACHE_L1D
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_WRITE
+ * Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ */
+#define EventCode_2 0x0100
+/*
+ * Hardware cache level : PERF_COUNT_HW_CACHE_DTLB
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_WRITE
+ * Hardware cache event result type : PERF_COUNT_HW_CACHE_RESULT_ACCESS
+ */
+#define EventCode_3 0x0103
+/*
+ * Hardware cache level : PERF_COUNT_HW_CACHE_L1D
+ * Hardware cache event operation type : PERF_COUNT_HW_CACHE_OP_READ
+ * Hardware cache event result type : Invalid ( > PERF_COUNT_HW_CACHE_RESULT_MAX)
+ */
+#define EventCode_4 0x030000
+
+/*
+ * A perf test to check valid hardware cache events.
+ */
+static int hw_cache_event_type_test(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Skip for Generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_1, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to succeed as it points to L1 load miss */
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_2, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to fail as the corresponding cache event entry has 0 in that index */
+ FAIL_IF(!event_open(&event));
+ event_close(&event);
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_3, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to fail as the corresponding cache event entry has -1 in that index */
+ FAIL_IF(!event_open(&event));
+ event_close(&event);
+
+ /* Init the event to test hardware cache event */
+ event_init_opts(&event, EventCode_4, PERF_TYPE_HW_CACHE, "event");
+
+ /* Expected to fail as hardware cache event result type is Invalid */
+ FAIL_IF(!event_open(&event));
+ event_close(&event);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(hw_cache_event_type_test, "hw_cache_event_type_test");
+}
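The PERF_TYPE_HW_CACHE config values used above follow the encoding documented in perf_event_open(2): cache id in the low byte, operation in the next byte, result in the third byte. A sketch recomposing EventCode_1 from its parts:

#include <linux/perf_event.h>

/* config = id | (op << 8) | (result << 16), per perf_event_open(2). */
#define HW_CACHE_CONFIG(id, op, result) \
	((__u64)(id) | ((__u64)(op) << 8) | ((__u64)(result) << 16))

/* L1D read miss == 0x10000, matching EventCode_1 above. */
static const __u64 l1d_read_miss =
	HW_CACHE_CONFIG(PERF_COUNT_HW_CACHE_L1D,
			PERF_COUNT_HW_CACHE_OP_READ,
			PERF_COUNT_HW_CACHE_RESULT_MISS);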
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c
new file mode 100644
index 000000000000..f51fcab837fc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define EventCode_1 0x1340000001c040
+/* PM_DATA_RADIX_PROCESS_L2_PTE_FROM_L2 */
+#define EventCode_2 0x14242
+/* Event code with IFM, EBB, BHRB bits set in event code */
+#define EventCode_3 0xf00000000000001e
+
+/*
+ * Some of the bits in the event code are
+ * reserved for specific platforms.
+ * Event code bits 52-59 are reserved in power9,
+ * whereas in power10, these are used for programming
+ * Monitor Mode Control Register 3 (MMCR3).
+ * Bit 9 in the event code is reserved in power9,
+ * whereas in power10 it is used for programming the "radix_scope_qual"
+ * bit, i.e. bit 18 in Monitor Mode Control Register 1 (MMCR1).
+ *
+ * Testcase to ensure that using reserved bits in
+ * event code should cause event_open to fail.
+ */
+
+static int invalid_event_code(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+ * Events using MMCR3 bits and radix scope qual bits
+ * should fail in power9 and should succeed in power10.
+ * Init the events and check for pass/fail in event open.
+ */
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
+ event_init(&event, EventCode_1);
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+
+ event_init(&event, EventCode_2);
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+ } else {
+ event_init(&event, EventCode_1);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, EventCode_2);
+ FAIL_IF(!event_open(&event));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(invalid_event_code, "invalid_event_code");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c
new file mode 100644
index 000000000000..4c119c821b99
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for reserved bits in Monitor Mode Control
+ * Register A (MMCRA) Random Sampling Mode (SM) value.
+ * As per Instruction Set Architecture (ISA), the values
+ * 0x5, 0x9, 0xD, 0x19, 0x1D, 0x1A, 0x1E are reserved
+ * for the sampling mode field. Test that using these reserved
+ * values causes event_open to fail.
+ * Input event code uses these sampling bits along with
+ * 401e0 (PM_MRK_INST_CMPL).
+ */
+
+static int reserved_bits_mmcra_sample_elig_mode(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Skip for Generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * MMCRA Random Sampling Mode (SM) values: 0x5
+ * 0x9, 0xD, 0x19, 0x1D, 0x1A, 0x1E is reserved.
+ * Expected to fail when using these reserved values.
+ */
+ event_init(&event, 0x50401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x90401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0xD0401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x190401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x1D0401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x1A0401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x1E0401e0);
+ FAIL_IF(!event_open(&event));
+
+ /*
+ * MMCRA Random Sampling Mode (SM) value 0x10
+ * is reserved in power10 and 0xC is reserved in
+ * power9.
+ */
+ if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
+ event_init(&event, 0x100401e0);
+ FAIL_IF(!event_open(&event));
+ } else if (PVR_VER(mfspr(SPRN_PVR)) == POWER9) {
+ event_init(&event, 0xC0401e0);
+ FAIL_IF(!event_open(&event));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(reserved_bits_mmcra_sample_elig_mode,
+ "reserved_bits_mmcra_sample_elig_mode");
+}
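The raw codes in this test are simply PM_MRK_INST_CMPL (0x401e0) with a sampling-mode value shifted into the event code's sample field. A sketch of that composition (the shift is inferred from the codes above and is an assumption here, not taken from the driver headers):

/* Assumed: the random sampling mode field starts at bit 24 of the raw code. */
#define EX_EVENT_SAMPLE_SHIFT	24
#define EX_PM_MRK_INST_CMPL	0x401e0UL

#define EX_SAMPLE_MODE_EVENT(mode) \
	(((unsigned long)(mode) << EX_EVENT_SAMPLE_SHIFT) | EX_PM_MRK_INST_CMPL)

/* EX_SAMPLE_MODE_EVENT(0x5) == 0x50401e0, the first reserved value tested above. */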
diff --git a/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c
new file mode 100644
index 000000000000..4ea1c2f8913f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include "../event.h"
+#include "../sampling_tests/misc.h"
+
+/*
+ * Testcase for reserved bits in Monitor Mode
+ * Control Register A (MMCRA) thresh_ctl bits.
+ * For MMCRA[48:51]/[52:55] Threshold Start/Stop,
+ * 0b11110000/0b00001111 is reserved.
+ */
+
+static int reserved_bits_mmcra_thresh_ctl(void)
+{
+ struct event event;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /* Skip for Generic compat PMU */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /*
+ * MMCRA[48:51]/[52:55] Threshold Start/Stop
+ * event selection. 0b11110000/0b00001111 is reserved.
+ * Expected to fail when using these reserved values.
+ */
+ event_init(&event, 0xf0340401e0);
+ FAIL_IF(!event_open(&event));
+
+ event_init(&event, 0x0f340401e0);
+ FAIL_IF(!event_open(&event));
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(reserved_bits_mmcra_thresh_ctl, "reserved_bits_mmcra_thresh_ctl");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/l3_bank_test.c b/tools/testing/selftests/powerpc/pmu/l3_bank_test.c
index a96d512a18c4..a5dfa9bf3b9f 100644
--- a/tools/testing/selftests/powerpc/pmu/l3_bank_test.c
+++ b/tools/testing/selftests/powerpc/pmu/l3_bank_test.c
@@ -20,6 +20,9 @@ static int l3_bank_test(void)
char *p;
int i;
+ // The L3 bank logic is only used on Power8 or later
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
p = malloc(MALLOC_SIZE);
FAIL_IF(!p);
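have_hwcap2()-style checks like the one added above read AT_HWCAP2 from the auxiliary vector. A self-contained sketch using getauxval(3) (the feature bit value is copied from the uapi cputable header and should be treated as an assumption here):

#include <stdbool.h>
#include <sys/auxv.h>

#ifndef PPC_FEATURE2_ARCH_2_07
#define PPC_FEATURE2_ARCH_2_07	0x80000000	/* assumed value, see <asm/cputable.h> */
#endif

static bool cpu_is_arch_2_07_or_later(void)
{
	return (getauxval(AT_HWCAP2) & PPC_FEATURE2_ARCH_2_07) != 0;
}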
diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h
index fa12e7d0b4d3..bf1bec013bbb 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.h
+++ b/tools/testing/selftests/powerpc/pmu/lib.h
@@ -6,6 +6,7 @@
#ifndef __SELFTESTS_POWERPC_PMU_LIB_H
#define __SELFTESTS_POWERPC_PMU_LIB_H
+#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
diff --git a/tools/testing/selftests/powerpc/pmu/loop.S b/tools/testing/selftests/powerpc/pmu/loop.S
index 8cc9b5e2c9de..c52ba09b6fed 100644
--- a/tools/testing/selftests/powerpc/pmu/loop.S
+++ b/tools/testing/selftests/powerpc/pmu/loop.S
@@ -41,3 +41,38 @@ FUNC_START(thirty_two_instruction_loop)
subi r3,r3,1
b FUNC_NAME(thirty_two_instruction_loop)
FUNC_END(thirty_two_instruction_loop)
+
+FUNC_START(thirty_two_instruction_loop_with_ll_sc)
+ cmpdi r3,0
+ beqlr
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 5
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+1: ldarx r6,0,r4 # 10
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 15
+ addi r5,r5,1
+ addi r5,r5,1
+ stdcx. r6,0,r4
+ bne- 1b
+ addi r5,r5,1 # 20
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 25
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 30
+ subi r3,r3,1
+ b FUNC_NAME(thirty_two_instruction_loop_with_ll_sc)
+FUNC_END(thirty_two_instruction_loop_with_ll_sc)
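The new loop wraps 32 instructions around an ldarx/stdcx. pair, i.e. a load-reserve/store-conditional that retries when the reservation is lost. A C11 analogue of that retry shape, shown only to illustrate the semantics of the assembly above:

#include <stdatomic.h>

/* Reload and conditionally store the same value until the store succeeds,
 * mirroring the "ldarx ... stdcx. / bne- 1b" retry in the loop above.
 */
static void ll_sc_like_touch(_Atomic unsigned long *p)
{
	unsigned long v = atomic_load_explicit(p, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(p, &v, v,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* retry, like the bne- back to the ldarx */
}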
diff --git a/tools/testing/selftests/powerpc/pmu/per_event_excludes.c b/tools/testing/selftests/powerpc/pmu/per_event_excludes.c
index 2756fe2efdc5..ad32a09a6540 100644
--- a/tools/testing/selftests/powerpc/pmu/per_event_excludes.c
+++ b/tools/testing/selftests/powerpc/pmu/per_event_excludes.c
@@ -23,12 +23,9 @@
static int per_event_excludes(void)
{
struct event *e, events[4];
- char *platform;
int i;
- platform = (char *)get_auxv_entry(AT_BASE_PLATFORM);
- FAIL_IF(!platform);
- SKIP_IF(strcmp(platform, "power8") != 0);
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
/*
* We need to create the events disabled, otherwise the running/enabled
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore b/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore
new file mode 100644
index 000000000000..f93b4c7c3a8a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore
@@ -0,0 +1,21 @@
+bhrb_filter_map_test
+bhrb_no_crash_wo_pmu_test
+intr_regs_no_crash_wo_pmu_test
+mmcr0_cc56run_test
+mmcr0_exceptionbits_test
+mmcr0_fc56_pmc1ce_test
+mmcr0_fc56_pmc56_test
+mmcr0_pmccext_test
+mmcr0_pmcjce_test
+mmcr1_comb_test
+mmcr1_sel_unit_cache_test
+mmcr2_fcs_fch_test
+mmcr2_l2l3_test
+mmcr3_src_test
+mmcra_bhrb_any_test
+mmcra_bhrb_cond_test
+mmcra_bhrb_disable_no_branch_test
+mmcra_bhrb_disable_test
+mmcra_bhrb_ind_call_test
+mmcra_thresh_cmp_test
+mmcra_thresh_marked_sample_test
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile b/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile
new file mode 100644
index 000000000000..9e67351fb252
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -m64
+
+TEST_GEN_PROGS := mmcr0_exceptionbits_test mmcr0_cc56run_test mmcr0_pmccext_test \
+ mmcr0_pmcjce_test mmcr0_fc56_pmc1ce_test mmcr0_fc56_pmc56_test \
+ mmcr1_comb_test mmcr2_l2l3_test mmcr2_fcs_fch_test \
+ mmcr3_src_test mmcra_thresh_marked_sample_test mmcra_thresh_cmp_test \
+ mmcra_bhrb_ind_call_test mmcra_bhrb_any_test mmcra_bhrb_cond_test \
+ mmcra_bhrb_disable_test bhrb_no_crash_wo_pmu_test intr_regs_no_crash_wo_pmu_test \
+ bhrb_filter_map_test mmcr1_sel_unit_cache_test mmcra_bhrb_disable_no_branch_test
+
+top_srcdir = ../../../../../..
+include ../../../lib.mk
+
+$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c misc.c misc.h ../loop.S ../branch_loops.S
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c
new file mode 100644
index 000000000000..3f43c315c666
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * A perf sampling test to check bhrb filter
+ * map. Not all branch filters are supported
+ * on powerpc. Supported filters:
+ * power10: any, any_call, ind_call, cond
+ * power9: any, any_call
+ *
+ * The testcase checks that event open fails
+ * for invalid bhrb filter types and succeeds
+ * for valid filter types, doing a validity
+ * check for these branch sample types.
+ */
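+
+/*
+ * Illustrative sketch of the expectation (assuming a power9 system):
+ *
+ *	event_init(&event, EventCode);
+ *	event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ *	event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND;
+ *	FAIL_IF(!event_open(&event));
+ *
+ * i.e. the open is expected to fail on power9, while the same open
+ * is expected to succeed on power10.
+ */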
+
+/* Valid bhrb filters in power9/power10; all other types are invalid on powerpc */
+int bhrb_filter_map_valid_common[] = {
+ PERF_SAMPLE_BRANCH_ANY,
+ PERF_SAMPLE_BRANCH_ANY_CALL,
+};
+
+/* Valid bhrb filters in power10 */
+int bhrb_filter_map_valid_p10[] = {
+ PERF_SAMPLE_BRANCH_IND_CALL,
+ PERF_SAMPLE_BRANCH_COND,
+};
+
+#define EventCode 0x1001e
+
+static int bhrb_filter_map_test(void)
+{
+ struct event event;
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(platform_check_for_tests());
+
+ /*
+	 * Skip for generic compat PMU since
+	 * bhrb filters are not supported
+ */
+ SKIP_IF(check_for_generic_compat_pmu());
+
+ /* Init the event for the sampling test */
+ event_init(&event, EventCode);
+
+ event.attr.sample_period = 1000;
+ event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ event.attr.disabled = 1;
+
+ /* Invalid filter maps which are expected to fail in event_open */
+ for (i = PERF_SAMPLE_BRANCH_USER_SHIFT; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
+ /* Skip the valid branch sample type */
+ if (i == PERF_SAMPLE_BRANCH_ANY_SHIFT || i == PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT \
+ || i == PERF_SAMPLE_BRANCH_IND_CALL_SHIFT || i == PERF_SAMPLE_BRANCH_COND_SHIFT)
+ continue;
+ event.attr.branch_sample_type = 1U << i;
+ FAIL_IF(!event_open(&event));
+ }
+
+ /* valid filter maps for power9/power10 which are expected to pass in event_open */
+ for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_common); i++) {
+ event.attr.branch_sample_type = bhrb_filter_map_valid_common[i];
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+ }
+
+ /*
+	 * Filter maps which are valid on power10 and invalid on power9.
+	 * A PVR check is used here since PMU specific data such as the
+	 * bhrb filter map is handled by the respective PMU driver code,
+	 * and using the PVR works correctly in all cases, including
+	 * generic compat mode.
+ */
+ if (PVR_VER(mfspr(SPRN_PVR)) == POWER10) {
+ for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_p10); i++) {
+ event.attr.branch_sample_type = bhrb_filter_map_valid_p10[i];
+ FAIL_IF(event_open(&event));
+ event_close(&event);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(bhrb_filter_map_valid_p10); i++) {
+ event.attr.branch_sample_type = bhrb_filter_map_valid_p10[i];
+ FAIL_IF(!event_open(&event));
+ }
+ }
+
+ /*
+	 * Combine filter maps, for example any (PERF_SAMPLE_BRANCH_ANY)
+	 * and any_call (PERF_SAMPLE_BRANCH_ANY_CALL). The combination is
+	 * not supported, so perf_event_open is expected to fail.
+ */
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL;
+ FAIL_IF(!event_open(&event));
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(bhrb_filter_map_test, "bhrb_filter_map_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c
new file mode 100644
index 000000000000..4644c6782974
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * A perf sampling test to make sure that
+ * enabling branch stack doesn't crash in any
+ * environment, i.e.:
+ * - with the generic compat PMU
+ * - without any PMU registered
+ * - with a platform specific PMU
+ * A fix for the bhrb sampling crash was added
+ * in the kernel via commit b460b512417a
+ * ("powerpc/perf: Fix crashes with
+ * generic_compat_pmu & BHRB")
+ *
+ * This testcase exercises that code by enabling
+ * branch stack for a software event, which works
+ * even on platforms without a PMU.
+ */
+static int bhrb_no_crash_wo_pmu_test(void)
+{
+ struct event event;
+
+ /*
+ * Init the event for the sampling test.
+ * This uses software event which works on
+ * any platform.
+ */
+ event_init_opts(&event, 0, PERF_TYPE_SOFTWARE, "cycles");
+
+ event.attr.sample_period = 1000;
+ event.attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ event.attr.disabled = 1;
+
+ /*
+ * Return code of event_open is not
+ * considered since test just expects no crash from
+ * using PERF_SAMPLE_BRANCH_STACK. Also for environment
+ * like generic compat PMU, branch stack is unsupported.
+ */
+ event_open(&event);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(bhrb_no_crash_wo_pmu_test, "bhrb_no_crash_wo_pmu_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c
new file mode 100644
index 000000000000..839d2d225da0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * A perf sampling test for making sure
+ * sampling with -intr-regs doesn't crash
+ * in any environment, say:
+ * - With generic compat PMU
+ * - without any PMU registered
+ * - With platform specific PMU.
+ * A fix for crash with intr_regs was
+ * addressed in commit: f75e7d73bdf7 in kernel.
+ *
+ * This testcase exercises this code path by doing
+ * intr_regs using software event. Software event is
+ * used since s/w event will work even in platform
+ * without PMU.
+ */
+static int intr_regs_no_crash_wo_pmu_test(void)
+{
+ struct event event;
+
+ /*
+ * Init the event for the sampling test.
+ * This uses software event which works on
+ * any platform.
+ */
+ event_init_opts(&event, 0, PERF_TYPE_SOFTWARE, "cycles");
+
+ event.attr.sample_period = 1000;
+ event.attr.sample_type = PERF_SAMPLE_REGS_INTR;
+ event.attr.disabled = 1;
+
+ /*
+ * Return code of event_open is not considered
+ * since test just expects no crash from using
+ * PERF_SAMPLE_REGS_INTR.
+ */
+ event_open(&event);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(intr_regs_no_crash_wo_pmu_test, "intr_regs_no_crash_wo_pmu_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
new file mode 100644
index 000000000000..eac6420abdf1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ * Copyright 2022, Madhavan Srinivasan, IBM Corp.
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <string.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <ctype.h>
+
+#include "misc.h"
+
+#define PAGE_SIZE sysconf(_SC_PAGESIZE)
+
+/* Storage for platform version */
+int pvr;
+u64 platform_extended_mask;
+
+/* Mask and Shift for Event code fields */
+int ev_mask_pmcxsel, ev_shift_pmcxsel; //pmcxsel field
+int ev_mask_marked, ev_shift_marked; //marked field
+int ev_mask_comb, ev_shift_comb; //combine field
+int ev_mask_unit, ev_shift_unit; //unit field
+int ev_mask_pmc, ev_shift_pmc; //pmc field
+int ev_mask_cache, ev_shift_cache; //Cache sel field
+int ev_mask_sample, ev_shift_sample; //Random sampling field
+int ev_mask_thd_sel, ev_shift_thd_sel; //thresh_sel field
+int ev_mask_thd_start, ev_shift_thd_start; //thresh_start field
+int ev_mask_thd_stop, ev_shift_thd_stop; //thresh_stop field
+int ev_mask_thd_cmp, ev_shift_thd_cmp; //thresh cmp field
+int ev_mask_sm, ev_shift_sm; //SDAR mode field
+int ev_mask_rsq, ev_shift_rsq; //radix scope qual field
+int ev_mask_l2l3, ev_shift_l2l3; //l2l3 sel field
+int ev_mask_mmcr3_src, ev_shift_mmcr3_src; //mmcr3 field
+
+static void init_ev_encodes(void)
+{
+ ev_mask_pmcxsel = 0xff;
+ ev_shift_pmcxsel = 0;
+ ev_mask_marked = 1;
+ ev_shift_marked = 8;
+ ev_mask_unit = 0xf;
+ ev_shift_unit = 12;
+ ev_mask_pmc = 0xf;
+ ev_shift_pmc = 16;
+ ev_mask_sample = 0x1f;
+ ev_shift_sample = 24;
+ ev_mask_thd_sel = 0x7;
+ ev_shift_thd_sel = 29;
+ ev_mask_thd_start = 0xf;
+ ev_shift_thd_start = 36;
+ ev_mask_thd_stop = 0xf;
+ ev_shift_thd_stop = 32;
+
+ switch (pvr) {
+ case POWER10:
+ ev_mask_thd_cmp = 0x3ffff;
+ ev_shift_thd_cmp = 0;
+ ev_mask_rsq = 1;
+ ev_shift_rsq = 9;
+ ev_mask_comb = 3;
+ ev_shift_comb = 10;
+ ev_mask_cache = 3;
+ ev_shift_cache = 20;
+ ev_mask_sm = 0x3;
+ ev_shift_sm = 22;
+ ev_mask_l2l3 = 0x1f;
+ ev_shift_l2l3 = 40;
+ ev_mask_mmcr3_src = 0x7fff;
+ ev_shift_mmcr3_src = 45;
+ break;
+ case POWER9:
+ ev_mask_comb = 3;
+ ev_shift_comb = 10;
+ ev_mask_cache = 0xf;
+ ev_shift_cache = 20;
+ ev_mask_thd_cmp = 0x3ff;
+ ev_shift_thd_cmp = 40;
+ ev_mask_sm = 0x3;
+ ev_shift_sm = 50;
+ break;
+ default:
+ FAIL_IF_EXIT(1);
+ }
+}
+
+/* Return the extended regs mask value */
+static u64 perf_get_platform_reg_mask(void)
+{
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_1))
+ return PERF_POWER10_MASK;
+ if (have_hwcap2(PPC_FEATURE2_ARCH_3_00))
+ return PERF_POWER9_MASK;
+
+ return -1;
+}
+
+int check_extended_regs_support(void)
+{
+ int fd;
+ struct event event;
+
+ event_init(&event, 0x1001e);
+
+ event.attr.type = 4;
+ event.attr.sample_period = 1;
+ event.attr.disabled = 1;
+ event.attr.sample_type = PERF_SAMPLE_REGS_INTR;
+ event.attr.sample_regs_intr = platform_extended_mask;
+
+ fd = event_open(&event);
+ if (fd != -1)
+ return 0;
+
+ return -1;
+}
+
+int platform_check_for_tests(void)
+{
+ pvr = PVR_VER(mfspr(SPRN_PVR));
+
+ /*
+ * Check for supported platforms
+ * for sampling test
+ */
+ if ((pvr != POWER10) && (pvr != POWER9))
+ goto out;
+
+ /*
+	 * Check that a PMU driver is registered by looking
+	 * for the PPC_FEATURE2_EBB bit in AT_HWCAP2
+ */
+ if (!have_hwcap2(PPC_FEATURE2_EBB) || !have_hwcap2(PPC_FEATURE2_ARCH_3_00))
+ goto out;
+
+ return 0;
+
+out:
+ printf("%s: Tests unsupported for this platform\n", __func__);
+ return -1;
+}
+
+int check_pvr_for_sampling_tests(void)
+{
+ SKIP_IF(platform_check_for_tests());
+
+ platform_extended_mask = perf_get_platform_reg_mask();
+ /* check if platform supports extended regs */
+ if (check_extended_regs_support())
+ goto out;
+
+ init_ev_encodes();
+ return 0;
+
+out:
+	printf("%s: Sampling tests unsupported\n", __func__);
+ return -1;
+}
+
+/*
+ * Allocate mmap buffer of "mmap_pages" number of
+ * pages.
+ */
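+/*
+ * For example (illustrative): event_sample_buf_mmap(fd, 1) maps two
+ * pages in total, one metadata page followed by one data page.
+ */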
+void *event_sample_buf_mmap(int fd, int mmap_pages)
+{
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ size_t mmap_size;
+ void *buff;
+
+ if (mmap_pages <= 0)
+ return NULL;
+
+ if (fd <= 0)
+ return NULL;
+
+ mmap_size = page_size * (1 + mmap_pages);
+ buff = mmap(NULL, mmap_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+ if (buff == MAP_FAILED) {
+ perror("mmap() failed.");
+ return NULL;
+ }
+ return buff;
+}
+
+/*
+ * Post process the mmap buffer.
+ * - If sample_count != NULL then return count of total
+ * number of samples present in the mmap buffer.
+ * - If sample_count == NULL then return the address
+ * of first sample from the mmap buffer
+ */
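+/*
+ * Illustrative usage (see collect_samples() and perf_read_first_sample()
+ * below; "buf" and "first" are placeholder names):
+ *
+ *	u64 count;
+ *	size_t size;
+ *
+ *	__event_read_samples(buf, &size, &count);	// total sample count
+ *	first = __event_read_samples(buf, &size, NULL);	// first sample
+ */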
+void *__event_read_samples(void *sample_buff, size_t *size, u64 *sample_count)
+{
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ struct perf_event_header *header = sample_buff + page_size;
+ struct perf_event_mmap_page *metadata_page = sample_buff;
+ unsigned long data_head, data_tail;
+
+ /*
+ * PERF_RECORD_SAMPLE:
+ * struct {
+ * struct perf_event_header hdr;
+ * u64 data[];
+ * };
+ */
+
+ data_head = metadata_page->data_head;
+ /* sync memory before reading sample */
+ mb();
+ data_tail = metadata_page->data_tail;
+
+ /* Check for sample_count */
+ if (sample_count)
+ *sample_count = 0;
+
+ while (1) {
+ /*
+		 * Read the mmap data buffer by moving
+		 * data_tail to track the last read data.
+		 * data_head points to the head of the data buffer.
+		 * Refer to "struct perf_event_mmap_page" in
+		 * "include/uapi/linux/perf_event.h".
+ */
+ if (data_head - data_tail < sizeof(header))
+ return NULL;
+
+ data_tail += sizeof(header);
+ if (header->type == PERF_RECORD_SAMPLE) {
+ *size = (header->size - sizeof(header));
+ if (!sample_count)
+ return sample_buff + page_size + data_tail;
+ data_tail += *size;
+ *sample_count += 1;
+ } else {
+ *size = (header->size - sizeof(header));
+ if ((metadata_page->data_tail + *size) > metadata_page->data_head)
+ data_tail = metadata_page->data_head;
+ else
+ data_tail += *size;
+ }
+ header = (struct perf_event_header *)((void *)header + header->size);
+ }
+ return NULL;
+}
+
+int collect_samples(void *sample_buff)
+{
+ u64 sample_count;
+ size_t size = 0;
+
+ __event_read_samples(sample_buff, &size, &sample_count);
+ return sample_count;
+}
+
+static void *perf_read_first_sample(void *sample_buff, size_t *size)
+{
+ return __event_read_samples(sample_buff, size, NULL);
+}
+
+u64 *get_intr_regs(struct event *event, void *sample_buff)
+{
+ u64 type = event->attr.sample_type;
+ u64 *intr_regs;
+ size_t size = 0;
+
+ if ((type ^ (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_BRANCH_STACK)) &&
+ (type ^ PERF_SAMPLE_REGS_INTR))
+ return NULL;
+
+ intr_regs = (u64 *)perf_read_first_sample(sample_buff, &size);
+ if (!intr_regs)
+ return NULL;
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ /*
+ * PERF_RECORD_SAMPLE and PERF_SAMPLE_BRANCH_STACK:
+ * struct {
+ * struct perf_event_header hdr;
+ * u64 number_of_branches;
+ * struct perf_branch_entry[number_of_branches];
+ * u64 data[];
+ * };
+ * struct perf_branch_entry {
+ * u64 from;
+ * u64 to;
+ * u64 misc;
+ * };
+ */
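+		/*
+		 * For example (illustrative): if *intr_regs is 2, the branch
+		 * stack occupies 1 + 2 * 3 = 7 u64 words, so the register
+		 * data starts 7 words further in.
+		 */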
+ intr_regs += ((*intr_regs) * 3) + 1;
+ }
+
+ /*
+	 * The first entry in the sample buffer is used to specify
+	 * PERF_SAMPLE_REGS_ABI_64; skip the perf regs abi word to
+	 * access the interrupt registers.
+ */
+ ++intr_regs;
+
+ return intr_regs;
+}
+
+static const int __perf_reg_mask(const char *register_name)
+{
+ if (!strcmp(register_name, "R0"))
+ return 0;
+ else if (!strcmp(register_name, "R1"))
+ return 1;
+ else if (!strcmp(register_name, "R2"))
+ return 2;
+ else if (!strcmp(register_name, "R3"))
+ return 3;
+ else if (!strcmp(register_name, "R4"))
+ return 4;
+ else if (!strcmp(register_name, "R5"))
+ return 5;
+ else if (!strcmp(register_name, "R6"))
+ return 6;
+ else if (!strcmp(register_name, "R7"))
+ return 7;
+ else if (!strcmp(register_name, "R8"))
+ return 8;
+ else if (!strcmp(register_name, "R9"))
+ return 9;
+ else if (!strcmp(register_name, "R10"))
+ return 10;
+ else if (!strcmp(register_name, "R11"))
+ return 11;
+ else if (!strcmp(register_name, "R12"))
+ return 12;
+ else if (!strcmp(register_name, "R13"))
+ return 13;
+ else if (!strcmp(register_name, "R14"))
+ return 14;
+ else if (!strcmp(register_name, "R15"))
+ return 15;
+ else if (!strcmp(register_name, "R16"))
+ return 16;
+ else if (!strcmp(register_name, "R17"))
+ return 17;
+ else if (!strcmp(register_name, "R18"))
+ return 18;
+ else if (!strcmp(register_name, "R19"))
+ return 19;
+ else if (!strcmp(register_name, "R20"))
+ return 20;
+ else if (!strcmp(register_name, "R21"))
+ return 21;
+ else if (!strcmp(register_name, "R22"))
+ return 22;
+ else if (!strcmp(register_name, "R23"))
+ return 23;
+ else if (!strcmp(register_name, "R24"))
+ return 24;
+ else if (!strcmp(register_name, "R25"))
+ return 25;
+ else if (!strcmp(register_name, "R26"))
+ return 26;
+ else if (!strcmp(register_name, "R27"))
+ return 27;
+ else if (!strcmp(register_name, "R28"))
+ return 28;
+ else if (!strcmp(register_name, "R29"))
+ return 29;
+ else if (!strcmp(register_name, "R30"))
+ return 30;
+ else if (!strcmp(register_name, "R31"))
+ return 31;
+ else if (!strcmp(register_name, "NIP"))
+ return 32;
+ else if (!strcmp(register_name, "MSR"))
+ return 33;
+ else if (!strcmp(register_name, "ORIG_R3"))
+ return 34;
+ else if (!strcmp(register_name, "CTR"))
+ return 35;
+ else if (!strcmp(register_name, "LINK"))
+ return 36;
+ else if (!strcmp(register_name, "XER"))
+ return 37;
+ else if (!strcmp(register_name, "CCR"))
+ return 38;
+ else if (!strcmp(register_name, "SOFTE"))
+ return 39;
+ else if (!strcmp(register_name, "TRAP"))
+ return 40;
+ else if (!strcmp(register_name, "DAR"))
+ return 41;
+ else if (!strcmp(register_name, "DSISR"))
+ return 42;
+ else if (!strcmp(register_name, "SIER"))
+ return 43;
+ else if (!strcmp(register_name, "MMCRA"))
+ return 44;
+ else if (!strcmp(register_name, "MMCR0"))
+ return 45;
+ else if (!strcmp(register_name, "MMCR1"))
+ return 46;
+ else if (!strcmp(register_name, "MMCR2"))
+ return 47;
+ else if (!strcmp(register_name, "MMCR3"))
+ return 48;
+ else if (!strcmp(register_name, "SIER2"))
+ return 49;
+ else if (!strcmp(register_name, "SIER3"))
+ return 50;
+ else if (!strcmp(register_name, "PMC1"))
+ return 51;
+ else if (!strcmp(register_name, "PMC2"))
+ return 52;
+ else if (!strcmp(register_name, "PMC3"))
+ return 53;
+ else if (!strcmp(register_name, "PMC4"))
+ return 54;
+ else if (!strcmp(register_name, "PMC5"))
+ return 55;
+ else if (!strcmp(register_name, "PMC6"))
+ return 56;
+ else if (!strcmp(register_name, "SDAR"))
+ return 57;
+ else if (!strcmp(register_name, "SIAR"))
+ return 58;
+ else
+ return -1;
+}
+
+u64 get_reg_value(u64 *intr_regs, char *register_name)
+{
+ int register_bit_position;
+
+ register_bit_position = __perf_reg_mask(register_name);
+
+ if (register_bit_position < 0 || (!((platform_extended_mask >>
+ (register_bit_position - 1)) & 1)))
+ return -1;
+
+ return *(intr_regs + register_bit_position);
+}
+
+int get_thresh_cmp_val(struct event event)
+{
+ int exp = 0;
+ u64 result = 0;
+ u64 value;
+
+ if (!have_hwcap2(PPC_FEATURE2_ARCH_3_1))
+ return EV_CODE_EXTRACT(event.attr.config, thd_cmp);
+
+ value = EV_CODE_EXTRACT(event.attr.config1, thd_cmp);
+
+ if (!value)
+ return value;
+
+ /*
+	 * In case of P10, the thresh_cmp value is not part of the raw event code
+	 * and is provided via the attr.config1 parameter. To program the threshold
+	 * in MMCRA, take an 18 bit number N, then shift it right 2 places and
+	 * increment the exponent E by 1 until the upper 10 bits of N are zero.
+ * Write E to the threshold exponent and write the lower 8 bits of N
+ * to the threshold mantissa.
+ * The max threshold that can be written is 261120.
+ */
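+	/*
+	 * Worked example (illustrative): for the config1 value 1000 used by
+	 * mmcra_thresh_cmp_test, one iteration gives E = 1 and N = 250, and
+	 * 250 & 0xC0 is non-zero, so the encoded result is
+	 * (1 << 8) | 250 = 0x1FA.
+	 */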
+ if (value > 261120)
+ value = 261120;
+ while ((64 - __builtin_clzl(value)) > 8) {
+ exp++;
+ value >>= 2;
+ }
+
+ /*
+ * Note that it is invalid to write a mantissa with the
+ * upper 2 bits of mantissa being zero, unless the
+ * exponent is also zero.
+ */
+ if (!(value & 0xC0) && exp)
+ result = -1;
+ else
+ result = (exp << 8) | value;
+ return result;
+}
+
+/*
+ * Utility function to check for generic compat PMU
+ * by checking whether the base_platform value from auxv
+ * maps to a known PVR value.
+ */
+static bool auxv_generic_compat_pmu(void)
+{
+ int base_pvr = 0;
+
+ if (!strcmp(auxv_base_platform(), "power9"))
+ base_pvr = POWER9;
+ else if (!strcmp(auxv_base_platform(), "power10"))
+ base_pvr = POWER10;
+
+ return (!base_pvr);
+}
+
+/*
+ * Check for generic compat PMU.
+ * First check for the presence of pmu_name in
+ * "/sys/bus/event_source/devices/cpu/caps".
+ * If it doesn't exist, fall back to using the
+ * value from auxv.
+ */
+bool check_for_generic_compat_pmu(void)
+{
+ char pmu_name[256];
+
+ memset(pmu_name, 0, sizeof(pmu_name));
+ if (read_sysfs_file("bus/event_source/devices/cpu/caps/pmu_name",
+ pmu_name, sizeof(pmu_name)) < 0)
+ return auxv_generic_compat_pmu();
+
+ if (!strcmp(pmu_name, "ISAv3"))
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Check if system is booted in compat mode.
+ */
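+/*
+ * For example (illustrative): a power10 system booted in power9 compat
+ * mode reports platform "power9" and base_platform "power10", so the
+ * strcmp() below is non-zero.
+ */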
+bool check_for_compat_mode(void)
+{
+ char *platform = auxv_platform();
+ char *base_platform = auxv_base_platform();
+
+ return strcmp(platform, base_platform);
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h
new file mode 100644
index 000000000000..4181755cf5a0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ * Copyright 2022, Madhavan Srinivasan, IBM Corp.
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <sys/stat.h>
+#include "../event.h"
+
+#define POWER10 0x80
+#define POWER9 0x4e
+#define PERF_POWER9_MASK 0x7f8ffffffffffff
+#define PERF_POWER10_MASK 0x7ffffffffffffff
+
+#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
+#define MMCR0_PMCCEXT 0x00000200UL /* PMCCEXT control */
+#define MMCR1_RSQ 0x200000000000ULL /* radix scope qual field */
+#define BHRB_DISABLE 0x2000000000ULL /* MMCRA BHRB DISABLE bit */
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+extern int ev_mask_pmcxsel, ev_shift_pmcxsel;
+extern int ev_mask_marked, ev_shift_marked;
+extern int ev_mask_comb, ev_shift_comb;
+extern int ev_mask_unit, ev_shift_unit;
+extern int ev_mask_pmc, ev_shift_pmc;
+extern int ev_mask_cache, ev_shift_cache;
+extern int ev_mask_sample, ev_shift_sample;
+extern int ev_mask_thd_sel, ev_shift_thd_sel;
+extern int ev_mask_thd_start, ev_shift_thd_start;
+extern int ev_mask_thd_stop, ev_shift_thd_stop;
+extern int ev_mask_thd_cmp, ev_shift_thd_cmp;
+extern int ev_mask_sm, ev_shift_sm;
+extern int ev_mask_rsq, ev_shift_rsq;
+extern int ev_mask_l2l3, ev_shift_l2l3;
+extern int ev_mask_mmcr3_src, ev_shift_mmcr3_src;
+extern int pvr;
+extern u64 platform_extended_mask;
+extern int check_pvr_for_sampling_tests(void);
+extern int platform_check_for_tests(void);
+
+/*
+ * Event code field extraction macro.
+ * Raw event code is combination of multiple
+ * fields. Macro to extract individual fields
+ *
+ * x - Raw event code value
+ * y - Field to extract
+ */
+#define EV_CODE_EXTRACT(x, y) \
+ ((x >> ev_shift_##y) & ev_mask_##y)
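+
+/*
+ * For example (illustrative): once init_ev_encodes() has run,
+ * EV_CODE_EXTRACT(0x500fa, pmc) evaluates to (0x500fa >> 16) & 0xf = 5,
+ * i.e. the event uses PMC5.
+ */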
+
+void *event_sample_buf_mmap(int fd, int mmap_pages);
+void *__event_read_samples(void *sample_buff, size_t *size, u64 *sample_count);
+int collect_samples(void *sample_buff);
+u64 *get_intr_regs(struct event *event, void *sample_buff);
+u64 get_reg_value(u64 *intr_regs, char *register_name);
+int get_thresh_cmp_val(struct event event);
+bool check_for_generic_compat_pmu(void);
+bool check_for_compat_mode(void);
+
+static inline int get_mmcr0_fc56(u64 mmcr0, int pmc)
+{
+ return (mmcr0 & MMCR0_FC56);
+}
+
+static inline int get_mmcr0_pmccext(u64 mmcr0, int pmc)
+{
+ return (mmcr0 & MMCR0_PMCCEXT);
+}
+
+static inline int get_mmcr0_pmao(u64 mmcr0, int pmc)
+{
+ return ((mmcr0 >> 7) & 0x1);
+}
+
+static inline int get_mmcr0_cc56run(u64 mmcr0, int pmc)
+{
+ return ((mmcr0 >> 8) & 0x1);
+}
+
+static inline int get_mmcr0_pmcjce(u64 mmcr0, int pmc)
+{
+ return ((mmcr0 >> 14) & 0x1);
+}
+
+static inline int get_mmcr0_pmc1ce(u64 mmcr0, int pmc)
+{
+ return ((mmcr0 >> 15) & 0x1);
+}
+
+static inline int get_mmcr0_pmae(u64 mmcr0, int pmc)
+{
+ return ((mmcr0 >> 27) & 0x1);
+}
+
+static inline int get_mmcr1_pmcxsel(u64 mmcr1, int pmc)
+{
+ return ((mmcr1 >> ((24 - (((pmc) - 1) * 8))) & 0xff));
+}
+
+static inline int get_mmcr1_unit(u64 mmcr1, int pmc)
+{
+ return ((mmcr1 >> ((60 - (4 * ((pmc) - 1))))) & 0xf);
+}
+
+static inline int get_mmcr1_comb(u64 mmcr1, int pmc)
+{
+ return ((mmcr1 >> (38 - ((pmc - 1) * 2))) & 0x3);
+}
+
+static inline int get_mmcr1_cache(u64 mmcr1, int pmc)
+{
+ return ((mmcr1 >> 46) & 0x3);
+}
+
+static inline int get_mmcr1_rsq(u64 mmcr1, int pmc)
+{
+ return mmcr1 & MMCR1_RSQ;
+}
+
+static inline int get_mmcr2_fcs(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (63 - (((pmc) - 1) * 9)))) >> (63 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcp(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (62 - (((pmc) - 1) * 9)))) >> (62 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcpc(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (61 - (((pmc) - 1) * 9)))) >> (61 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcm1(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (60 - (((pmc) - 1) * 9)))) >> (60 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcm0(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (59 - (((pmc) - 1) * 9)))) >> (59 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcwait(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (58 - (((pmc) - 1) * 9)))) >> (58 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fch(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (57 - (((pmc) - 1) * 9)))) >> (57 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcti(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (56 - (((pmc) - 1) * 9)))) >> (56 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_fcta(u64 mmcr2, int pmc)
+{
+ return ((mmcr2 & (1ull << (55 - (((pmc) - 1) * 9)))) >> (55 - (((pmc) - 1) * 9)));
+}
+
+static inline int get_mmcr2_l2l3(u64 mmcr2, int pmc)
+{
+ if (pvr == POWER10)
+ return ((mmcr2 & 0xf8) >> 3);
+ return 0;
+}
+
+static inline int get_mmcr3_src(u64 mmcr3, int pmc)
+{
+ if (pvr != POWER10)
+ return 0;
+ return ((mmcr3 >> ((49 - (15 * ((pmc) - 1))))) & 0x7fff);
+}
+
+static inline int get_mmcra_thd_cmp(u64 mmcra, int pmc)
+{
+ if (pvr == POWER10)
+ return ((mmcra >> 45) & 0x7ff);
+ return ((mmcra >> 45) & 0x3ff);
+}
+
+static inline int get_mmcra_sm(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 42) & 0x3);
+}
+
+static inline u64 get_mmcra_bhrb_disable(u64 mmcra, int pmc)
+{
+ if (pvr == POWER10)
+ return mmcra & BHRB_DISABLE;
+ return 0;
+}
+
+static inline int get_mmcra_ifm(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 30) & 0x3);
+}
+
+static inline int get_mmcra_thd_sel(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 16) & 0x7);
+}
+
+static inline int get_mmcra_thd_start(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 12) & 0xf);
+}
+
+static inline int get_mmcra_thd_stop(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 8) & 0xf);
+}
+
+static inline int get_mmcra_rand_samp_elig(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 4) & 0x7);
+}
+
+static inline int get_mmcra_sample_mode(u64 mmcra, int pmc)
+{
+ return ((mmcra >> 1) & 0x3);
+}
+
+static inline int get_mmcra_marked(u64 mmcra, int pmc)
+{
+ return mmcra & 0x1;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c
new file mode 100644
index 000000000000..ae4172f83817
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/*
+ * A perf sampling test for mmcr0
+ * field: cc56run.
+ */
+static int mmcr0_cc56run(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x500fa);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that cc56run bit is set in MMCR0 */
+ FAIL_IF(!get_mmcr0_cc56run(get_reg_value(intr_regs, "MMCR0"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr0_cc56run, "mmcr0_cc56run");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c
new file mode 100644
index 000000000000..982aa56d2171
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/*
+ * A perf sampling test for mmcr0
+ * fields : pmae, pmao.
+ */
+static int mmcr0_exceptionbits(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x500fa);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that pmae is cleared and pmao is set in MMCR0 */
+ FAIL_IF(get_mmcr0_pmae(get_reg_value(intr_regs, "MMCR0"), 5));
+ FAIL_IF(!get_mmcr0_pmao(get_reg_value(intr_regs, "MMCR0"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr0_exceptionbits, "mmcr0_exceptionbits");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c
new file mode 100644
index 000000000000..1c1813c182c0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/*
+ * A perf sampling test for mmcr0
+ * fields: fc56, pmc1ce.
+ */
+static int mmcr0_fc56_pmc1ce(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x1001e);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that fc56, pmc1ce fields are set in MMCR0 */
+ FAIL_IF(!get_mmcr0_fc56(get_reg_value(intr_regs, "MMCR0"), 1));
+ FAIL_IF(!get_mmcr0_pmc1ce(get_reg_value(intr_regs, "MMCR0"), 1));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr0_fc56_pmc1ce, "mmcr0_fc56_pmc1ce");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c
new file mode 100644
index 000000000000..332d24b5ab9c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/*
+ * A perf sampling test for mmcr0
+ * fields: fc56_pmc56
+ */
+static int mmcr0_fc56_pmc56(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x500fa);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that fc56 is not set in MMCR0 when using PMC5 */
+ FAIL_IF(get_mmcr0_fc56(get_reg_value(intr_regs, "MMCR0"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr0_fc56_pmc56, "mmcr0_fc56_pmc56");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c
new file mode 100644
index 000000000000..dfd186cd8eec
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/*
+ * A perf sampling test for mmcr0
+ * field: pmccext
+ */
+static int mmcr0_pmccext(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x4001e);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that pmccext field is set in MMCR0 */
+ FAIL_IF(!get_mmcr0_pmccext(get_reg_value(intr_regs, "MMCR0"), 4));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr0_pmccext, "mmcr0_pmccext");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c
new file mode 100644
index 000000000000..fdd8ed9bf725
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/*
+ * A perf sampling test for mmcr0
+ * field: pmcjce
+ */
+static int mmcr0_pmcjce(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x500fa);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that pmcjce field is set in MMCR0 */
+ FAIL_IF(!get_mmcr0_pmcjce(get_reg_value(intr_regs, "MMCR0"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr0_pmcjce, "mmcr0_pmcjce");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c
new file mode 100644
index 000000000000..5aea6499ee9a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define EventCode 0x46880
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+/*
+ * A perf sampling test for mmcr1
+ * fields : comb.
+ */
+static int mmcr1_comb(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ u64 dummy;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop_with_ll_sc(10000000, &dummy);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+	 * Verify that the comb field matches the
+	 * corresponding event code field
+ */
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, comb) !=
+ get_mmcr1_comb(get_reg_value(intr_regs, "MMCR1"), 4));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr1_comb, "mmcr1_comb");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c
new file mode 100644
index 000000000000..f0c003282630
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Athira Rajeev, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+#define MALLOC_SIZE (0x10000 * 10) /* Ought to be enough .. */
+
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define EventCode 0x21c040
+
+/*
+ * A perf sampling test for mmcr1
+ * fields : pmcxsel, unit, cache.
+ */
+static int mmcr1_sel_unit_cache(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ char *p;
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ p = malloc(MALLOC_SIZE);
+ FAIL_IF(!p);
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_period = 1;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ event_enable(&event);
+
+ /* workload to make the event overflow */
+ for (i = 0; i < MALLOC_SIZE; i += 0x10000)
+ p[i] = i;
+
+ event_disable(&event);
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+	 * Verify that the pmcxsel, unit and cache fields of MMCR1
+	 * match the corresponding event code fields
+ */
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, pmcxsel) !=
+ get_mmcr1_pmcxsel(get_reg_value(intr_regs, "MMCR1"), 1));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, unit) !=
+ get_mmcr1_unit(get_reg_value(intr_regs, "MMCR1"), 1));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, cache) !=
+ get_mmcr1_cache(get_reg_value(intr_regs, "MMCR1"), 1));
+
+ free(p);
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ FAIL_IF(test_harness(mmcr1_sel_unit_cache, "mmcr1_sel_unit_cache"));
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c
new file mode 100644
index 000000000000..4e242fd61b25
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Madhavan Srinivasan, IBM Corp.
+ */
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+static bool is_hv;
+
+static void sig_usr2_handler(int signum, siginfo_t *info, void *data)
+{
+ ucontext_t *uctx = data;
+
+ is_hv = !!(uctx->uc_mcontext.gp_regs[PT_MSR] & MSR_HV);
+}
+
+/*
+ * A perf sampling test for mmcr2
+ * fields : fcs, fch.
+ */
+static int mmcr2_fcs_fch(void)
+{
+ struct sigaction sigact = {
+ .sa_sigaction = sig_usr2_handler,
+ .sa_flags = SA_SIGINFO
+ };
+ struct event event;
+ u64 *intr_regs;
+
+ FAIL_IF(sigaction(SIGUSR2, &sigact, NULL));
+ FAIL_IF(kill(getpid(), SIGUSR2));
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, 0x1001e);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.exclude_kernel = 1;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+	 * Verify that the fcs and fch fields of MMCR2 match
+	 * the corresponding modifier fields.
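+	 *
+	 * With the MSR_HV bit set (hypervisor privilege, as recorded by
+	 * the SIGUSR2 handler above), exclude_kernel is expected to map
+	 * to FCH; otherwise it maps to FCS.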
+ */
+ if (is_hv)
+ FAIL_IF(event.attr.exclude_kernel !=
+ get_mmcr2_fch(get_reg_value(intr_regs, "MMCR2"), 1));
+ else
+ FAIL_IF(event.attr.exclude_kernel !=
+ get_mmcr2_fcs(get_reg_value(intr_regs, "MMCR2"), 1));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr2_fcs_fch, "mmcr2_fcs_fch");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c
new file mode 100644
index 000000000000..ceca597016b2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Madhavan Srinivasan, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/* All successful D-side store dispatches for this thread */
+#define EventCode 0x010000046080
+
+#define MALLOC_SIZE (0x10000 * 10) /* Ought to be enough .. */
+
+/*
+ * A perf sampling test for mmcr2
+ * fields : l2l3
+ */
+static int mmcr2_l2l3(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ char *p;
+ int i;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ p = malloc(MALLOC_SIZE);
+ FAIL_IF(!p);
+
+ for (i = 0; i < MALLOC_SIZE; i += 0x10000)
+ p[i] = i;
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+	 * Verify that the l2l3 field of MMCR2 matches the
+	 * corresponding event code field
+ */
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, l2l3) !=
+ get_mmcr2_l2l3(get_reg_value(intr_regs, "MMCR2"), 4));
+
+ event_close(&event);
+ free(p);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr2_l2l3, "mmcr2_l2l3");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c
new file mode 100644
index 000000000000..e154e2a4cc3a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define EventCode 0x1340000001c040
+
+/*
+ * A perf sampling test for mmcr3
+ * fields.
+ */
+static int mmcr3_src(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ u64 dummy;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make event overflow */
+ thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+	 * Verify that the src field of MMCR3 matches the
+	 * corresponding event code field
+ */
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, mmcr3_src) !=
+ get_mmcr3_src(get_reg_value(intr_regs, "MMCR3"), 1));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcr3_src, "mmcr3_src");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c
new file mode 100644
index 000000000000..14854694af62
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/* ifm field for any branch mode */
+#define IFM_ANY_BRANCH 0x0
+
+/*
+ * A perf sampling test for mmcra
+ * field: ifm for bhrb any call.
+ */
+static int mmcra_bhrb_any_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that ifm bit is set properly in MMCRA */
+ FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_ANY_BRANCH);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_any_test, "mmcra_bhrb_any_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c
new file mode 100644
index 000000000000..3e08176eb7f8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/* ifm field for conditional branch mode */
+#define IFM_COND_BRANCH 0x3
+
+/*
+ * A perf sampling test for mmcra
+ * field: ifm for bhrb cond call.
+ */
+static int mmcra_bhrb_cond_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that ifm bit is set properly in MMCRA */
+ FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_COND_BRANCH);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_cond_test, "mmcra_bhrb_cond_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c
new file mode 100644
index 000000000000..488c865387e4
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/*
+ * A perf sampling test for mmcra
+ * field: bhrb_disable.
+ */
+static int mmcra_bhrb_disable_no_branch_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that bhrb_disable bit is set in MMCRA for non-branch samples */
+ FAIL_IF(!get_mmcra_bhrb_disable(get_reg_value(intr_regs, "MMCRA"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_disable_no_branch_test, "mmcra_bhrb_disable_no_branch_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c
new file mode 100644
index 000000000000..186a853c0f62
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(int loops);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/*
+ * A perf sampling test for mmcra
+ * field: bhrb_disable.
+ */
+static int mmcra_bhrb_disable_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop(10000);
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that bhrb_disable bit is set in MMCRA */
+ FAIL_IF(get_mmcra_bhrb_disable(get_reg_value(intr_regs, "MMCRA"), 5));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_disable_test, "mmcra_bhrb_disable_test");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c
new file mode 100644
index 000000000000..f0706730c099
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+extern void indirect_branch_loop(void);
+
+/* Instructions */
+#define EventCode 0x500fa
+
+/* ifm field for indirect branch mode */
+#define IFM_IND_BRANCH 0x2
+
+/*
+ * A perf sampling test for mmcra
+ * field: ifm for bhrb ind_call.
+ */
+static int mmcra_bhrb_ind_call_test(void)
+{
+ struct event event;
+ u64 *intr_regs;
+
+ /*
+ * Check for platform support for the test.
+	 * This test is only applicable on power10
+ */
+ SKIP_IF(check_pvr_for_sampling_tests());
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ event.attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_IND_CALL;
+ event.attr.exclude_kernel = 1;
+
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ indirect_branch_loop();
+
+ FAIL_IF(event_disable(&event));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that the ifm field is set properly in MMCRA */
+ FAIL_IF(get_mmcra_ifm(get_reg_value(intr_regs, "MMCRA"), 5) != IFM_IND_BRANCH);
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_bhrb_ind_call_test, "mmcra_bhrb_ind_call_test");
+}
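indirect_branch_loop() is an assembly workload helper used by these sampling tests and is not shown in this diff; assembly is used so the branch mix seen by the BHRB filter is predictable. Purely as an illustration of the kind of workload meant here (the names and iteration count below are made up), an indirect-call loop in C would look like:

/* Illustrative C version of an indirect-branch workload. */
static void nop_target(void) { }

static void indirect_call_workload(void)
{
	void (*volatile fn)(void) = nop_target;
	long i;

	/* Each call through the volatile function pointer is an indirect
	 * call (bctrl on powerpc), the branch type selected by
	 * PERF_SAMPLE_BRANCH_IND_CALL above.
	 */
	for (i = 0; i < 100000; i++)
		fn();
}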
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c
new file mode 100644
index 000000000000..904362f172c9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * Primary PMU event used here is PM_MRK_INST_CMPL (0x401e0)
+ * Threshold event selection used is issue to complete for cycles
+ * Sampling criteria is Load only sampling
+ */
+#define p9_EventCode 0x13E35340401e0
+#define p10_EventCode 0x35340401e0
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+/* A perf sampling test for mmcra fields */
+static int mmcra_thresh_cmp(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ u64 dummy;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Skip for compat mode */
+ SKIP_IF(check_for_compat_mode());
+
+ /* Init the event for the sampling test */
+ if (!have_hwcap2(PPC_FEATURE2_ARCH_3_1)) {
+ event_init_sampling(&event, p9_EventCode);
+ } else {
+ event_init_sampling(&event, p10_EventCode);
+ event.attr.config1 = 1000;
+ }
+
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /* Verify that thresh cmp match with the corresponding event code fields */
+ FAIL_IF(get_thresh_cmp_val(event) !=
+ get_mmcra_thd_cmp(get_reg_value(intr_regs, "MMCRA"), 4));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_thresh_cmp, "mmcra_thresh_cmp");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
new file mode 100644
index 000000000000..022cc1655eb5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022, Kajol Jain, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../event.h"
+#include "misc.h"
+#include "utils.h"
+
+/*
+ * Primary PMU event used here is PM_MRK_INST_CMPL (0x401e0)
+ * Threshold event selection used is issue to complete for cycles
+ * Sampling criteria is Load only sampling
+ */
+#define EventCode 0x35340401e0
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+/* A perf sampling test for mmcra fields */
+static int mmcra_thresh_marked_sample(void)
+{
+ struct event event;
+ u64 *intr_regs;
+ u64 dummy;
+
+ /* Check for platform support for the test */
+ SKIP_IF(check_pvr_for_sampling_tests());
+
+ /* Init the event for the sampling test */
+ event_init_sampling(&event, EventCode);
+ event.attr.sample_regs_intr = platform_extended_mask;
+ FAIL_IF(event_open(&event));
+ event.mmap_buffer = event_sample_buf_mmap(event.fd, 1);
+
+ FAIL_IF(event_enable(&event));
+
+ /* workload to make the event overflow */
+ thirty_two_instruction_loop_with_ll_sc(1000000, &dummy);
+
+ FAIL_IF(event_disable(&event));
+
+ /* Check for sample count */
+ FAIL_IF(!collect_samples(event.mmap_buffer));
+
+ intr_regs = get_intr_regs(&event, event.mmap_buffer);
+
+ /* Check for intr_regs */
+ FAIL_IF(!intr_regs);
+
+ /*
+ * Verify that thresh sel/start/stop, marked, random sample
+ * eligibility, sdar mode and sample mode fields match with
+ * the corresponding event code fields
+ */
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, thd_sel) !=
+ get_mmcra_thd_sel(get_reg_value(intr_regs, "MMCRA"), 4));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, thd_start) !=
+ get_mmcra_thd_start(get_reg_value(intr_regs, "MMCRA"), 4));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, thd_stop) !=
+ get_mmcra_thd_stop(get_reg_value(intr_regs, "MMCRA"), 4));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, marked) !=
+ get_mmcra_marked(get_reg_value(intr_regs, "MMCRA"), 4));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample >> 2) !=
+ get_mmcra_rand_samp_elig(get_reg_value(intr_regs, "MMCRA"), 4));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample & 0x3) !=
+ get_mmcra_sample_mode(get_reg_value(intr_regs, "MMCRA"), 4));
+ FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sm) !=
+ get_mmcra_sm(get_reg_value(intr_regs, "MMCRA"), 4));
+
+ event_close(&event);
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(mmcra_thresh_marked_sample, "mmcra_thresh_marked_sample");
+}
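EV_CODE_EXTRACT() comes from the sampling_tests misc helpers and is not shown here; it pulls a named field out of the raw event code so it can be compared against the value the kernel programmed into MMCRA. A hedged sketch of the idea follows — the shift/mask constants are placeholders rather than the real event-code layout, and the real helper presumably selects per-field symbols by token pasting:

/* Sketch: per-field shift/mask symbols selected by token pasting.
 * Constants are placeholders for illustration only.
 */
#define ev_shift_thd_sel	29
#define ev_mask_thd_sel		0x7ull

#define EXAMPLE_EV_EXTRACT(code, field) \
	(((code) >> ev_shift_##field) & ev_mask_##field)

/* e.g.: u64 thd_sel = EXAMPLE_EV_EXTRACT(event.attr.config, thd_sel); */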
diff --git a/tools/testing/selftests/powerpc/primitives/.gitignore b/tools/testing/selftests/powerpc/primitives/.gitignore
index 4cc4e31bed1d..1e5c04e24254 100644
--- a/tools/testing/selftests/powerpc/primitives/.gitignore
+++ b/tools/testing/selftests/powerpc/primitives/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
load_unaligned_zeropad
diff --git a/tools/testing/selftests/powerpc/primitives/asm/extable.h b/tools/testing/selftests/powerpc/primitives/asm/extable.h
new file mode 120000
index 000000000000..6385f059a951
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/extable.h
@@ -0,0 +1 @@
+../../../../../../arch/powerpc/include/asm/extable.h \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
index dce19f221c46..eb75e5360e31 100644
--- a/tools/testing/selftests/powerpc/ptrace/.gitignore
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ptrace-gpr
ptrace-tm-gpr
ptrace-tm-spd-gpr
@@ -13,3 +14,4 @@ perf-hwbreak
core-pkey
ptrace-pkey
ptrace-syscall
+ptrace-perf-hwbreak
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 8d3f006c98cc..2f02cb54224d 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,15 +1,41 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
- ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
- ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
- perf-hwbreak ptrace-syscall
+
+TM_TESTS := ptrace-tm-gpr
+TM_TESTS += ptrace-tm-spd-gpr
+TM_TESTS += ptrace-tm-spd-tar
+TM_TESTS += ptrace-tm-spd-vsx
+TM_TESTS += ptrace-tm-spr
+TM_TESTS += ptrace-tm-tar
+TM_TESTS += ptrace-tm-vsx
+
+TESTS_64 := $(TM_TESTS)
+TESTS_64 += core-pkey
+TESTS_64 += perf-hwbreak
+TESTS_64 += ptrace-hwbreak
+TESTS_64 += ptrace-perf-hwbreak
+TESTS_64 += ptrace-pkey
+TESTS_64 += ptrace-syscall
+TESTS_64 += ptrace-tar
+TESTS_64 += ptrace-vsx
+
+TESTS += ptrace-gpr
+
+TEST_GEN_PROGS := $(TESTS) $(TESTS_64)
+
+LOCAL_HDRS += $(patsubst %,$(selfdir)/powerpc/ptrace/%,$(wildcard *.h))
top_srcdir = ../../../../..
include ../../lib.mk
-CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie
+TM_TESTS := $(patsubst %,$(OUTPUT)/%,$(TM_TESTS))
+TESTS_64 := $(patsubst %,$(OUTPUT)/%,$(TESTS_64))
+
+$(TESTS_64): CFLAGS += -m64
+$(TM_TESTS): CFLAGS += -I../tm -mhtm
+
+CFLAGS += -I../../../../../usr/include -fno-pie
-$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h
+$(OUTPUT)/ptrace-gpr: ptrace-gpr.S
$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
-$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
+$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S
diff --git a/tools/testing/selftests/powerpc/ptrace/core-pkey.c b/tools/testing/selftests/powerpc/ptrace/core-pkey.c
index d5c64fee032d..bbc05ffc5860 100644
--- a/tools/testing/selftests/powerpc/ptrace/core-pkey.c
+++ b/tools/testing/selftests/powerpc/ptrace/core-pkey.c
@@ -150,7 +150,7 @@ static int child(struct shared_info *info)
printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
user_write, info->amr, pkey1, pkey2, pkey3);
- mtspr(SPRN_AMR, info->amr);
+ set_amr(info->amr);
/*
* We won't use pkey3. This tests whether the kernel restores the UAMOR
diff --git a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
index c1f324afdbf3..ecde2c199f3b 100644
--- a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
@@ -21,8 +21,13 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
+#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <sys/ptrace.h>
+#include <sys/sysinfo.h>
+#include <asm/ptrace.h>
#include <elf.h>
#include <pthread.h>
#include <sys/syscall.h>
@@ -30,32 +35,130 @@
#include <linux/hw_breakpoint.h>
#include "utils.h"
+#ifndef PPC_DEBUG_FEATURE_DATA_BP_ARCH_31
+#define PPC_DEBUG_FEATURE_DATA_BP_ARCH_31 0x20
+#endif
+
#define MAX_LOOPS 10000
#define DAWR_LENGTH_MAX ((0x3f + 1) * 8)
-static inline int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
- int cpu, int group_fd,
- unsigned long flags)
+int nprocs;
+
+static volatile int a = 10;
+static volatile int b = 10;
+static volatile char c[512 + 8] __attribute__((aligned(512)));
+
+static void perf_event_attr_set(struct perf_event_attr *attr,
+ __u32 type, __u64 addr, __u64 len,
+ bool exclude_user)
{
- attr->size = sizeof(*attr);
- return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->bp_type = type;
+ attr->bp_addr = addr;
+ attr->bp_len = len;
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+ attr->exclude_guest = 1;
+ attr->exclude_user = exclude_user;
+ attr->disabled = 1;
}
-static inline bool breakpoint_test(int len)
+static int
+perf_process_event_open_exclude_user(__u32 type, __u64 addr, __u64 len, bool exclude_user)
+{
+ struct perf_event_attr attr;
+
+ perf_event_attr_set(&attr, type, addr, len, exclude_user);
+ return syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
+}
+
+static int perf_process_event_open(__u32 type, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_event_attr_set(&attr, type, addr, len, 0);
+ return syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
+}
+
+static int perf_cpu_event_open(long cpu, __u32 type, __u64 addr, __u64 len)
{
struct perf_event_attr attr;
+
+ perf_event_attr_set(&attr, type, addr, len, 0);
+ return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
+}
+
+static void close_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ close(fd[i]);
+}
+
+static unsigned long read_fds(int *fd, int n)
+{
+ int i;
+ unsigned long c = 0;
+ unsigned long count = 0;
+ size_t res;
+
+ for (i = 0; i < n; i++) {
+ res = read(fd[i], &c, sizeof(c));
+ assert(res == sizeof(unsigned long long));
+ count += c;
+ }
+ return count;
+}
+
+static void reset_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ioctl(fd[i], PERF_EVENT_IOC_RESET);
+}
+
+static void enable_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ioctl(fd[i], PERF_EVENT_IOC_ENABLE);
+}
+
+static void disable_fds(int *fd, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ ioctl(fd[i], PERF_EVENT_IOC_DISABLE);
+}
+
+static int perf_systemwide_event_open(int *fd, __u32 type, __u64 addr, __u64 len)
+{
+ int i = 0;
+
+ /* Assume online processors are 0 to nprocs - 1 for simplicity */
+ for (i = 0; i < nprocs; i++) {
+ fd[i] = perf_cpu_event_open(i, type, addr, len);
+ if (fd[i] < 0) {
+ close_fds(fd, i);
+ return fd[i];
+ }
+ }
+ return 0;
+}
+
+static inline bool breakpoint_test(int len)
+{
int fd;
- /* setup counters */
- memset(&attr, 0, sizeof(attr));
- attr.disabled = 1;
- attr.type = PERF_TYPE_BREAKPOINT;
- attr.bp_type = HW_BREAKPOINT_R;
/* bp_addr can point anywhere but needs to be aligned */
- attr.bp_addr = (__u64)(&attr) & 0xfffffffffffff800;
- attr.bp_len = len;
- fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ fd = perf_process_event_open(HW_BREAKPOINT_R, (__u64)(&fd) & 0xfffffffffffff800, len);
if (fd < 0)
return false;
close(fd);
@@ -75,7 +178,6 @@ static inline bool dawr_supported(void)
static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
{
int i,j;
- struct perf_event_attr attr;
size_t res;
unsigned long long breaks, needed;
int readint;
@@ -85,6 +187,7 @@ static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
int break_fd;
int loop_num = MAX_LOOPS - (rand() % 100); /* provide some variability */
volatile int *k;
+ __u64 len;
/* align to 0x400 boundary as required by DAWR */
readintalign = (int *)(((unsigned long)readintarraybig + 0x7ff) &
@@ -94,19 +197,11 @@ static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
if (arraytest)
ptr = &readintalign[0];
- /* setup counters */
- memset(&attr, 0, sizeof(attr));
- attr.disabled = 1;
- attr.type = PERF_TYPE_BREAKPOINT;
- attr.bp_type = readwriteflag;
- attr.bp_addr = (__u64)ptr;
- attr.bp_len = sizeof(int);
- if (arraytest)
- attr.bp_len = DAWR_LENGTH_MAX;
- attr.exclude_user = exclude_user;
- break_fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ len = arraytest ? DAWR_LENGTH_MAX : sizeof(int);
+ break_fd = perf_process_event_open_exclude_user(readwriteflag, (__u64)ptr,
+ len, exclude_user);
if (break_fd < 0) {
- perror("sys_perf_event_open");
+ perror("perf_process_event_open_exclude_user");
exit(1);
}
@@ -153,7 +248,6 @@ static int runtest_dar_outside(void)
void *target;
volatile __u16 temp16;
volatile __u64 temp64;
- struct perf_event_attr attr;
int break_fd;
unsigned long long breaks;
int fail = 0;
@@ -165,21 +259,11 @@ static int runtest_dar_outside(void)
exit(EXIT_FAILURE);
}
- /* setup counters */
- memset(&attr, 0, sizeof(attr));
- attr.disabled = 1;
- attr.type = PERF_TYPE_BREAKPOINT;
- attr.exclude_kernel = 1;
- attr.exclude_hv = 1;
- attr.exclude_guest = 1;
- attr.bp_type = HW_BREAKPOINT_RW;
/* watch middle half of target array */
- attr.bp_addr = (__u64)(target + 2);
- attr.bp_len = 4;
- break_fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ break_fd = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)(target + 2), 4);
if (break_fd < 0) {
free(target);
- perror("sys_perf_event_open");
+ perror("perf_process_event_open");
exit(EXIT_FAILURE);
}
@@ -263,11 +347,467 @@ static int runtest_dar_outside(void)
return fail;
}
+static void multi_dawr_workload(void)
+{
+ a += 10;
+ b += 10;
+ c[512 + 1] += 'a';
+}
+
+static int test_process_multi_diff_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, diff addr";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&b, (__u64)sizeof(b));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_process_multi_same_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, same addr";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_process_multi_diff_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, diff addr, one is RO, other is WO";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_process_multi_same_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int fd1, fd2;
+ char *desc = "Process specific, Two events, same addr, one is RO, other is WO";
+ size_t res;
+
+ fd1 = perf_process_event_open(HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));
+ if (fd1 < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ fd2 = perf_process_event_open(HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (fd2 < 0) {
+ close(fd1);
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd1, PERF_EVENT_IOC_RESET);
+ ioctl(fd2, PERF_EVENT_IOC_RESET);
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd1, &breaks1, sizeof(breaks1));
+ assert(res == sizeof(unsigned long long));
+ res = read(fd2, &breaks2, sizeof(breaks2));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd1);
+ close(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_diff_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, diff addr";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_RW, (__u64)&b, (__u64)sizeof(b));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_same_addr(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, same addr";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_RW, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 2 || breaks2 != 2) {
+ printf("FAILED: %s: %lld != 2 || %lld != 2\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_diff_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, diff addr, one is RO, other is WO";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int test_syswide_multi_same_addr_ro_wo(void)
+{
+ unsigned long long breaks1 = 0, breaks2 = 0;
+ int *fd1 = malloc(nprocs * sizeof(int));
+ int *fd2 = malloc(nprocs * sizeof(int));
+ char *desc = "Systemwide, Two events, same addr, one is RO, other is WO";
+ int ret;
+
+ ret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));
+ if (ret) {
+ close_fds(fd1, nprocs);
+ perror("perf_systemwide_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ reset_fds(fd1, nprocs);
+ reset_fds(fd2, nprocs);
+ enable_fds(fd1, nprocs);
+ enable_fds(fd2, nprocs);
+ multi_dawr_workload();
+ disable_fds(fd1, nprocs);
+ disable_fds(fd2, nprocs);
+
+ breaks1 = read_fds(fd1, nprocs);
+ breaks2 = read_fds(fd2, nprocs);
+
+ close_fds(fd1, nprocs);
+ close_fds(fd2, nprocs);
+
+ free(fd1);
+ free(fd2);
+
+ if (breaks1 != 1 || breaks2 != 1) {
+ printf("FAILED: %s: %lld != 1 || %lld != 1\n", desc, breaks1, breaks2);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+static int runtest_multi_dawr(void)
+{
+ int ret = 0;
+
+ ret |= test_process_multi_diff_addr();
+ ret |= test_process_multi_same_addr();
+ ret |= test_process_multi_diff_addr_ro_wo();
+ ret |= test_process_multi_same_addr_ro_wo();
+ ret |= test_syswide_multi_diff_addr();
+ ret |= test_syswide_multi_same_addr();
+ ret |= test_syswide_multi_diff_addr_ro_wo();
+ ret |= test_syswide_multi_same_addr_ro_wo();
+
+ return ret;
+}
+
+static int runtest_unaligned_512bytes(void)
+{
+ unsigned long long breaks = 0;
+ int fd;
+ char *desc = "Process specific, 512 bytes, unaligned";
+ __u64 addr = (__u64)&c + 8;
+ size_t res;
+
+ fd = perf_process_event_open(HW_BREAKPOINT_RW, addr, 512);
+ if (fd < 0) {
+ perror("perf_process_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ ioctl(fd, PERF_EVENT_IOC_RESET);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE);
+ multi_dawr_workload();
+ ioctl(fd, PERF_EVENT_IOC_DISABLE);
+
+ res = read(fd, &breaks, sizeof(breaks));
+ assert(res == sizeof(unsigned long long));
+
+ close(fd);
+
+ if (breaks != 2) {
+ printf("FAILED: %s: %lld != 2\n", desc, breaks);
+ return 1;
+ }
+
+ printf("TESTED: %s\n", desc);
+ return 0;
+}
+
+/* There is no perf api to find number of available watchpoints. Use ptrace. */
+static int get_nr_wps(bool *arch_31)
+{
+ struct ppc_debug_info dbginfo;
+ int child_pid;
+
+ child_pid = fork();
+ if (!child_pid) {
+ int ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
+ if (ret) {
+ perror("PTRACE_TRACEME failed\n");
+ exit(EXIT_FAILURE);
+ }
+ kill(getpid(), SIGUSR1);
+
+ sleep(1);
+ exit(EXIT_SUCCESS);
+ }
+
+ wait(NULL);
+ if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo)) {
+ perror("Can't get breakpoint info");
+ exit(EXIT_FAILURE);
+ }
+
+ *arch_31 = !!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_ARCH_31);
+ return dbginfo.num_data_bps;
+}
+
static int runtest(void)
{
int rwflag;
int exclude_user;
int ret;
+ bool dawr = dawr_supported();
+ bool arch_31 = false;
+ int nr_wps = get_nr_wps(&arch_31);
/*
* perf defines rwflag as two bits read and write and at least
@@ -280,7 +820,7 @@ static int runtest(void)
return ret;
/* if we have the dawr, we can do an array test */
- if (!dawr_supported())
+ if (!dawr)
continue;
ret = runtestsingle(rwflag, exclude_user, 1);
if (ret)
@@ -289,6 +829,19 @@ static int runtest(void)
}
ret = runtest_dar_outside();
+ if (ret)
+ return ret;
+
+ if (dawr && nr_wps > 1) {
+ nprocs = get_nprocs();
+ ret = runtest_multi_dawr();
+ if (ret)
+ return ret;
+ }
+
+ if (dawr && arch_31)
+ ret = runtest_unaligned_512bytes();
+
return ret;
}
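Each of the multi-DAWR tests added above follows the same open/reset/enable/workload/disable/read sequence around one or more perf hardware-breakpoint events. A minimal standalone sketch of that pattern is below; the watched variable and the expected hit count are illustrative, and error handling is trimmed:

/* Minimal sketch: count read/write hits on a variable with a perf
 * hardware breakpoint on the current process.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int watch_var(volatile int *var)
{
	struct perf_event_attr attr;
	unsigned long long hits = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_RW;
	attr.bp_addr = (__u64)var;
	attr.bp_len = sizeof(*var);
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE);
	*var += 1;				/* one write hit expected */
	ioctl(fd, PERF_EVENT_IOC_DISABLE);

	read(fd, &hits, sizeof(hits));
	close(fd);
	return (int)hits;
}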
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S
new file mode 100644
index 000000000000..070e8443e3cc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * test helper assembly functions
+ *
+ * Copyright (C) 2016 Simon Guo, IBM Corporation.
+ * Copyright 2022 Michael Ellerman, IBM Corporation.
+ */
+#include "basic_asm.h"
+
+#define GPR_SIZE __SIZEOF_LONG__
+#define FIRST_GPR 14
+#define NUM_GPRS (32 - FIRST_GPR)
+#define STACK_SIZE (NUM_GPRS * GPR_SIZE)
+
+// gpr_child_loop(int *read_flag, int *write_flag,
+// unsigned long *gpr_buf, double *fpr_buf);
+FUNC_START(gpr_child_loop)
+ // r3 = read_flag
+ // r4 = write_flag
+ // r5 = gpr_buf
+ // r6 = fpr_buf
+ PUSH_BASIC_STACK(STACK_SIZE)
+
+ // Save non-volatile GPRs
+ OP_REGS PPC_STL, GPR_SIZE, FIRST_GPR, 31, %r1, STACK_FRAME_LOCAL(0, 0), FIRST_GPR
+
+ // Load GPRs with expected values
+ OP_REGS PPC_LL, GPR_SIZE, FIRST_GPR, 31, r5, 0, FIRST_GPR
+
+ // Load FPRs with expected values
+ OP_REGS lfd, 8, 0, 31, r6
+
+ // Signal to parent that we're ready
+ li r0, 1
+ stw r0, 0(r4)
+
+ // Wait for parent to finish
+1: lwz r0, 0(r3)
+ cmpwi r0, 0
+ beq 1b // Loop while flag is zero
+
+ // Save GPRs back to caller buffer
+ OP_REGS PPC_STL, GPR_SIZE, FIRST_GPR, 31, r5, 0, FIRST_GPR
+
+ // Save FPRs
+ OP_REGS stfd, 8, 0, 31, r6
+
+ // Reload non-volatile GPRs
+ OP_REGS PPC_LL, GPR_SIZE, FIRST_GPR, 31, %r1, STACK_FRAME_LOCAL(0, 0), FIRST_GPR
+
+ POP_BASIC_STACK(STACK_SIZE)
+ blr
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
index 17cd480c8780..9ed87d297799 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -7,72 +7,127 @@
#include "ptrace.h"
#include "ptrace-gpr.h"
#include "reg.h"
+#include <time.h>
/* Tracer and Tracee Shared Data */
int shm_id;
int *cptr, *pptr;
-float a = FPR_1;
-float b = FPR_2;
-float c = FPR_3;
+extern void gpr_child_loop(int *read_flag, int *write_flag,
+ unsigned long *gpr_buf, double *fpr_buf);
-void gpr(void)
+unsigned long child_gpr_val, parent_gpr_val;
+double child_fpr_val, parent_fpr_val;
+
+static int child(void)
{
- unsigned long gpr_buf[18];
- float fpr_buf[32];
+ unsigned long gpr_buf[32];
+ double fpr_buf[32];
+ int i;
cptr = (int *)shmat(shm_id, NULL, 0);
+ memset(gpr_buf, 0, sizeof(gpr_buf));
+ memset(fpr_buf, 0, sizeof(fpr_buf));
- asm __volatile__(
- ASM_LOAD_GPR_IMMED(gpr_1)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
- :
- : [gpr_1]"i"(GPR_1), [flt_1] "b" (&a)
- : "memory", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12", "r13", "r14", "r15", "r16", "r17",
- "r18", "r19", "r20", "r21", "r22", "r23", "r24",
- "r25", "r26", "r27", "r28", "r29", "r30", "r31"
- );
-
- cptr[1] = 1;
+ for (i = 0; i < 32; i++) {
+ gpr_buf[i] = child_gpr_val;
+ fpr_buf[i] = child_fpr_val;
+ }
- while (!cptr[0])
- asm volatile("" : : : "memory");
+ gpr_child_loop(&cptr[0], &cptr[1], gpr_buf, fpr_buf);
shmdt((void *)cptr);
- store_gpr(gpr_buf);
- store_fpr_single_precision(fpr_buf);
-
- if (validate_gpr(gpr_buf, GPR_3))
- exit(1);
- if (validate_fpr_float(fpr_buf, c))
- exit(1);
+ FAIL_IF(validate_gpr(gpr_buf, parent_gpr_val));
+ FAIL_IF(validate_fpr_double(fpr_buf, parent_fpr_val));
- exit(0);
+ return 0;
}
int trace_gpr(pid_t child)
{
+ __u64 tmp, fpr[32], *peeked_fprs;
unsigned long gpr[18];
- unsigned long fpr[32];
FAIL_IF(start_trace(child));
+
+ // Check child GPRs match what we expect using GETREGS
FAIL_IF(show_gpr(child, gpr));
- FAIL_IF(validate_gpr(gpr, GPR_1));
+ FAIL_IF(validate_gpr(gpr, child_gpr_val));
+
+ // Check child FPRs match what we expect using GETFPREGS
FAIL_IF(show_fpr(child, fpr));
- FAIL_IF(validate_fpr(fpr, FPR_1_REP));
- FAIL_IF(write_gpr(child, GPR_3));
- FAIL_IF(write_fpr(child, FPR_3_REP));
+ memcpy(&tmp, &child_fpr_val, sizeof(tmp));
+ FAIL_IF(validate_fpr(fpr, tmp));
+
+ // Check child FPRs match what we expect using PEEKUSR
+ peeked_fprs = peek_fprs(child);
+ FAIL_IF(!peeked_fprs);
+ FAIL_IF(validate_fpr(peeked_fprs, tmp));
+ free(peeked_fprs);
+
+ // Write child GPRs using SETREGS
+ FAIL_IF(write_gpr(child, parent_gpr_val));
+
+ // Write child FPRs using SETFPREGS
+ memcpy(&tmp, &parent_fpr_val, sizeof(tmp));
+ FAIL_IF(write_fpr(child, tmp));
+
+ // Check child FPRs match what we just set, using PEEKUSR
+ peeked_fprs = peek_fprs(child);
+ FAIL_IF(!peeked_fprs);
+ FAIL_IF(validate_fpr(peeked_fprs, tmp));
+
+ // Write child FPRs using POKEUSR
+ FAIL_IF(poke_fprs(child, (unsigned long *)peeked_fprs));
+
+ // Child will check its FPRs match before exiting
FAIL_IF(stop_trace(child));
return TEST_PASS;
}
+#ifndef __LONG_WIDTH__
+#define __LONG_WIDTH__ (sizeof(long) * 8)
+#endif
+
+static uint64_t rand_reg(void)
+{
+ uint64_t result;
+ long r;
+
+ r = random();
+
+ // Small values are typical
+ result = r & 0xffff;
+ if (r & 0x10000)
+ return result;
+
+ // Pointers tend to have high bits set
+ result |= random() << (__LONG_WIDTH__ - 31);
+ if (r & 0x100000)
+ return result;
+
+ // And sometimes we want a full 64-bit value
+ result ^= random() << 16;
+
+ return result;
+}
+
int ptrace_gpr(void)
{
- pid_t pid;
+ unsigned long seed;
int ret, status;
+ pid_t pid;
+
+ seed = getpid() ^ time(NULL);
+ printf("srand(%lu)\n", seed);
+ srand(seed);
+
+ child_gpr_val = rand_reg();
+ child_fpr_val = rand_reg();
+ parent_gpr_val = rand_reg();
+ parent_fpr_val = rand_reg();
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
pid = fork();
@@ -81,7 +136,7 @@ int ptrace_gpr(void)
return TEST_FAIL;
}
if (pid == 0)
- gpr();
+ exit(child());
if (pid) {
pptr = (int *)shmat(shm_id, NULL, 0);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
index c5cd53181e2e..a5470b88bd08 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
@@ -12,10 +12,10 @@
#define FPR_3 0.003
#define FPR_4 0.004
-#define FPR_1_REP 0x3f50624de0000000
-#define FPR_2_REP 0x3f60624de0000000
-#define FPR_3_REP 0x3f689374c0000000
-#define FPR_4_REP 0x3f70624de0000000
+#define FPR_1_REP 0x3f50624dd2f1a9fcull
+#define FPR_2_REP 0x3f60624dd2f1a9fcull
+#define FPR_3_REP 0x3f689374bc6a7efaull
+#define FPR_4_REP 0x3f70624dd2f1a9fcull
/* Buffer must have 18 elements */
int validate_gpr(unsigned long *gpr, unsigned long val)
@@ -36,13 +36,13 @@ int validate_gpr(unsigned long *gpr, unsigned long val)
}
/* Buffer must have 32 elements */
-int validate_fpr(unsigned long *fpr, unsigned long val)
+int validate_fpr(__u64 *fpr, __u64 val)
{
int i, found = 1;
for (i = 0; i < 32; i++) {
if (fpr[i] != val) {
- printf("FPR[%d]: %lx Expected: %lx\n", i, fpr[i], val);
+ printf("FPR[%d]: %llx Expected: %llx\n", i, fpr[i], val);
found = 0;
}
}
@@ -53,7 +53,7 @@ int validate_fpr(unsigned long *fpr, unsigned long val)
}
/* Buffer must have 32 elements */
-int validate_fpr_float(float *fpr, float val)
+int validate_fpr_double(double *fpr, double val)
{
int i, found = 1;
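The updated FPR_*_REP values are the IEEE-754 double bit patterns of 0.001 through 0.004; the old constants were the single-precision patterns widened to 64 bits, which no longer match once the test stores and compares doubles. The new value for FPR_1 can be confirmed with a few lines of C:

/* Quick check that 0.001 as a double has the bit pattern 0x3f50624dd2f1a9fc. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	double d = 0.001;
	uint64_t bits;

	memcpy(&bits, &d, sizeof(bits));
	printf("%#016llx\n", (unsigned long long)bits);	/* 0x3f50624dd2f1a9fc */
	return 0;
}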
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
index fc477dfe86a2..a0635a3819aa 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
@@ -20,6 +20,8 @@
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <linux/limits.h>
#include "ptrace.h"
#define SPRN_PVR 0x11F
@@ -44,6 +46,7 @@ struct gstruct {
};
static volatile struct gstruct gstruct __attribute__((aligned(512)));
+static volatile char cwd[PATH_MAX] __attribute__((aligned(8)));
static void get_dbginfo(pid_t child_pid, struct ppc_debug_info *dbginfo)
{
@@ -138,6 +141,9 @@ static void test_workload(void)
write_var(len);
}
+ /* PTRACE_SET_DEBUGREG, Kernel Access Userspace test */
+ syscall(__NR_getcwd, &cwd, PATH_MAX);
+
/* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, WO test */
write_var(1);
@@ -150,6 +156,9 @@ static void test_workload(void)
else
read_var(1);
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, Kernel Access Userspace test */
+ syscall(__NR_getcwd, &cwd, PATH_MAX);
+
/* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */
gstruct.a[rand() % A_LEN] = 'a';
@@ -185,6 +194,18 @@ static void test_workload(void)
big_var[rand() % DAWR_MAX_LEN] = 'a';
else
cvar = big_var[rand() % DAWR_MAX_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */
+ gstruct.a[rand() % A_LEN] = 'a';
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED, RO test */
+ cvar = gstruct.b[rand() % B_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, WO test */
+ gstruct.a[rand() % A_LEN] = 'a';
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, RO test */
+ cvar = gstruct.a[rand() % A_LEN];
}
static void check_success(pid_t child_pid, const char *name, const char *type,
@@ -293,6 +314,24 @@ static int test_set_debugreg(pid_t child_pid)
return 0;
}
+static int test_set_debugreg_kernel_userspace(pid_t child_pid)
+{
+ unsigned long wp_addr = (unsigned long)cwd;
+ char *name = "PTRACE_SET_DEBUGREG";
+
+ /* PTRACE_SET_DEBUGREG, Kernel Access Userspace test */
+ wp_addr &= ~0x7UL;
+ wp_addr |= (1Ul << DABR_READ_SHIFT);
+ wp_addr |= (1UL << DABR_WRITE_SHIFT);
+ wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
+ ptrace_set_debugreg(child_pid, wp_addr);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "Kernel Access Userspace", wp_addr, 8);
+
+ ptrace_set_debugreg(child_pid, 0);
+ return 0;
+}
+
static void get_ppc_hw_breakpoint(struct ppc_hw_breakpoint *info, int type,
unsigned long addr, int len)
{
@@ -338,6 +377,22 @@ static void test_sethwdebug_exact(pid_t child_pid)
ptrace_delhwdebug(child_pid, wh);
}
+static void test_sethwdebug_exact_kernel_userspace(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info;
+ unsigned long wp_addr = (unsigned long)&cwd;
+ char *name = "PPC_PTRACE_SETHWDEBUG, MODE_EXACT";
+ int len = 1; /* hardcoded in kernel */
+ int wh;
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, Kernel Access Userspace test */
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, 0);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "Kernel Access Userspace", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+}
+
static void test_sethwdebug_range_aligned(pid_t child_pid)
{
struct ppc_hw_breakpoint info;
@@ -374,6 +429,69 @@ static void test_sethwdebug_range_aligned(pid_t child_pid)
ptrace_delhwdebug(child_pid, wh);
}
+static void test_multi_sethwdebug_range(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info1, info2;
+ unsigned long wp_addr1, wp_addr2;
+ char *name1 = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED";
+ char *name2 = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED";
+ int len1, len2;
+ int wh1, wh2;
+
+ wp_addr1 = (unsigned long)&gstruct.a;
+ wp_addr2 = (unsigned long)&gstruct.b;
+ len1 = A_LEN;
+ len2 = B_LEN;
+ get_ppc_hw_breakpoint(&info1, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr1, len1);
+ get_ppc_hw_breakpoint(&info2, PPC_BREAKPOINT_TRIGGER_READ, wp_addr2, len2);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */
+ wh1 = ptrace_sethwdebug(child_pid, &info1);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW UNALIGNED, RO test */
+ wh2 = ptrace_sethwdebug(child_pid, &info2);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name1, "WO", wp_addr1, len1);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name2, "RO", wp_addr2, len2);
+
+ ptrace_delhwdebug(child_pid, wh1);
+ ptrace_delhwdebug(child_pid, wh2);
+}
+
+static void test_multi_sethwdebug_range_dawr_overlap(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info1, info2;
+ unsigned long wp_addr1, wp_addr2;
+ char *name = "PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap";
+ int len1, len2;
+ int wh1, wh2;
+
+ wp_addr1 = (unsigned long)&gstruct.a;
+ wp_addr2 = (unsigned long)&gstruct.a;
+ len1 = A_LEN;
+ len2 = A_LEN;
+ get_ppc_hw_breakpoint(&info1, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr1, len1);
+ get_ppc_hw_breakpoint(&info2, PPC_BREAKPOINT_TRIGGER_READ, wp_addr2, len2);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, WO test */
+ wh1 = ptrace_sethwdebug(child_pid, &info1);
+
+ /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DAWR Overlap, RO test */
+ wh2 = ptrace_sethwdebug(child_pid, &info2);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "WO", wp_addr1, len1);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RO", wp_addr2, len2);
+
+ ptrace_delhwdebug(child_pid, wh1);
+ ptrace_delhwdebug(child_pid, wh2);
+}
+
static void test_sethwdebug_range_unaligned(pid_t child_pid)
{
struct ppc_hw_breakpoint info;
@@ -452,14 +570,19 @@ static void
run_tests(pid_t child_pid, struct ppc_debug_info *dbginfo, bool dawr)
{
test_set_debugreg(child_pid);
+ test_set_debugreg_kernel_userspace(child_pid);
+ test_sethwdebug_exact(child_pid);
+ test_sethwdebug_exact_kernel_userspace(child_pid);
if (dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE) {
- test_sethwdebug_exact(child_pid);
-
test_sethwdebug_range_aligned(child_pid);
if (dawr || is_8xx) {
test_sethwdebug_range_unaligned(child_pid);
test_sethwdebug_range_unaligned_dar(child_pid);
test_sethwdebug_dawr_max_range(child_pid);
+ if (dbginfo->num_data_bps > 1) {
+ test_multi_sethwdebug_range(child_pid);
+ test_multi_sethwdebug_range_dawr_overlap(child_pid);
+ }
}
}
}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c
new file mode 100644
index 000000000000..3344e74a97b4
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <asm/unistd.h>
+#include <sys/ptrace.h>
+#include <sys/wait.h>
+#include "ptrace.h"
+
+char data[16];
+
+/* Overlapping address range */
+volatile __u64 *ptrace_data1 = (__u64 *)&data[0];
+volatile __u64 *perf_data1 = (__u64 *)&data[4];
+
+/* Non-overlapping address range */
+volatile __u64 *ptrace_data2 = (__u64 *)&data[0];
+volatile __u64 *perf_data2 = (__u64 *)&data[8];
+
+static unsigned long pid_max_addr(void)
+{
+ FILE *fp;
+ char *line = NULL, *c;
+ char addr[100];
+ size_t len = 0;
+
+ fp = fopen("/proc/kallsyms", "r");
+ if (!fp) {
+ printf("Failed to read /proc/kallsyms. Exiting..\n");
+ exit(EXIT_FAILURE);
+ }
+
+ while (getline(&line, &len, fp) != -1) {
+ if (!strstr(line, "pid_max") || strstr(line, "pid_max_max") ||
+ strstr(line, "pid_max_min"))
+ continue;
+
+ snprintf(addr, sizeof(addr), "%s", line);
+ c = strchr(addr, ' ');
+ *c = '\0';
+ return strtoul(addr, &c, 16);
+ }
+ fclose(fp);
+ printf("Could not find pix_max. Exiting..\n");
+ exit(EXIT_FAILURE);
+ return -1;
+}
+
+static void perf_user_event_attr_set(struct perf_event_attr *attr, __u64 addr, __u64 len)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->bp_type = HW_BREAKPOINT_R;
+ attr->bp_addr = addr;
+ attr->bp_len = len;
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+}
+
+static void perf_kernel_event_attr_set(struct perf_event_attr *attr)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->bp_type = HW_BREAKPOINT_R;
+ attr->bp_addr = pid_max_addr();
+ attr->bp_len = sizeof(unsigned long);
+ attr->exclude_user = 1;
+ attr->exclude_hv = 1;
+}
+
+static int perf_cpu_event_open(int cpu, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_user_event_attr_set(&attr, addr, len);
+ return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
+}
+
+static int perf_thread_event_open(pid_t child_pid, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_user_event_attr_set(&attr, addr, len);
+ return syscall(__NR_perf_event_open, &attr, child_pid, -1, -1, 0);
+}
+
+static int perf_thread_cpu_event_open(pid_t child_pid, int cpu, __u64 addr, __u64 len)
+{
+ struct perf_event_attr attr;
+
+ perf_user_event_attr_set(&attr, addr, len);
+ return syscall(__NR_perf_event_open, &attr, child_pid, cpu, -1, 0);
+}
+
+static int perf_thread_kernel_event_open(pid_t child_pid)
+{
+ struct perf_event_attr attr;
+
+ perf_kernel_event_attr_set(&attr);
+ return syscall(__NR_perf_event_open, &attr, child_pid, -1, -1, 0);
+}
+
+static int perf_cpu_kernel_event_open(int cpu)
+{
+ struct perf_event_attr attr;
+
+ perf_kernel_event_attr_set(&attr);
+ return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
+}
+
+static int child(void)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
+ if (ret) {
+ printf("Error: PTRACE_TRACEME failed\n");
+ return 0;
+ }
+ kill(getpid(), SIGUSR1); /* --> parent (SIGUSR1) */
+
+ return 0;
+}
+
+static void ptrace_ppc_hw_breakpoint(struct ppc_hw_breakpoint *info, int type,
+ __u64 addr, int len)
+{
+ info->version = 1;
+ info->trigger_type = type;
+ info->condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ info->addr = addr;
+ info->addr2 = addr + len;
+ info->condition_value = 0;
+ if (!len)
+ info->addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+ else
+ info->addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+}
+
+static int ptrace_open(pid_t child_pid, __u64 wp_addr, int len)
+{
+ struct ppc_hw_breakpoint info;
+
+ ptrace_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
+ return ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &info);
+}
+
+static int test1(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing cpu event by perf)
+ * if (addr range overlaps)
+ * fail;
+ */
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ close(perf_fd);
+ return ret;
+}
+
+static int test2(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing cpu event by perf)
+ * if (addr range does not overlap)
+ * allow;
+ */
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data2, sizeof(*perf_data2));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0) {
+ ret = -1;
+ goto perf_close;
+ }
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+
+perf_close:
+ close(perf_fd);
+ return ret;
+}
+
+static int test3(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing thread event by perf on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data1,
+ sizeof(*perf_data1));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ close(perf_fd);
+ return ret;
+}
+
+static int test4(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing thread event by perf on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data2,
+ sizeof(*perf_data2));
+ if (perf_fd < 0)
+ return -1;
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0) {
+ ret = -1;
+ goto perf_close;
+ }
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+
+perf_close:
+ close(perf_fd);
+ return ret;
+}
+
+static int test5(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int cpid;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by ptrace)
+ * if (existing thread event by perf on the different thread)
+ * allow;
+ */
+ cpid = fork();
+ if (!cpid) {
+ /* Temporary Child */
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ perf_fd = perf_thread_event_open(cpid, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto kill_child;
+ }
+
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0) {
+ ret = -1;
+ goto perf_close;
+ }
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+perf_close:
+ close(perf_fd);
+kill_child:
+ kill(cpid, SIGINT);
+ return ret;
+}
+
+static int test6(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread kernel event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * allow;
+ * -- OR --
+ * if (new per cpu kernel event by perf)
+ * if (existing thread event by ptrace)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_kernel_event_open(child_pid);
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ perf_fd = perf_cpu_kernel_event_open(0);
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test7(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data1,
+ sizeof(*perf_data1));
+ if (perf_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test8(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_event_open(child_pid, (__u64)perf_data2,
+ sizeof(*perf_data2));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test9(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int cpid;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread event by perf)
+ * if (existing thread event by ptrace on the other thread)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ cpid = fork();
+ if (!cpid) {
+ /* Temporary Child */
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ perf_fd = perf_thread_event_open(cpid, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto kill_child;
+ }
+ close(perf_fd);
+
+kill_child:
+ kill(cpid, SIGINT);
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test10(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test11(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_cpu_event_open(0, (__u64)perf_data2, sizeof(*perf_data2));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test12(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread and per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range overlaps)
+ * fail;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_cpu_event_open(child_pid, 0, (__u64)perf_data1, sizeof(*perf_data1));
+ if (perf_fd > 0 || errno != ENOSPC)
+ ret = -1;
+
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test13(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread and per cpu event by perf)
+ * if (existing thread event by ptrace on the same thread)
+ * if (addr range does not overlap)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data2, sizeof(*ptrace_data2));
+ if (ptrace_fd < 0)
+ return -1;
+
+ perf_fd = perf_thread_cpu_event_open(child_pid, 0, (__u64)perf_data2, sizeof(*perf_data2));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto ptrace_close;
+ }
+ close(perf_fd);
+
+ptrace_close:
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int test14(pid_t child_pid)
+{
+ int perf_fd;
+ int ptrace_fd;
+ int cpid;
+ int ret = 0;
+
+ /* Test:
+ * if (new per thread and per cpu event by perf)
+ * if (existing thread event by ptrace on the other thread)
+ * allow;
+ */
+ ptrace_fd = ptrace_open(child_pid, (__u64)ptrace_data1, sizeof(*ptrace_data1));
+ if (ptrace_fd < 0)
+ return -1;
+
+ cpid = fork();
+ if (!cpid) {
+ /* Temporary Child */
+ pause();
+ exit(EXIT_SUCCESS);
+ }
+
+ perf_fd = perf_thread_cpu_event_open(cpid, 0, (__u64)perf_data1,
+ sizeof(*perf_data1));
+ if (perf_fd < 0) {
+ ret = -1;
+ goto kill_child;
+ }
+ close(perf_fd);
+
+kill_child:
+ kill(cpid, SIGINT);
+ ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, ptrace_fd);
+ return ret;
+}
+
+static int do_test(const char *msg, int (*fun)(pid_t arg), pid_t arg)
+{
+ int ret;
+
+ ret = fun(arg);
+ if (ret)
+ printf("%s: Error\n", msg);
+ else
+ printf("%s: Ok\n", msg);
+ return ret;
+}
+
+char *desc[14] = {
+ "perf cpu event -> ptrace thread event (Overlapping)",
+ "perf cpu event -> ptrace thread event (Non-overlapping)",
+ "perf thread event -> ptrace same thread event (Overlapping)",
+ "perf thread event -> ptrace same thread event (Non-overlapping)",
+ "perf thread event -> ptrace other thread event",
+ "ptrace thread event -> perf kernel event",
+ "ptrace thread event -> perf same thread event (Overlapping)",
+ "ptrace thread event -> perf same thread event (Non-overlapping)",
+ "ptrace thread event -> perf other thread event",
+ "ptrace thread event -> perf cpu event (Overlapping)",
+ "ptrace thread event -> perf cpu event (Non-overlapping)",
+ "ptrace thread event -> perf same thread & cpu event (Overlapping)",
+ "ptrace thread event -> perf same thread & cpu event (Non-overlapping)",
+ "ptrace thread event -> perf other thread & cpu event",
+};
+
+static int test(pid_t child_pid)
+{
+ int ret = TEST_PASS;
+
+ ret |= do_test(desc[0], test1, child_pid);
+ ret |= do_test(desc[1], test2, child_pid);
+ ret |= do_test(desc[2], test3, child_pid);
+ ret |= do_test(desc[3], test4, child_pid);
+ ret |= do_test(desc[4], test5, child_pid);
+ ret |= do_test(desc[5], test6, child_pid);
+ ret |= do_test(desc[6], test7, child_pid);
+ ret |= do_test(desc[7], test8, child_pid);
+ ret |= do_test(desc[8], test9, child_pid);
+ ret |= do_test(desc[9], test10, child_pid);
+ ret |= do_test(desc[10], test11, child_pid);
+ ret |= do_test(desc[11], test12, child_pid);
+ ret |= do_test(desc[12], test13, child_pid);
+ ret |= do_test(desc[13], test14, child_pid);
+
+ return ret;
+}
+
+static void get_dbginfo(pid_t child_pid, struct ppc_debug_info *dbginfo)
+{
+ if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, dbginfo)) {
+ perror("Can't get breakpoint info");
+ exit(-1);
+ }
+}
+
+static int ptrace_perf_hwbreak(void)
+{
+ int ret;
+ pid_t child_pid;
+ struct ppc_debug_info dbginfo;
+
+ child_pid = fork();
+ if (!child_pid)
+ return child();
+
+ /* parent */
+ wait(NULL); /* <-- child (SIGUSR1) */
+
+ get_dbginfo(child_pid, &dbginfo);
+ SKIP_IF(dbginfo.num_data_bps <= 1);
+
+ ret = perf_cpu_event_open(0, (__u64)perf_data1, sizeof(*perf_data1));
+ SKIP_IF(ret < 0);
+ close(ret);
+
+ ret = test(child_pid);
+
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ return ret;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_perf_hwbreak, "ptrace-perf-hwbreak");
+}
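The overlap behaviour exercised by the tests above follows directly from how the two pointer pairs are laid over the 16-byte data buffer declared at the top of the file; spelling that layout out as a comment:

/* Layout of data[16] used by the conflict tests:
 *
 *   ptrace_data1 -> data[0..7]   \  overlap on bytes 4..7
 *   perf_data1   -> data[4..11]  /
 *
 *   ptrace_data2 -> data[0..7]   \  disjoint ranges
 *   perf_data2   -> data[8..15]  /
 */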
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
index bdbbbe8431e0..bc454f899124 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
@@ -44,7 +44,7 @@ struct shared_info {
unsigned long amr2;
/* AMR value that ptrace should refuse to write to the child. */
- unsigned long amr3;
+ unsigned long invalid_amr;
/* IAMR value the parent expects to read from the child. */
unsigned long expected_iamr;
@@ -57,8 +57,8 @@ struct shared_info {
* (even though they're valid ones) because userspace doesn't have
* access to those registers.
*/
- unsigned long new_iamr;
- unsigned long new_uamor;
+ unsigned long invalid_iamr;
+ unsigned long invalid_uamor;
};
static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
@@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights)
return syscall(__NR_pkey_alloc, flags, init_access_rights);
}
-static int sys_pkey_free(int pkey)
-{
- return syscall(__NR_pkey_free, pkey);
-}
-
static int child(struct shared_info *info)
{
unsigned long reg;
@@ -100,33 +95,37 @@ static int child(struct shared_info *info)
info->amr1 |= 3ul << pkeyshift(pkey1);
info->amr2 |= 3ul << pkeyshift(pkey2);
- info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3);
+ /*
+ * Invalid AMR value where we try to force-write
+ * bits which are denied by the UAMOR setting.
+ */
+ info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor);
+ /*
+ * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr
+ */
if (disable_execute)
info->expected_iamr |= 1ul << pkeyshift(pkey1);
else
info->expected_iamr &= ~(1ul << pkeyshift(pkey1));
- info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3));
-
- info->expected_uamor |= 3ul << pkeyshift(pkey1) |
- 3ul << pkeyshift(pkey2);
- info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2);
- info->new_uamor |= 3ul << pkeyshift(pkey1);
+ /*
+ * We allocated pkey2 and pkey3 above. Clear the IAMR bits.
+ */
+ info->expected_iamr &= ~(1ul << pkeyshift(pkey2));
+ info->expected_iamr &= ~(1ul << pkeyshift(pkey3));
/*
- * We won't use pkey3. We just want a plausible but invalid key to test
- * whether ptrace will let us write to AMR bits we are not supposed to.
- *
- * This also tests whether the kernel restores the UAMOR permissions
- * after a key is freed.
+ * Create an IAMR value different from the expected value.
+ * The kernel will reject any IAMR and UAMOR change.
*/
- sys_pkey_free(pkey3);
+ info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2));
+ info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1));
printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n",
user_write, info->amr1, pkey1, pkey2, pkey3);
- mtspr(SPRN_AMR, info->amr1);
+ set_amr(info->amr1);
/* Wait for parent to read our AMR value and write a new one. */
ret = prod_parent(&info->child_sync);
@@ -196,9 +195,9 @@ static int parent(struct shared_info *info, pid_t pid)
PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync);
PARENT_FAIL_IF(ret, &info->child_sync);
- info->amr1 = info->amr2 = info->amr3 = regs[0];
- info->expected_iamr = info->new_iamr = regs[1];
- info->expected_uamor = info->new_uamor = regs[2];
+ info->amr1 = info->amr2 = regs[0];
+ info->expected_iamr = regs[1];
+ info->expected_uamor = regs[2];
/* Wake up child so that it can set itself up. */
ret = prod_child(&info->child_sync);
@@ -234,10 +233,10 @@ static int parent(struct shared_info *info, pid_t pid)
return ret;
/* Write invalid AMR value in child. */
- ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1);
+ ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1);
PARENT_FAIL_IF(ret, &info->child_sync);
- printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3);
+ printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr);
/* Wake up child so that it can verify it didn't change. */
ret = prod_child(&info->child_sync);
@@ -249,7 +248,7 @@ static int parent(struct shared_info *info, pid_t pid)
/* Try to write to IAMR. */
regs[0] = info->amr1;
- regs[1] = info->new_iamr;
+ regs[1] = info->invalid_iamr;
ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2);
PARENT_FAIL_IF(!ret, &info->child_sync);
@@ -257,7 +256,7 @@ static int parent(struct shared_info *info, pid_t pid)
ptrace_write_running, regs[0], regs[1]);
/* Try to write to IAMR and UAMOR. */
- regs[2] = info->new_uamor;
+ regs[2] = info->invalid_uamor;
ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3);
PARENT_FAIL_IF(!ret, &info->child_sync);
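The AMR/IAMR/UAMOR values constructed above are assembled from per-key fields selected with pkeyshift(). As a reading aid only (not part of the patch), a sketch of that bit arithmetic, assuming the per-key layout these selftests use elsewhere (2 bits per key, key 0 in the most significant position); the helper names are hypothetical:

/* Illustrative sketch only -- assumes the selftests' usual pkey layout. */
#define NR_PKEYS                32
#define PKEY_BITS_PER_PKEY      2
#define pkeyshift(pkey)         ((NR_PKEYS - (pkey) - 1) * PKEY_BITS_PER_PKEY)

/* AMR: setting both bits of a key's field denies read and write,
 * matching the amr1/amr2 setup above. */
static inline unsigned long amr_deny_rw(unsigned long amr, int pkey)
{
        return amr | (3ul << pkeyshift(pkey));
}

/* IAMR: the test sets a single bit per key to deny execute,
 * matching the expected_iamr setup above. */
static inline unsigned long iamr_deny_exec(unsigned long iamr, int pkey)
{
        return iamr | (1ul << pkeyshift(pkey));
}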
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
index 58cb1a860cc9..4436ca9d3caf 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
@@ -78,6 +78,9 @@ int ptrace_tar(void)
pid_t pid;
int ret, status;
+ // TAR was added in v2.07
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
pid = fork();
if (pid < 0) {
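Several hunks in this series gate tests on CPU features, e.g. have_hwcap2(PPC_FEATURE2_ARCH_2_07) above for TAR and have_hwcap(PPC_FEATURE_HAS_VSX) for the VSX test further down. For reference only, these checks are assumed to reduce to an ELF auxiliary-vector lookup along these lines (the helper names here are hypothetical):

/* Illustrative sketch only -- assumed equivalent of the selftests'
 * have_hwcap()/have_hwcap2() helpers. */
#include <stdbool.h>
#include <sys/auxv.h>

static inline bool hwcap_has(unsigned long feature)
{
        return (getauxval(AT_HWCAP) & feature) == feature;
}

static inline bool hwcap2_has(unsigned long feature)
{
        return (getauxval(AT_HWCAP2) & feature) == feature;
}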
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
index 82f7bdc2e5e6..5dc152b162df 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -12,15 +12,15 @@
int shm_id;
unsigned long *cptr, *pptr;
-float a = FPR_1;
-float b = FPR_2;
-float c = FPR_3;
+double a = FPR_1;
+double b = FPR_2;
+double c = FPR_3;
void tm_gpr(void)
{
unsigned long gpr_buf[18];
unsigned long result, texasr;
- float fpr_buf[32];
+ double fpr_buf[32];
printf("Starting the child\n");
cptr = (unsigned long *)shmat(shm_id, NULL, 0);
@@ -29,12 +29,12 @@ trans:
cptr[1] = 0;
asm __volatile__(
ASM_LOAD_GPR_IMMED(gpr_1)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+ ASM_LOAD_FPR(flt_1)
"1: ;"
"tbegin.;"
"beq 2f;"
ASM_LOAD_GPR_IMMED(gpr_2)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_2)
+ ASM_LOAD_FPR(flt_2)
"tsuspend.;"
"li 7, 1;"
"stw 7, 0(%[cptr1]);"
@@ -57,7 +57,7 @@ trans:
: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2),
[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
[flt_2] "b" (&b), [cptr1] "b" (&cptr[1])
- : "memory", "r7", "r8", "r9", "r10",
+ : "memory", "r0", "r7", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15", "r16",
"r17", "r18", "r19", "r20", "r21", "r22",
"r23", "r24", "r25", "r26", "r27", "r28",
@@ -70,12 +70,12 @@ trans:
shmdt((void *)cptr);
store_gpr(gpr_buf);
- store_fpr_single_precision(fpr_buf);
+ store_fpr(fpr_buf);
if (validate_gpr(gpr_buf, GPR_3))
exit(1);
- if (validate_fpr_float(fpr_buf, c))
+ if (validate_fpr_double(fpr_buf, c))
exit(1);
exit(0);
@@ -87,7 +87,7 @@ trans:
int trace_tm_gpr(pid_t child)
{
unsigned long gpr[18];
- unsigned long fpr[32];
+ __u64 fpr[32];
FAIL_IF(start_trace(child));
FAIL_IF(show_gpr(child, gpr));
@@ -113,6 +113,7 @@ int ptrace_tm_gpr(void)
int ret, status;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
pid = fork();
if (pid < 0) {
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
index ad65be6e8e85..458cc1a70ccf 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -12,10 +12,10 @@
int shm_id;
int *cptr, *pptr;
-float a = FPR_1;
-float b = FPR_2;
-float c = FPR_3;
-float d = FPR_4;
+double a = FPR_1;
+double b = FPR_2;
+double c = FPR_3;
+double d = FPR_4;
__attribute__((used)) void wait_parent(void)
{
@@ -28,7 +28,7 @@ void tm_spd_gpr(void)
{
unsigned long gpr_buf[18];
unsigned long result, texasr;
- float fpr_buf[32];
+ double fpr_buf[32];
cptr = (int *)shmat(shm_id, NULL, 0);
@@ -36,7 +36,7 @@ trans:
cptr[2] = 0;
asm __volatile__(
ASM_LOAD_GPR_IMMED(gpr_1)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+ ASM_LOAD_FPR(flt_1)
"1: ;"
"tbegin.;"
@@ -45,7 +45,7 @@ trans:
ASM_LOAD_GPR_IMMED(gpr_2)
"tsuspend.;"
ASM_LOAD_GPR_IMMED(gpr_4)
- ASM_LOAD_FPR_SINGLE_PRECISION(flt_4)
+ ASM_LOAD_FPR(flt_4)
"bl wait_parent;"
"tresume.;"
@@ -65,7 +65,7 @@ trans:
: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
[flt_4] "b" (&d)
- : "memory", "r5", "r6", "r7",
+ : "memory", "r0", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
@@ -77,12 +77,12 @@ trans:
shmdt((void *)cptr);
store_gpr(gpr_buf);
- store_fpr_single_precision(fpr_buf);
+ store_fpr(fpr_buf);
if (validate_gpr(gpr_buf, GPR_3))
exit(1);
- if (validate_fpr_float(fpr_buf, c))
+ if (validate_fpr_double(fpr_buf, c))
exit(1);
exit(0);
}
@@ -93,7 +93,7 @@ trans:
int trace_tm_spd_gpr(pid_t child)
{
unsigned long gpr[18];
- unsigned long fpr[32];
+ __u64 fpr[32];
FAIL_IF(start_trace(child));
FAIL_IF(show_gpr(child, gpr));
@@ -119,6 +119,7 @@ int ptrace_tm_spd_gpr(void)
int ret, status;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
pid = fork();
if (pid < 0) {
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
index 2ecfa1158e2b..e112a34fbe59 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -129,6 +129,7 @@ int ptrace_tm_spd_tar(void)
int ret, status;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
pid = fork();
if (pid == 0)
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
index 6f7fb51f0809..40133d49fe39 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -129,6 +129,7 @@ int ptrace_tm_spd_vsx(void)
int ret, status, i;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
for (i = 0; i < 128; i++) {
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
index 068bfed2e606..880ba6a29a48 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -114,6 +114,7 @@ int ptrace_tm_spr(void)
int ret, status;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(struct shared), 0777|IPC_CREAT);
shm_id1 = shmget(IPC_PRIVATE, sizeof(int), 0777|IPC_CREAT);
pid = fork();
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
index 46ef378a15ec..d0db6df0f0ea 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -117,6 +117,7 @@ int ptrace_tm_tar(void)
int ret, status;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
pid = fork();
if (pid == 0)
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
index 70ca01234f79..4f05ce4fd282 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -113,6 +113,7 @@ int ptrace_tm_vsx(void)
int ret, status, i;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
for (i = 0; i < 128; i++) {
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
index c4fe0e893306..cb9875f764ca 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
@@ -61,6 +61,8 @@ int ptrace_vsx(void)
pid_t pid;
int ret, status, i;
+ SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_VSX));
+
shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
for (i = 0; i < VEC_MAX; i++)
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h
index 5181ad9b4b6c..4e0233c0f2b3 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h
@@ -4,6 +4,9 @@
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*/
+
+#define __SANE_USERSPACE_TYPES__
+
#include <inttypes.h>
#include <unistd.h>
#include <stdlib.h>
@@ -20,6 +23,7 @@
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/user.h>
+#include <sys/syscall.h>
#include <linux/elf.h>
#include <linux/types.h>
#include <linux/auxvec.h>
@@ -30,8 +34,8 @@
#define TEST_FAIL 1
struct fpr_regs {
- unsigned long fpr[32];
- unsigned long fpscr;
+ __u64 fpr[32];
+ __u64 fpscr;
};
struct tm_spr_regs {
@@ -318,7 +322,7 @@ fail:
}
/* FPR */
-int show_fpr(pid_t child, unsigned long *fpr)
+int show_fpr(pid_t child, __u64 *fpr)
{
struct fpr_regs *regs;
int ret, i;
@@ -337,7 +341,7 @@ int show_fpr(pid_t child, unsigned long *fpr)
return TEST_PASS;
}
-int write_fpr(pid_t child, unsigned long val)
+int write_fpr(pid_t child, __u64 val)
{
struct fpr_regs *regs;
int ret, i;
@@ -360,7 +364,7 @@ int write_fpr(pid_t child, unsigned long val)
return TEST_PASS;
}
-int show_ckpt_fpr(pid_t child, unsigned long *fpr)
+int show_ckpt_fpr(pid_t child, __u64 *fpr)
{
struct fpr_regs *regs;
struct iovec iov;
@@ -437,6 +441,70 @@ int show_gpr(pid_t child, unsigned long *gpr)
return TEST_PASS;
}
+long sys_ptrace(enum __ptrace_request request, pid_t pid, unsigned long addr, unsigned long data)
+{
+ return syscall(__NR_ptrace, request, pid, (void *)addr, data);
+}
+
+// 33 because of FPSCR
+#define PT_NUM_FPRS (33 * (sizeof(__u64) / sizeof(unsigned long)))
+
+__u64 *peek_fprs(pid_t child)
+{
+ unsigned long *fprs, *p, addr;
+ long ret;
+ int i;
+
+ fprs = malloc(sizeof(unsigned long) * PT_NUM_FPRS);
+ if (!fprs) {
+ perror("malloc() failed");
+ return NULL;
+ }
+
+ for (i = 0, p = fprs; i < PT_NUM_FPRS; i++, p++) {
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_PEEKUSER, child, addr, (unsigned long)p);
+ if (ret) {
+ perror("ptrace(PTRACE_PEEKUSR) failed");
+ return NULL;
+ }
+ }
+
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_PEEKUSER, child, addr, (unsigned long)&addr);
+ if (!ret) {
+ printf("ptrace(PTRACE_PEEKUSR) succeeded unexpectedly!\n");
+ return NULL;
+ }
+
+ return (__u64 *)fprs;
+}
+
+int poke_fprs(pid_t child, unsigned long *fprs)
+{
+ unsigned long *p, addr;
+ long ret;
+ int i;
+
+ for (i = 0, p = fprs; i < PT_NUM_FPRS; i++, p++) {
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_POKEUSER, child, addr, *p);
+ if (ret) {
+ perror("ptrace(PTRACE_POKEUSR) failed");
+ return -1;
+ }
+ }
+
+ addr = sizeof(unsigned long) * (PT_FPR0 + i);
+ ret = sys_ptrace(PTRACE_POKEUSER, child, addr, addr);
+ if (!ret) {
+ printf("ptrace(PTRACE_POKEUSR) succeeded unexpectedly!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
int write_gpr(pid_t child, unsigned long val)
{
struct pt_regs *regs;
@@ -742,4 +810,3 @@ void analyse_texasr(unsigned long texasr)
}
void store_gpr(unsigned long *addr);
-void store_fpr(float *addr);
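The new peek_fprs()/poke_fprs() helpers walk the FPR block of the ptrace USER area one machine word at a time, and deliberately probe one word past PT_FPR0 + PT_NUM_FPRS to confirm that out-of-range PEEKUSER/POKEUSER accesses fail. For illustration only, a round-trip through the two helpers from a tracer whose child is already ptrace-stopped (this wrapper is hypothetical and not part of the patch):

/* Illustrative sketch only -- uses the peek_fprs()/poke_fprs() helpers above;
 * the child must already be stopped under ptrace. */
static int fpr_roundtrip(pid_t child)
{
        __u64 *fprs;
        int rc;

        fprs = peek_fprs(child);        /* 32 FPRs + FPSCR via PTRACE_PEEKUSER */
        if (!fprs)
                return -1;

        fprs[0] += 1;                   /* perturb FPR0 */
        rc = poke_fprs(child, (unsigned long *)fprs);

        free(fprs);
        return rc;
}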
diff --git a/tools/testing/selftests/powerpc/security/.gitignore b/tools/testing/selftests/powerpc/security/.gitignore
index 0b969fba3beb..9357b186b13c 100644
--- a/tools/testing/selftests/powerpc/security/.gitignore
+++ b/tools/testing/selftests/powerpc/security/.gitignore
@@ -1 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
rfi_flush
+entry_flush
+spectre_v2
+uaccess_flush
diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
index eadbbff50be6..7488315fd847 100644
--- a/tools/testing/selftests/powerpc/security/Makefile
+++ b/tools/testing/selftests/powerpc/security/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0+
-TEST_GEN_PROGS := rfi_flush spectre_v2
+TEST_GEN_PROGS := rfi_flush entry_flush uaccess_flush spectre_v2
+TEST_PROGS := mitigation-patching.sh
+
top_srcdir = ../../../../..
CFLAGS += -I../../../../../usr/include
@@ -11,3 +13,6 @@ $(TEST_GEN_PROGS): ../harness.c ../utils.c
$(OUTPUT)/spectre_v2: CFLAGS += -m64
$(OUTPUT)/spectre_v2: ../pmu/event.c branch_loops.S
+$(OUTPUT)/rfi_flush: flush_utils.c
+$(OUTPUT)/entry_flush: flush_utils.c
+$(OUTPUT)/uaccess_flush: flush_utils.c
diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
new file mode 100644
index 000000000000..68ce377b205e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/entry_flush.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2018 IBM Corporation.
+ */
+
+#define __SANE_USERSPACE_TYPES__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "utils.h"
+#include "flush_utils.h"
+
+int entry_flush_test(void)
+{
+ char *p;
+ int repetitions = 10;
+ int fd, passes = 0, iter, rc = 0;
+ struct perf_event_read v;
+ __u64 l1d_misses_total = 0;
+ unsigned long iterations = 100000, zero_size = 24 * 1024;
+ unsigned long l1d_misses_expected;
+ int rfi_flush_orig;
+ int entry_flush, entry_flush_orig;
+
+ SKIP_IF(geteuid() != 0);
+
+ // The PMU event we use only works on Power7 or later
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
+
+ if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
+ perror("Unable to read powerpc/rfi_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
+ perror("Unable to read powerpc/entry_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (rfi_flush_orig != 0) {
+ if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) {
+ perror("error writing to powerpc/rfi_flush debugfs file");
+ FAIL_IF(1);
+ }
+ }
+
+ entry_flush = entry_flush_orig;
+
+ fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ FAIL_IF(fd < 0);
+
+ p = (char *)memalign(zero_size, CACHELINE_SIZE);
+
+ FAIL_IF(perf_event_enable(fd));
+
+ // disable L1 prefetching
+ set_dscr(1);
+
+ iter = repetitions;
+
+ /*
+ * We expect to see an L1D miss for each cacheline access when entry_flush
+ * is set. Allow a small variation on this.
+ */
+ l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
+
+again:
+ FAIL_IF(perf_event_reset(fd));
+
+ syscall_loop(p, iterations, zero_size);
+
+ FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
+
+ if (entry_flush && v.l1d_misses >= l1d_misses_expected)
+ passes++;
+ else if (!entry_flush && v.l1d_misses < (l1d_misses_expected / 2))
+ passes++;
+
+ l1d_misses_total += v.l1d_misses;
+
+ while (--iter)
+ goto again;
+
+ if (passes < repetitions) {
+ printf("FAIL (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d failures]\n",
+ entry_flush, l1d_misses_total, entry_flush ? '<' : '>',
+ entry_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
+ repetitions - passes, repetitions);
+ rc = 1;
+ } else {
+ printf("PASS (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d pass]\n",
+ entry_flush, l1d_misses_total, entry_flush ? '>' : '<',
+ entry_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
+ passes, repetitions);
+ }
+
+ if (entry_flush == entry_flush_orig) {
+ entry_flush = !entry_flush_orig;
+ if (write_debugfs_file("powerpc/entry_flush", entry_flush) < 0) {
+ perror("error writing to powerpc/entry_flush debugfs file");
+ return 1;
+ }
+ iter = repetitions;
+ l1d_misses_total = 0;
+ passes = 0;
+ goto again;
+ }
+
+ perf_event_disable(fd);
+ close(fd);
+
+ set_dscr(0);
+
+ if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/rfi_flush debugfs file");
+ return 1;
+ }
+
+ if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/entry_flush debugfs file");
+ return 1;
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(entry_flush_test, "entry_flush_test");
+}
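For scale: with zero_size = 24 * 1024 and CACHELINE_SIZE = 128, each syscall_loop() iteration touches 192 cache lines, so l1d_misses_expected works out to 100000 * 190 = 19,000,000 (the "- 2" leaves a small margin). The PERF_L1D_READ_MISS_CONFIG encoding comes from flush_utils.h further down; for illustration only, opening the same counter directly through perf_event_open(2) would look roughly like this (the selftests' perf_event_open_counter() helper is assumed to do something equivalent; the function name and the exclude/read_format choices below are assumptions):

/* Illustrative sketch only -- assumed rough equivalent of
 * perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1). */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_l1d_read_miss_counter(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HW_CACHE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CACHE_L1D |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
        attr.read_format = PERF_FORMAT_GROUP;   /* read() then yields { nr, value },
                                                   matching struct perf_event_read */
        attr.disabled = 1;
        attr.exclude_kernel = 1;                /* count userspace misses (assumed) */
        attr.exclude_hv = 1;

        /* current thread, any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}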
diff --git a/tools/testing/selftests/powerpc/security/flush_utils.c b/tools/testing/selftests/powerpc/security/flush_utils.c
new file mode 100644
index 000000000000..4d95965cb751
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/flush_utils.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2018 IBM Corporation.
+ */
+
+#define __SANE_USERSPACE_TYPES__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <sys/utsname.h>
+#include "utils.h"
+#include "flush_utils.h"
+
+static inline __u64 load(void *addr)
+{
+ __u64 tmp;
+
+ asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr));
+
+ return tmp;
+}
+
+void syscall_loop(char *p, unsigned long iterations,
+ unsigned long zero_size)
+{
+ for (unsigned long i = 0; i < iterations; i++) {
+ for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
+ load(p + j);
+ getppid();
+ }
+}
+
+void syscall_loop_uaccess(char *p, unsigned long iterations,
+ unsigned long zero_size)
+{
+ struct utsname utsname;
+
+ for (unsigned long i = 0; i < iterations; i++) {
+ for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
+ load(p + j);
+ uname(&utsname);
+ }
+}
+
+static void sigill_handler(int signr, siginfo_t *info, void *unused)
+{
+ static int warned;
+ ucontext_t *ctx = (ucontext_t *)unused;
+ unsigned long *pc = &UCONTEXT_NIA(ctx);
+
+ /* mtspr 3,RS to check for move to DSCR below */
+ if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
+ if (!warned++)
+ printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
+ *pc += 4;
+ } else {
+ printf("SIGILL at %p\n", pc);
+ abort();
+ }
+}
+
+void set_dscr(unsigned long val)
+{
+ static int init;
+ struct sigaction sa;
+
+ if (!init) {
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = sigill_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGILL, &sa, NULL))
+ perror("sigill_handler");
+ init = 1;
+ }
+
+ asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+}
diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
new file mode 100644
index 000000000000..e1e68281f7ac
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/flush_utils.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Copyright 2018 IBM Corporation.
+ */
+
+#ifndef _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H
+#define _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H
+
+#define CACHELINE_SIZE 128
+
+#define PERF_L1D_READ_MISS_CONFIG ((PERF_COUNT_HW_CACHE_L1D) | \
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) | \
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
+
+void syscall_loop(char *p, unsigned long iterations,
+ unsigned long zero_size);
+
+void syscall_loop_uaccess(char *p, unsigned long iterations,
+ unsigned long zero_size);
+
+void set_dscr(unsigned long val);
+
+#endif /* _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H */
diff --git a/tools/testing/selftests/powerpc/security/mitigation-patching.sh b/tools/testing/selftests/powerpc/security/mitigation-patching.sh
new file mode 100755
index 000000000000..f43aa4b77fba
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/mitigation-patching.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+TIMEOUT=10
+
+function do_one
+{
+ local mitigation="$1"
+ local orig
+ local start
+ local now
+
+ orig=$(cat "$mitigation")
+
+ start=$(date +%s)
+ now=$start
+
+ while [[ $((now-start)) -lt "$TIMEOUT" ]]
+ do
+ echo 0 > "$mitigation"
+ echo 1 > "$mitigation"
+
+ now=$(date +%s)
+ done
+
+ echo "$orig" > "$mitigation"
+}
+
+rc=0
+cd /sys/kernel/debug/powerpc || rc=1
+if [[ "$rc" -ne 0 ]]; then
+ echo "Error: couldn't cd to /sys/kernel/debug/powerpc" >&2
+ exit 1
+fi
+
+tainted=$(cat /proc/sys/kernel/tainted)
+if [[ "$tainted" -ne 0 ]]; then
+ echo "Error: kernel already tainted!" >&2
+ exit 1
+fi
+
+mitigations="barrier_nospec stf_barrier count_cache_flush rfi_flush entry_flush uaccess_flush"
+
+for m in $mitigations
+do
+ if [[ -f /sys/kernel/debug/powerpc/$m ]]
+ then
+ do_one "$m" &
+ fi
+done
+
+echo "Spawned threads enabling/disabling mitigations ..."
+
+if stress-ng > /dev/null 2>&1; then
+ stress="stress-ng"
+elif stress > /dev/null 2>&1; then
+ stress="stress"
+else
+ stress=""
+fi
+
+if [[ -n "$stress" ]]; then
+ "$stress" -m "$(nproc)" -t "$TIMEOUT" &
+ echo "Spawned VM stressors ..."
+fi
+
+echo "Waiting for timeout ..."
+wait
+
+tainted=$(cat /proc/sys/kernel/tainted)
+if [[ "$tainted" -ne 0 ]]; then
+ echo "Error: kernel became tainted!" >&2
+ exit 1
+fi
+
+echo "OK"
+exit 0
diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
index 0a7d0afb26b8..f73484a6470f 100644
--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
+++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
@@ -14,32 +14,8 @@
#include <string.h>
#include <stdio.h>
#include "utils.h"
+#include "flush_utils.h"
-#define CACHELINE_SIZE 128
-
-struct perf_event_read {
- __u64 nr;
- __u64 l1d_misses;
-};
-
-static inline __u64 load(void *addr)
-{
- __u64 tmp;
-
- asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr));
-
- return tmp;
-}
-
-static void syscall_loop(char *p, unsigned long iterations,
- unsigned long zero_size)
-{
- for (unsigned long i = 0; i < iterations; i++) {
- for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
- load(p + j);
- getppid();
- }
-}
int rfi_flush_test(void)
{
@@ -50,24 +26,42 @@ int rfi_flush_test(void)
__u64 l1d_misses_total = 0;
unsigned long iterations = 100000, zero_size = 24 * 1024;
unsigned long l1d_misses_expected;
- int rfi_flush_org, rfi_flush;
+ int rfi_flush_orig, rfi_flush;
+ int have_entry_flush, entry_flush_orig;
SKIP_IF(geteuid() != 0);
- if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_org)) {
+ // The PMU event we use only works on Power7 or later
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
+
+ if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
perror("Unable to read powerpc/rfi_flush debugfs file");
SKIP_IF(1);
}
- rfi_flush = rfi_flush_org;
+ if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
+ have_entry_flush = 0;
+ } else {
+ have_entry_flush = 1;
+
+ if (entry_flush_orig != 0) {
+ if (write_debugfs_file("powerpc/entry_flush", 0) < 0) {
+ perror("error writing to powerpc/entry_flush debugfs file");
+ return 1;
+ }
+ }
+ }
+
+ rfi_flush = rfi_flush_orig;
- fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
+ fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
FAIL_IF(fd < 0);
p = (char *)memalign(zero_size, CACHELINE_SIZE);
FAIL_IF(perf_event_enable(fd));
+ // disable L1 prefetching
set_dscr(1);
iter = repetitions;
@@ -109,8 +103,8 @@ again:
repetitions * l1d_misses_expected / 2,
passes, repetitions);
- if (rfi_flush == rfi_flush_org) {
- rfi_flush = !rfi_flush_org;
+ if (rfi_flush == rfi_flush_orig) {
+ rfi_flush = !rfi_flush_orig;
if (write_debugfs_file("powerpc/rfi_flush", rfi_flush) < 0) {
perror("error writing to powerpc/rfi_flush debugfs file");
return 1;
@@ -126,11 +120,19 @@ again:
set_dscr(0);
- if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_org) < 0) {
+ if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
perror("unable to restore original value of powerpc/rfi_flush debugfs file");
return 1;
}
+ if (have_entry_flush) {
+ if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/entry_flush "
+ "debugfs file");
+ return 1;
+ }
+ }
+
return rc;
}
diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c
index 8c6b982af2a8..5b2abb719ef2 100644
--- a/tools/testing/selftests/powerpc/security/spectre_v2.c
+++ b/tools/testing/selftests/powerpc/security/spectre_v2.c
@@ -125,8 +125,6 @@ static enum spectre_v2_state get_sysfs_state(void)
#define PM_BR_PRED_PCACHE 0x048a0 // P9 only
#define PM_BR_MPRED_PCACHE 0x048b0 // P9 only
-#define SPRN_PVR 287
-
int spectre_v2_test(void)
{
enum spectre_v2_state state;
@@ -134,6 +132,9 @@ int spectre_v2_test(void)
s64 miss_percent;
bool is_p9;
+ // The PMU events we use only work on Power8 or later
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+
state = get_sysfs_state();
if (state == UNKNOWN) {
printf("Error: couldn't determine spectre_v2 mitigation state?\n");
@@ -181,8 +182,24 @@ int spectre_v2_test(void)
case COUNT_CACHE_FLUSH_HW:
// These should all not affect userspace branch prediction
if (miss_percent > 15) {
+ if (miss_percent > 95) {
+ /*
+ * Such a mismatch may be caused by a system being unaware
+ * the count cache is disabled. This may be to enable
+ * guest migration between hosts with different settings.
+ * Return skip code to avoid detecting this as an error.
+ * We are not vulnerable and reporting otherwise, so
+ * missing such a mismatch is safe.
+ */
+ printf("Branch misses > 95%% unexpected in this configuration.\n");
+ printf("Count cache likely disabled without Linux knowing.\n");
+ if (state == COUNT_CACHE_FLUSH_SW)
+ printf("WARNING: Kernel performing unnecessary flushes.\n");
+ return 4;
+ }
printf("Branch misses > 15%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
+
return 1;
}
break;
@@ -190,14 +207,14 @@ int spectre_v2_test(void)
// This seems to affect userspace branch prediction a bit?
if (miss_percent > 25) {
printf("Branch misses > 25%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
break;
case COUNT_CACHE_DISABLED:
if (miss_percent < 95) {
- printf("Branch misses < 20%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Branch misses < 95%% unexpected in this configuration!\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
break;
diff --git a/tools/testing/selftests/powerpc/security/uaccess_flush.c b/tools/testing/selftests/powerpc/security/uaccess_flush.c
new file mode 100644
index 000000000000..cf80f960e38a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/uaccess_flush.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2018 IBM Corporation.
+ * Copyright 2020 Canonical Ltd.
+ */
+
+#define __SANE_USERSPACE_TYPES__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "utils.h"
+#include "flush_utils.h"
+
+int uaccess_flush_test(void)
+{
+ char *p;
+ int repetitions = 10;
+ int fd, passes = 0, iter, rc = 0;
+ struct perf_event_read v;
+ __u64 l1d_misses_total = 0;
+ unsigned long iterations = 100000, zero_size = 24 * 1024;
+ unsigned long l1d_misses_expected;
+ int rfi_flush_orig;
+ int entry_flush_orig;
+ int uaccess_flush, uaccess_flush_orig;
+
+ SKIP_IF(geteuid() != 0);
+
+ // The PMU event we use only works on Power7 or later
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
+
+ if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
+ perror("Unable to read powerpc/rfi_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
+ perror("Unable to read powerpc/entry_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (read_debugfs_file("powerpc/uaccess_flush", &uaccess_flush_orig) < 0) {
+ perror("Unable to read powerpc/entry_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ if (rfi_flush_orig != 0) {
+ if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) {
+ perror("error writing to powerpc/rfi_flush debugfs file");
+ FAIL_IF(1);
+ }
+ }
+
+ if (entry_flush_orig != 0) {
+ if (write_debugfs_file("powerpc/entry_flush", 0) < 0) {
+ perror("error writing to powerpc/entry_flush debugfs file");
+ FAIL_IF(1);
+ }
+ }
+
+ uaccess_flush = uaccess_flush_orig;
+
+ fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ FAIL_IF(fd < 0);
+
+ p = (char *)memalign(zero_size, CACHELINE_SIZE);
+
+ FAIL_IF(perf_event_enable(fd));
+
+ // disable L1 prefetching
+ set_dscr(1);
+
+ iter = repetitions;
+
+ /*
+ * We expect to see an L1D miss for each cacheline access when uaccess_flush
+ * is set. Allow a small variation on this.
+ */
+ l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
+
+again:
+ FAIL_IF(perf_event_reset(fd));
+
+ syscall_loop_uaccess(p, iterations, zero_size);
+
+ FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
+
+ if (uaccess_flush && v.l1d_misses >= l1d_misses_expected)
+ passes++;
+ else if (!uaccess_flush && v.l1d_misses < (l1d_misses_expected / 2))
+ passes++;
+
+ l1d_misses_total += v.l1d_misses;
+
+ while (--iter)
+ goto again;
+
+ if (passes < repetitions) {
+ printf("FAIL (L1D misses with uaccess_flush=%d: %llu %c %lu) [%d/%d failures]\n",
+ uaccess_flush, l1d_misses_total, uaccess_flush ? '<' : '>',
+ uaccess_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
+ repetitions - passes, repetitions);
+ rc = 1;
+ } else {
+ printf("PASS (L1D misses with uaccess_flush=%d: %llu %c %lu) [%d/%d pass]\n",
+ uaccess_flush, l1d_misses_total, uaccess_flush ? '>' : '<',
+ uaccess_flush ? repetitions * l1d_misses_expected :
+ repetitions * l1d_misses_expected / 2,
+ passes, repetitions);
+ }
+
+ if (uaccess_flush == uaccess_flush_orig) {
+ uaccess_flush = !uaccess_flush_orig;
+ if (write_debugfs_file("powerpc/uaccess_flush", uaccess_flush) < 0) {
+ perror("error writing to powerpc/uaccess_flush debugfs file");
+ return 1;
+ }
+ iter = repetitions;
+ l1d_misses_total = 0;
+ passes = 0;
+ goto again;
+ }
+
+ perf_event_disable(fd);
+ close(fd);
+
+ set_dscr(0);
+
+ if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/rfi_flush debugfs file");
+ return 1;
+ }
+
+ if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/entry_flush debugfs file");
+ return 1;
+ }
+
+ if (write_debugfs_file("powerpc/uaccess_flush", uaccess_flush_orig) < 0) {
+ perror("unable to restore original value of powerpc/uaccess_flush debugfs file");
+ return 1;
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(uaccess_flush_test, "uaccess_flush_test");
+}
diff --git a/tools/testing/selftests/powerpc/signal/.gitignore b/tools/testing/selftests/powerpc/signal/.gitignore
index dca5852a1546..9d0915777fed 100644
--- a/tools/testing/selftests/powerpc/signal/.gitignore
+++ b/tools/testing/selftests/powerpc/signal/.gitignore
@@ -1,3 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
signal
signal_tm
sigfuz
+sigreturn_vdso
+sig_sc_double_restart
+sigreturn_kernel
+sigreturn_unaligned
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index 113838fbbe7f..f679d260afc8 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -1,10 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := signal signal_tm sigfuz
+TEST_GEN_PROGS := signal signal_tm sigfuz sigreturn_vdso sig_sc_double_restart
+TEST_GEN_PROGS += sigreturn_kernel
+TEST_GEN_PROGS += sigreturn_unaligned
CFLAGS += -maltivec
$(OUTPUT)/signal_tm: CFLAGS += -mhtm
$(OUTPUT)/sigfuz: CFLAGS += -pthread -m64
+TEST_FILES := settings
+
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/signal/settings b/tools/testing/selftests/powerpc/signal/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c b/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c
new file mode 100644
index 000000000000..e3972264615b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test that a syscall does not get restarted twice, handled by trap_norestart()
+ *
+ * Based on Al's description, and a test for the bug fixed in this commit:
+ *
+ * commit 9a81c16b527528ad307843be5571111aa8d35a80
+ * Author: Al Viro <viro@zeniv.linux.org.uk>
+ * Date: Mon Sep 20 21:48:57 2010 +0100
+ *
+ * powerpc: fix double syscall restarts
+ *
+ * Make sigreturn zero regs->trap, make do_signal() do the same on all
+ * paths. As it is, signal interrupting e.g. read() from fd 512 (==
+ * ERESTARTSYS) with another signal getting unblocked when the first
+ * handler finishes will lead to restart one insn earlier than it ought
+ * to. Same for multiple signals with in-kernel handlers interrupting
+ * that sucker at the same time. Same for multiple signals of any kind
+ * interrupting that sucker on 64bit...
+ */
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "utils.h"
+
+static void SIGUSR1_handler(int sig)
+{
+ kill(getpid(), SIGUSR2);
+ /*
+ * SIGUSR2 is blocked until the handler exits, at which point it will
+ * be raised again, and the kernel will think there is a restart to be
+ * done because the pending restarted syscall has 512 (ERESTARTSYS) in
+ * r3. A buggy second restart would retreat the NIP another 4 bytes,
+ * onto the fail-case branch.
+ */
+}
+
+static void SIGUSR2_handler(int sig)
+{
+}
+
+static ssize_t raw_read(int fd, void *buf, size_t count)
+{
+ register long nr asm("r0") = __NR_read;
+ register long _fd asm("r3") = fd;
+ register void *_buf asm("r4") = buf;
+ register size_t _count asm("r5") = count;
+
+ asm volatile(
+" b 0f \n"
+" b 1f \n"
+" 0: sc 0 \n"
+" bns 2f \n"
+" neg %0,%0 \n"
+" b 2f \n"
+" 1: \n"
+" li %0,%4 \n"
+" 2: \n"
+ : "+r"(_fd), "+r"(nr), "+r"(_buf), "+r"(_count)
+ : "i"(-ENOANO)
+ : "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "cr0");
+
+ if (_fd < 0) {
+ errno = -_fd;
+ _fd = -1;
+ }
+
+ return _fd;
+}
+
+#define DATA "test 123"
+#define DLEN (strlen(DATA)+1)
+
+int test_restart(void)
+{
+ int pipefd[2];
+ pid_t pid;
+ char buf[512];
+
+ if (pipe(pipefd) == -1) {
+ perror("pipe");
+ exit(EXIT_FAILURE);
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ perror("fork");
+ exit(EXIT_FAILURE);
+ }
+
+ if (pid == 0) { /* Child reads from pipe */
+ struct sigaction act;
+ int fd;
+
+ memset(&act, 0, sizeof(act));
+ sigaddset(&act.sa_mask, SIGUSR2);
+ act.sa_handler = SIGUSR1_handler;
+ act.sa_flags = SA_RESTART;
+ if (sigaction(SIGUSR1, &act, NULL) == -1) {
+ perror("sigaction");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = SIGUSR2_handler;
+ act.sa_flags = SA_RESTART;
+ if (sigaction(SIGUSR2, &act, NULL) == -1) {
+ perror("sigaction");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Let's get ERESTARTSYS into r3 */
+ while ((fd = dup(pipefd[0])) != 512) {
+ if (fd == -1) {
+ perror("dup");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (raw_read(fd, buf, 512) == -1) {
+ if (errno == ENOANO) {
+ fprintf(stderr, "Double restart moved restart before sc instruction.\n");
+ _exit(EXIT_FAILURE);
+ }
+ perror("read");
+ exit(EXIT_FAILURE);
+ }
+
+ if (strncmp(buf, DATA, DLEN)) {
+ fprintf(stderr, "bad test string %s\n", buf);
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+
+ } else {
+ int wstatus;
+
+ usleep(100000); /* Hack to get reader waiting */
+ kill(pid, SIGUSR1);
+ usleep(100000);
+ if (write(pipefd[1], DATA, DLEN) != DLEN) {
+ perror("write");
+ exit(EXIT_FAILURE);
+ }
+ close(pipefd[0]);
+ close(pipefd[1]);
+ if (wait(&wstatus) == -1) {
+ perror("wait");
+ exit(EXIT_FAILURE);
+ }
+ if (!WIFEXITED(wstatus)) {
+ fprintf(stderr, "child exited abnormally\n");
+ exit(EXIT_FAILURE);
+ }
+
+ FAIL_IF(WEXITSTATUS(wstatus) != EXIT_SUCCESS);
+
+ return 0;
+ }
+}
+
+int main(void)
+{
+ test_harness_set_timeout(10);
+ return test_harness(test_restart, "sig sys restart");
+}
diff --git a/tools/testing/selftests/powerpc/signal/signal_tm.c b/tools/testing/selftests/powerpc/signal/signal_tm.c
index 5bf2224ef7f2..c9cf66a3daa2 100644
--- a/tools/testing/selftests/powerpc/signal/signal_tm.c
+++ b/tools/testing/selftests/powerpc/signal/signal_tm.c
@@ -56,6 +56,7 @@ static int test_signal_tm()
}
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
for (i = 0; i < MAX_ATTEMPT; i++) {
/*
diff --git a/tools/testing/selftests/powerpc/signal/sigreturn_kernel.c b/tools/testing/selftests/powerpc/signal/sigreturn_kernel.c
new file mode 100644
index 000000000000..0a1b6e591eee
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sigreturn_kernel.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that we can't sigreturn to kernel addresses, or to kernel mode.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#define MSR_PR (1ul << 14)
+
+static volatile unsigned long long sigreturn_addr;
+static volatile unsigned long long sigreturn_msr_mask;
+
+static void sigusr1_handler(int signo, siginfo_t *si, void *uc_ptr)
+{
+ ucontext_t *uc = (ucontext_t *)uc_ptr;
+
+ if (sigreturn_addr)
+ UCONTEXT_NIA(uc) = sigreturn_addr;
+
+ if (sigreturn_msr_mask)
+ UCONTEXT_MSR(uc) &= sigreturn_msr_mask;
+}
+
+static pid_t fork_child(void)
+{
+ pid_t pid;
+
+ pid = fork();
+ if (pid == 0) {
+ raise(SIGUSR1);
+ exit(0);
+ }
+
+ return pid;
+}
+
+static int expect_segv(pid_t pid)
+{
+ int child_ret;
+
+ waitpid(pid, &child_ret, 0);
+ FAIL_IF(WIFEXITED(child_ret));
+ FAIL_IF(!WIFSIGNALED(child_ret));
+ FAIL_IF(WTERMSIG(child_ret) != 11);
+
+ return 0;
+}
+
+int test_sigreturn_kernel(void)
+{
+ struct sigaction act;
+ int child_ret, i;
+ pid_t pid;
+
+ act.sa_sigaction = sigusr1_handler;
+ act.sa_flags = SA_SIGINFO;
+ sigemptyset(&act.sa_mask);
+
+ FAIL_IF(sigaction(SIGUSR1, &act, NULL));
+
+ for (i = 0; i < 2; i++) {
+ // Return to kernel
+ sigreturn_addr = 0xcull << 60;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return to kernel virtual
+ sigreturn_addr = 0xc008ull << 48;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return out of range
+ sigreturn_addr = 0xc010ull << 48;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return to no-man's land, just below PAGE_OFFSET
+ sigreturn_addr = (0xcull << 60) - (64 * 1024);
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return to no-man's land, above TASK_SIZE_4PB
+ sigreturn_addr = 0x1ull << 52;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return to 0xd space
+ sigreturn_addr = 0xdull << 60;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return to 0xe space
+ sigreturn_addr = 0xeull << 60;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Return to 0xf space
+ sigreturn_addr = 0xfull << 60;
+ pid = fork_child();
+ expect_segv(pid);
+
+ // Attempt to set PR=0 for 2nd loop (should be blocked by kernel)
+ sigreturn_msr_mask = ~MSR_PR;
+ }
+
+ printf("All children killed as expected\n");
+
+ // Don't change address, just MSR, should return to user as normal
+ sigreturn_addr = 0;
+ sigreturn_msr_mask = ~MSR_PR;
+ pid = fork_child();
+ waitpid(pid, &child_ret, 0);
+ FAIL_IF(!WIFEXITED(child_ret));
+ FAIL_IF(WIFSIGNALED(child_ret));
+ FAIL_IF(WEXITSTATUS(child_ret) != 0);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_sigreturn_kernel, "sigreturn_kernel");
+}
diff --git a/tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c b/tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c
new file mode 100644
index 000000000000..6e58ee4f0fdf
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test sigreturn to an unaligned address, ie. low 2 bits set.
+ * Nothing bad should happen.
+ * This was able to trigger warnings with CONFIG_PPC_RFI_SRR_DEBUG=y.
+ */
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+
+static void sigusr1_handler(int signo, siginfo_t *info, void *ptr)
+{
+ ucontext_t *uc = ptr;
+
+ UCONTEXT_NIA(uc) |= 3;
+}
+
+static int test_sigreturn_unaligned(void)
+{
+ struct sigaction action;
+
+ memset(&action, 0, sizeof(action));
+ action.sa_sigaction = sigusr1_handler;
+ action.sa_flags = SA_SIGINFO;
+
+ FAIL_IF(sigaction(SIGUSR1, &action, NULL) == -1);
+
+ raise(SIGUSR1);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_sigreturn_unaligned, "sigreturn_unaligned");
+}
diff --git a/tools/testing/selftests/powerpc/signal/sigreturn_vdso.c b/tools/testing/selftests/powerpc/signal/sigreturn_vdso.c
new file mode 100644
index 000000000000..e282fff0fe25
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sigreturn_vdso.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that we can take signals with and without the VDSO mapped, which trigger
+ * different paths in the signal handling code.
+ *
+ * See handle_rt_signal64() and setup_trampoline() in signal_64.c
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Ensure assert() is not compiled out
+#undef NDEBUG
+#include <assert.h>
+
+#include "utils.h"
+
+static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high)
+{
+ unsigned long start, end;
+ static char buf[4096];
+ char name[128];
+ FILE *f;
+ int rc = -1;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f) {
+ perror("fopen");
+ return -1;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n",
+ &start, &end, name);
+ if (rc == 2)
+ continue;
+
+ if (rc != 3) {
+ printf("sscanf errored\n");
+ rc = -1;
+ break;
+ }
+
+ if (strstr(name, needle)) {
+ *low = start;
+ *high = end - 1;
+ rc = 0;
+ break;
+ }
+ }
+
+ fclose(f);
+
+ return rc;
+}
+
+static volatile sig_atomic_t took_signal = 0;
+
+static void sigusr1_handler(int sig)
+{
+ took_signal++;
+}
+
+int test_sigreturn_vdso(void)
+{
+ unsigned long low, high, size;
+ struct sigaction act;
+ char *p;
+
+ act.sa_handler = sigusr1_handler;
+ act.sa_flags = 0;
+ sigemptyset(&act.sa_mask);
+
+ assert(sigaction(SIGUSR1, &act, NULL) == 0);
+
+ // Confirm the VDSO is mapped, and work out where it is
+ assert(search_proc_maps("[vdso]", &low, &high) == 0);
+ size = high - low + 1;
+ printf("VDSO is at 0x%lx-0x%lx (%lu bytes)\n", low, high, size);
+
+ kill(getpid(), SIGUSR1);
+ assert(took_signal == 1);
+ printf("Signal delivered OK with VDSO mapped\n");
+
+ // Remap the VDSO somewhere else
+ p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ assert(p != MAP_FAILED);
+ assert(mremap((void *)low, size, size, MREMAP_MAYMOVE|MREMAP_FIXED, p) != MAP_FAILED);
+ assert(search_proc_maps("[vdso]", &low, &high) == 0);
+ size = high - low + 1;
+ printf("VDSO moved to 0x%lx-0x%lx (%lu bytes)\n", low, high, size);
+
+ kill(getpid(), SIGUSR1);
+ assert(took_signal == 2);
+ printf("Signal delivered OK with VDSO moved\n");
+
+ assert(munmap((void *)low, size) == 0);
+ printf("Unmapped VDSO\n");
+
+ // Confirm the VDSO is not mapped anymore
+ assert(search_proc_maps("[vdso]", &low, &high) != 0);
+
+ // Make the stack executable
+ assert(search_proc_maps("[stack]", &low, &high) == 0);
+ size = high - low + 1;
+ mprotect((void *)low, size, PROT_READ|PROT_WRITE|PROT_EXEC);
+ printf("Remapped the stack executable\n");
+
+ kill(getpid(), SIGUSR1);
+ assert(took_signal == 3);
+ printf("Signal delivered OK with VDSO unmapped\n");
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_sigreturn_vdso, "sigreturn_vdso");
+}
diff --git a/tools/testing/selftests/powerpc/stringloops/.gitignore b/tools/testing/selftests/powerpc/stringloops/.gitignore
index 31a17e0ba884..b0dfc74aa57e 100644
--- a/tools/testing/selftests/powerpc/stringloops/.gitignore
+++ b/tools/testing/selftests/powerpc/stringloops/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
memcmp_64
memcmp_32
strlen
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile
index 7fc0623d85c3..9c39f55a58ff 100644
--- a/tools/testing/selftests/powerpc/stringloops/Makefile
+++ b/tools/testing/selftests/powerpc/stringloops/Makefile
@@ -8,7 +8,7 @@ build_32bit = $(shell if ($(CC) $(CFLAGS) -m32 -o /dev/null memcmp.c >/dev/null
TEST_GEN_PROGS := memcmp_64 strlen
-$(OUTPUT)/memcmp_64: memcmp.c
+$(OUTPUT)/memcmp_64: memcmp.c ../utils.c
$(OUTPUT)/memcmp_64: CFLAGS += -m64 -maltivec
ifeq ($(build_32bit),1)
diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c
index b1fa7546957f..cb2f18855c8d 100644
--- a/tools/testing/selftests/powerpc/stringloops/memcmp.c
+++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c
@@ -2,7 +2,9 @@
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/mman.h>
#include <time.h>
+
#include "utils.h"
#define SIZE 256
@@ -13,6 +15,9 @@
#define LARGE_MAX_OFFSET 32
#define LARGE_SIZE_START 4096
+/* This is big enough to fit LARGE_SIZE and works on 4K & 64K kernels */
+#define MAP_SIZE (64 * 1024)
+
#define MAX_OFFSET_DIFF_S1_S2 48
int vmx_count;
@@ -68,25 +73,25 @@ static void test_one(char *s1, char *s2, unsigned long max_offset,
static int testcase(bool islarge)
{
- char *s1;
- char *s2;
- unsigned long i;
-
- unsigned long comp_size = (islarge ? LARGE_SIZE : SIZE);
- unsigned long alloc_size = comp_size + MAX_OFFSET_DIFF_S1_S2;
- int iterations = islarge ? LARGE_ITERATIONS : ITERATIONS;
-
- s1 = memalign(128, alloc_size);
- if (!s1) {
- perror("memalign");
- exit(1);
- }
+ unsigned long i, comp_size, alloc_size;
+ char *p, *s1, *s2;
+ int iterations;
- s2 = memalign(128, alloc_size);
- if (!s2) {
- perror("memalign");
- exit(1);
- }
+ comp_size = (islarge ? LARGE_SIZE : SIZE);
+ alloc_size = comp_size + MAX_OFFSET_DIFF_S1_S2;
+ iterations = islarge ? LARGE_ITERATIONS : ITERATIONS;
+
+ p = mmap(NULL, 4 * MAP_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ FAIL_IF(p == MAP_FAILED);
+
+ /* Put s1/s2 at the end of a page */
+ s1 = p + MAP_SIZE - alloc_size;
+ s2 = p + 3 * MAP_SIZE - alloc_size;
+
+ /* And unmap the subsequent page to force a fault if we overread */
+ munmap(p + MAP_SIZE, MAP_SIZE);
+ munmap(p + 3 * MAP_SIZE, MAP_SIZE);
srandom(time(0));
@@ -147,6 +152,11 @@ static int testcase(bool islarge)
static int testcases(void)
{
+#ifdef __powerpc64__
+ // vcmpequd used in memcmp_64.S is v2.07
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));
+#endif
+
testcase(0);
testcase(1);
return 0;
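The testcase() rework above replaces the plain memalign() buffers with a guard-page layout: each buffer ends exactly at the edge of a mapping and the region that follows is unmapped, so any overread by the optimised memcmp faults instead of passing silently. For illustration only, the same trick in isolation (the helper below is hypothetical, not part of the patch):

/* Illustrative sketch only -- the guard-page layout used by testcase(). */
#include <sys/mman.h>

#define GUARDED_MAP_SIZE        (64 * 1024)     /* covers 4K and 64K page kernels */

static char *alloc_with_trailing_guard(unsigned long size)
{
        char *p;

        p = mmap(NULL, 2 * GUARDED_MAP_SIZE, PROT_READ | PROT_WRITE,
                 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
                return NULL;

        /* Unmap the second half; any read past the buffer now faults */
        munmap(p + GUARDED_MAP_SIZE, GUARDED_MAP_SIZE);

        /* Place the buffer flush against the unmapped region */
        return p + GUARDED_MAP_SIZE - size;
}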
diff --git a/tools/testing/selftests/powerpc/switch_endian/.gitignore b/tools/testing/selftests/powerpc/switch_endian/.gitignore
index 89e762eab676..30e962cf84d1 100644
--- a/tools/testing/selftests/powerpc/switch_endian/.gitignore
+++ b/tools/testing/selftests/powerpc/switch_endian/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
switch_endian_test
check-reversed.S
diff --git a/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S b/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
index cc4930467235..7887f78cf072 100644
--- a/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
+++ b/tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
@@ -3,9 +3,13 @@
.data
.balign 8
-message:
+success_message:
.ascii "success: switch_endian_test\n\0"
+ .balign 8
+failure_message:
+ .ascii "failure: switch_endian_test\n\0"
+
.section ".toc"
.balign 8
pattern:
@@ -64,6 +68,9 @@ FUNC_START(_start)
li r0, __NR_switch_endian
sc
+ tdi 0, 0, 0x48 // b +8 if the endian was switched
+ b .Lfail // exit if endian didn't switch
+
#include "check-reversed.S"
/* Flip back, r0 already has the switch syscall number */
@@ -71,12 +78,20 @@ FUNC_START(_start)
#include "check.S"
+ ld r4, success_message@got(%r2)
+ li r5, 28 // strlen(success_message)
+ li r14, 0 // exit status
+.Lout:
li r0, __NR_write
li r3, 1 /* stdout */
- ld r4, message@got(%r2)
- li r5, 28 /* strlen(message3) */
sc
li r0, __NR_exit
- li r3, 0
+ mr r3, r14
sc
b .
+
+.Lfail:
+ ld r4, failure_message@got(%r2)
+ li r5, 28 // strlen(failure_message)
+ li r14, 1
+ b .Lout
diff --git a/tools/testing/selftests/powerpc/syscalls/.gitignore b/tools/testing/selftests/powerpc/syscalls/.gitignore
index f0f3fcc9d802..a1e19ccdef84 100644
--- a/tools/testing/selftests/powerpc/syscalls/.gitignore
+++ b/tools/testing/selftests/powerpc/syscalls/.gitignore
@@ -1 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
ipc_unmuxed
+rtas_filter
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
index 01b22775ca87..b63f8459c704 100644
--- a/tools/testing/selftests/powerpc/syscalls/Makefile
+++ b/tools/testing/selftests/powerpc/syscalls/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-TEST_GEN_PROGS := ipc_unmuxed
+TEST_GEN_PROGS := ipc_unmuxed rtas_filter
CFLAGS += -I../../../../../usr/include
diff --git a/tools/testing/selftests/powerpc/syscalls/rtas_filter.c b/tools/testing/selftests/powerpc/syscalls/rtas_filter.c
new file mode 100644
index 000000000000..03b487f18d00
--- /dev/null
+++ b/tools/testing/selftests/powerpc/syscalls/rtas_filter.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2005-2020 IBM Corporation.
+ *
+ * Includes code from librtas (https://github.com/ibm-power-utilities/librtas/)
+ */
+
+#include <byteswap.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include "utils.h"
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define cpu_to_be32(x) bswap_32(x)
+#define be32_to_cpu(x) bswap_32(x)
+#else
+#define cpu_to_be32(x) (x)
+#define be32_to_cpu(x) (x)
+#endif
+
+#define RTAS_IO_ASSERT -1098 /* Unexpected I/O Error */
+#define RTAS_UNKNOWN_OP -1099 /* No Firmware Implementation of Function */
+#define BLOCK_SIZE 4096
+#define PAGE_SIZE 4096
+#define MAX_PAGES 64
+
+static const char *ofdt_rtas_path = "/proc/device-tree/rtas";
+
+typedef __be32 uint32_t;
+struct rtas_args {
+ __be32 token;
+ __be32 nargs;
+ __be32 nret;
+ __be32 args[16];
+ __be32 *rets; /* Pointer to return values in args[]. */
+};
+
+struct region {
+ uint64_t addr;
+ uint32_t size;
+ struct region *next;
+};
+
+int read_entire_file(int fd, char **buf, size_t *len)
+{
+ size_t buf_size = 0;
+ size_t off = 0;
+ int rc;
+
+ *buf = NULL;
+ do {
+ buf_size += BLOCK_SIZE;
+ if (*buf == NULL)
+ *buf = malloc(buf_size);
+ else
+ *buf = realloc(*buf, buf_size);
+
+ if (*buf == NULL)
+ return -ENOMEM;
+
+ rc = read(fd, *buf + off, BLOCK_SIZE);
+ if (rc < 0)
+ return -EIO;
+
+ off += rc;
+ } while (rc == BLOCK_SIZE);
+
+ if (len)
+ *len = off;
+
+ return 0;
+}
+
+static int open_prop_file(const char *prop_path, const char *prop_name, int *fd)
+{
+ char *path;
+ int len;
+
+ /* allocate enough for two strings, a slash and trailing NULL */
+ len = strlen(prop_path) + strlen(prop_name) + 1 + 1;
+ path = malloc(len);
+ if (path == NULL)
+ return -ENOMEM;
+
+ snprintf(path, len, "%s/%s", prop_path, prop_name);
+
+ *fd = open(path, O_RDONLY);
+ free(path);
+ if (*fd < 0)
+ return -errno;
+
+ return 0;
+}
+
+static int get_property(const char *prop_path, const char *prop_name,
+ char **prop_val, size_t *prop_len)
+{
+ int rc, fd;
+
+ rc = open_prop_file(prop_path, prop_name, &fd);
+ if (rc)
+ return rc;
+
+ rc = read_entire_file(fd, prop_val, prop_len);
+ close(fd);
+
+ return rc;
+}
+
+int rtas_token(const char *call_name)
+{
+ char *prop_buf = NULL;
+ size_t len;
+ int rc;
+
+ rc = get_property(ofdt_rtas_path, call_name, &prop_buf, &len);
+ if (rc < 0) {
+ rc = RTAS_UNKNOWN_OP;
+ goto err;
+ }
+
+ rc = be32_to_cpu(*(int *)prop_buf);
+
+err:
+ free(prop_buf);
+ return rc;
+}
+
+static int read_kregion_bounds(struct region *kregion)
+{
+ char *buf;
+ int fd;
+ int rc;
+
+ fd = open("/proc/ppc64/rtas/rmo_buffer", O_RDONLY);
+ if (fd < 0) {
+ printf("Could not open rmo_buffer file\n");
+ return RTAS_IO_ASSERT;
+ }
+
+ rc = read_entire_file(fd, &buf, NULL);
+ close(fd);
+ if (rc) {
+ free(buf);
+ return rc;
+ }
+
+ sscanf(buf, "%" SCNx64 " %x", &kregion->addr, &kregion->size);
+ free(buf);
+
+ if (!(kregion->size && kregion->addr) ||
+ (kregion->size > (PAGE_SIZE * MAX_PAGES))) {
+ printf("Unexpected kregion bounds\n");
+ return RTAS_IO_ASSERT;
+ }
+
+ return 0;
+}
+
+static int rtas_call(const char *name, int nargs,
+ int nrets, ...)
+{
+ struct rtas_args args;
+ __be32 *rets[16];
+ int i, rc, token;
+ va_list ap;
+
+ va_start(ap, nrets);
+
+ token = rtas_token(name);
+ if (token == RTAS_UNKNOWN_OP) {
+ // We don't care if the call doesn't exist
+ printf("call '%s' not available, skipping...", name);
+ rc = RTAS_UNKNOWN_OP;
+ goto err;
+ }
+
+ args.token = cpu_to_be32(token);
+ args.nargs = cpu_to_be32(nargs);
+ args.nret = cpu_to_be32(nrets);
+
+ for (i = 0; i < nargs; i++)
+ args.args[i] = (__be32) va_arg(ap, unsigned long);
+
+ for (i = 0; i < nrets; i++)
+ rets[i] = (__be32 *) va_arg(ap, unsigned long);
+
+ rc = syscall(__NR_rtas, &args);
+ if (rc) {
+ rc = -errno;
+ goto err;
+ }
+
+ if (nrets) {
+ *(rets[0]) = be32_to_cpu(args.args[nargs]);
+
+ for (i = 1; i < nrets; i++) {
+ *(rets[i]) = args.args[nargs + i];
+ }
+ }
+
+err:
+ va_end(ap);
+ return rc;
+}
+
+static int test(void)
+{
+ struct region rmo_region;
+ uint32_t rmo_start;
+ uint32_t rmo_end;
+ __be32 rets[1];
+ int rc;
+
+ // Test a legitimate harmless call
+ // Expected: call succeeds
+ printf("Test a permitted call, no parameters... ");
+ rc = rtas_call("get-time-of-day", 0, 1, rets);
+ printf("rc: %d\n", rc);
+ FAIL_IF(rc != 0 && rc != RTAS_UNKNOWN_OP);
+
+ // Test a prohibited call
+ // Expected: call returns -EINVAL
+ printf("Test a prohibited call... ");
+ rc = rtas_call("nvram-fetch", 0, 1, rets);
+ printf("rc: %d\n", rc);
+ FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
+
+ // Get RMO
+ rc = read_kregion_bounds(&rmo_region);
+ if (rc) {
+ printf("Couldn't read RMO region bounds, skipping remaining cases\n");
+ return 0;
+ }
+ rmo_start = rmo_region.addr;
+ rmo_end = rmo_start + rmo_region.size - 1;
+ printf("RMO range: %08x - %08x\n", rmo_start, rmo_end);
+
+ // Test a permitted call, user-supplied size, buffer inside RMO
+ // Expected: call succeeds
+ printf("Test a permitted call, user-supplied size, buffer inside RMO... ");
+ rc = rtas_call("ibm,get-system-parameter", 3, 1, 0, cpu_to_be32(rmo_start),
+ cpu_to_be32(rmo_end - rmo_start + 1), rets);
+ printf("rc: %d\n", rc);
+ FAIL_IF(rc != 0 && rc != RTAS_UNKNOWN_OP);
+
+ // Test a permitted call, user-supplied size, buffer start outside RMO
+ // Expected: call returns -EINVAL
+ printf("Test a permitted call, user-supplied size, buffer start outside RMO... ");
+ rc = rtas_call("ibm,get-system-parameter", 3, 1, 0, cpu_to_be32(rmo_end + 1),
+ cpu_to_be32(4000), rets);
+ printf("rc: %d\n", rc);
+ FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
+
+ // Test a permitted call, user-supplied size, buffer end outside RMO
+ // Expected: call returns -EINVAL
+ printf("Test a permitted call, user-supplied size, buffer end outside RMO... ");
+ rc = rtas_call("ibm,get-system-parameter", 3, 1, 0, cpu_to_be32(rmo_start),
+ cpu_to_be32(rmo_end - rmo_start + 2), rets);
+ printf("rc: %d\n", rc);
+ FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
+
+ // Test a permitted call, fixed size, buffer end outside RMO
+ // Expected: call returns -EINVAL
+ printf("Test a permitted call, fixed size, buffer end outside RMO... ");
+ rc = rtas_call("ibm,configure-connector", 2, 1, cpu_to_be32(rmo_end - 4000), 0, rets);
+ printf("rc: %d\n", rc);
+ FAIL_IF(rc != -EINVAL && rc != RTAS_UNKNOWN_OP);
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test, "rtas_filter");
+}
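The rtas_filter test above exercises the kernel's RTAS syscall filter by building a struct rtas_args by hand and handing it to syscall(__NR_rtas, ...). As a rough, hedged illustration of that calling convention only (powerpc-only, with a placeholder token instead of one looked up under /proc/device-tree/rtas, and without the byte-swapping a real caller needs on little-endian hosts), the shape of an invocation the filter is expected to reject with -EINVAL looks like this sketch:

/*
 * Illustrative sketch, not part of the patch: a call whose token is not on
 * the kernel's allow-list should be rejected with -EINVAL.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct rtas_args {
	uint32_t token;
	uint32_t nargs;
	uint32_t nret;
	uint32_t args[16];
	uint32_t *rets;
};

int main(void)
{
	struct rtas_args args = {
		.token = 0,	/* placeholder: a real caller reads the token from the device tree */
		.nargs = 0,
		.nret  = 1,
	};

	if (syscall(__NR_rtas, &args) == -1 && errno == EINVAL)
		printf("rtas call was filtered as expected\n");
	else
		printf("rtas call was not filtered (or failed differently)\n");

	return 0;
}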
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 98f2708d86cc..d8900a0c47a1 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
tm-resched-dscr
tm-syscall
tm-signal-msr-resv
@@ -13,6 +14,7 @@ tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
tm-signal-context-force-tm
tm-signal-sigreturn-nt
+tm-signal-pagefault
tm-vmx-unavail
tm-unavailable
tm-trap
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index b15a1a325bd0..5881e97c73c1 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -5,7 +5,9 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
$(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt \
- tm-signal-context-force-tm tm-poison
+ tm-signal-context-force-tm tm-poison tm-signal-pagefault
+
+TEST_FILES := settings
top_srcdir = ../../../../..
include ../../lib.mk
@@ -22,6 +24,8 @@ $(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
$(OUTPUT)/tm-signal-context-force-tm: CFLAGS += -pthread -m64
+$(OUTPUT)/tm-signal-pagefault: CFLAGS += -pthread -m64
+$(OUTPUT)/tm-poison: CFLAGS += -m64
SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/settings b/tools/testing/selftests/powerpc/tm/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/settings
@@ -0,0 +1 @@
+timeout=0
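The settings file added here uses the kselftest runner's per-directory configuration: a timeout=<seconds> line overrides the default per-test timeout, and timeout=0 lets the long-running TM tests run without being killed. As a hypothetical example, a directory that merely needs a longer limit rather than none could instead carry a single line such as:

timeout=300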
diff --git a/tools/testing/selftests/powerpc/tm/tm-exec.c b/tools/testing/selftests/powerpc/tm/tm-exec.c
index 260cfdb97d23..c59919d6710d 100644
--- a/tools/testing/selftests/powerpc/tm/tm-exec.c
+++ b/tools/testing/selftests/powerpc/tm/tm-exec.c
@@ -27,6 +27,7 @@ static char *path;
static int test_exec(void)
{
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
asm __volatile__(
"tbegin.;"
diff --git a/tools/testing/selftests/powerpc/tm/tm-fork.c b/tools/testing/selftests/powerpc/tm/tm-fork.c
index 6efa5a685a77..c27b935f0e9f 100644
--- a/tools/testing/selftests/powerpc/tm/tm-fork.c
+++ b/tools/testing/selftests/powerpc/tm/tm-fork.c
@@ -21,6 +21,7 @@
int test_fork(void)
{
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
asm __volatile__(
"tbegin.;"
diff --git a/tools/testing/selftests/powerpc/tm/tm-poison.c b/tools/testing/selftests/powerpc/tm/tm-poison.c
index 977558497c16..a7bbf034b5bb 100644
--- a/tools/testing/selftests/powerpc/tm/tm-poison.c
+++ b/tools/testing/selftests/powerpc/tm/tm-poison.c
@@ -20,13 +20,12 @@
#include <sched.h>
#include <sys/types.h>
#include <signal.h>
-#include <inttypes.h>
#include "tm.h"
int tm_poison_test(void)
{
- int pid;
+ int cpu, pid;
cpu_set_t cpuset;
uint64_t poison = 0xdeadbeefc0dec0fe;
uint64_t unknown = 0;
@@ -34,11 +33,15 @@ int tm_poison_test(void)
bool fail_vr = false;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
+
+ cpu = pick_online_cpu();
+ FAIL_IF(cpu < 0);
- /* Attach both Child and Parent to CPU 0 */
+ // Attach both Child and Parent to the same CPU
CPU_ZERO(&cpuset);
- CPU_SET(0, &cpuset);
- sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ CPU_SET(cpu, &cpuset);
+ FAIL_IF(sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0);
pid = fork();
if (!pid) {
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index 4cdb83964bb3..85c940ae6ff8 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -40,6 +40,7 @@ int test_body(void)
uint64_t rv, dscr1 = 1, dscr2, texasr;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
printf("Check DSCR TM context switch: ");
fflush(stdout);
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
index 254f912ad611..657d755b2905 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
@@ -79,6 +79,7 @@ static int tm_signal_context_chk_fpu()
pid_t pid = getpid();
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
act.sa_sigaction = signal_usr1;
sigemptyset(&act.sa_mask);
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
index 0cc680f61828..400fa70ca71e 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
@@ -81,6 +81,7 @@ static int tm_signal_context_chk_gpr()
pid_t pid = getpid();
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
act.sa_sigaction = signal_usr1;
sigemptyset(&act.sa_mask);
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
index b6d52730a0d8..d628fd302b28 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
@@ -104,6 +104,7 @@ static int tm_signal_context_chk()
pid_t pid = getpid();
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
act.sa_sigaction = signal_usr1;
sigemptyset(&act.sa_mask);
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
index 8e25e2072ecd..9bd869245bad 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
@@ -153,6 +153,7 @@ static int tm_signal_context_chk()
pid_t pid = getpid();
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
act.sa_sigaction = signal_usr1;
sigemptyset(&act.sa_mask);
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
index 31717625f318..421cb082f6be 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
@@ -42,9 +42,10 @@
#endif
/* Setting contexts because the test will crash and we want to recover */
-ucontext_t init_context, main_context;
+ucontext_t init_context;
-static int count, first_time;
+/* count is changed in the signal handler, so it must be volatile */
+static volatile int count;
void usr_signal_handler(int signo, siginfo_t *si, void *uc)
{
@@ -98,11 +99,6 @@ void usr_signal_handler(int signo, siginfo_t *si, void *uc)
void seg_signal_handler(int signo, siginfo_t *si, void *uc)
{
- if (count == COUNT_MAX) {
- /* Return to tm_signal_force_msr() and exit */
- setcontext(&main_context);
- }
-
count++;
/* Reexecute the test */
@@ -126,37 +122,41 @@ void tm_trap_test(void)
*/
getcontext(&init_context);
- /* Allocated an alternative signal stack area */
- ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
- ss.ss_size = SIGSTKSZ;
- ss.ss_flags = 0;
+ while (count < COUNT_MAX) {
+ /* Allocate an alternative signal stack area */
+ ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
- if (ss.ss_sp == (void *)-1) {
- perror("mmap error\n");
- exit(-1);
- }
+ if (ss.ss_sp == (void *)-1) {
+ perror("mmap error\n");
+ exit(-1);
+ }
- /* Force the allocation through a page fault */
- if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
- perror("madvise\n");
- exit(-1);
- }
+ /* Force the allocation through a page fault */
+ if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
+ perror("madvise\n");
+ exit(-1);
+ }
- /* Setting an alternative stack to generate a page fault when
- * the signal is raised.
- */
- if (sigaltstack(&ss, NULL)) {
- perror("sigaltstack\n");
- exit(-1);
+ /*
+ * Setting an alternative stack to generate a page fault when
+ * the signal is raised.
+ */
+ if (sigaltstack(&ss, NULL)) {
+ perror("sigaltstack\n");
+ exit(-1);
+ }
+
+ /* The signal handler will enable MSR_TS */
+ sigaction(SIGUSR1, &usr_sa, NULL);
+ /* If it does not crash it might segfault; catch that so we can retest */
+ sigaction(SIGSEGV, &seg_sa, NULL);
+
+ raise(SIGUSR1);
+ count++;
}
-
- /* The signal handler will enable MSR_TS */
- sigaction(SIGUSR1, &usr_sa, NULL);
- /* If it does not crash, it will segfault, avoid it to retest */
- sigaction(SIGSEGV, &seg_sa, NULL);
-
- raise(SIGUSR1);
}
int tm_signal_context_force_tm(void)
@@ -169,11 +169,7 @@ int tm_signal_context_force_tm(void)
*/
SKIP_IF(!is_ppc64le());
- /* Will get back here after COUNT_MAX interactions */
- getcontext(&main_context);
-
- if (!first_time++)
- tm_trap_test();
+ tm_trap_test();
return EXIT_SUCCESS;
}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c b/tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c
new file mode 100644
index 000000000000..0b84c9208d62
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020, Gustavo Luiz Duarte, IBM Corp.
+ *
+ * This test starts a transaction and triggers a signal, forcing a pagefault to
+ * happen when the kernel signal handling code touches the user signal stack.
+ *
+ * In order to avoid pre-faulting the signal stack memory and to force the
+ * pagefault to happen precisely in the kernel signal handling code, the
+ * pagefault handling is done in userspace using the userfaultfd facility.
+ *
+ * Further pagefaults are triggered by crafting the signal handler's ucontext
+ * to point to additional memory regions managed by the userfaultfd, using
+ * the same mechanism used to avoid pre-faulting the signal stack memory.
+ *
+ * On failure (the bug is present) the kernel crashes or never returns to
+ * userspace. If the bug is not present, the test completes almost immediately.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/userfaultfd.h>
+#include <poll.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <signal.h>
+#include <errno.h>
+
+#include "tm.h"
+
+
+#define UF_MEM_SIZE 655360 /* 10 x 64k pages */
+
+/* Memory handled by userfaultfd */
+static char *uf_mem;
+static size_t uf_mem_offset = 0;
+
+/*
+ * Data that will be copied into the faulting pages (instead of zero-filled
+ * pages). This is used to make the test more reliable and avoid segfaulting
+ * when we return from the signal handler. Since we are making the signal
+ * handler's ucontext point to newly allocated memory, when that memory is
+ * paged-in it will contain the expected content.
+ */
+static char backing_mem[UF_MEM_SIZE];
+
+static size_t pagesize;
+
+/*
+ * Return a chunk of at least 'size' bytes of memory that will be handled by
+ * userfaultfd. If 'backing_data' is not NULL, its content will be saved to
+ * 'backing_mem' and then copied into the faulting pages when the page fault
+ * is handled.
+ */
+void *get_uf_mem(size_t size, void *backing_data)
+{
+ void *ret;
+
+ if (uf_mem_offset + size > UF_MEM_SIZE) {
+ fprintf(stderr, "Requesting more uf_mem than expected!\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = &uf_mem[uf_mem_offset];
+
+ /* Save the data that will be copied into the faulting page */
+ if (backing_data != NULL)
+ memcpy(&backing_mem[uf_mem_offset], backing_data, size);
+
+ /* Reserve the requested amount of uf_mem */
+ uf_mem_offset += size;
+ /* Keep uf_mem_offset aligned to the page size (round up) */
+ uf_mem_offset = (uf_mem_offset + pagesize - 1) & ~(pagesize - 1);
+
+ return ret;
+}
+
+void *fault_handler_thread(void *arg)
+{
+ struct uffd_msg msg; /* Data read from userfaultfd */
+ long uffd; /* userfaultfd file descriptor */
+ struct uffdio_copy uffdio_copy;
+ struct pollfd pollfd;
+ ssize_t nread, offset;
+
+ uffd = (long) arg;
+
+ for (;;) {
+ pollfd.fd = uffd;
+ pollfd.events = POLLIN;
+ if (poll(&pollfd, 1, -1) == -1) {
+ perror("poll() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ nread = read(uffd, &msg, sizeof(msg));
+ if (nread == 0) {
+ fprintf(stderr, "read(): EOF on userfaultfd\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (nread == -1) {
+ perror("read() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /* We expect only one kind of event */
+ if (msg.event != UFFD_EVENT_PAGEFAULT) {
+ fprintf(stderr, "Unexpected event on userfaultfd\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * We need to handle page faults in units of pages(!).
+ * So, round faulting address down to page boundary.
+ */
+ uffdio_copy.dst = msg.arg.pagefault.address & ~(pagesize-1);
+
+ offset = (char *) uffdio_copy.dst - uf_mem;
+ uffdio_copy.src = (unsigned long) &backing_mem[offset];
+
+ uffdio_copy.len = pagesize;
+ uffdio_copy.mode = 0;
+ uffdio_copy.copy = 0;
+ if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy) == -1) {
+ perror("ioctl-UFFDIO_COPY failed");
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+void setup_uf_mem(void)
+{
+ long uffd; /* userfaultfd file descriptor */
+ pthread_t thr;
+ struct uffdio_api uffdio_api;
+ struct uffdio_register uffdio_register;
+ int ret;
+
+ pagesize = sysconf(_SC_PAGE_SIZE);
+
+ /* Create and enable userfaultfd object */
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ if (uffd == -1) {
+ perror("userfaultfd() failed");
+ exit(EXIT_FAILURE);
+ }
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) {
+ perror("ioctl-UFFDIO_API failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Create a private anonymous mapping. The memory will be demand-zero
+ * paged, that is, not yet allocated. When we actually touch the memory
+ * the related page will be allocated via the userfaultfd mechanism.
+ */
+ uf_mem = mmap(NULL, UF_MEM_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (uf_mem == MAP_FAILED) {
+ perror("mmap() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Register the memory range of the mapping we've just mapped to be
+ * handled by the userfaultfd object. In 'mode' we request to track
+ * missing pages (i.e. pages that have not yet been faulted-in).
+ */
+ uffdio_register.range.start = (unsigned long) uf_mem;
+ uffdio_register.range.len = UF_MEM_SIZE;
+ uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) {
+ perror("ioctl-UFFDIO_REGISTER");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Create a thread that will process the userfaultfd events */
+ ret = pthread_create(&thr, NULL, fault_handler_thread, (void *) uffd);
+ if (ret != 0) {
+ fprintf(stderr, "pthread_create(): Error. Returned %d\n", ret);
+ exit(EXIT_FAILURE);
+ }
+}
+
+/*
+ * Assumption: the signal was delivered while userspace was in transactional or
+ * suspended state, i.e. uc->uc_link != NULL.
+ */
+void signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = uc;
+
+ /* Skip 'trap' after returning, otherwise we get a SIGTRAP again */
+ ucp->uc_link->uc_mcontext.regs->nip += 4;
+
+ ucp->uc_mcontext.v_regs =
+ get_uf_mem(sizeof(elf_vrreg_t), ucp->uc_mcontext.v_regs);
+
+ ucp->uc_link->uc_mcontext.v_regs =
+ get_uf_mem(sizeof(elf_vrreg_t), ucp->uc_link->uc_mcontext.v_regs);
+
+ ucp->uc_link = get_uf_mem(sizeof(ucontext_t), ucp->uc_link);
+}
+
+bool have_userfaultfd(void)
+{
+ long rc;
+
+ errno = 0;
+ rc = syscall(__NR_userfaultfd, -1);
+
+ return rc == 0 || errno != ENOSYS;
+}
+
+int tm_signal_pagefault(void)
+{
+ struct sigaction sa;
+ stack_t ss;
+
+ SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
+ SKIP_IF(!have_userfaultfd());
+
+ setup_uf_mem();
+
+ /*
+ * Set an alternative stack that will generate a page fault when the
+ * signal is raised. The page fault will be treated via userfaultfd,
+ * i.e. via fault_handler_thread.
+ */
+ ss.ss_sp = get_uf_mem(SIGSTKSZ, NULL);
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
+ if (sigaltstack(&ss, NULL) == -1) {
+ perror("sigaltstack() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ sa.sa_sigaction = signal_handler;
+ if (sigaction(SIGTRAP, &sa, NULL) == -1) {
+ perror("sigaction() failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Trigger a SIGTRAP in transactional state */
+ asm __volatile__(
+ "tbegin.;"
+ "beq 1f;"
+ "trap;"
+ "1: ;"
+ : : : "memory");
+
+ /* Trigger a SIGTRAP in suspended state */
+ asm __volatile__(
+ "tbegin.;"
+ "beq 1f;"
+ "tsuspend.;"
+ "trap;"
+ "tresume.;"
+ "1: ;"
+ : : : "memory");
+
+ return EXIT_SUCCESS;
+}
+
+int main(int argc, char **argv)
+{
+ /*
+ * Depending on kernel config, the TM Bad Thing might not result in a
+ * crash, instead the kernel never returns control back to userspace, so
+ * set a tight timeout. If the test passes it completes almost
+ * immediately.
+ */
+ test_harness_set_timeout(2);
+ return test_harness(tm_signal_pagefault, "tm_signal_pagefault");
+}
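tm-signal-pagefault.c gates itself on have_userfaultfd(), which probes for the syscall by invoking it with an invalid flags argument and treating anything other than ENOSYS as "present". A small, hedged sketch of that probe pattern in isolation (the helper name syscall_available is mine, not from the patch, and the trick only suits syscalls whose first argument is a flags word, as userfaultfd's is):

#include <errno.h>
#include <stdbool.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Sketch of the availability probe used by have_userfaultfd() above: call the
 * syscall with a deliberately invalid argument. ENOSYS means the kernel lacks
 * it; any other outcome (success, EINVAL, ...) means it exists.
 */
static bool syscall_available(long nr)
{
	errno = 0;
	return syscall(nr, -1) != -1 || errno != ENOSYS;
}

A caller would then write something like syscall_available(__NR_userfaultfd) in place of the open-coded check.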
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
index 07c388147b75..06b801906f27 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
@@ -32,6 +32,7 @@ int tm_signal_sigreturn_nt(void)
struct sigaction trap_sa;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
trap_sa.sa_flags = SA_SIGINFO;
trap_sa.sa_sigaction = trap_signal_handler;
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
index cdcf8c5bbbc7..68807aac8dd3 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
@@ -35,6 +35,7 @@ int tm_signal_stack()
int pid;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
pid = fork();
if (pid < 0)
diff --git a/tools/testing/selftests/powerpc/tm/tm-sigreturn.c b/tools/testing/selftests/powerpc/tm/tm-sigreturn.c
index 9a6017a1d769..ffe4e5515f33 100644
--- a/tools/testing/selftests/powerpc/tm/tm-sigreturn.c
+++ b/tools/testing/selftests/powerpc/tm/tm-sigreturn.c
@@ -55,6 +55,7 @@ int tm_sigreturn(void)
uint64_t ret = 0;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
SKIP_IF(!is_ppc64le());
memset(&sa, 0, sizeof(sa));
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
index bd1ca25febe4..aed632d29fff 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
#include <asm/unistd.h>
.text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
1:
li r3, -1
blr
+
+
+.macro scv level
+ .long (0x44000001 | (\level) << 5)
+.endm
+
+FUNC_START(getppid_scv_tm_active)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ scv 0
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
+
+FUNC_START(getppid_scv_tm_suspended)
+ PUSH_BASIC_STACK(0)
+ tbegin.
+ beq 1f
+ li r0, __NR_getppid
+ tsuspend.
+ scv 0
+ tresume.
+ tend.
+ POP_BASIC_STACK(0)
+ blr
+1:
+ li r3, -1
+ POP_BASIC_STACK(0)
+ blr
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
index becb8207b432..b763354c2eb4 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall.c
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -19,24 +19,36 @@
#include "utils.h"
#include "tm.h"
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+#endif
+
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
-#define TM_RETRIES 100
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
{
int i;
pid_t pid;
for (i = 0; i < TM_RETRIES; i++) {
- if (suspend)
- pid = getppid_tm_suspended();
- else
- pid = getppid_tm_active();
+ if (suspend) {
+ if (scv)
+ pid = getppid_scv_tm_suspended();
+ else
+ pid = getppid_tm_suspended();
+ } else {
+ if (scv)
+ pid = getppid_scv_tm_active();
+ else
+ pid = getppid_tm_active();
+ }
if (pid >= 0)
return pid;
@@ -67,6 +79,7 @@ int tm_syscall(void)
struct timeval end, now;
SKIP_IF(!have_htm_nosc());
+ SKIP_IF(htm_is_synthetic());
setbuf(stdout, NULL);
@@ -82,15 +95,24 @@ int tm_syscall(void)
* Test a syscall within a suspended transaction and verify
* that it succeeds.
*/
- FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
/*
* Test a syscall within an active transaction and verify that
* it fails with the correct failure code.
*/
- FAIL_IF(getppid_tm(false) != -1); /* Should fail... */
+ FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */
FAIL_IF(!failure_is_persistent()); /* ...persistently... */
FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+
+ /* Now do it all again with scv if it is available. */
+ if (have_hwcap2(PPC_FEATURE2_SCV)) {
+ FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+ FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */
+ FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+ FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
+ }
+
gettimeofday(&now, 0);
}
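The scv cases above are guarded by have_hwcap2(PPC_FEATURE2_SCV), which the selftest harness resolves from the auxiliary vector. A minimal standalone equivalent, sketched here assuming only glibc's getauxval() and the hwcap bit value defined in the patch, would be:

/* Illustrative sketch, not part of the patch: detect scv support via AT_HWCAP2. */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

#ifndef PPC_FEATURE2_SCV
#define PPC_FEATURE2_SCV 0x00100000	/* scv syscall, as defined in tm-syscall.c above */
#endif

int main(void)
{
	if (getauxval(AT_HWCAP2) & PPC_FEATURE2_SCV)
		printf("scv available: the scv TM cases will run\n");
	else
		printf("no scv: the scv TM cases are skipped\n");
	return 0;
}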
diff --git a/tools/testing/selftests/powerpc/tm/tm-tar.c b/tools/testing/selftests/powerpc/tm/tm-tar.c
index 03be8c47292b..f2a9137f3c1e 100644
--- a/tools/testing/selftests/powerpc/tm/tm-tar.c
+++ b/tools/testing/selftests/powerpc/tm/tm-tar.c
@@ -26,6 +26,7 @@ int test_tar(void)
int i;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
SKIP_IF(!is_ppc64le());
for (i = 0; i < num_loops; i++)
diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
index 17becf3dcee4..dd5ddffa28b7 100644
--- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
@@ -33,19 +33,13 @@
#include "utils.h"
#include "tm.h"
-int num_loops = 10000;
+int num_loops = 1000000;
int passed = 1;
void tfiar_tfhar(void *in)
{
- int i, cpu;
unsigned long tfhar, tfhar_rd, tfiar, tfiar_rd;
- cpu_set_t cpuset;
-
- CPU_ZERO(&cpuset);
- cpu = (unsigned long)in >> 1;
- CPU_SET(cpu, &cpuset);
- sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ int i;
/* TFIAR: Last bit has to be high so userspace can read register */
tfiar = ((unsigned long)in) + 1;
@@ -102,6 +96,7 @@ int test_tmspr()
unsigned long i;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
/* To cause some context switching */
thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN);
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c
index 601f0c1d450d..97cb74768e30 100644
--- a/tools/testing/selftests/powerpc/tm/tm-trap.c
+++ b/tools/testing/selftests/powerpc/tm/tm-trap.c
@@ -66,7 +66,7 @@ void trap_signal_handler(int signo, siginfo_t *si, void *uc)
/* Get thread endianness: extract bit LE from MSR */
thread_endianness = MSR_LE & ucp->uc_mcontext.gp_regs[PT_MSR];
- /***
+ /*
* Little-Endian Machine
*/
@@ -126,7 +126,7 @@ void trap_signal_handler(int signo, siginfo_t *si, void *uc)
}
}
- /***
+ /*
* Big-Endian Machine
*/
@@ -247,8 +247,7 @@ void *pong(void *not_used)
int tm_trap_test(void)
{
uint16_t k = 1;
-
- int rc;
+ int cpu, rc;
pthread_attr_t attr;
cpu_set_t cpuset;
@@ -256,6 +255,7 @@ int tm_trap_test(void)
struct sigaction trap_sa;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
trap_sa.sa_flags = SA_SIGINFO;
trap_sa.sa_sigaction = trap_signal_handler;
@@ -267,9 +267,12 @@ int tm_trap_test(void)
usr1_sa.sa_sigaction = usr1_signal_handler;
sigaction(SIGUSR1, &usr1_sa, NULL);
- /* Set only CPU 0 in the mask. Both threads will be bound to cpu 0. */
+ cpu = pick_online_cpu();
+ FAIL_IF(cpu < 0);
+
+ // Set only one CPU in the mask. Both threads will be bound to that CPU.
CPU_ZERO(&cpuset);
- CPU_SET(0, &cpuset);
+ CPU_SET(cpu, &cpuset);
/* Init pthread attribute */
rc = pthread_attr_init(&attr);
diff --git a/tools/testing/selftests/powerpc/tm/tm-unavailable.c b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
index 2ca2fccb0a3e..6bf1b65b020d 100644
--- a/tools/testing/selftests/powerpc/tm/tm-unavailable.c
+++ b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
@@ -338,16 +338,20 @@ void test_fp_vec(int fp, int vec, pthread_attr_t *attr)
int tm_unavailable_test(void)
{
- int rc, exception; /* FP = 0, VEC = 1, VSX = 2 */
+ int cpu, rc, exception; /* FP = 0, VEC = 1, VSX = 2 */
pthread_t t1;
pthread_attr_t attr;
cpu_set_t cpuset;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
- /* Set only CPU 0 in the mask. Both threads will be bound to CPU 0. */
+ cpu = pick_online_cpu();
+ FAIL_IF(cpu < 0);
+
+ // Set only one CPU in the mask. Both threads will be bound to that CPU.
CPU_ZERO(&cpuset);
- CPU_SET(0, &cpuset);
+ CPU_SET(cpu, &cpuset);
/* Init pthread attribute. */
rc = pthread_attr_init(&attr);
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
index e2a0c07e8362..34364ed2b6b7 100644
--- a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
+++ b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
@@ -17,7 +17,6 @@
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
-#include <pthread.h>
#include "tm.h"
#include "utils.h"
@@ -92,6 +91,7 @@ int tm_vmx_unavail_test()
pthread_t *thread;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
passed = 1;
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
index c1e788a6df47..1640e7ead69b 100644
--- a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
+++ b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
@@ -46,6 +46,7 @@ int test_vmxcopy()
uint64_t aborted = 0;
SKIP_IF(!have_htm());
+ SKIP_IF(htm_is_synthetic());
SKIP_IF(!is_ppc64le());
fd = mkstemp(tmpfile);
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index c402464b038f..c03c6e778876 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -6,11 +6,13 @@
#ifndef _SELFTESTS_POWERPC_TM_TM_H
#define _SELFTESTS_POWERPC_TM_TM_H
-#include <asm/tm.h>
-#include <asm/cputable.h>
#include <stdbool.h>
+#include <asm/tm.h>
#include "utils.h"
+#include "reg.h"
+
+#define TM_RETRIES 100
static inline bool have_htm(void)
{
@@ -32,6 +34,39 @@ static inline bool have_htm_nosc(void)
#endif
}
+/*
+ * Transactional Memory was removed in ISA 3.1. A synthetic TM implementation
+ * is provided on P10 for threads running in P8/P9 compatibility mode. The
+ * synthetic implementation immediately fails after tbegin. This failure sets
+ * Bit 7 (Failure Persistent) and Bit 15 (Implementation-specific).
+ */
+static inline bool htm_is_synthetic(void)
+{
+ int i;
+
+ /*
+ * Per the ISA, the Failure Persistent bit may be incorrect. Try a few
+ * times in case we got an Implementation-specific failure on a
+ * non-ISA-v3.1 system. On such systems the Implementation-specific failure
+ * should not be persistent.
+ */
+ for (i = 0; i < TM_RETRIES; i++) {
+ asm volatile(
+ "tbegin.;"
+ "beq 1f;"
+ "tend.;"
+ "1:"
+ :
+ :
+ : "memory");
+
+ if ((__builtin_get_texasr() & (TEXASR_FP | TEXASR_IC)) !=
+ (TEXASR_FP | TEXASR_IC))
+ break;
+ }
+ return i == TM_RETRIES;
+}
+
static inline long failure_code(void)
{
return __builtin_get_texasru() >> 24;
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index 5ee0e98c4896..1f36ee1a909a 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -10,12 +10,12 @@
#include <fcntl.h>
#include <link.h>
#include <sched.h>
-#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
+#include <sys/sysinfo.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>
@@ -88,28 +88,40 @@ void *get_auxv_entry(int type)
int pick_online_cpu(void)
{
- cpu_set_t mask;
- int cpu;
+ int ncpus, cpu = -1;
+ cpu_set_t *mask;
+ size_t size;
+
+ ncpus = get_nprocs_conf();
+ size = CPU_ALLOC_SIZE(ncpus);
+ mask = CPU_ALLOC(ncpus);
+ if (!mask) {
+ perror("malloc");
+ return -1;
+ }
- CPU_ZERO(&mask);
+ CPU_ZERO_S(size, mask);
- if (sched_getaffinity(0, sizeof(mask), &mask)) {
+ if (sched_getaffinity(0, size, mask)) {
perror("sched_getaffinity");
- return -1;
+ goto done;
}
/* We prefer a primary thread, but skip 0 */
- for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
- if (CPU_ISSET(cpu, &mask))
- return cpu;
+ for (cpu = 8; cpu < ncpus; cpu += 8)
+ if (CPU_ISSET_S(cpu, size, mask))
+ goto done;
/* Search for anything, but in reverse */
- for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
- if (CPU_ISSET(cpu, &mask))
- return cpu;
+ for (cpu = ncpus - 1; cpu >= 0; cpu--)
+ if (CPU_ISSET_S(cpu, size, mask))
+ goto done;
printf("No cpus in affinity mask?!\n");
- return -1;
+
+done:
+ CPU_FREE(mask);
+ return cpu;
}
bool is_ppc64le(void)
@@ -260,36 +272,32 @@ int perf_event_reset(int fd)
return 0;
}
-static void sigill_handler(int signr, siginfo_t *info, void *unused)
+int using_hash_mmu(bool *using_hash)
{
- static int warned = 0;
- ucontext_t *ctx = (ucontext_t *)unused;
- unsigned long *pc = &UCONTEXT_NIA(ctx);
-
- /* mtspr 3,RS to check for move to DSCR below */
- if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
- if (!warned++)
- printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
- *pc += 4;
- } else {
- printf("SIGILL at %p\n", pc);
- abort();
- }
-}
+ char line[128];
+ FILE *f;
+ int rc;
-void set_dscr(unsigned long val)
-{
- static int init = 0;
- struct sigaction sa;
-
- if (!init) {
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = sigill_handler;
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGILL, &sa, NULL))
- perror("sigill_handler");
- init = 1;
+ f = fopen("/proc/cpuinfo", "r");
+ FAIL_IF(!f);
+
+ rc = 0;
+ while (fgets(line, sizeof(line), f) != NULL) {
+ if (!strcmp(line, "MMU : Hash\n") ||
+ !strcmp(line, "platform : Cell\n") ||
+ !strcmp(line, "platform : PowerMac\n")) {
+ *using_hash = true;
+ goto out;
+ }
+
+ if (strcmp(line, "MMU : Radix\n") == 0) {
+ *using_hash = false;
+ goto out;
+ }
}
- asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+ rc = -1;
+out:
+ fclose(f);
+ return rc;
}
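The pick_online_cpu() rework above sizes the affinity mask dynamically with CPU_ALLOC()/CPU_ISSET_S(), so it keeps working on systems with more CPUs than the static CPU_SETSIZE, and the TM tests that used to hard-code CPU 0 now pin to whatever CPU it returns. A hedged sketch of that caller-side pattern (bind_to_online_cpu is an illustrative name, not from the patch; pick_online_cpu() is assumed to be declared in the selftests' utils.h):

#define _GNU_SOURCE
#include <sched.h>

/* Rewritten in utils.c above; returns an online CPU number or -1. */
extern int pick_online_cpu(void);

/* Pin the calling process to one online CPU instead of assuming CPU 0. */
static int bind_to_online_cpu(void)
{
	cpu_set_t cpuset;
	int cpu = pick_online_cpu();

	if (cpu < 0)
		return -1;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	return sched_setaffinity(0, sizeof(cpuset), &cpuset);
}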
diff --git a/tools/testing/selftests/powerpc/vphn/.gitignore b/tools/testing/selftests/powerpc/vphn/.gitignore
index 7c04395010cb..b744aedfd1f2 100644
--- a/tools/testing/selftests/powerpc/vphn/.gitignore
+++ b/tools/testing/selftests/powerpc/vphn/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
test-vphn