-rw-r--r--  Documentation/RCU/NMI-RCU.rst | 3
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 14
-rw-r--r--  Documentation/kbuild/makefiles.rst | 1
-rw-r--r--  Documentation/process/magic-number.rst | 1
-rw-r--r--  Documentation/translations/it_IT/process/magic-number.rst | 1
-rw-r--r--  Documentation/translations/zh_CN/process/magic-number.rst | 1
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  arch/Kconfig | 32
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/Makefile | 1
-rw-r--r--  arch/alpha/oprofile/Makefile | 20
-rw-r--r--  arch/alpha/oprofile/common.c | 189
-rw-r--r--  arch/alpha/oprofile/op_impl.h | 55
-rw-r--r--  arch/alpha/oprofile/op_model_ev4.c | 114
-rw-r--r--  arch/alpha/oprofile/op_model_ev5.c | 209
-rw-r--r--  arch/alpha/oprofile/op_model_ev6.c | 101
-rw-r--r--  arch/alpha/oprofile/op_model_ev67.c | 261
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/Makefile | 2
-rw-r--r--  arch/arc/oprofile/Makefile | 10
-rw-r--r--  arch/arc/oprofile/common.c | 23
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/Makefile | 2
-rw-r--r--  arch/arm/configs/bcm2835_defconfig | 1
-rw-r--r--  arch/arm/configs/cns3420vb_defconfig | 1
-rw-r--r--  arch/arm/configs/corgi_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v4_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/keystone_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/mv78xx0_defconfig | 1
-rw-r--r--  arch/arm/configs/mvebu_v5_defconfig | 1
-rw-r--r--  arch/arm/configs/omap1_defconfig | 1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/orion5x_defconfig | 1
-rw-r--r--  arch/arm/configs/pxa_defconfig | 1
-rw-r--r--  arch/arm/configs/qcom_defconfig | 1
-rw-r--r--  arch/arm/configs/socfpga_defconfig | 1
-rw-r--r--  arch/arm/configs/spitz_defconfig | 1
-rw-r--r--  arch/arm/configs/vexpress_defconfig | 1
-rw-r--r--  arch/arm/oprofile/Makefile | 14
-rw-r--r--  arch/arm/oprofile/common.c | 132
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/ia64/Kconfig | 1
-rw-r--r--  arch/ia64/Makefile | 1
-rw-r--r--  arch/ia64/configs/bigsur_defconfig | 1
-rw-r--r--  arch/ia64/include/asm/hw_irq.h | 1
-rw-r--r--  arch/ia64/include/asm/perfmon.h | 111
-rw-r--r--  arch/ia64/include/uapi/asm/perfmon.h | 178
-rw-r--r--  arch/ia64/include/uapi/asm/perfmon_default_smpl.h | 84
-rw-r--r--  arch/ia64/kernel/palinfo.c | 41
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c | 297
-rw-r--r--  arch/ia64/kernel/perfmon_generic.h | 46
-rw-r--r--  arch/ia64/kernel/perfmon_itanium.h | 2
-rw-r--r--  arch/ia64/kernel/perfmon_mckinley.h | 188
-rw-r--r--  arch/ia64/kernel/perfmon_montecito.h | 270
-rw-r--r--  arch/ia64/oprofile/Makefile | 10
-rw-r--r--  arch/ia64/oprofile/backtrace.c | 131
-rw-r--r--  arch/ia64/oprofile/init.c | 28
-rw-r--r--  arch/microblaze/Kconfig | 1
-rw-r--r--  arch/microblaze/Makefile | 2
-rw-r--r--  arch/microblaze/oprofile/Makefile | 14
-rw-r--r--  arch/microblaze/oprofile/microblaze_oprofile.c | 22
-rw-r--r--  arch/mips/Kconfig | 3
-rw-r--r--  arch/mips/Makefile | 1
-rw-r--r--  arch/mips/configs/fuloong2e_defconfig | 1
-rw-r--r--  arch/mips/configs/ip32_defconfig | 1
-rw-r--r--  arch/mips/configs/lemote2f_defconfig | 1
-rw-r--r--  arch/mips/configs/mtx1_defconfig | 1
-rw-r--r--  arch/mips/configs/rs90_defconfig | 1
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/loongson.h | 9
-rw-r--r--  arch/mips/loongson2ef/fuloong-2e/irq.c | 2
-rw-r--r--  arch/mips/loongson2ef/lemote-2f/irq.c | 1
-rw-r--r--  arch/mips/oprofile/Makefile | 18
-rw-r--r--  arch/mips/oprofile/backtrace.c | 177
-rw-r--r--  arch/mips/oprofile/common.c | 147
-rw-r--r--  arch/mips/oprofile/op_impl.h | 41
-rw-r--r--  arch/mips/oprofile/op_model_loongson2.c | 161
-rw-r--r--  arch/mips/oprofile/op_model_loongson3.c | 213
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 479
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/Makefile | 2
-rw-r--r--  arch/parisc/oprofile/Makefile | 10
-rw-r--r--  arch/parisc/oprofile/init.c | 23
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/Makefile | 2
-rw-r--r--  arch/powerpc/configs/44x/akebono_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/currituck_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/fsp2_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/iss476-smp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/cell_defconfig | 1
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 1
-rw-r--r--  arch/powerpc/configs/maple_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig | 1
-rw-r--r--  arch/powerpc/configs/powernv_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64e_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 20
-rw-r--r--  arch/powerpc/include/asm/oprofile_impl.h | 135
-rw-r--r--  arch/powerpc/include/asm/spu.h | 33
-rw-r--r--  arch/powerpc/kernel/cputable.c | 67
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 2
-rw-r--r--  arch/powerpc/oprofile/Makefile | 19
-rw-r--r--  arch/powerpc/oprofile/backtrace.c | 120
-rw-r--r--  arch/powerpc/oprofile/cell/pr_util.h | 110
-rw-r--r--  arch/powerpc/oprofile/cell/spu_profiler.c | 248
-rw-r--r--  arch/powerpc/oprofile/cell/spu_task_sync.c | 657
-rw-r--r--  arch/powerpc/oprofile/cell/vma_map.c | 279
-rw-r--r--  arch/powerpc/oprofile/common.c | 243
-rw-r--r--  arch/powerpc/oprofile/op_model_7450.c | 207
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c | 1709
-rw-r--r--  arch/powerpc/oprofile/op_model_fsl_emb.c | 380
-rw-r--r--  arch/powerpc/oprofile/op_model_pa6t.c | 227
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c | 438
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/spu_notify.c | 55
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 1
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/Makefile | 3
-rw-r--r--  arch/s390/configs/debug_defconfig | 1
-rw-r--r--  arch/s390/configs/defconfig | 1
-rw-r--r--  arch/s390/oprofile/Makefile | 10
-rw-r--r--  arch/s390/oprofile/init.c | 37
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/Makefile | 1
-rw-r--r--  arch/sh/configs/espt_defconfig | 1
-rw-r--r--  arch/sh/configs/migor_defconfig | 1
-rw-r--r--  arch/sh/configs/r7780mp_defconfig | 1
-rw-r--r--  arch/sh/configs/r7785rp_defconfig | 1
-rw-r--r--  arch/sh/configs/rsk7201_defconfig | 1
-rw-r--r--  arch/sh/configs/rsk7203_defconfig | 1
-rw-r--r--  arch/sh/configs/rts7751r2d1_defconfig | 1
-rw-r--r--  arch/sh/configs/rts7751r2dplus_defconfig | 1
-rw-r--r--  arch/sh/configs/sdk7786_defconfig | 1
-rw-r--r--  arch/sh/configs/se7206_defconfig | 1
-rw-r--r--  arch/sh/configs/sh03_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7724_generic_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7763rdp_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7770_generic_defconfig | 1
-rw-r--r--  arch/sh/configs/shx3_defconfig | 1
-rw-r--r--  arch/sh/oprofile/Makefile | 16
-rw-r--r--  arch/sh/oprofile/backtrace.c | 80
-rw-r--r--  arch/sh/oprofile/common.c | 64
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/Makefile | 1
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 1
-rw-r--r--  arch/sparc/oprofile/Makefile | 10
-rw-r--r--  arch/sparc/oprofile/init.c | 87
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/Makefile | 3
-rw-r--r--  arch/x86/include/asm/nmi.h | 1
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 11
-rw-r--r--  arch/x86/oprofile/Makefile | 12
-rw-r--r--  arch/x86/oprofile/backtrace.c | 127
-rw-r--r--  arch/x86/oprofile/init.c | 38
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 780
-rw-r--r--  arch/x86/oprofile/op_counter.h | 30
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 542
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 723
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 245
-rw-r--r--  arch/x86/oprofile/op_x86_model.h | 90
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  arch/xtensa/Makefile | 1
-rw-r--r--  arch/xtensa/configs/audio_kc705_defconfig | 1
-rw-r--r--  arch/xtensa/configs/generic_kc705_defconfig | 1
-rw-r--r--  arch/xtensa/configs/smp_lx200_defconfig | 1
-rw-r--r--  arch/xtensa/configs/xip_kc705_defconfig | 1
-rw-r--r--  arch/xtensa/kernel/stacktrace.c | 2
-rw-r--r--  arch/xtensa/oprofile/Makefile | 10
-rw-r--r--  arch/xtensa/oprofile/backtrace.c | 27
-rw-r--r--  arch/xtensa/oprofile/init.c | 26
-rw-r--r--  drivers/oprofile/buffer_sync.c | 591
-rw-r--r--  drivers/oprofile/buffer_sync.h | 22
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 465
-rw-r--r--  drivers/oprofile/cpu_buffer.h | 121
-rw-r--r--  drivers/oprofile/event_buffer.c | 209
-rw-r--r--  drivers/oprofile/event_buffer.h | 40
-rw-r--r--  drivers/oprofile/nmi_timer_int.c | 157
-rw-r--r--  drivers/oprofile/oprof.c | 286
-rw-r--r--  drivers/oprofile/oprof.h | 50
-rw-r--r--  drivers/oprofile/oprofile_files.c | 201
-rw-r--r--  drivers/oprofile/oprofile_perf.c | 328
-rw-r--r--  drivers/oprofile/oprofile_stats.c | 84
-rw-r--r--  drivers/oprofile/oprofile_stats.h | 33
-rw-r--r--  drivers/oprofile/oprofilefs.c | 300
-rw-r--r--  drivers/oprofile/timer_int.c | 122
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/dcookies.c | 356
-rw-r--r--  include/linux/dcookies.h | 69
-rw-r--r--  include/linux/oprofile.h | 209
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  kernel/sys.c | 1
-rw-r--r--  usr/include/Makefile | 2
199 files changed, 8 insertions(+), 15569 deletions(-)
diff --git a/Documentation/RCU/NMI-RCU.rst b/Documentation/RCU/NMI-RCU.rst
index 180958388ff9..2a92bc685ef1 100644
--- a/Documentation/RCU/NMI-RCU.rst
+++ b/Documentation/RCU/NMI-RCU.rst
@@ -8,8 +8,7 @@ Although RCU is usually used to protect read-mostly data structures,
it is possible to use RCU to provide dynamic non-maskable interrupt
handlers, as well as dynamic irq handlers. This document describes
how to do this, drawing loosely from Zwane Mwaikambo's NMI-timer
-work in "arch/x86/oprofile/nmi_timer_int.c" and in
-"arch/x86/kernel/traps.c".
+work in "arch/x86/kernel/traps.c".
The relevant pieces of code are listed below, each followed by a
brief explanation::
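
[For context: the dynamic-handler pattern NMI-RCU.rst keeps describing is a plain function pointer published with rcu_assign_pointer() and picked up lock-free in the NMI path. A minimal sketch with simplified types — default_do_nmi() stands in for the arch's fallback, and this is not the verbatim traps.c listing:

    typedef int (*nmi_callback_t)(struct pt_regs *regs, int cpu);

    static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
    {
            return 0;       /* not handled; let the default run */
    }

    static nmi_callback_t nmi_callback = dummy_nmi_callback;

    void set_nmi_callback(nmi_callback_t callback)
    {
            rcu_assign_pointer(nmi_callback, callback);     /* publish */
    }

    void unset_nmi_callback(void)
    {
            rcu_assign_pointer(nmi_callback, dummy_nmi_callback);
            /* caller runs synchronize_rcu() before freeing handler state */
    }

    static void handle_nmi(struct pt_regs *regs)
    {
            /* NMI context: no locks may be taken; RCU makes the swap safe */
            if (!rcu_dereference_sched(nmi_callback)(regs, smp_processor_id()))
                    default_do_nmi(regs);
    }
]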
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a10b545c2070..85f31ca83864 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3458,20 +3458,6 @@
For example, to override I2C bus2:
omap_mux=i2c2_scl.i2c2_scl=0x100,i2c2_sda.i2c2_sda=0x100
- oprofile.timer= [HW]
- Use timer interrupt instead of performance counters
-
- oprofile.cpu_type= Force an oprofile cpu type
- This might be useful if you have an older oprofile
- userland or if you want common events.
- Format: { arch_perfmon }
- arch_perfmon: [X86] Force use of architectural
- perfmon on Intel CPUs instead of the
- CPU specific event set.
- timer: [X86] Force use of architectural NMI
- timer mode (see also oprofile.timer
- for generic hr timer mode)
-
oops=panic Always panic on oopses. Default is to just kill the
process, but there is a small probability of
deadlocking the machine.
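
[For reference, both removed entries were module parameters of the oprofile core, so they took effect from the boot command line in the usual dotted form, e.g. "oprofile.timer=1" or "oprofile.cpu_type=arch_perfmon".]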
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 300d8edcb994..910c6303c7ea 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -1317,7 +1317,6 @@ When kbuild executes, the following steps are followed (roughly):
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
- drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
7.5 Architecture-specific boot images
-------------------------------------
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
index e02ff5ffb653..c6dfe060ec2f 100644
--- a/Documentation/process/magic-number.rst
+++ b/Documentation/process/magic-number.rst
@@ -135,7 +135,6 @@ FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fo
SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
-OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
diff --git a/Documentation/translations/it_IT/process/magic-number.rst b/Documentation/translations/it_IT/process/magic-number.rst
index 0243d32a0b59..1af30f4228f2 100644
--- a/Documentation/translations/it_IT/process/magic-number.rst
+++ b/Documentation/translations/it_IT/process/magic-number.rst
@@ -141,7 +141,6 @@ FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fo
SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
-OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
diff --git a/Documentation/translations/zh_CN/process/magic-number.rst b/Documentation/translations/zh_CN/process/magic-number.rst
index de182bf4191c..7bb9d4165ed3 100644
--- a/Documentation/translations/zh_CN/process/magic-number.rst
+++ b/Documentation/translations/zh_CN/process/magic-number.rst
@@ -124,7 +124,6 @@ FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fo
SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
-OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
diff --git a/MAINTAINERS b/MAINTAINERS
index c113989c5cec..01c2666610f4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1413,7 +1413,6 @@ F: arch/arm*/include/asm/hw_breakpoint.h
F: arch/arm*/include/asm/perf_event.h
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
-F: arch/arm/oprofile/common.c
F: drivers/perf/
F: include/linux/perf/arm_pmu.h
@@ -4060,7 +4059,6 @@ W: http://www.ibm.com/developerworks/power/cell/
F: arch/powerpc/include/asm/cell*.h
F: arch/powerpc/include/asm/spu*.h
F: arch/powerpc/include/uapi/asm/spu*.h
-F: arch/powerpc/oprofile/*cell*
F: arch/powerpc/platforms/cell/
CELLWISE CW2015 BATTERY DRIVER
@@ -13291,15 +13289,6 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
F: sound/drivers/opl4/
-OPROFILE
-M: Robert Richter <rric@kernel.org>
-L: oprofile-list@lists.sf.net
-S: Maintained
-F: arch/*/include/asm/oprofile*.h
-F: arch/*/oprofile/
-F: drivers/oprofile/
-F: include/linux/oprofile.h
-
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mark@fasheh.com>
M: Joel Becker <jlbec@evilplan.org>
diff --git a/arch/Kconfig b/arch/Kconfig
index b8bbfd3c66b8..87608c2fa027 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -33,38 +33,6 @@ config HOTPLUG_SMT
config GENERIC_ENTRY
bool
-config OPROFILE
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config OPROFILE_EVENT_MULTIPLEX
- bool "OProfile multiplexing support (EXPERIMENTAL)"
- default n
- depends on OPROFILE && X86
- help
- The number of hardware counters is limited. The multiplexing
- feature enables OProfile to gather more events than counters
- are provided by the hardware. This is realized by switching
- between events at a user specified time interval.
-
- If unsure, say N.
-
-config HAVE_OPROFILE
- bool
-
-config OPROFILE_NMI_TIMER
- def_bool y
- depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64
-
config KPROBES
bool "Kprobes"
depends on MODULES
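
[The removed OPROFILE_EVENT_MULTIPLEX help text compresses the idea considerably. Spelled out as an illustrative sketch — this shows the general round-robin technique, not oprofile's actual code, and program_counter() is a hypothetical helper:

    #define NR_HW_COUNTERS 4        /* assumption: 4 hardware counters */

    struct mux_state {
            int nr_events;          /* user requested more events than counters */
            int head;               /* first event currently programmed */
    };

    /* Runs from a timer at the user-specified switch interval. */
    static void mux_rotate(struct mux_state *s, const unsigned long *event)
    {
            int i;

            for (i = 0; i < NR_HW_COUNTERS; i++)
                    program_counter(i, event[(s->head + i) % s->nr_events]);
            s->head = (s->head + NR_HW_COUNTERS) % s->nr_events;
    }

Because each event is live for only a fraction of the run, the collected counts are statistical estimates rather than exact totals.]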
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 1f51437d5765..a401c1481a11 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -14,7 +14,6 @@ config ALPHA
select HAVE_AOUT
select HAVE_ASM_MODVERSIONS
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select NEED_DMA_MAP_STATE
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 12dee59b011c..c2946431d88d 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -40,7 +40,6 @@ head-y := arch/alpha/kernel/head.o
core-y += arch/alpha/kernel/ arch/alpha/mm/
core-$(CONFIG_MATHEMU) += arch/alpha/math-emu/
-drivers-$(CONFIG_OPROFILE) += arch/alpha/oprofile/
libs-y += arch/alpha/lib/
# export what is needed by arch/alpha/boot/Makefile
diff --git a/arch/alpha/oprofile/Makefile b/arch/alpha/oprofile/Makefile
deleted file mode 100644
index 79f32820a42f..000000000000
--- a/arch/alpha/oprofile/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Werror -Wno-sign-compare
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o
-oprofile-$(CONFIG_ALPHA_GENERIC) += op_model_ev4.o \
- op_model_ev5.o \
- op_model_ev6.o \
- op_model_ev67.o
-oprofile-$(CONFIG_ALPHA_EV4) += op_model_ev4.o
-oprofile-$(CONFIG_ALPHA_EV5) += op_model_ev5.o
-oprofile-$(CONFIG_ALPHA_EV6) += op_model_ev6.o \
- op_model_ev67.o
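
[Note the pattern this Makefile establishes, repeated by every architecture below: the core driver objects are pulled out of drivers/oprofile/ by relative ../../../ paths and linked into a per-arch oprofile.o rather than being built once under drivers/. That layout is why removing the subsystem has to touch every arch Makefile individually.]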
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
deleted file mode 100644
index 1b1259c7d7d1..000000000000
--- a/arch/alpha/oprofile/common.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * @file arch/alpha/oprofile/common.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/special_insns.h>
-
-#include "op_impl.h"
-
-extern struct op_axp_model op_model_ev4 __attribute__((weak));
-extern struct op_axp_model op_model_ev5 __attribute__((weak));
-extern struct op_axp_model op_model_pca56 __attribute__((weak));
-extern struct op_axp_model op_model_ev6 __attribute__((weak));
-extern struct op_axp_model op_model_ev67 __attribute__((weak));
-
-static struct op_axp_model *model;
-
-extern void (*perf_irq)(unsigned long, struct pt_regs *);
-static void (*save_perf_irq)(unsigned long, struct pt_regs *);
-
-static struct op_counter_config ctr[20];
-static struct op_system_config sys;
-static struct op_register_config reg;
-
-/* Called from do_entInt to handle the performance monitor interrupt. */
-
-static void
-op_handle_interrupt(unsigned long which, struct pt_regs *regs)
-{
- model->handle_interrupt(which, regs, ctr);
-
- /* If the user has selected an interrupt frequency that is
- not exactly the width of the counter, write a new value
- into the counter such that it'll overflow after N more
- events. */
- if ((reg.need_reset >> which) & 1)
- model->reset_ctr(&reg, which);
-}
-
-static int
-op_axp_setup(void)
-{
- unsigned long i, e;
-
- /* Install our interrupt handler into the existing hook. */
- save_perf_irq = perf_irq;
- perf_irq = op_handle_interrupt;
-
- /* Compute the mask of enabled counters. */
- for (i = e = 0; i < model->num_counters; ++i)
- if (ctr[i].enabled)
- e |= 1 << i;
- reg.enable = e;
-
- /* Pre-compute the values to stuff in the hardware registers. */
- model->reg_setup(&reg, ctr, &sys);
-
- /* Configure the registers on all cpus. */
- smp_call_function(model->cpu_setup, &reg, 1);
- model->cpu_setup(&reg);
- return 0;
-}
-
-static void
-op_axp_shutdown(void)
-{
- /* Remove our interrupt handler. We may be removing this module. */
- perf_irq = save_perf_irq;
-}
-
-static void
-op_axp_cpu_start(void *dummy)
-{
- wrperfmon(1, reg.enable);
-}
-
-static int
-op_axp_start(void)
-{
- smp_call_function(op_axp_cpu_start, NULL, 1);
- op_axp_cpu_start(NULL);
- return 0;
-}
-
-static inline void
-op_axp_cpu_stop(void *dummy)
-{
- /* Disable performance monitoring for all counters. */
- wrperfmon(0, -1);
-}
-
-static void
-op_axp_stop(void)
-{
- smp_call_function(op_axp_cpu_stop, NULL, 1);
- op_axp_cpu_stop(NULL);
-}
-
-static int
-op_axp_create_files(struct dentry *root)
-{
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
- /* Dummies. */
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- if (model->can_set_proc_mode) {
- oprofilefs_create_ulong(root, "enable_pal",
- &sys.enable_pal);
- oprofilefs_create_ulong(root, "enable_kernel",
- &sys.enable_kernel);
- oprofilefs_create_ulong(root, "enable_user",
- &sys.enable_user);
- }
-
- return 0;
-}
-
-int __init
-oprofile_arch_init(struct oprofile_operations *ops)
-{
- struct op_axp_model *lmodel = NULL;
-
- switch (implver()) {
- case IMPLVER_EV4:
- lmodel = &op_model_ev4;
- break;
- case IMPLVER_EV5:
- /* 21164PC has a slightly different set of events.
- Recognize the chip by the presence of the MAX insns. */
- if (!amask(AMASK_MAX))
- lmodel = &op_model_pca56;
- else
- lmodel = &op_model_ev5;
- break;
- case IMPLVER_EV6:
- /* 21264A supports ProfileMe.
- Recognize the chip by the presence of the CIX insns. */
- if (!amask(AMASK_CIX))
- lmodel = &op_model_ev67;
- else
- lmodel = &op_model_ev6;
- break;
- }
-
- if (!lmodel)
- return -ENODEV;
- model = lmodel;
-
- ops->create_files = op_axp_create_files;
- ops->setup = op_axp_setup;
- ops->shutdown = op_axp_shutdown;
- ops->start = op_axp_start;
- ops->stop = op_axp_stop;
- ops->cpu_type = lmodel->cpu_type;
-
- printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
- lmodel->cpu_type);
-
- return 0;
-}
-
-
-void
-oprofile_arch_exit(void)
-{
-}
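
[What op_axp_create_files() builds is the oprofilefs control tree — conventionally mounted on /dev/oprofile — so counter 0 surfaces to userspace as /dev/oprofile/0/enabled, 0/event and 0/count (plus the dummy kernel/user/unit_mask files), with the enable_pal/enable_kernel/enable_user switches at the root when the model supports them.]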
diff --git a/arch/alpha/oprofile/op_impl.h b/arch/alpha/oprofile/op_impl.h
deleted file mode 100644
index b2b87ae9a353..000000000000
--- a/arch/alpha/oprofile/op_impl.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_impl.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Dummies because I am too lazy to hack the userspace tools. */
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
- unsigned long enable_pal;
- unsigned long enable_kernel;
- unsigned long enable_user;
-};
-
-/* Cached values for the various performance monitoring registers. */
-struct op_register_config {
- unsigned long enable;
- unsigned long mux_select;
- unsigned long proc_mode;
- unsigned long freq;
- unsigned long reset_values;
- unsigned long need_reset;
-};
-
-/* Per-architecture configuration and hooks. */
-struct op_axp_model {
- void (*reg_setup) (struct op_register_config *,
- struct op_counter_config *,
- struct op_system_config *);
- void (*cpu_setup) (void *);
- void (*reset_ctr) (struct op_register_config *, unsigned long);
- void (*handle_interrupt) (unsigned long, struct pt_regs *,
- struct op_counter_config *);
- char *cpu_type;
- unsigned char num_counters;
- unsigned char can_set_proc_mode;
-};
-
-#endif
diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c
deleted file mode 100644
index 086a0d5445c5..000000000000
--- a/arch/alpha/oprofile/op_model_ev4.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev4.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void
-ev4_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- unsigned long ctl = 0, count, hilo;
-
- /* Select desired events. We've mapped the event numbers
- such that they fit directly into the event selection fields.
-
- Note that there is no "off" setting. In both cases we select
- the EXTERNAL event source, hoping that it'll be the lowest
- frequency, and set the frequency counter to LOW. The interrupts
- for these "disabled" counter overflows are ignored by the
- interrupt handler.
-
- This is most irritating, because the hardware *can* enable and
- disable the interrupts for these counters independently, but the
- wrperfmon interface doesn't allow it. */
-
- ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
- ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);
-
- /* EV4 can not read or write its counter registers. The only
- thing one can do at all is see if you overflow and get an
- interrupt. We can set the width of the counters, to some
- extent. Take the interrupt count selected by the user,
- map it onto one of the possible values, and write it back. */
-
- count = ctr[0].count;
- if (count <= 4096)
- count = 4096, hilo = 1;
- else
- count = 65536, hilo = 0;
- ctr[0].count = count;
- ctl |= (ctr[0].enabled && hilo) << 3;
-
- count = ctr[1].count;
- if (count <= 256)
- count = 256, hilo = 1;
- else
- count = 4096, hilo = 0;
- ctr[1].count = count;
- ctl |= (ctr[1].enabled && hilo);
-
- reg->mux_select = ctl;
-
- /* Select performance monitoring options. */
- /* ??? Need to come up with some mechanism to trace only
- selected processes. EV4 does not have a mechanism to
- select kernel or user mode only. For now, enable always. */
- reg->proc_mode = 0;
-
- /* Frequency is folded into mux_select for EV4. */
- reg->freq = 0;
-
- /* See above regarding no writes. */
- reg->reset_values = 0;
- reg->need_reset = 0;
-
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev4_cpu_setup(void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
-}
-
-static void
-ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- /* EV4 can't properly disable counters individually.
- Discard "disabled" events now. */
- if (!ctr[which].enabled)
- return;
-
- /* Record the sample. */
- oprofile_add_sample(regs, which);
-}
-
-
-struct op_axp_model op_model_ev4 = {
- .reg_setup = ev4_reg_setup,
- .cpu_setup = ev4_cpu_setup,
- .reset_ctr = NULL,
- .handle_interrupt = ev4_handle_interrupt,
- .cpu_type = "alpha/ev4",
- .num_counters = 2,
- .can_set_proc_mode = 0,
-};
diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c
deleted file mode 100644
index c300f5ef3482..000000000000
--- a/arch/alpha/oprofile/op_model_ev5.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev5.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling.
-
- The 21164 (EV5) and 21164PC (PCA65) vary in the bit placement and
- meaning of the "CBOX" events. Given that we don't care about meaning
- at this point, arrange for the difference in bit placement to be
- handled by common code. */
-
-static void
-common_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys,
- int cbox1_ofs, int cbox2_ofs)
-{
- int i, ctl, reset, need_reset;
-
- /* Select desired events. The event numbers are selected such
- that they map directly into the event selection fields:
-
- PCSEL0: 0, 1
- PCSEL1: 24-39
- CBOX1: 40-47
- PCSEL2: 48-63
- CBOX2: 64-71
-
- There are two special cases, in that CYCLES can be measured
- on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12].
- These event numbers are canonicalizes to their first appearance. */
-
- ctl = 0;
- for (i = 0; i < 3; ++i) {
- unsigned long event = ctr[i].event;
- if (!ctr[i].enabled)
- continue;
-
- /* Remap the duplicate events, as described above. */
- if (i == 2) {
- if (event == 0)
- event = 12+48;
- else if (event == 2+41)
- event = 4+65;
- }
-
- /* Convert the event numbers onto mux_select bit mask. */
- if (event < 2)
- ctl |= event << 31;
- else if (event < 24)
- /* error */;
- else if (event < 40)
- ctl |= (event - 24) << 4;
- else if (event < 48)
- ctl |= (event - 40) << cbox1_ofs | 15 << 4;
- else if (event < 64)
- ctl |= event - 48;
- else if (event < 72)
- ctl |= (event - 64) << cbox2_ofs | 15;
- }
- reg->mux_select = ctl;
-
- /* Select processor mode. */
- /* ??? Need to come up with some mechanism to trace only selected
- processes. For now select from pal, kernel and user mode. */
- ctl = 0;
- ctl |= !sys->enable_pal << 9;
- ctl |= !sys->enable_kernel << 8;
- ctl |= !sys->enable_user << 30;
- reg->proc_mode = ctl;
-
- /* Select interrupt frequencies. Take the interrupt count selected
- by the user, and map it onto one of the possible counter widths.
- If the user value is in between, compute a value to which the
- counter is reset at each interrupt. */
-
- ctl = reset = need_reset = 0;
- for (i = 0; i < 3; ++i) {
- unsigned long max, hilo, count = ctr[i].count;
- if (!ctr[i].enabled)
- continue;
-
- if (count <= 256)
- count = 256, hilo = 3, max = 256;
- else {
- max = (i == 2 ? 16384 : 65536);
- hilo = 2;
- if (count > max)
- count = max;
- }
- ctr[i].count = count;
-
- ctl |= hilo << (8 - i*2);
- reset |= (max - count) << (48 - 16*i);
- if (count != max)
- need_reset |= 1 << i;
- }
- reg->freq = ctl;
- reg->reset_values = reset;
- reg->need_reset = need_reset;
-}
-
-static void
-ev5_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- common_reg_setup(reg, ctr, sys, 19, 22);
-}
-
-static void
-pca56_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- common_reg_setup(reg, ctr, sys, 8, 11);
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev5_cpu_setup (void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
- wrperfmon(4, reg->freq);
- wrperfmon(6, reg->reset_values);
-}
-
-/* CTR is a counter for which the user has requested an interrupt count
- in between one of the widths selectable in hardware. Reset the count
- for CTR to the value stored in REG->RESET_VALUES.
-
- For EV5, this means disabling profiling, reading the current values,
- masking in the value for the desired register, writing, then turning
- profiling back on.
-
- This can be streamlined if profiling is only enabled for user mode.
- In that case we know that the counters are not currently incrementing
- (due to being in kernel mode). */
-
-static void
-ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr)
-{
- unsigned long values, mask, not_pk, reset_values;
-
- mask = (ctr == 0 ? 0xfffful << 48
- : ctr == 1 ? 0xfffful << 32
- : 0x3fff << 16);
-
- not_pk = 1 << 9 | 1 << 8;
-
- reset_values = reg->reset_values;
-
- if ((reg->proc_mode & not_pk) == not_pk) {
- values = wrperfmon(5, 0);
- values = (reset_values & mask) | (values & ~mask & -2);
- wrperfmon(6, values);
- } else {
- wrperfmon(0, -1);
- values = wrperfmon(5, 0);
- values = (reset_values & mask) | (values & ~mask & -2);
- wrperfmon(6, values);
- wrperfmon(1, reg->enable);
- }
-}
-
-static void
-ev5_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- /* Record the sample. */
- oprofile_add_sample(regs, which);
-}
-
-
-struct op_axp_model op_model_ev5 = {
- .reg_setup = ev5_reg_setup,
- .cpu_setup = ev5_cpu_setup,
- .reset_ctr = ev5_reset_ctr,
- .handle_interrupt = ev5_handle_interrupt,
- .cpu_type = "alpha/ev5",
- .num_counters = 3,
- .can_set_proc_mode = 1,
-};
-
-struct op_axp_model op_model_pca56 = {
- .reg_setup = pca56_reg_setup,
- .cpu_setup = ev5_cpu_setup,
- .reset_ctr = ev5_reset_ctr,
- .handle_interrupt = ev5_handle_interrupt,
- .cpu_type = "alpha/pca56",
- .num_counters = 3,
- .can_set_proc_mode = 1,
-};
diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c
deleted file mode 100644
index 02edf5971614..000000000000
--- a/arch/alpha/oprofile/op_model_ev6.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev6.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void
-ev6_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- unsigned long ctl, reset, need_reset, i;
-
- /* Select desired events. We've mapped the event numbers
- such that they fit directly into the event selection fields. */
- ctl = 0;
- if (ctr[0].enabled && ctr[0].event)
- ctl |= (ctr[0].event & 1) << 4;
- if (ctr[1].enabled)
- ctl |= (ctr[1].event - 2) & 15;
- reg->mux_select = ctl;
-
- /* Select logging options. */
- /* ??? Need to come up with some mechanism to trace only
- selected processes. EV6 does not have a mechanism to
- select kernel or user mode only. For now, enable always. */
- reg->proc_mode = 0;
-
- /* EV6 cannot change the width of the counters as with the
- other implementations. But fortunately, we can write to
- the counters and set the value such that it will overflow
- at the right time. */
- reset = need_reset = 0;
- for (i = 0; i < 2; ++i) {
- unsigned long count = ctr[i].count;
- if (!ctr[i].enabled)
- continue;
-
- if (count > 0x100000)
- count = 0x100000;
- ctr[i].count = count;
- reset |= (0x100000 - count) << (i ? 6 : 28);
- if (count != 0x100000)
- need_reset |= 1 << i;
- }
- reg->reset_values = reset;
- reg->need_reset = need_reset;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev6_cpu_setup (void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
- wrperfmon(6, reg->reset_values | 3);
-}
-
-/* CTR is a counter for which the user has requested an interrupt count
- in between one of the widths selectable in hardware. Reset the count
- for CTR to the value stored in REG->RESET_VALUES. */
-
-static void
-ev6_reset_ctr(struct op_register_config *reg, unsigned long ctr)
-{
- wrperfmon(6, reg->reset_values | (1 << ctr));
-}
-
-static void
-ev6_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- /* Record the sample. */
- oprofile_add_sample(regs, which);
-}
-
-
-struct op_axp_model op_model_ev6 = {
- .reg_setup = ev6_reg_setup,
- .cpu_setup = ev6_cpu_setup,
- .reset_ctr = ev6_reset_ctr,
- .handle_interrupt = ev6_handle_interrupt,
- .cpu_type = "alpha/ev6",
- .num_counters = 2,
- .can_set_proc_mode = 0,
-};
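
[The reset-value trick the EV6 comment describes, made concrete: the counters here are 20 bits wide, so preloading a counter with 2^20 - N makes the hardware overflow — and interrupt — after exactly N further events. A worked sketch under that assumption:

    /* user asks for an interrupt every 10000 events on counter 0 */
    unsigned long count = 10000;
    unsigned long preload = 0x100000 - count;   /* 2^20 - 10000 = 1038576 */
    /* counter 0's field sits at bit 28, counter 1's at bit 6 */
    unsigned long reset_values = preload << 28;

need_reset then marks counters whose period is shorter than the full width, so ev6_reset_ctr() rewrites the preload after each interrupt.]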
diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c
deleted file mode 100644
index adb1744d20f3..000000000000
--- a/arch/alpha/oprofile/op_model_ev67.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * @file arch/alpha/oprofile/op_model_ev67.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- * @author Falk Hueffner <falk@debian.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-
-#include "op_impl.h"
-
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void
-ev67_reg_setup(struct op_register_config *reg,
- struct op_counter_config *ctr,
- struct op_system_config *sys)
-{
- unsigned long ctl, reset, need_reset, i;
-
- /* Select desired events. */
- ctl = 1UL << 4; /* Enable ProfileMe mode. */
-
- /* The event numbers are chosen so we can use them directly if
- PCTR1 is enabled. */
- if (ctr[1].enabled) {
- ctl |= (ctr[1].event & 3) << 2;
- } else {
- if (ctr[0].event == 0) /* cycles */
- ctl |= 1UL << 2;
- }
- reg->mux_select = ctl;
-
- /* Select logging options. */
- /* ??? Need to come up with some mechanism to trace only
- selected processes. EV67 does not have a mechanism to
- select kernel or user mode only. For now, enable always. */
- reg->proc_mode = 0;
-
- /* EV67 cannot change the width of the counters as with the
- other implementations. But fortunately, we can write to
- the counters and set the value such that it will overflow
- at the right time. */
- reset = need_reset = 0;
- for (i = 0; i < 2; ++i) {
- unsigned long count = ctr[i].count;
- if (!ctr[i].enabled)
- continue;
-
- if (count > 0x100000)
- count = 0x100000;
- ctr[i].count = count;
- reset |= (0x100000 - count) << (i ? 6 : 28);
- if (count != 0x100000)
- need_reset |= 1 << i;
- }
- reg->reset_values = reset;
- reg->need_reset = need_reset;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void
-ev67_cpu_setup (void *x)
-{
- struct op_register_config *reg = x;
-
- wrperfmon(2, reg->mux_select);
- wrperfmon(3, reg->proc_mode);
- wrperfmon(6, reg->reset_values | 3);
-}
-
-/* CTR is a counter for which the user has requested an interrupt count
- in between one of the widths selectable in hardware. Reset the count
- for CTR to the value stored in REG->RESET_VALUES. */
-
-static void
-ev67_reset_ctr(struct op_register_config *reg, unsigned long ctr)
-{
- wrperfmon(6, reg->reset_values | (1 << ctr));
-}
-
-/* ProfileMe conditions which will show up as counters. We can also
- detect the following, but it seems unlikely that anybody is
- interested in counting them:
- * Reset
- * MT_FPCR (write to floating point control register)
- * Arithmetic trap
- * Dstream Fault
- * Machine Check (ECC fault, etc.)
- * OPCDEC (illegal opcode)
- * Floating point disabled
- * Differentiate between DTB single/double misses and 3 or 4 level
- page tables
- * Istream access violation
- * Interrupt
- * Icache Parity Error.
- * Instruction killed (nop, trapb)
-
- Unfortunately, there seems to be no way to detect Dcache and Bcache
- misses; the latter could be approximated by making the counter
- count Bcache misses, but that is not precise.
-
- We model this as 20 counters:
- * PCTR0
- * PCTR1
- * 9 ProfileMe events, induced by PCTR0
- * 9 ProfileMe events, induced by PCTR1
-*/
-
-enum profileme_counters {
- PM_STALLED, /* Stalled for at least one cycle
- between the fetch and map stages */
- PM_TAKEN, /* Conditional branch taken */
- PM_MISPREDICT, /* Branch caused mispredict trap */
- PM_ITB_MISS, /* ITB miss */
- PM_DTB_MISS, /* DTB miss */
- PM_REPLAY, /* Replay trap */
- PM_LOAD_STORE, /* Load-store order trap */
- PM_ICACHE_MISS, /* Icache miss */
- PM_UNALIGNED, /* Unaligned Load/Store */
- PM_NUM_COUNTERS
-};
-
-static inline void
-op_add_pm(unsigned long pc, int kern, unsigned long counter,
- struct op_counter_config *ctr, unsigned long event)
-{
- unsigned long fake_counter = 2 + event;
- if (counter == 1)
- fake_counter += PM_NUM_COUNTERS;
- if (ctr[fake_counter].enabled)
- oprofile_add_pc(pc, kern, fake_counter);
-}
-
-static void
-ev67_handle_interrupt(unsigned long which, struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pmpc, pctr_ctl;
- int kern = !user_mode(regs);
- int mispredict = 0;
- union {
- unsigned long v;
- struct {
- unsigned reserved: 30; /* 0-29 */
- unsigned overcount: 3; /* 30-32 */
- unsigned icache_miss: 1; /* 33 */
- unsigned trap_type: 4; /* 34-37 */
- unsigned load_store: 1; /* 38 */
- unsigned trap: 1; /* 39 */
- unsigned mispredict: 1; /* 40 */
- } fields;
- } i_stat;
-
- enum trap_types {
- TRAP_REPLAY,
- TRAP_INVALID0,
- TRAP_DTB_DOUBLE_MISS_3,
- TRAP_DTB_DOUBLE_MISS_4,
- TRAP_FP_DISABLED,
- TRAP_UNALIGNED,
- TRAP_DTB_SINGLE_MISS,
- TRAP_DSTREAM_FAULT,
- TRAP_OPCDEC,
- TRAP_INVALID1,
- TRAP_MACHINE_CHECK,
- TRAP_INVALID2,
- TRAP_ARITHMETIC,
- TRAP_INVALID3,
- TRAP_MT_FPCR,
- TRAP_RESET
- };
-
- pmpc = wrperfmon(9, 0);
- /* ??? Don't know how to handle physical-mode PALcode address. */
- if (pmpc & 1)
- return;
- pmpc &= ~2; /* clear reserved bit */
-
- i_stat.v = wrperfmon(8, 0);
- if (i_stat.fields.trap) {
- switch (i_stat.fields.trap_type) {
- case TRAP_INVALID1:
- case TRAP_INVALID2:
- case TRAP_INVALID3:
- /* Pipeline redirection occurred. PMPC points
- to PALcode. Recognize ITB miss by PALcode
- offset address, and get actual PC from
- EXC_ADDR. */
- oprofile_add_pc(regs->pc, kern, which);
- if ((pmpc & ((1 << 15) - 1)) == 581)
- op_add_pm(regs->pc, kern, which,
- ctr, PM_ITB_MISS);
- /* Most other bit and counter values will be
- those for the first instruction in the
- fault handler, so we're done. */
- return;
- case TRAP_REPLAY:
- op_add_pm(pmpc, kern, which, ctr,
- (i_stat.fields.load_store
- ? PM_LOAD_STORE : PM_REPLAY));
- break;
- case TRAP_DTB_DOUBLE_MISS_3:
- case TRAP_DTB_DOUBLE_MISS_4:
- case TRAP_DTB_SINGLE_MISS:
- op_add_pm(pmpc, kern, which, ctr, PM_DTB_MISS);
- break;
- case TRAP_UNALIGNED:
- op_add_pm(pmpc, kern, which, ctr, PM_UNALIGNED);
- break;
- case TRAP_INVALID0:
- case TRAP_FP_DISABLED:
- case TRAP_DSTREAM_FAULT:
- case TRAP_OPCDEC:
- case TRAP_MACHINE_CHECK:
- case TRAP_ARITHMETIC:
- case TRAP_MT_FPCR:
- case TRAP_RESET:
- break;
- }
-
- /* ??? JSR/JMP/RET/COR or HW_JSR/HW_JMP/HW_RET/HW_COR
- mispredicts do not set this bit but can be
- recognized by the presence of one of these
- instructions at the PMPC location with bit 39
- set. */
- if (i_stat.fields.mispredict) {
- mispredict = 1;
- op_add_pm(pmpc, kern, which, ctr, PM_MISPREDICT);
- }
- }
-
- oprofile_add_pc(pmpc, kern, which);
-
- pctr_ctl = wrperfmon(5, 0);
- if (pctr_ctl & (1UL << 27))
- op_add_pm(pmpc, kern, which, ctr, PM_STALLED);
-
- /* Unfortunately, TAK is undefined on mispredicted branches.
- ??? It is also undefined for non-cbranch insns, should
- check that. */
- if (!mispredict && pctr_ctl & (1UL << 0))
- op_add_pm(pmpc, kern, which, ctr, PM_TAKEN);
-}
-
-struct op_axp_model op_model_ev67 = {
- .reg_setup = ev67_reg_setup,
- .cpu_setup = ev67_cpu_setup,
- .reset_ctr = ev67_reset_ctr,
- .handle_interrupt = ev67_handle_interrupt,
- .cpu_type = "alpha/ev67",
- .num_counters = 20,
- .can_set_proc_mode = 0,
-};
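
[The fake-counter arithmetic in op_add_pm() above, worked through: slots 0 and 1 are the real PCTR0/PCTR1, the nine ProfileMe events attributed to PCTR0 occupy slots 2..10, and the same nine attributed to PCTR1 occupy slots 11..19. A PM_DTB_MISS (= 4) seen while PCTR1 interrupted therefore lands in slot 2 + 4 + PM_NUM_COUNTERS = 15, and 2 + 2*9 gives the model's num_counters of 20.]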
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index b55ca77f619b..bc8d6aecfbbd 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -37,7 +37,6 @@ config ARC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
select IRQ_DOMAIN
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 578bdbbb0fa7..4392c9c189c4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -96,8 +96,6 @@ core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/
core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/
core-$(CONFIG_ARC_SOC_HSDK) += arch/arc/plat-hsdk/
-drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
-
libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
deleted file mode 100644
index 698367bb41d0..000000000000
--- a/arch/arc/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
deleted file mode 100644
index 86bf5899533b..000000000000
--- a/arch/arc/oprofile/common.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * Based on orig code from @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/perf_event.h>
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- /*
- * A failure here, forces oprofile core to switch to Timer based PC
- * sampling, which will happen if say perf is not enabled/available
- */
- return oprofile_perf_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- oprofile_perf_exit();
-}
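
[The fallback that comment leans on lives in the (also deleted) oprofile core rather than here: oprof.c checked the return value of oprofile_arch_init() and, on failure or when oprofile.timer=1 was set, fell back to hrtimer-based PC sampling. Roughly — from memory, not verbatim:

    static int __init oprofile_init(void)
    {
            int err = oprofile_arch_init(&oprofile_ops);

            if (err < 0 || timer) {
                    printk(KERN_INFO "oprofile: using timer interrupt.\n");
                    err = oprofile_timer_init(&oprofile_ops);
            }
            return err ? err : oprofilefs_register();
    }
]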
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 6c423ee402ae..6d0ed2888935 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -102,7 +102,6 @@ config ARM
select HAVE_KRETPROBES if HAVE_KPROBES
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OPROFILE if HAVE_PERF_EVENTS
select HAVE_OPTPROBES if !THUMB2_KERNEL
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 5887de173fc9..dad5502ecc28 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -260,8 +260,6 @@ core-y += $(machdirs) $(platdirs)
core- += $(patsubst %,arch/arm/mach-%/, $(machine-))
core- += $(patsubst %,arch/arm/plat-%/, $(plat-))
-drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
-
libs-y := arch/arm/lib/ $(libs-y)
# Default target when executing plain make
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 44ff9cd88d81..1ef2bc4c7f69 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -21,7 +21,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_JUMP_LABEL=y
CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
index 66a80b46038d..63fa2eb21b75 100644
--- a/arch/arm/configs/cns3420vb_defconfig
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -11,7 +11,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_PERF_EVENTS is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 911e880f06ed..15b749f6996d 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index bb70acc6b526..1d9fa77bbafc 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -27,7 +27,6 @@ CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_PM_DEBUG=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 84a3b055f253..33c917df7b32 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -16,7 +16,6 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 81c45d1baba6..9f862b21b40a 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -67,7 +67,6 @@ CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_KIRKWOOD_CPUIDLE=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index b39b1300a459..cd703c15798f 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -5,7 +5,6 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 226f2e97c6e2..4f16716bfc32 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -4,7 +4,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=19
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 3b6e7452609b..3148567b66b6 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -13,7 +13,6 @@ CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index e5afbbae0356..b26ef4866a35 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -62,7 +62,6 @@ CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 4bdbb036ac26..b9e3b647e732 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 8654ece13004..bd7dd81c9c54 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -13,7 +13,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 19d03ea09405..3f36887e8333 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -10,7 +10,6 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index e73c97b0f5b0..0c60eb382c80 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -18,7 +18,6 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_VFP=y
CONFIG_NEON=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 8b2c14424927..f42c7a502b6e 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index c01baf7d6e37..4479369540f2 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -11,7 +11,6 @@ CONFIG_CPUSETS=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
deleted file mode 100644
index 39688dc9f181..000000000000
--- a/arch/arm/oprofile/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-ifeq ($(CONFIG_HW_PERF_EVENTS),y)
-DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
-endif
-
-oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
deleted file mode 100644
index 7cb3e0453fcd..000000000000
--- a/arch/arm/oprofile/common.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * @file common.c
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Copyright 2010 ARM Ltd.
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- * @author Will Deacon [move to perf]
- */
-
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/oprofile.h>
-#include <linux/perf_event.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <asm/stacktrace.h>
-#include <linux/uaccess.h>
-
-#include <asm/perf_event.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * OProfile has a curious naming scheme for the ARM PMUs, but they are
- * part of the user ABI so we need to map from the perf PMU name for
- * supported PMUs.
- */
-static struct op_perf_name {
- char *perf_name;
- char *op_name;
-} op_perf_name_map[] = {
- { "armv5_xscale1", "arm/xscale1" },
- { "armv5_xscale2", "arm/xscale2" },
- { "armv6_1136", "arm/armv6" },
- { "armv6_1156", "arm/armv6" },
- { "armv6_1176", "arm/armv6" },
- { "armv6_11mpcore", "arm/mpcore" },
- { "armv7_cortex_a8", "arm/armv7" },
- { "armv7_cortex_a9", "arm/armv7-ca9" },
-};
-
-char *op_name_from_perf_id(void)
-{
- int i;
- struct op_perf_name names;
- const char *perf_name = perf_pmu_name();
-
- for (i = 0; i < ARRAY_SIZE(op_perf_name_map); ++i) {
- names = op_perf_name_map[i];
- if (!strcmp(names.perf_name, perf_name))
- return names.op_name;
- }
-
- return NULL;
-}
-#endif
-
-static int report_trace(struct stackframe *frame, void *d)
-{
- unsigned int *depth = d;
-
- if (*depth) {
- oprofile_add_trace(frame->pc);
- (*depth)--;
- }
-
- return *depth == 0;
-}
-
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct frame_tail *)(xxx->fp)-1
- */
-struct frame_tail {
- struct frame_tail *fp;
- unsigned long sp;
- unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail* user_backtrace(struct frame_tail *tail)
-{
- struct frame_tail buftail[2];
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(tail, sizeof(buftail)))
- return NULL;
- if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
- return NULL;
-
- oprofile_add_trace(buftail[0].lr);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (tail + 1 >= buftail[0].fp)
- return NULL;
-
- return buftail[0].fp-1;
-}
-
-static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
-
- if (!user_mode(regs)) {
- struct stackframe frame;
- arm_get_current_stackframe(regs, &frame);
- walk_stackframe(&frame, report_trace, &depth);
- return;
- }
-
- while (depth-- && tail && !((unsigned long) tail & 3))
- tail = user_backtrace(tail);
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- /* provide backtrace support also in timer mode: */
- ops->backtrace = arm_backtrace;
-
- return oprofile_perf_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- oprofile_perf_exit();
-}
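
[The (struct frame_tail *)(fp) - 1 arithmetic above, made concrete: under the APCS-style layout assumed here, fp points just past the saved {fp, sp, lr} triple. struct frame_tail is 12 packed bytes on 32-bit ARM, so an fp of 0xbeffe010 puts the triple at 0xbeffe004..0xbeffe00f, i.e. (struct frame_tail *)0xbeffe010 - 1. The "tail + 1 >= buftail[0].fp" check then enforces that each successive frame sits at a strictly higher address, terminating the walk on corrupt or hostile user stacks.]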
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 6e00c16a36b5..44a409967af1 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -7,7 +7,6 @@ config HEXAGON
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_PREEMPT
- select HAVE_OPROFILE
# Other pending projects/to-do items.
# select HAVE_REGS_AND_STACK_ACCESS_API
# select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index eed59ec32657..2ad7a8d29fcc 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -24,7 +24,6 @@ config IA64
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_EXIT_THREAD
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 45d5368d6a99..f3328a29e881 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -52,7 +52,6 @@ core-y += arch/ia64/kernel/ arch/ia64/mm/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
drivers-y += arch/ia64/pci/ arch/ia64/hp/common/
-drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
PHONY += compressed check
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index cfed5ed89301..c409756b5396 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index f6ff95b4ecb1..5d267132f8cb 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -69,7 +69,6 @@ extern int ia64_last_device_vector;
#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
-#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
#define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
deleted file mode 100644
index e0545869cc8c..000000000000
--- a/arch/ia64/include/asm/perfmon.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-#ifndef _ASM_IA64_PERFMON_H
-#define _ASM_IA64_PERFMON_H
-
-#include <uapi/asm/perfmon.h>
-
-
-extern long perfmonctl(int fd, int cmd, void *arg, int narg);
-
-typedef struct {
- void (*handler)(int irq, void *arg, struct pt_regs *regs);
-} pfm_intr_handler_desc_t;
-
-extern void pfm_save_regs (struct task_struct *);
-extern void pfm_load_regs (struct task_struct *);
-
-extern void pfm_exit_thread(struct task_struct *);
-extern int pfm_use_debug_registers(struct task_struct *);
-extern int pfm_release_debug_registers(struct task_struct *);
-extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
-extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
-extern void pfm_init_percpu(void);
-extern void pfm_handle_work(void);
-extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
-extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
-
-
-
-/*
- * Reset PMD register flags
- */
-#define PFM_PMD_SHORT_RESET 0
-#define PFM_PMD_LONG_RESET 1
-
-typedef union {
- unsigned int val;
- struct {
- unsigned int notify_user:1; /* notify user program of overflow */
- unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
- unsigned int block_task:1; /* block monitored task on kernel exit */
- unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
- unsigned int reserved:28; /* for future use */
- } bits;
-} pfm_ovfl_ctrl_t;
-
-typedef struct {
- unsigned char ovfl_pmd; /* index of overflowed PMD */
- unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
- unsigned short active_set; /* event set active at the time of the overflow */
- pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
-
- unsigned long pmd_last_reset; /* last reset value of the PMD */
- unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
- unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
- unsigned long pmd_value; /* current 64-bit value of the PMD */
- unsigned long pmd_eventid; /* eventid associated with PMD */
-} pfm_ovfl_arg_t;
-
-
-typedef struct {
- char *fmt_name;
- pfm_uuid_t fmt_uuid;
- size_t fmt_arg_size;
- unsigned long fmt_flags;
-
- int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
- int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
- int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
- int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
- int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
- int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
- int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
-
- struct list_head fmt_list;
-} pfm_buffer_fmt_t;
-
-extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
-extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
-
-/*
- * perfmon interface exported to modules
- */
-extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
-extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
-
-/*
- * describe the content of the local_cpu_data->pfm_syst_info field
- */
-#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
-#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
-#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
-
-/*
- * sysctl control structure. visible to sampling formats
- */
-typedef struct {
- int debug; /* turn on/off debugging via syslog */
- int debug_ovfl; /* turn on/off debug printk in overflow handler */
- int fastctxsw; /* turn on/off fast (insecure) ctxsw */
- int expert_mode; /* turn on/off value checking */
-} pfm_sysctl_t;
-extern pfm_sysctl_t pfm_sysctl;
-
-
-#endif /* _ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/uapi/asm/perfmon.h b/arch/ia64/include/uapi/asm/perfmon.h
deleted file mode 100644
index 017548365e5c..000000000000
--- a/arch/ia64/include/uapi/asm/perfmon.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-#ifndef _UAPI_ASM_IA64_PERFMON_H
-#define _UAPI_ASM_IA64_PERFMON_H
-
-/*
- * perfmon commands supported on all CPU models
- */
-#define PFM_WRITE_PMCS 0x01
-#define PFM_WRITE_PMDS 0x02
-#define PFM_READ_PMDS 0x03
-#define PFM_STOP 0x04
-#define PFM_START 0x05
-#define PFM_ENABLE 0x06 /* obsolete */
-#define PFM_DISABLE 0x07 /* obsolete */
-#define PFM_CREATE_CONTEXT 0x08
-#define PFM_DESTROY_CONTEXT 0x09 /* obsolete, use close() */
-#define PFM_RESTART 0x0a
-#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
-#define PFM_GET_FEATURES 0x0c
-#define PFM_DEBUG 0x0d
-#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
-#define PFM_GET_PMC_RESET_VAL 0x0f
-#define PFM_LOAD_CONTEXT 0x10
-#define PFM_UNLOAD_CONTEXT 0x11
-
-/*
- * PMU model specific commands (may not be supported on all PMU models)
- */
-#define PFM_WRITE_IBRS 0x20
-#define PFM_WRITE_DBRS 0x21
-
-/*
- * context flags
- */
-#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
-#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
-#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
-
-/*
- * event set flags
- */
-#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
-
-/*
- * PMC flags
- */
-#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
-#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
-
-/*
- * PMD/PMC/IBR/DBR return flags (ignored on input)
- *
- * Those flags are used on output and must be checked in case EAGAIN is returned
- * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
- */
-#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
-#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
-#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
-
-#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
-
-typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
-
-/*
- * Request structure used to define a context
- */
-typedef struct {
- pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
- unsigned long ctx_flags; /* noblock/block */
- unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
- unsigned short ctx_reserved1; /* for future use */
- int ctx_fd; /* return arg: unique identification for context */
- void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, if used */
- unsigned long ctx_reserved2[11];/* for future use */
-} pfarg_context_t;
-
-/*
- * Request structure used to write/read a PMC or PMD
- */
-typedef struct {
- unsigned int reg_num; /* which register */
- unsigned short reg_set; /* event set for this register */
- unsigned short reg_reserved1; /* for future use */
-
- unsigned long reg_value; /* initial pmc/pmd value */
- unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
-
- unsigned long reg_long_reset; /* reset after buffer overflow notification */
- unsigned long reg_short_reset; /* reset after counter overflow */
-
- unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
- unsigned long reg_random_seed; /* seed value when randomization is used */
- unsigned long reg_random_mask; /* bitmask used to limit random value */
- unsigned long reg_last_reset_val;/* return: PMD last reset value */
-
- unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
- unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
-
- unsigned long reg_reserved2[3]; /* for future use */
-} pfarg_reg_t;
-
-typedef struct {
- unsigned int dbreg_num; /* which debug register */
- unsigned short dbreg_set; /* event set for this register */
- unsigned short dbreg_reserved1; /* for future use */
- unsigned long dbreg_value; /* value for debug register */
- unsigned long dbreg_flags; /* return: dbreg error */
- unsigned long dbreg_reserved2[1]; /* for future use */
-} pfarg_dbreg_t;
-
-typedef struct {
- unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
- unsigned int ft_reserved; /* reserved for future use */
- unsigned long reserved[4]; /* for future use */
-} pfarg_features_t;
-
-typedef struct {
- pid_t load_pid; /* process to load the context into */
- unsigned short load_set; /* first event set to load */
- unsigned short load_reserved1; /* for future use */
- unsigned long load_reserved2[3]; /* for future use */
-} pfarg_load_t;
-
-typedef struct {
- int msg_type; /* generic message header */
- int msg_ctx_fd; /* generic message header */
- unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
- unsigned short msg_active_set; /* active set at the time of overflow */
- unsigned short msg_reserved1; /* for future use */
- unsigned int msg_reserved2; /* for future use */
- unsigned long msg_tstamp; /* for perf tuning/debug */
-} pfm_ovfl_msg_t;
-
-typedef struct {
- int msg_type; /* generic message header */
- int msg_ctx_fd; /* generic message header */
- unsigned long msg_tstamp; /* for perf tuning */
-} pfm_end_msg_t;
-
-typedef struct {
- int msg_type; /* type of the message */
- int msg_ctx_fd; /* unique identifier for the context */
- unsigned long msg_tstamp; /* for perf tuning */
-} pfm_gen_msg_t;
-
-#define PFM_MSG_OVFL 1 /* an overflow happened */
-#define PFM_MSG_END 2 /* task to which context was attached ended */
-
-typedef union {
- pfm_ovfl_msg_t pfm_ovfl_msg;
- pfm_end_msg_t pfm_end_msg;
- pfm_gen_msg_t pfm_gen_msg;
-} pfm_msg_t;
-
-/*
- * Define the version numbers for both perfmon as a whole and the sampling buffer format.
- */
-#define PFM_VERSION_MAJ 2U
-#define PFM_VERSION_MIN 0U
-#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
-#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
-#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
-
-
-/*
- * miscellaneous architected definitions
- */
-#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
-#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
-#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
-
-
-#endif /* _UAPI_ASM_IA64_PERFMON_H */
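
The version macros above are a plain 16/16 major/minor split. A stand-alone demo, with the macros copied verbatim from the deleted header:

#include <stdio.h>

#define PFM_VERSION_MAJ		2U
#define PFM_VERSION_MIN		0U
#define PFM_VERSION		(((PFM_VERSION_MAJ & 0xffff) << 16) | \
				 (PFM_VERSION_MIN & 0xffff))
#define PFM_VERSION_MAJOR(x)	(((x) >> 16) & 0xffff)
#define PFM_VERSION_MINOR(x)	((x) & 0xffff)

int main(void)
{
	unsigned int v = PFM_VERSION;

	/* prints "0x20000 decodes to 2.0" */
	printf("0x%x decodes to %u.%u\n", v,
	       PFM_VERSION_MAJOR(v), PFM_VERSION_MINOR(v));
	return 0;
}
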
diff --git a/arch/ia64/include/uapi/asm/perfmon_default_smpl.h b/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
deleted file mode 100644
index d3f36aff0e1f..000000000000
--- a/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * This file implements the default sampling buffer format
- * for Linux/ia64 perfmon subsystem.
- */
-#ifndef __PERFMON_DEFAULT_SMPL_H__
-#define __PERFMON_DEFAULT_SMPL_H__ 1
-
-#define PFM_DEFAULT_SMPL_UUID { \
- 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
-
-/*
- * format specific parameters (passed at context creation)
- */
-typedef struct {
- unsigned long buf_size; /* size of the buffer in bytes */
- unsigned int flags; /* buffer specific flags */
- unsigned int res1; /* for future use */
- unsigned long reserved[2]; /* for future use */
-} pfm_default_smpl_arg_t;
-
-/*
- * combined context+format specific structure. Can be passed
- * to PFM_CONTEXT_CREATE
- */
-typedef struct {
- pfarg_context_t ctx_arg;
- pfm_default_smpl_arg_t buf_arg;
-} pfm_default_smpl_ctx_arg_t;
-
-/*
- * This header is at the beginning of the sampling buffer returned to the user.
- * It is directly followed by the first record.
- */
-typedef struct {
- unsigned long hdr_count; /* how many valid entries */
- unsigned long hdr_cur_offs; /* current offset from top of buffer */
- unsigned long hdr_reserved2; /* reserved for future use */
-
- unsigned long hdr_overflows; /* how many times the buffer overflowed */
- unsigned long hdr_buf_size; /* how many bytes in the buffer */
-
- unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
- unsigned int hdr_reserved1; /* for future use */
- unsigned long hdr_reserved[10]; /* for future use */
-} pfm_default_smpl_hdr_t;
-
-/*
- * Entry header in the sampling buffer. The header is directly followed
- * by the values of the PMD registers of interest, saved in increasing
- * index order: PMD4, PMD5, and so on. How many PMDs are present depends
- * on how the session was programmed.
- *
- * In the case where multiple counters overflow at the same time, multiple
- * entries are written consecutively.
- *
- * last_reset_value member indicates the initial value of the overflowed PMD.
- */
-typedef struct {
- int pid; /* thread id (for NPTL, this is gettid()) */
- unsigned char reserved1[3]; /* reserved for future use */
- unsigned char ovfl_pmd; /* index of overflowed PMD */
-
- unsigned long last_reset_val; /* initial value of overflowed PMD */
- unsigned long ip; /* where the overflow interrupt happened */
- unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
-
- unsigned short cpu; /* cpu on which the overflow occurred */
- unsigned short set; /* event set active when overflow occurred */
- int tgid; /* thread group id (for NPTL, this is getpid()) */
-} pfm_default_smpl_entry_t;
-
-#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds the data structures support (bits in an unsigned long) */
-#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
-#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
-
-#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
-#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
-#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
-
-#endif /* __PERFMON_DEFAULT_SMPL_H__ */
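
The sizing rule behind PFM_DEFAULT_SMPL_MIN_BUF_SIZE is that a buffer must hold the header plus one maximally sized entry (64 PMD values). A stand-alone sketch with the layouts copied from the deleted header; the printed byte counts assume an LP64 target such as ia64.

#include <stdio.h>

typedef struct {
	unsigned long hdr_count;
	unsigned long hdr_cur_offs;
	unsigned long hdr_reserved2;
	unsigned long hdr_overflows;
	unsigned long hdr_buf_size;
	unsigned int  hdr_version;
	unsigned int  hdr_reserved1;
	unsigned long hdr_reserved[10];
} pfm_default_smpl_hdr_t;

typedef struct {
	int            pid;
	unsigned char  reserved1[3];
	unsigned char  ovfl_pmd;
	unsigned long  last_reset_val;
	unsigned long  ip;
	unsigned long  tstamp;
	unsigned short cpu;
	unsigned short set;
	int            tgid;
} pfm_default_smpl_entry_t;

#define PFM_DEFAULT_MAX_PMDS		64
#define PFM_DEFAULT_MAX_ENTRY_SIZE	(sizeof(pfm_default_smpl_entry_t) + \
					 sizeof(unsigned long) * PFM_DEFAULT_MAX_PMDS)
#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE	(sizeof(pfm_default_smpl_hdr_t) + \
					 PFM_DEFAULT_MAX_ENTRY_SIZE)

int main(void)
{
	/* an entry is its fixed header plus up to 64 sampled PMD values */
	printf("header      : %zu bytes\n", sizeof(pfm_default_smpl_hdr_t));
	printf("max entry   : %zu bytes\n", PFM_DEFAULT_MAX_ENTRY_SIZE);
	printf("min buf size: %zu bytes\n", PFM_DEFAULT_SMPL_MIN_BUF_SIZE);
	return 0;
}
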
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 78fa6579c9ea..64189f04c1a4 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -648,46 +648,6 @@ static int version_info(struct seq_file *m)
return 0;
}
-static int perfmon_info(struct seq_file *m)
-{
- u64 pm_buffer[16];
- pal_perf_mon_info_u_t pm_info;
-
- if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
- return 0;
-
- seq_printf(m,
- "PMC/PMD pairs : %d\n"
- "Counter width : %d bits\n"
- "Cycle event number : %d\n"
- "Retired event number : %d\n"
- "Implemented PMC : ",
- pm_info.pal_perf_mon_info_s.generic,
- pm_info.pal_perf_mon_info_s.width,
- pm_info.pal_perf_mon_info_s.cycles,
- pm_info.pal_perf_mon_info_s.retired);
-
- bitregister_process(m, pm_buffer, 256);
- seq_puts(m, "\nImplemented PMD : ");
- bitregister_process(m, pm_buffer+4, 256);
- seq_puts(m, "\nCycles count capable : ");
- bitregister_process(m, pm_buffer+8, 256);
- seq_puts(m, "\nRetired bundles count capable : ");
-
-#ifdef CONFIG_ITANIUM
- /*
- * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES,
- * which is wrong: both PMC4 and PMD5 support it.
- */
- if (pm_buffer[12] == 0x10)
- pm_buffer[12]=0x30;
-#endif
-
- bitregister_process(m, pm_buffer+12, 256);
- seq_putc(m, '\n');
- return 0;
-}
-
static int frequency_info(struct seq_file *m)
{
struct pal_freq_ratio proc, itc, bus;
@@ -816,7 +776,6 @@ static const palinfo_entry_t palinfo_entries[]={
{ "power_info", power_info, },
{ "register_info", register_info, },
{ "processor_info", processor_info, },
- { "perfmon_info", perfmon_info, },
{ "frequency_info", frequency_info, },
{ "bus_info", bus_info },
{ "tr_info", tr_info, }
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
deleted file mode 100644
index a40c56020fc5..000000000000
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- *
- * This file implements the default sampling buffer format
- * for the Linux/ia64 perfmon-2 subsystem.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <asm/delay.h>
-#include <linux/smp.h>
-
-#include <asm/perfmon.h>
-#include <asm/perfmon_default_smpl.h>
-
-MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
-MODULE_DESCRIPTION("perfmon default sampling format");
-MODULE_LICENSE("GPL");
-
-#define DEFAULT_DEBUG 1
-
-#ifdef DEFAULT_DEBUG
-#define DPRINT(a) \
- do { \
- if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
- } while (0)
-
-#define DPRINT_ovfl(a) \
- do { \
- if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \
- } while (0)
-
-#else
-#define DPRINT(a)
-#define DPRINT_ovfl(a)
-#endif
-
-static int
-default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
-{
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t*)data;
- int ret = 0;
-
- if (data == NULL) {
- DPRINT(("[%d] no argument passed\n", task_pid_nr(task)));
- return -EINVAL;
- }
-
- DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu));
-
- /*
- * must hold at least the buffer header + one maximally sized entry
- */
- if (arg->buf_size < PFM_DEFAULT_SMPL_MIN_BUF_SIZE) return -EINVAL;
-
- DPRINT(("buf_size=%lu\n", arg->buf_size));
-
- return ret;
-}
-
-static int
-default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size)
-{
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
-
- /*
- * size has been validated in default_validate
- */
- *size = arg->buf_size;
-
- return 0;
-}
-
-static int
-default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data)
-{
- pfm_default_smpl_hdr_t *hdr;
- pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
-
- hdr->hdr_version = PFM_DEFAULT_SMPL_VERSION;
- hdr->hdr_buf_size = arg->buf_size;
- hdr->hdr_cur_offs = sizeof(*hdr);
- hdr->hdr_overflows = 0UL;
- hdr->hdr_count = 0UL;
-
- DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
- task_pid_nr(task),
- buf,
- hdr->hdr_buf_size,
- sizeof(*hdr),
- hdr->hdr_version,
- hdr->hdr_cur_offs));
-
- return 0;
-}
-
-static int
-default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp)
-{
- pfm_default_smpl_hdr_t *hdr;
- pfm_default_smpl_entry_t *ent;
- void *cur, *last;
- unsigned long *e, entry_size;
- unsigned int npmds, i;
- unsigned char ovfl_pmd;
- unsigned char ovfl_notify;
-
- if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) {
- DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
- return -EINVAL;
- }
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
- cur = buf+hdr->hdr_cur_offs;
- last = buf+hdr->hdr_buf_size;
- ovfl_pmd = arg->ovfl_pmd;
- ovfl_notify = arg->ovfl_notify;
-
- /*
- * precheck for sanity
- */
- if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
-
- npmds = hweight64(arg->smpl_pmds[0]);
-
- ent = (pfm_default_smpl_entry_t *)cur;
-
- prefetch(arg->smpl_pmds_values);
-
- entry_size = sizeof(*ent) + (npmds << 3);
-
- /* position for first pmd */
- e = (unsigned long *)(ent+1);
-
- hdr->hdr_count++;
-
- DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmd=%d ovfl_notify=%d npmds=%u\n",
- task->pid,
- hdr->hdr_count,
- cur, last,
- last-cur,
- ovfl_pmd,
- ovfl_notify, npmds));
-
- /*
- * current = task running at the time of the overflow.
- *
- * per-task mode:
- * - this is usually the task being monitored.
- * Under certain conditions, it might be a different task
- *
- * system-wide:
- * - this is not necessarily the task controlling the session
- */
- ent->pid = current->pid;
- ent->ovfl_pmd = ovfl_pmd;
- ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
-
- /*
- * where did the fault happen (includes slot number)
- */
- ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
-
- ent->tstamp = stamp;
- ent->cpu = smp_processor_id();
- ent->set = arg->active_set;
- ent->tgid = current->tgid;
-
- /*
- * selectively store PMDs in increasing index number
- */
- if (npmds) {
- unsigned long *val = arg->smpl_pmds_values;
- for(i=0; i < npmds; i++) {
- *e++ = *val++;
- }
- }
-
- /*
- * update position for next entry
- */
- hdr->hdr_cur_offs += entry_size;
- cur += entry_size;
-
- /*
- * post check to avoid losing the last sample
- */
- if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
-
- /*
- * keep same ovfl_pmds, ovfl_notify
- */
- arg->ovfl_ctrl.bits.notify_user = 0;
- arg->ovfl_ctrl.bits.block_task = 0;
- arg->ovfl_ctrl.bits.mask_monitoring = 0;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1; /* reset before returning from interrupt handler */
-
- return 0;
-full:
- DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=%d\n", last-cur, hdr->hdr_count, ovfl_notify));
-
- /*
- * increment the number of buffer overflows.
- * Important to detect duplicate sets of samples.
- */
- hdr->hdr_overflows++;
-
- /*
- * if no notification requested, then we saturate the buffer
- */
- if (ovfl_notify == 0) {
- arg->ovfl_ctrl.bits.notify_user = 0;
- arg->ovfl_ctrl.bits.block_task = 0;
- arg->ovfl_ctrl.bits.mask_monitoring = 1;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0;
- } else {
- arg->ovfl_ctrl.bits.notify_user = 1;
- arg->ovfl_ctrl.bits.block_task = 1; /* ignored for non-blocking context */
- arg->ovfl_ctrl.bits.mask_monitoring = 1;
- arg->ovfl_ctrl.bits.reset_ovfl_pmds = 0; /* no reset now */
- }
- return -1; /* we are full, sorry */
-}
-
-static int
-default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
-{
- pfm_default_smpl_hdr_t *hdr;
-
- hdr = (pfm_default_smpl_hdr_t *)buf;
-
- hdr->hdr_count = 0UL;
- hdr->hdr_cur_offs = sizeof(*hdr);
-
- ctrl->bits.mask_monitoring = 0;
- ctrl->bits.reset_ovfl_pmds = 1; /* uses long-reset values */
-
- return 0;
-}
-
-static int
-default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
-{
- DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf));
- return 0;
-}
-
-static pfm_buffer_fmt_t default_fmt={
- .fmt_name = "default_format",
- .fmt_uuid = PFM_DEFAULT_SMPL_UUID,
- .fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
- .fmt_validate = default_validate,
- .fmt_getsize = default_get_size,
- .fmt_init = default_init,
- .fmt_handler = default_handler,
- .fmt_restart = default_restart,
- .fmt_restart_active = default_restart,
- .fmt_exit = default_exit,
-};
-
-static int __init
-pfm_default_smpl_init_module(void)
-{
- int ret;
-
- ret = pfm_register_buffer_fmt(&default_fmt);
- if (ret == 0) {
- printk("perfmon_default_smpl: %s v%u.%u registered\n",
- default_fmt.fmt_name,
- PFM_DEFAULT_SMPL_VERSION_MAJ,
- PFM_DEFAULT_SMPL_VERSION_MIN);
- } else {
- printk("perfmon_default_smpl: %s cannot register ret=%d\n",
- default_fmt.fmt_name,
- ret);
- }
-
- return ret;
-}
-
-static void __exit
-pfm_default_smpl_cleanup_module(void)
-{
- int ret;
- ret = pfm_unregister_buffer_fmt(default_fmt.fmt_uuid);
-
- printk("perfmon_default_smpl: unregister %s=%d\n", default_fmt.fmt_name, ret);
-}
-
-module_init(pfm_default_smpl_init_module);
-module_exit(pfm_default_smpl_cleanup_module);
-
diff --git a/arch/ia64/kernel/perfmon_generic.h b/arch/ia64/kernel/perfmon_generic.h
deleted file mode 100644
index 96af4696cea9..000000000000
--- a/arch/ia64/kernel/perfmon_generic.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the generic PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-
-static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd3 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_gen={
- .pmu_name = "Generic",
- .pmu_family = 0xff, /* any */
- .ovfl_val = (1UL << 32) - 1,
- .num_ibrs = 0, /* does not use */
- .num_dbrs = 0, /* does not use */
- .pmd_desc = pfm_gen_pmd_desc,
- .pmc_desc = pfm_gen_pmc_desc
-};
-
diff --git a/arch/ia64/kernel/perfmon_itanium.h b/arch/ia64/kernel/perfmon_itanium.h
index f2d348648a03..dbd04028aafa 100644
--- a/arch/ia64/kernel/perfmon_itanium.h
+++ b/arch/ia64/kernel/perfmon_itanium.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains the Itanium PMU register description tables
- * and pmc checker used by perfmon.c.
+ * and pmc checker.
*
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
diff --git a/arch/ia64/kernel/perfmon_mckinley.h b/arch/ia64/kernel/perfmon_mckinley.h
deleted file mode 100644
index a993249e58bc..000000000000
--- a/arch/ia64/kernel/perfmon_mckinley.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the McKinley PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (C) 2002-2003 Hewlett Packard Co
- * Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0000000000800000UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc13 */ { PFM_REG_CONFIG , 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc14 */ { PFM_REG_CONFIG , 0, 0x0db60db60db60db6UL, 0x2492UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_mck_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd1 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
-/* pmd2 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd3 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
-/* pmd8 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd9 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd10 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd11 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd12 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd13 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd14 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd15 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd16 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
-/* pmd17 */ { PFM_REG_BUFFER , 0, 0x0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
- { PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * PMC reserved fields must have their power-up values preserved
- */
-static int
-pfm_mck_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- unsigned long tmp1, tmp2, ival = *val;
-
- /* remove reserved areas from user value */
- tmp1 = ival & PMC_RSVD_MASK(cnum);
-
- /* get reserved fields values */
- tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
-
- *val = tmp1 | tmp2;
-
- DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
- return 0;
-}
-
-/*
- * task can be NULL if the context is unloaded
- */
-static int
-pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret = 0, check_case1 = 0;
- unsigned long val8 = 0, val14 = 0, val13 = 0;
- int is_loaded;
-
- /* first preserve the reserved fields */
- pfm_mck_reserved(cnum, val, regs);
-
- /* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the debug registers if pmc13 has a value which enables
- * memory pipeline event constraints. In this case we need to clear
- * the debug registers if they have not yet been accessed. This is required
- * to avoid picking up stale state.
- * PMC13 is "active" if:
- * one of the pmc13.cfg_dbrpXX fields is different from 0x3
- * AND
- * the corresponding pmc13.ena_dbrpXX bit is set.
- */
- DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, *val, ctx->ctx_fl_using_dbreg, is_loaded));
-
- if (cnum == 13 && is_loaded
- && (*val & 0x1e00000000000UL) && (*val & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc13 settings, clearing dbr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- /*
- * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
- * before they have been used (fl_using_dbreg==0), to avoid picking up stale information.
- */
- if (cnum == 14 && is_loaded && ((*val & 0x2222UL) != 0x2222UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc14 settings, clearing ibr\n", cnum, *val));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
-
- }
-
- switch(cnum) {
- case 4: *val |= 1UL << 23; /* force power enable bit */
- break;
- case 8: val8 = *val;
- val13 = ctx->ctx_pmcs[13];
- val14 = ctx->ctx_pmcs[14];
- check_case1 = 1;
- break;
- case 13: val8 = ctx->ctx_pmcs[8];
- val13 = *val;
- val14 = ctx->ctx_pmcs[14];
- check_case1 = 1;
- break;
- case 14: val8 = ctx->ctx_pmcs[8];
- val13 = ctx->ctx_pmcs[13];
- val14 = *val;
- check_case1 = 1;
- break;
- }
- /* check for an illegal configuration which can produce inconsistencies in tagging
- * i-side events in the L1D and L2 caches
- */
- if (check_case1) {
- ret = ((val13 >> 45) & 0xf) == 0
- && ((val8 & 0x1) == 0)
- && ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
- ||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));
-
- if (ret) DPRINT((KERN_DEBUG "perfmon: failure check_case1\n"));
- }
-
- return ret ? -EINVAL : 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_mck={
- .pmu_name = "Itanium 2",
- .pmu_family = 0x1f,
- .flags = PFM_PMU_IRQ_RESEND,
- .ovfl_val = (1UL << 47) - 1,
- .pmd_desc = pfm_mck_pmd_desc,
- .pmc_desc = pfm_mck_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
-};
-
-
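
The reserved-field handling in pfm_mck_reserved() above is a two-mask merge: user-settable bits come from the caller, reserved bits keep their power-up defaults. A stand-alone demo; the mask and default below are made-up values, not real PMC constants.

#include <stdio.h>

#define RSVD_MASK 0x00000000ffffff00UL	/* hypothetical: bits the user may set */
#define DFL_VAL   0x0000000000800023UL	/* hypothetical power-up value */

static unsigned long apply_reserved(unsigned long user_val)
{
	unsigned long tmp1 = user_val & RSVD_MASK;	/* keep only user-settable bits */
	unsigned long tmp2 = DFL_VAL & ~RSVD_MASK;	/* reserved bits from the default */

	return tmp1 | tmp2;
}

int main(void)
{
	/* user tries to write all-ones; reserved bits are forced back */
	printf("0x%lx\n", apply_reserved(~0UL));	/* prints 0xffffff23 */
	return 0;
}
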
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
deleted file mode 100644
index c0b5b9110c88..000000000000
--- a/arch/ia64/kernel/perfmon_montecito.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file contains the Montecito PMU register description tables
- * and pmc checker used by perfmon.c.
- *
- * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
- * Contributed by Stephane Eranian <eranian@hpl.hp.com>
- */
-static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-
-#define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\
- RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63))
-#define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36))
-#define RDEP_MONT_IEAR (RDEP(34)|RDEP(35))
-
-static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
-/* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}},
-/* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}},
-/* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}},
-/* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}},
-/* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}},
-/* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}},
-/* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}},
-/* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}},
-/* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}},
-/* pmc16 */ { PFM_REG_NOTIMPL, },
-/* pmc17 */ { PFM_REG_NOTIMPL, },
-/* pmc18 */ { PFM_REG_NOTIMPL, },
-/* pmc19 */ { PFM_REG_NOTIMPL, },
-/* pmc20 */ { PFM_REG_NOTIMPL, },
-/* pmc21 */ { PFM_REG_NOTIMPL, },
-/* pmc22 */ { PFM_REG_NOTIMPL, },
-/* pmc23 */ { PFM_REG_NOTIMPL, },
-/* pmc24 */ { PFM_REG_NOTIMPL, },
-/* pmc25 */ { PFM_REG_NOTIMPL, },
-/* pmc26 */ { PFM_REG_NOTIMPL, },
-/* pmc27 */ { PFM_REG_NOTIMPL, },
-/* pmc28 */ { PFM_REG_NOTIMPL, },
-/* pmc29 */ { PFM_REG_NOTIMPL, },
-/* pmc30 */ { PFM_REG_NOTIMPL, },
-/* pmc31 */ { PFM_REG_NOTIMPL, },
-/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
-/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
-/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
-/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
- { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={
-/* pmd0 */ { PFM_REG_NOTIMPL, },
-/* pmd1 */ { PFM_REG_NOTIMPL, },
-/* pmd2 */ { PFM_REG_NOTIMPL, },
-/* pmd3 */ { PFM_REG_NOTIMPL, },
-/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}},
-/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}},
-/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}},
-/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}},
-/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}},
-/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}},
-/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}},
-/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}},
-/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}},
-/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}},
-/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}},
-/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}},
-/* pmd16 */ { PFM_REG_NOTIMPL, },
-/* pmd17 */ { PFM_REG_NOTIMPL, },
-/* pmd18 */ { PFM_REG_NOTIMPL, },
-/* pmd19 */ { PFM_REG_NOTIMPL, },
-/* pmd20 */ { PFM_REG_NOTIMPL, },
-/* pmd21 */ { PFM_REG_NOTIMPL, },
-/* pmd22 */ { PFM_REG_NOTIMPL, },
-/* pmd23 */ { PFM_REG_NOTIMPL, },
-/* pmd24 */ { PFM_REG_NOTIMPL, },
-/* pmd25 */ { PFM_REG_NOTIMPL, },
-/* pmd26 */ { PFM_REG_NOTIMPL, },
-/* pmd27 */ { PFM_REG_NOTIMPL, },
-/* pmd28 */ { PFM_REG_NOTIMPL, },
-/* pmd29 */ { PFM_REG_NOTIMPL, },
-/* pmd30 */ { PFM_REG_NOTIMPL, },
-/* pmd31 */ { PFM_REG_NOTIMPL, },
-/* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}},
-/* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}},
-/* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}},
-/* pmd37 */ { PFM_REG_NOTIMPL, },
-/* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd40 */ { PFM_REG_NOTIMPL, },
-/* pmd41 */ { PFM_REG_NOTIMPL, },
-/* pmd42 */ { PFM_REG_NOTIMPL, },
-/* pmd43 */ { PFM_REG_NOTIMPL, },
-/* pmd44 */ { PFM_REG_NOTIMPL, },
-/* pmd45 */ { PFM_REG_NOTIMPL, },
-/* pmd46 */ { PFM_REG_NOTIMPL, },
-/* pmd47 */ { PFM_REG_NOTIMPL, },
-/* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
-/* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}},
- { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
-};
-
-/*
- * PMC reserved fields must have their power-up values preserved
- */
-static int
-pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- unsigned long tmp1, tmp2, ival = *val;
-
- /* remove reserved areas from user value */
- tmp1 = ival & PMC_RSVD_MASK(cnum);
-
- /* get reserved fields values */
- tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum);
-
- *val = tmp1 | tmp2;
-
- DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
- return 0;
-}
-
-/*
- * task can be NULL if the context is unloaded
- */
-static int
-pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
-{
- int ret = 0;
- unsigned long val32 = 0, val38 = 0, val41 = 0;
- unsigned long tmpval;
- int check_case1 = 0;
- int is_loaded;
-
- /* first preserve the reserved fields */
- pfm_mont_reserved(cnum, val, regs);
-
- tmpval = *val;
-
- /* sanity check */
- if (ctx == NULL) return -EINVAL;
-
- is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED;
-
- /*
- * we must clear the debug registers if pmc41 has a value which enables
- * memory pipeline event constraints. In this case we need to clear
- * the debug registers if they have not yet been accessed. This is required
- * to avoid picking up stale state.
- * PMC41 is "active" if:
- * one of the pmc41.cfg_dtagXX fields is different from 0x3
- * AND
- * the corresponding pmc41.en_dbrpXX bit is set.
- * AND
- * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
- */
- DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
-
- if (cnum == 41 && is_loaded
- && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
- }
- /*
- * we must clear the (instruction) debug registers if:
- * pmc38.ig_ibrpX is 0 (enabled)
- * AND
- * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used)
- */
- if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) {
-
- DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval));
-
- /* don't mix debug with perfmon */
- if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
-
- /*
- * a count of 0 will mark the debug registers as in use and also
- * ensure that they are properly cleared.
- */
- ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs);
- if (ret) return ret;
-
- }
- switch(cnum) {
- case 32: val32 = *val;
- val38 = ctx->ctx_pmcs[38];
- val41 = ctx->ctx_pmcs[41];
- check_case1 = 1;
- break;
- case 38: val38 = *val;
- val32 = ctx->ctx_pmcs[32];
- val41 = ctx->ctx_pmcs[41];
- check_case1 = 1;
- break;
- case 41: val41 = *val;
- val32 = ctx->ctx_pmcs[32];
- val38 = ctx->ctx_pmcs[38];
- check_case1 = 1;
- break;
- }
- /* check for an illegal configuration which can produce inconsistencies in tagging
- * i-side events in the L1D and L2 caches
- */
- if (check_case1) {
- ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0)
- && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0)
- || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0));
- if (ret) {
- DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32));
- return -EINVAL;
- }
- }
- *val = tmpval;
- return 0;
-}
-
-/*
- * impl_pmcs, impl_pmds are computed at runtime to minimize errors!
- */
-static pmu_config_t pmu_conf_mont={
- .pmu_name = "Montecito",
- .pmu_family = 0x20,
- .flags = PFM_PMU_IRQ_RESEND,
- .ovfl_val = (1UL << 47) - 1,
- .pmd_desc = pfm_mont_pmd_desc,
- .pmc_desc = pfm_mont_pmc_desc,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
-};
diff --git a/arch/ia64/oprofile/Makefile b/arch/ia64/oprofile/Makefile
deleted file mode 100644
index fc7944d462f4..000000000000
--- a/arch/ia64/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
deleted file mode 100644
index 6a219a946050..000000000000
--- a/arch/ia64/oprofile/backtrace.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * @file backtrace.c
- *
- * @remark Copyright 2004 Silicon Graphics Inc. All Rights Reserved.
- * @remark Read the file COPYING
- *
- * @author Greg Banks <gnb@melbourne.sgi.com>
- * @author Keith Owens <kaos@melbourne.sgi.com>
- * Based on work done for the ia64 port of the SGI kernprof patch, which is
- * Copyright (c) 2003-2004 Silicon Graphics Inc. All Rights Reserved.
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/ptrace.h>
-
-/*
- * For IA64 we need to perform a complex little dance to get both
- * the struct pt_regs and a synthetic struct switch_stack in place
- * to allow the unwind code to work. This dance requires our
- * unwind-using code to be called from a function called from unw_init_running().
- * There we only get a single void* data pointer, so use this struct
- * to hold all the data we need during the unwind.
- */
-typedef struct
-{
- unsigned int depth;
- struct pt_regs *regs;
- struct unw_frame_info frame;
- unsigned long *prev_pfs_loc; /* state for WAR for old spinlock ool code */
-} ia64_backtrace_t;
-
-/* Returns non-zero if the PC is in the Interrupt Vector Table */
-static __inline__ int in_ivt_code(unsigned long pc)
-{
- extern char ia64_ivt[];
- return (pc >= (u_long)ia64_ivt && pc < (u_long)ia64_ivt+32768);
-}
-
-/*
- * Unwind to next stack frame.
- */
-static __inline__ int next_frame(ia64_backtrace_t *bt)
-{
- /*
- * Avoid unsightly console message from unw_unwind() when attempting
- * to unwind through the Interrupt Vector Table which has no unwind
- * information.
- */
- if (in_ivt_code(bt->frame.ip))
- return 0;
-
- /*
- * WAR for spinlock contention from leaf functions. ia64_spinlock_contention_pre3_4
- * has ar.pfs == r0. Leaf functions do not modify ar.pfs so ar.pfs remains
- * as 0, stopping the backtrace. Record the previous ar.pfs when the current
- * IP is in ia64_spinlock_contention_pre3_4, then unwind; if pfs_loc has not changed
- * after the unwind, use pt_regs.ar_pfs, which is where the real ar.pfs is for
- * leaf functions.
- */
- if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
- bt->frame.pfs_loc = &bt->regs->ar_pfs;
- bt->prev_pfs_loc = NULL;
-
- return unw_unwind(&bt->frame) == 0;
-}
-
-
-static void do_ia64_backtrace(struct unw_frame_info *info, void *vdata)
-{
- ia64_backtrace_t *bt = vdata;
- struct switch_stack *sw;
- int count = 0;
- u_long pc, sp;
-
- sw = (struct switch_stack *)(info+1);
- /* padding from unw_init_running */
- sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);
-
- unw_init_frame_info(&bt->frame, current, sw);
-
- /* skip over interrupt frame and oprofile calls */
- do {
- unw_get_sp(&bt->frame, &sp);
- if (sp >= (u_long)bt->regs)
- break;
- if (!next_frame(bt))
- return;
- } while (count++ < 200);
-
- /* finally, grab the actual sample */
- while (bt->depth-- && next_frame(bt)) {
- unw_get_ip(&bt->frame, &pc);
- oprofile_add_trace(pc);
- if (unw_is_intr_frame(&bt->frame)) {
- /*
- * Interrupt received on kernel stack; this can
- * happen when timer interrupt fires while processing
- * a softirq from the tail end of a hardware interrupt
- * which interrupted a system call. Don't laugh, it
- * happens! Splice the backtrace into two parts to
- * avoid spurious cycles in the gprof output.
- */
- /* TODO: split rather than drop the 2nd half */
- break;
- }
- }
-}
-
-void
-ia64_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- ia64_backtrace_t bt;
- unsigned long flags;
-
- /*
- * On IA64 there is little hope of getting backtraces from
- * user space programs -- the problems of getting the unwind
- * information from arbitrary user programs are extreme.
- */
- if (user_mode(regs))
- return;
-
- bt.depth = depth;
- bt.regs = regs;
- bt.prev_pfs_loc = NULL;
- local_irq_save(flags);
- unw_init_running(do_ia64_backtrace, &bt);
- local_irq_restore(flags);
-}
diff --git a/arch/ia64/oprofile/init.c b/arch/ia64/oprofile/init.c
deleted file mode 100644
index a692ba16a07b..000000000000
--- a/arch/ia64/oprofile/init.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-
-extern int perfmon_init(struct oprofile_operations *ops);
-extern void perfmon_exit(void);
-extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = ia64_backtrace;
- return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f82795592ce5..25a5a3fb14aa 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -30,7 +30,6 @@ config MICROBLAZE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
- select HAVE_OPROFILE
select HAVE_PCI
select IRQ_DOMAIN
select XILINX_INTC
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index bb980891816d..b41f323e1fde 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -54,8 +54,6 @@ core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-$(CONFIG_PCI) += arch/microblaze/pci/
-drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
-
boot := arch/microblaze/boot
# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
diff --git a/arch/microblaze/oprofile/Makefile b/arch/microblaze/oprofile/Makefile
deleted file mode 100644
index 107f2f55d995..000000000000
--- a/arch/microblaze/oprofile/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# arch/microblaze/oprofile/Makefile
-#
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) microblaze_oprofile.o
diff --git a/arch/microblaze/oprofile/microblaze_oprofile.c b/arch/microblaze/oprofile/microblaze_oprofile.c
deleted file mode 100644
index def17e59888e..000000000000
--- a/arch/microblaze/oprofile/microblaze_oprofile.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Microblaze oprofile code
- *
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2009 PetaLogix
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- return -1;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f29ec95e3458..465bc5425d4c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -74,7 +74,6 @@ config MIPS
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
@@ -2845,7 +2844,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON64)
+ depends on PERF_EVENTS && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON64)
default y
help
Enable hardware performance counter support for perf events. If
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index cd4343edeb11..f62a6d951d3c 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -316,7 +316,6 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
core-y += arch/mips/
drivers-y += arch/mips/crypto/
-drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/mips/power/
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 023b4e644b1c..5c24ac7fdf56 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -22,7 +22,6 @@ CONFIG_MIPS32_N32=y
# CONFIG_SUSPEND is not set
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION="/dev/sda3"
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 7b1fab518317..1ae48f7d9ddd 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -14,7 +14,6 @@ CONFIG_SGI_IP32=y
CONFIG_PCI=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 688c91918db2..aaf9d5e0aa2c 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -21,7 +21,6 @@ CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION="/dev/hda3"
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index bbe0f39f8088..205d3b34528c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -17,7 +17,6 @@ CONFIG_PCCARD=m
CONFIG_YENTA=m
CONFIG_PD6729=m
CONFIG_I82092=m
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig
index 4f540bb94628..7ce3b814fdc8 100644
--- a/arch/mips/configs/rs90_defconfig
+++ b/arch/mips/configs/rs90_defconfig
@@ -30,7 +30,6 @@ CONFIG_PM=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPUFREQ_DT=y
-CONFIG_OPROFILE=y
CONFIG_JUMP_LABEL=y
# CONFIG_STACKPROTECTOR is not set
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/mips/include/asm/mach-loongson2ef/loongson.h b/arch/mips/include/asm/mach-loongson2ef/loongson.h
index 57e571128489..ca039b8dcde3 100644
--- a/arch/mips/include/asm/mach-loongson2ef/loongson.h
+++ b/arch/mips/include/asm/mach-loongson2ef/loongson.h
@@ -56,15 +56,6 @@ extern int mach_i8259_irq(void);
(*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
#define LOONGSON_IRQ_BASE 32
-#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
-
-#include <linux/interrupt.h>
-static inline void do_perfcnt_IRQ(void)
-{
-#if IS_ENABLED(CONFIG_OPROFILE)
- do_IRQ(LOONGSON2_PERFCNT_IRQ);
-#endif
-}
#define LOONGSON_FLASH_BASE 0x1c000000
#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
diff --git a/arch/mips/loongson2ef/fuloong-2e/irq.c b/arch/mips/loongson2ef/fuloong-2e/irq.c
index 305aa2eb74ad..b1c9d4ee0335 100644
--- a/arch/mips/loongson2ef/fuloong-2e/irq.c
+++ b/arch/mips/loongson2ef/fuloong-2e/irq.c
@@ -26,7 +26,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
if (pending & CAUSEF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & CAUSEF_IP6) /* perf counter overflow */
- do_perfcnt_IRQ();
+ return;
else if (pending & CAUSEF_IP5)
i8259_irqdispatch();
else if (pending & CAUSEF_IP2)
diff --git a/arch/mips/loongson2ef/lemote-2f/irq.c b/arch/mips/loongson2ef/lemote-2f/irq.c
index 6f00579971a3..f5a731a2a35f 100644
--- a/arch/mips/loongson2ef/lemote-2f/irq.c
+++ b/arch/mips/loongson2ef/lemote-2f/irq.c
@@ -75,7 +75,6 @@ void mach_irq_dispatch(unsigned int pending)
if (pending & CAUSEF_IP7)
do_IRQ(LOONGSON_TIMER_IRQ);
else if (pending & CAUSEF_IP6) { /* North Bridge, Perf counter */
- do_perfcnt_IRQ();
bonito_irqdispatch();
} else if (pending & CAUSEF_IP3) /* CPU UART */
do_IRQ(LOONGSON_UART_IRQ);
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
deleted file mode 100644
index e10f216d0422..000000000000
--- a/arch/mips/oprofile/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-
-oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
-oprofile-$(CONFIG_CPU_LOONGSON2EF) += op_model_loongson2.o
-oprofile-$(CONFIG_CPU_LOONGSON64) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
deleted file mode 100644
index 07d98ba7f49e..000000000000
--- a/arch/mips/oprofile/backtrace.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-#include <linux/stacktrace.h>
-#include <linux/kernel.h>
-#include <asm/sections.h>
-#include <asm/inst.h>
-
-struct stackframe {
- unsigned long sp;
- unsigned long pc;
- unsigned long ra;
-};
-
-static inline int get_mem(unsigned long addr, unsigned long *result)
-{
- unsigned long *address = (unsigned long *) addr;
- if (!access_ok(address, sizeof(unsigned long)))
- return -1;
- if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
- return -3;
- return 0;
-}
-
-/*
- * These two instruction helpers were taken from process.c
- */
-static inline int is_ra_save_ins(union mips_instruction *ip)
-{
- /* sw / sd $ra, offset($sp) */
- return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
- && ip->i_format.rs == 29 && ip->i_format.rt == 31;
-}
-
-static inline int is_sp_move_ins(union mips_instruction *ip)
-{
- /* addiu/daddiu sp,sp,-imm */
- if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
- return 0;
- if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
- return 1;
- return 0;
-}
-
-/*
- * Looks for instructions that mark the end of a function; hitting one
- * while scanning backwards means we ran past the start of the current
- * function into the code area of the preceding one.
- */
-static inline int is_end_of_function_marker(union mips_instruction *ip)
-{
- /* jr ra */
- if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
- return 1;
- /* lui gp */
- if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
- return 1;
- return 0;
-}
-
-/*
- * TODO for userspace stack unwinding:
- * - handle cases where the stack is adjusted inside a function
- * (generally doesn't happen)
- * - find optimal value for max_instr_check
- * - try to find a better way to handle leaf functions
- */
-
-static inline int unwind_user_frame(struct stackframe *old_frame,
- const unsigned int max_instr_check)
-{
- struct stackframe new_frame = *old_frame;
- off_t ra_offset = 0;
- size_t stack_size = 0;
- unsigned long addr;
-
- if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
- return -9;
-
- for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
- && (!ra_offset || !stack_size); --addr) {
- union mips_instruction ip;
-
- if (get_mem(addr, (unsigned long *) &ip))
- return -11;
-
- if (is_sp_move_ins(&ip)) {
- int stack_adjustment = ip.i_format.simmediate;
- if (stack_adjustment > 0)
- /* This marks the end of the previous function,
- which means we overran. */
- break;
- stack_size = (unsigned long) stack_adjustment;
- } else if (is_ra_save_ins(&ip)) {
- int ra_slot = ip.i_format.simmediate;
- if (ra_slot < 0)
- /* This shouldn't happen. */
- break;
- ra_offset = ra_slot;
- } else if (is_end_of_function_marker(&ip))
- break;
- }
-
- if (!ra_offset || !stack_size)
- goto done;
-
- if (ra_offset) {
- new_frame.ra = old_frame->sp + ra_offset;
- if (get_mem(new_frame.ra, &(new_frame.ra)))
- return -13;
- }
-
- if (stack_size) {
- new_frame.sp = old_frame->sp + stack_size;
- if (get_mem(new_frame.sp, &(new_frame.sp)))
- return -14;
- }
-
- if (new_frame.sp > old_frame->sp)
- return -2;
-
-done:
- new_frame.pc = old_frame->ra;
- *old_frame = new_frame;
-
- return 0;
-}
-
-static inline void do_user_backtrace(unsigned long low_addr,
- struct stackframe *frame,
- unsigned int depth)
-{
- const unsigned int max_instr_check = 512;
- const unsigned long high_addr = low_addr + THREAD_SIZE;
-
- while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
- oprofile_add_trace(frame->ra);
- if (frame->sp < low_addr || frame->sp > high_addr)
- break;
- }
-}
-
-#ifndef CONFIG_KALLSYMS
-static inline void do_kernel_backtrace(unsigned long low_addr,
- struct stackframe *frame,
- unsigned int depth) { }
-#else
-static inline void do_kernel_backtrace(unsigned long low_addr,
- struct stackframe *frame,
- unsigned int depth)
-{
- while (depth-- && frame->pc) {
- frame->pc = unwind_stack_by_address(low_addr,
- &(frame->sp),
- frame->pc,
- &(frame->ra));
- oprofile_add_trace(frame->ra);
- }
-}
-#endif
-
-void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
-{
- struct stackframe frame = { .sp = regs->regs[29],
- .pc = regs->cp0_epc,
- .ra = regs->regs[31] };
- const int userspace = user_mode(regs);
- const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
-
- if (userspace)
- do_user_backtrace(low_addr, &frame, depth);
- else
- do_kernel_backtrace(low_addr, &frame, depth);
-}
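
The deleted unwinder recovers a user frame by scanning from the sampled pc for two prologue instructions: the addiu/daddiu sp,sp,-imm that allocated the frame and the sw/sd $ra,off($sp) that spilled the return address. A self-contained sketch of the same field decoding, assuming standard MIPS32 I-format encodings (opcode 9 for addiu, 43 for sw; register 29 is $sp, 31 is $ra); the kernel loop walks backwards from the pc, while for clarity this sketch scans a captured prologue forwards:

#include <stdio.h>
#include <stdint.h>

#define OP(i)   ((i) >> 26)
#define RS(i)   (((i) >> 21) & 0x1f)
#define RT(i)   (((i) >> 16) & 0x1f)
#define SIMM(i) ((int16_t)((i) & 0xffff))

/* Find "addiu sp,sp,-imm" and "sw ra,off(sp)", mirroring the checks
 * in is_sp_move_ins() and is_ra_save_ins() above. */
static int scan_prologue(const uint32_t *insn, int n,
			 int *frame_size, int *ra_offset)
{
	for (int i = 0; i < n; i++) {
		uint32_t ip = insn[i];

		if (OP(ip) == 9 && RS(ip) == 29 && RT(ip) == 29 && SIMM(ip) < 0)
			*frame_size = -SIMM(ip);	/* addiu sp,sp,-imm */
		else if (OP(ip) == 43 && RS(ip) == 29 && RT(ip) == 31)
			*ra_offset = SIMM(ip);		/* sw ra,off(sp) */
	}
	return *frame_size && *ra_offset;
}

int main(void)
{
	/* addiu sp,sp,-32 ; sw ra,28(sp) -- hand-assembled for illustration */
	uint32_t prologue[] = { 0x27bdffe0, 0xafbf001c };
	int fsz = 0, ra = 0;

	if (scan_prologue(prologue, 2, &fsz, &ra))
		printf("frame size %d, ra saved at sp+%d\n", fsz, ra);
	return 0;
}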
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
deleted file mode 100644
index d3996c4c6440..000000000000
--- a/arch/mips/oprofile/common.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 2005 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc.
- */
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/cpu-info.h>
-#include <asm/cpu-type.h>
-
-#include "op_impl.h"
-
-extern struct op_mips_model op_model_mipsxx_ops __weak;
-extern struct op_mips_model op_model_loongson2_ops __weak;
-extern struct op_mips_model op_model_loongson3_ops __weak;
-
-static struct op_mips_model *model;
-
-static struct op_counter_config ctr[20];
-
-static int op_mips_setup(void)
-{
- /* Pre-compute the values to stuff in the hardware registers. */
- model->reg_setup(ctr);
-
- /* Configure the registers on all cpus. */
- on_each_cpu(model->cpu_setup, NULL, 1);
-
- return 0;
-}
-
-static int op_mips_create_files(struct dentry *root)
-{
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
- /* Dummy. */
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- return 0;
-}
-
-static int op_mips_start(void)
-{
- on_each_cpu(model->cpu_start, NULL, 1);
-
- return 0;
-}
-
-static void op_mips_stop(void)
-{
- /* Disable performance monitoring for all counters. */
- on_each_cpu(model->cpu_stop, NULL, 1);
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- struct op_mips_model *lmodel = NULL;
- int res;
-
- switch (boot_cpu_type()) {
- case CPU_5KC:
- case CPU_M14KC:
- case CPU_M14KEC:
- case CPU_20KC:
- case CPU_24K:
- case CPU_25KF:
- case CPU_34K:
- case CPU_1004K:
- case CPU_74K:
- case CPU_1074K:
- case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- case CPU_P5600:
- case CPU_I6400:
- case CPU_M5150:
- case CPU_LOONGSON32:
- case CPU_SB1:
- case CPU_SB1A:
- case CPU_R10000:
- case CPU_R12000:
- case CPU_R14000:
- case CPU_R16000:
- case CPU_XLR:
- lmodel = &op_model_mipsxx_ops;
- break;
-
- case CPU_LOONGSON2EF:
- lmodel = &op_model_loongson2_ops;
- break;
- case CPU_LOONGSON64:
- lmodel = &op_model_loongson3_ops;
- break;
- }
-
- /*
- * Always set the backtrace. This allows unsupported CPU types to still
- * use timer-based oprofile.
- */
- ops->backtrace = op_mips_backtrace;
-
- if (!lmodel)
- return -ENODEV;
-
- res = lmodel->init();
- if (res)
- return res;
-
- model = lmodel;
-
- ops->create_files = op_mips_create_files;
- ops->setup = op_mips_setup;
- //ops->shutdown = op_mips_shutdown;
- ops->start = op_mips_start;
- ops->stop = op_mips_stop;
- ops->cpu_type = lmodel->cpu_type;
-
- printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
- lmodel->cpu_type);
-
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
- if (model)
- model->exit();
-}
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
deleted file mode 100644
index a4e758a39af4..000000000000
--- a/arch/mips/oprofile/op_impl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * @file arch/mips/oprofile/op_impl.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Richard Henderson <rth@twiddle.net>
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-extern int (*perf_irq)(void);
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Dummies because I am too lazy to hack the userspace tools. */
- unsigned long kernel;
- unsigned long user;
- unsigned long exl;
- unsigned long unit_mask;
-};
-
-/* Per-architecture configure and hooks. */
-struct op_mips_model {
- void (*reg_setup) (struct op_counter_config *);
- void (*cpu_setup) (void *dummy);
- int (*init)(void);
- void (*exit)(void);
- void (*cpu_start)(void *args);
- void (*cpu_stop)(void *args);
- char *cpu_type;
- unsigned char num_counters;
-};
-
-void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
deleted file mode 100644
index b249ec0bebb2..000000000000
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Loongson2 performance counter driver for oprofile
- *
- * Copyright (C) 2009 Lemote Inc.
- * Author: Yanhua <yanh@lemote.com>
- * Author: Wu Zhangjin <wuzhangjin@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-
-#include <loongson.h> /* LOONGSON2_PERFCNT_IRQ */
-#include "op_impl.h"
-
-#define LOONGSON2_CPU_TYPE "mips/loongson2"
-
-#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
-
-#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
-#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
-#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
-#define LOONGSON2_PERFCTRL_USER (1UL << 3)
-#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
-#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
- (((event) & 0x0f) << ((idx) ? 9 : 5))
-
-#define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
-#define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
-#define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
-#define write_c0_perfcnt(val) __write_64bit_c0_register($25, 0, val)
-
-static struct loongson2_register_config {
- unsigned int ctrl;
- unsigned long long reset_counter1;
- unsigned long long reset_counter2;
- int cnt1_enabled, cnt2_enabled;
-} reg;
-
-static char *oprofid = "LoongsonPerf";
-static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
-
-static void reset_counters(void *arg)
-{
- write_c0_perfctrl(0);
- write_c0_perfcnt(0);
-}
-
-static void loongson2_reg_setup(struct op_counter_config *cfg)
-{
- unsigned int ctrl = 0;
-
- reg.reset_counter1 = 0;
- reg.reset_counter2 = 0;
-
- /*
- * Compute the performance counter ctrl word.
- * For now, count kernel and user mode.
- */
- if (cfg[0].enabled) {
- ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
- reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
- }
-
- if (cfg[1].enabled) {
- ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
- reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
- }
-
- if (cfg[0].enabled || cfg[1].enabled) {
- ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
- if (cfg[0].kernel || cfg[1].kernel)
- ctrl |= LOONGSON2_PERFCTRL_KERNEL;
- if (cfg[0].user || cfg[1].user)
- ctrl |= LOONGSON2_PERFCTRL_USER;
- }
-
- reg.ctrl = ctrl;
-
- reg.cnt1_enabled = cfg[0].enabled;
- reg.cnt2_enabled = cfg[1].enabled;
-}
-
-static void loongson2_cpu_setup(void *args)
-{
- write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
-}
-
-static void loongson2_cpu_start(void *args)
-{
- /* Start all counters on current CPU */
- if (reg.cnt1_enabled || reg.cnt2_enabled)
- write_c0_perfctrl(reg.ctrl);
-}
-
-static void loongson2_cpu_stop(void *args)
-{
- /* Stop all counters on current CPU */
- write_c0_perfctrl(0);
- memset(&reg, 0, sizeof(reg));
-}
-
-static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
-{
- uint64_t counter, counter1, counter2;
- struct pt_regs *regs = get_irq_regs();
- int enabled;
-
- /* Check whether the irq belongs to me */
- enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
- if (!enabled)
- return IRQ_NONE;
- enabled = reg.cnt1_enabled | reg.cnt2_enabled;
- if (!enabled)
- return IRQ_NONE;
-
- counter = read_c0_perfcnt();
- counter1 = counter & 0xffffffff;
- counter2 = counter >> 32;
-
- if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
- if (reg.cnt1_enabled)
- oprofile_add_sample(regs, 0);
- counter1 = reg.reset_counter1;
- }
- if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
- if (reg.cnt2_enabled)
- oprofile_add_sample(regs, 1);
- counter2 = reg.reset_counter2;
- }
-
- write_c0_perfcnt((counter2 << 32) | counter1);
-
- return IRQ_HANDLED;
-}
-
-static int __init loongson2_init(void)
-{
- return request_irq(LOONGSON2_PERFCNT_IRQ, loongson2_perfcount_handler,
- IRQF_SHARED, "Perfcounter", oprofid);
-}
-
-static void loongson2_exit(void)
-{
- reset_counters(NULL);
- free_irq(LOONGSON2_PERFCNT_IRQ, oprofid);
-}
-
-struct op_mips_model op_model_loongson2_ops = {
- .reg_setup = loongson2_reg_setup,
- .cpu_setup = loongson2_cpu_setup,
- .init = loongson2_init,
- .exit = loongson2_exit,
- .cpu_start = loongson2_cpu_start,
- .cpu_stop = loongson2_cpu_stop,
- .cpu_type = LOONGSON2_CPU_TYPE,
- .num_counters = 2
-};
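
The Loongson-2 driver above uses the usual preload trick: to take an interrupt every count events, the 32-bit counter is seeded with 0x80000000 - count, so the overflow bit (bit 31) sets after exactly count increments, and the handler re-seeds with the same reset value. A small, purely illustrative simulation of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define OVERFLOW_BIT (1ULL << 31)

int main(void)
{
	uint64_t count = 1000;			/* desired sample period */
	uint64_t counter = 0x80000000ULL - count;	/* preload, as in reg_setup */
	uint64_t events = 0;

	while (!(counter & OVERFLOW_BIT)) {	/* hardware increments... */
		counter++;
		events++;
	}
	printf("overflow after %llu events\n",	/* prints 1000 */
	       (unsigned long long)events);

	counter = 0x80000000ULL - count;	/* handler re-arms the counter */
	return 0;
}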
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
deleted file mode 100644
index 436b1fc99f2c..000000000000
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/proc_fs.h>
-#include <linux/oprofile.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/uaccess.h>
-#include <irq.h>
-#include <loongson.h>
-#include "op_impl.h"
-
-#define LOONGSON3_PERFCNT_OVERFLOW (1ULL << 63)
-
-#define LOONGSON3_PERFCTRL_EXL (1UL << 0)
-#define LOONGSON3_PERFCTRL_KERNEL (1UL << 1)
-#define LOONGSON3_PERFCTRL_SUPERVISOR (1UL << 2)
-#define LOONGSON3_PERFCTRL_USER (1UL << 3)
-#define LOONGSON3_PERFCTRL_ENABLE (1UL << 4)
-#define LOONGSON3_PERFCTRL_W (1UL << 30)
-#define LOONGSON3_PERFCTRL_M (1UL << 31)
-#define LOONGSON3_PERFCTRL_EVENT(idx, event) \
- (((event) & (idx ? 0x0f : 0x3f)) << 5)
-
-/* Loongson-3 PerfCount performance counter1 register */
-#define read_c0_perflo1() __read_64bit_c0_register($25, 0)
-#define write_c0_perflo1(val) __write_64bit_c0_register($25, 0, val)
-#define read_c0_perfhi1() __read_64bit_c0_register($25, 1)
-#define write_c0_perfhi1(val) __write_64bit_c0_register($25, 1, val)
-
-/* Loongson-3 PerfCount performance counter2 register */
-#define read_c0_perflo2() __read_64bit_c0_register($25, 2)
-#define write_c0_perflo2(val) __write_64bit_c0_register($25, 2, val)
-#define read_c0_perfhi2() __read_64bit_c0_register($25, 3)
-#define write_c0_perfhi2(val) __write_64bit_c0_register($25, 3, val)
-
-static int (*save_perf_irq)(void);
-
-static struct loongson3_register_config {
- unsigned int control1;
- unsigned int control2;
- unsigned long long reset_counter1;
- unsigned long long reset_counter2;
- int ctr1_enable, ctr2_enable;
-} reg;
-
-static void reset_counters(void *arg)
-{
- write_c0_perfhi1(0);
- write_c0_perfhi2(0);
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
-}
-
-/* Compute all of the registers in preparation for enabling profiling. */
-static void loongson3_reg_setup(struct op_counter_config *ctr)
-{
- unsigned int control1 = 0;
- unsigned int control2 = 0;
-
- reg.reset_counter1 = 0;
- reg.reset_counter2 = 0;
- /* Compute the performance counter control word. */
- /* For now count kernel and user mode */
- if (ctr[0].enabled) {
- control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
- LOONGSON3_PERFCTRL_ENABLE;
- if (ctr[0].kernel)
- control1 |= LOONGSON3_PERFCTRL_KERNEL;
- if (ctr[0].user)
- control1 |= LOONGSON3_PERFCTRL_USER;
- reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count;
- }
-
- if (ctr[1].enabled) {
- control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
- LOONGSON3_PERFCTRL_ENABLE;
- if (ctr[1].kernel)
- control2 |= LOONGSON3_PERFCTRL_KERNEL;
- if (ctr[1].user)
- control2 |= LOONGSON3_PERFCTRL_USER;
- reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count;
- }
-
- if (ctr[0].enabled)
- control1 |= LOONGSON3_PERFCTRL_EXL;
- if (ctr[1].enabled)
- control2 |= LOONGSON3_PERFCTRL_EXL;
-
- reg.control1 = control1;
- reg.control2 = control2;
- reg.ctr1_enable = ctr[0].enabled;
- reg.ctr2_enable = ctr[1].enabled;
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-static void loongson3_cpu_setup(void *args)
-{
- uint64_t perfcount1, perfcount2;
-
- perfcount1 = reg.reset_counter1;
- perfcount2 = reg.reset_counter2;
- write_c0_perfhi1(perfcount1);
- write_c0_perfhi2(perfcount2);
-}
-
-static void loongson3_cpu_start(void *args)
-{
- /* Start all counters on current CPU */
- reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
- reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
-
- if (reg.ctr1_enable)
- write_c0_perflo1(reg.control1);
- if (reg.ctr2_enable)
- write_c0_perflo2(reg.control2);
-}
-
-static void loongson3_cpu_stop(void *args)
-{
- /* Stop all counters on current CPU */
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- memset(&reg, 0, sizeof(reg));
-}
-
-static int loongson3_perfcount_handler(void)
-{
- unsigned long flags;
- uint64_t counter1, counter2;
- uint32_t cause, handled = IRQ_NONE;
- struct pt_regs *regs = get_irq_regs();
-
- cause = read_c0_cause();
- if (!(cause & CAUSEF_PCI))
- return handled;
-
- counter1 = read_c0_perfhi1();
- counter2 = read_c0_perfhi2();
-
- local_irq_save(flags);
-
- if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
- if (reg.ctr1_enable)
- oprofile_add_sample(regs, 0);
- counter1 = reg.reset_counter1;
- }
- if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
- if (reg.ctr2_enable)
- oprofile_add_sample(regs, 1);
- counter2 = reg.reset_counter2;
- }
-
- local_irq_restore(flags);
-
- write_c0_perfhi1(counter1);
- write_c0_perfhi2(counter2);
-
- if (!(cause & CAUSEF_TI))
- handled = IRQ_HANDLED;
-
- return handled;
-}
-
-static int loongson3_starting_cpu(unsigned int cpu)
-{
- write_c0_perflo1(reg.control1);
- write_c0_perflo2(reg.control2);
- return 0;
-}
-
-static int loongson3_dying_cpu(unsigned int cpu)
-{
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- return 0;
-}
-
-static int __init loongson3_init(void)
-{
- on_each_cpu(reset_counters, NULL, 1);
- cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
- "mips/oprofile/loongson3:starting",
- loongson3_starting_cpu, loongson3_dying_cpu);
- save_perf_irq = perf_irq;
- perf_irq = loongson3_perfcount_handler;
-
- return 0;
-}
-
-static void loongson3_exit(void)
-{
- on_each_cpu(reset_counters, NULL, 1);
- cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
- perf_irq = save_perf_irq;
-}
-
-struct op_mips_model op_model_loongson3_ops = {
- .reg_setup = loongson3_reg_setup,
- .cpu_setup = loongson3_cpu_setup,
- .init = loongson3_init,
- .exit = loongson3_exit,
- .cpu_start = loongson3_cpu_start,
- .cpu_stop = loongson3_cpu_stop,
- .cpu_type = "mips/loongson3",
- .num_counters = 2
-};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
deleted file mode 100644
index 55d7b7fd18b6..000000000000
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004, 05, 06 by Ralf Baechle
- * Copyright (C) 2005 by MIPS Technologies, Inc.
- */
-#include <linux/cpumask.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <asm/irq_regs.h>
-#include <asm/time.h>
-
-#include "op_impl.h"
-
-#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
- MIPS_PERFCTRL_EVENT)
-#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
-
-#define M_COUNTER_OVERFLOW (1UL << 31)
-
-static int (*save_perf_irq)(void);
-static int perfcount_irq;
-
-/*
- * XLR has only one set of counters per core. Designate the
- * first hardware thread in the core for setup and init.
- * Skip CPUs with non-zero hardware thread id (4 hwt per core)
- */
-#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
-#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
-#else
-#define oprofile_skip_cpu(c) 0
-#endif
-
-#ifdef CONFIG_MIPS_MT_SMP
-#define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \
- M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
-#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
- 0 : cpu_vpe_id(&current_cpu_data))
-
-/*
- * The number of bits to shift to convert between counters per core and
- * counters per VPE. There is no reasonable interface at the moment to
- * obtain the number of VPEs used by Linux; on the 34K this number is
- * fixed to two anyway, so we hardcode a few things here for the moment.
- * The way it's done here also ensures that an oprofile VSMP kernel runs
- * correctly on a lesser core such as a 24K, or with maxcpus=1.
- */
-static inline unsigned int vpe_shift(void)
-{
- if (num_possible_cpus() > 1)
- return 1;
-
- return 0;
-}
-
-#else
-
-#define WHAT 0
-#define vpe_id() 0
-
-static inline unsigned int vpe_shift(void)
-{
- return 0;
-}
-
-#endif
-
-static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
-{
- return counters >> vpe_shift();
-}
-
-static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
- return counters << vpe_shift();
-}
-
-#define __define_perf_accessors(r, n, np) \
- \
-static inline unsigned int r_c0_ ## r ## n(void) \
-{ \
- unsigned int cpu = vpe_id(); \
- \
- switch (cpu) { \
- case 0: \
- return read_c0_ ## r ## n(); \
- case 1: \
- return read_c0_ ## r ## np(); \
- default: \
- BUG(); \
- } \
- return 0; \
-} \
- \
-static inline void w_c0_ ## r ## n(unsigned int value) \
-{ \
- unsigned int cpu = vpe_id(); \
- \
- switch (cpu) { \
- case 0: \
- write_c0_ ## r ## n(value); \
- return; \
- case 1: \
- write_c0_ ## r ## np(value); \
- return; \
- default: \
- BUG(); \
- } \
- return; \
-} \
-
-__define_perf_accessors(perfcntr, 0, 2)
-__define_perf_accessors(perfcntr, 1, 3)
-__define_perf_accessors(perfcntr, 2, 0)
-__define_perf_accessors(perfcntr, 3, 1)
-
-__define_perf_accessors(perfctrl, 0, 2)
-__define_perf_accessors(perfctrl, 1, 3)
-__define_perf_accessors(perfctrl, 2, 0)
-__define_perf_accessors(perfctrl, 3, 1)
-
-struct op_mips_model op_model_mipsxx_ops;
-
-static struct mipsxx_register_config {
- unsigned int control[4];
- unsigned int counter[4];
-} reg;
-
-/* Compute all of the registers in preparation for enabling profiling. */
-
-static void mipsxx_reg_setup(struct op_counter_config *ctr)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
- int i;
-
- /* Compute the performance counter control word. */
- for (i = 0; i < counters; i++) {
- reg.control[i] = 0;
- reg.counter[i] = 0;
-
- if (!ctr[i].enabled)
- continue;
-
- reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
- MIPS_PERFCTRL_IE;
- if (ctr[i].kernel)
- reg.control[i] |= MIPS_PERFCTRL_K;
- if (ctr[i].user)
- reg.control[i] |= MIPS_PERFCTRL_U;
- if (ctr[i].exl)
- reg.control[i] |= MIPS_PERFCTRL_EXL;
- if (boot_cpu_type() == CPU_XLR)
- reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
- reg.counter[i] = 0x80000000 - ctr[i].count;
- }
-}
-
-/* Program all of the registers in preparation for enabling profiling. */
-
-static void mipsxx_cpu_setup(void *args)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
-
- if (oprofile_skip_cpu(smp_processor_id()))
- return;
-
- switch (counters) {
- case 4:
- w_c0_perfctrl3(0);
- w_c0_perfcntr3(reg.counter[3]);
- fallthrough;
- case 3:
- w_c0_perfctrl2(0);
- w_c0_perfcntr2(reg.counter[2]);
- fallthrough;
- case 2:
- w_c0_perfctrl1(0);
- w_c0_perfcntr1(reg.counter[1]);
- fallthrough;
- case 1:
- w_c0_perfctrl0(0);
- w_c0_perfcntr0(reg.counter[0]);
- }
-}
-
-/* Start all counters on current CPU */
-static void mipsxx_cpu_start(void *args)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
-
- if (oprofile_skip_cpu(smp_processor_id()))
- return;
-
- switch (counters) {
- case 4:
- w_c0_perfctrl3(WHAT | reg.control[3]);
- fallthrough;
- case 3:
- w_c0_perfctrl2(WHAT | reg.control[2]);
- fallthrough;
- case 2:
- w_c0_perfctrl1(WHAT | reg.control[1]);
- fallthrough;
- case 1:
- w_c0_perfctrl0(WHAT | reg.control[0]);
- }
-}
-
-/* Stop all counters on current CPU */
-static void mipsxx_cpu_stop(void *args)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
-
- if (oprofile_skip_cpu(smp_processor_id()))
- return;
-
- switch (counters) {
- case 4:
- w_c0_perfctrl3(0);
- fallthrough;
- case 3:
- w_c0_perfctrl2(0);
- fallthrough;
- case 2:
- w_c0_perfctrl1(0);
- fallthrough;
- case 1:
- w_c0_perfctrl0(0);
- }
-}
-
-static int mipsxx_perfcount_handler(void)
-{
- unsigned int counters = op_model_mipsxx_ops.num_counters;
- unsigned int control;
- unsigned int counter;
- int handled = IRQ_NONE;
-
- if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
- return handled;
-
- switch (counters) {
-#define HANDLE_COUNTER(n) \
- case n + 1: \
- control = r_c0_perfctrl ## n(); \
- counter = r_c0_perfcntr ## n(); \
- if ((control & MIPS_PERFCTRL_IE) && \
- (counter & M_COUNTER_OVERFLOW)) { \
- oprofile_add_sample(get_irq_regs(), n); \
- w_c0_perfcntr ## n(reg.counter[n]); \
- handled = IRQ_HANDLED; \
- }
- HANDLE_COUNTER(3)
- fallthrough;
- HANDLE_COUNTER(2)
- fallthrough;
- HANDLE_COUNTER(1)
- fallthrough;
- HANDLE_COUNTER(0)
- }
-
- return handled;
-}
-
-static inline int __n_counters(void)
-{
- if (!cpu_has_perf)
- return 0;
- if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
- return 1;
- if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
- return 2;
- if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
- return 3;
-
- return 4;
-}
-
-static inline int n_counters(void)
-{
- int counters;
-
- switch (current_cpu_type()) {
- case CPU_R10000:
- counters = 2;
- break;
-
- case CPU_R12000:
- case CPU_R14000:
- case CPU_R16000:
- counters = 4;
- break;
-
- default:
- counters = __n_counters();
- }
-
- return counters;
-}
-
-static void reset_counters(void *arg)
-{
- int counters = (int)(long)arg;
- switch (counters) {
- case 4:
- w_c0_perfctrl3(0);
- w_c0_perfcntr3(0);
- fallthrough;
- case 3:
- w_c0_perfctrl2(0);
- w_c0_perfcntr2(0);
- fallthrough;
- case 2:
- w_c0_perfctrl1(0);
- w_c0_perfcntr1(0);
- fallthrough;
- case 1:
- w_c0_perfctrl0(0);
- w_c0_perfcntr0(0);
- }
-}
-
-static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
-{
- return mipsxx_perfcount_handler();
-}
-
-static int __init mipsxx_init(void)
-{
- int counters;
-
- counters = n_counters();
- if (counters == 0) {
- printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
- return -ENODEV;
- }
-
-#ifdef CONFIG_MIPS_MT_SMP
- if (!cpu_has_mipsmt_pertccounters)
- counters = counters_total_to_per_cpu(counters);
-#endif
- on_each_cpu(reset_counters, (void *)(long)counters, 1);
-
- op_model_mipsxx_ops.num_counters = counters;
- switch (current_cpu_type()) {
- case CPU_M14KC:
- op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
- break;
-
- case CPU_M14KEC:
- op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
- break;
-
- case CPU_20KC:
- op_model_mipsxx_ops.cpu_type = "mips/20K";
- break;
-
- case CPU_24K:
- op_model_mipsxx_ops.cpu_type = "mips/24K";
- break;
-
- case CPU_25KF:
- op_model_mipsxx_ops.cpu_type = "mips/25K";
- break;
-
- case CPU_1004K:
- case CPU_34K:
- op_model_mipsxx_ops.cpu_type = "mips/34K";
- break;
-
- case CPU_1074K:
- case CPU_74K:
- op_model_mipsxx_ops.cpu_type = "mips/74K";
- break;
-
- case CPU_INTERAPTIV:
- op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
- break;
-
- case CPU_PROAPTIV:
- op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
- break;
-
- case CPU_P5600:
- op_model_mipsxx_ops.cpu_type = "mips/P5600";
- break;
-
- case CPU_I6400:
- op_model_mipsxx_ops.cpu_type = "mips/I6400";
- break;
-
- case CPU_M5150:
- op_model_mipsxx_ops.cpu_type = "mips/M5150";
- break;
-
- case CPU_5KC:
- op_model_mipsxx_ops.cpu_type = "mips/5K";
- break;
-
- case CPU_R10000:
- if ((current_cpu_data.processor_id & 0xff) == 0x20)
- op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
- else
- op_model_mipsxx_ops.cpu_type = "mips/r10000";
- break;
-
- case CPU_R12000:
- case CPU_R14000:
- op_model_mipsxx_ops.cpu_type = "mips/r12000";
- break;
-
- case CPU_R16000:
- op_model_mipsxx_ops.cpu_type = "mips/r16000";
- break;
-
- case CPU_SB1:
- case CPU_SB1A:
- op_model_mipsxx_ops.cpu_type = "mips/sb1";
- break;
-
- case CPU_LOONGSON32:
- op_model_mipsxx_ops.cpu_type = "mips/loongson1";
- break;
-
- case CPU_XLR:
- op_model_mipsxx_ops.cpu_type = "mips/xlr";
- break;
-
- default:
- printk(KERN_ERR "Profiling unsupported for this CPU\n");
-
- return -ENODEV;
- }
-
- save_perf_irq = perf_irq;
- perf_irq = mipsxx_perfcount_handler;
-
- if (get_c0_perfcount_int)
- perfcount_irq = get_c0_perfcount_int();
- else if (cp0_perfcount_irq >= 0)
- perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
- else
- perfcount_irq = -1;
-
- if (perfcount_irq >= 0)
- return request_irq(perfcount_irq, mipsxx_perfcount_int,
- IRQF_PERCPU | IRQF_NOBALANCING |
- IRQF_NO_THREAD | IRQF_NO_SUSPEND |
- IRQF_SHARED,
- "Perfcounter", save_perf_irq);
-
- return 0;
-}
-
-static void mipsxx_exit(void)
-{
- int counters = op_model_mipsxx_ops.num_counters;
-
- if (perfcount_irq >= 0)
- free_irq(perfcount_irq, save_perf_irq);
-
- counters = counters_per_cpu_to_total(counters);
- on_each_cpu(reset_counters, (void *)(long)counters, 1);
-
- perf_irq = save_perf_irq;
-}
-
-struct op_mips_model op_model_mipsxx_ops = {
- .reg_setup = mipsxx_reg_setup,
- .cpu_setup = mipsxx_cpu_setup,
- .init = mipsxx_init,
- .exit = mipsxx_exit,
- .cpu_start = mipsxx_cpu_start,
- .cpu_stop = mipsxx_cpu_stop,
-};
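
__n_counters() in the deleted file above relies on the architected probing scheme: each performance control register carries a continuation bit (MIPS_PERFCTRL_M) that is set when another counter pair follows, so reading control registers until the first one with the bit clear yields the counter count, capped at four. A sketch of that walk over a hypothetical array standing in for reads of the real CP0 perfctrl0..3 registers:

#include <stdio.h>

#define PERFCTRL_M (1u << 31)	/* "more counters follow" continuation bit */

/* Count counters the way __n_counters() does, but over an array that
 * stands in for the c0 perfctrl register reads. */
static int n_counters(const unsigned int ctrl[4])
{
	for (int i = 0; i < 3; i++)
		if (!(ctrl[i] & PERFCTRL_M))
			return i + 1;
	return 4;
}

int main(void)
{
	unsigned int regs[4] = { PERFCTRL_M, PERFCTRL_M, 0, 0 };
	printf("%d counters\n", n_counters(regs));	/* prints 3 */
	return 0;
}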
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b09d6923f156..e7f611c2719d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -4,7 +4,6 @@ config PARISC
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5140c602207f..7d9f71aa829a 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -116,8 +116,6 @@ kernel-y := mm/ kernel/ math-emu/
core-y += $(addprefix arch/parisc/, $(kernel-y))
libs-y += arch/parisc/lib/ $(LIBGCC)
-drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
-
boot := arch/parisc/boot
PALO := $(shell if (which palo 2>&1); then : ; \
diff --git a/arch/parisc/oprofile/Makefile b/arch/parisc/oprofile/Makefile
deleted file mode 100644
index 86a1ccc328eb..000000000000
--- a/arch/parisc/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/parisc/oprofile/init.c b/arch/parisc/oprofile/init.c
deleted file mode 100644
index 026cba2af07a..000000000000
--- a/arch/parisc/oprofile/init.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d26a89cd8908..9141f03060ce 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -226,7 +226,6 @@ config PPC
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
- select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 08cf0eade56a..b959fdaec713 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -276,8 +276,6 @@ head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o
# See arch/powerpc/Kbuild for content of core part of the kernel
core-y += arch/powerpc/
-drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
-
# Default to zImage, override when needed
all: zImage
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 3894ba8f8ffc..72b8f93a9bdd 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -8,7 +8,6 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_CPU_PARTIAL is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 34c86b3abecb..717827219921 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -6,7 +6,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index 30845ce0885a..8da316e61a08 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -17,7 +17,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 2c3834eebca3..c11e777b2f3d 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -7,7 +7,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 42fbc70cec33..cc2c0d51f493 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1de0dbf6cbba..63d611cc160f 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -12,7 +12,6 @@ CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 161351a18517..9424c1e67e1c 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 15ed8d0aa014..78606b7e42df 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -7,7 +7,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 665a8d7cded0..7aefac5afab0 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -10,7 +10,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 60a30fffeda0..2c87e856d839 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -30,7 +30,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 48759656a067..4f05a6652478 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -62,7 +62,6 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 33a01a9e86be..5cf49a515f8e 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index ef09f3cce1fa..10c055eaebf0 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -19,7 +19,6 @@ CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 142f1321fa58..f300dcb937cc 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_POWERNV is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index d5dece981c02..777221775c83 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -29,7 +29,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5f21a5bab467..e85c849214a2 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -17,16 +17,6 @@ struct cpu_spec;
typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
typedef void (*cpu_restore_t)(void);
-enum powerpc_oprofile_type {
- PPC_OPROFILE_INVALID = 0,
- PPC_OPROFILE_RS64 = 1,
- PPC_OPROFILE_POWER4 = 2,
- PPC_OPROFILE_G4 = 3,
- PPC_OPROFILE_FSL_EMB = 4,
- PPC_OPROFILE_CELL = 5,
- PPC_OPROFILE_PA6T = 6,
-};
-
enum powerpc_pmc_type {
PPC_PMC_DEFAULT = 0,
PPC_PMC_IBM = 1,
@@ -83,16 +73,6 @@ struct cpu_spec {
/* Used by oprofile userspace to select the right counters */
char *oprofile_cpu_type;
- /* Processor specific oprofile operations */
- enum powerpc_oprofile_type oprofile_type;
-
- /* Bit locations inside the mmcra change */
- unsigned long oprofile_mmcra_sihv;
- unsigned long oprofile_mmcra_sipr;
-
- /* Bits to clear during an oprofile exception */
- unsigned long oprofile_mmcra_clear;
-
/* Name of processor class, for the ELF AT_PLATFORM entry */
char *platform;
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
deleted file mode 100644
index 2a166c297f97..000000000000
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- */
-
-#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
-#define _ASM_POWERPC_OPROFILE_IMPL_H
-#ifdef __KERNEL__
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Classic doesn't support per-counter user/kernel selection */
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
-#ifdef CONFIG_PPC64
- unsigned long mmcr0;
- unsigned long mmcr1;
- unsigned long mmcra;
-#ifdef CONFIG_OPROFILE_CELL
- /* Register for oprofile user tool to check cell kernel profiling
- * support.
- */
- unsigned long cell_support;
-#endif
-#endif
- unsigned long enable_kernel;
- unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_powerpc_model {
- int (*reg_setup) (struct op_counter_config *,
- struct op_system_config *,
- int num_counters);
- int (*cpu_setup) (struct op_counter_config *);
- int (*start) (struct op_counter_config *);
- int (*global_start) (struct op_counter_config *);
- void (*stop) (void);
- void (*global_stop) (void);
- int (*sync_start)(void);
- int (*sync_stop)(void);
- void (*handle_interrupt) (struct pt_regs *,
- struct op_counter_config *);
- int num_counters;
-};
-
-extern struct op_powerpc_model op_model_fsl_emb;
-extern struct op_powerpc_model op_model_power4;
-extern struct op_powerpc_model op_model_7450;
-extern struct op_powerpc_model op_model_cell;
-extern struct op_powerpc_model op_model_pa6t;
-
-
-/* All the classic PPC parts use these */
-static inline unsigned int classic_ctr_read(unsigned int i)
-{
- switch(i) {
- case 0:
- return mfspr(SPRN_PMC1);
- case 1:
- return mfspr(SPRN_PMC2);
- case 2:
- return mfspr(SPRN_PMC3);
- case 3:
- return mfspr(SPRN_PMC4);
- case 4:
- return mfspr(SPRN_PMC5);
- case 5:
- return mfspr(SPRN_PMC6);
-
-/* No PPC32 chip has more than 6 so far */
-#ifdef CONFIG_PPC64
- case 6:
- return mfspr(SPRN_PMC7);
- case 7:
- return mfspr(SPRN_PMC8);
-#endif
- default:
- return 0;
- }
-}
-
-static inline void classic_ctr_write(unsigned int i, unsigned int val)
-{
- switch(i) {
- case 0:
- mtspr(SPRN_PMC1, val);
- break;
- case 1:
- mtspr(SPRN_PMC2, val);
- break;
- case 2:
- mtspr(SPRN_PMC3, val);
- break;
- case 3:
- mtspr(SPRN_PMC4, val);
- break;
- case 4:
- mtspr(SPRN_PMC5, val);
- break;
- case 5:
- mtspr(SPRN_PMC6, val);
- break;
-
-/* No PPC32 chip has more than 6, yet */
-#ifdef CONFIG_PPC64
- case 6:
- mtspr(SPRN_PMC7, val);
- break;
- case 7:
- mtspr(SPRN_PMC8, val);
- break;
-#endif
- default:
- break;
- }
-}
-
-
-extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 9666491bcb8a..8a2d11ba0dae 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -201,20 +201,6 @@ int spu_64k_pages_available(void);
struct mm_struct;
extern void spu_flush_all_slbs(struct mm_struct *mm);
-/* This interface allows a profiler (e.g., OProfile) to store a ref
- * to spu context information that it creates. This caching technique
- * avoids the need to recreate this information after a save/restore operation.
- *
- * Assumes the caller has already incremented the ref count to
- * profile_info; then spu_context_destroy must call kref_put
- * on prof_info_kref.
- */
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void ( * prof_info_release) (struct kref *kref));
-
-void *spu_get_profile_private_kref(struct spu_context *ctx);
-
/* system callbacks from the SPU */
struct spu_syscall_block {
u64 nr_ret;
@@ -266,25 +252,6 @@ void spu_remove_dev_attr(struct device_attribute *attr);
int spu_add_dev_attr_group(struct attribute_group *attrs);
void spu_remove_dev_attr_group(struct attribute_group *attrs);
-/*
- * Notifier blocks:
- *
- * oprofile can get notified when a context switch is performed
- * on an spe. The notifier function that gets called is passed
- * a pointer to the SPU structure as well as the object-id that
- * identifies the binary running on that SPU now.
- *
- * For a context save, the object-id that is passed is zero,
- * identifying that the kernel will run from that moment on.
- *
- * For a context restore, the object-id is the value written
- * to the object-id spufs file from user space, and the notifier
- * function can assume that spu->ctx is valid.
- */
-struct notifier_block;
-int spu_switch_event_register(struct notifier_block * n);
-int spu_switch_event_unregister(struct notifier_block * n);
-
extern void notify_spus_active(void);
extern void do_notify_spus_active(void);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 65f35ec052d4..ae0fdef0ac11 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -13,7 +13,6 @@
#include <linux/export.h>
#include <linux/jump_label.h>
-#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
#include <asm/mce.h>
@@ -151,7 +150,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970FX */
@@ -169,7 +167,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -187,7 +184,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970MP */
@@ -205,7 +201,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970MP,
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* PPC970GX */
@@ -222,7 +217,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
{ /* Power5 GR */
@@ -237,12 +231,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power5",
- .oprofile_type = PPC_OPROFILE_POWER4,
- /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
- * and above but only works on POWER5 and above
- */
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5",
},
{ /* Power5++ */
@@ -256,9 +244,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.oprofile_cpu_type = "ppc64/power5++",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* Power5 GS */
@@ -273,9 +258,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power5+",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -288,7 +270,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power5+",
},
{ /* Power6 */
@@ -304,11 +285,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power6",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
- .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
- .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
- POWER6_MMCRA_OTHER,
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -321,7 +297,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power6",
},
{ /* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -334,7 +309,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
@@ -351,7 +325,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
@@ -368,7 +341,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER9,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
@@ -384,7 +356,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTRS_POWER10,
.icache_bsize = 128,
.dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
@@ -403,7 +374,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
- .oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -422,7 +392,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
- .oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
.machine_check_early = __machine_check_early_realmode_p7,
@@ -441,7 +410,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -460,7 +428,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -479,7 +446,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.machine_check_early = __machine_check_early_realmode_p8,
@@ -498,7 +464,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -517,7 +482,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -536,7 +500,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.machine_check_early = __machine_check_early_realmode_p9,
@@ -555,7 +518,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power10",
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power10,
.cpu_restore = __restore_cpu_power10,
.machine_check_early = __machine_check_early_realmode_p10,
@@ -575,7 +537,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.num_pmcs = 4,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/cell-be",
- .oprofile_type = PPC_OPROFILE_CELL,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -592,7 +553,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_pa6t,
.cpu_restore = __restore_cpu_pa6t,
.oprofile_cpu_type = "ppc64/pa6t",
- .oprofile_type = PPC_OPROFILE_PA6T,
.platform = "pa6t",
},
{ /* default match */
@@ -757,7 +717,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 745/755 */
.pvr_mask = 0xfffff000,
@@ -789,7 +748,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
.pvr_mask = 0xffffffff,
@@ -806,7 +764,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 750FX (All revs except 2.0) */
.pvr_mask = 0xffff0000,
@@ -823,7 +780,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 750GX */
.pvr_mask = 0xffff0000,
@@ -840,7 +796,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
.oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 740/750 (L2CR bit need fixup for 740) */
.pvr_mask = 0xffff0000,
@@ -919,7 +874,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -937,7 +891,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -955,7 +908,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -973,7 +925,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -991,7 +942,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1009,7 +959,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1027,7 +976,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1045,7 +993,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1062,7 +1009,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1080,7 +1026,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1098,7 +1043,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_G4,
.cpu_setup = __setup_cpu_745x,
.oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
.machine_check = machine_check_generic,
.platform = "ppc7450",
},
@@ -1211,7 +1155,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
{ /* e300c4 (e300c1, plus one IU) */
@@ -1228,7 +1171,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
#endif
@@ -1925,7 +1867,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500v1,
.machine_check = machine_check_e500,
.platform = "ppc8540",
@@ -1945,7 +1886,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500v2,
.machine_check = machine_check_e500,
.platform = "ppc8548",
@@ -1965,7 +1905,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 64,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500mc",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
@@ -1987,7 +1926,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 64,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500mc",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
@@ -2010,7 +1948,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 64,
.num_pmcs = 6,
.oprofile_cpu_type = "ppc/e6500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e6500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e6500,
@@ -2076,10 +2013,6 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
if (old.num_pmcs && !s->num_pmcs) {
t->num_pmcs = old.num_pmcs;
t->pmc_type = old.pmc_type;
- t->oprofile_type = old.oprofile_type;
- t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv;
- t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr;
- t->oprofile_mmcra_clear = old.oprofile_mmcra_clear;
/*
* If we have passed through this logic once before and
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index b5478b72c08c..358aee7c2d79 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -19,7 +19,6 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
-#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>
@@ -103,7 +102,6 @@ static struct cpu_spec __initdata base_cpu_spec = {
.num_pmcs = 0,
.pmc_type = PPC_PMC_DEFAULT,
.oprofile_cpu_type = NULL,
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = NULL,
.cpu_restore = __restore_cpu_cpufeatures,
.machine_check_early = NULL,
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
deleted file mode 100644
index bb2d94c8cbe6..000000000000
--- a/arch/powerpc/oprofile/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
- cell/spu_profiler.o cell/vma_map.o \
- cell/spu_task_sync.o
-oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_power4.o op_model_pa6t.o
-oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
-oprofile-$(CONFIG_PPC_BOOK3S_32) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
deleted file mode 100644
index 9db7ada79d10..000000000000
--- a/arch/powerpc/oprofile/backtrace.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
- */
-
-#include <linux/time.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#include <asm/oprofile_impl.h>
-
-#define STACK_SP(STACK) *(STACK)
-
-#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
-#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
-
-#ifdef CONFIG_PPC64
-#define STACK_LR(STACK) STACK_LR64(STACK)
-#else
-#define STACK_LR(STACK) STACK_LR32(STACK)
-#endif
-
-static unsigned int user_getsp32(unsigned int sp, int is_first)
-{
- unsigned int stack_frame[2];
- void __user *p = compat_ptr(sp);
-
- /*
- * The most likely reason for this is that we returned -EFAULT,
- * which means that we've done all that we can do from
- * interrupt context.
- */
- if (copy_from_user_nofault(stack_frame, (void __user *)p,
- sizeof(stack_frame)))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR32(stack_frame));
-
- /*
- * We do not enforce increasing stack addresses here because
- * we may transition to a different stack, eg a signal handler.
- */
- return STACK_SP(stack_frame);
-}
-
-#ifdef CONFIG_PPC64
-static unsigned long user_getsp64(unsigned long sp, int is_first)
-{
- unsigned long stack_frame[3];
-
- if (copy_from_user_nofault(stack_frame, (void __user *)sp,
- sizeof(stack_frame)))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR64(stack_frame));
-
- return STACK_SP(stack_frame);
-}
-#endif
-
-static unsigned long kernel_getsp(unsigned long sp, int is_first)
-{
- unsigned long *stack_frame = (unsigned long *)sp;
-
- if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR(stack_frame));
-
- /*
- * We do not enforce increasing stack addresses here because
- * we might be transitioning from an interrupt stack to a kernel
- * stack. validate_sp() is designed to understand this, so just
- * use it.
- */
- return STACK_SP(stack_frame);
-}
-
-void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- unsigned long sp = regs->gpr[1];
- int first_frame = 1;
-
-	/* We ditch the top stack frame, so we need to loop through one extra time */
- depth += 1;
-
- if (!user_mode(regs)) {
- while (depth--) {
- sp = kernel_getsp(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- } else {
-#ifdef CONFIG_PPC64
- if (!is_32bit_task()) {
- while (depth--) {
- sp = user_getsp64(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- return;
- }
-#endif
-
- while (depth--) {
- sp = user_getsp32(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- }
-}
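For reference, a sketch of the stack-frame head the STACK_SP/STACK_LR macros above index into; the struct and field names are illustrative, not taken from any ABI header:

/* PPC64: the LR save word sits two longs above the back chain. */
struct frame_head_ppc64 {
	unsigned long back_chain;	/* STACK_SP: caller's saved SP */
	unsigned long cr_save;
	unsigned long lr_save;		/* STACK_LR64: (unsigned long *)sp + 2 */
};

/* PPC32: the LR save word sits one word above the back chain. */
struct frame_head_ppc32 {
	unsigned int back_chain;	/* STACK_SP */
	unsigned int lr_save;		/* STACK_LR32: (unsigned int *)sp + 1 */
};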
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
deleted file mode 100644
index e198efa9113a..000000000000
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
- /*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-#ifndef PR_UTIL_H
-#define PR_UTIL_H
-
-#include <linux/cpumask.h>
-#include <linux/oprofile.h>
-#include <asm/cell-pmu.h>
-#include <asm/cell-regs.h>
-#include <asm/spu.h>
-
-/* Defines used for sync_start */
-#define SKIP_GENERIC_SYNC 0
-#define SYNC_START_ERROR -1
-#define DO_GENERIC_SYNC 1
-#define SPUS_PER_NODE 8
-#define DEFAULT_TIMER_EXPIRE (HZ / 10)
-
-extern struct delayed_work spu_work;
-extern int spu_prof_running;
-
-#define TRACE_ARRAY_SIZE 1024
-
-extern spinlock_t oprof_spu_smpl_arry_lck;
-
-struct spu_overlay_info { /* map of sections within an SPU overlay */
- unsigned int vma; /* SPU virtual memory address from elf */
- unsigned int size; /* size of section from elf */
- unsigned int offset; /* offset of section into elf file */
- unsigned int buf;
-};
-
-struct vma_to_fileoffset_map { /* map of sections within an SPU program */
- struct vma_to_fileoffset_map *next; /* list pointer */
- unsigned int vma; /* SPU virtual memory address from elf */
- unsigned int size; /* size of section from elf */
- unsigned int offset; /* offset of section into elf file */
- unsigned int guard_ptr;
- unsigned int guard_val;
- /*
- * The guard pointer is an entry in the _ovly_buf_table,
- * computed using ovly.buf as the index into the table. Since
- * ovly.buf values begin at '1' to reference the first (or 0th)
- * entry in the _ovly_buf_table, the computation subtracts 1
- * from ovly.buf.
- * The guard value is stored in the _ovly_buf_table entry and
- * is an index (starting at 1) back to the _ovly_table entry
- * that is pointing at this _ovly_buf_table entry. So, for
- * example, for an overlay scenario with one overlay segment
- * and two overlay sections:
- * - Section 1 points to the first entry of the
- * _ovly_buf_table, which contains a guard value
- * of '1', referencing the first (index=0) entry of
- * _ovly_table.
- * - Section 2 points to the second entry of the
- * _ovly_buf_table, which contains a guard value
- * of '2', referencing the second (index=1) entry of
- * _ovly_table.
- */
-
-};
-
-struct spu_buffer {
- int last_guard_val;
- int ctx_sw_seen;
- unsigned long *buff;
- unsigned int head, tail;
-};
-
-
-/* The three functions below are for maintaining and accessing
- * the vma-to-fileoffset map.
- */
-struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
- unsigned long objectid);
-unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
- unsigned int vma, const struct spu *aSpu,
- int *grd_val);
-void vma_map_free(struct vma_to_fileoffset_map *map);
-
-/*
- * Entry point for SPU profiling.
- * cycles_reset is the SPU_CYCLES count value specified by the user.
- */
-int start_spu_profiling_cycles(unsigned int cycles_reset);
-void start_spu_profiling_events(void);
-
-void stop_spu_profiling_cycles(void);
-void stop_spu_profiling_events(void);
-
-/* add the necessary profiling hooks */
-int spu_sync_start(void);
-
-/* remove the hooks */
-int spu_sync_stop(void);
-
-/* Record SPU program counter samples to the oprofile event buffer. */
-void spu_sync_buffer(int spu_num, unsigned int *samples,
- int num_samples);
-
-void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset);
-
-#endif /* PR_UTIL_H */
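A worked sketch of the guard computation described above, mirroring how create_vma_map() (vma_map.c, deleted below) fills in overlay entries; ovly, ovly_buf_table_sym, i and aSpu are the ELF-derived values used there:

/* Entry i of _ovly_table (values come from the SPU ELF): */
unsigned int guard_ptr = ovly_buf_table_sym + (ovly.buf - 1) * 4;
unsigned int guard_val = i + 1;	/* 1-based back-reference into _ovly_table */

/* At sample time, vma_map_lookup() reads the guard from SPU local store: */
u32 resident = *(u32 *)(aSpu->local_store + guard_ptr);
/* a mismatch means this overlay section is not currently loaded,
 * so the map entry is skipped during lookup
 */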
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
deleted file mode 100644
index cdf883445a9f..000000000000
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ /dev/null
@@ -1,248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Authors: Maynard Johnson <maynardj@us.ibm.com>
- * Carl Love <carll@us.ibm.com>
- */
-
-#include <linux/hrtimer.h>
-#include <linux/smp.h>
-#include <linux/slab.h>
-#include <asm/cell-pmu.h>
-#include <asm/time.h>
-#include "pr_util.h"
-
-#define SCALE_SHIFT 14
-
-static u32 *samples;
-
-/* spu_prof_running is a flag used to indicate if spu profiling is enabled
- * or not. It is set by the routines start_spu_profiling_cycles() and
- * start_spu_profiling_events(). The flag is cleared by the routines
- * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
- * routines are called via global_start() and global_stop() which are called in
- * op_powerpc_start() and op_powerpc_stop(). These routines are called once
- * per system as a result of the user starting/stopping oprofile. Hence, only
- * one CPU per user at a time will be changing the value of spu_prof_running.
- * In general, OProfile does not protect against multiple users trying to run
- * OProfile at a time.
- */
-int spu_prof_running;
-static unsigned int profiling_interval;
-
-#define NUM_SPU_BITS_TRBUF 16
-#define SPUS_PER_TB_ENTRY 4
-
-#define SPU_PC_MASK 0xFFFF
-
-DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
-static unsigned long oprof_spu_smpl_arry_lck_flags;
-
-void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
-{
- unsigned long ns_per_cyc;
-
- if (!freq_khz)
- freq_khz = ppc_proc_freq/1000;
-
- /* To calculate a timeout in nanoseconds, the basic
- * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
- * To avoid floating point math, we use the scale math
- * technique as described in linux/jiffies.h. We use
- * a scale factor of SCALE_SHIFT, which provides 4 decimal places
- * of precision. This is close enough for the purpose at hand.
- *
- * The timeout value should be small enough that the hw trace
- * buffer gets no more than about 1/3 full at the maximum hw
- * sampling frequency the user can specify (the LFSR value).
- * This ensures the trace buffer never fills, even if kernel
- * thread scheduling varies under a heavy system load.
- */
-
- ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
- profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
-
-}
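Plugging illustrative numbers into the scale math above, a 3.2 GHz core with cycles_reset = 100000 works out as:

/* Worked example (numbers illustrative): freq_khz = 3200000 */
unsigned long ns_per_cyc = (1000000UL << 14) / 3200000;	/* = 5120 (scaled) */
unsigned int interval = (5120UL * 100000) >> 14;	/* = 31250 ns */
/* Check: 100000 cycles * 0.3125 ns/cycle = 31250 ns exactly. */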
-
-/*
- * Extract SPU PC from trace buffer entry
- */
-static void spu_pc_extract(int cpu, int entry)
-{
- /* the trace buffer is 128 bits */
- u64 trace_buffer[2];
- u64 spu_mask;
- int spu;
-
- spu_mask = SPU_PC_MASK;
-
-	/* Each SPU PC is 16 bits; hence, four SPU PCs fit in each of
-	 * the two 64-bit words that make up the 128-bit trace_buffer
-	 * entry. Process the two 64-bit values simultaneously.
- * trace[0] SPU PC contents are: 0 1 2 3
- * trace[1] SPU PC contents are: 4 5 6 7
- */
-
- cbe_read_trace_buffer(cpu, trace_buffer);
-
- for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
- /* spu PC trace entry is upper 16 bits of the
- * 18 bit SPU program counter
- */
- samples[spu * TRACE_ARRAY_SIZE + entry]
- = (spu_mask & trace_buffer[0]) << 2;
- samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
- = (spu_mask & trace_buffer[1]) << 2;
-
- trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
- trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
- }
-}
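To make the packing concrete (values illustrative): the loop above peels entries off the low end first, so SPU 3's PC occupies the bottom 16 bits of trace_buffer[0] and SPU 0's the top 16:

/* Unpack one 64-bit trace word 'w' (sketch): */
u32 pc_spu3 = (w & 0xFFFF) << 2;		/* lowest 16 bits */
u32 pc_spu0 = ((w >> 48) & 0xFFFF) << 2;	/* highest 16 bits */
/* A stored value of 0x1234 recovers byte address 0x1234 << 2 = 0x48d0. */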
-
-static int cell_spu_pc_collection(int cpu)
-{
- u32 trace_addr;
- int entry;
-
- /* process the collected SPU PC for the node */
-
- entry = 0;
-
- trace_addr = cbe_read_pm(cpu, trace_address);
- while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
- /* there is data in the trace buffer to process */
- spu_pc_extract(cpu, entry);
-
- entry++;
-
- if (entry >= TRACE_ARRAY_SIZE)
- /* spu_samples is full */
- break;
-
- trace_addr = cbe_read_pm(cpu, trace_address);
- }
-
- return entry;
-}
-
-
-static enum hrtimer_restart profile_spus(struct hrtimer *timer)
-{
- ktime_t kt;
- int cpu, node, k, num_samples, spu_num;
-
- if (!spu_prof_running)
- goto stop;
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- node = cbe_cpu_to_node(cpu);
-
-		/* There should only be one kernel thread at a time processing
-		 * the samples. In the very unlikely case that processing takes
-		 * so long that multiple kernel threads get started to process
-		 * the samples, make sure only one kernel thread works on the
-		 * samples array at a time. The sample array must be loaded and
-		 * then processed for a given cpu; the sample array is not per
-		 * cpu.
- */
- spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
- num_samples = cell_spu_pc_collection(cpu);
-
- if (num_samples == 0) {
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
- continue;
- }
-
- for (k = 0; k < SPUS_PER_NODE; k++) {
- spu_num = k + (node * SPUS_PER_NODE);
- spu_sync_buffer(spu_num,
- samples + (k * TRACE_ARRAY_SIZE),
- num_samples);
- }
-
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
-
- }
-	smp_wmb(); /* ensure spu event buffer updates are written */
- /* don't want events intermingled... */
-
- kt = profiling_interval;
- if (!spu_prof_running)
- goto stop;
- hrtimer_forward(timer, timer->base->get_time(), kt);
- return HRTIMER_RESTART;
-
- stop:
- printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
- return HRTIMER_NORESTART;
-}
-
-static struct hrtimer timer;
-/*
- * Entry point for SPU cycle profiling.
- * NOTE: SPU profiling is done system-wide, not per-CPU.
- *
- * cycles_reset is the count value specified by the user when
- * setting up OProfile to count SPU_CYCLES.
- */
-int start_spu_profiling_cycles(unsigned int cycles_reset)
-{
- ktime_t kt;
-
- pr_debug("timer resolution: %lu\n", TICK_NSEC);
- kt = profiling_interval;
- hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&timer, kt);
- timer.function = profile_spus;
-
- /* Allocate arrays for collecting SPU PC samples */
- samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
- GFP_KERNEL);
-
- if (!samples)
- return -ENOMEM;
-
- spu_prof_running = 1;
- hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-
- return 0;
-}
-
-/*
- * Entry point for SPU event profiling.
- * NOTE: SPU profiling is done system-wide, not per-CPU.
- */
-void start_spu_profiling_events(void)
-{
- spu_prof_running = 1;
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-
- return;
-}
-
-void stop_spu_profiling_cycles(void)
-{
- spu_prof_running = 0;
- hrtimer_cancel(&timer);
- kfree(samples);
- pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
-}
-
-void stop_spu_profiling_events(void)
-{
- spu_prof_running = 0;
-}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
deleted file mode 100644
index 489f993100d5..000000000000
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ /dev/null
@@ -1,657 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-/* The purpose of this file is to handle SPU event task switching
- * and to record SPU context information into the OProfile
- * event buffer.
- *
- * Additionally, the spu_sync_buffer function is provided as a helper
- * for recording actual SPU program counter samples to the event buffer.
- */
-#include <linux/dcookies.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/numa.h>
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include "pr_util.h"
-
-#define RELEASE_ALL 9999
-
-static DEFINE_SPINLOCK(buffer_lock);
-static DEFINE_SPINLOCK(cache_lock);
-static int num_spu_nodes;
-static int spu_prof_num_nodes;
-
-struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
-struct delayed_work spu_work;
-static unsigned max_spu_buff;
-
-static void spu_buff_add(unsigned long int value, int spu)
-{
-	/* spu buff is a circular buffer. Entries are added at the
-	 * head; head is the index where the next value is stored.
-	 * The buffer is treated as full while one free entry still
-	 * remains, i.e. head and tail are never allowed to become
-	 * equal. That way we can tell the difference between the
-	 * buffer being full versus empty.
- *
- * ASSUMPTION: the buffer_lock is held when this function
- * is called to lock the buffer, head and tail.
- */
- int full = 1;
-
- if (spu_buff[spu].head >= spu_buff[spu].tail) {
- if ((spu_buff[spu].head - spu_buff[spu].tail)
- < (max_spu_buff - 1))
- full = 0;
-
- } else if (spu_buff[spu].tail > spu_buff[spu].head) {
- if ((spu_buff[spu].tail - spu_buff[spu].head)
- > 1)
- full = 0;
- }
-
- if (!full) {
- spu_buff[spu].buff[spu_buff[spu].head] = value;
- spu_buff[spu].head++;
-
- if (spu_buff[spu].head >= max_spu_buff)
- spu_buff[spu].head = 0;
- } else {
-		/* From the user's perspective, make the SPU buffer
-		 * size management/overflow look like we are using
-		 * per cpu buffers. The user uses the same
-		 * per cpu parameter to adjust the SPU buffer size.
-		 * Increment sample_lost_overflow to inform the user
-		 * that the buffer size needs to be increased.
-		 */
- oprofile_cpu_buffer_inc_smpl_lost();
- }
-}
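The full/empty test above reduces to the classic one-slot-free invariant; a compact restatement (a sketch, not part of the removed code):

/* Equivalent occupancy test: one slot is always left free,
 * so head == tail means empty.
 */
static inline int spu_buff_full(unsigned int head, unsigned int tail,
				unsigned int size)
{
	return ((head + 1) % size) == tail;
}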
-
-/* This function copies the per SPU buffers to the
- * OProfile kernel buffer.
- */
-static void sync_spu_buff(void)
-{
- int spu;
- unsigned long flags;
- int curr_head;
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
-		/* In case there was an issue and the buffer didn't
-		 * get created, skip it.
-		 */
- if (spu_buff[spu].buff == NULL)
- continue;
-
- /* Hold the lock to make sure the head/tail
- * doesn't change while spu_buff_add() is
- * deciding if the buffer is full or not.
- * Being a little paranoid.
- */
- spin_lock_irqsave(&buffer_lock, flags);
- curr_head = spu_buff[spu].head;
- spin_unlock_irqrestore(&buffer_lock, flags);
-
-		/* Transfer the current contents to the kernel buffer.
-		 * Data can still be added to the head of the buffer.
-		 */
- oprofile_put_buff(spu_buff[spu].buff,
- spu_buff[spu].tail,
- curr_head, max_spu_buff);
-
- spin_lock_irqsave(&buffer_lock, flags);
- spu_buff[spu].tail = curr_head;
- spin_unlock_irqrestore(&buffer_lock, flags);
- }
-
-}
-
-static void wq_sync_spu_buff(struct work_struct *work)
-{
- /* move data from spu buffers to kernel buffer */
- sync_spu_buff();
-
- /* only reschedule if profiling is not done */
- if (spu_prof_running)
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-}
-
-/* Container for caching information about an active SPU task. */
-struct cached_info {
- struct vma_to_fileoffset_map *map;
- struct spu *the_spu; /* needed to access pointer to local_store */
- struct kref cache_ref;
-};
-
-static struct cached_info *spu_info[MAX_NUMNODES * 8];
-
-static void destroy_cached_info(struct kref *kref)
-{
- struct cached_info *info;
-
- info = container_of(kref, struct cached_info, cache_ref);
- vma_map_free(info->map);
- kfree(info);
- module_put(THIS_MODULE);
-}
-
-/* Return the cached_info for the passed SPU number.
- * ATTENTION: Callers are responsible for obtaining the
- * cache_lock if needed prior to invoking this function.
- */
-static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
-{
- struct kref *ref;
- struct cached_info *ret_info;
-
- if (spu_num >= num_spu_nodes) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Invalid index %d into spu info cache\n",
- __func__, __LINE__, spu_num);
- ret_info = NULL;
- goto out;
- }
- if (!spu_info[spu_num] && the_spu) {
- ref = spu_get_profile_private_kref(the_spu->ctx);
- if (ref) {
- spu_info[spu_num] = container_of(ref, struct cached_info, cache_ref);
- kref_get(&spu_info[spu_num]->cache_ref);
- }
- }
-
- ret_info = spu_info[spu_num];
- out:
- return ret_info;
-}
-
-
-/* Looks for cached info for the passed spu. If not found,
- * cached info is created for the passed spu.
- * Returns 0 for success; otherwise, a negative errno on error.
- */
-static int
-prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
-{
- unsigned long flags;
- struct vma_to_fileoffset_map *new_map;
- int retval = 0;
- struct cached_info *info;
-
-	/* We won't bother getting the cache_lock here since we
-	 * don't do anything with the cached_info that's returned.
-	 */
- info = get_cached_info(spu, spu->number);
-
- if (info) {
- pr_debug("Found cached SPU info.\n");
- goto out;
- }
-
- /* Create cached_info and set spu_info[spu->number] to point to it.
- * spu->number is a system-wide value, not a per-node value.
- */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "SPU_PROF: "
-		       "%s, line %d: cached_info allocation failed\n",
- __func__, __LINE__);
- retval = -ENOMEM;
- goto err_alloc;
- }
- new_map = create_vma_map(spu, objectId);
- if (!new_map) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: create vma_map failed\n",
- __func__, __LINE__);
- retval = -ENOMEM;
- goto err_alloc;
- }
-
- pr_debug("Created vma_map\n");
- info->map = new_map;
- info->the_spu = spu;
- kref_init(&info->cache_ref);
- spin_lock_irqsave(&cache_lock, flags);
- spu_info[spu->number] = info;
- /* Increment count before passing off ref to SPUFS. */
- kref_get(&info->cache_ref);
-
- /* We increment the module refcount here since SPUFS is
- * responsible for the final destruction of the cached_info,
- * and it must be able to access the destroy_cached_info()
- * function defined in the OProfile module. We decrement
- * the module refcount in destroy_cached_info.
- */
- try_module_get(THIS_MODULE);
- spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
- destroy_cached_info);
- spin_unlock_irqrestore(&cache_lock, flags);
- goto out;
-
-err_alloc:
- kfree(info);
-out:
- return retval;
-}
-
-/*
- * NOTE: The caller is responsible for locking the
- * cache_lock prior to calling this function.
- */
-static int release_cached_info(int spu_index)
-{
- int index, end;
-
- if (spu_index == RELEASE_ALL) {
- end = num_spu_nodes;
- index = 0;
- } else {
- if (spu_index >= num_spu_nodes) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: "
- "Invalid index %d into spu info cache\n",
- __func__, __LINE__, spu_index);
- goto out;
- }
- end = spu_index + 1;
- index = spu_index;
- }
- for (; index < end; index++) {
- if (spu_info[index]) {
- kref_put(&spu_info[index]->cache_ref,
- destroy_cached_info);
- spu_info[index] = NULL;
- }
- }
-
-out:
- return 0;
-}
-
-/* The source code for fast_get_dcookie was "borrowed"
- * from drivers/oprofile/buffer_sync.c.
- */
-
-/* Optimisation. We can manage without taking the dcookie sem
- * because we cannot reach this code without at least one
- * dcookie user still being registered (namely, the reader
- * of the event buffer).
- */
-static inline unsigned long fast_get_dcookie(const struct path *path)
-{
- unsigned long cookie;
-
- if (path->dentry->d_flags & DCACHE_COOKIE)
- return (unsigned long)path->dentry;
- get_dcookie(path, &cookie);
- return cookie;
-}
-
-/* Look up the dcookie for the task's mm->exe_file,
- * which corresponds loosely to "application name". Also, determine
- * the offset for the SPU ELF object. If the computed offset is
- * non-zero, it implies an embedded SPU object; otherwise, it's a
- * separate SPU binary, in which case we retrieve its dcookie.
- * For the embedded case, we must determine if SPU ELF is embedded
- * in the executable application or another file (i.e., shared lib).
- * If embedded in a shared lib, we must get the dcookie and return
- * that to the caller.
- */
-static unsigned long
-get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
- unsigned long *spu_bin_dcookie,
- unsigned long spu_ref)
-{
- unsigned long app_cookie = 0;
- unsigned int my_offset = 0;
- struct vm_area_struct *vma;
- struct file *exe_file;
- struct mm_struct *mm = spu->mm;
-
- if (!mm)
- goto out;
-
- exe_file = get_mm_exe_file(mm);
- if (exe_file) {
- app_cookie = fast_get_dcookie(&exe_file->f_path);
- pr_debug("got dcookie for %pD\n", exe_file);
- fput(exe_file);
- }
-
- mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
- continue;
- my_offset = spu_ref - vma->vm_start;
- if (!vma->vm_file)
- goto fail_no_image_cookie;
-
- pr_debug("Found spu ELF at %X(object-id:%lx) for file %pD\n",
- my_offset, spu_ref, vma->vm_file);
- *offsetp = my_offset;
- break;
- }
-
- *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
- pr_debug("got dcookie for %pD\n", vma->vm_file);
-
- mmap_read_unlock(mm);
-
-out:
- return app_cookie;
-
-fail_no_image_cookie:
- mmap_read_unlock(mm);
-
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Cannot find dcookie for SPU binary\n",
- __func__, __LINE__);
- goto out;
-}
-
-
-
-/* This function finds or creates cached context information for the
- * passed SPU and records SPU context information into the OProfile
- * event buffer.
- */
-static int process_context_switch(struct spu *spu, unsigned long objectId)
-{
- unsigned long flags;
- int retval;
- unsigned int offset = 0;
- unsigned long spu_cookie = 0, app_dcookie;
-
- retval = prepare_cached_spu_info(spu, objectId);
- if (retval)
- goto out;
-
- /* Get dcookie first because a mutex_lock is taken in that
- * code path, so interrupts must not be disabled.
- */
- app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
- if (!app_dcookie || !spu_cookie) {
- retval = -ENOENT;
- goto out;
- }
-
- /* Record context info in event buffer */
- spin_lock_irqsave(&buffer_lock, flags);
- spu_buff_add(ESCAPE_CODE, spu->number);
- spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
- spu_buff_add(spu->number, spu->number);
- spu_buff_add(spu->pid, spu->number);
- spu_buff_add(spu->tgid, spu->number);
- spu_buff_add(app_dcookie, spu->number);
- spu_buff_add(spu_cookie, spu->number);
- spu_buff_add(offset, spu->number);
-
- /* Set flag to indicate SPU PC data can now be written out. If
- * the SPU program counter data is seen before an SPU context
- * record is seen, the postprocessing will fail.
- */
- spu_buff[spu->number].ctx_sw_seen = 1;
-
- spin_unlock_irqrestore(&buffer_lock, flags);
-	smp_wmb(); /* ensure spu event buffer updates are written */
- /* don't want entries intermingled... */
-out:
- return retval;
-}
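For reference, the context-switch record emitted above, in buffer order; this is exactly the sequence of spu_buff_add() calls, and the post-processor depends on it:

/* Per-context-switch record layout:
 *	ESCAPE_CODE
 *	SPU_CTX_SWITCH_CODE
 *	spu->number
 *	spu->pid
 *	spu->tgid
 *	app_dcookie	(dcookie of the owning executable)
 *	spu_cookie	(dcookie of the file holding the SPU ELF)
 *	offset		(offset of the SPU ELF within that file)
 */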
-
-/*
- * This function is invoked on either a bind_context or unbind_context.
- * If called for an unbind_context, the val arg is 0; otherwise,
- * it is the object-id value for the spu context.
- * The data arg is of type 'struct spu *'.
- */
-static int spu_active_notify(struct notifier_block *self, unsigned long val,
- void *data)
-{
- int retval;
- unsigned long flags;
- struct spu *the_spu = data;
-
- pr_debug("SPU event notification arrived\n");
- if (!val) {
- spin_lock_irqsave(&cache_lock, flags);
- retval = release_cached_info(the_spu->number);
- spin_unlock_irqrestore(&cache_lock, flags);
- } else {
- retval = process_context_switch(the_spu, val);
- }
- return retval;
-}
-
-static struct notifier_block spu_active = {
- .notifier_call = spu_active_notify,
-};
-
-static int number_of_online_nodes(void)
-{
-	u32 cpu;
-	u32 tmp;
-	int nodes = 0;
-
- for_each_online_cpu(cpu) {
- tmp = cbe_cpu_to_node(cpu) + 1;
- if (tmp > nodes)
- nodes++;
- }
- return nodes;
-}
-
-static int oprofile_spu_buff_create(void)
-{
- int spu;
-
- max_spu_buff = oprofile_get_cpu_buffer_size();
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
-		/* Create circular buffers to store the data in;
-		 * use locks to manage access to the buffers.
-		 */
- spu_buff[spu].head = 0;
- spu_buff[spu].tail = 0;
-
- /*
-		 * Create a buffer for each SPU. A single buffer for
-		 * all SPUs can't reliably be created, since enough
-		 * contiguous kernel memory may not be available.
- */
-
- spu_buff[spu].buff = kzalloc((max_spu_buff
- * sizeof(unsigned long)),
- GFP_KERNEL);
-
- if (!spu_buff[spu].buff) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: oprofile_spu_buff_create "
- "failed to allocate spu buffer %d.\n",
- __func__, __LINE__, spu);
-
- /* release the spu buffers that have been allocated */
- while (spu >= 0) {
- kfree(spu_buff[spu].buff);
- spu_buff[spu].buff = 0;
- spu--;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-/* The main purpose of this function is to synchronize
- * OProfile with SPUFS by registering to be notified of
- * SPU task switches.
- *
- * NOTE: When profiling SPUs, we must ensure that only
- * spu_sync_start is invoked and not the generic sync_start
- * in drivers/oprofile/oprof.c. A return value of
- * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
- * accomplish this.
- */
-int spu_sync_start(void)
-{
- int spu;
- int ret = SKIP_GENERIC_SYNC;
- int register_ret;
- unsigned long flags = 0;
-
- spu_prof_num_nodes = number_of_online_nodes();
- num_spu_nodes = spu_prof_num_nodes * 8;
- INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
-
-	/* Create buffers for storing the SPU data before it is
-	 * copied into the kernel buffer.
-	 */
- ret = oprofile_spu_buff_create();
- if (ret)
- goto out;
-
- spin_lock_irqsave(&buffer_lock, flags);
- for (spu = 0; spu < num_spu_nodes; spu++) {
- spu_buff_add(ESCAPE_CODE, spu);
- spu_buff_add(SPU_PROFILING_CODE, spu);
- spu_buff_add(num_spu_nodes, spu);
- }
- spin_unlock_irqrestore(&buffer_lock, flags);
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- spu_buff[spu].ctx_sw_seen = 0;
- spu_buff[spu].last_guard_val = 0;
- }
-
- /* Register for SPU events */
- register_ret = spu_switch_event_register(&spu_active);
- if (register_ret) {
- ret = SYNC_START_ERROR;
- goto out;
- }
-
- pr_debug("spu_sync_start -- running.\n");
-out:
- return ret;
-}
-
-/* Record SPU program counter samples to the oprofile event buffer. */
-void spu_sync_buffer(int spu_num, unsigned int *samples,
- int num_samples)
-{
- unsigned long long file_offset;
- unsigned long flags;
- int i;
- struct vma_to_fileoffset_map *map;
- struct spu *the_spu;
- unsigned long long spu_num_ll = spu_num;
- unsigned long long spu_num_shifted = spu_num_ll << 32;
- struct cached_info *c_info;
-
- /* We need to obtain the cache_lock here because it's
- * possible that after getting the cached_info, the SPU job
- * corresponding to this cached_info may end, thus resulting
- * in the destruction of the cached_info.
- */
- spin_lock_irqsave(&cache_lock, flags);
- c_info = get_cached_info(NULL, spu_num);
- if (!c_info) {
- /* This legitimately happens when the SPU task ends before all
- * samples are recorded.
- * No big deal -- so we just drop a few samples.
- */
- pr_debug("SPU_PROF: No cached SPU context "
- "for SPU #%d. Dropping samples.\n", spu_num);
- goto out;
- }
-
- map = c_info->map;
- the_spu = c_info->the_spu;
- spin_lock(&buffer_lock);
- for (i = 0; i < num_samples; i++) {
- unsigned int sample = *(samples+i);
- int grd_val = 0;
- file_offset = 0;
- if (sample == 0)
- continue;
-		file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);
-
-		/* If overlays are used by this SPU application, the guard
-		 * value is non-zero, indicating which overlay section is in
-		 * use. We need to discard samples taken during the time
-		 * period in which an overlay switch occurs (i.e., the guard
-		 * value changes).
-		 */
- if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
- spu_buff[spu_num].last_guard_val = grd_val;
- /* Drop the rest of the samples. */
- break;
- }
-
- /* We must ensure that the SPU context switch has been written
- * out before samples for the SPU. Otherwise, the SPU context
- * information is not available and the postprocessing of the
- * SPU PC will fail with no available anonymous map information.
- */
- if (spu_buff[spu_num].ctx_sw_seen)
- spu_buff_add((file_offset | spu_num_shifted),
- spu_num);
- }
- spin_unlock(&buffer_lock);
-out:
- spin_unlock_irqrestore(&cache_lock, flags);
-}
-
-
-int spu_sync_stop(void)
-{
- unsigned long flags = 0;
- int ret;
- int k;
-
- ret = spu_switch_event_unregister(&spu_active);
-
- if (ret)
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: spu_switch_event_unregister " \
- "returned %d\n",
- __func__, __LINE__, ret);
-
- /* flush any remaining data in the per SPU buffers */
- sync_spu_buff();
-
- spin_lock_irqsave(&cache_lock, flags);
- ret = release_cached_info(RELEASE_ALL);
- spin_unlock_irqrestore(&cache_lock, flags);
-
-	/* Remove the scheduled work queue item rather than waiting
-	 * for every queued entry to execute, then flush the pending
-	 * system-wide buffer to the event buffer.
- */
- cancel_delayed_work(&spu_work);
-
- for (k = 0; k < num_spu_nodes; k++) {
- spu_buff[k].ctx_sw_seen = 0;
-
- /*
-		 * spu_buff[k].buff will be NULL if there was a problem
-		 * allocating the buffer; kfree() handles NULL safely.
- */
- kfree(spu_buff[k].buff);
- spu_buff[k].buff = 0;
- }
- pr_debug("spu_sync_stop -- done.\n");
- return ret;
-}
-
diff --git a/arch/powerpc/oprofile/cell/vma_map.c b/arch/powerpc/oprofile/cell/vma_map.c
deleted file mode 100644
index 7c4b19cfde88..000000000000
--- a/arch/powerpc/oprofile/cell/vma_map.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-/* The code in this source file is responsible for generating
- * vma-to-fileOffset maps for both overlay and non-overlay SPU
- * applications.
- */
-
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/elf.h>
-#include <linux/slab.h>
-#include "pr_util.h"
-
-
-void vma_map_free(struct vma_to_fileoffset_map *map)
-{
- while (map) {
- struct vma_to_fileoffset_map *next = map->next;
- kfree(map);
- map = next;
- }
-}
-
-unsigned int
-vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
- const struct spu *aSpu, int *grd_val)
-{
- /*
-	 * Default the offset to the sampled address plus a flag value.
- * Addresses of dynamically generated code can't be found in the vma
- * map. For those addresses the flagged value will be sent on to
- * the user space tools so they can be reported rather than just
- * thrown away.
- */
- u32 offset = 0x10000000 + vma;
- u32 ovly_grd;
-
- for (; map; map = map->next) {
- if (vma < map->vma || vma >= map->vma + map->size)
- continue;
-
- if (map->guard_ptr) {
- ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
- if (ovly_grd != map->guard_val)
- continue;
- *grd_val = ovly_grd;
- }
- offset = vma - map->vma + map->offset;
- break;
- }
-
- return offset;
-}
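A caller's view of the lookup above (mirroring spu_sync_buffer() in the deleted spu_task_sync.c): offsets at or above the 0x10000000 flag denote PCs with no map entry, such as dynamically generated code:

/* Sketch of typical use; sample_pc is a raw SPU PC sample. */
int grd_val = 0;
unsigned int off = vma_map_lookup(map, sample_pc, the_spu, &grd_val);
if (off >= 0x10000000) {
	/* flagged: no vma map entry; pass through so user-space
	 * tools can report it instead of dropping the sample
	 */
}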
-
-static struct vma_to_fileoffset_map *
-vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
- unsigned int size, unsigned int offset, unsigned int guard_ptr,
- unsigned int guard_val)
-{
- struct vma_to_fileoffset_map *new = kzalloc(sizeof(*new), GFP_KERNEL);
-
- if (!new) {
-		printk(KERN_ERR "SPU_PROF: %s, line %d: kzalloc failed\n",
- __func__, __LINE__);
- vma_map_free(map);
- return NULL;
- }
-
- new->next = map;
- new->vma = vma;
- new->size = size;
- new->offset = offset;
- new->guard_ptr = guard_ptr;
- new->guard_val = guard_val;
-
- return new;
-}
-
-
-/* Parse SPE ELF header and generate a list of vma_maps.
- * A pointer to the first vma_map in the generated list
- * of vma_maps is returned. */
-struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
- unsigned long __spu_elf_start)
-{
- static const unsigned char expected[EI_PAD] = {
- [EI_MAG0] = ELFMAG0,
- [EI_MAG1] = ELFMAG1,
- [EI_MAG2] = ELFMAG2,
- [EI_MAG3] = ELFMAG3,
- [EI_CLASS] = ELFCLASS32,
- [EI_DATA] = ELFDATA2MSB,
- [EI_VERSION] = EV_CURRENT,
- [EI_OSABI] = ELFOSABI_NONE
- };
-
- int grd_val;
- struct vma_to_fileoffset_map *map = NULL;
- void __user *spu_elf_start = (void __user *)__spu_elf_start;
- struct spu_overlay_info ovly;
- unsigned int overlay_tbl_offset = -1;
- Elf32_Phdr __user *phdr_start;
- Elf32_Shdr __user *shdr_start;
- Elf32_Ehdr ehdr;
- Elf32_Phdr phdr;
- Elf32_Shdr shdr, shdr_str;
- Elf32_Sym sym;
- int i, j;
- char name[32];
-
- unsigned int ovly_table_sym = 0;
- unsigned int ovly_buf_table_sym = 0;
- unsigned int ovly_table_end_sym = 0;
- unsigned int ovly_buf_table_end_sym = 0;
- struct spu_overlay_info __user *ovly_table;
- unsigned int n_ovlys;
-
- /* Get and validate ELF header. */
-
- if (copy_from_user(&ehdr, spu_elf_start, sizeof (ehdr)))
- goto fail;
-
- if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_ident parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- if (ehdr.e_machine != EM_SPU) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_machine parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- if (ehdr.e_type != ET_EXEC) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_type parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- phdr_start = spu_elf_start + ehdr.e_phoff;
- shdr_start = spu_elf_start + ehdr.e_shoff;
-
- /* Traverse program headers. */
- for (i = 0; i < ehdr.e_phnum; i++) {
- if (copy_from_user(&phdr, phdr_start + i, sizeof(phdr)))
- goto fail;
-
- if (phdr.p_type != PT_LOAD)
- continue;
-		if (phdr.p_flags & (1 << 27))	/* skip SPU overlay segments */
- continue;
-
- map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
- phdr.p_offset, 0, 0);
- if (!map)
- goto fail;
- }
-
- pr_debug("SPU_PROF: Created non-overlay maps\n");
- /* Traverse section table and search for overlay-related symbols. */
- for (i = 0; i < ehdr.e_shnum; i++) {
- if (copy_from_user(&shdr, shdr_start + i, sizeof(shdr)))
- goto fail;
-
- if (shdr.sh_type != SHT_SYMTAB)
- continue;
- if (shdr.sh_entsize != sizeof (sym))
- continue;
-
- if (copy_from_user(&shdr_str,
- shdr_start + shdr.sh_link,
- sizeof(shdr)))
- goto fail;
-
- if (shdr_str.sh_type != SHT_STRTAB)
- goto fail;
-
- for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
- if (copy_from_user(&sym, spu_elf_start +
- shdr.sh_offset +
- j * sizeof (sym),
- sizeof (sym)))
- goto fail;
-
- if (copy_from_user(name,
- spu_elf_start + shdr_str.sh_offset +
- sym.st_name,
- 20))
- goto fail;
-
- if (memcmp(name, "_ovly_table", 12) == 0)
- ovly_table_sym = sym.st_value;
- if (memcmp(name, "_ovly_buf_table", 16) == 0)
- ovly_buf_table_sym = sym.st_value;
- if (memcmp(name, "_ovly_table_end", 16) == 0)
- ovly_table_end_sym = sym.st_value;
- if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
- ovly_buf_table_end_sym = sym.st_value;
- }
- }
-
- /* If we don't have overlays, we're done. */
- if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
- || ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
- pr_debug("SPU_PROF: No overlay table found\n");
- goto out;
- } else {
- pr_debug("SPU_PROF: Overlay table found\n");
- }
-
- /* The _ovly_table symbol represents a table with one entry
- * per overlay section. The _ovly_buf_table symbol represents
- * a table with one entry per overlay region.
- * The struct spu_overlay_info gives the structure of the _ovly_table
- * entries. The structure of _ovly_buf_table is simply one
- * u32 word per entry.
- */
- overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
- aSpu, &grd_val);
- if (overlay_tbl_offset > 0x10000000) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Error finding SPU overlay table\n",
- __func__, __LINE__);
- goto fail;
- }
- ovly_table = spu_elf_start + overlay_tbl_offset;
-
- n_ovlys = (ovly_table_end_sym -
- ovly_table_sym) / sizeof (ovly);
-
- /* Traverse overlay table. */
- for (i = 0; i < n_ovlys; i++) {
- if (copy_from_user(&ovly, ovly_table + i, sizeof (ovly)))
- goto fail;
-
- /* The ovly.vma/size/offset arguments are analogous to the same
- * arguments used above for non-overlay maps. The final two
- * args are referred to as the guard pointer and the guard
- * value.
- * The guard pointer is an entry in the _ovly_buf_table,
- * computed using ovly.buf as the index into the table. Since
- * ovly.buf values begin at '1' to reference the first (or 0th)
- * entry in the _ovly_buf_table, the computation subtracts 1
- * from ovly.buf.
- * The guard value is stored in the _ovly_buf_table entry and
- * is an index (starting at 1) back to the _ovly_table entry
- * that is pointing at this _ovly_buf_table entry. So, for
- * example, for an overlay scenario with one overlay segment
- * and two overlay sections:
- * - Section 1 points to the first entry of the
- * _ovly_buf_table, which contains a guard value
- * of '1', referencing the first (index=0) entry of
- * _ovly_table.
- * - Section 2 points to the second entry of the
- * _ovly_buf_table, which contains a guard value
- * of '2', referencing the second (index=1) entry of
- * _ovly_table.
- */
- map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
- ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
- if (!map)
- goto fail;
- }
- goto out;
-
- fail:
- map = NULL;
- out:
- return map;
-}
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
deleted file mode 100644
index 0fb528c2b3a1..000000000000
--- a/arch/powerpc/oprofile/common.c
+++ /dev/null
@@ -1,243 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PPC 64 oprofile support:
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- * PPC 32 oprofile support: (based on PPC 64 support)
- * Copyright (C) Freescale Semiconductor, Inc 2004
- * Author: Andy Fleming
- *
- * Based on alpha version.
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/pmc.h>
-#include <asm/cputable.h>
-#include <asm/oprofile_impl.h>
-#include <asm/firmware.h>
-
-static struct op_powerpc_model *model;
-
-static struct op_counter_config ctr[OP_MAX_COUNTER];
-static struct op_system_config sys;
-
-static int op_per_cpu_rc;
-
-static void op_handle_interrupt(struct pt_regs *regs)
-{
- model->handle_interrupt(regs, ctr);
-}
-
-static void op_powerpc_cpu_setup(void *dummy)
-{
- int ret;
-
- ret = model->cpu_setup(ctr);
-
- if (ret != 0)
- op_per_cpu_rc = ret;
-}
-
-static int op_powerpc_setup(void)
-{
- int err;
-
- op_per_cpu_rc = 0;
-
- /* Grab the hardware */
- err = reserve_pmc_hardware(op_handle_interrupt);
- if (err)
- return err;
-
- /* Pre-compute the values to stuff in the hardware registers. */
- op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);
-
- if (op_per_cpu_rc)
- goto out;
-
- /* Configure the registers on all cpus. If an error occurs on one
- * of the cpus, op_per_cpu_rc will be set to the error */
- on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
-
-out: if (op_per_cpu_rc) {
- /* error on setup release the performance counter hardware */
- release_pmc_hardware();
- }
-
- return op_per_cpu_rc;
-}
-
-static void op_powerpc_shutdown(void)
-{
- release_pmc_hardware();
-}
-
-static void op_powerpc_cpu_start(void *dummy)
-{
-	/* If any of the cpus have returned an error, set the
-	 * global flag to that error so it can be returned
-	 * to the generic OProfile caller.
-	 */
- int ret;
-
- ret = model->start(ctr);
- if (ret != 0)
- op_per_cpu_rc = ret;
-}
-
-static int op_powerpc_start(void)
-{
- op_per_cpu_rc = 0;
-
- if (model->global_start)
- return model->global_start(ctr);
- if (model->start) {
- on_each_cpu(op_powerpc_cpu_start, NULL, 1);
- return op_per_cpu_rc;
- }
- return -EIO; /* No start function is defined for this
- power architecture */
-}
-
-static inline void op_powerpc_cpu_stop(void *dummy)
-{
- model->stop();
-}
-
-static void op_powerpc_stop(void)
-{
- if (model->stop)
- on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
- if (model->global_stop)
- model->global_stop();
-}
-
-static int op_powerpc_create_files(struct dentry *root)
-{
- int i;
-
-#ifdef CONFIG_PPC64
- /*
- * There is one mmcr0, mmcr1 and mmcra for setting the events for
- * all of the counters.
- */
- oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
- oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
- oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
-#ifdef CONFIG_OPROFILE_CELL
-	/* create a file the user tool can check to see what level of profiling
-	 * support exists in this kernel. Initialize the bit mask to indicate
-	 * what support the kernel has:
- * bit 0 - Supports SPU event profiling in addition to PPU
- * event and cycles; and SPU cycle profiling
- * bits 1-31 - Currently unused.
- *
- * If the file does not exist, then the kernel only supports SPU
- * cycle profiling, PPU event and cycle profiling.
- */
- oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
- sys.cell_support = 0x1; /* Note, the user OProfile tool must check
- * that this bit is set before attempting to
- * use SPU event profiling. Older kernels
- * will not have this file, hence the user
- * tool is not allowed to do SPU event
- * profiling on older kernels. Older kernels
- * will accept SPU events but collected data
- * is garbage.
- */
-#endif
-#endif
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
-
- /*
- * Classic PowerPC doesn't support per-counter
- * control like this, but the options are
- * expected, so they remain. For Freescale
- * Book-E style performance monitors, we do
- * support them.
- */
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
-
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
- oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
-
- /* Default to tracing both kernel and user */
- sys.enable_kernel = 1;
- sys.enable_user = 1;
-
- return 0;
-}
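-
-/*
- * For orientation (illustrative, not part of the original file): with
- * two counters the calls above produce an oprofilefs layout roughly like
- *
- *	mmcr0, mmcr1, mmcra			(PPC64 only)
- *	0/enabled 0/event 0/count 0/kernel 0/user 0/unit_mask
- *	1/enabled 1/event 1/count 1/kernel 1/user 1/unit_mask
- *	enable_kernel, enable_user
- *
- * all relative to the oprofilefs root passed in as "root", plus the
- * optional "cell_support" file on Cell.
- */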
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
-
- switch (cur_cpu_spec->oprofile_type) {
-#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_OPROFILE_CELL
- case PPC_OPROFILE_CELL:
- if (firmware_has_feature(FW_FEATURE_LPAR))
- return -ENODEV;
- model = &op_model_cell;
- ops->sync_start = model->sync_start;
- ops->sync_stop = model->sync_stop;
- break;
-#endif
- case PPC_OPROFILE_POWER4:
- model = &op_model_power4;
- break;
- case PPC_OPROFILE_PA6T:
- model = &op_model_pa6t;
- break;
-#endif
-#ifdef CONFIG_PPC_BOOK3S_32
- case PPC_OPROFILE_G4:
- model = &op_model_7450;
- break;
-#endif
-#if defined(CONFIG_FSL_EMB_PERFMON)
- case PPC_OPROFILE_FSL_EMB:
- model = &op_model_fsl_emb;
- break;
-#endif
- default:
- return -ENODEV;
- }
-
- model->num_counters = cur_cpu_spec->num_pmcs;
-
- ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
- ops->create_files = op_powerpc_create_files;
- ops->setup = op_powerpc_setup;
- ops->shutdown = op_powerpc_shutdown;
- ops->start = op_powerpc_start;
- ops->stop = op_powerpc_stop;
- ops->backtrace = op_powerpc_backtrace;
-
- printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
- ops->cpu_type);
-
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
deleted file mode 100644
index 5ebc25188a72..000000000000
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/powerpc/oprofile/op_model_7450.c
- *
- * Freescale 745x/744x oprofile support, based on fsl_booke support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/page.h>
-#include <asm/pmc.h>
-#include <asm/oprofile_impl.h>
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int oprofile_running;
-static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;
-
-#define MMCR0_PMC1_SHIFT 6
-#define MMCR0_PMC2_SHIFT 0
-#define MMCR1_PMC3_SHIFT 27
-#define MMCR1_PMC4_SHIFT 22
-#define MMCR1_PMC5_SHIFT 17
-#define MMCR1_PMC6_SHIFT 11
-
-#define mmcr0_event1(event) \
- ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
-#define mmcr0_event2(event) \
- ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
-
-#define mmcr1_event3(event) \
- ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
-#define mmcr1_event4(event) \
- ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
-#define mmcr1_event5(event) \
- ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
-#define mmcr1_event6(event) \
- ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
-
-#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)
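-
-/*
- * Worked example (illustrative): mmcr0_event1(0x23) evaluates to
- * (0x23 << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL, i.e. 0x8c0 masked down to
- * the PMC1SEL field, so an event code too wide for the field is
- * silently truncated rather than rejected.
- */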
-
-/* Unfreezes the counters on this CPU, enables the interrupt,
- * enables the counters to trigger the interrupt, and sets the
- * counters to only count when the mark bit is not set.
- */
-static void pmc_start_ctrs(void)
-{
- u32 mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
- mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
-
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-/* Disables the counters on this CPU, and freezes them */
-static void pmc_stop_ctrs(void)
-{
- u32 mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 |= MMCR0_FC;
- mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
-
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-/* Configures the counters on this CPU based on the global
- * settings */
-static int fsl7450_cpu_setup(struct op_counter_config *ctr)
-{
- /* freeze all counters */
- pmc_stop_ctrs();
-
- mtspr(SPRN_MMCR0, mmcr0_val);
- mtspr(SPRN_MMCR1, mmcr1_val);
- if (num_pmcs > 4)
- mtspr(SPRN_MMCR2, mmcr2_val);
-
- return 0;
-}
-
-/* Configures the global settings for the counters on all CPUs. */
-static int fsl7450_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- num_pmcs = num_ctrs;
- /* Our counters count up and we interrupt on overflow,
- * so "count" is the number of events remaining before
- * the next interrupt. We calculate the starting value
- * which will give us "count" events until overflow,
- * then set the events on the enabled counters */
- for (i = 0; i < num_ctrs; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- /* Set events for Counters 1 & 2 */
- mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
- | mmcr0_event2(ctr[1].event);
-
- /* Setup user/kernel bits */
- if (sys->enable_kernel)
- mmcr0_val &= ~(MMCR0_FCS);
-
- if (sys->enable_user)
- mmcr0_val &= ~(MMCR0_FCP);
-
- /* Set events for Counters 3-6 */
- mmcr1_val = mmcr1_event3(ctr[2].event)
- | mmcr1_event4(ctr[3].event);
- if (num_ctrs > 4)
- mmcr1_val |= mmcr1_event5(ctr[4].event)
- | mmcr1_event6(ctr[5].event);
-
- mmcr2_val = 0;
-
- return 0;
-}
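-
-/*
- * Worked example (illustrative): for ctr[i].count == 100000 the loop
- * above yields reset_value[i] = 0x80000000 - 0x186a0 = 0x7ffe7960.
- * Counting up from there, the PMC reaches 0x80000000 (MSB set, i.e.
- * negative as seen by classic_ctr_read()) after exactly 100000 events,
- * which raises the performance monitor interrupt.
- */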
-
-/* Sets the counters on this CPU to the chosen values, and starts them */
-static int fsl7450_start(struct op_counter_config *ctr)
-{
- int i;
-
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < num_pmcs; ++i) {
- if (ctr[i].enabled)
- classic_ctr_write(i, reset_value[i]);
- else
- classic_ctr_write(i, 0);
- }
-
- /* Clear the freeze bit, and enable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs();
-
- oprofile_running = 1;
-
- return 0;
-}
-
-/* Stop the counters on this CPU */
-static void fsl7450_stop(void)
-{
- /* freeze counters */
- pmc_stop_ctrs();
-
- oprofile_running = 0;
-
- mb();
-}
-
-
-/* Handle the interrupt on this CPU, and log a sample for each
- * event that triggered the interrupt */
-static void fsl7450_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- pc = mfspr(SPRN_SIAR);
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_pmcs; ++i) {
- val = classic_ctr_read(i);
- if (val < 0) {
- if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
- }
-
- /* The freeze bit was set by the interrupt. */
- /* Clear the freeze bit, and reenable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs();
-}
-
-struct op_powerpc_model op_model_7450 = {
- .reg_setup = fsl7450_reg_setup,
- .cpu_setup = fsl7450_cpu_setup,
- .start = fsl7450_start,
- .stop = fsl7450_stop,
- .handle_interrupt = fsl7450_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
deleted file mode 100644
index 7eb73070b7be..000000000000
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ /dev/null
@@ -1,1709 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: David Erb (djerb@us.ibm.com)
- * Modifications:
- * Carl Love <carll@us.ibm.com>
- * Maynard Johnson <maynardj@us.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/kthread.h>
-#include <linux/oprofile.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <asm/cell-pmu.h>
-#include <asm/cputable.h>
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/oprofile_impl.h>
-#include <asm/processor.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
-#include <asm/reg.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "../platforms/cell/interrupt.h"
-#include "cell/pr_util.h"
-
-#define PPU_PROFILING 0
-#define SPU_PROFILING_CYCLES 1
-#define SPU_PROFILING_EVENTS 2
-
-#define SPU_EVENT_NUM_START 4100
-#define SPU_EVENT_NUM_STOP 4399
-#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
-#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
-#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
-
-#define NUM_SPUS_PER_NODE 8
-#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
-
-#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
-#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying
- * PPU_CYCLES event
- */
-#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
-
-#define NUM_THREADS 2 /* number of physical threads in
- * physical processor
- */
-#define NUM_DEBUG_BUS_WORDS 4
-#define NUM_INPUT_BUS_WORDS 2
-
-#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
-
-/* Minimum HW interval timer setting to send value to trace buffer is 10 cycles.
- * To configure counter to send value every N cycles set counter to
- * 2^32 - 1 - N.
- */
-#define NUM_INTERVAL_CYC (0xFFFFFFFF - 10)
-
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
-static unsigned int profiling_mode;
-static int spu_evnt_phys_spu_indx;
-
-struct pmc_cntrl_data {
- unsigned long vcntr;
- unsigned long evnts;
- unsigned long masks;
- unsigned long enabled;
-};
-
-/*
- * ibm,cbe-perftools rtas parameters
- */
-struct pm_signal {
- u16 cpu; /* Processor to modify */
- u16 sub_unit; /* hw subunit this applies to (if applicable)*/
- short int signal_group; /* Signal Group to Enable/Disable */
- u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
- * Bus Word(s) (bitmask)
- */
- u8 bit; /* Trigger/Event bit (if applicable) */
-};
-
-/*
- * rtas call arguments
- */
-enum {
- SUBFUNC_RESET = 1,
- SUBFUNC_ACTIVATE = 2,
- SUBFUNC_DEACTIVATE = 3,
-
- PASSTHRU_IGNORE = 0,
- PASSTHRU_ENABLE = 1,
- PASSTHRU_DISABLE = 2,
-};
-
-struct pm_cntrl {
- u16 enable;
- u16 stop_at_max;
- u16 trace_mode;
- u16 freeze;
- u16 count_mode;
- u16 spu_addr_trace;
- u8 trace_buf_ovflw;
-};
-
-static struct {
- u32 group_control;
- u32 debug_bus_control;
- struct pm_cntrl pm_cntrl;
- u32 pm07_cntrl[NR_PHYS_CTRS];
-} pm_regs;
-
-#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
-#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
-#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
-#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
-#define GET_COUNT_CYCLES(x) (x & 0x00000001)
-#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
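-
-/*
- * Decoding example (illustrative): a unit_mask of 0x132 splits into
- * GET_SUB_UNIT = 0x0, GET_BUS_TYPE = 0x1, GET_BUS_WORD = 0x3,
- * GET_POLARITY = 1, GET_INPUT_CONTROL = 0 and GET_COUNT_CYCLES = 0.
- */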
-
-static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
-static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
-
-/*
- * The CELL profiling code makes rtas calls to setup the debug bus to
- * route the performance signals. Additionally, SPU profiling requires
- * a second rtas call to setup the hardware to capture the SPU PCs.
- * The EIO error value is returned if the token lookups or the rtas
- * call fail. The EIO error number is the best choice of the existing
- * error numbers. The probability of rtas related error is very low. But
- * by returning EIO and printing additional information to dmesg the user
- * will know that OProfile did not start and dmesg will tell them why.
- * OProfile does not support returning errors on Stop. Not a huge issue
- * since failure to reset the debug bus or stop the SPU PC collection is
- * not a fatal issue. Chances are if the Stop failed, Start doesn't work
- * either.
- */
-
-/*
- * Interpretation of hdw_thread:
- * 0 - even virtual cpus 0, 2, 4,...
- * 1 - odd virtual cpus 1, 3, 5, ...
- *
- * FIXME: this is strictly wrong, we need to clean this up in a number
- * of places. It works for now. -arnd
- */
-static u32 hdw_thread;
-
-static u32 virt_cntr_inter_mask;
-static struct timer_list timer_virt_cntr;
-static struct timer_list timer_spu_event_swap;
-
-/*
- * pm_signal needs to be global since it is initialized in
- * cell_reg_setup at the time when the necessary information
- * is available.
- */
-static struct pm_signal pm_signal[NR_PHYS_CTRS];
-static int pm_rtas_token; /* token for debug bus setup call */
-static int spu_rtas_token; /* token for SPU cycle profiling */
-
-static u32 reset_value[NR_PHYS_CTRS];
-static int num_counters;
-static int oprofile_running;
-static DEFINE_SPINLOCK(cntr_lock);
-
-static u32 ctr_enabled;
-
-static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
-
-/*
- * Firmware interface functions
- */
-static int
-rtas_ibm_cbe_perftools(int subfunc, int passthru,
- void *address, unsigned long length)
-{
- u64 paddr = __pa(address);
-
- return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
- passthru, paddr >> 32, paddr & 0xffffffff, length);
-}
-
-static void pm_rtas_reset_signals(u32 node)
-{
- int ret;
- struct pm_signal pm_signal_local;
-
- /*
- * The debug bus is being set to the passthru disable state.
- * However, the FW still expects at least one legal signal routing
- * entry or it will return an error on the arguments. If we don't
- * supply a valid entry, we must ignore all return values. Ignoring
- * all return values means we might miss an error we should be
- * concerned about.
- */
-
- /* fw expects physical cpu #. */
- pm_signal_local.cpu = node;
- pm_signal_local.signal_group = 21;
- pm_signal_local.bus_word = 1;
- pm_signal_local.sub_unit = 0;
- pm_signal_local.bit = 0;
-
- ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
- &pm_signal_local,
- sizeof(struct pm_signal));
-
- if (unlikely(ret))
- /*
- * Not a fatal error. For Oprofile stop, the oprofile
- * functions do not support returning an error for
- * failure to stop OProfile.
- */
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
-}
-
-static int pm_rtas_activate_signals(u32 node, u32 count)
-{
- int ret;
- int i, j;
- struct pm_signal pm_signal_local[NR_PHYS_CTRS];
-
- /*
- * There is no debug setup required for the cycles event.
- * Note that only events in the same group can be used.
- * Otherwise, there will be conflicts in correctly routing
- * the signals on the debug bus. It is the responsibility
- * of the OProfile user tool to check the events are in
- * the same group.
- */
- i = 0;
- for (j = 0; j < count; j++) {
- if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
-
- /* fw expects physical cpu # */
- pm_signal_local[i].cpu = node;
- pm_signal_local[i].signal_group
- = pm_signal[j].signal_group;
- pm_signal_local[i].bus_word = pm_signal[j].bus_word;
- pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
- pm_signal_local[i].bit = pm_signal[j].bit;
- i++;
- }
- }
-
- if (i != 0) {
- ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
- pm_signal_local,
- i * sizeof(struct pm_signal));
-
- if (unlikely(ret)) {
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-/*
- * PM Signal functions
- */
-static void set_pm_event(u32 ctr, int event, u32 unit_mask)
-{
- struct pm_signal *p;
- u32 signal_bit;
- u32 bus_word, bus_type, count_cycles, polarity, input_control;
- int j, i;
-
- if (event == PPU_CYCLES_EVENT_NUM) {
- /* Special Event: Count all cpu cycles */
- pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
- p = &(pm_signal[ctr]);
- p->signal_group = PPU_CYCLES_GRP_NUM;
- p->bus_word = 1;
- p->sub_unit = 0;
- p->bit = 0;
- goto out;
- } else {
- pm_regs.pm07_cntrl[ctr] = 0;
- }
-
- bus_word = GET_BUS_WORD(unit_mask);
- bus_type = GET_BUS_TYPE(unit_mask);
- count_cycles = GET_COUNT_CYCLES(unit_mask);
- polarity = GET_POLARITY(unit_mask);
- input_control = GET_INPUT_CONTROL(unit_mask);
- signal_bit = (event % 100);
-
- p = &(pm_signal[ctr]);
-
- p->signal_group = event / 100;
- p->bus_word = bus_word;
- p->sub_unit = GET_SUB_UNIT(unit_mask);
-
- pm_regs.pm07_cntrl[ctr] = 0;
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
-
- /*
- * Some of the islands signal selection is based on 64 bit words.
- * The debug bus words are 32 bits, the input words to the performance
- * counters are defined as 32 bits. Need to convert the 64 bit island
- * specification to the appropriate 32 input bit and bus word for the
- * performance counter event selection. See the CELL Performance
- * monitoring signals manual and the Perf cntr hardware descriptions
- * for the details.
- */
- if (input_control == 0) {
- if (signal_bit > 31) {
- signal_bit -= 32;
- if (bus_word == 0x3)
- bus_word = 0x2;
- else if (bus_word == 0xc)
- bus_word = 0x8;
- }
-
- if ((bus_type == 0) && p->signal_group >= 60)
- bus_type = 2;
- if ((bus_type == 1) && p->signal_group >= 50)
- bus_type = 0;
-
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
- } else {
- pm_regs.pm07_cntrl[ctr] = 0;
- p->bit = signal_bit;
- }
-
- for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
- if (bus_word & (1 << i)) {
- pm_regs.debug_bus_control |=
- (bus_type << (30 - (2 * i)));
-
- for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
- if (input_bus[j] == 0xff) {
- input_bus[j] = i;
- pm_regs.group_control |=
- (i << (30 - (2 * j)));
-
- break;
- }
- }
- }
- }
-out:
- ;
-}
-
-static void write_pm_cntrl(int cpu)
-{
- /*
- * Oprofile will use 32 bit counters, set bits 7:10 to 0
- * pm_regs.pm_cntrl is a global
- */
-
- u32 val = 0;
- if (pm_regs.pm_cntrl.enable == 1)
- val |= CBE_PM_ENABLE_PERF_MON;
-
- if (pm_regs.pm_cntrl.stop_at_max == 1)
- val |= CBE_PM_STOP_AT_MAX;
-
- if (pm_regs.pm_cntrl.trace_mode != 0)
- val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
-
- if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
- val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
- if (pm_regs.pm_cntrl.freeze == 1)
- val |= CBE_PM_FREEZE_ALL_CTRS;
-
- val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
-
- /*
- * Routine set_count_mode must be called previously to set
- * the count mode based on the user selection of user and kernel.
- */
- val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
- cbe_write_pm(cpu, pm_control, val);
-}
-
-static inline void
-set_count_mode(u32 kernel, u32 user)
-{
- /*
- * The user must specify user and kernel if they want them. If
- * neither is specified, OProfile will count in hypervisor mode.
- * pm_regs.pm_cntrl is a global
- */
- if (kernel) {
- if (user)
- pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
- else
- pm_regs.pm_cntrl.count_mode =
- CBE_COUNT_SUPERVISOR_MODE;
- } else {
- if (user)
- pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
- else
- pm_regs.pm_cntrl.count_mode =
- CBE_COUNT_HYPERVISOR_MODE;
- }
-}
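-
-/*
- * Summary of the mapping above (for reference):
- *
- *	kernel	user	count_mode
- *	  1	  1	CBE_COUNT_ALL_MODES
- *	  1	  0	CBE_COUNT_SUPERVISOR_MODE
- *	  0	  1	CBE_COUNT_PROBLEM_MODE
- *	  0	  0	CBE_COUNT_HYPERVISOR_MODE
- */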
-
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
-{
-
- pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
- cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
-}
-
-/*
- * Oprofile is expected to collect data on all CPUs simultaneously.
- * However, there is one set of performance counters per node. There are
- * two hardware threads or virtual CPUs on each node. Hence, OProfile must
- * multiplex in time the performance counter collection on the two virtual
- * CPUs. The multiplexing of the performance counters is done by this
- * virtual counter routine.
- *
- * The pmc_values used below is defined as 'per-cpu' but its use is
- * more akin to 'per-node'. We need to store two sets of counter
- * values per node -- one for the previous run and one for the next.
- * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
- * pair of per-cpu arrays is used for storing the previous and next
- * pmc values for a given node.
- * NOTE: We use the per-cpu variable to improve cache performance.
- *
- * This routine alternates loading the virtual counters for the
- * two virtual CPUs.
- */
-static void cell_virtual_cntr(struct timer_list *unused)
-{
- int i, prev_hdw_thread, next_hdw_thread;
- u32 cpu;
- unsigned long flags;
-
- /*
- * Make sure that the interrupt handler and the virt counter are
- * not both playing with the counters on the same node.
- */
-
- spin_lock_irqsave(&cntr_lock, flags);
-
- prev_hdw_thread = hdw_thread;
-
- /* switch the cpu handling the interrupts */
- hdw_thread = 1 ^ hdw_thread;
- next_hdw_thread = hdw_thread;
-
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
-
- for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
- input_bus[i] = 0xff;
-
- /*
- * There are some per thread events. Must do the
- * set event for the thread that is being started.
- */
- for (i = 0; i < num_counters; i++)
- set_pm_event(i,
- pmc_cntrl[next_hdw_thread][i].evnts,
- pmc_cntrl[next_hdw_thread][i].masks);
-
- /*
- * The following is done only once per each node, but
- * we need cpu #, not node #, to pass to the cbe_xxx functions.
- */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * stop counters, save counter values, restore counts
- * for previous thread
- */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
- for (i = 0; i < num_counters; i++) {
- per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
- = cbe_read_ctr(cpu, i);
-
- if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
- == 0xFFFFFFFF)
- /* If the cntr value is 0xffffffff, we must
- * reset that to 0xfffffff0 when the current
- * thread is restarted. This will generate a
- * new interrupt and make sure that we never
- * restore the counters to the max value. If
- * the counters were restored to the max value,
- * they do not increment and no interrupts are
- * generated. Hence no more samples will be
- * collected on that cpu.
- */
- cbe_write_ctr(cpu, i, 0xFFFFFFF0);
- else
- cbe_write_ctr(cpu, i,
- per_cpu(pmc_values,
- cpu +
- next_hdw_thread)[i]);
- }
-
- /*
- * Switch to the other thread. Change the interrupt
- * and control regs to be scheduled on the CPU
- * corresponding to the thread to execute.
- */
- for (i = 0; i < num_counters; i++) {
- if (pmc_cntrl[next_hdw_thread][i].enabled) {
- /*
- * There are some per thread events.
- * Must do the set event, enable_cntr
- * for each cpu.
- */
- enable_ctr(cpu, i,
- pm_regs.pm07_cntrl);
- } else {
- cbe_write_pm07_control(cpu, i, 0);
- }
- }
-
- /* Enable interrupts on the CPU thread that is starting */
- cbe_enable_pm_interrupts(cpu, next_hdw_thread,
- virt_cntr_inter_mask);
- cbe_enable_pm(cpu);
- }
-
- spin_unlock_irqrestore(&cntr_lock, flags);
-
- mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
-}
-
-static void start_virt_cntrs(void)
-{
- timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
- timer_virt_cntr.expires = jiffies + HZ / 10;
- add_timer(&timer_virt_cntr);
-}
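-
-/*
- * Indexing sketch (illustrative): cell_virtual_cntr() above only runs
- * its per-node loop on the even virtual cpu N of each node (odd
- * hardware threads are skipped via cbe_get_hw_thread_id()), so
- *
- *	per_cpu(pmc_values, N + 0)	holds the counts for hdw_thread 0
- *	per_cpu(pmc_values, N + 1)	holds the counts for hdw_thread 1
- *
- * and "cpu + prev_hdw_thread" / "cpu + next_hdw_thread" select which of
- * the two saved count sets belongs to the outgoing and incoming thread.
- */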
-
-static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- spu_cycle_reset = ctr[0].count;
-
- /*
- * Each node will need to make the rtas call to start
- * and stop SPU profiling. Get the token once and store it.
- */
- spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
-
- if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-spu-perftools unknown\n",
- __func__);
- return -EIO;
- }
- return 0;
-}
-
-/* Unfortunately, the hardware will only support event profiling
- * on one SPU per node at a time. Therefore, we must time slice
- * the profiling across all SPUs in the node. Note, we do this
- * in parallel for each node. The following routine is called
- * periodically based on kernel timer to switch which SPU is
- * being monitored in a round-robin fashion.
- */
-static void spu_evnt_swap(struct timer_list *unused)
-{
- int node;
- int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
- unsigned long flags;
- int cpu;
- int ret;
- u32 interrupt_mask;
-
-
- /* enable interrupts on cntr 0 */
- interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
-
- hdw_thread = 0;
-
- /* Make sure spu event interrupt handler and spu event swap
- * don't access the counters simultaneously.
- */
- spin_lock_irqsave(&cntr_lock, flags);
-
- cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
-
- if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
- spu_evnt_phys_spu_indx = 0;
-
- pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
- pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
- pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
-
- /* switch the SPU being profiled on each node */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- node = cbe_cpu_to_node(cpu);
- cur_phys_spu = (node * NUM_SPUS_PER_NODE)
- + cur_spu_evnt_phys_spu_indx;
- nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
- + spu_evnt_phys_spu_indx;
-
- /*
- * stop counters, save counter values, restore counts
- * for previous physical SPU
- */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
-
- spu_pm_cnt[cur_phys_spu]
- = cbe_read_ctr(cpu, 0);
-
- /* restore previous count for the next spu to sample */
- /* NOTE, hardware issue, counter will not start if the
- * counter value is at max (0xFFFFFFFF).
- */
- if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
- cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
- else
- cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
-
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* setup the debug bus to measure the one event and
- * the two events that route the next SPU's PC onto
- * the debug bus
- */
- ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
- if (ret)
- printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
- "SPU event swap\n", __func__);
-
- /* clear the trace buffer; don't want to take PC for
- * previous SPU */
- cbe_write_pm(cpu, trace_address, 0);
-
- enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
-
- /* Enable interrupts on the CPU thread that is starting */
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- interrupt_mask);
- cbe_enable_pm(cpu);
- }
-
- spin_unlock_irqrestore(&cntr_lock, flags);
-
- /* swap approximately every 40 ms (HZ / 25 jiffies) */
- mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
-}
-
-static void start_spu_event_swap(void)
-{
- timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
- timer_spu_event_swap.expires = jiffies + HZ / 25;
- add_timer(&timer_spu_event_swap);
-}
-
-static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- int i;
-
- /* routine is called once for all nodes */
-
- spu_evnt_phys_spu_indx = 0;
- /*
- * For all events except PPU CYCLEs, each node will need to make
- * the rtas cbe-perftools call to setup and reset the debug bus.
- * Make the token lookup call once and store it in the global
- * variable pm_rtas_token.
- */
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
-
- if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-perftools unknown\n",
- __func__);
- return -EIO;
- }
-
- /* setup the pm_control register settings;
- * the settings will be written per node by the
- * cell_cpu_setup() function.
- */
- pm_regs.pm_cntrl.trace_buf_ovflw = 1;
-
- /* Use the occurrence trace mode to have SPU PC saved
- * to the trace buffer. Occurrence data in trace buffer
- * is not used. Bit 2 must be set to store SPU addresses.
- */
- pm_regs.pm_cntrl.trace_mode = 2;
-
- pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
- event 2 & 3 */
-
- /* setup the debug bus event array with the SPU PC routing events.
- * Note, pm_signal[0] will be filled in by set_pm_event() call below.
- */
- pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
- pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
- pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
- pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
-
- pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
- pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
- pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
- pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
-
- /* Set the user-selected SPU event to profile on;
- * note, only one SPU profiling event is supported
- */
- num_counters = 1; /* Only support one SPU event at a time */
- set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
-
- reset_value[0] = 0xFFFFFFFF - ctr[0].count;
-
- /* global, used by cell_cpu_setup */
- ctr_enabled |= 1;
-
- /* Initialize the count for each SPU to the reset value */
- for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
- spu_pm_cnt[i] = reset_value[0];
-
- return 0;
-}
-
-static int cell_reg_setup_ppu(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- /* routine is called once for all nodes */
- int i, j, cpu;
-
- num_counters = num_ctrs;
-
- if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
- printk(KERN_ERR
- "%s: Oprofile, number of specified events " \
- "exceeds number of physical counters\n",
- __func__);
- return -EIO;
- }
-
- set_count_mode(sys->enable_kernel, sys->enable_user);
-
- /* Setup the thread 0 events */
- for (i = 0; i < num_ctrs; ++i) {
-
- pmc_cntrl[0][i].evnts = ctr[i].event;
- pmc_cntrl[0][i].masks = ctr[i].unit_mask;
- pmc_cntrl[0][i].enabled = ctr[i].enabled;
- pmc_cntrl[0][i].vcntr = i;
-
- for_each_possible_cpu(j)
- per_cpu(pmc_values, j)[i] = 0;
- }
-
- /*
- * Setup the thread 1 events, map the thread 0 event to the
- * equivalent thread 1 event.
- */
- for (i = 0; i < num_ctrs; ++i) {
- if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
- pmc_cntrl[1][i].evnts = ctr[i].event + 19;
- else if (ctr[i].event == 2203)
- pmc_cntrl[1][i].evnts = ctr[i].event;
- else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
- pmc_cntrl[1][i].evnts = ctr[i].event + 16;
- else
- pmc_cntrl[1][i].evnts = ctr[i].event;
-
- pmc_cntrl[1][i].masks = ctr[i].unit_mask;
- pmc_cntrl[1][i].enabled = ctr[i].enabled;
- pmc_cntrl[1][i].vcntr = i;
- }
-
- for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
- input_bus[i] = 0xff;
-
- /*
- * Our counters count up and we interrupt on overflow,
- * so "count" is the number of events remaining before
- * the next interrupt. We calculate the starting value
- * which will give us "count" events until overflow,
- * then set the events on the enabled counters.
- */
- for (i = 0; i < num_counters; ++i) {
- /* start with virtual counter set 0 */
- if (pmc_cntrl[0][i].enabled) {
- /* Using 32bit counters, reset max - count */
- reset_value[i] = 0xFFFFFFFF - ctr[i].count;
- set_pm_event(i,
- pmc_cntrl[0][i].evnts,
- pmc_cntrl[0][i].masks);
-
- /* global, used by cell_cpu_setup */
- ctr_enabled |= (1 << i);
- }
- }
-
- /* initialize the previous counts for the virtual cntrs */
- for_each_online_cpu(cpu)
- for (i = 0; i < num_counters; ++i) {
- per_cpu(pmc_values, cpu)[i] = reset_value[i];
- }
-
- return 0;
-}
-
-
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- int ret = 0;
- spu_cycle_reset = 0;
-
- /* initialize the spu_addr_trace value; it will be reset if
- * doing spu event profiling.
- */
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
- pm_regs.pm_cntrl.stop_at_max = 1;
- pm_regs.pm_cntrl.trace_mode = 0;
- pm_regs.pm_cntrl.freeze = 1;
- pm_regs.pm_cntrl.trace_buf_ovflw = 0;
- pm_regs.pm_cntrl.spu_addr_trace = 0;
-
- /*
- * For all events except PPU CYCLEs, each node will need to make
- * the rtas cbe-perftools call to setup and reset the debug bus.
- * Make the token lookup call once and store it in the global
- * variable pm_rtas_token.
- */
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
-
- if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-perftools unknown\n",
- __func__);
- return -EIO;
- }
-
- if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
- profiling_mode = SPU_PROFILING_CYCLES;
- ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
- } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
- (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
- profiling_mode = SPU_PROFILING_EVENTS;
- spu_cycle_reset = ctr[0].count;
-
- /* for SPU event profiling, need to setup the
- * pm_signal array with the events to route the
- * SPU PC before making the FW call. Note, only
- * one SPU event for profiling can be specified
- * at a time.
- */
- cell_reg_setup_spu_events(ctr, sys, num_ctrs);
- } else {
- profiling_mode = PPU_PROFILING;
- ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
- }
-
- return ret;
-}
-
-
-
-/* This function is called once for each cpu */
-static int cell_cpu_setup(struct op_counter_config *cntr)
-{
- u32 cpu = smp_processor_id();
- u32 num_enabled = 0;
- int i;
- int ret;
-
- /* Cycle based SPU profiling does not use the performance
- * counters. The trace array is configured to collect
- * the data.
- */
- if (profiling_mode == SPU_PROFILING_CYCLES)
- return 0;
-
- /* There is one performance monitor per processor chip (i.e. node),
- * so we only need to perform this function once per node.
- */
- if (cbe_get_hw_thread_id(cpu))
- return 0;
-
- /* Stop all counters */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
-
- cbe_write_pm(cpu, pm_start_stop, 0);
- cbe_write_pm(cpu, group_control, pm_regs.group_control);
- cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
- write_pm_cntrl(cpu);
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr_enabled & (1 << i)) {
- pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
- num_enabled++;
- }
- }
-
- /*
- * The pm_rtas_activate_signals will return -EIO if the FW
- * call failed.
- */
- if (profiling_mode == SPU_PROFILING_EVENTS) {
- /* For SPU event profiling also need to setup the
- * pm interval timer
- */
- ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
- num_enabled+2);
- /* store PC from debug bus to Trace buffer as often
- * as possible (every 10 cycles)
- */
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
- return ret;
- } else
- return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
- num_enabled);
-}
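-
-/*
- * Note (illustrative): the "num_enabled + 2" above is presumably the
- * one enabled SPU event signal plus the two extra debug-bus routing
- * entries, pm_signal[1] and pm_signal[2], that carry the SPU program
- * counter (set up in cell_reg_setup_spu_events()), matching the three
- * signals activated in spu_evnt_swap().
- */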
-
-#define ENTRIES 303
-#define MAXLFSR 0xFFFFFF
-
-/* precomputed table of 24 bit LFSR values */
-static int initial_lfsr[] = {
- 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
- 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
- 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
- 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
- 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
- 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
- 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
- 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
- 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
- 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
- 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
- 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
- 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
- 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
- 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
- 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
- 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
- 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
- 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
- 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
- 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
- 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
- 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
- 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
- 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
- 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
- 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
- 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
- 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
- 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
- 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
- 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
- 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
- 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
- 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
- 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
- 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
- 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
-};
-
-/*
- * The hardware uses an LFSR counting sequence to determine when to capture
- * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
- * where each number occurs once in the sequence but the sequence is not in
- * numerical order. The SPU PC capture is done when the LFSR sequence reaches
- * the last value in the sequence. Hence the user specified value N
- * corresponds to the LFSR number that is N from the end of the sequence.
- *
- * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
- * LFSR sequence is broken into four ranges. The spacing of the precomputed
- * values is adjusted in each range so the error between the user specified
- * number (N) of events between samples and the actual number of events based
- * on the precomputed value will be less than about 6.2%. Note, if the user
- * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
- * This is to prevent the loss of samples because the trace buffer is full.
- *
- * User specified N Step between Index in
- * precomputed values precomputed
- * table
- * 0 to 2^16-1 ---- 0
- * 2^16 to 2^16+2^19-1 2^12 1 to 128
- * 2^16+2^19 to 2^16+2^19+2^22-1 2^15 129 to 256
- * 2^16+2^19+2^22 to 2^24-1 2^18 257 to 302
- *
- *
- * For example, the LFSR values in the second range are computed for 2^16,
- * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
- * 1, 2,..., 127, 128.
- *
- * The 24 bit LFSR value for the nth number in the sequence can be
- * calculated using the following code:
- *
- * #define size 24
- * int calculate_lfsr(int n)
- * {
- * int i;
- * unsigned int newlfsr0;
- * unsigned int lfsr = 0xFFFFFF;
- * unsigned int howmany = n;
- *
- * for (i = 2; i < howmany + 2; i++) {
- * newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
- * ((lfsr >> (size - 1 - 1)) & 1) ^
- * (((lfsr >> (size - 1 - 6)) & 1) ^
- * ((lfsr >> (size - 1 - 23)) & 1)));
- *
- * lfsr >>= 1;
- * lfsr = lfsr | (newlfsr0 << (size - 1));
- * }
- * return lfsr;
- * }
- */
-
-#define V2_16 (0x1 << 16)
-#define V2_19 (0x1 << 19)
-#define V2_22 (0x1 << 22)
-
-static int calculate_lfsr(int n)
-{
- /*
- * The ranges and steps are in powers of 2 so the calculations
- * can be done using shifts rather than divides.
- */
- int index;
-
- if ((n >> 16) == 0)
- index = 0;
- else if (((n - V2_16) >> 19) == 0)
- index = ((n - V2_16) >> 12) + 1;
- else if (((n - V2_16 - V2_19) >> 22) == 0)
- index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
- else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
- index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
- else
- index = ENTRIES-1;
-
- /* make sure index is valid */
- if ((index >= ENTRIES) || (index < 0))
- index = ENTRIES-1;
-
- return initial_lfsr[index];
-}
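-
-/*
- * Worked example (illustrative): for n = 2^16 + 5 * 2^12 the second
- * branch above is taken, giving index = ((n - 2^16) >> 12) + 1 = 6,
- * so the entry precomputed for exactly 2^16 + 5 * 2^12 events is
- * returned. Intermediate values of n truncate down to the table entry
- * below, which is where the documented "less than about 6.2%" error
- * bound comes from.
- */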
-
-static int pm_rtas_activate_spu_profiling(u32 node)
-{
- int ret, i;
- struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
-
- /*
- * Set up the rtas call to configure the debug bus to
- * route the SPU PCs. Setup the pm_signal for each SPU
- */
- for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
- pm_signal_local[i].cpu = node;
- pm_signal_local[i].signal_group = 41;
- /* spu i on word (i/2) */
- pm_signal_local[i].bus_word = 1 << i / 2;
- /* spu i */
- pm_signal_local[i].sub_unit = i;
- pm_signal_local[i].bit = 63;
- }
-
- ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
- PASSTHRU_ENABLE, pm_signal_local,
- (ARRAY_SIZE(pm_signal_local)
- * sizeof(struct pm_signal)));
-
- if (unlikely(ret)) {
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_CPU_FREQ
-static int
-oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
-{
- int ret = 0;
- struct cpufreq_freqs *frq = data;
- if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
- (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
- set_spu_profiling_frequency(frq->new, spu_cycle_reset);
- return ret;
-}
-
-static struct notifier_block cpu_freq_notifier_block = {
- .notifier_call = oprof_cpufreq_notify
-};
-#endif
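-
-/*
- * Note (illustrative reading): the test above pushes the new frequency
- * to the SPU profiler before an increase and after a decrease, so the
- * assumed clock is never slower than the real one during a transition;
- * any error therefore biases toward sampling too often, not too rarely.
- */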
-
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop. Hence, will not return an error if the FW
- * calls fail on stop. Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue. The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
- */
-static void cell_global_stop_spu_cycles(void)
-{
- int subfunc, rtn_value;
- unsigned int lfsr_value;
- int cpu;
-
- oprofile_running = 0;
- smp_wmb();
-
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- subfunc = 3; /*
- * 2 - activate SPU tracing,
- * 3 - deactivate
- */
- lfsr_value = 0x8f100000;
-
- rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
- subfunc, cbe_cpu_to_node(cpu),
- lfsr_value);
-
- if (unlikely(rtn_value != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools " \
- "failed, return = %d\n",
- __func__, rtn_value);
- }
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
- }
-
- stop_spu_profiling_cycles();
-}
-
-static void cell_global_stop_spu_events(void)
-{
- int cpu;
- oprofile_running = 0;
-
- stop_spu_profiling_events();
- smp_wmb();
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
- cbe_write_pm07_control(cpu, 0, 0);
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
- del_timer_sync(&timer_spu_event_swap);
-}
-
-static void cell_global_stop_ppu(void)
-{
- int cpu;
-
- /*
- * This routine will be called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- del_timer_sync(&timer_virt_cntr);
- oprofile_running = 0;
- smp_wmb();
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
-}
-
-static void cell_global_stop(void)
-{
- if (profiling_mode == PPU_PROFILING)
- cell_global_stop_ppu();
- else if (profiling_mode == SPU_PROFILING_EVENTS)
- cell_global_stop_spu_events();
- else
- cell_global_stop_spu_cycles();
-}
-
-static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
-{
- int subfunc;
- unsigned int lfsr_value;
- int cpu;
- int ret;
- int rtas_error;
- unsigned int cpu_khzfreq = 0;
-
- /* SPU profiling is time-based, derived from the
- * cpu frequency, so if configured with the CPU_FREQ
- * option, we should detect frequency changes and react
- * accordingly.
- */
-#ifdef CONFIG_CPU_FREQ
- ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret < 0)
- /* this is not a fatal error */
- printk(KERN_ERR "CPU freq change registration failed: %d\n",
- ret);
-
- else
- cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
-#endif
-
- set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * Setup SPU cycle-based profiling.
- * Set perf_mon_control bit 0 to a zero before
- * enabling spu collection hardware.
- */
- cbe_write_pm(cpu, pm_control, 0);
-
- if (spu_cycle_reset > MAX_SPU_COUNT)
- /* use largest possible value */
- lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
- else
- lfsr_value = calculate_lfsr(spu_cycle_reset);
-
- /* must use a non zero value. Zero disables data collection. */
- if (lfsr_value == 0)
- lfsr_value = calculate_lfsr(1);
-
- lfsr_value = lfsr_value << 8; /* shift lfsr to correct
- * register location
- */
-
- /* debug bus setup */
- ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));
-
- if (unlikely(ret)) {
- rtas_error = ret;
- goto out;
- }
-
-
- subfunc = 2; /* 2 - activate SPU tracing, 3 - deactivate */
-
- /* start profiling */
- ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
- cbe_cpu_to_node(cpu), lfsr_value);
-
- if (unlikely(ret != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, " \
- "return = %d\n", __func__, ret);
- rtas_error = -EIO;
- goto out;
- }
- }
-
- rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
- if (rtas_error)
- goto out_stop;
-
- oprofile_running = 1;
- return 0;
-
-out_stop:
- cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
-out:
- return rtas_error;
-}
-
-static int cell_global_start_spu_events(struct op_counter_config *ctr)
-{
- int cpu;
- u32 interrupt_mask = 0;
- int rtn = 0;
-
- hdw_thread = 0;
-
- /* spu event profiling uses the performance counters to generate
- * an interrupt. The hardware is setup to store the SPU program
- * counter into the trace array. The occurrence mode is used to
- * enable storing data to the trace buffer. The bits are set
- * to send/store the SPU address in the trace buffer. The debug
- * bus must be setup to route the SPU program counter onto the
- * debug bus. The occurrence data in the trace buffer is not used.
- */
-
- /* This routine gets called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * Setup SPU event-based profiling.
- * Set perf_mon_control bit 0 to a zero before
- * enabling spu collection hardware.
- *
- * Only support one SPU event on one SPU per node.
- */
- if (ctr_enabled & 1) {
- cbe_write_ctr(cpu, 0, reset_value[0]);
- enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
- interrupt_mask |=
- CBE_PM_CTR_OVERFLOW_INTR(0);
- } else {
- /* Disable counter */
- cbe_write_pm07_control(cpu, 0, 0);
- }
-
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
- cbe_enable_pm(cpu);
-
- /* clear the trace buffer */
- cbe_write_pm(cpu, trace_address, 0);
- }
-
- /* Start the timer that time-slices event profile collection
- * across the SPUs. Note, the profile can only be collected on
- * one SPU per node at a time.
- */
- start_spu_event_swap();
- start_spu_profiling_events();
- oprofile_running = 1;
- smp_wmb();
-
- return rtn;
-}
-
-static int cell_global_start_ppu(struct op_counter_config *ctr)
-{
- u32 cpu, i;
- u32 interrupt_mask = 0;
-
- /* This routine gets called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- interrupt_mask = 0;
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr_enabled & (1 << i)) {
- cbe_write_ctr(cpu, i, reset_value[i]);
- enable_ctr(cpu, i, pm_regs.pm07_cntrl);
- interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
- } else {
- /* Disable counter */
- cbe_write_pm07_control(cpu, i, 0);
- }
- }
-
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
- cbe_enable_pm(cpu);
- }
-
- virt_cntr_inter_mask = interrupt_mask;
- oprofile_running = 1;
- smp_wmb();
-
- /*
- * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
- * executed which manipulates the PMU. We start the "virtual counter"
- * here so that we do not need to synchronize access to the PMU in
- * the above for-loop.
- */
- start_virt_cntrs();
-
- return 0;
-}
-
-static int cell_global_start(struct op_counter_config *ctr)
-{
- if (profiling_mode == SPU_PROFILING_CYCLES)
- return cell_global_start_spu_cycles(ctr);
- else if (profiling_mode == SPU_PROFILING_EVENTS)
- return cell_global_start_spu_events(ctr);
- else
- return cell_global_start_ppu(ctr);
-}
-
-
-/* The SPU interrupt handler
- *
- * SPU event profiling works as follows:
- * The pm_signal[0] holds the one SPU event to be measured. It is routed on
- * the debug bus using word 0 or 1. The value of pm_signal[1] and
- * pm_signal[2] contain the necessary events to route the SPU program
- * counter for the selected SPU onto the debug bus using words 2 and 3.
- * The pm_interval register is setup to write the SPU PC value into the
- * trace buffer at the maximum rate possible. The trace buffer is configured
- * to store the PCs, wrapping when it is full. The performance counter is
- * initialized to the max hardware count minus the number of events, N, between
- * samples. Once the N events have occurred, a HW counter overflow occurs
- * causing the generation of a HW counter interrupt which also stops the
- * writing of the SPU PC values to the trace buffer. Hence the last PC
- * written to the trace buffer is the SPU PC that we want. Unfortunately,
- * we have to read from the beginning of the trace buffer to get to the
- * last value written. We just hope the PPU has nothing better to do than
- * service this interrupt. The PC for the specific SPU being profiled is
- * extracted from the trace buffer, processed and stored. The trace buffer
- * is cleared, interrupts are cleared, the counter is reset to max - N.
- * A kernel timer is used to periodically call the routine spu_evnt_swap()
- * to switch to the next physical SPU in the node to profile in round-robin
- * order. This way data is collected for all SPUs on the node. It does mean
- * that we need to use a relatively small value of N to ensure enough samples
- * are collected on each SPU, since each SPU is only profiled 1/8 of the time.
- * It may also be necessary to use a longer sample collection period.
- */
-static void cell_handle_interrupt_spu(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- u32 cpu, cpu_tmp;
- u64 trace_entry;
- u32 interrupt_mask;
- u64 trace_buffer[2];
- u64 last_trace_buffer;
- u32 sample;
- u32 trace_addr;
- unsigned long sample_array_lock_flags;
- int spu_num;
- unsigned long flags;
-
- /* Make sure spu event interrupt handler and spu event swap
- * don't access the counters simultaneously.
- */
- cpu = smp_processor_id();
- spin_lock_irqsave(&cntr_lock, flags);
-
- cpu_tmp = cpu;
- cbe_disable_pm(cpu);
-
- interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-
- sample = 0xABCDEF;
- trace_entry = 0xfedcba;
- last_trace_buffer = 0xdeadbeaf;
-
- if ((oprofile_running == 1) && (interrupt_mask != 0)) {
- /* disable writes to trace buff */
- cbe_write_pm(cpu, pm_interval, 0);
-
- /* only have one perf cntr being used, cntr 0 */
- if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
- && ctr[0].enabled)
- /* The SPU PC values will be read
- * from the trace buffer, reset counter
- */
-
- cbe_write_ctr(cpu, 0, reset_value[0]);
-
- trace_addr = cbe_read_pm(cpu, trace_address);
-
- while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
- /* There is data in the trace buffer to process
- * Read the buffer until you get to the last
- * entry. This is the value we want.
- */
-
- cbe_read_trace_buffer(cpu, trace_buffer);
- trace_addr = cbe_read_pm(cpu, trace_address);
- }
-
- /* SPU Address 16 bit count format for 128 bit
- * HW trace buffer is used for the SPU PC storage
- * HDR bits 0:15
- * SPU Addr 0 bits 16:31
- * SPU Addr 1 bits 32:47
- * unused bits 48:127
- *
- * HDR: bit4 = 1 SPU Address 0 valid
- * HDR: bit5 = 1 SPU Address 1 valid
- * - unfortunately, the valid bits don't seem to work
- *
- * Note trace_buffer[0] holds bits 0:63 of the HW
- * trace buffer, trace_buffer[1] holds bits 64:127
- */
-
- trace_entry = trace_buffer[0]
- & 0x00000000FFFF0000;
-
- /* only top 16 of the 18 bit SPU PC address
- * is stored in trace buffer, hence shift right
- * by 16 - 2 bits */
- sample = trace_entry >> 14;
- last_trace_buffer = trace_buffer[0];
-
- spu_num = spu_evnt_phys_spu_indx
- + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
-
- /* make sure only one process at a time is calling
- * spu_sync_buffer()
- */
- spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
- sample_array_lock_flags);
- spu_sync_buffer(spu_num, &sample, 1);
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- sample_array_lock_flags);
-
- smp_wmb(); /* ensure spu event buffer updates are written;
- * don't want events intermingled... */
-
- /* The counters were frozen by the interrupt.
- * Reenable the interrupt and restart the counters.
- */
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- virt_cntr_inter_mask);
-
- /* clear the trace buffer, re-enable writes to trace buff */
- cbe_write_pm(cpu, trace_address, 0);
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
-
- /* The writes to the various performance counters only writes
- * to a latch. The new values (interrupt setting bits, reset
- * counter value etc.) are not copied to the actual registers
- * until the performance monitor is enabled. In order to get
- * this to work as desired, the performance monitor needs to
- * be disabled while writing to the latches. This is a
- * HW design issue.
- */
- write_pm_cntrl(cpu);
- cbe_enable_pm(cpu);
- }
- spin_unlock_irqrestore(&cntr_lock, flags);
-}
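-
-/*
- * Bit-arithmetic note (illustrative): the mask above keeps the 16
- * stored PC bits in bit positions 16..31 of trace_buffer[0]; shifting
- * right by 14 (= 16 - 2) re-aligns them as an 18-bit, word-aligned SPU
- * address whose two low bits are zero.
- */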
-
-static void cell_handle_interrupt_ppu(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- u32 cpu;
- u64 pc;
- int is_kernel;
- unsigned long flags = 0;
- u32 interrupt_mask;
- int i;
-
- cpu = smp_processor_id();
-
- /*
- * Need to make sure the interrupt handler and the virt counter
- * routine are not running at the same time. See the
- * cell_virtual_cntr() routine for additional comments.
- */
- spin_lock_irqsave(&cntr_lock, flags);
-
- /*
- * Need to disable and reenable the performance counters
- * to get the desired behavior from the hardware. This
- * is hardware specific.
- */
-
- cbe_disable_pm(cpu);
-
- interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-
- /*
- * If the interrupt mask has been cleared, then the virt cntr
- * has cleared the interrupt. When the thread that generated
- * the interrupt is restored, the data count will be restored to
- * 0xFFFFFFF0 to cause the interrupt to be regenerated.
- */
-
- if ((oprofile_running == 1) && (interrupt_mask != 0)) {
- pc = regs->nip;
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_counters; ++i) {
- if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
- && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- cbe_write_ctr(cpu, i, reset_value[i]);
- }
- }
-
- /*
- * The counters were frozen by the interrupt.
- * Reenable the interrupt and restart the counters.
- * If there was a race between the interrupt handler and
- * the virtual counter routine, the virtual counter
- * routine may have cleared the interrupts; hence we must
- * use the virt_cntr_inter_mask to re-enable the interrupts.
- */
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- virt_cntr_inter_mask);
-
- /*
-	 * The writes to the various performance counters only write
- * to a latch. The new values (interrupt setting bits, reset
- * counter value etc.) are not copied to the actual registers
- * until the performance monitor is enabled. In order to get
- * this to work as desired, the performance monitor needs to
- * be disabled while writing to the latches. This is a
- * HW design issue.
- */
- cbe_enable_pm(cpu);
- }
- spin_unlock_irqrestore(&cntr_lock, flags);
-}
-
-static void cell_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- if (profiling_mode == PPU_PROFILING)
- cell_handle_interrupt_ppu(regs, ctr);
- else
- cell_handle_interrupt_spu(regs, ctr);
-}
-
-/*
- * This function is called from the generic OProfile
- * driver. When profiling PPUs, we need to do the
- * generic sync start; otherwise, do spu_sync_start.
- */
-static int cell_sync_start(void)
-{
- if ((profiling_mode == SPU_PROFILING_CYCLES) ||
- (profiling_mode == SPU_PROFILING_EVENTS))
- return spu_sync_start();
- else
- return DO_GENERIC_SYNC;
-}
-
-static int cell_sync_stop(void)
-{
- if ((profiling_mode == SPU_PROFILING_CYCLES) ||
- (profiling_mode == SPU_PROFILING_EVENTS))
- return spu_sync_stop();
- else
- return 1;
-}
-
-struct op_powerpc_model op_model_cell = {
- .reg_setup = cell_reg_setup,
- .cpu_setup = cell_cpu_setup,
- .global_start = cell_global_start,
- .global_stop = cell_global_stop,
- .sync_start = cell_sync_start,
- .sync_stop = cell_sync_stop,
- .handle_interrupt = cell_handle_interrupt,
-};
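
The trace-buffer handling deleted above recovers an SPU program counter from a 16-bit field by masking and shifting. A stand-alone sketch of the same arithmetic (user-space C, not kernel code; the trace word value is hypothetical): the field is masked out of the 64-bit trace word and shifted right by 16 - 2 = 14, yielding the 18-bit PC with its low two bits zeroed.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t spu_pc_from_trace(uint64_t trace_word)
    {
            /* mask the 16-bit PC field, then shift right by 16 - 2 = 14 */
            uint64_t trace_entry = trace_word & 0x00000000FFFF0000ULL;
            return trace_entry >> 14;
    }

    int main(void)
    {
            uint64_t w = (uint64_t)0x1234 << 16;    /* hypothetical trace word */
            printf("pc = 0x%llx\n", (unsigned long long)spu_pc_from_trace(w));
            return 0;       /* prints pc = 0x48d0, i.e. 0x1234 << 2 */
    }
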
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
deleted file mode 100644
index 25dc6813ecee..000000000000
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ /dev/null
@@ -1,380 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Freescale Embedded oprofile support, based on ppc64 oprofile support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/reg_fsl_emb.h>
-#include <asm/page.h>
-#include <asm/pmc.h>
-#include <asm/oprofile_impl.h>
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int num_counters;
-static int oprofile_running;
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- case 4:
- pmlca = mfpmr(PMRN_PMLCA4);
- break;
- case 5:
- pmlca = mfpmr(PMRN_PMLCA5);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- case 4:
- mtpmr(PMRN_PMLCA4, pmlca);
- break;
- case 5:
- mtpmr(PMRN_PMLCA5, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-static inline unsigned int ctr_read(unsigned int i)
-{
- switch(i) {
- case 0:
- return mfpmr(PMRN_PMC0);
- case 1:
- return mfpmr(PMRN_PMC1);
- case 2:
- return mfpmr(PMRN_PMC2);
- case 3:
- return mfpmr(PMRN_PMC3);
- case 4:
- return mfpmr(PMRN_PMC4);
- case 5:
- return mfpmr(PMRN_PMC5);
- default:
- return 0;
- }
-}
-
-static inline void ctr_write(unsigned int i, unsigned int val)
-{
- switch(i) {
- case 0:
- mtpmr(PMRN_PMC0, val);
- break;
- case 1:
- mtpmr(PMRN_PMC1, val);
- break;
- case 2:
- mtpmr(PMRN_PMC2, val);
- break;
- case 3:
- mtpmr(PMRN_PMC3, val);
- break;
- case 4:
- mtpmr(PMRN_PMC4, val);
- break;
- case 5:
- mtpmr(PMRN_PMC5, val);
- break;
- default:
- break;
- }
-}
-
-
-static void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- mtpmr(PMRN_PMLCB1, pmlcb);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- mtpmr(PMRN_PMLCB2, pmlcb);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- mtpmr(PMRN_PMLCB3, pmlcb);
- break;
- case 4:
- mtpmr(PMRN_PMLCA4, pmlca);
- mtpmr(PMRN_PMLCB4, pmlcb);
- break;
- case 5:
- mtpmr(PMRN_PMLCA5, pmlca);
- mtpmr(PMRN_PMLCB5, pmlcb);
- break;
- default:
- panic("Bad ctr number!\n");
- }
-}
-
-static void set_pmc_event(int ctr, int event)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
- ((event << PMLCA_EVENT_SHIFT) &
- PMLCA_EVENT_MASK);
-
- set_pmlca(ctr, pmlca);
-}
-
-static void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- if(user)
- pmlca &= ~PMLCA_FCU;
- else
- pmlca |= PMLCA_FCU;
-
- if(kernel)
- pmlca &= ~PMLCA_FCS;
- else
- pmlca |= PMLCA_FCS;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void set_pmc_marked(int ctr, int mark0, int mark1)
-{
- u32 pmlca = get_pmlca(ctr);
-
- if(mark0)
- pmlca &= ~PMLCA_FCM0;
- else
- pmlca |= PMLCA_FCM0;
-
- if(mark1)
- pmlca &= ~PMLCA_FCM1;
- else
- pmlca |= PMLCA_FCM1;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void pmc_start_ctr(int ctr, int enable)
-{
- u32 pmlca = get_pmlca(ctr);
-
- pmlca &= ~PMLCA_FC;
-
- if (enable)
- pmlca |= PMLCA_CE;
- else
- pmlca &= ~PMLCA_CE;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void pmc_start_ctrs(int enable)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_FAC;
- pmgc0 |= PMGC0_FCECE;
-
- if (enable)
- pmgc0 |= PMGC0_PMIE;
- else
- pmgc0 &= ~PMGC0_PMIE;
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-static void pmc_stop_ctrs(void)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 |= PMGC0_FAC;
-
- pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
-{
- int i;
-
- /* freeze all counters */
- pmc_stop_ctrs();
-
- for (i = 0;i < num_counters;i++) {
- init_pmc_stop(i);
-
- set_pmc_event(i, ctr[i].event);
-
- set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
- }
-
- return 0;
-}
-
-static int fsl_emb_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- num_counters = num_ctrs;
-
-	/* Our counters count up, and "count" refers to how many
-	 * events occur before the next interrupt, and we interrupt
-	 * on overflow.  So we calculate the starting value
-	 * which will give us "count" events until overflow.
-	 * Then we set the events on the enabled counters. */
- for (i = 0; i < num_counters; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- return 0;
-}
-
-static int fsl_emb_start(struct op_counter_config *ctr)
-{
- int i;
-
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr[i].enabled) {
- ctr_write(i, reset_value[i]);
- /* Set each enabled counter to only
- * count when the Mark bit is *not* set */
- set_pmc_marked(i, 1, 0);
- pmc_start_ctr(i, 1);
- } else {
- ctr_write(i, 0);
-
- /* Set the ctr to be stopped */
- pmc_start_ctr(i, 0);
- }
- }
-
- /* Clear the freeze bit, and enable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs(1);
-
- oprofile_running = 1;
-
- pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
- mfpmr(PMRN_PMGC0));
-
- return 0;
-}
-
-static void fsl_emb_stop(void)
-{
- /* freeze counters */
- pmc_stop_ctrs();
-
- oprofile_running = 0;
-
- pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
- mfpmr(PMRN_PMGC0));
-
- mb();
-}
-
-
-static void fsl_emb_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
-
- pc = regs->nip;
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_counters; ++i) {
- val = ctr_read(i);
- if (val < 0) {
- if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- ctr_write(i, reset_value[i]);
- } else {
- ctr_write(i, 0);
- }
- }
- }
-
- /* The freeze bit was set by the interrupt. */
- /* Clear the freeze bit, and reenable the interrupt. The
- * counters won't actually start until the rfi clears the PMM
- * bit. The PMM bit should not be set until after the interrupt
- * is cleared to avoid it getting lost in some hypervisor
- * environments.
- */
- mtmsr(mfmsr() | MSR_PMM);
- pmc_start_ctrs(1);
-}
-
-struct op_powerpc_model op_model_fsl_emb = {
- .reg_setup = fsl_emb_reg_setup,
- .cpu_setup = fsl_emb_cpu_setup,
- .start = fsl_emb_start,
- .stop = fsl_emb_stop,
- .handle_interrupt = fsl_emb_handle_interrupt,
-};
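
The reset-value arithmetic in fsl_emb_reg_setup() and the "val < 0" test in the interrupt handler form one scheme: preload a 32-bit up-counter with 0x80000000 - count so its sign bit flips after exactly "count" events. A minimal user-space sketch of that scheme (assuming a plain 32-bit counter; not the kernel code itself):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t count = 100000;                /* requested sample period */
            uint32_t pmc = 0x80000000UL - count;    /* reset_value preload */
            uint32_t events = 0;

            while ((int32_t)pmc >= 0) {             /* same test as "val < 0" */
                    pmc++;                          /* one hardware event */
                    events++;
            }
            printf("overflow after %u events\n", events);   /* 100000 */
            return 0;
    }
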
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
deleted file mode 100644
index d23061cf76bc..000000000000
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2006-2007 PA Semi, Inc
- *
- * Author: Shashi Rao, PA Semi
- *
- * Maintained by: Olof Johansson <olof@lixom.net>
- *
- * Based on arch/powerpc/oprofile/op_model_power4.c
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/oprofile_impl.h>
-#include <asm/reg.h>
-
-static unsigned char oprofile_running;
-
-/* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */
-static u64 mmcr0_val;
-static u64 mmcr1_val;
-
-/* inited in pa6t_reg_setup */
-static u64 reset_value[OP_MAX_COUNTER];
-
-static inline u64 ctr_read(unsigned int i)
-{
- switch (i) {
- case 0:
- return mfspr(SPRN_PA6T_PMC0);
- case 1:
- return mfspr(SPRN_PA6T_PMC1);
- case 2:
- return mfspr(SPRN_PA6T_PMC2);
- case 3:
- return mfspr(SPRN_PA6T_PMC3);
- case 4:
- return mfspr(SPRN_PA6T_PMC4);
- case 5:
- return mfspr(SPRN_PA6T_PMC5);
- default:
- printk(KERN_ERR "ctr_read called with bad arg %u\n", i);
- return 0;
- }
-}
-
-static inline void ctr_write(unsigned int i, u64 val)
-{
- switch (i) {
- case 0:
- mtspr(SPRN_PA6T_PMC0, val);
- break;
- case 1:
- mtspr(SPRN_PA6T_PMC1, val);
- break;
- case 2:
- mtspr(SPRN_PA6T_PMC2, val);
- break;
- case 3:
- mtspr(SPRN_PA6T_PMC3, val);
- break;
- case 4:
- mtspr(SPRN_PA6T_PMC4, val);
- break;
- case 5:
- mtspr(SPRN_PA6T_PMC5, val);
- break;
- default:
- printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
- break;
- }
-}
-
-
-/* precompute the values to stuff in the hardware registers */
-static int pa6t_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int pmc;
-
- /*
- * adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the
- * event_mappings file by turning off the counters that the user doesn't
- * care about
- *
- * setup user and kernel profiling
- */
- for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
- if (!ctr[pmc].enabled) {
- sys->mmcr0 &= ~(0x1UL << pmc);
- sys->mmcr0 &= ~(0x1UL << (pmc+12));
- pr_debug("turned off counter %u\n", pmc);
- }
-
- if (sys->enable_kernel)
- sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN;
- else
- sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN);
-
- if (sys->enable_user)
- sys->mmcr0 |= PA6T_MMCR0_PREN;
- else
- sys->mmcr0 &= ~PA6T_MMCR0_PREN;
-
- /*
- * The performance counter event settings are given in the mmcr0 and
- * mmcr1 values passed from the user in the op_system_config
- * structure (sys variable).
- */
- mmcr0_val = sys->mmcr0;
- mmcr1_val = sys->mmcr1;
- pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0);
- pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1);
-
- for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
- /* counters are 40 bit. Move to cputable at some point? */
- reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
- pr_debug("reset_value for pmc%u inited to 0x%llx\n",
- pmc, reset_value[pmc]);
- }
-
- return 0;
-}
-
-/* configure registers on this cpu */
-static int pa6t_cpu_setup(struct op_counter_config *ctr)
-{
- u64 mmcr0 = mmcr0_val;
- u64 mmcr1 = mmcr1_val;
-
- /* Default is all PMCs off */
- mmcr0 &= ~(0x3FUL);
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- /* program selected programmable events in */
- mtspr(SPRN_PA6T_MMCR1, mmcr1);
-
- pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(),
- mfspr(SPRN_PA6T_MMCR0));
- pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
- mfspr(SPRN_PA6T_MMCR1));
-
- return 0;
-}
-
-static int pa6t_start(struct op_counter_config *ctr)
-{
- int i;
-
- /* Hold off event counting until rfid */
- u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
- if (ctr[i].enabled)
- ctr_write(i, reset_value[i]);
- else
- ctr_write(i, 0UL);
-
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- oprofile_running = 1;
-
- pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
-
- return 0;
-}
-
-static void pa6t_stop(void)
-{
- u64 mmcr0;
-
- /* freeze counters */
- mmcr0 = mfspr(SPRN_PA6T_MMCR0);
- mmcr0 |= PA6T_MMCR0_FCM0;
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- oprofile_running = 0;
-
- pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
-}
-
-/* handle the perfmon overflow vector */
-static void pa6t_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc = mfspr(SPRN_PA6T_SIAR);
- int is_kernel = is_kernel_addr(pc);
- u64 val;
- int i;
- u64 mmcr0;
-
- /* disable perfmon counting until rfid */
- mmcr0 = mfspr(SPRN_PA6T_MMCR0);
- mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);
-
- /* Record samples. We've got one global bit for whether a sample
- * was taken, so add it for any counter that triggered overflow.
- */
- for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
- val = ctr_read(i);
- if (val & (0x1UL << 39)) { /* Overflow bit set */
- if (oprofile_running && ctr[i].enabled) {
- if (mmcr0 & PA6T_MMCR0_SIARLOG)
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- ctr_write(i, reset_value[i]);
- } else {
- ctr_write(i, 0UL);
- }
- }
- }
-
- /* Restore mmcr0 to a good known value since the PMI changes it */
- mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-}
-
-struct op_powerpc_model op_model_pa6t = {
- .reg_setup = pa6t_reg_setup,
- .cpu_setup = pa6t_cpu_setup,
- .start = pa6t_start,
- .stop = pa6t_stop,
- .handle_interrupt = pa6t_handle_interrupt,
-};
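
The PA6T variant of the same idea uses 40-bit counters, so the preload is (1 << 39) - count and bit 39 serves as the overflow flag checked in pa6t_handle_interrupt(). A small sketch (user-space; the period is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t count = 5000;                  /* hypothetical period */
            uint64_t pmc = (1ULL << 39) - count;    /* 40-bit counter preload */

            pmc += count;                           /* "count" events later */
            if (pmc & (1ULL << 39))                 /* overflow bit set */
                    printf("overflow after %llu events\n",
                           (unsigned long long)count);
            return 0;
    }
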
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
deleted file mode 100644
index 2ae6b86ff97b..000000000000
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- * Added mmcra[slot] support:
- * Copyright (C) 2006-2007 Will Schmidt <willschm@us.ibm.com>, IBM
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/firmware.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/rtas.h>
-#include <asm/oprofile_impl.h>
-#include <asm/reg.h>
-
-#define dbg(args...)
-#define OPROFILE_PM_PMCSEL_MSK 0xffULL
-#define OPROFILE_PM_UNIT_SHIFT 60
-#define OPROFILE_PM_UNIT_MSK 0xfULL
-#define OPROFILE_MAX_PMC_NUM 3
-#define OPROFILE_PMSEL_FIELD_WIDTH 8
-#define OPROFILE_UNIT_FIELD_WIDTH 4
-#define MMCRA_SIAR_VALID_MASK 0x10000000ULL
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int oprofile_running;
-static int use_slot_nums;
-
-/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
-static u32 mmcr0_val;
-static u64 mmcr1_val;
-static u64 mmcra_val;
-static u32 cntr_marked_events;
-
-static int power7_marked_instr_event(u64 mmcr1)
-{
- u64 psel, unit;
- int pmc, cntr_marked_events = 0;
-
- /* Given the MMCR1 value, look at the field for each counter to
- * determine if it is a marked event. Code based on the function
- * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
- */
- for (pmc = 0; pmc < 4; pmc++) {
- psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
- << (OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_PMSEL_FIELD_WIDTH);
- psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
- unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
- << (OPROFILE_PM_UNIT_SHIFT
- - (pmc * OPROFILE_PMSEL_FIELD_WIDTH )));
- unit = unit >> (OPROFILE_PM_UNIT_SHIFT
- - (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
-
- switch (psel >> 4) {
- case 2:
- cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
- break;
- case 3:
- if (psel == 0x3c) {
- cntr_marked_events |= (pmc == 0) << pmc;
- break;
- }
-
- if (psel == 0x3e) {
- cntr_marked_events |= (pmc != 1) << pmc;
- break;
- }
-
- cntr_marked_events |= 1 << pmc;
- break;
- case 4:
- case 5:
- cntr_marked_events |= (unit == 0xd) << pmc;
- break;
- case 6:
- if (psel == 0x64)
- cntr_marked_events |= (pmc >= 2) << pmc;
- break;
- case 8:
- cntr_marked_events |= (unit == 0xd) << pmc;
- break;
- }
- }
- return cntr_marked_events;
-}
-
-static int power4_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- /*
- * The performance counter event settings are given in the mmcr0,
- * mmcr1 and mmcra values passed from the user in the
- * op_system_config structure (sys variable).
- */
- mmcr0_val = sys->mmcr0;
- mmcr1_val = sys->mmcr1;
- mmcra_val = sys->mmcra;
-
- /* Power 7+ and newer architectures:
- * Determine which counter events in the group (the group of events is
- * specified by the bit settings in the MMCR1 register) are marked
- * events for use in the interrupt handler. Do the calculation once
- * before OProfile starts. Information is used in the interrupt
- * handler. Starting with Power 7+ we only record the sample for
-	 * marked events if the SIAR valid bit is set.  For non-marked events
-	 * the sample is always recorded.
- */
- if (pvr_version_is(PVR_POWER7p))
- cntr_marked_events = power7_marked_instr_event(mmcr1_val);
- else
-		cntr_marked_events = 0; /* For older processors, set the bit map
-					 * to zero so the sample will always
-					 * be recorded.
-					 */
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- /* setup user and kernel profiling */
- if (sys->enable_kernel)
- mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
- else
- mmcr0_val |= MMCR0_KERNEL_DISABLE;
-
- if (sys->enable_user)
- mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
- else
- mmcr0_val |= MMCR0_PROBLEM_DISABLE;
-
- if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
- pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
- pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
- pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
- use_slot_nums = 1;
-
- return 0;
-}
-
-extern void ppc_enable_pmcs(void);
-
-/*
- * Older CPUs require the MMCRA sample bit to be always set, but newer
- * CPUs only want it set for some groups. Eventually we will remove all
- * knowledge of this bit in the kernel, oprofile userspace should be
- * setting it when required.
- *
- * In order to keep current installations working we force the bit for
- * those older CPUs. Once everyone has updated their oprofile userspace we
- * can remove this hack.
- */
-static inline int mmcra_must_set_sample(void)
-{
- if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
- pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
- pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
- return 1;
-
- return 0;
-}
-
-static int power4_cpu_setup(struct op_counter_config *ctr)
-{
- unsigned int mmcr0 = mmcr0_val;
- unsigned long mmcra = mmcra_val;
-
- ppc_enable_pmcs();
-
- /* set the freeze bit */
- mmcr0 |= MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
- mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
- mtspr(SPRN_MMCR0, mmcr0);
-
- mtspr(SPRN_MMCR1, mmcr1_val);
-
- if (mmcra_must_set_sample())
- mmcra |= MMCRA_SAMPLE_ENABLE;
- mtspr(SPRN_MMCRA, mmcra);
-
- dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCR0));
- dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCR1));
- dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCRA));
-
- return 0;
-}
-
-static int power4_start(struct op_counter_config *ctr)
-{
- int i;
- unsigned int mmcr0;
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
- if (ctr[i].enabled) {
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
-
- mmcr0 = mfspr(SPRN_MMCR0);
-
- /*
- * We must clear the PMAO bit on some (GQ) chips. Just do it
- * all the time
- */
- mmcr0 &= ~MMCR0_PMAO;
-
- /*
- * now clear the freeze bit, counting will not start until we
-	 * rfid from this exception, because only at that point will
- * the PMM bit be cleared
- */
- mmcr0 &= ~MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- oprofile_running = 1;
-
- dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
- return 0;
-}
-
-static void power4_stop(void)
-{
- unsigned int mmcr0;
-
- /* freeze counters */
- mmcr0 = mfspr(SPRN_MMCR0);
- mmcr0 |= MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- oprofile_running = 0;
-
- dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
-
- mb();
-}
-
-/* Fake functions used by canonicalize_pc */
-static void __used hypervisor_bucket(void)
-{
-}
-
-static void __used rtas_bucket(void)
-{
-}
-
-static void __used kernel_unknown_bucket(void)
-{
-}
-
-/*
- * On GQ and newer the MMCRA stores the HV and PR bits at the time
- * the SIAR was sampled. We use that to work out if the SIAR was sampled in
- * the hypervisor, our exception vectors or RTAS.
- * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
- * to more accurately identify the address of the sampled instruction. The
- * mmcra[slot] bits represent the slot number of a sampled instruction
- * within an instruction group. The slot will contain a value between 1
- * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
- */
-static unsigned long get_pc(struct pt_regs *regs)
-{
- unsigned long pc = mfspr(SPRN_SIAR);
- unsigned long mmcra;
- unsigned long slot;
-
- /* Can't do much about it */
- if (!cur_cpu_spec->oprofile_mmcra_sihv)
- return pc;
-
- mmcra = mfspr(SPRN_MMCRA);
-
- if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
- slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
- if (slot > 1)
- pc += 4 * (slot - 1);
- }
-
- /* Were we in the hypervisor? */
- if (firmware_has_feature(FW_FEATURE_LPAR) &&
- (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
- /* function descriptor madness */
- return *((unsigned long *)hypervisor_bucket);
-
- /* We were in userspace, nothing to do */
- if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
- return pc;
-
-#ifdef CONFIG_PPC_RTAS
- /* Were we in RTAS? */
- if (pc >= rtas.base && pc < (rtas.base + rtas.size))
- /* function descriptor madness */
- return *((unsigned long *)rtas_bucket);
-#endif
-
- /* Were we in our exception vectors or SLB real mode miss handler? */
- if (pc < 0x1000000UL)
- return (unsigned long)__va(pc);
-
- /* Not sure where we were */
- if (!is_kernel_addr(pc))
- /* function descriptor madness */
- return *((unsigned long *)kernel_unknown_bucket);
-
- return pc;
-}
-
-static int get_kernel(unsigned long pc, unsigned long mmcra)
-{
- int is_kernel;
-
- if (!cur_cpu_spec->oprofile_mmcra_sihv) {
- is_kernel = is_kernel_addr(pc);
- } else {
- is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
- }
-
- return is_kernel;
-}
-
-static bool pmc_overflow(unsigned long val)
-{
- if ((int)val < 0)
- return true;
-
- /*
- * Events on POWER7 can roll back if a speculative event doesn't
- * eventually complete. Unfortunately in some rare cases they will
- * raise a performance monitor exception. We need to catch this to
- * ensure we reset the PMC. In all cases the PMC will be 256 or less
- * cycles from overflow.
- *
- * We only do this if the first pass fails to find any overflowing
- * PMCs because a user might set a period of less than 256 and we
- * don't want to mistakenly reset them.
- */
- if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
- return true;
-
- return false;
-}
-
-static void power4_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
- unsigned int mmcr0;
- unsigned long mmcra;
- bool siar_valid = false;
-
- mmcra = mfspr(SPRN_MMCRA);
-
- pc = get_pc(regs);
- is_kernel = get_kernel(pc, mmcra);
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- /* Check that the SIAR valid bit in MMCRA is set to 1. */
- if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
- siar_valid = true;
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
- val = classic_ctr_read(i);
- if (pmc_overflow(val)) {
- if (oprofile_running && ctr[i].enabled) {
- /* Power 7+ and newer architectures:
- * If the event is a marked event, then only
- * save the sample if the SIAR valid bit is
- * set. If the event is not marked, then
- * always save the sample.
- * Note, the Sample enable bit in the MMCRA
- * register must be set to 1 if the group
- * contains a marked event.
- */
- if ((siar_valid &&
- (cntr_marked_events & (1 << i)))
- || !(cntr_marked_events & (1 << i)))
- oprofile_add_ext_sample(pc, regs, i,
- is_kernel);
-
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
- }
-
- mmcr0 = mfspr(SPRN_MMCR0);
-
- /* reset the perfmon trigger */
- mmcr0 |= MMCR0_PMXE;
-
- /*
- * We must clear the PMAO bit on some (GQ) chips. Just do it
- * all the time
- */
- mmcr0 &= ~MMCR0_PMAO;
-
- /* Clear the appropriate bits in the MMCRA */
- mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
- mtspr(SPRN_MMCRA, mmcra);
-
- /*
- * now clear the freeze bit, counting will not start until we
- * rfid from this exception, because only at that point will
- * the PMM bit be cleared
- */
- mmcr0 &= ~MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-struct op_powerpc_model op_model_power4 = {
- .reg_setup = power4_reg_setup,
- .cpu_setup = power4_cpu_setup,
- .start = power4_start,
- .stop = power4_stop,
- .handle_interrupt = power4_handle_interrupt,
-};
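
pmc_overflow() above combines two tests: the usual sign-bit check, and a POWER7-specific window that also treats a counter within 256 cycles of 0x80000000 as overflowed, because speculatively counted events can roll back after the exception has already been raised. A condensed user-space sketch of that logic (not the kernel function itself):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool pmc_overflow(uint32_t val, bool is_power7)
    {
            if ((int32_t)val < 0)                   /* sign bit set */
                    return true;
            /* POWER7 rollback window: within 256 cycles of overflow */
            if (is_power7 && (0x80000000UL - val) <= 256)
                    return true;
            return false;
    }

    int main(void)
    {
            assert(pmc_overflow(0x80000001UL, false));  /* normal overflow */
            assert(pmc_overflow(0x7FFFFFF0UL, true));   /* rollback window */
            assert(!pmc_overflow(0x7FFFFFF0UL, false)); /* pre-POWER7: no */
            return 0;
    }
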
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index f2ff359041ee..e7c976bcadff 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -100,8 +100,3 @@ config CBE_CPUFREQ_SPU_GOVERNOR
the minimal possible frequency.
endmenu
-
-config OPROFILE_CELL
- def_bool y
- depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
-
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 10064a33ca96..7ea6692f67e2 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -19,7 +19,6 @@ spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
- spu_notify.o \
spu_syscalls.o \
$(spu-priv1-y) \
$(spu-manage-y) \
diff --git a/arch/powerpc/platforms/cell/spu_notify.c b/arch/powerpc/platforms/cell/spu_notify.c
deleted file mode 100644
index 67870abf3715..000000000000
--- a/arch/powerpc/platforms/cell/spu_notify.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Move OProfile dependencies from spufs module to the kernel so it
- * can run on non-cell PPC.
- *
- * Copyright (C) IBM 2005
- */
-
-#undef DEBUG
-
-#include <linux/export.h>
-#include <linux/notifier.h>
-#include <asm/spu.h>
-#include "spufs/spufs.h"
-
-static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
-
-void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
-{
- blocking_notifier_call_chain(&spu_switch_notifier,
- ctx ? ctx->object_id : 0, spu);
-}
-EXPORT_SYMBOL_GPL(spu_switch_notify);
-
-int spu_switch_event_register(struct notifier_block *n)
-{
- int ret;
- ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
- if (!ret)
- notify_spus_active();
- return ret;
-}
-EXPORT_SYMBOL_GPL(spu_switch_event_register);
-
-int spu_switch_event_unregister(struct notifier_block *n)
-{
- return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
-}
-EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
-
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void (* prof_info_release) (struct kref *kref))
-{
- ctx->prof_priv_kref = prof_info_kref;
- ctx->prof_priv_release = prof_info_release;
-}
-EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
-
-void *spu_get_profile_private_kref(struct spu_context *ctx)
-{
- return ctx->prof_priv_kref;
-}
-EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
-
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3f2380f40f99..ce52b87496d2 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -353,7 +353,6 @@ static int spu_process_callback(struct spu_context *ctx)
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
int ret;
- struct spu *spu;
u32 status;
if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -386,13 +385,10 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
mutex_lock(&ctx->state_mutex);
break;
}
- spu = ctx->spu;
if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
&ctx->sched_flags))) {
- if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
- spu_switch_notify(spu, ctx);
+ if (!(status & SPU_STATUS_STOPPED_BY_STOP))
continue;
- }
}
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index f18d5067cd0f..9d06fffb1526 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -181,9 +181,6 @@ void do_notify_spus_active(void)
/*
* Wake up the active spu_contexts.
- *
- * When the awakened processes see their "notify_active" flag is set,
- * they will call spu_switch_notify().
*/
for_each_online_node(node) {
struct spu *spu;
@@ -239,7 +236,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
- spu_switch_notify(spu, ctx);
ctx->state = SPU_STATE_RUNNABLE;
spuctx_switch_state(ctx, SPU_UTIL_USER);
@@ -440,7 +436,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
*/
atomic_dec_if_positive(&ctx->gang->aff_sched_count);
- spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 1ba4d884febf..afc1d6604d12 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -281,7 +281,6 @@ void spu_del_from_rq(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
-void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 73483614b66b..41d6498dcbaa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -174,7 +174,6 @@ config S390
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_NOP_MCOUNT
- select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 8db267d2a543..e443ed9947bd 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -134,9 +134,6 @@ core-y += arch/s390/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
-# must be linked after kernel
-drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
-
boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
tools := arch/s390/tools
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index c4f6ff98a612..8b94347705e5 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -57,7 +57,6 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 51135893cffe..9db1232e09f4 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -55,7 +55,6 @@ CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_GCC_PLUGINS is not set
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
deleted file mode 100644
index 36261f9d360b..000000000000
--- a/arch/s390/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
deleted file mode 100644
index 7441857df51b..000000000000
--- a/arch/s390/oprofile/init.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * S390 Version
- * Copyright IBM Corp. 2002, 2011
- * Author(s): Thomas Spatzier (tspat@de.ibm.com)
- * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
- * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
- * Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
- *
- * @remark Copyright 2002-2011 OProfile authors
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include <asm/unwind.h>
-
-static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
-{
- struct unwind_state state;
-
- unwind_for_each_frame(&state, current, regs, 0) {
- if (depth-- == 0)
- break;
- oprofile_add_trace(state.ip);
- }
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = s390_backtrace;
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 52646f52f130..7ac847ca6356 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -50,7 +50,6 @@ config SUPERH
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 2faebfd72eca..3bcbf52fb30e 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -170,7 +170,6 @@ cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
-drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/
cflags-y += $(foreach d, $(cpuincdir-y), -I $(srctree)/arch/sh/include/$(d)) \
$(foreach d, $(machdir-y), -I $(srctree)/arch/sh/include/$(d))
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index 9a988c347e9d..2804cb760a76 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -7,7 +7,6 @@ CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7763=y
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index a24cf8cd2cea..4859cd30cfc4 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -6,7 +6,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7722=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index e922659fdadb..f823cc6b18f9 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -7,7 +7,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_EPOLL is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index 5978866358ec..f96bc20d4b1a 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig
index 841809b5c2dc..e41526120be1 100644
--- a/arch/sh/configs/rsk7201_defconfig
+++ b/arch/sh/configs/rsk7201_defconfig
@@ -12,7 +12,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_AIO is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7201=y
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 0055031664ad..6af08fa1ddf8 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -13,7 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7203=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index fc9c22152b08..96263a4912b7 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -3,7 +3,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7751R=y
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index ff3fd6787fd6..92e586e6c974 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -3,7 +3,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7751R=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 4a44cac640bc..f776a1d0d277 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -35,7 +35,6 @@ CONFIG_RD_LZO=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index ff5bb4489922..315b04a8dd2f 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -23,7 +23,6 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 48b457d59e79..ff502683132e 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index 9adee9010319..2c46c0004780 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -4,7 +4,6 @@ CONFIG_CGROUPS=y
# CONFIG_UID16 is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_NO_HZ=y
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index 26c5fd02c87a..8a6a446f9eb8 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -7,7 +7,6 @@ CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7763=y
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index c17590f0df67..88193153e51b 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -4,7 +4,6 @@ CONFIG_CGROUPS=y
# CONFIG_UID16 is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7770=y
CONFIG_SH_PCLK_FREQ=41666666
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index dc2be2514b62..32ec6eb1eabc 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -22,7 +22,6 @@ CONFIG_PID_NS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
deleted file mode 100644
index d478dd8dac0b..000000000000
--- a/arch/sh/oprofile/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-CFLAGS_common.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-ifeq ($(CONFIG_HW_PERF_EVENTS),y)
-DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
-endif
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c
deleted file mode 100644
index cc16cf86cd92..000000000000
--- a/arch/sh/oprofile/backtrace.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SH specific backtracing code for oprofile
- *
- * Copyright 2007 STMicroelectronics Ltd.
- *
- * Author: Dave Peverley <dpeverley@mpc-data.co.uk>
- *
- * Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386
- * oprofile backtrace code by John Levon, David Smith
- */
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/kallsyms.h>
-#include <linux/mm.h>
-#include <asm/unwinder.h>
-#include <asm/ptrace.h>
-#include <linux/uaccess.h>
-#include <asm/sections.h>
-#include <asm/stacktrace.h>
-
-static void backtrace_address(void *data, unsigned long addr, int reliable)
-{
- unsigned int *depth = data;
-
- if ((*depth)--)
- oprofile_add_trace(addr);
-}
-
-static struct stacktrace_ops backtrace_ops = {
- .address = backtrace_address,
-};
-
-/* Limit to stop backtracing too far. */
-static int backtrace_limit = 20;
-
-static unsigned long *
-user_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
-{
- unsigned long buf_stack;
-
- /* Also check accessibility of address */
- if (!access_ok(stackaddr, sizeof(unsigned long)))
- return NULL;
-
- if (__copy_from_user_inatomic(&buf_stack, stackaddr, sizeof(unsigned long)))
- return NULL;
-
- /* Quick paranoia check */
- if (buf_stack & 3)
- return NULL;
-
- oprofile_add_trace(buf_stack);
-
- stackaddr++;
-
- return stackaddr;
-}
-
-void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- unsigned long *stackaddr;
-
- /*
- * Paranoia - clip max depth as we could get lost in the weeds.
- */
- if (depth > backtrace_limit)
- depth = backtrace_limit;
-
- stackaddr = (unsigned long *)kernel_stack_pointer(regs);
- if (!user_mode(regs)) {
- if (depth)
- unwind_stack(NULL, regs, stackaddr,
- &backtrace_ops, &depth);
- return;
- }
-
- while (depth-- && (stackaddr != NULL))
- stackaddr = user_backtrace(stackaddr, regs);
-}
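
user_backtrace() above scans the user stack linearly: each word is copied in, rejected if not 4-byte aligned, and otherwise emitted as a trace entry. A simplified user-space sketch (no copy_from_user; the stack contents are made up):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical user stack words, lowest address first */
            unsigned long stack[] = { 0x8c0012a4, 0x8c0013f8, 0x8c001301 };
            unsigned long *sp = stack;
            unsigned int depth = 3;

            while (depth--) {
                    unsigned long val = *sp++;
                    if (val & 3)            /* quick paranoia check */
                            break;          /* misaligned: stop walking */
                    printf("trace 0x%lx\n", val);
            }
            return 0;       /* prints two entries, stops at the third */
    }
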
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
deleted file mode 100644
index e4dd5d5a1115..000000000000
--- a/arch/sh/oprofile/common.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/sh/oprofile/common.c
- *
- * Copyright (C) 2003 - 2010 Paul Mundt
- *
- * Based on arch/mips/oprofile/common.c:
- *
- * Copyright (C) 2004, 2005 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/perf_event.h>
-#include <linux/slab.h>
-#include <asm/processor.h>
-
-extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-/*
- * This will need to be reworked when multiple PMUs are supported.
- */
-static char *sh_pmu_op_name;
-
-char *op_name_from_perf_id(void)
-{
- return sh_pmu_op_name;
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = sh_backtrace;
-
- if (perf_num_counters() == 0)
- return -ENODEV;
-
- sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
- UTS_MACHINE, perf_pmu_name());
- if (unlikely(!sh_pmu_op_name))
- return -ENOMEM;
-
- return oprofile_perf_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- oprofile_perf_exit();
- kfree(sh_pmu_op_name);
-}
-#else
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = sh_backtrace;
- return -ENODEV;
-}
-void oprofile_arch_exit(void) {}
-#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1a2b5649d267..2c1cee9eed73 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -20,7 +20,6 @@ config SPARC
select OF_PROMTREE
select HAVE_ASM_MODVERSIONS
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_ARCH_KGDB if !SMP || SPARC64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_SECCOMP if SPARC64
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 4a0919581697..bee99e65fe23 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -65,7 +65,6 @@ libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
-drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
boot := arch/sparc/boot
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index bde4d21a8ac8..d91eb6a76dd1 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -8,7 +8,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/sparc/oprofile/Makefile b/arch/sparc/oprofile/Makefile
deleted file mode 100644
index fe906e403d3a..000000000000
--- a/arch/sparc/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
deleted file mode 100644
index 43730c9b1c86..000000000000
--- a/arch/sparc/oprofile/init.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/param.h> /* for HZ */
-
-#ifdef CONFIG_SPARC64
-#include <linux/notifier.h>
-#include <linux/rcupdate.h>
-#include <linux/kdebug.h>
-#include <asm/nmi.h>
-
-static int profile_timer_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = data;
- int ret = NOTIFY_DONE;
-
- switch (val) {
- case DIE_NMI:
- oprofile_add_sample(args->regs, 0);
- ret = NOTIFY_STOP;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static struct notifier_block profile_timer_exceptions_nb = {
- .notifier_call = profile_timer_exceptions_notify,
-};
-
-static int timer_start(void)
-{
- if (register_die_notifier(&profile_timer_exceptions_nb))
- return 1;
- nmi_adjust_hz(HZ);
- return 0;
-}
-
-
-static void timer_stop(void)
-{
- nmi_adjust_hz(1);
- unregister_die_notifier(&profile_timer_exceptions_nb);
- synchronize_rcu(); /* Allow already-started NMIs to complete. */
-}
-
-static int op_nmi_timer_init(struct oprofile_operations *ops)
-{
- if (atomic_read(&nmi_active) <= 0)
- return -ENODEV;
-
- ops->start = timer_start;
- ops->stop = timer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: Using perfctr NMI timer interrupt.\n");
- return 0;
-}
-#endif
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- int ret = -ENODEV;
-
-#ifdef CONFIG_SPARC64
- ret = op_nmi_timer_init(ops);
- if (!ret)
- return ret;
-#endif
-
- return ret;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 31d94448b4e1..7b934a591df2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -207,7 +207,6 @@ config X86
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_NMI
- select HAVE_OPROFILE
select HAVE_OPTPROBES
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b9f58b8993b3..b797f1561943 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -232,9 +232,6 @@ core-y += arch/x86/
drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/
drivers-$(CONFIG_PCI) += arch/x86/pci/
-# must be linked after kernel/
-drivers-$(CONFIG_OPROFILE) += arch/x86/oprofile/
-
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/x86/power/
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 9d5d949e662e..1cb9c17a4cb4 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -9,7 +9,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
-extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index a5ee607a3b89..3ef5868ac588 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -3,7 +3,7 @@
* local apic based NMI watchdog for various CPUs.
*
* This file also handles reservation of performance counters for coordination
- * with other users (like oprofile).
+ * with other users.
*
* Note that these events normally don't tick when the CPU idles. This means
* the frequency varies with CPU load.
@@ -105,15 +105,6 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
}
-/* checks for a bit availability (hack for oprofile) */
-int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
-{
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return !test_bit(counter, perfctr_nmi_owner);
-}
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-
int reserve_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
diff --git a/arch/x86/oprofile/Makefile b/arch/x86/oprofile/Makefile
deleted file mode 100644
index 4d49b5a27025..000000000000
--- a/arch/x86/oprofile/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o nmi_timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
-oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \
- op_model_ppro.o op_model_p4.o
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
deleted file mode 100644
index 1d8391fcca68..000000000000
--- a/arch/x86/oprofile/backtrace.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * @file backtrace.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author David Smith
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-#include <asm/unwind.h>
-
-#ifdef CONFIG_COMPAT
-static struct stack_frame_ia32 *
-dump_user_backtrace_32(struct stack_frame_ia32 *head)
-{
- /* Also check accessibility of one struct frame_head beyond: */
- struct stack_frame_ia32 bufhead[2];
- struct stack_frame_ia32 *fp;
- unsigned long bytes;
-
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != 0)
- return NULL;
-
- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
-
- oprofile_add_trace(bufhead[0].return_address);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (head >= fp)
- return NULL;
-
- return fp;
-}
-
-static inline int
-x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
-{
- struct stack_frame_ia32 *head;
-
- /* User process is IA32 */
- if (!current || user_64bit_mode(regs))
- return 0;
-
- head = (struct stack_frame_ia32 *) regs->bp;
- while (depth-- && head)
- head = dump_user_backtrace_32(head);
-
- return 1;
-}
-
-#else
-static inline int
-x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
-{
- return 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
-{
- /* Also check accessibility of one struct frame_head beyond: */
- struct stack_frame bufhead[2];
- unsigned long bytes;
-
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
- if (bytes != 0)
- return NULL;
-
- oprofile_add_trace(bufhead[0].return_address);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (head >= bufhead[0].next_frame)
- return NULL;
-
- return bufhead[0].next_frame;
-}
-
-void
-x86_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
-
- if (!user_mode(regs)) {
- struct unwind_state state;
- unsigned long addr;
-
- if (!depth)
- return;
-
- oprofile_add_trace(regs->ip);
-
- if (!--depth)
- return;
-
- for (unwind_start(&state, current, regs, NULL);
- !unwind_done(&state); unwind_next_frame(&state)) {
- addr = unwind_get_return_address(&state);
- if (!addr)
- break;
-
- oprofile_add_trace(addr);
-
- if (!--depth)
- break;
- }
-
- return;
- }
-
- if (x86_backtrace_32(regs, depth))
- return;
-
- while (depth-- && head)
- head = dump_user_backtrace(head);
-}
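
dump_user_backtrace() above follows the classic frame-pointer chain: read a saved-FP/return-address pair, record the return address, and insist that each next frame lies at a strictly higher address so a corrupt chain cannot loop. A user-space sketch with a synthetic two-frame chain (the return addresses are invented):

    #include <stdio.h>

    struct stack_frame {
            struct stack_frame *next_frame;
            unsigned long return_address;
    };

    static void walk(const struct stack_frame *head, unsigned int depth)
    {
            while (depth-- && head) {
                    printf("ret=0x%lx\n", head->return_address);
                    const struct stack_frame *next = head->next_frame;
                    if (!next || head >= next)  /* must progress upward */
                            break;
                    head = next;
            }
    }

    int main(void)
    {
            static struct stack_frame frames[2];    /* array: ordered addresses */
            frames[0] = (struct stack_frame){ &frames[1], 0x400790 };
            frames[1] = (struct stack_frame){ NULL, 0x4007aa };
            walk(&frames[0], 16);
            return 0;
    }
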
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
deleted file mode 100644
index 9e138d00ad36..000000000000
--- a/arch/x86/oprofile/init.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-
-/*
- * We support CPUs that have performance counters like the Pentium Pro
- * with the NMI mode driver.
- */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-extern int op_nmi_init(struct oprofile_operations *ops);
-extern void op_nmi_exit(void);
-#else
-static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; }
-static void op_nmi_exit(void) { }
-#endif
-
-extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = x86_backtrace;
- return op_nmi_init(ops);
-}
-
-void oprofile_arch_exit(void)
-{
- op_nmi_exit();
-}
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
deleted file mode 100644
index a7a7677265b6..000000000000
--- a/arch/x86/oprofile/nmi_int.c
+++ /dev/null
@@ -1,780 +0,0 @@
-/**
- * @file nmi_int.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Robert Richter <robert.richter@amd.com>
- * @author Barry Kasindorf <barry.kasindorf@amd.com>
- * @author Jason Yeh <jason.yeh@amd.com>
- * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
- */
-
-#include <linux/init.h>
-#include <linux/notifier.h>
-#include <linux/smp.h>
-#include <linux/oprofile.h>
-#include <linux/syscore_ops.h>
-#include <linux/slab.h>
-#include <linux/moduleparam.h>
-#include <linux/kdebug.h>
-#include <linux/cpu.h>
-#include <asm/nmi.h>
-#include <asm/msr.h>
-#include <asm/apic.h>
-
-#include "op_counter.h"
-#include "op_x86_model.h"
-
-static struct op_x86_model_spec *model;
-static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
-static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
-
-/* must be protected with get_online_cpus()/put_online_cpus(): */
-static int nmi_enabled;
-static int ctr_running;
-
-struct op_counter_config counter_config[OP_MAX_COUNTER];
-
-/* common functions */
-
-u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
- struct op_counter_config *counter_config)
-{
- u64 val = 0;
- u16 event = (u16)counter_config->event;
-
- val |= ARCH_PERFMON_EVENTSEL_INT;
- val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
- val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
- val |= (counter_config->unit_mask & 0xFF) << 8;
- counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
- ARCH_PERFMON_EVENTSEL_EDGE |
- ARCH_PERFMON_EVENTSEL_CMASK);
- val |= counter_config->extra;
- event &= model->event_mask ? model->event_mask : 0xFF;
- val |= event & 0xFF;
- val |= (u64)(event & 0x0F00) << 24;
-
- return val;
-}
-
-
-static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
-{
- if (ctr_running)
- model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
- else if (!nmi_enabled)
- return NMI_DONE;
- else
- model->stop(this_cpu_ptr(&cpu_msrs));
- return NMI_HANDLED;
-}
-
-static void nmi_cpu_save_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *controls = msrs->controls;
- unsigned int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- if (counters[i].addr)
- rdmsrl(counters[i].addr, counters[i].saved);
- }
-
- for (i = 0; i < model->num_controls; ++i) {
- if (controls[i].addr)
- rdmsrl(controls[i].addr, controls[i].saved);
- }
-}
-
-static void nmi_cpu_start(void *dummy)
-{
- struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
- if (!msrs->controls)
- WARN_ON_ONCE(1);
- else
- model->start(msrs);
-}
-
-static int nmi_start(void)
-{
- get_online_cpus();
- ctr_running = 1;
- /* make ctr_running visible to the nmi handler: */
- smp_mb();
- on_each_cpu(nmi_cpu_start, NULL, 1);
- put_online_cpus();
- return 0;
-}
-
-static void nmi_cpu_stop(void *dummy)
-{
- struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
- if (!msrs->controls)
- WARN_ON_ONCE(1);
- else
- model->stop(msrs);
-}
-
-static void nmi_stop(void)
-{
- get_online_cpus();
- on_each_cpu(nmi_cpu_stop, NULL, 1);
- ctr_running = 0;
- put_online_cpus();
-}
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static DEFINE_PER_CPU(int, switch_index);
-
-static inline int has_mux(void)
-{
- return !!model->switch_ctrl;
-}
-
-inline int op_x86_phys_to_virt(int phys)
-{
- return __this_cpu_read(switch_index) + phys;
-}
-
-inline int op_x86_virt_to_phys(int virt)
-{
- return virt % model->num_counters;
-}
-
-static void nmi_shutdown_mux(void)
-{
- int i;
-
- if (!has_mux())
- return;
-
- for_each_possible_cpu(i) {
- kfree(per_cpu(cpu_msrs, i).multiplex);
- per_cpu(cpu_msrs, i).multiplex = NULL;
- per_cpu(switch_index, i) = 0;
- }
-}
-
-static int nmi_setup_mux(void)
-{
- size_t multiplex_size =
- sizeof(struct op_msr) * model->num_virt_counters;
- int i;
-
- if (!has_mux())
- return 1;
-
- for_each_possible_cpu(i) {
- per_cpu(cpu_msrs, i).multiplex =
- kzalloc(multiplex_size, GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).multiplex)
- return 0;
- }
-
- return 1;
-}
-
-static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
-{
- int i;
- struct op_msr *multiplex = msrs->multiplex;
-
- if (!has_mux())
- return;
-
- for (i = 0; i < model->num_virt_counters; ++i) {
- if (counter_config[i].enabled) {
- multiplex[i].saved = -(u64)counter_config[i].count;
- } else {
- multiplex[i].saved = 0;
- }
- }
-
- per_cpu(switch_index, cpu) = 0;
-}
-
-static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *multiplex = msrs->multiplex;
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (counters[i].addr)
- rdmsrl(counters[i].addr, multiplex[virt].saved);
- }
-}
-
-static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *multiplex = msrs->multiplex;
- int i;
-
- for (i = 0; i < model->num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (counters[i].addr)
- wrmsrl(counters[i].addr, multiplex[virt].saved);
- }
-}
-
-static void nmi_cpu_switch(void *dummy)
-{
- int cpu = smp_processor_id();
- int si = per_cpu(switch_index, cpu);
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
- nmi_cpu_stop(NULL);
- nmi_cpu_save_mpx_registers(msrs);
-
- /* move to next set */
- si += model->num_counters;
- if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
- per_cpu(switch_index, cpu) = 0;
- else
- per_cpu(switch_index, cpu) = si;
-
- model->switch_ctrl(model, msrs);
- nmi_cpu_restore_mpx_registers(msrs);
-
- nmi_cpu_start(NULL);
-}
-
-
-/*
- * Quick check to see if multiplexing is necessary.
- * The check should be sufficient since counters are used
- * in order.
- */
-static int nmi_multiplex_on(void)
-{
- return counter_config[model->num_counters].count ? 0 : -EINVAL;
-}
-
-static int nmi_switch_event(void)
-{
- if (!has_mux())
- return -ENOSYS; /* not implemented */
- if (nmi_multiplex_on() < 0)
- return -EINVAL; /* not necessary */
-
- get_online_cpus();
- if (ctr_running)
- on_each_cpu(nmi_cpu_switch, NULL, 1);
- put_online_cpus();
-
- return 0;
-}
-
-static inline void mux_init(struct oprofile_operations *ops)
-{
- if (has_mux())
- ops->switch_events = nmi_switch_event;
-}
-
-static void mux_clone(int cpu)
-{
- if (!has_mux())
- return;
-
- memcpy(per_cpu(cpu_msrs, cpu).multiplex,
- per_cpu(cpu_msrs, 0).multiplex,
- sizeof(struct op_msr) * model->num_virt_counters);
-}
-
-#else
-
-inline int op_x86_phys_to_virt(int phys) { return phys; }
-inline int op_x86_virt_to_phys(int virt) { return virt; }
-static inline void nmi_shutdown_mux(void) { }
-static inline int nmi_setup_mux(void) { return 1; }
-static inline void
-nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
-static inline void mux_init(struct oprofile_operations *ops) { }
-static void mux_clone(int cpu) { }
-
-#endif
-
-static void free_msrs(void)
-{
- int i;
- for_each_possible_cpu(i) {
- kfree(per_cpu(cpu_msrs, i).counters);
- per_cpu(cpu_msrs, i).counters = NULL;
- kfree(per_cpu(cpu_msrs, i).controls);
- per_cpu(cpu_msrs, i).controls = NULL;
- }
- nmi_shutdown_mux();
-}
-
-static int allocate_msrs(void)
-{
- size_t controls_size = sizeof(struct op_msr) * model->num_controls;
- size_t counters_size = sizeof(struct op_msr) * model->num_counters;
-
- int i;
- for_each_possible_cpu(i) {
- per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
- GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).counters)
- goto fail;
- per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
- GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).controls)
- goto fail;
- }
-
- if (!nmi_setup_mux())
- goto fail;
-
- return 1;
-
-fail:
- free_msrs();
- return 0;
-}
-
-static void nmi_cpu_setup(void)
-{
- int cpu = smp_processor_id();
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
- nmi_cpu_save_registers(msrs);
- raw_spin_lock(&oprofilefs_lock);
- model->setup_ctrs(model, msrs);
- nmi_cpu_setup_mux(cpu, msrs);
- raw_spin_unlock(&oprofilefs_lock);
- per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-}
-
-static void nmi_cpu_restore_registers(struct op_msrs *msrs)
-{
- struct op_msr *counters = msrs->counters;
- struct op_msr *controls = msrs->controls;
- unsigned int i;
-
- for (i = 0; i < model->num_controls; ++i) {
- if (controls[i].addr)
- wrmsrl(controls[i].addr, controls[i].saved);
- }
-
- for (i = 0; i < model->num_counters; ++i) {
- if (counters[i].addr)
- wrmsrl(counters[i].addr, counters[i].saved);
- }
-}
-
-static void nmi_cpu_shutdown(void)
-{
- unsigned int v;
- int cpu = smp_processor_id();
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
-
- /* Restoring APIC_LVTPC can trigger an APIC error because the
- * combination of delivery mode and vector number can be illegal.
- * That's by design: at power-on the APIC LVTs contain a zero vector
- * number, which is legal only for NMI delivery mode. So inhibit APIC
- * errors before restoring the LVTPC.
- */
- v = apic_read(APIC_LVTERR);
- apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
- apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
- apic_write(APIC_LVTERR, v);
- nmi_cpu_restore_registers(msrs);
-}
-
-static int nmi_cpu_online(unsigned int cpu)
-{
- local_irq_disable();
- if (nmi_enabled)
- nmi_cpu_setup();
- if (ctr_running)
- nmi_cpu_start(NULL);
- local_irq_enable();
- return 0;
-}
-
-static int nmi_cpu_down_prep(unsigned int cpu)
-{
- local_irq_disable();
- if (ctr_running)
- nmi_cpu_stop(NULL);
- if (nmi_enabled)
- nmi_cpu_shutdown();
- local_irq_enable();
- return 0;
-}
-
-static int nmi_create_files(struct dentry *root)
-{
- unsigned int i;
-
- for (i = 0; i < model->num_virt_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- /* quick little hack to _not_ expose a counter if it is not
- * available for use. This should protect the userspace app.
- * NOTE: assumes a 1:1 mapping here (that counters are
- * organized sequentially in their struct assignment).
- */
- if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
- continue;
-
- snprintf(buf, sizeof(buf), "%d", i);
- dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
- oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
- }
-
- return 0;
-}
-
-static enum cpuhp_state cpuhp_nmi_online;
-
-static int nmi_setup(void)
-{
- int err = 0;
- int cpu;
-
- if (!allocate_msrs())
- return -ENOMEM;
-
- /* We need to serialize save and setup for HT because the subsets
- * of MSRs are distinct for the save and setup operations.
- */
-
- /* Assume saved/restored counters are the same on all CPUs */
- err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
- if (err)
- goto fail;
-
- for_each_possible_cpu(cpu) {
- if (!IS_ENABLED(CONFIG_SMP) || !cpu)
- continue;
-
- memcpy(per_cpu(cpu_msrs, cpu).counters,
- per_cpu(cpu_msrs, 0).counters,
- sizeof(struct op_msr) * model->num_counters);
-
- memcpy(per_cpu(cpu_msrs, cpu).controls,
- per_cpu(cpu_msrs, 0).controls,
- sizeof(struct op_msr) * model->num_controls);
-
- mux_clone(cpu);
- }
-
- nmi_enabled = 0;
- ctr_running = 0;
- /* make variables visible to the nmi handler: */
- smp_mb();
- err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
- 0, "oprofile");
- if (err)
- goto fail;
-
- nmi_enabled = 1;
- /* make nmi_enabled visible to the nmi handler: */
- smp_mb();
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
- nmi_cpu_online, nmi_cpu_down_prep);
- if (err < 0)
- goto fail_nmi;
- cpuhp_nmi_online = err;
- return 0;
-fail_nmi:
- unregister_nmi_handler(NMI_LOCAL, "oprofile");
-fail:
- free_msrs();
- return err;
-}
-
-static void nmi_shutdown(void)
-{
- struct op_msrs *msrs;
-
- cpuhp_remove_state(cpuhp_nmi_online);
- nmi_enabled = 0;
- ctr_running = 0;
-
- /* make variables visible to the nmi handler: */
- smp_mb();
- unregister_nmi_handler(NMI_LOCAL, "oprofile");
- msrs = &get_cpu_var(cpu_msrs);
- model->shutdown(msrs);
- free_msrs();
- put_cpu_var(cpu_msrs);
-}
-
-#ifdef CONFIG_PM
-
-static int nmi_suspend(void)
-{
- /* Only one CPU left, just stop that one */
- if (nmi_enabled == 1)
- nmi_cpu_stop(NULL);
- return 0;
-}
-
-static void nmi_resume(void)
-{
- if (nmi_enabled == 1)
- nmi_cpu_start(NULL);
-}
-
-static struct syscore_ops oprofile_syscore_ops = {
- .resume = nmi_resume,
- .suspend = nmi_suspend,
-};
-
-static void __init init_suspend_resume(void)
-{
- register_syscore_ops(&oprofile_syscore_ops);
-}
-
-static void exit_suspend_resume(void)
-{
- unregister_syscore_ops(&oprofile_syscore_ops);
-}
-
-#else
-
-static inline void init_suspend_resume(void) { }
-static inline void exit_suspend_resume(void) { }
-
-#endif /* CONFIG_PM */
-
-static int __init p4_init(char **cpu_type)
-{
- __u8 cpu_model = boot_cpu_data.x86_model;
-
- if (cpu_model > 6 || cpu_model == 5)
- return 0;
-
-#ifndef CONFIG_SMP
- *cpu_type = "i386/p4";
- model = &op_p4_spec;
- return 1;
-#else
- switch (smp_num_siblings) {
- case 1:
- *cpu_type = "i386/p4";
- model = &op_p4_spec;
- return 1;
-
- case 2:
- *cpu_type = "i386/p4-ht";
- model = &op_p4_ht2_spec;
- return 1;
- }
-#endif
-
- printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
- printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
- return 0;
-}
-
-enum __force_cpu_type {
- reserved = 0, /* do not force */
- timer,
- arch_perfmon,
-};
-
-static int force_cpu_type;
-
-static int set_cpu_type(const char *str, const struct kernel_param *kp)
-{
- if (!strcmp(str, "timer")) {
- force_cpu_type = timer;
- printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
- } else if (!strcmp(str, "arch_perfmon")) {
- force_cpu_type = arch_perfmon;
- printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
- } else {
- force_cpu_type = 0;
- }
-
- return 0;
-}
-module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
-
-static int __init ppro_init(char **cpu_type)
-{
- __u8 cpu_model = boot_cpu_data.x86_model;
- struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
-
- if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
- return 0;
-
- /*
- * Documentation on identifying Intel processors by CPU family
- * and model can be found in the Intel Software Developer's
- * Manuals (SDM):
- *
- * http://www.intel.com/products/processor/manuals/
- *
- * As of May 2010 the documentation for this was in the:
- * "Intel 64 and IA-32 Architectures Software Developer's
- * Manual Volume 3B: System Programming Guide", "Table B-1
- * CPUID Signature Values of DisplayFamily_DisplayModel".
- */
- switch (cpu_model) {
- case 0 ... 2:
- *cpu_type = "i386/ppro";
- break;
- case 3 ... 5:
- *cpu_type = "i386/pii";
- break;
- case 6 ... 8:
- case 10 ... 11:
- *cpu_type = "i386/piii";
- break;
- case 9:
- case 13:
- *cpu_type = "i386/p6_mobile";
- break;
- case 14:
- *cpu_type = "i386/core";
- break;
- case 0x0f:
- case 0x16:
- case 0x17:
- case 0x1d:
- *cpu_type = "i386/core_2";
- break;
- case 0x1a:
- case 0x1e:
- case 0x2e:
- spec = &op_arch_perfmon_spec;
- *cpu_type = "i386/core_i7";
- break;
- case 0x1c:
- *cpu_type = "i386/atom";
- break;
- default:
- /* Unknown */
- return 0;
- }
-
- model = spec;
- return 1;
-}
-
-int __init op_nmi_init(struct oprofile_operations *ops)
-{
- __u8 vendor = boot_cpu_data.x86_vendor;
- __u8 family = boot_cpu_data.x86;
- char *cpu_type = NULL;
- int ret = 0;
-
- if (!boot_cpu_has(X86_FEATURE_APIC))
- return -ENODEV;
-
- if (force_cpu_type == timer)
- return -ENODEV;
-
- switch (vendor) {
- case X86_VENDOR_AMD:
- /* Needs to be at least an Athlon (or hammer in 32bit mode) */
-
- switch (family) {
- case 6:
- cpu_type = "i386/athlon";
- break;
- case 0xf:
- /*
- * Actually it could be i386/hammer too, but
- * give user space a consistent name.
- */
- cpu_type = "x86-64/hammer";
- break;
- case 0x10:
- cpu_type = "x86-64/family10";
- break;
- case 0x11:
- cpu_type = "x86-64/family11h";
- break;
- case 0x12:
- cpu_type = "x86-64/family12h";
- break;
- case 0x14:
- cpu_type = "x86-64/family14h";
- break;
- case 0x15:
- cpu_type = "x86-64/family15h";
- break;
- default:
- return -ENODEV;
- }
- model = &op_amd_spec;
- break;
-
- case X86_VENDOR_INTEL:
- switch (family) {
- /* Pentium IV */
- case 0xf:
- p4_init(&cpu_type);
- break;
-
- /* A P6-class processor */
- case 6:
- ppro_init(&cpu_type);
- break;
-
- default:
- break;
- }
-
- if (cpu_type)
- break;
-
- if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
- return -ENODEV;
-
- /* use arch perfmon as fallback */
- cpu_type = "i386/arch_perfmon";
- model = &op_arch_perfmon_spec;
- break;
-
- default:
- return -ENODEV;
- }
-
- /* default values, can be overwritten by model */
- ops->create_files = nmi_create_files;
- ops->setup = nmi_setup;
- ops->shutdown = nmi_shutdown;
- ops->start = nmi_start;
- ops->stop = nmi_stop;
- ops->cpu_type = cpu_type;
-
- if (model->init)
- ret = model->init(ops);
- if (ret)
- return ret;
-
- if (!model->num_virt_counters)
- model->num_virt_counters = model->num_counters;
-
- mux_init(ops);
-
- init_suspend_resume();
-
- printk(KERN_INFO "oprofile: using NMI interrupt.\n");
- return 0;
-}
-
-void op_nmi_exit(void)
-{
- exit_suspend_resume();
-}
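
op_x86_get_ctrl() above packs one counter's configuration into an EVENTSEL MSR value. The sketch below spells out the bit positions (a simplified illustration using the standard architectural-perfmon layout; the reserved-bit masking and model->event_mask handling of the real driver are omitted):

#include <stdint.h>
#include <stdio.h>

/* Architectural EVENTSEL layout (Intel SDM / AMD APM):
 *  [7:0] event select low byte, [15:8] unit mask,
 *  bit 16 USR, bit 17 OS, bit 20 INT (interrupt on overflow),
 *  [35:32] event select high nibble (AMD extension).
 */
#define EVTSEL_USR (1ULL << 16)
#define EVTSEL_OS  (1ULL << 17)
#define EVTSEL_INT (1ULL << 20)

static uint64_t pack_ctrl(uint16_t event, uint8_t unit_mask, int user, int kernel)
{
	uint64_t val = EVTSEL_INT;	/* always interrupt on overflow */

	if (user)
		val |= EVTSEL_USR;
	if (kernel)
		val |= EVTSEL_OS;
	val |= (uint64_t)unit_mask << 8;
	val |= event & 0xFF;			 /* low byte of event code */
	val |= (uint64_t)(event & 0x0F00) << 24; /* bits 11:8 land at 35:32 */
	return val;
}

int main(void)
{
	/* e.g. AMD "retired instructions", event 0xC0, count user+kernel */
	printf("EVNTSEL = %#llx\n", (unsigned long long)pack_ctrl(0xC0, 0, 1, 1));
	return 0;
}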
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
deleted file mode 100644
index 0b7b7b179cbe..000000000000
--- a/arch/x86/oprofile/op_counter.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * @file op_counter.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- */
-
-#ifndef OP_COUNTER_H
-#define OP_COUNTER_H
-
-#define OP_MAX_COUNTER 32
-
-/* Per-perfctr configuration as set via
- * oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
- unsigned long extra;
-};
-
-extern struct op_counter_config counter_config[];
-
-#endif /* OP_COUNTER_H */
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
deleted file mode 100644
index 660a83c8287b..000000000000
--- a/arch/x86/oprofile/op_model_amd.c
+++ /dev/null
@@ -1,542 +0,0 @@
-/*
- * @file op_model_amd.c
- * athlon / K7 / K8 / Family 10h model-specific MSR operations
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author Philippe Elie
- * @author Graydon Hoare
- * @author Robert Richter <robert.richter@amd.com>
- * @author Barry Kasindorf <barry.kasindorf@amd.com>
- * @author Jason Yeh <jason.yeh@amd.com>
- * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
- */
-
-#include <linux/oprofile.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/percpu.h>
-
-#include <asm/ptrace.h>
-#include <asm/msr.h>
-#include <asm/nmi.h>
-#include <asm/apic.h>
-#include <asm/processor.h>
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-#define NUM_VIRT_COUNTERS 32
-#else
-#define NUM_VIRT_COUNTERS 0
-#endif
-
-#define OP_EVENT_MASK 0x0FFF
-#define OP_CTR_OVERFLOW (1ULL<<31)
-
-#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
-
-static int num_counters;
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-#define IBS_FETCH_SIZE 6
-#define IBS_OP_SIZE 12
-
-static u32 ibs_caps;
-
-struct ibs_config {
- unsigned long op_enabled;
- unsigned long fetch_enabled;
- unsigned long max_cnt_fetch;
- unsigned long max_cnt_op;
- unsigned long rand_en;
- unsigned long dispatched_ops;
- unsigned long branch_target;
-};
-
-struct ibs_state {
- u64 ibs_op_ctl;
- int branch_target;
- unsigned long sample_size;
-};
-
-static struct ibs_config ibs_config;
-static struct ibs_state ibs_state;
-
-/*
- * IBS randomization macros
- */
-#define IBS_RANDOM_BITS 12
-#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1)
-#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5))
-
-/*
- * 16-bit Linear Feedback Shift Register (LFSR)
- *
- * Feedback polynomial = X^16 + X^14 + X^13 + X^11 + 1
- */
-static unsigned int lfsr_random(void)
-{
- static unsigned int lfsr_value = 0xF00D;
- unsigned int bit;
-
- /* Compute next bit to shift in */
- bit = ((lfsr_value >> 0) ^
- (lfsr_value >> 2) ^
- (lfsr_value >> 3) ^
- (lfsr_value >> 5)) & 0x0001;
-
- /* Advance to next register value */
- lfsr_value = (lfsr_value >> 1) | (bit << 15);
-
- return lfsr_value;
-}
-
-/*
- * IBS software randomization
- *
- * The IBS periodic op counter is randomized in software. The lower 12
- * bits of the 20 bit counter are randomized. IbsOpCurCnt is
- * initialized with a 12 bit random value.
- */
-static inline u64 op_amd_randomize_ibs_op(u64 val)
-{
- unsigned int random = lfsr_random();
-
- if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
- /*
- * Work around hardware that cannot write to IbsOpCurCnt
- *
- * Randomize the lower 8 bits of the 16 bit
- * IbsOpMaxCnt [15:0] value in the range of -128 to
- * +127 by adding/subtracting an offset to the
- * maximum count (IbsOpMaxCnt).
- *
- * To avoid over or underflows and protect upper bits
- * starting at bit 16, the initial value for
- * IbsOpMaxCnt must fit in the range from 0x0081 to
- * 0xff80.
- */
- val += (s8)(random >> 4);
- else
- val |= (u64)(random & IBS_RANDOM_MASK) << 32;
-
- return val;
-}
-
-static inline void
-op_amd_handle_ibs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- u64 val, ctl;
- struct op_entry entry;
-
- if (!ibs_caps)
- return;
-
- if (ibs_config.fetch_enabled) {
- rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
- if (ctl & IBS_FETCH_VAL) {
- rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
- oprofile_write_reserve(&entry, regs, val,
- IBS_FETCH_CODE, IBS_FETCH_SIZE);
- oprofile_add_data64(&entry, val);
- oprofile_add_data64(&entry, ctl);
- rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
- oprofile_add_data64(&entry, val);
- oprofile_write_commit(&entry);
-
- /* reenable the IRQ */
- ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
- ctl |= IBS_FETCH_ENABLE;
- wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
- }
- }
-
- if (ibs_config.op_enabled) {
- rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
- if (ctl & IBS_OP_VAL) {
- rdmsrl(MSR_AMD64_IBSOPRIP, val);
- oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
- ibs_state.sample_size);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSOPDATA, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSOPDATA2, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSOPDATA3, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSDCLINAD, val);
- oprofile_add_data64(&entry, val);
- rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
- oprofile_add_data64(&entry, val);
- if (ibs_state.branch_target) {
- rdmsrl(MSR_AMD64_IBSBRTARGET, val);
- oprofile_add_data(&entry, (unsigned long)val);
- }
- oprofile_write_commit(&entry);
-
- /* reenable the IRQ */
- ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
- wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
- }
- }
-}
-
-static inline void op_amd_start_ibs(void)
-{
- u64 val;
-
- if (!ibs_caps)
- return;
-
- memset(&ibs_state, 0, sizeof(ibs_state));
-
- /*
- * Note: since the max count settings may be out of range, we
- * write back the actually used values so that userland can
- * read them.
- */
-
- if (ibs_config.fetch_enabled) {
- val = ibs_config.max_cnt_fetch >> 4;
- val = min(val, IBS_FETCH_MAX_CNT);
- ibs_config.max_cnt_fetch = val << 4;
- val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
- val |= IBS_FETCH_ENABLE;
- wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
- }
-
- if (ibs_config.op_enabled) {
- val = ibs_config.max_cnt_op >> 4;
- if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
- /*
- * IbsOpCurCnt not supported. See
- * op_amd_randomize_ibs_op() for details.
- */
- val = clamp(val, 0x0081ULL, 0xFF80ULL);
- ibs_config.max_cnt_op = val << 4;
- } else {
- /*
- * The start value is randomized with a
- * positive offset; we need to compensate for
- * it with half of the randomized range. Also
- * avoid underflows.
- */
- val += IBS_RANDOM_MAXCNT_OFFSET;
- if (ibs_caps & IBS_CAPS_OPCNTEXT)
- val = min(val, IBS_OP_MAX_CNT_EXT);
- else
- val = min(val, IBS_OP_MAX_CNT);
- ibs_config.max_cnt_op =
- (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
- }
- val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
- val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
- val |= IBS_OP_ENABLE;
- ibs_state.ibs_op_ctl = val;
- ibs_state.sample_size = IBS_OP_SIZE;
- if (ibs_config.branch_target) {
- ibs_state.branch_target = 1;
- ibs_state.sample_size++;
- }
- val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
- wrmsrl(MSR_AMD64_IBSOPCTL, val);
- }
-}
-
-static void op_amd_stop_ibs(void)
-{
- if (!ibs_caps)
- return;
-
- if (ibs_config.fetch_enabled)
- /* clear the max count and enable bits */
- wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
-
- if (ibs_config.op_enabled)
- /* clear the max count and enable bits */
- wrmsrl(MSR_AMD64_IBSOPCTL, 0);
-}
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* enable active counters */
- for (i = 0; i < num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->counters[i].addr)
- continue;
- release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
- release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
- }
-}
-
-static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; i++) {
- if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
- goto fail;
- if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
- release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
- goto fail;
- }
- /* both registers must be reserved */
- if (num_counters == AMD64_NUM_COUNTERS_CORE) {
- msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
- msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
- } else {
- msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
- msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
- }
- continue;
- fail:
- if (!counter_config[i].enabled)
- continue;
- op_x86_warn_reserved(i);
- op_amd_shutdown(msrs);
- return -EBUSY;
- }
-
- return 0;
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* setup reset_value */
- for (i = 0; i < OP_MAX_COUNTER; ++i) {
- if (counter_config[i].enabled
- && msrs->counters[op_x86_virt_to_phys(i)].addr)
- reset_value[i] = counter_config[i].count;
- else
- reset_value[i] = 0;
- }
-
- /* clear all counters */
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->controls[i].addr)
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- op_x86_warn_in_use(i);
- val &= model->reserved;
- wrmsrl(msrs->controls[i].addr, val);
- /*
- * avoid a false detection of ctr overflows in the NMI
- * handler
- */
- wrmsrl(msrs->counters[i].addr, -1LL);
- }
-
- /* enable active counters */
- for (i = 0; i < num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
-
- /* setup counter registers */
- wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
- /* setup control registers */
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-static int op_amd_check_ctrs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
- rdmsrl(msrs->counters[i].addr, val);
- /* bit is clear if overflowed: */
- if (val & OP_CTR_OVERFLOW)
- continue;
- oprofile_add_sample(regs, virt);
- wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
- }
-
- op_amd_handle_ibs(regs, msrs);
-
- /* See op_model_ppro.c */
- return 1;
-}
-
-static void op_amd_start(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[op_x86_phys_to_virt(i)])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
-
- op_amd_start_ibs();
-}
-
-static void op_amd_stop(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /*
- * Subtle: stop on all counters to avoid a race with the setting
- * of our pm callback
- */
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[op_x86_phys_to_virt(i)])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
-
- op_amd_stop_ibs();
-}
-
-/*
- * check and reserve APIC extended interrupt LVT offset for IBS if
- * available
- */
-
-static void init_ibs(void)
-{
- ibs_caps = get_ibs_caps();
-
- if (!ibs_caps)
- return;
-
- printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
-}
-
-static int (*create_arch_files)(struct dentry *root);
-
-static int setup_ibs_files(struct dentry *root)
-{
- struct dentry *dir;
- int ret = 0;
-
- /* architecture specific files */
- if (create_arch_files)
- ret = create_arch_files(root);
-
- if (ret)
- return ret;
-
- if (!ibs_caps)
- return ret;
-
- /* model specific files */
-
- /* setup some reasonable defaults */
- memset(&ibs_config, 0, sizeof(ibs_config));
- ibs_config.max_cnt_fetch = 250000;
- ibs_config.max_cnt_op = 250000;
-
- if (ibs_caps & IBS_CAPS_FETCHSAM) {
- dir = oprofilefs_mkdir(root, "ibs_fetch");
- oprofilefs_create_ulong(dir, "enable",
- &ibs_config.fetch_enabled);
- oprofilefs_create_ulong(dir, "max_count",
- &ibs_config.max_cnt_fetch);
- oprofilefs_create_ulong(dir, "rand_enable",
- &ibs_config.rand_en);
- }
-
- if (ibs_caps & IBS_CAPS_OPSAM) {
- dir = oprofilefs_mkdir(root, "ibs_op");
- oprofilefs_create_ulong(dir, "enable",
- &ibs_config.op_enabled);
- oprofilefs_create_ulong(dir, "max_count",
- &ibs_config.max_cnt_op);
- if (ibs_caps & IBS_CAPS_OPCNT)
- oprofilefs_create_ulong(dir, "dispatched_ops",
- &ibs_config.dispatched_ops);
- if (ibs_caps & IBS_CAPS_BRNTRGT)
- oprofilefs_create_ulong(dir, "branch_target",
- &ibs_config.branch_target);
- }
-
- return 0;
-}
-
-struct op_x86_model_spec op_amd_spec;
-
-static int op_amd_init(struct oprofile_operations *ops)
-{
- init_ibs();
- create_arch_files = ops->create_files;
- ops->create_files = setup_ibs_files;
-
- if (boot_cpu_data.x86 == 0x15) {
- num_counters = AMD64_NUM_COUNTERS_CORE;
- } else {
- num_counters = AMD64_NUM_COUNTERS;
- }
-
- op_amd_spec.num_counters = num_counters;
- op_amd_spec.num_controls = num_counters;
- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
-
- return 0;
-}
-
-struct op_x86_model_spec op_amd_spec = {
- /* num_counters/num_controls filled in at runtime */
- .reserved = MSR_AMD_EVENTSEL_RESERVED,
- .event_mask = OP_EVENT_MASK,
- .init = op_amd_init,
- .fill_in_addresses = &op_amd_fill_in_addresses,
- .setup_ctrs = &op_amd_setup_ctrs,
- .check_ctrs = &op_amd_check_ctrs,
- .start = &op_amd_start,
- .stop = &op_amd_stop,
- .shutdown = &op_amd_shutdown,
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- .switch_ctrl = &op_mux_switch_ctrl,
-#endif
-};
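
The 16-bit LFSR that op_model_amd.c used for IBS randomization, extracted into a runnable program. The taps (bits 0, 2, 3 and 5 of the current value) implement the polynomial x^16 + x^14 + x^13 + x^11 + 1; the low 12 bits of the output are what the driver mixed into IbsOpCurCnt:

#include <stdio.h>

/* 16-bit Fibonacci LFSR, polynomial x^16 + x^14 + x^13 + x^11 + 1. */
static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int bit;

	bit = ((lfsr_value >> 0) ^ (lfsr_value >> 2) ^
	       (lfsr_value >> 3) ^ (lfsr_value >> 5)) & 1;
	lfsr_value = (lfsr_value >> 1) | (bit << 15);
	return lfsr_value;
}

int main(void)
{
	/* print a few pseudo-random 12-bit values, as IBS would use them */
	for (int i = 0; i < 8; i++)
		printf("%#05x\n", lfsr_random() & 0xFFF);
	return 0;
}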
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
deleted file mode 100644
index ad1d91f475ab..000000000000
--- a/arch/x86/oprofile/op_model_p4.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/**
- * @file op_model_p4.c
- * P4 model-specific MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Graydon Hoare
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <asm/nmi.h>
-#include <asm/msr.h>
-#include <asm/fixmap.h>
-#include <asm/apic.h>
-
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-#define NUM_EVENTS 39
-
-#define NUM_COUNTERS_NON_HT 8
-#define NUM_ESCRS_NON_HT 45
-#define NUM_CCCRS_NON_HT 18
-#define NUM_CONTROLS_NON_HT (NUM_ESCRS_NON_HT + NUM_CCCRS_NON_HT)
-
-#define NUM_COUNTERS_HT2 4
-#define NUM_ESCRS_HT2 23
-#define NUM_CCCRS_HT2 9
-#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
-
-#define OP_CTR_OVERFLOW (1ULL<<31)
-
-static unsigned int num_counters = NUM_COUNTERS_NON_HT;
-static unsigned int num_controls = NUM_CONTROLS_NON_HT;
-
-/* this has to be checked dynamically since the
- hyper-threadedness of a chip is discovered at
- kernel boot-time. */
-static inline void setup_num_counters(void)
-{
-#ifdef CONFIG_SMP
- if (smp_num_siblings == 2) {
- num_counters = NUM_COUNTERS_HT2;
- num_controls = NUM_CONTROLS_HT2;
- }
-#endif
-}
-
-static inline int addr_increment(void)
-{
-#ifdef CONFIG_SMP
- return smp_num_siblings == 2 ? 2 : 1;
-#else
- return 1;
-#endif
-}
-
-
-/* tables to simulate simplified hardware view of p4 registers */
-struct p4_counter_binding {
- int virt_counter;
- int counter_address;
- int cccr_address;
-};
-
-struct p4_event_binding {
- int escr_select; /* value to put in CCCR */
- int event_select; /* value to put in ESCR */
- struct {
- int virt_counter; /* for this counter... */
- int escr_address; /* use this ESCR */
- } bindings[2];
-};
-
-/* nb: these CTR_* defines are a duplicate of defines in
- event/i386.p4*events. */
-
-
-#define CTR_BPU_0 (1 << 0)
-#define CTR_MS_0 (1 << 1)
-#define CTR_FLAME_0 (1 << 2)
-#define CTR_IQ_4 (1 << 3)
-#define CTR_BPU_2 (1 << 4)
-#define CTR_MS_2 (1 << 5)
-#define CTR_FLAME_2 (1 << 6)
-#define CTR_IQ_5 (1 << 7)
-
-static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
- { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 },
- { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 },
- { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
- { CTR_IQ_4, MSR_P4_IQ_PERFCTR4, MSR_P4_IQ_CCCR4 },
- { CTR_BPU_2, MSR_P4_BPU_PERFCTR2, MSR_P4_BPU_CCCR2 },
- { CTR_MS_2, MSR_P4_MS_PERFCTR2, MSR_P4_MS_CCCR2 },
- { CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 },
- { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 }
-};
-
-#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
-
-/* p4 event codes in libop/op_event.h are indices into this table. */
-
-static struct p4_event_binding p4_events[NUM_EVENTS] = {
-
- { /* BRANCH_RETIRED */
- 0x05, 0x06,
- { {CTR_IQ_4, MSR_P4_CRU_ESCR2},
- {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* MISPRED_BRANCH_RETIRED */
- 0x04, 0x03,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
- { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
- },
-
- { /* TC_DELIVER_MODE */
- 0x01, 0x01,
- { { CTR_MS_0, MSR_P4_TC_ESCR0},
- { CTR_MS_2, MSR_P4_TC_ESCR1} }
- },
-
- { /* BPU_FETCH_REQUEST */
- 0x00, 0x03,
- { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
- { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
- },
-
- { /* ITLB_REFERENCE */
- 0x03, 0x18,
- { { CTR_BPU_0, MSR_P4_ITLB_ESCR0},
- { CTR_BPU_2, MSR_P4_ITLB_ESCR1} }
- },
-
- { /* MEMORY_CANCEL */
- 0x05, 0x02,
- { { CTR_FLAME_0, MSR_P4_DAC_ESCR0},
- { CTR_FLAME_2, MSR_P4_DAC_ESCR1} }
- },
-
- { /* MEMORY_COMPLETE */
- 0x02, 0x08,
- { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
- { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
- },
-
- { /* LOAD_PORT_REPLAY */
- 0x02, 0x04,
- { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
- { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
- },
-
- { /* STORE_PORT_REPLAY */
- 0x02, 0x05,
- { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
- { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
- },
-
- { /* MOB_LOAD_REPLAY */
- 0x02, 0x03,
- { { CTR_BPU_0, MSR_P4_MOB_ESCR0},
- { CTR_BPU_2, MSR_P4_MOB_ESCR1} }
- },
-
- { /* PAGE_WALK_TYPE */
- 0x04, 0x01,
- { { CTR_BPU_0, MSR_P4_PMH_ESCR0},
- { CTR_BPU_2, MSR_P4_PMH_ESCR1} }
- },
-
- { /* BSQ_CACHE_REFERENCE */
- 0x07, 0x0c,
- { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
- { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
- },
-
- { /* IOQ_ALLOCATION */
- 0x06, 0x03,
- { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
- { 0, 0 } }
- },
-
- { /* IOQ_ACTIVE_ENTRIES */
- 0x06, 0x1a,
- { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
- { 0, 0 } }
- },
-
- { /* FSB_DATA_ACTIVITY */
- 0x06, 0x17,
- { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
- { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
- },
-
- { /* BSQ_ALLOCATION */
- 0x07, 0x05,
- { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
- { 0, 0 } }
- },
-
- { /* BSQ_ACTIVE_ENTRIES */
- 0x07, 0x06,
- { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
- { 0, 0 } }
- },
-
- { /* X87_ASSIST */
- 0x05, 0x03,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* SSE_INPUT_ASSIST */
- 0x01, 0x34,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* PACKED_SP_UOP */
- 0x01, 0x08,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* PACKED_DP_UOP */
- 0x01, 0x0c,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* SCALAR_SP_UOP */
- 0x01, 0x0a,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* SCALAR_DP_UOP */
- 0x01, 0x0e,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* 64BIT_MMX_UOP */
- 0x01, 0x02,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* 128BIT_MMX_UOP */
- 0x01, 0x1a,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* X87_FP_UOP */
- 0x01, 0x04,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* X87_SIMD_MOVES_UOP */
- 0x01, 0x2e,
- { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
- { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
- },
-
- { /* MACHINE_CLEAR */
- 0x05, 0x02,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* GLOBAL_POWER_EVENTS */
- 0x06, 0x13 /* older manual says 0x05, newer 0x13 */,
- { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
- { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
- },
-
- { /* TC_MS_XFER */
- 0x00, 0x05,
- { { CTR_MS_0, MSR_P4_MS_ESCR0},
- { CTR_MS_2, MSR_P4_MS_ESCR1} }
- },
-
- { /* UOP_QUEUE_WRITES */
- 0x00, 0x09,
- { { CTR_MS_0, MSR_P4_MS_ESCR0},
- { CTR_MS_2, MSR_P4_MS_ESCR1} }
- },
-
- { /* FRONT_END_EVENT */
- 0x05, 0x08,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* EXECUTION_EVENT */
- 0x05, 0x0c,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* REPLAY_EVENT */
- 0x05, 0x09,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
- { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
- },
-
- { /* INSTR_RETIRED */
- 0x04, 0x02,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
- { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
- },
-
- { /* UOPS_RETIRED */
- 0x04, 0x01,
- { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
- { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
- },
-
- { /* UOP_TYPE */
- 0x02, 0x02,
- { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
- { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
- },
-
- { /* RETIRED_MISPRED_BRANCH_TYPE */
- 0x02, 0x05,
- { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
- { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
- },
-
- { /* RETIRED_BRANCH_TYPE */
- 0x02, 0x04,
- { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
- { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
- }
-};
-
-
-#define MISC_PMC_ENABLED_P(x) ((x) & 1 << 7)
-
-#define ESCR_RESERVED_BITS 0x80000003
-#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS)
-#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2))
-#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3))
-#define ESCR_SET_USR_1(escr, usr) ((escr) |= (((usr) & 1)))
-#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
-#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
-#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
-
-#define CCCR_RESERVED_BITS 0x38030FFF
-#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
-#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000)
-#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13))
-#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1<<26))
-#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
-#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
-#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
-#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
-
-
-/* this assigns a "stagger" to the current CPU, which is used throughout
- the code in this module as an extra array offset, to select the "even"
- or "odd" part of all the divided resources. */
-static unsigned int get_stagger(void)
-{
-#ifdef CONFIG_SMP
- int cpu = smp_processor_id();
- return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
-#endif
- return 0;
-}
-
-
-/* finally, mediate access to a real hardware counter
- by passing a "virtual" counter number to this macro,
- along with your stagger setting. */
-#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger)))
-
-static unsigned long reset_value[NUM_COUNTERS_NON_HT];
-
-static void p4_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(msrs->counters[i].addr);
- }
- /*
- * some of the control registers are specially reserved in
- * conjunction with the counter registers (hence the starting offset).
- * This saves a few bits.
- */
- for (i = num_counters; i < num_controls; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(msrs->controls[i].addr);
- }
-}
-
-static int p4_fill_in_addresses(struct op_msrs * const msrs)
-{
- unsigned int i;
- unsigned int addr, cccraddr, stag;
-
- setup_num_counters();
- stag = get_stagger();
-
- /* the counter & cccr registers we pay attention to */
- for (i = 0; i < num_counters; ++i) {
- addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
- cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
- if (reserve_perfctr_nmi(addr)) {
- msrs->counters[i].addr = addr;
- msrs->controls[i].addr = cccraddr;
- }
- }
-
- /* 43 ESCR registers in three or four discontiguous groups */
- for (addr = MSR_P4_BSU_ESCR0 + stag;
- addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- /* no IQ_ESCR0/1 on some models, so we save BSU_ESCR0/1 a second
- * time to avoid a special case in nmi_{save|restore}_registers() */
- if (boot_cpu_data.x86_model >= 0x3) {
- for (addr = MSR_P4_BSU_ESCR0 + stag;
- addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
- } else {
- for (addr = MSR_P4_IQ_ESCR0 + stag;
- addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
- }
-
- for (addr = MSR_P4_RAT_ESCR0 + stag;
- addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- for (addr = MSR_P4_MS_ESCR0 + stag;
- addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- for (addr = MSR_P4_IX_ESCR0 + stag;
- addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
- if (reserve_evntsel_nmi(addr))
- msrs->controls[i].addr = addr;
- }
-
- /* there are 2 remaining non-contiguously located ESCRs */
-
- if (num_counters == NUM_COUNTERS_NON_HT) {
- /* standard non-HT CPUs handle both remaining ESCRs*/
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
-
- } else if (stag == 0) {
- /* HT CPUs give the first remainder to the even thread, as
- the 32nd control register */
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;
-
- } else {
- /* and two copies of the second to the odd thread,
-    for the 22nd and 23rd control registers */
- if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) {
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
- msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
- }
- }
-
- for (i = 0; i < num_counters; ++i) {
- if (!counter_config[i].enabled)
- continue;
- if (msrs->controls[i].addr)
- continue;
- op_x86_warn_reserved(i);
- p4_shutdown(msrs);
- return -EBUSY;
- }
-
- return 0;
-}
-
-
-static void pmc_setup_one_p4_counter(unsigned int ctr)
-{
- int i;
- int const maxbind = 2;
- unsigned int cccr = 0;
- unsigned int escr = 0;
- unsigned int high = 0;
- unsigned int counter_bit;
- struct p4_event_binding *ev = NULL;
- unsigned int stag;
-
- stag = get_stagger();
-
- /* convert from counter *number* to counter *bit* */
- counter_bit = 1 << VIRT_CTR(stag, ctr);
-
- /* find our event binding structure. */
- if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
- printk(KERN_ERR
- "oprofile: P4 event code 0x%lx out of range\n",
- counter_config[ctr].event);
- return;
- }
-
- ev = &(p4_events[counter_config[ctr].event - 1]);
-
- for (i = 0; i < maxbind; i++) {
- if (ev->bindings[i].virt_counter & counter_bit) {
-
- /* modify ESCR */
- rdmsr(ev->bindings[i].escr_address, escr, high);
- ESCR_CLEAR(escr);
- if (stag == 0) {
- ESCR_SET_USR_0(escr, counter_config[ctr].user);
- ESCR_SET_OS_0(escr, counter_config[ctr].kernel);
- } else {
- ESCR_SET_USR_1(escr, counter_config[ctr].user);
- ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
- }
- ESCR_SET_EVENT_SELECT(escr, ev->event_select);
- ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
- wrmsr(ev->bindings[i].escr_address, escr, high);
-
- /* modify CCCR */
- rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
- cccr, high);
- CCCR_CLEAR(cccr);
- CCCR_SET_REQUIRED_BITS(cccr);
- CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
- if (stag == 0)
- CCCR_SET_PMI_OVF_0(cccr);
- else
- CCCR_SET_PMI_OVF_1(cccr);
- wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
- cccr, high);
- return;
- }
- }
-
- printk(KERN_ERR
- "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
- counter_config[ctr].event, stag, ctr);
-}
-
-
-static void p4_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- unsigned int i;
- unsigned int low, high;
- unsigned int stag;
-
- stag = get_stagger();
-
- rdmsr(MSR_IA32_MISC_ENABLE, low, high);
- if (!MISC_PMC_ENABLED_P(low)) {
- printk(KERN_ERR "oprofile: P4 PMC not available\n");
- return;
- }
-
- /* clear the cccrs we will use */
- for (i = 0; i < num_counters; i++) {
- if (unlikely(!msrs->controls[i].addr))
- continue;
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_CLEAR(low);
- CCCR_SET_REQUIRED_BITS(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- }
-
- /* clear all escrs (including those outside our concern) */
- for (i = num_counters; i < num_controls; i++) {
- if (unlikely(!msrs->controls[i].addr))
- continue;
- wrmsr(msrs->controls[i].addr, 0, 0);
- }
-
- /* setup all counters */
- for (i = 0; i < num_counters; ++i) {
- if (counter_config[i].enabled && msrs->controls[i].addr) {
- reset_value[i] = counter_config[i].count;
- pmc_setup_one_p4_counter(i);
- wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address,
- -(u64)counter_config[i].count);
- } else {
- reset_value[i] = 0;
- }
- }
-}
-
-
-static int p4_check_ctrs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- unsigned long ctr, low, high, stag, real;
- int i;
-
- stag = get_stagger();
-
- for (i = 0; i < num_counters; ++i) {
-
- if (!reset_value[i])
- continue;
-
- /*
- * there is some eccentricity in the hardware which
- * requires that we perform 2 extra corrections:
- *
- * - check both the CCCR:OVF flag for overflow and the
- * counter high bit for un-flagged overflows.
- *
- * - write the counter back twice to ensure it gets
- * updated properly.
- *
- * the former seems to be related to extra NMIs happening
- * during the current NMI; the latter is reported as errata
- * N15 in intel doc 249199-029, pentium 4 specification
- * update, though their suggested work-around does not
- * appear to solve the problem.
- */
-
- real = VIRT_CTR(stag, i);
-
- rdmsr(p4_counters[real].cccr_address, low, high);
- rdmsr(p4_counters[real].counter_address, ctr, high);
- if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) {
- oprofile_add_sample(regs, i);
- wrmsrl(p4_counters[real].counter_address,
- -(u64)reset_value[i]);
- CCCR_CLEAR_OVF(low);
- wrmsr(p4_counters[real].cccr_address, low, high);
- wrmsrl(p4_counters[real].counter_address,
- -(u64)reset_value[i]);
- }
- }
-
- /* P4 quirk: you have to re-unmask the apic vector */
- apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
-
- /* See op_model_ppro.c */
- return 1;
-}
-
-
-static void p4_start(struct op_msrs const * const msrs)
-{
- unsigned int low, high, stag;
- int i;
-
- stag = get_stagger();
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_SET_ENABLE(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- }
-}
-
-
-static void p4_stop(struct op_msrs const * const msrs)
-{
- unsigned int low, high, stag;
- int i;
-
- stag = get_stagger();
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- CCCR_SET_DISABLE(low);
- wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
- }
-}
-
-#ifdef CONFIG_SMP
-struct op_x86_model_spec op_p4_ht2_spec = {
- .num_counters = NUM_COUNTERS_HT2,
- .num_controls = NUM_CONTROLS_HT2,
- .fill_in_addresses = &p4_fill_in_addresses,
- .setup_ctrs = &p4_setup_ctrs,
- .check_ctrs = &p4_check_ctrs,
- .start = &p4_start,
- .stop = &p4_stop,
- .shutdown = &p4_shutdown
-};
-#endif
-
-struct op_x86_model_spec op_p4_spec = {
- .num_counters = NUM_COUNTERS_NON_HT,
- .num_controls = NUM_CONTROLS_NON_HT,
- .fill_in_addresses = &p4_fill_in_addresses,
- .setup_ctrs = &p4_setup_ctrs,
- .check_ctrs = &p4_check_ctrs,
- .start = &p4_start,
- .stop = &p4_stop,
- .shutdown = &p4_shutdown
-};
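
The HT "stagger" logic above splits one package's counter table between the two sibling threads: the even thread takes the first num_counters slots and the odd thread takes the rest. A tiny demonstration of the VIRT_CTR() mapping (the macro and constant are copied from the deleted file; the printout is illustrative only):

#include <stdio.h>

#define NUM_COUNTERS_HT2 4

/* VIRT_CTR(stagger, i) from the deleted driver: the odd HT sibling is
 * offset past the even sibling's half of the shared counter table. */
#define VIRT_CTR(stagger, i) ((i) + (NUM_COUNTERS_HT2 * (stagger)))

int main(void)
{
	for (int stagger = 0; stagger <= 1; stagger++)
		for (int i = 0; i < NUM_COUNTERS_HT2; i++)
			printf("thread %d, ctr %d -> table slot %d\n",
			       stagger, i, VIRT_CTR(stagger, i));
	return 0;
}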
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
deleted file mode 100644
index 7913b6921959..000000000000
--- a/arch/x86/oprofile/op_model_ppro.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * @file op_model_ppro.c
- * Family 6 perfmon and architectural perfmon MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Copyright 2008 Intel Corporation
- * @remark Read the file COPYING
- *
- * @author John Levon
- * @author Philippe Elie
- * @author Graydon Hoare
- * @author Andi Kleen
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <asm/ptrace.h>
-#include <asm/msr.h>
-#include <asm/apic.h>
-#include <asm/nmi.h>
-
-#include "op_x86_model.h"
-#include "op_counter.h"
-
-static int num_counters = 2;
-static int counter_width = 32;
-
-#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
-
-static u64 reset_value[OP_MAX_COUNTER];
-
-static void ppro_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->counters[i].addr)
- continue;
- release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
- release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
- }
-}
-
-static int ppro_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; i++) {
- if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
- goto fail;
- if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
- release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
- goto fail;
- }
- /* both registers must be reserved */
- msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
- msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
- continue;
- fail:
- if (!counter_config[i].enabled)
- continue;
- op_x86_warn_reserved(i);
- ppro_shutdown(msrs);
- return -EBUSY;
- }
-
- return 0;
-}
-
-
-static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
- union cpuid10_eax eax;
- eax.full = cpuid_eax(0xa);
-
- /*
- * For Core2 (family 6, model 15), don't reset the
- * counter width:
- */
- if (!(eax.split.version_id == 0 &&
- __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15)) {
-
- if (counter_width < eax.split.bit_width)
- counter_width = eax.split.bit_width;
- }
- }
-
- /* clear all counters */
- for (i = 0; i < num_counters; ++i) {
- if (!msrs->controls[i].addr)
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- op_x86_warn_in_use(i);
- val &= model->reserved;
- wrmsrl(msrs->controls[i].addr, val);
- /*
- * avoid a false detection of ctr overflows in the NMI
- * handler
- */
- wrmsrl(msrs->counters[i].addr, -1LL);
- }
-
- /* enable active counters */
- for (i = 0; i < num_counters; ++i) {
- if (counter_config[i].enabled && msrs->counters[i].addr) {
- reset_value[i] = counter_config[i].count;
- wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[i]);
- wrmsrl(msrs->controls[i].addr, val);
- } else {
- reset_value[i] = 0;
- }
- }
-}
-
-
-static int ppro_check_ctrs(struct pt_regs * const regs,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsrl(msrs->counters[i].addr, val);
- if (val & (1ULL << (counter_width - 1)))
- continue;
- oprofile_add_sample(regs, i);
- wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- }
-
- /* Only the P6-based Pentium M needs to re-unmask the APIC vector,
- * but it doesn't hurt the other P6 variants */
- apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
-
- /* We can't work out if we really handled an interrupt: we might
- * have caught a *second* counter just after it overflowed, so when
- * the interrupt for this counter then arrives we don't find any
- * counter that has overflowed. We would then return 0 and get
- * dazed + confused. Instead we always assume we found an
- * overflow. This sucks.
- */
- return 1;
-}
-
-
-static void ppro_start(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (reset_value[i]) {
- rdmsrl(msrs->controls[i].addr, val);
- val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
- }
-}
-
-
-static void ppro_stop(struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (!reset_value[i])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-struct op_x86_model_spec op_ppro_spec = {
- .num_counters = 2,
- .num_controls = 2,
- .reserved = MSR_PPRO_EVENTSEL_RESERVED,
- .fill_in_addresses = &ppro_fill_in_addresses,
- .setup_ctrs = &ppro_setup_ctrs,
- .check_ctrs = &ppro_check_ctrs,
- .start = &ppro_start,
- .stop = &ppro_stop,
- .shutdown = &ppro_shutdown
-};
-
-/*
- * Architectural performance monitoring.
- *
- * Newer Intel CPUs (Core1+) have support for architectural
- * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
- * The advantage of this is that it can be done without knowing about
- * the specific CPU.
- */
-
-static void arch_perfmon_setup_counters(void)
-{
- union cpuid10_eax eax;
-
- eax.full = cpuid_eax(0xa);
-
- /* Workaround for BIOS bugs on family 6, model 15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 15) {
- eax.split.version_id = 2;
- eax.split.num_counters = 2;
- eax.split.bit_width = 40;
- }
-
- num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
-
- op_arch_perfmon_spec.num_counters = num_counters;
- op_arch_perfmon_spec.num_controls = num_counters;
-}
-
-static int arch_perfmon_init(struct oprofile_operations *ignore)
-{
- arch_perfmon_setup_counters();
- return 0;
-}
-
-struct op_x86_model_spec op_arch_perfmon_spec = {
- .reserved = MSR_PPRO_EVENTSEL_RESERVED,
- .init = &arch_perfmon_init,
- /* num_counters/num_controls filled in at runtime */
- .fill_in_addresses = &ppro_fill_in_addresses,
- /* user space does the cpuid check for available events */
- .setup_ctrs = &ppro_setup_ctrs,
- .check_ctrs = &ppro_check_ctrs,
- .start = &ppro_start,
- .stop = &ppro_stop,
- .shutdown = &ppro_shutdown
-};
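
The period handling above relies on programming each counter with the negated sample count: the counter then climbs toward zero, and the high bit (tested in ppro_check_ctrs()) clears exactly when `count` events have elapsed. A small simulation of that trick, assuming a 32-bit counter width for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int counter_width = 32;
	const uint64_t mask = (1ULL << counter_width) - 1;
	const uint64_t period = 100000;

	/* program the counter with -period, truncated to counter width */
	uint64_t ctr = (uint64_t)(-(int64_t)period) & mask;
	uint64_t events = 0;

	/* the high bit stays set until the counter wraps through zero */
	while (ctr & (1ULL << (counter_width - 1))) {
		ctr = (ctr + 1) & mask;
		events++;
	}
	printf("overflowed after %llu events\n", (unsigned long long)events);
	return 0;
}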
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
deleted file mode 100644
index 276cf79b5d24..000000000000
--- a/arch/x86/oprofile/op_x86_model.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @file op_x86_model.h
- * interface to x86 model-specific MSR operations
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author Graydon Hoare
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#ifndef OP_X86_MODEL_H
-#define OP_X86_MODEL_H
-
-#include <asm/types.h>
-#include <asm/perf_event.h>
-
-struct op_msr {
- unsigned long addr;
- u64 saved;
-};
-
-struct op_msrs {
- struct op_msr *counters;
- struct op_msr *controls;
- struct op_msr *multiplex;
-};
-
-struct pt_regs;
-
-struct oprofile_operations;
-
-/* The model vtable abstracts the differences between
- * various x86 CPU models' perfctr support.
- */
-struct op_x86_model_spec {
- unsigned int num_counters;
- unsigned int num_controls;
- unsigned int num_virt_counters;
- u64 reserved;
- u16 event_mask;
- int (*init)(struct oprofile_operations *ops);
- int (*fill_in_addresses)(struct op_msrs * const msrs);
- void (*setup_ctrs)(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs);
- int (*check_ctrs)(struct pt_regs * const regs,
- struct op_msrs const * const msrs);
- void (*start)(struct op_msrs const * const msrs);
- void (*stop)(struct op_msrs const * const msrs);
- void (*shutdown)(struct op_msrs const * const msrs);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- void (*switch_ctrl)(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs);
-#endif
-};
-
-struct op_counter_config;
-
-static inline void op_x86_warn_in_use(int counter)
-{
- /*
- * The warning indicates an already running counter. If
- * oprofile doesn't collect data, then try using a different
- * performance counter on your platform to monitor the desired
- * event. Delete counter #%d from the desired event by editing
- * the /usr/share/oprofile/%s/<cpu>/events file. If the event
- * cannot be monitored by any other counter, contact your
- * hardware or BIOS vendor.
- */
- pr_warn("oprofile: counter #%d on cpu #%d may already be used\n",
- counter, smp_processor_id());
-}
-
-static inline void op_x86_warn_reserved(int counter)
-{
- pr_warn("oprofile: counter #%d is already reserved\n", counter);
-}
-
-extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
- struct op_counter_config *counter_config);
-extern int op_x86_phys_to_virt(int phys);
-extern int op_x86_virt_to_phys(int virt);
-
-extern struct op_x86_model_spec op_ppro_spec;
-extern struct op_x86_model_spec op_p4_spec;
-extern struct op_x86_model_spec op_p4_ht2_spec;
-extern struct op_x86_model_spec op_amd_spec;
-extern struct op_x86_model_spec op_arch_perfmon_spec;
-
-#endif /* OP_X86_MODEL_H */
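
The op_x86_model_spec structure removed above is a classic vtable: generic NMI-handler code dispatches through function pointers that each CPU backend fills in. A minimal stand-alone sketch of that dispatch pattern (the demo_* names are illustrative only):

/* Generic code calls through function pointers supplied by a
 * model-specific backend, with no knowledge of the CPU model.
 */
#include <stdio.h>

struct model_spec {
        unsigned int num_counters;
        void (*start)(void);
        void (*stop)(void);
};

static void demo_start(void) { puts("counters started"); }
static void demo_stop(void)  { puts("counters stopped"); }

static const struct model_spec demo_model = {
        .num_counters = 2,
        .start = demo_start,
        .stop  = demo_stop,
};

int main(void)
{
        const struct model_spec *model = &demo_model; /* chosen at probe time */

        model->start();
        model->stop();
        return 0;
}
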
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 37ce1489364e..9ad6b7b82707 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -32,7 +32,6 @@ config XTENSA
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_STACKPROTECTOR
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 67a7d151d1e7..cf0940708702 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -83,7 +83,6 @@ core-y += $(buildvar) $(buildplf)
core-y += arch/xtensa/boot/dts/
libs-y += arch/xtensa/lib/ $(LIBGCC)
-drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
boot := arch/xtensa/boot
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index eeb4c5383c83..3be62da8089b 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_XTENSA_VARIANT_CUSTOM=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 412f611033cc..e9d6b6f6eca1 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_XTENSA_VARIANT_DC233C=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 4f1c7998b030..a47c85638ec1 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -18,7 +18,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_XTENSA_VARIANT_CUSTOM=y
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index f9e85c082afc..4f1ff9531f6a 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -31,7 +31,6 @@ CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB_SOURCE="kc705"
# CONFIG_PARSE_BOOTPARAM is not set
-CONFIG_OPROFILE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index c822abb93d20..7f7755cd28f0 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -16,7 +16,7 @@
#include <asm/traps.h>
#include <linux/uaccess.h>
-#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS)
+#if IS_ENABLED(CONFIG_PERF_EVENTS)
/* Address of common_exception_return, used to check the
* transition from kernel to user space.
diff --git a/arch/xtensa/oprofile/Makefile b/arch/xtensa/oprofile/Makefile
deleted file mode 100644
index f559b9ffbb3f..000000000000
--- a/arch/xtensa/oprofile/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
deleted file mode 100644
index 8f952034e161..000000000000
--- a/arch/xtensa/oprofile/backtrace.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * @file backtrace.c
- *
- * @remark Copyright 2008 Tensilica Inc.
- * Copyright (C) 2015 Cadence Design Systems Inc.
- * @remark Read the file COPYING
- *
- */
-
-#include <linux/oprofile.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-
-static int xtensa_backtrace_cb(struct stackframe *frame, void *data)
-{
- oprofile_add_trace(frame->pc);
- return 0;
-}
-
-void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- if (user_mode(regs))
- xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL);
- else
- xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb,
- xtensa_backtrace_cb, NULL);
-}
diff --git a/arch/xtensa/oprofile/init.c b/arch/xtensa/oprofile/init.c
deleted file mode 100644
index a67eea379766..000000000000
--- a/arch/xtensa/oprofile/init.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2008 Tensilica Inc.
- * @remark Read the file COPYING
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-
-extern void xtensa_backtrace(struct pt_regs *const regs, unsigned int depth);
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- ops->backtrace = xtensa_backtrace;
- return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
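
Note that oprofile_arch_init() above wires up the unwinder but still returns -ENODEV: xtensa offered no hardware sampling, so the core fell back to timer mode while keeping the arch backtrace callback. A small sketch of that "backtrace only" pattern; the types and names here are illustrative, not the real oprofile API:

#include <errno.h>
#include <stddef.h>

struct profiler_ops {
        void (*backtrace)(void *regs, unsigned int depth);
};

static void arch_backtrace(void *regs, unsigned int depth)
{
        (void)regs;
        (void)depth;    /* a real unwinder would walk frames here */
}

static int arch_init(struct profiler_ops *ops)
{
        ops->backtrace = arch_backtrace;  /* reused by the timer fallback */
        return -ENODEV;                   /* no PMU-based sampling */
}
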
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
deleted file mode 100644
index cc917865f13a..000000000000
--- a/drivers/oprofile/buffer_sync.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/**
- * @file buffer_sync.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Barry Kasindorf
- * @author Robert Richter <robert.richter@amd.com>
- *
- * This is the core of the buffer management. Each
- * CPU buffer is processed and entered into the
- * global event buffer. Such processing is necessary
- * in several circumstances, mentioned below.
- *
- * The processing does the job of converting the
- * transitory EIP value into a persistent dentry/offset
- * value that the profiler can record at its leisure.
- *
- * See fs/dcookies.c for a description of the dentry/offset
- * objects.
- */
-
-#include <linux/file.h>
-#include <linux/mm.h>
-#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/dcookies.h>
-#include <linux/profile.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
-#include <linux/gfp.h>
-
-#include "oprofile_stats.h"
-#include "event_buffer.h"
-#include "cpu_buffer.h"
-#include "buffer_sync.h"
-
-static LIST_HEAD(dying_tasks);
-static LIST_HEAD(dead_tasks);
-static cpumask_var_t marked_cpus;
-static DEFINE_SPINLOCK(task_mortuary);
-static void process_task_mortuary(void);
-
-/* Take ownership of the task struct and place it on the
- * list for processing. Only after two full buffer syncs
- * does the task eventually get freed, because by then
- * we are sure we will not reference it again.
- * Can be invoked from softirq via RCU callback due to
- * call_rcu() of the task struct, hence the _irqsave.
- */
-static int
-task_free_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- unsigned long flags;
- struct task_struct *task = data;
- spin_lock_irqsave(&task_mortuary, flags);
- list_add(&task->tasks, &dying_tasks);
- spin_unlock_irqrestore(&task_mortuary, flags);
- return NOTIFY_OK;
-}
-
-
-/* The task is on its way out. A sync of the buffer means we can catch
- * any remaining samples for this task.
- */
-static int
-task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- /* To avoid latency problems, we only process the current CPU,
- * hoping that most samples for the task are on this CPU
- */
- sync_buffer(raw_smp_processor_id());
- return 0;
-}
-
-
-/* The task is about to try a do_munmap(). We peek at what it's going to
- * do, and if it's an executable region, process the samples first, so
- * we don't lose any. This does not have to be exact, it's a QoI issue
- * only.
- */
-static int
-munmap_notify(struct notifier_block *self, unsigned long val, void *data)
-{
- unsigned long addr = (unsigned long)data;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *mpnt;
-
- mmap_read_lock(mm);
-
- mpnt = find_vma(mm, addr);
- if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
- mmap_read_unlock(mm);
- /* To avoid latency problems, we only process the current CPU,
- * hoping that most samples for the task are on this CPU
- */
- sync_buffer(raw_smp_processor_id());
- return 0;
- }
-
- mmap_read_unlock(mm);
- return 0;
-}
-
-
-/* We need to be told about new modules so we don't attribute to a previously
- * loaded module, or drop the samples on the floor.
- */
-static int
-module_load_notify(struct notifier_block *self, unsigned long val, void *data)
-{
-#ifdef CONFIG_MODULES
- if (val != MODULE_STATE_COMING)
- return NOTIFY_DONE;
-
- /* FIXME: should we process all CPU buffers ? */
- mutex_lock(&buffer_mutex);
- add_event_entry(ESCAPE_CODE);
- add_event_entry(MODULE_LOADED_CODE);
- mutex_unlock(&buffer_mutex);
-#endif
- return NOTIFY_OK;
-}
-
-
-static struct notifier_block task_free_nb = {
- .notifier_call = task_free_notify,
-};
-
-static struct notifier_block task_exit_nb = {
- .notifier_call = task_exit_notify,
-};
-
-static struct notifier_block munmap_nb = {
- .notifier_call = munmap_notify,
-};
-
-static struct notifier_block module_load_nb = {
- .notifier_call = module_load_notify,
-};
-
-static void free_all_tasks(void)
-{
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-}
-
-int sync_start(void)
-{
- int err;
-
- if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
- return -ENOMEM;
-
- err = task_handoff_register(&task_free_nb);
- if (err)
- goto out1;
- err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
- if (err)
- goto out2;
- err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
- if (err)
- goto out3;
- err = register_module_notifier(&module_load_nb);
- if (err)
- goto out4;
-
- start_cpu_work();
-
-out:
- return err;
-out4:
- profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
-out3:
- profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
-out2:
- task_handoff_unregister(&task_free_nb);
- free_all_tasks();
-out1:
- free_cpumask_var(marked_cpus);
- goto out;
-}
-
-
-void sync_stop(void)
-{
- end_cpu_work();
- unregister_module_notifier(&module_load_nb);
- profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
- profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
- task_handoff_unregister(&task_free_nb);
- barrier(); /* do all of the above first */
-
- flush_cpu_work();
-
- free_all_tasks();
- free_cpumask_var(marked_cpus);
-}
-
-
-/* Optimisation. We can manage without taking the dcookie sem
- * because we cannot reach this code without at least one
- * dcookie user still being registered (namely, the reader
- * of the event buffer). */
-static inline unsigned long fast_get_dcookie(const struct path *path)
-{
- unsigned long cookie;
-
- if (path->dentry->d_flags & DCACHE_COOKIE)
- return (unsigned long)path->dentry;
- get_dcookie(path, &cookie);
- return cookie;
-}
-
-
-/* Look up the dcookie for the task's mm->exe_file,
- * which corresponds loosely to "application name". This is
- * not strictly necessary but allows oprofile to associate
- * shared-library samples with particular applications
- */
-static unsigned long get_exec_dcookie(struct mm_struct *mm)
-{
- unsigned long cookie = NO_COOKIE;
- struct file *exe_file;
-
- if (!mm)
- goto done;
-
- exe_file = get_mm_exe_file(mm);
- if (!exe_file)
- goto done;
-
- cookie = fast_get_dcookie(&exe_file->f_path);
- fput(exe_file);
-done:
- return cookie;
-}
-
-
-/* Convert the EIP value of a sample into a persistent dentry/offset
- * pair that can then be added to the global event buffer. We make
- * sure to do this lookup before a mm->mmap modification happens so
- * we don't lose track.
- *
- * The caller must ensure the mm is not NULL (i.e., not a kernel thread).
- */
-static unsigned long
-lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
-{
- unsigned long cookie = NO_COOKIE;
- struct vm_area_struct *vma;
-
- mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
-
- if (addr < vma->vm_start || addr >= vma->vm_end)
- continue;
-
- if (vma->vm_file) {
- cookie = fast_get_dcookie(&vma->vm_file->f_path);
- *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
- vma->vm_start;
- } else {
- /* must be an anonymous map */
- *offset = addr;
- }
-
- break;
- }
-
- if (!vma)
- cookie = INVALID_COOKIE;
- mmap_read_unlock(mm);
-
- return cookie;
-}
-
-static unsigned long last_cookie = INVALID_COOKIE;
-
-static void add_cpu_switch(int i)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CPU_SWITCH_CODE);
- add_event_entry(i);
- last_cookie = INVALID_COOKIE;
-}
-
-static void add_kernel_ctx_switch(unsigned int in_kernel)
-{
- add_event_entry(ESCAPE_CODE);
- if (in_kernel)
- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
- else
- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
-}
-
-static void
-add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CTX_SWITCH_CODE);
- add_event_entry(task->pid);
- add_event_entry(cookie);
- /* Another code for daemon back-compat */
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CTX_TGID_CODE);
- add_event_entry(task->tgid);
-}
-
-
-static void add_cookie_switch(unsigned long cookie)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(COOKIE_SWITCH_CODE);
- add_event_entry(cookie);
-}
-
-
-static void add_trace_begin(void)
-{
- add_event_entry(ESCAPE_CODE);
- add_event_entry(TRACE_BEGIN_CODE);
-}
-
-static void add_data(struct op_entry *entry, struct mm_struct *mm)
-{
- unsigned long code, pc, val;
- unsigned long cookie;
- off_t offset;
-
- if (!op_cpu_buffer_get_data(entry, &code))
- return;
- if (!op_cpu_buffer_get_data(entry, &pc))
- return;
- if (!op_cpu_buffer_get_size(entry))
- return;
-
- if (mm) {
- cookie = lookup_dcookie(mm, pc, &offset);
-
- if (cookie == NO_COOKIE)
- offset = pc;
- if (cookie == INVALID_COOKIE) {
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- offset = pc;
- }
- if (cookie != last_cookie) {
- add_cookie_switch(cookie);
- last_cookie = cookie;
- }
- } else
- offset = pc;
-
- add_event_entry(ESCAPE_CODE);
- add_event_entry(code);
- add_event_entry(offset); /* Offset from Dcookie */
-
- while (op_cpu_buffer_get_data(entry, &val))
- add_event_entry(val);
-}
-
-static inline void add_sample_entry(unsigned long offset, unsigned long event)
-{
- add_event_entry(offset);
- add_event_entry(event);
-}
-
-
-/*
- * Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace. Return 0 on failure.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
- unsigned long cookie;
- off_t offset;
-
- if (in_kernel) {
- add_sample_entry(s->eip, s->event);
- return 1;
- }
-
- /* add userspace sample */
-
- if (!mm) {
- atomic_inc(&oprofile_stats.sample_lost_no_mm);
- return 0;
- }
-
- cookie = lookup_dcookie(mm, s->eip, &offset);
-
- if (cookie == INVALID_COOKIE) {
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- return 0;
- }
-
- if (cookie != last_cookie) {
- add_cookie_switch(cookie);
- last_cookie = cookie;
- }
-
- add_sample_entry(offset, s->event);
-
- return 1;
-}
-
-
-static void release_mm(struct mm_struct *mm)
-{
- if (!mm)
- return;
- mmput(mm);
-}
-
-static inline int is_code(unsigned long val)
-{
- return val == ESCAPE_CODE;
-}
-
-
-/* Move tasks along towards death. Any tasks on dead_tasks
- * will definitely have no remaining references in any
- * CPU buffers at this point, because we use two lists,
- * and to have reached the list, it must have gone through
- * one full sync already.
- */
-static void process_task_mortuary(void)
-{
- unsigned long flags;
- LIST_HEAD(local_dead_tasks);
- struct task_struct *task;
- struct task_struct *ttask;
-
- spin_lock_irqsave(&task_mortuary, flags);
-
- list_splice_init(&dead_tasks, &local_dead_tasks);
- list_splice_init(&dying_tasks, &dead_tasks);
-
- spin_unlock_irqrestore(&task_mortuary, flags);
-
- list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
- list_del(&task->tasks);
- free_task(task);
- }
-}
-
-
-static void mark_done(int cpu)
-{
- int i;
-
- cpumask_set_cpu(cpu, marked_cpus);
-
- for_each_online_cpu(i) {
- if (!cpumask_test_cpu(i, marked_cpus))
- return;
- }
-
- /* All CPUs have been processed at least once,
- * we can process the mortuary once
- */
- process_task_mortuary();
-
- cpumask_clear(marked_cpus);
-}
-
-
-/* FIXME: this is not sufficient if we implement syscall barrier backtrace
- * traversal, the code switch to sb_sample_start at first kernel enter/exit
- * switch so we need a fifth state and some special handling in sync_buffer()
- */
-typedef enum {
- sb_bt_ignore = -2,
- sb_buffer_start,
- sb_bt_start,
- sb_sample_start,
-} sync_buffer_state;
-
-/* Sync one of the CPU's buffers into the global event buffer.
- * Here we need to go through each batch of samples punctuated
- * by context switch notes, taking the task's mmap_lock and doing
- * lookup in task->mm->mmap to convert EIP into dcookie/offset
- * value.
- */
-void sync_buffer(int cpu)
-{
- struct mm_struct *mm = NULL;
- struct mm_struct *oldmm;
- unsigned long val;
- struct task_struct *new;
- unsigned long cookie = 0;
- int in_kernel = 1;
- sync_buffer_state state = sb_buffer_start;
- unsigned int i;
- unsigned long available;
- unsigned long flags;
- struct op_entry entry;
- struct op_sample *sample;
-
- mutex_lock(&buffer_mutex);
-
- add_cpu_switch(cpu);
-
- op_cpu_buffer_reset(cpu);
- available = op_cpu_buffer_entries(cpu);
-
- for (i = 0; i < available; ++i) {
- sample = op_cpu_buffer_read_entry(&entry, cpu);
- if (!sample)
- break;
-
- if (is_code(sample->eip)) {
- flags = sample->event;
- if (flags & TRACE_BEGIN) {
- state = sb_bt_start;
- add_trace_begin();
- }
- if (flags & KERNEL_CTX_SWITCH) {
- /* kernel/userspace switch */
- in_kernel = flags & IS_KERNEL;
- if (state == sb_buffer_start)
- state = sb_sample_start;
- add_kernel_ctx_switch(flags & IS_KERNEL);
- }
- if (flags & USER_CTX_SWITCH
- && op_cpu_buffer_get_data(&entry, &val)) {
- /* userspace context switch */
- new = (struct task_struct *)val;
- oldmm = mm;
- release_mm(oldmm);
- mm = get_task_mm(new);
- if (mm != oldmm)
- cookie = get_exec_dcookie(mm);
- add_user_ctx_switch(new, cookie);
- }
- if (op_cpu_buffer_get_size(&entry))
- add_data(&entry, mm);
- continue;
- }
-
- if (state < sb_bt_start)
- /* ignore sample */
- continue;
-
- if (add_sample(mm, sample, in_kernel))
- continue;
-
- /* ignore backtraces if failed to add a sample */
- if (state == sb_bt_start) {
- state = sb_bt_ignore;
- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
- }
- }
- release_mm(mm);
-
- mark_done(cpu);
-
- mutex_unlock(&buffer_mutex);
-}
-
-/* The function can be used to add a buffer worth of data directly to
- * the kernel buffer. The buffer is assumed to be a circular buffer.
- * Take the entries from index start and end at index stop, wrapping
- * at max.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max)
-{
- int i;
-
- i = start;
-
- mutex_lock(&buffer_mutex);
- while (i != stop) {
- add_event_entry(buf[i++]);
-
- if (i >= max)
- i = 0;
- }
-
- mutex_unlock(&buffer_mutex);
-}
-
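
lookup_dcookie() above resolves a sampled PC against the task's VMAs into a (file, offset) pair, falling back to the raw address for anonymous mappings. Roughly the same resolution can be demonstrated from user space by scanning /proc/self/maps, whose offset column is already in bytes; this is a sketch only, since the kernel code used dcookies rather than path names:

/* Find the mapping containing an address and compute its file offset. */
#include <stdio.h>

static void resolve(unsigned long addr)
{
        FILE *f = fopen("/proc/self/maps", "r");
        char line[512];

        if (!f)
                return;
        while (fgets(line, sizeof(line), f)) {
                unsigned long start, end, pgoff;
                char path[256] = "";

                if (sscanf(line, "%lx-%lx %*4s %lx %*s %*s %255s",
                           &start, &end, &pgoff, path) < 3)
                        continue;
                if (addr < start || addr >= end)
                        continue;
                if (path[0])    /* file-backed: offset within the file */
                        printf("%s + 0x%lx\n", path, pgoff + (addr - start));
                else            /* anonymous: raw address, as in add_sample() */
                        printf("anon @ 0x%lx\n", addr);
                break;
        }
        fclose(f);
}

int main(void)
{
        resolve((unsigned long)&resolve);
        return 0;
}
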
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
deleted file mode 100644
index 3110732c1835..000000000000
--- a/drivers/oprofile/buffer_sync.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * @file buffer_sync.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_BUFFER_SYNC_H
-#define OPROFILE_BUFFER_SYNC_H
-
-/* add the necessary profiling hooks */
-int sync_start(void);
-
-/* remove the hooks */
-void sync_stop(void);
-
-/* sync the given CPU's buffer */
-void sync_buffer(int cpu);
-
-#endif /* OPROFILE_BUFFER_SYNC_H */
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
deleted file mode 100644
index 9210a95cb4e6..000000000000
--- a/drivers/oprofile/cpu_buffer.c
+++ /dev/null
@@ -1,465 +0,0 @@
-/**
- * @file cpu_buffer.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Barry Kasindorf <barry.kasindorf@amd.com>
- * @author Robert Richter <robert.richter@amd.com>
- *
- * Each CPU has a local buffer that stores PC value/event
- * pairs. We also log context switches when we notice them.
- * Eventually each CPU's buffer is processed into the global
- * event buffer by sync_buffer().
- *
- * We use a local buffer for two reasons: an NMI or similar
- * interrupt cannot synchronise, and high sampling rates
- * would lead to catastrophic global synchronisation if
- * a global buffer was used.
- */
-
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-
-#include <asm/ptrace.h>
-
-#include "event_buffer.h"
-#include "cpu_buffer.h"
-#include "buffer_sync.h"
-#include "oprof.h"
-
-#define OP_BUFFER_FLAGS 0
-
-static struct trace_buffer *op_ring_buffer;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
-
-static void wq_sync_buffer(struct work_struct *work);
-
-#define DEFAULT_TIMER_EXPIRE (HZ / 10)
-static int work_enabled;
-
-unsigned long oprofile_get_cpu_buffer_size(void)
-{
- return oprofile_cpu_buffer_size;
-}
-
-void oprofile_cpu_buffer_inc_smpl_lost(void)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
-
- cpu_buf->sample_lost_overflow++;
-}
-
-void free_cpu_buffers(void)
-{
- if (op_ring_buffer)
- ring_buffer_free(op_ring_buffer);
- op_ring_buffer = NULL;
-}
-
-#define RB_EVENT_HDR_SIZE 4
-
-int alloc_cpu_buffers(void)
-{
- int i;
-
- unsigned long buffer_size = oprofile_cpu_buffer_size;
- unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
- RB_EVENT_HDR_SIZE);
-
- op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer)
- goto fail;
-
- for_each_possible_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
-
- b->last_task = NULL;
- b->last_is_kernel = -1;
- b->tracing = 0;
- b->buffer_size = buffer_size;
- b->sample_received = 0;
- b->sample_lost_overflow = 0;
- b->backtrace_aborted = 0;
- b->sample_invalid_eip = 0;
- b->cpu = i;
- INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
- }
- return 0;
-
-fail:
- free_cpu_buffers();
- return -ENOMEM;
-}
-
-void start_cpu_work(void)
-{
- int i;
-
- work_enabled = 1;
-
- for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
-
- /*
- * Spread the work by 1 jiffy per cpu so they don't all
- * fire at once.
- */
- schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
- }
-}
-
-void end_cpu_work(void)
-{
- work_enabled = 0;
-}
-
-void flush_cpu_work(void)
-{
- int i;
-
- for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
-
- /* these works are per-cpu, no need for flush_sync */
- flush_delayed_work(&b->work);
- }
-}
-
-/*
- * This function prepares the cpu buffer to write a sample.
- *
- * Struct op_entry is used during operations on the ring buffer while
- * struct op_sample contains the data that is stored in the ring
- * buffer. The entry struct may be passed in uninitialized. The
- * function reserves a data array of the given size. Use
- * op_cpu_buffer_write_commit() after preparing the sample. On error
- * a NULL pointer is returned, otherwise a pointer to the sample.
- *
- */
-struct op_sample
-*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
-{
- entry->event = ring_buffer_lock_reserve
- (op_ring_buffer, sizeof(struct op_sample) +
- size * sizeof(entry->sample->data[0]));
- if (!entry->event)
- return NULL;
- entry->sample = ring_buffer_event_data(entry->event);
- entry->size = size;
- entry->data = entry->sample->data;
-
- return entry->sample;
-}
-
-int op_cpu_buffer_write_commit(struct op_entry *entry)
-{
- return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
-}
-
-struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
-{
- struct ring_buffer_event *e;
- e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
- if (!e)
- return NULL;
-
- entry->event = e;
- entry->sample = ring_buffer_event_data(e);
- entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
- / sizeof(entry->sample->data[0]);
- entry->data = entry->sample->data;
- return entry->sample;
-}
-
-unsigned long op_cpu_buffer_entries(int cpu)
-{
- return ring_buffer_entries_cpu(op_ring_buffer, cpu);
-}
-
-static int
-op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
- int is_kernel, struct task_struct *task)
-{
- struct op_entry entry;
- struct op_sample *sample;
- unsigned long flags;
- int size;
-
- flags = 0;
-
- if (backtrace)
- flags |= TRACE_BEGIN;
-
- /* notice a switch from user->kernel or vice versa */
- is_kernel = !!is_kernel;
- if (cpu_buf->last_is_kernel != is_kernel) {
- cpu_buf->last_is_kernel = is_kernel;
- flags |= KERNEL_CTX_SWITCH;
- if (is_kernel)
- flags |= IS_KERNEL;
- }
-
- /* notice a task switch */
- if (cpu_buf->last_task != task) {
- cpu_buf->last_task = task;
- flags |= USER_CTX_SWITCH;
- }
-
- if (!flags)
- /* nothing to do */
- return 0;
-
- if (flags & USER_CTX_SWITCH)
- size = 1;
- else
- size = 0;
-
- sample = op_cpu_buffer_write_reserve(&entry, size);
- if (!sample)
- return -ENOMEM;
-
- sample->eip = ESCAPE_CODE;
- sample->event = flags;
-
- if (size)
- op_cpu_buffer_add_data(&entry, (unsigned long)task);
-
- op_cpu_buffer_write_commit(&entry);
-
- return 0;
-}
-
-static inline int
-op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
- unsigned long pc, unsigned long event)
-{
- struct op_entry entry;
- struct op_sample *sample;
-
- sample = op_cpu_buffer_write_reserve(&entry, 0);
- if (!sample)
- return -ENOMEM;
-
- sample->eip = pc;
- sample->event = event;
-
- return op_cpu_buffer_write_commit(&entry);
-}
-
-/*
- * This must be safe from any context.
- *
- * is_kernel is needed because on some architectures you cannot
- * tell if you are in kernel or user space simply by looking at
- * pc. We tag this in the buffer by generating kernel enter/exit
- * events whenever is_kernel changes
- */
-static int
-log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
- unsigned long backtrace, int is_kernel, unsigned long event,
- struct task_struct *task)
-{
- struct task_struct *tsk = task ? task : current;
- cpu_buf->sample_received++;
-
- if (pc == ESCAPE_CODE) {
- cpu_buf->sample_invalid_eip++;
- return 0;
- }
-
- if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
- goto fail;
-
- if (op_add_sample(cpu_buf, pc, event))
- goto fail;
-
- return 1;
-
-fail:
- cpu_buf->sample_lost_overflow++;
- return 0;
-}
-
-static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
-{
- cpu_buf->tracing = 1;
-}
-
-static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
-{
- cpu_buf->tracing = 0;
-}
-
-static inline void
-__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
- unsigned long backtrace = oprofile_backtrace_depth;
-
- /*
- * if log_sample() fails we can't backtrace since we lost the
- * source of this event
- */
- if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
- /* failed */
- return;
-
- if (!backtrace)
- return;
-
- oprofile_begin_trace(cpu_buf);
- oprofile_ops.backtrace(regs, backtrace);
- oprofile_end_trace(cpu_buf);
-}
-
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task)
-{
- __oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
-}
-
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel)
-{
- __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
-}
-
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
-{
- int is_kernel;
- unsigned long pc;
-
- if (likely(regs)) {
- is_kernel = !user_mode(regs);
- pc = profile_pc(regs);
- } else {
- is_kernel = 0; /* This value will not be used */
- pc = ESCAPE_CODE; /* as this causes an early return. */
- }
-
- __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
-}
-
-/*
- * Add samples with data to the ring buffer.
- *
- * Use oprofile_add_data(&entry, val) to add data and
- * oprofile_write_commit(&entry) to commit the sample.
- */
-void
-oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
- unsigned long pc, int code, int size)
-{
- struct op_sample *sample;
- int is_kernel = !user_mode(regs);
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
-
- cpu_buf->sample_received++;
-
- /* no backtraces for samples with data */
- if (op_add_code(cpu_buf, 0, is_kernel, current))
- goto fail;
-
- sample = op_cpu_buffer_write_reserve(entry, size + 2);
- if (!sample)
- goto fail;
- sample->eip = ESCAPE_CODE;
- sample->event = 0; /* no flags */
-
- op_cpu_buffer_add_data(entry, code);
- op_cpu_buffer_add_data(entry, pc);
-
- return;
-
-fail:
- entry->event = NULL;
- cpu_buf->sample_lost_overflow++;
-}
-
-int oprofile_add_data(struct op_entry *entry, unsigned long val)
-{
- if (!entry->event)
- return 0;
- return op_cpu_buffer_add_data(entry, val);
-}
-
-int oprofile_add_data64(struct op_entry *entry, u64 val)
-{
- if (!entry->event)
- return 0;
- if (op_cpu_buffer_get_size(entry) < 2)
- /*
- * return 0 to indicate the buffer is too small to hold a
- * 64-bit value, even if there is some space left
- */
- return 0;
- if (!op_cpu_buffer_add_data(entry, (u32)val))
- return 0;
- return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
-}
-
-int oprofile_write_commit(struct op_entry *entry)
-{
- if (!entry->event)
- return -EINVAL;
- return op_cpu_buffer_write_commit(entry);
-}
-
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
- log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
-}
-
-void oprofile_add_trace(unsigned long pc)
-{
- struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
-
- if (!cpu_buf->tracing)
- return;
-
- /*
- * a broken frame can give an eip with the same value as the
- * escape code; abort the trace if we get it
- */
- if (pc == ESCAPE_CODE)
- goto fail;
-
- if (op_add_sample(cpu_buf, pc, 0))
- goto fail;
-
- return;
-fail:
- cpu_buf->tracing = 0;
- cpu_buf->backtrace_aborted++;
- return;
-}
-
-/*
- * This serves to avoid cpu buffer overflow, and makes sure
- * the task mortuary progresses
- *
- * By using schedule_delayed_work_on and then schedule_delayed_work
- * we guarantee this will stay on the correct cpu
- */
-static void wq_sync_buffer(struct work_struct *work)
-{
- struct oprofile_cpu_buffer *b =
- container_of(work, struct oprofile_cpu_buffer, work.work);
- if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
- cancel_delayed_work(&b->work);
- return;
- }
- sync_buffer(b->cpu);
-
- /* don't re-add the work if we're shutting down */
- if (work_enabled)
- schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
-}
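
The CPU buffer removed above interleaves two record kinds in one sample stream: ordinary {pc, event} pairs and control records whose eip field carries ESCAPE_CODE followed by flags and optional data. A small runnable sketch of that framing (the ESCAPE_CODE value here is illustrative):

#include <stdio.h>

#define ESCAPE_CODE     (~0UL)  /* illustrative marker value */

struct rec { unsigned long eip, event; };

static void consume(const struct rec *r, int n)
{
        for (int i = 0; i < n; i++) {
                if (r[i].eip == ESCAPE_CODE)
                        printf("control record, flags=0x%lx\n", r[i].event);
                else
                        printf("sample pc=0x%lx event=%lu\n",
                               r[i].eip, r[i].event);
        }
}

int main(void)
{
        struct rec buf[] = {
                { ESCAPE_CODE, 0x1 },   /* e.g. a context-switch note */
                { 0x401000, 0 },        /* ordinary sample */
        };

        consume(buf, 2);
        return 0;
}
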
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
deleted file mode 100644
index 31478c0cff87..000000000000
--- a/drivers/oprofile/cpu_buffer.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * @file cpu_buffer.h
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#ifndef OPROFILE_CPU_BUFFER_H
-#define OPROFILE_CPU_BUFFER_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/cache.h>
-#include <linux/sched.h>
-#include <linux/ring_buffer.h>
-
-struct task_struct;
-
-int alloc_cpu_buffers(void);
-void free_cpu_buffers(void);
-
-void start_cpu_work(void);
-void end_cpu_work(void);
-void flush_cpu_work(void);
-
-/* CPU buffer is composed of such entries (which are
- * also used for context switch notes)
- */
-struct op_sample {
- unsigned long eip;
- unsigned long event;
- unsigned long data[];
-};
-
-struct op_entry;
-
-struct oprofile_cpu_buffer {
- unsigned long buffer_size;
- struct task_struct *last_task;
- int last_is_kernel;
- int tracing;
- unsigned long sample_received;
- unsigned long sample_lost_overflow;
- unsigned long backtrace_aborted;
- unsigned long sample_invalid_eip;
- int cpu;
- struct delayed_work work;
-};
-
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
-
-/*
- * Resets the cpu buffer to a sane state.
- *
- * reset these to invalid values; the next sample collected will
- * populate the buffer with proper values to initialize the buffer
- */
-static inline void op_cpu_buffer_reset(int cpu)
-{
- struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
-
- cpu_buf->last_is_kernel = -1;
- cpu_buf->last_task = NULL;
-}
-
-/*
- * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
- * called only if op_cpu_buffer_write_reserve() did not return NULL or
- * entry->event != NULL, otherwise entry->size or entry->event will be
- * used uninitialized.
- */
-
-struct op_sample
-*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
-int op_cpu_buffer_write_commit(struct op_entry *entry);
-struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
-unsigned long op_cpu_buffer_entries(int cpu);
-
-/* returns the remaining free size of data in the entry */
-static inline
-int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
-{
- if (!entry->size)
- return 0;
- *entry->data = val;
- entry->size--;
- entry->data++;
- return entry->size;
-}
-
-/* returns the size of data in the entry */
-static inline
-int op_cpu_buffer_get_size(struct op_entry *entry)
-{
- return entry->size;
-}
-
-/* returns 0 if empty or the size of data including the current value */
-static inline
-int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
-{
- int size = entry->size;
- if (!size)
- return 0;
- *val = *entry->data;
- entry->size--;
- entry->data++;
- return size;
-}
-
-/* extra data flags */
-#define KERNEL_CTX_SWITCH (1UL << 0)
-#define IS_KERNEL (1UL << 1)
-#define TRACE_BEGIN (1UL << 2)
-#define USER_CTX_SWITCH (1UL << 3)
-
-#endif /* OPROFILE_CPU_BUFFER_H */
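
op_cpu_buffer_add_data() and op_cpu_buffer_get_data() above implement a simple cursor over an entry's data array: both advance the pointer and shrink the remaining size. The same pattern as a compact stand-alone demo:

#include <stdio.h>

struct entry {
        int size;               /* slots remaining */
        unsigned long *data;    /* next slot */
};

static int add_data(struct entry *e, unsigned long val)
{
        if (!e->size)
                return 0;
        *e->data++ = val;
        return --e->size;       /* free slots left */
}

static int get_data(struct entry *e, unsigned long *val)
{
        int size = e->size;

        if (!size)
                return 0;
        *val = *e->data++;
        e->size--;
        return size;            /* size including the value just read */
}

int main(void)
{
        unsigned long buf[2], val;
        struct entry w = { 2, buf }, r = { 2, buf };

        add_data(&w, 11);
        add_data(&w, 22);
        while (get_data(&r, &val))
                printf("%lu\n", val);
        return 0;
}
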
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
deleted file mode 100644
index 6c9edc8bbc95..000000000000
--- a/drivers/oprofile/event_buffer.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file event_buffer.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- *
- * This is the global event buffer that the user-space
- * daemon reads from. The event buffer is an untyped array
- * of unsigned longs. Entries are prefixed by the
- * escape value ESCAPE_CODE followed by an identifying code.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/oprofile.h>
-#include <linux/sched/signal.h>
-#include <linux/capability.h>
-#include <linux/dcookies.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "oprof.h"
-#include "event_buffer.h"
-#include "oprofile_stats.h"
-
-DEFINE_MUTEX(buffer_mutex);
-
-static unsigned long buffer_opened;
-static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-static unsigned long *event_buffer;
-static unsigned long buffer_size;
-static unsigned long buffer_watershed;
-static size_t buffer_pos;
-/* atomic_t because wait_event checks it outside of buffer_mutex */
-static atomic_t buffer_ready = ATOMIC_INIT(0);
-
-/*
- * Add an entry to the event buffer. When we get near to the end we
- * wake up the process sleeping on the read() of the file. To protect
- * the event_buffer this function may only be called when buffer_mutex
- * is set.
- */
-void add_event_entry(unsigned long value)
-{
- /*
- * This shouldn't happen since all workqueues or handlers are
- * canceled or flushed before the event buffer is freed.
- */
- if (!event_buffer) {
- WARN_ON_ONCE(1);
- return;
- }
-
- if (buffer_pos == buffer_size) {
- atomic_inc(&oprofile_stats.event_lost_overflow);
- return;
- }
-
- event_buffer[buffer_pos] = value;
- if (++buffer_pos == buffer_size - buffer_watershed) {
- atomic_set(&buffer_ready, 1);
- wake_up(&buffer_wait);
- }
-}
-
-
-/* Wake up the waiting process if any. This happens
- * on "echo 0 >/dev/oprofile/enable" so the daemon
- * processes the data remaining in the event buffer.
- */
-void wake_up_buffer_waiter(void)
-{
- mutex_lock(&buffer_mutex);
- atomic_set(&buffer_ready, 1);
- wake_up(&buffer_wait);
- mutex_unlock(&buffer_mutex);
-}
-
-
-int alloc_event_buffer(void)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- buffer_size = oprofile_buffer_size;
- buffer_watershed = oprofile_buffer_watershed;
- raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-
- if (buffer_watershed >= buffer_size)
- return -EINVAL;
-
- buffer_pos = 0;
- event_buffer = vmalloc(array_size(buffer_size, sizeof(unsigned long)));
- if (!event_buffer)
- return -ENOMEM;
-
- return 0;
-}
-
-
-void free_event_buffer(void)
-{
- mutex_lock(&buffer_mutex);
- vfree(event_buffer);
- buffer_pos = 0;
- event_buffer = NULL;
- mutex_unlock(&buffer_mutex);
-}
-
-
-static int event_buffer_open(struct inode *inode, struct file *file)
-{
- int err = -EPERM;
-
- if (!perfmon_capable())
- return -EPERM;
-
- if (test_and_set_bit_lock(0, &buffer_opened))
- return -EBUSY;
-
- /* Register as a user of dcookies
- * to ensure they persist for the lifetime of
- * the open event file
- */
- err = -EINVAL;
- file->private_data = dcookie_register();
- if (!file->private_data)
- goto out;
-
- if ((err = oprofile_setup()))
- goto fail;
-
- /* NB: the actual start happens from userspace
- * echo 1 >/dev/oprofile/enable
- */
-
- return nonseekable_open(inode, file);
-
-fail:
- dcookie_unregister(file->private_data);
-out:
- __clear_bit_unlock(0, &buffer_opened);
- return err;
-}
-
-
-static int event_buffer_release(struct inode *inode, struct file *file)
-{
- oprofile_stop();
- oprofile_shutdown();
- dcookie_unregister(file->private_data);
- buffer_pos = 0;
- atomic_set(&buffer_ready, 0);
- __clear_bit_unlock(0, &buffer_opened);
- return 0;
-}
-
-
-static ssize_t event_buffer_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- int retval = -EINVAL;
- size_t const max = buffer_size * sizeof(unsigned long);
-
- /* handling partial reads is more trouble than it's worth */
- if (count != max || *offset)
- return -EINVAL;
-
- wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
-
- if (signal_pending(current))
- return -EINTR;
-
- /* can't currently happen */
- if (!atomic_read(&buffer_ready))
- return -EAGAIN;
-
- mutex_lock(&buffer_mutex);
-
- /* May happen if the buffer is freed during pending reads. */
- if (!event_buffer) {
- retval = -EINTR;
- goto out;
- }
-
- atomic_set(&buffer_ready, 0);
-
- retval = -EFAULT;
-
- count = buffer_pos * sizeof(unsigned long);
-
- if (copy_to_user(buf, event_buffer, count))
- goto out;
-
- retval = count;
- buffer_pos = 0;
-
-out:
- mutex_unlock(&buffer_mutex);
- return retval;
-}
-
-const struct file_operations event_buffer_fops = {
- .open = event_buffer_open,
- .release = event_buffer_release,
- .read = event_buffer_read,
- .llseek = no_llseek,
-};
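
add_event_entry() above wakes the reading daemon once the buffer fills to within buffer_watershed entries of its end, and counts rather than stores entries once it is full. A tiny sketch of that threshold logic, with made-up sizes:

#include <stdio.h>

#define SIZE            8
#define WATERSHED       2

static unsigned long buffer[SIZE];
static unsigned long pos, lost;

static void add_entry(unsigned long v)
{
        if (pos == SIZE) {              /* full: count the loss */
                lost++;
                return;
        }
        buffer[pos] = v;
        if (++pos == SIZE - WATERSHED)
                printf("wake reader at pos=%lu\n", pos);
}

int main(void)
{
        for (unsigned long i = 0; i < 12; i++)
                add_entry(i);
        printf("lost=%lu\n", lost);
        return 0;
}
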
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
deleted file mode 100644
index a8d5bb3cba89..000000000000
--- a/drivers/oprofile/event_buffer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * @file event_buffer.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef EVENT_BUFFER_H
-#define EVENT_BUFFER_H
-
-#include <linux/types.h>
-#include <linux/mutex.h>
-
-int alloc_event_buffer(void);
-
-void free_event_buffer(void);
-
-/**
- * Add data to the event buffer.
- * The data passed is free-form, but typically consists of
- * file offsets, dcookies, context information, and ESCAPE codes.
- */
-void add_event_entry(unsigned long data);
-
-/* wake up the process sleeping on the event file */
-void wake_up_buffer_waiter(void);
-
-#define INVALID_COOKIE ~0UL
-#define NO_COOKIE 0UL
-
-extern const struct file_operations event_buffer_fops;
-
-/* mutex between sync_cpu_buffers() and the
- * file reading code.
- */
-extern struct mutex buffer_mutex;
-
-#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
deleted file mode 100644
index f343bd96609a..000000000000
--- a/drivers/oprofile/nmi_timer_int.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/**
- * @file nmi_timer_int.c
- *
- * @remark Copyright 2011 Advanced Micro Devices, Inc.
- *
- * @author Robert Richter <robert.richter@amd.com>
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/perf_event.h>
-
-#ifdef CONFIG_OPROFILE_NMI_TIMER
-
-static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
-static int ctr_running;
-
-static struct perf_event_attr nmi_timer_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- .size = sizeof(struct perf_event_attr),
- .pinned = 1,
- .disabled = 1,
-};
-
-static void nmi_timer_callback(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
-{
- event->hw.interrupts = 0; /* don't throttle interrupts */
- oprofile_add_sample(regs, 0);
-}
-
-static int nmi_timer_start_cpu(int cpu)
-{
- struct perf_event *event = per_cpu(nmi_timer_events, cpu);
-
- if (!event) {
- event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
- nmi_timer_callback, NULL);
- if (IS_ERR(event))
- return PTR_ERR(event);
- per_cpu(nmi_timer_events, cpu) = event;
- }
-
- if (event && ctr_running)
- perf_event_enable(event);
-
- return 0;
-}
-
-static void nmi_timer_stop_cpu(int cpu)
-{
- struct perf_event *event = per_cpu(nmi_timer_events, cpu);
-
- if (event && ctr_running)
- perf_event_disable(event);
-}
-
-static int nmi_timer_cpu_online(unsigned int cpu)
-{
- nmi_timer_start_cpu(cpu);
- return 0;
-}
-static int nmi_timer_cpu_predown(unsigned int cpu)
-{
- nmi_timer_stop_cpu(cpu);
- return 0;
-}
-
-static int nmi_timer_start(void)
-{
- int cpu;
-
- get_online_cpus();
- ctr_running = 1;
- for_each_online_cpu(cpu)
- nmi_timer_start_cpu(cpu);
- put_online_cpus();
-
- return 0;
-}
-
-static void nmi_timer_stop(void)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- nmi_timer_stop_cpu(cpu);
- ctr_running = 0;
- put_online_cpus();
-}
-
-static enum cpuhp_state hp_online;
-
-static void nmi_timer_shutdown(void)
-{
- struct perf_event *event;
- int cpu;
-
- cpuhp_remove_state(hp_online);
- for_each_possible_cpu(cpu) {
- event = per_cpu(nmi_timer_events, cpu);
- if (!event)
- continue;
- perf_event_disable(event);
- per_cpu(nmi_timer_events, cpu) = NULL;
- perf_event_release_kernel(event);
- }
-}
-
-static int nmi_timer_setup(void)
-{
- int err;
- u64 period;
-
- /* clock cycles per tick: */
- period = (u64)cpu_khz * 1000;
- do_div(period, HZ);
- nmi_timer_attr.sample_period = period;
-
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
- nmi_timer_cpu_online, nmi_timer_cpu_predown);
- if (err < 0) {
- nmi_timer_shutdown();
- return err;
- }
- hp_online = err;
- return 0;
-}
-
-int __init op_nmi_timer_init(struct oprofile_operations *ops)
-{
- int err = 0;
-
- err = nmi_timer_setup();
- if (err)
- return err;
- nmi_timer_shutdown(); /* only check, don't alloc */
-
- ops->create_files = NULL;
- ops->setup = nmi_timer_setup;
- ops->shutdown = nmi_timer_shutdown;
- ops->start = nmi_timer_start;
- ops->stop = nmi_timer_stop;
- ops->cpu_type = "timer";
-
- printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
-
- return 0;
-}
-
-#endif
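
nmi_timer_setup() above programs one pinned cycle-counting perf event per CPU whose sample period is the number of clock cycles in one scheduler tick. For example, at 3 GHz with HZ=250 that is 3,000,000 kHz * 1000 / 250 = 12,000,000 cycles; a sketch of the arithmetic with example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cpu_khz = 3000000;     /* 3 GHz, as reported in kHz */
        unsigned int hz = 250;          /* kernel tick rate */
        uint64_t period = cpu_khz * 1000 / hz;

        printf("sample_period = %llu cycles\n",
               (unsigned long long)period);     /* 12000000 */
        return 0;
}
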
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
deleted file mode 100644
index ed2c3ec07024..000000000000
--- a/drivers/oprofile/oprof.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/**
- * @file oprof.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/oprofile.h>
-#include <linux/moduleparam.h>
-#include <linux/workqueue.h>
-#include <linux/time.h>
-#include <linux/mutex.h>
-
-#include "oprof.h"
-#include "event_buffer.h"
-#include "cpu_buffer.h"
-#include "buffer_sync.h"
-#include "oprofile_stats.h"
-
-struct oprofile_operations oprofile_ops;
-
-unsigned long oprofile_started;
-unsigned long oprofile_backtrace_depth;
-static unsigned long is_setup;
-static DEFINE_MUTEX(start_mutex);
-
-/* timer
- 0 - use performance monitoring hardware if available
- 1 - use the timer int mechanism regardless
- */
-static int timer = 0;
-
-int oprofile_setup(void)
-{
- int err;
-
- mutex_lock(&start_mutex);
-
- if ((err = alloc_cpu_buffers()))
- goto out;
-
- if ((err = alloc_event_buffer()))
- goto out1;
-
- if (oprofile_ops.setup && (err = oprofile_ops.setup()))
- goto out2;
-
- /* Note that even though this starts part of the
- * profiling overhead, it is necessary to prevent
- * us from missing task deaths and eventually oopsing
- * when trying to process the event buffer.
- */
- if (oprofile_ops.sync_start) {
- int sync_ret = oprofile_ops.sync_start();
- switch (sync_ret) {
- case 0:
- goto post_sync;
- case 1:
- goto do_generic;
- case -1:
- goto out3;
- default:
- goto out3;
- }
- }
-do_generic:
- if ((err = sync_start()))
- goto out3;
-
-post_sync:
- is_setup = 1;
- mutex_unlock(&start_mutex);
- return 0;
-
-out3:
- if (oprofile_ops.shutdown)
- oprofile_ops.shutdown();
-out2:
- free_event_buffer();
-out1:
- free_cpu_buffers();
-out:
- mutex_unlock(&start_mutex);
- return err;
-}
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void switch_worker(struct work_struct *work);
-static DECLARE_DELAYED_WORK(switch_work, switch_worker);
-
-static void start_switch_worker(void)
-{
- if (oprofile_ops.switch_events)
- schedule_delayed_work(&switch_work, oprofile_time_slice);
-}
-
-static void stop_switch_worker(void)
-{
- cancel_delayed_work_sync(&switch_work);
-}
-
-static void switch_worker(struct work_struct *work)
-{
- if (oprofile_ops.switch_events())
- return;
-
- atomic_inc(&oprofile_stats.multiplex_counter);
- start_switch_worker();
-}
-
-/* User input is in ms; convert it to jiffies */
-int oprofile_set_timeout(unsigned long val_msec)
-{
- int err = 0;
- unsigned long time_slice;
-
- mutex_lock(&start_mutex);
-
- if (oprofile_started) {
- err = -EBUSY;
- goto out;
- }
-
- if (!oprofile_ops.switch_events) {
- err = -EINVAL;
- goto out;
- }
-
- time_slice = msecs_to_jiffies(val_msec);
- if (time_slice == MAX_JIFFY_OFFSET) {
- err = -EINVAL;
- goto out;
- }
-
- oprofile_time_slice = time_slice;
-
-out:
- mutex_unlock(&start_mutex);
- return err;
-
-}
-
-#else
-
-static inline void start_switch_worker(void) { }
-static inline void stop_switch_worker(void) { }
-
-#endif
-
-/* Actually start profiling (echo 1>/dev/oprofile/enable) */
-int oprofile_start(void)
-{
- int err = -EINVAL;
-
- mutex_lock(&start_mutex);
-
- if (!is_setup)
- goto out;
-
- err = 0;
-
- if (oprofile_started)
- goto out;
-
- oprofile_reset_stats();
-
- if ((err = oprofile_ops.start()))
- goto out;
-
- start_switch_worker();
-
- oprofile_started = 1;
-out:
- mutex_unlock(&start_mutex);
- return err;
-}
-
-
-/* echo 0>/dev/oprofile/enable */
-void oprofile_stop(void)
-{
- mutex_lock(&start_mutex);
- if (!oprofile_started)
- goto out;
- oprofile_ops.stop();
- oprofile_started = 0;
-
- stop_switch_worker();
-
- /* wake up the daemon to read what remains */
- wake_up_buffer_waiter();
-out:
- mutex_unlock(&start_mutex);
-}
-
-
-void oprofile_shutdown(void)
-{
- mutex_lock(&start_mutex);
- if (oprofile_ops.sync_stop) {
- int sync_ret = oprofile_ops.sync_stop();
- switch (sync_ret) {
- case 0:
- goto post_sync;
- case 1:
- goto do_generic;
- default:
- goto post_sync;
- }
- }
-do_generic:
- sync_stop();
-post_sync:
- if (oprofile_ops.shutdown)
- oprofile_ops.shutdown();
- is_setup = 0;
- free_event_buffer();
- free_cpu_buffers();
- mutex_unlock(&start_mutex);
-}
-
-int oprofile_set_ulong(unsigned long *addr, unsigned long val)
-{
- int err = -EBUSY;
-
- mutex_lock(&start_mutex);
- if (!oprofile_started) {
- *addr = val;
- err = 0;
- }
- mutex_unlock(&start_mutex);
-
- return err;
-}
-
-static int timer_mode;
-
-static int __init oprofile_init(void)
-{
- int err;
-
- /* always init architecture to setup backtrace support */
- timer_mode = 0;
- err = oprofile_arch_init(&oprofile_ops);
- if (!err) {
- if (!timer && !oprofilefs_register())
- return 0;
- oprofile_arch_exit();
- }
-
- /* setup timer mode: */
- timer_mode = 1;
- /* no nmi timer mode if oprofile.timer is set */
- if (timer || op_nmi_timer_init(&oprofile_ops)) {
- err = oprofile_timer_init(&oprofile_ops);
- if (err)
- return err;
- }
-
- return oprofilefs_register();
-}
-
-
-static void __exit oprofile_exit(void)
-{
- oprofilefs_unregister();
- if (!timer_mode)
- oprofile_arch_exit();
-}
-
-
-module_init(oprofile_init);
-module_exit(oprofile_exit);
-
-module_param_named(timer, timer, int, 0644);
-MODULE_PARM_DESC(timer, "force use of timer interrupt");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Levon <levon@movementarian.org>");
-MODULE_DESCRIPTION("OProfile system profiler");
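
oprofile_init() above tries backends in order: the arch-specific PMU driver first, then the perf-based NMI timer, then the plain timer, registering the filesystem once one succeeds. A simplified stand-alone sketch of that fallback chain (the real code also honors the oprofile.timer parameter):

#include <stdio.h>
#include <errno.h>

static int arch_init(void)      { return -ENODEV; }  /* no PMU driver */
static int nmi_timer_init(void) { return -ENODEV; }  /* no NMI timer  */
static int timer_init(void)     { return 0; }        /* always works  */

int main(void)
{
        int (*backends[])(void) = { arch_init, nmi_timer_init, timer_init };
        const char *names[] = { "arch", "nmi-timer", "timer" };

        for (unsigned int i = 0; i < 3; i++) {
                if (backends[i]() == 0) {
                        printf("using %s backend\n", names[i]);
                        return 0;
                }
        }
        return 1;
}
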
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
deleted file mode 100644
index d5412060ab0f..000000000000
--- a/drivers/oprofile/oprof.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * @file oprof.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROF_H
-#define OPROF_H
-
-int oprofile_setup(void);
-void oprofile_shutdown(void);
-
-int oprofilefs_register(void);
-void oprofilefs_unregister(void);
-
-int oprofile_start(void);
-void oprofile_stop(void);
-
-struct oprofile_operations;
-
-extern unsigned long oprofile_buffer_size;
-extern unsigned long oprofile_cpu_buffer_size;
-extern unsigned long oprofile_buffer_watershed;
-extern unsigned long oprofile_time_slice;
-
-extern struct oprofile_operations oprofile_ops;
-extern unsigned long oprofile_started;
-extern unsigned long oprofile_backtrace_depth;
-
-struct dentry;
-
-void oprofile_create_files(struct dentry *root);
-int oprofile_timer_init(struct oprofile_operations *ops);
-#ifdef CONFIG_OPROFILE_NMI_TIMER
-int op_nmi_timer_init(struct oprofile_operations *ops);
-#else
-static inline int op_nmi_timer_init(struct oprofile_operations *ops)
-{
- return -ENODEV;
-}
-#endif
-
-
-int oprofile_set_ulong(unsigned long *addr, unsigned long val);
-int oprofile_set_timeout(unsigned long time);
-
-#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
deleted file mode 100644
index ee2cfce358b9..000000000000
--- a/drivers/oprofile/oprofile_files.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * @file oprofile_files.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/fs.h>
-#include <linux/oprofile.h>
-#include <linux/jiffies.h>
-
-#include "event_buffer.h"
-#include "oprofile_stats.h"
-#include "oprof.h"
-
-#define BUFFER_SIZE_DEFAULT 131072
-#define CPU_BUFFER_SIZE_DEFAULT 8192
-#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
-#define TIME_SLICE_DEFAULT 1
-
-unsigned long oprofile_buffer_size;
-unsigned long oprofile_cpu_buffer_size;
-unsigned long oprofile_buffer_watershed;
-unsigned long oprofile_time_slice;
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static ssize_t timeout_read(struct file *file, char __user *buf,
- size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
- buf, count, offset);
-}
-
-
-static ssize_t timeout_write(struct file *file, char const __user *buf,
- size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = oprofile_set_timeout(val);
-
- if (retval)
- return retval;
- return count;
-}
-
-
-static const struct file_operations timeout_fops = {
- .read = timeout_read,
- .write = timeout_write,
- .llseek = default_llseek,
-};
-
-#endif
-
-
-static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
- offset);
-}
-
-
-static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- if (!oprofile_ops.backtrace)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
- if (retval)
- return retval;
-
- return count;
-}
-
-
-static const struct file_operations depth_fops = {
- .read = depth_read,
- .write = depth_write,
- .llseek = default_llseek,
-};
-
-
-static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
-}
-
-
-static const struct file_operations pointer_size_fops = {
- .read = pointer_size_read,
- .llseek = default_llseek,
-};
-
-
-static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
-}
-
-
-static const struct file_operations cpu_type_fops = {
- .read = cpu_type_read,
- .llseek = default_llseek,
-};
-
-
-static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
-}
-
-
-static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- unsigned long val;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = 0;
- if (val)
- retval = oprofile_start();
- else
- oprofile_stop();
-
- if (retval)
- return retval;
- return count;
-}
-
-
-static const struct file_operations enable_fops = {
- .read = enable_read,
- .write = enable_write,
- .llseek = default_llseek,
-};
-
-
-static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- wake_up_buffer_waiter();
- return count;
-}
-
-
-static const struct file_operations dump_fops = {
- .write = dump_write,
- .llseek = noop_llseek,
-};
-
-void oprofile_create_files(struct dentry *root)
-{
- /* reinitialize default values */
- oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
- oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
- oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
- oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
-
- oprofilefs_create_file(root, "enable", &enable_fops);
- oprofilefs_create_file_perm(root, "dump", &dump_fops, 0666);
- oprofilefs_create_file(root, "buffer", &event_buffer_fops);
- oprofilefs_create_ulong(root, "buffer_size", &oprofile_buffer_size);
- oprofilefs_create_ulong(root, "buffer_watershed", &oprofile_buffer_watershed);
- oprofilefs_create_ulong(root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
- oprofilefs_create_file(root, "cpu_type", &cpu_type_fops);
- oprofilefs_create_file(root, "backtrace_depth", &depth_fops);
- oprofilefs_create_file(root, "pointer_size", &pointer_size_fops);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- oprofilefs_create_file(root, "time_slice", &timeout_fops);
-#endif
- oprofile_create_stats_files(root);
- if (oprofile_ops.create_files)
- oprofile_ops.create_files(root);
-}
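For context, a minimal userspace sketch of how the control files created above were driven. This is illustrative only and not part of the patch; it assumes oprofilefs is mounted at the conventional /dev/oprofile.

/*
 * Hypothetical sketch: read cpu_type, enable profiling, request a
 * buffer flush via the files that oprofile_create_files() exposed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -1;
        /* The *_write handlers above reject writes at a non-zero offset. */
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        char cpu_type[64] = "";
        int fd = open("/dev/oprofile/cpu_type", O_RDONLY);

        if (fd >= 0) {
                ssize_t n = read(fd, cpu_type, sizeof(cpu_type) - 1);
                if (n > 0)
                        cpu_type[n] = '\0';
                close(fd);
        }
        printf("cpu_type: %s\n", cpu_type);

        /* Start profiling, then ask for a buffer flush. */
        if (write_str("/dev/oprofile/enable", "1"))
                return 1;
        return write_str("/dev/oprofile/dump", "1") ? 1 : 0;
}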
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
deleted file mode 100644
index 98a63a5f8763..000000000000
--- a/drivers/oprofile/oprofile_perf.c
+++ /dev/null
@@ -1,328 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2010 ARM Ltd.
- * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
- *
- * Perf-events backend for OProfile.
- */
-#include <linux/perf_event.h>
-#include <linux/platform_device.h>
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-
-/*
- * Per performance monitor configuration as set via oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long unit_mask;
- unsigned long kernel;
- unsigned long user;
- struct perf_event_attr attr;
-};
-
-static int oprofile_perf_enabled;
-static DEFINE_MUTEX(oprofile_perf_mutex);
-
-static struct op_counter_config *counter_config;
-static DEFINE_PER_CPU(struct perf_event **, perf_events);
-static int num_counters;
-
-/*
- * Overflow callback for oprofile.
- */
-static void op_overflow_handler(struct perf_event *event,
- struct perf_sample_data *data, struct pt_regs *regs)
-{
- int id;
- u32 cpu = smp_processor_id();
-
- for (id = 0; id < num_counters; ++id)
- if (per_cpu(perf_events, cpu)[id] == event)
- break;
-
- if (id != num_counters)
- oprofile_add_sample(regs, id);
- else
- pr_warn("oprofile: ignoring spurious overflow on cpu %u\n",
- cpu);
-}
-
-/*
- * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
- * settings in counter_config. Attributes are created as `pinned' events and
- * so are permanently scheduled on the PMU.
- */
-static void op_perf_setup(void)
-{
- int i;
- u32 size = sizeof(struct perf_event_attr);
- struct perf_event_attr *attr;
-
- for (i = 0; i < num_counters; ++i) {
- attr = &counter_config[i].attr;
- memset(attr, 0, size);
- attr->type = PERF_TYPE_RAW;
- attr->size = size;
- attr->config = counter_config[i].event;
- attr->sample_period = counter_config[i].count;
- attr->pinned = 1;
- }
-}
-
-static int op_create_counter(int cpu, int event)
-{
- struct perf_event *pevent;
-
- if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
- return 0;
-
- pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
- cpu, NULL,
- op_overflow_handler, NULL);
-
- if (IS_ERR(pevent))
- return PTR_ERR(pevent);
-
- if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
- perf_event_release_kernel(pevent);
- pr_warn("oprofile: failed to enable event %d on CPU %d\n",
- event, cpu);
- return -EBUSY;
- }
-
- per_cpu(perf_events, cpu)[event] = pevent;
-
- return 0;
-}
-
-static void op_destroy_counter(int cpu, int event)
-{
- struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
-
- if (pevent) {
- perf_event_release_kernel(pevent);
- per_cpu(perf_events, cpu)[event] = NULL;
- }
-}
-
-/*
- * Called by oprofile_perf_start to create active perf events based on the
- * previously configured attributes.
- */
-static int op_perf_start(void)
-{
- int cpu, event, ret = 0;
-
- for_each_online_cpu(cpu) {
- for (event = 0; event < num_counters; ++event) {
- ret = op_create_counter(cpu, event);
- if (ret)
- return ret;
- }
- }
-
- return ret;
-}
-
-/*
- * Called by oprofile_perf_stop at the end of a profiling run.
- */
-static void op_perf_stop(void)
-{
- int cpu, event;
-
- for_each_online_cpu(cpu)
- for (event = 0; event < num_counters; ++event)
- op_destroy_counter(cpu, event);
-}
-
-static int oprofile_perf_create_files(struct dentry *root)
-{
- unsigned int i;
-
- for (i = 0; i < num_counters; i++) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
- }
-
- return 0;
-}
-
-static int oprofile_perf_setup(void)
-{
- raw_spin_lock(&oprofilefs_lock);
- op_perf_setup();
- raw_spin_unlock(&oprofilefs_lock);
- return 0;
-}
-
-static int oprofile_perf_start(void)
-{
- int ret = -EBUSY;
-
- mutex_lock(&oprofile_perf_mutex);
- if (!oprofile_perf_enabled) {
- ret = 0;
- op_perf_start();
- oprofile_perf_enabled = 1;
- }
- mutex_unlock(&oprofile_perf_mutex);
- return ret;
-}
-
-static void oprofile_perf_stop(void)
-{
- mutex_lock(&oprofile_perf_mutex);
- if (oprofile_perf_enabled)
- op_perf_stop();
- oprofile_perf_enabled = 0;
- mutex_unlock(&oprofile_perf_mutex);
-}
-
-#ifdef CONFIG_PM
-
-static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
-{
- mutex_lock(&oprofile_perf_mutex);
- if (oprofile_perf_enabled)
- op_perf_stop();
- mutex_unlock(&oprofile_perf_mutex);
- return 0;
-}
-
-static int oprofile_perf_resume(struct platform_device *dev)
-{
- mutex_lock(&oprofile_perf_mutex);
- if (oprofile_perf_enabled && op_perf_start())
- oprofile_perf_enabled = 0;
- mutex_unlock(&oprofile_perf_mutex);
- return 0;
-}
-
-static struct platform_driver oprofile_driver = {
- .driver = {
- .name = "oprofile-perf",
- },
- .resume = oprofile_perf_resume,
- .suspend = oprofile_perf_suspend,
-};
-
-static struct platform_device *oprofile_pdev;
-
-static int __init init_driverfs(void)
-{
- int ret;
-
- ret = platform_driver_register(&oprofile_driver);
- if (ret)
- return ret;
-
- oprofile_pdev = platform_device_register_simple(
- oprofile_driver.driver.name, 0, NULL, 0);
- if (IS_ERR(oprofile_pdev)) {
- ret = PTR_ERR(oprofile_pdev);
- platform_driver_unregister(&oprofile_driver);
- }
-
- return ret;
-}
-
-static void exit_driverfs(void)
-{
- platform_device_unregister(oprofile_pdev);
- platform_driver_unregister(&oprofile_driver);
-}
-
-#else
-
-static inline int init_driverfs(void) { return 0; }
-static inline void exit_driverfs(void) { }
-
-#endif /* CONFIG_PM */
-
-void oprofile_perf_exit(void)
-{
- int cpu, id;
- struct perf_event *event;
-
- for_each_possible_cpu(cpu) {
- for (id = 0; id < num_counters; ++id) {
- event = per_cpu(perf_events, cpu)[id];
- if (event)
- perf_event_release_kernel(event);
- }
-
- kfree(per_cpu(perf_events, cpu));
- }
-
- kfree(counter_config);
- exit_driverfs();
-}
-
-int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- int cpu, ret = 0;
-
- ret = init_driverfs();
- if (ret)
- return ret;
-
- num_counters = perf_num_counters();
- if (num_counters <= 0) {
- pr_info("oprofile: no performance counters\n");
- ret = -ENODEV;
- goto out;
- }
-
- counter_config = kcalloc(num_counters,
- sizeof(struct op_counter_config), GFP_KERNEL);
-
- if (!counter_config) {
- pr_info("oprofile: failed to allocate %d "
- "counters\n", num_counters);
- ret = -ENOMEM;
- num_counters = 0;
- goto out;
- }
-
- for_each_possible_cpu(cpu) {
- per_cpu(perf_events, cpu) = kcalloc(num_counters,
- sizeof(struct perf_event *), GFP_KERNEL);
- if (!per_cpu(perf_events, cpu)) {
- pr_info("oprofile: failed to allocate %d perf events "
- "for cpu %d\n", num_counters, cpu);
- ret = -ENOMEM;
- goto out;
- }
- }
-
- ops->create_files = oprofile_perf_create_files;
- ops->setup = oprofile_perf_setup;
- ops->start = oprofile_perf_start;
- ops->stop = oprofile_perf_stop;
- ops->shutdown = oprofile_perf_stop;
- ops->cpu_type = op_name_from_perf_id();
-
- if (!ops->cpu_type)
- ret = -ENODEV;
- else
- pr_info("oprofile: using %s\n", ops->cpu_type);
-
-out:
- if (ret)
- oprofile_perf_exit();
-
- return ret;
-}
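The backend above leans entirely on perf_event_create_kernel_counter(). A hedged kernel-context sketch of that call pattern, using the generic cycles event instead of the raw config the deleted code took from oprofilefs; the demo_ names are hypothetical.

/*
 * Sketch, assuming kernel context and a PMU that supports the generic
 * cycles event: create one pinned counter on a CPU with an overflow
 * handler, mirroring op_create_counter() above.
 */
#include <linux/perf_event.h>

static void demo_overflow(struct perf_event *event,
                          struct perf_sample_data *data,
                          struct pt_regs *regs)
{
        /* Runs once per counter overflow, i.e. every sample_period events. */
}

static struct perf_event *demo_create(int cpu, u64 period)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .size           = sizeof(attr),
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .sample_period  = period,
                .pinned         = 1,    /* keep it scheduled on the PMU */
        };

        return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                demo_overflow, NULL);
}

As in op_create_counter(), the return value must be checked with IS_ERR(), and a counter that did not reach PERF_EVENT_STATE_ACTIVE should be released again.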
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
deleted file mode 100644
index 59659cea4582..000000000000
--- a/drivers/oprofile/oprofile_stats.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * @file oprofile_stats.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/threads.h>
-
-#include "oprofile_stats.h"
-#include "cpu_buffer.h"
-
-struct oprofile_stat_struct oprofile_stats;
-
-void oprofile_reset_stats(void)
-{
- struct oprofile_cpu_buffer *cpu_buf;
- int i;
-
- for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(op_cpu_buffer, i);
- cpu_buf->sample_received = 0;
- cpu_buf->sample_lost_overflow = 0;
- cpu_buf->backtrace_aborted = 0;
- cpu_buf->sample_invalid_eip = 0;
- }
-
- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
- atomic_set(&oprofile_stats.event_lost_overflow, 0);
- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
- atomic_set(&oprofile_stats.multiplex_counter, 0);
-}
-
-
-void oprofile_create_stats_files(struct dentry *root)
-{
- struct oprofile_cpu_buffer *cpu_buf;
- struct dentry *cpudir;
- struct dentry *dir;
- char buf[10];
- int i;
-
- dir = oprofilefs_mkdir(root, "stats");
- if (!dir)
- return;
-
- for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(op_cpu_buffer, i);
- snprintf(buf, 10, "cpu%d", i);
- cpudir = oprofilefs_mkdir(dir, buf);
-
- /* Strictly speaking access to these ulongs is racy,
- * but we can't simply lock them, and they are
- * informational only.
- */
- oprofilefs_create_ro_ulong(cpudir, "sample_received",
- &cpu_buf->sample_received);
- oprofilefs_create_ro_ulong(cpudir, "sample_lost_overflow",
- &cpu_buf->sample_lost_overflow);
- oprofilefs_create_ro_ulong(cpudir, "backtrace_aborted",
- &cpu_buf->backtrace_aborted);
- oprofilefs_create_ro_ulong(cpudir, "sample_invalid_eip",
- &cpu_buf->sample_invalid_eip);
- }
-
- oprofilefs_create_ro_atomic(dir, "sample_lost_no_mm",
- &oprofile_stats.sample_lost_no_mm);
- oprofilefs_create_ro_atomic(dir, "sample_lost_no_mapping",
- &oprofile_stats.sample_lost_no_mapping);
- oprofilefs_create_ro_atomic(dir, "event_lost_overflow",
- &oprofile_stats.event_lost_overflow);
- oprofilefs_create_ro_atomic(dir, "bt_lost_no_mapping",
- &oprofile_stats.bt_lost_no_mapping);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- oprofilefs_create_ro_atomic(dir, "multiplex_counter",
- &oprofile_stats.multiplex_counter);
-#endif
-}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
deleted file mode 100644
index 1fc622bd1834..000000000000
--- a/drivers/oprofile/oprofile_stats.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * @file oprofile_stats.h
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- */
-
-#ifndef OPROFILE_STATS_H
-#define OPROFILE_STATS_H
-
-#include <linux/atomic.h>
-
-struct oprofile_stat_struct {
- atomic_t sample_lost_no_mm;
- atomic_t sample_lost_no_mapping;
- atomic_t bt_lost_no_mapping;
- atomic_t event_lost_overflow;
- atomic_t multiplex_counter;
-};
-
-extern struct oprofile_stat_struct oprofile_stats;
-
-/* reset all stats to zero */
-void oprofile_reset_stats(void);
-
-struct dentry;
-
-/* create the stats/ dir */
-void oprofile_create_stats_files(struct dentry *root);
-
-#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
deleted file mode 100644
index 0875f2f122b3..000000000000
--- a/drivers/oprofile/oprofilefs.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * @file oprofilefs.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon
- *
- * A simple filesystem for configuration and
- * access of oprofile.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/oprofile.h>
-#include <linux/fs.h>
-#include <linux/fs_context.h>
-#include <linux/pagemap.h>
-#include <linux/uaccess.h>
-
-#include "oprof.h"
-
-#define OPROFILEFS_MAGIC 0x6f70726f
-
-DEFINE_RAW_SPINLOCK(oprofilefs_lock);
-
-static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
-{
- struct inode *inode = new_inode(sb);
-
- if (inode) {
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- }
- return inode;
-}
-
-
-static const struct super_operations s_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
-};
-
-
-ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
-{
- return simple_read_from_buffer(buf, count, offset, str, strlen(str));
-}
-
-
-#define TMPBUFSIZE 50
-
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
-{
- char tmpbuf[TMPBUFSIZE];
- size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
- if (maxlen > TMPBUFSIZE)
- maxlen = TMPBUFSIZE;
- return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
-}
-
-
-/*
- * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
- * unchanged and might be uninitialized. This follows write syscall
- * implementation when count is zero: "If count is zero ... [and if]
- * no errors are detected, 0 will be returned without causing any
- * other effect." (man 2 write)
- */
-int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
-{
- char tmpbuf[TMPBUFSIZE];
- unsigned long flags;
-
- if (!count)
- return 0;
-
- if (count > TMPBUFSIZE - 1)
- return -EINVAL;
-
- memset(tmpbuf, 0x0, TMPBUFSIZE);
-
- if (copy_from_user(tmpbuf, buf, count))
- return -EFAULT;
-
- raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- *val = simple_strtoul(tmpbuf, NULL, 0);
- raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return count;
-}
-
-
-static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- unsigned long *val = file->private_data;
- return oprofilefs_ulong_to_user(*val, buf, count, offset);
-}
-
-
-static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
-{
- unsigned long value;
- int retval;
-
- if (*offset)
- return -EINVAL;
-
- retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval <= 0)
- return retval;
-
- retval = oprofile_set_ulong(file->private_data, value);
- if (retval)
- return retval;
-
- return count;
-}
-
-
-static const struct file_operations ulong_fops = {
- .read = ulong_read_file,
- .write = ulong_write_file,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-
-static const struct file_operations ulong_ro_fops = {
- .read = ulong_read_file,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-
-static int __oprofilefs_create_file(struct dentry *root, char const *name,
- const struct file_operations *fops, int perm, void *priv)
-{
- struct dentry *dentry;
- struct inode *inode;
-
- if (!root)
- return -ENOMEM;
-
- inode_lock(d_inode(root));
- dentry = d_alloc_name(root, name);
- if (!dentry) {
- inode_unlock(d_inode(root));
- return -ENOMEM;
- }
- inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
- if (!inode) {
- dput(dentry);
- inode_unlock(d_inode(root));
- return -ENOMEM;
- }
- inode->i_fop = fops;
- inode->i_private = priv;
- d_add(dentry, inode);
- inode_unlock(d_inode(root));
- return 0;
-}
-
-
-int oprofilefs_create_ulong(struct dentry *root,
- char const *name, unsigned long *val)
-{
- return __oprofilefs_create_file(root, name,
- &ulong_fops, 0644, val);
-}
-
-
-int oprofilefs_create_ro_ulong(struct dentry *root,
- char const *name, unsigned long *val)
-{
- return __oprofilefs_create_file(root, name,
- &ulong_ro_fops, 0444, val);
-}
-
-
-static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- atomic_t *val = file->private_data;
- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
-}
-
-
-static const struct file_operations atomic_ro_fops = {
- .read = atomic_read_file,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-
-int oprofilefs_create_ro_atomic(struct dentry *root,
- char const *name, atomic_t *val)
-{
- return __oprofilefs_create_file(root, name,
- &atomic_ro_fops, 0444, val);
-}
-
-
-int oprofilefs_create_file(struct dentry *root,
- char const *name, const struct file_operations *fops)
-{
- return __oprofilefs_create_file(root, name, fops, 0644, NULL);
-}
-
-
-int oprofilefs_create_file_perm(struct dentry *root,
- char const *name, const struct file_operations *fops, int perm)
-{
- return __oprofilefs_create_file(root, name, fops, perm, NULL);
-}
-
-
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
-{
- struct dentry *dentry;
- struct inode *inode;
-
- inode_lock(d_inode(parent));
- dentry = d_alloc_name(parent, name);
- if (!dentry) {
- inode_unlock(d_inode(parent));
- return NULL;
- }
- inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
- if (!inode) {
- dput(dentry);
- inode_unlock(d_inode(parent));
- return NULL;
- }
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- d_add(dentry, inode);
- inode_unlock(d_inode(parent));
- return dentry;
-}
-
-
-static int oprofilefs_fill_super(struct super_block *sb, struct fs_context *fc)
-{
- struct inode *root_inode;
-
- sb->s_blocksize = PAGE_SIZE;
- sb->s_blocksize_bits = PAGE_SHIFT;
- sb->s_magic = OPROFILEFS_MAGIC;
- sb->s_op = &s_ops;
- sb->s_time_gran = 1;
-
- root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
- if (!root_inode)
- return -ENOMEM;
- root_inode->i_op = &simple_dir_inode_operations;
- root_inode->i_fop = &simple_dir_operations;
- sb->s_root = d_make_root(root_inode);
- if (!sb->s_root)
- return -ENOMEM;
-
- oprofile_create_files(sb->s_root);
-
- // FIXME: verify kill_litter_super removes our dentries
- return 0;
-}
-
-static int oprofilefs_get_tree(struct fs_context *fc)
-{
- return get_tree_single(fc, oprofilefs_fill_super);
-}
-
-static const struct fs_context_operations oprofilefs_context_ops = {
- .get_tree = oprofilefs_get_tree,
-};
-
-static int oprofilefs_init_fs_context(struct fs_context *fc)
-{
- fc->ops = &oprofilefs_context_ops;
- return 0;
-}
-
-static struct file_system_type oprofilefs_type = {
- .owner = THIS_MODULE,
- .name = "oprofilefs",
- .init_fs_context = oprofilefs_init_fs_context,
- .kill_sb = kill_litter_super,
-};
-MODULE_ALIAS_FS("oprofilefs");
-
-
-int __init oprofilefs_register(void)
-{
- return register_filesystem(&oprofilefs_type);
-}
-
-
-void __exit oprofilefs_unregister(void)
-{
- unregister_filesystem(&oprofilefs_type);
-}
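Since the filesystem registers under the name "oprofilefs", mounting it from userspace was a single mount(2) call. A sketch, assuming the conventional /dev/oprofile mount point; equivalent to `mount -t oprofilefs nodev /dev/oprofile`.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Source "nodev" is a dummy; oprofilefs is a single-instance fs. */
        if (mount("nodev", "/dev/oprofile", "oprofilefs", 0, NULL)) {
                perror("mount oprofilefs");
                return 1;
        }
        return 0;
}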
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
deleted file mode 100644
index 2498a6cd7c24..000000000000
--- a/drivers/oprofile/timer_int.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * @file timer_int.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/notifier.h>
-#include <linux/smp.h>
-#include <linux/oprofile.h>
-#include <linux/profile.h>
-#include <linux/init.h>
-#include <linux/cpu.h>
-#include <linux/hrtimer.h>
-#include <asm/irq_regs.h>
-#include <asm/ptrace.h>
-
-#include "oprof.h"
-
-static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
-static int ctr_running;
-
-static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
-{
- oprofile_add_sample(get_irq_regs(), 0);
- hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
- return HRTIMER_RESTART;
-}
-
-static void __oprofile_hrtimer_start(void *unused)
-{
- struct hrtimer *hrtimer = this_cpu_ptr(&oprofile_hrtimer);
-
- if (!ctr_running)
- return;
-
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = oprofile_hrtimer_notify;
-
- hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
- HRTIMER_MODE_REL_PINNED);
-}
-
-static int oprofile_hrtimer_start(void)
-{
- get_online_cpus();
- ctr_running = 1;
- on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
- put_online_cpus();
- return 0;
-}
-
-static void __oprofile_hrtimer_stop(int cpu)
-{
- struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
-
- if (!ctr_running)
- return;
-
- hrtimer_cancel(hrtimer);
-}
-
-static void oprofile_hrtimer_stop(void)
-{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- __oprofile_hrtimer_stop(cpu);
- ctr_running = 0;
- put_online_cpus();
-}
-
-static int oprofile_timer_online(unsigned int cpu)
-{
- local_irq_disable();
- __oprofile_hrtimer_start(NULL);
- local_irq_enable();
- return 0;
-}
-
-static int oprofile_timer_prep_down(unsigned int cpu)
-{
- __oprofile_hrtimer_stop(cpu);
- return 0;
-}
-
-static enum cpuhp_state hp_online;
-
-static int oprofile_hrtimer_setup(void)
-{
- int ret;
-
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "oprofile/timer:online",
- oprofile_timer_online,
- oprofile_timer_prep_down);
- if (ret < 0)
- return ret;
- hp_online = ret;
- return 0;
-}
-
-static void oprofile_hrtimer_shutdown(void)
-{
- cpuhp_remove_state_nocalls(hp_online);
-}
-
-int oprofile_timer_init(struct oprofile_operations *ops)
-{
- ops->create_files = NULL;
- ops->setup = oprofile_hrtimer_setup;
- ops->shutdown = oprofile_hrtimer_shutdown;
- ops->start = oprofile_hrtimer_start;
- ops->stop = oprofile_hrtimer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
- return 0;
-}
diff --git a/fs/Makefile b/fs/Makefile
index 999d1a23f036..3215fe205256 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -64,7 +64,6 @@ obj-$(CONFIG_SYSFS) += sysfs/
obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
-obj-$(CONFIG_PROFILING) += dcookies.o
obj-$(CONFIG_DLM) += dlm/
# Do not add any filesystems before this line
diff --git a/fs/dcookies.c b/fs/dcookies.c
deleted file mode 100644
index 6eeb61100a09..000000000000
--- a/fs/dcookies.c
+++ /dev/null
@@ -1,356 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * dcookies.c
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- *
- * Persistent cookie-path mappings. These are used by
- * profilers to convert a per-task EIP value into something
- * non-transitory that can be processed at a later date.
- * This is done by locking the dentry/vfsmnt pair in the
- * kernel until released by the tasks needing the persistent
- * objects. The tag is simply an unsigned long that refers
- * to the pair and can be looked up from userspace.
- */
-
-#include <linux/syscalls.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/mount.h>
-#include <linux/capability.h>
-#include <linux/dcache.h>
-#include <linux/mm.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/dcookies.h>
-#include <linux/mutex.h>
-#include <linux/path.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-
-/* The dcookies are allocated from a kmem_cache and
- * hashed onto a small number of lists. None of the
- * code here is particularly performance critical
- */
-struct dcookie_struct {
- struct path path;
- struct list_head hash_list;
-};
-
-static LIST_HEAD(dcookie_users);
-static DEFINE_MUTEX(dcookie_mutex);
-static struct kmem_cache *dcookie_cache __read_mostly;
-static struct list_head *dcookie_hashtable __read_mostly;
-static size_t hash_size __read_mostly;
-
-static inline int is_live(void)
-{
- return !(list_empty(&dcookie_users));
-}
-
-
-/* The dentry is locked, its address will do for the cookie */
-static inline unsigned long dcookie_value(struct dcookie_struct * dcs)
-{
- return (unsigned long)dcs->path.dentry;
-}
-
-
-static size_t dcookie_hash(unsigned long dcookie)
-{
- return (dcookie >> L1_CACHE_SHIFT) & (hash_size - 1);
-}
-
-
-static struct dcookie_struct * find_dcookie(unsigned long dcookie)
-{
- struct dcookie_struct *found = NULL;
- struct dcookie_struct * dcs;
- struct list_head * pos;
- struct list_head * list;
-
- list = dcookie_hashtable + dcookie_hash(dcookie);
-
- list_for_each(pos, list) {
- dcs = list_entry(pos, struct dcookie_struct, hash_list);
- if (dcookie_value(dcs) == dcookie) {
- found = dcs;
- break;
- }
- }
-
- return found;
-}
-
-
-static void hash_dcookie(struct dcookie_struct * dcs)
-{
- struct list_head * list = dcookie_hashtable + dcookie_hash(dcookie_value(dcs));
- list_add(&dcs->hash_list, list);
-}
-
-
-static struct dcookie_struct *alloc_dcookie(const struct path *path)
-{
- struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache,
- GFP_KERNEL);
- struct dentry *d;
- if (!dcs)
- return NULL;
-
- d = path->dentry;
- spin_lock(&d->d_lock);
- d->d_flags |= DCACHE_COOKIE;
- spin_unlock(&d->d_lock);
-
- dcs->path = *path;
- path_get(path);
- hash_dcookie(dcs);
- return dcs;
-}
-
-
-/* This is the main kernel-side routine that retrieves the cookie
- * value for a dentry/vfsmnt pair.
- */
-int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- int err = 0;
- struct dcookie_struct * dcs;
-
- mutex_lock(&dcookie_mutex);
-
- if (!is_live()) {
- err = -EINVAL;
- goto out;
- }
-
- if (path->dentry->d_flags & DCACHE_COOKIE) {
- dcs = find_dcookie((unsigned long)path->dentry);
- } else {
- dcs = alloc_dcookie(path);
- if (!dcs) {
- err = -ENOMEM;
- goto out;
- }
- }
-
- *cookie = dcookie_value(dcs);
-
-out:
- mutex_unlock(&dcookie_mutex);
- return err;
-}
-
-
-/* And here is where the userspace process can look up the cookie value
- * to retrieve the path.
- */
-static int do_lookup_dcookie(u64 cookie64, char __user *buf, size_t len)
-{
- unsigned long cookie = (unsigned long)cookie64;
- int err = -EINVAL;
- char * kbuf;
- char * path;
- size_t pathlen;
- struct dcookie_struct * dcs;
-
- /* we could leak path information to users
- * without dir read permission without this
- */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- mutex_lock(&dcookie_mutex);
-
- if (!is_live()) {
- err = -EINVAL;
- goto out;
- }
-
- if (!(dcs = find_dcookie(cookie)))
- goto out;
-
- err = -ENOMEM;
- kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!kbuf)
- goto out;
-
- /* FIXME: (deleted) ? */
- path = d_path(&dcs->path, kbuf, PAGE_SIZE);
-
- mutex_unlock(&dcookie_mutex);
-
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out_free;
- }
-
- err = -ERANGE;
-
- pathlen = kbuf + PAGE_SIZE - path;
- if (pathlen <= len) {
- err = pathlen;
- if (copy_to_user(buf, path, pathlen))
- err = -EFAULT;
- }
-
-out_free:
- kfree(kbuf);
- return err;
-out:
- mutex_unlock(&dcookie_mutex);
- return err;
-}
-
-SYSCALL_DEFINE3(lookup_dcookie, u64, cookie64, char __user *, buf, size_t, len)
-{
- return do_lookup_dcookie(cookie64, buf, len);
-}
-
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
-{
-#ifdef __BIG_ENDIAN
- return do_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
-#else
- return do_lookup_dcookie(((u64)w1 << 32) | w0, buf, len);
-#endif
-}
-#endif
-
-static int dcookie_init(void)
-{
- struct list_head * d;
- unsigned int i, hash_bits;
- int err = -ENOMEM;
-
- dcookie_cache = kmem_cache_create("dcookie_cache",
- sizeof(struct dcookie_struct),
- 0, 0, NULL);
-
- if (!dcookie_cache)
- goto out;
-
- dcookie_hashtable = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!dcookie_hashtable)
- goto out_kmem;
-
- err = 0;
-
- /*
- * Find the power-of-two number of list-heads that can fit into the allocation.
- * We don't guarantee that "sizeof(struct list_head)" is necessarily
- * a power-of-two.
- */
- hash_size = PAGE_SIZE / sizeof(struct list_head);
- hash_bits = 0;
- do {
- hash_bits++;
- } while ((hash_size >> hash_bits) != 0);
- hash_bits--;
-
- /*
- * Re-calculate the actual number of entries and the mask
- * from the number of bits we can fit.
- */
- hash_size = 1UL << hash_bits;
-
- /* And initialize the newly allocated array */
- d = dcookie_hashtable;
- i = hash_size;
- do {
- INIT_LIST_HEAD(d);
- d++;
- i--;
- } while (i);
-
-out:
- return err;
-out_kmem:
- kmem_cache_destroy(dcookie_cache);
- goto out;
-}
-
-
-static void free_dcookie(struct dcookie_struct * dcs)
-{
- struct dentry *d = dcs->path.dentry;
-
- spin_lock(&d->d_lock);
- d->d_flags &= ~DCACHE_COOKIE;
- spin_unlock(&d->d_lock);
-
- path_put(&dcs->path);
- kmem_cache_free(dcookie_cache, dcs);
-}
-
-
-static void dcookie_exit(void)
-{
- struct list_head * list;
- struct list_head * pos;
- struct list_head * pos2;
- struct dcookie_struct * dcs;
- size_t i;
-
- for (i = 0; i < hash_size; ++i) {
- list = dcookie_hashtable + i;
- list_for_each_safe(pos, pos2, list) {
- dcs = list_entry(pos, struct dcookie_struct, hash_list);
- list_del(&dcs->hash_list);
- free_dcookie(dcs);
- }
- }
-
- kfree(dcookie_hashtable);
- kmem_cache_destroy(dcookie_cache);
-}
-
-
-struct dcookie_user {
- struct list_head next;
-};
-
-struct dcookie_user * dcookie_register(void)
-{
- struct dcookie_user * user;
-
- mutex_lock(&dcookie_mutex);
-
- user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
- if (!user)
- goto out;
-
- if (!is_live() && dcookie_init())
- goto out_free;
-
- list_add(&user->next, &dcookie_users);
-
-out:
- mutex_unlock(&dcookie_mutex);
- return user;
-out_free:
- kfree(user);
- user = NULL;
- goto out;
-}
-
-
-void dcookie_unregister(struct dcookie_user * user)
-{
- mutex_lock(&dcookie_mutex);
-
- list_del(&user->next);
- kfree(user);
-
- if (!is_live())
- dcookie_exit();
-
- mutex_unlock(&dcookie_mutex);
-}
-
-EXPORT_SYMBOL_GPL(dcookie_register);
-EXPORT_SYMBOL_GPL(dcookie_unregister);
-EXPORT_SYMBOL_GPL(get_dcookie);
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
deleted file mode 100644
index ddfdac20cad0..000000000000
--- a/include/linux/dcookies.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * dcookies.h
- *
- * Persistent cookie-path mappings
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- */
-
-#ifndef DCOOKIES_H
-#define DCOOKIES_H
-
-
-#ifdef CONFIG_PROFILING
-
-#include <linux/dcache.h>
-#include <linux/types.h>
-
-struct dcookie_user;
-struct path;
-
-/**
- * dcookie_register - register a user of dcookies
- *
- * Register as a dcookie user. Returns %NULL on failure.
- */
-struct dcookie_user * dcookie_register(void);
-
-/**
- * dcookie_unregister - unregister a user of dcookies
- *
- * Unregister as a dcookie user. This may invalidate
- * any dcookie values returned from get_dcookie().
- */
-void dcookie_unregister(struct dcookie_user * user);
-
-/**
- * get_dcookie - acquire a dcookie
- *
- * Convert the given dentry/vfsmount pair into
- * a cookie value.
- *
- * Returns -EINVAL if no living task has registered as a
- * dcookie user.
- *
- * Returns 0 on success, with *cookie filled in
- */
-int get_dcookie(const struct path *path, unsigned long *cookie);
-
-#else
-
-static inline struct dcookie_user * dcookie_register(void)
-{
- return NULL;
-}
-
-static inline void dcookie_unregister(struct dcookie_user * user)
-{
- return;
-}
-
-static inline int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_PROFILING */
-
-#endif /* DCOOKIES_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
deleted file mode 100644
index b2a0f15f11fe..000000000000
--- a/include/linux/oprofile.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file oprofile.h
- *
- * API for machine-specific interrupts to interface
- * to oprofile.
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_H
-#define OPROFILE_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/atomic.h>
-
-/* Each escaped entry is prefixed by ESCAPE_CODE
- * then one of the following codes, then the
- * relevant data.
- * These #defines live in this file so that arch-specific
- * buffer sync'ing code can access them.
- */
-#define ESCAPE_CODE ~0UL
-#define CTX_SWITCH_CODE 1
-#define CPU_SWITCH_CODE 2
-#define COOKIE_SWITCH_CODE 3
-#define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
-#define MODULE_LOADED_CODE 6
-#define CTX_TGID_CODE 7
-#define TRACE_BEGIN_CODE 8
-#define TRACE_END_CODE 9
-#define XEN_ENTER_SWITCH_CODE 10
-#define SPU_PROFILING_CODE 11
-#define SPU_CTX_SWITCH_CODE 12
-#define IBS_FETCH_CODE 13
-#define IBS_OP_CODE 14
-
-struct dentry;
-struct file_operations;
-struct pt_regs;
-
-/* Operations structure to be filled in */
-struct oprofile_operations {
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct dentry * root);
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
- void (*shutdown)(void);
- /* Start delivering interrupts. */
- int (*start)(void);
- /* Stop delivering interrupts. */
- void (*stop)(void);
- /* Arch-specific buffer sync functions.
- * Return value = 0: Success
- * Return value = -1: Failure
- * Return value = 1: Run generic sync function
- */
- int (*sync_start)(void);
- int (*sync_stop)(void);
-
- /* Initiate a stack backtrace. Optional. */
- void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
-
- /* Multiplex between different events. Optional. */
- int (*switch_events)(void);
- /* CPU identification string. */
- char * cpu_type;
-};
-
-/**
- * One-time initialisation. *ops must be set to a filled-in
- * operations structure. This is called even in timer interrupt
- * mode so an arch can set a backtrace callback.
- *
- * If an error occurs, the fields should be left untouched.
- */
-int oprofile_arch_init(struct oprofile_operations * ops);
-
-/**
- * One-time exit/cleanup for the arch.
- */
-void oprofile_arch_exit(void);
-
-/**
- * Add a sample. This may be called from any context.
- */
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
-
-/**
- * Add an extended sample. Use this when the PC is not from the regs, and
- * we cannot determine if we're in kernel mode from the regs.
- *
- * This function does perform a backtrace.
- *
- */
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel);
-
-/**
- * Add a hardware sample.
- */
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task);
-
-/* Use this instead when the PC value is not from the regs. Doesn't
- * backtrace. */
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
-
-/* add a backtrace entry, to be called from the ->backtrace callback */
-void oprofile_add_trace(unsigned long eip);
-
-
-/**
- * Create a file of the given name as a child of the given root, with
- * the specified file operations.
- */
-int oprofilefs_create_file(struct dentry * root,
- char const * name, const struct file_operations * fops);
-
-int oprofilefs_create_file_perm(struct dentry * root,
- char const * name, const struct file_operations * fops, int perm);
-
-/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct dentry * root,
- char const * name, atomic_t * val);
-
-/** create a directory */
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
-
-/**
- * Write the given asciz string to the given user buffer @buf, updating *offset
- * appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
- * updating *offset appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Read an ASCII string for a number from a userspace buffer and fill *val on success.
- * Returns 0 on success, < 0 on error.
- */
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-
-/** lock for read/write safety */
-extern raw_spinlock_t oprofilefs_lock;
-
-/**
- * Add the contents of a circular buffer to the event buffer.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max);
-
-unsigned long oprofile_get_cpu_buffer_size(void);
-void oprofile_cpu_buffer_inc_smpl_lost(void);
-
-/* cpu buffer functions */
-
-struct op_sample;
-
-struct op_entry {
- struct ring_buffer_event *event;
- struct op_sample *sample;
- unsigned long size;
- unsigned long *data;
-};
-
-void oprofile_write_reserve(struct op_entry *entry,
- struct pt_regs * const regs,
- unsigned long pc, int code, int size);
-int oprofile_add_data(struct op_entry *entry, unsigned long val);
-int oprofile_add_data64(struct op_entry *entry, u64 val);
-int oprofile_write_commit(struct op_entry *entry);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-int __init oprofile_perf_init(struct oprofile_operations *ops);
-void oprofile_perf_exit(void);
-char *op_name_from_perf_id(void);
-#else
-static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- pr_info("oprofile: hardware counters not available\n");
- return -ENODEV;
-}
-static inline void oprofile_perf_exit(void) { }
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* OPROFILE_H */
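A minimal sketch of the arch contract this header defined: oprofile_arch_init() fills in the callbacks it supports and returns 0, or returns an error and leaves *ops untouched so the core can fall back to the timer backend. All demo_ names are hypothetical.

#include <linux/oprofile.h>

static int demo_start(void)
{
        return 0;       /* start delivering samples */
}

static void demo_stop(void)
{
        /* stop delivering samples */
}

int oprofile_arch_init(struct oprofile_operations *ops)
{
        ops->start    = demo_start;
        ops->stop     = demo_stop;
        ops->cpu_type = "demo/fake";    /* hypothetical cpu_type string */
        return 0;
}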
diff --git a/init/Kconfig b/init/Kconfig
index 29ad68325028..17e955fdec97 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2023,7 +2023,7 @@ config PROFILING
bool "Profiling support"
help
Say Y here to enable the extended profiling support mechanisms used
- by profilers such as OProfile.
+ by profilers.
#
# Place an empty function call at each tracepoint site. Can be
diff --git a/kernel/sys.c b/kernel/sys.c
index 51f00fe20e4d..6928d23c46ea 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -24,7 +24,6 @@
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
-#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
diff --git a/usr/include/Makefile b/usr/include/Makefile
index f6b3c85d900e..1c2ae1368079 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -67,8 +67,6 @@ endif
ifeq ($(SRCARCH),ia64)
no-header-test += asm/setup.h
no-header-test += asm/sigcontext.h
-no-header-test += asm/perfmon.h
-no-header-test += asm/perfmon_default_smpl.h
no-header-test += linux/if_bonding.h
endif