author     Dave Martin <Dave.Martin@arm.com>    2018-04-12 16:47:20 +0100
committer  Marc Zyngier <marc.zyngier@arm.com>  2018-05-25 12:28:29 +0100
commit     31dc52b3c8faf47bf3ff5ced661488a20e5d1811 (patch)
tree       39ec9ba8565964bcceae7494bfdb8e7c1c94cb3c /arch/arm64/include
parent     KVM: arm64: Optimise FPSIMD handling to reduce guest/host thrashing (diff)
arm64/sve: Move read_zcr_features() out of cpufeature.h
Having read_zcr_features() inline in cpufeature.h results in that header requiring #includes which make it hard to include <asm/fpsimd.h> elsewhere without triggering header inclusion cycles.

This is not a hot-path function and arguably should not be in cpufeature.h in the first place, so this patch moves it to fpsimd.c, compiled conditionally if CONFIG_ARM64_SVE=y.

This allows some SVE-related #includes to be dropped from cpufeature.h, which will ease future maintenance.

A couple of missing #includes of <asm/fpsimd.h> are exposed by this change under arch/arm64/. This patch adds the missing #includes as necessary.

No functional change.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  29
-rw-r--r--  arch/arm64/include/asm/fpsimd.h       2
-rw-r--r--  arch/arm64/include/asm/processor.h    1
3 files changed, 3 insertions, 29 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 09b0f2a80c8f..0a6b7133195e 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,9 +11,7 @@
#include <asm/cpucaps.h>
#include <asm/cputype.h>
-#include <asm/fpsimd.h>
#include <asm/hwcap.h>
-#include <asm/sigcontext.h>
#include <asm/sysreg.h>
/*
@@ -510,33 +508,6 @@ static inline bool system_supports_sve(void)
cpus_have_const_cap(ARM64_SVE);
}
-/*
- * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
- * vector length.
- *
- * Use only if SVE is present.
- * This function clobbers the SVE vector length.
- */
-static inline u64 read_zcr_features(void)
-{
- u64 zcr;
- unsigned int vq_max;
-
- /*
- * Set the maximum possible VL, and write zeroes to all other
- * bits to see if they stick.
- */
- sve_kernel_enable(NULL);
- write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
-
- zcr = read_sysreg_s(SYS_ZCR_EL1);
- zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
- vq_max = sve_vq_from_vl(sve_get_vl());
- zcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
- return zcr;
-}
-
#endif /* __ASSEMBLY__ */
#endif
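
The relocated definition itself is outside this diffstat (which is limited to arch/arm64/include), but based on the body removed from cpufeature.h above, and on the commit message's note that it is compiled conditionally if CONFIG_ARM64_SVE=y, the out-of-line version in arch/arm64/kernel/fpsimd.c would look roughly like the sketch below. The exact guard and placement within fpsimd.c are assumptions here, not shown by this patch.

/*
 * Sketch only: assumed out-of-line form in arch/arm64/kernel/fpsimd.c,
 * built when CONFIG_ARM64_SVE=y; body taken from the hunk removed above.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK;	/* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1;		/* set LEN field to maximum effective value */

	return zcr;
}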
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 3e00f701cb9c..fb60b22b8bbf 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,6 +69,8 @@ extern unsigned int sve_get_vl(void);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern u64 read_zcr_features(void);
+
extern int __ro_after_init sve_max_vl;
#ifdef CONFIG_ARM64_SVE
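
With the prototype now exported from <asm/fpsimd.h> instead of being defined in cpufeature.h, callers must include <asm/fpsimd.h> themselves rather than relying on cpufeature.h to pull it in; the processor.h hunk below is one such fix exposed by this change. A minimal, hypothetical caller (the function name here is illustrative only, not part of this patch) would look like:

#include <asm/fpsimd.h>		/* now required explicitly for read_zcr_features() */

/* Hypothetical example caller, not part of this patch */
static u64 example_probe_zcr(void)
{
	/* Use only if SVE is present; this clobbers the SVE vector length */
	return read_zcr_features();
}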
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 36d64f83cdfb..9231b8762ca6 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -40,6 +40,7 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>