author		Mark Brown <broonie@kernel.org>		2020-02-18 19:58:39 +0000
committer	Catalin Marinas <catalin.marinas@arm.com>	2020-03-09 17:35:43 +0000
commit		4db61fef16a104f94bde24fe163064b98cee6b7c (patch)
tree		ceed464120ef2bf90a00910c21aadb6be5f40c75 /arch/arm64/kvm/hyp/hyp-entry.S
parent		arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs (diff)
arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations
In an effort to clarify and simplify the annotation of assembly functions in the kernel, new macros have been introduced. These replace ENTRY and ENDPROC with separate annotations for standard C callable functions, data, and code with different calling conventions.

Using these for __smccc_workaround_1_smc is more involved than for most symbols, as this symbol is annotated quite unusually: rather than just having the explicit symbol, we define _start and _end symbols which we then use to compute the length. This does not play at all nicely with the new style macros. Instead, define a constant for the size of the function and use that both in the C code and for the .org based size checks in the assembly code.

Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
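For context, the C-side half of this change is not visible in the diff below, which is limited to hyp-entry.S. The sketch that follows is illustrative rather than a quote of the patched files: the __smccc_workaround_1_smc symbol and the __SMCCC_WORKAROUND_1_SMC_SZ constant come from the patch, while the copy_workaround_1() helper is hypothetical and only shows how the copy length moves from _end - _start pointer arithmetic to the shared constant.

	#include <linux/string.h>	/* memcpy() */

	/* Size constant shared between the C code and the .org checks in the
	 * assembly; in the patch this is 36, covering the nine 4-byte
	 * instructions of __smccc_workaround_1_smc. */
	#define __SMCCC_WORKAROUND_1_SMC_SZ	36

	/* A single symbol now replaces the old _start/_end pair. */
	extern char __smccc_workaround_1_smc[];

	/* Hypothetical helper: the copy length is taken from the constant
	 * instead of being computed as __smccc_workaround_1_smc_end -
	 * __smccc_workaround_1_smc_start. */
	static void copy_workaround_1(void *dst)
	{
		memcpy(dst, __smccc_workaround_1_smc, __SMCCC_WORKAROUND_1_SMC_SZ);
	}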
Diffstat (limited to 'arch/arm64/kvm/hyp/hyp-entry.S')
-rw-r--r--	arch/arm64/kvm/hyp/hyp-entry.S	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 1e2ab928a92f..c2a13ab3c471 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -322,7 +322,7 @@ SYM_CODE_END(__bp_harden_hyp_vecs)
 
 	.popsection
 
-ENTRY(__smccc_workaround_1_smc_start)
+SYM_CODE_START(__smccc_workaround_1_smc)
 	esb
 	sub	sp, sp, #(8 * 4)
 	stp	x2, x3, [sp, #(8 * 0)]
@@ -332,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start)
 	ldp	x2, x3, [sp, #(8 * 0)]
 	ldp	x0, x1, [sp, #(8 * 2)]
 	add	sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+1:	.org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+	.org 1b
+SYM_CODE_END(__smccc_workaround_1_smc)
 #endif
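A note on the two .org directives introduced above: together they act as a build-time assertion that the function is exactly __SMCCC_WORKAROUND_1_SMC_SZ bytes. The first .org fails if the code has grown beyond the declared size, because the assembler refuses to move the location counter backwards; otherwise it pads forward to the declared end, and the second .org 1b then fails if the code was smaller than declared, since label 1 now lies behind the location counter. A minimal standalone GNU as sketch of the same idiom, with a made-up symbol and size, looks like this:

	.equ	MY_FUNC_SZ, 8		/* expected size: two 4-byte instructions */

	.text
	.globl	my_func
my_func:
	nop
	nop
1:	.org	my_func + MY_FUNC_SZ	/* assembler error if the code is larger
					   than MY_FUNC_SZ: .org cannot move the
					   location counter backwards */
	.org	1b			/* assembler error if the code is smaller:
					   the previous .org padded past label 1 */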