Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/.gitignore                |   4
-rw-r--r--  arch/x86/lib/Makefile                  |  29
-rw-r--r--  arch/x86/lib/atomic64_cx8_32.S         |   9
-rw-r--r--  arch/x86/lib/bhi.S                     | 147
-rw-r--r--  arch/x86/lib/cache-smp.c               |  27
-rw-r--r--  arch/x86/lib/checksum_32.S             |   2
-rw-r--r--  arch/x86/lib/clear_page_64.S           | 100
-rw-r--r--  arch/x86/lib/cmdline.c                 |  29
-rw-r--r--  arch/x86/lib/cmpxchg16b_emu.S          |  43
-rw-r--r--  arch/x86/lib/cmpxchg8b_emu.S           |  81
-rw-r--r--  arch/x86/lib/copy_mc.c                 |  29
-rw-r--r--  arch/x86/lib/copy_page_64.S            |   5
-rw-r--r--  arch/x86/lib/copy_user_64.S            | 468
-rw-r--r--  arch/x86/lib/copy_user_uncached_64.S   | 244
-rw-r--r--  arch/x86/lib/csum-partial_64.c         | 102
-rw-r--r--  arch/x86/lib/csum-wrappers_64.c        |   5
-rw-r--r--  arch/x86/lib/delay.c                   |   6
-rw-r--r--  arch/x86/lib/error-inject.c            |   1
-rw-r--r--  arch/x86/lib/getuser.S                 | 142
-rw-r--r--  arch/x86/lib/hweight.S                 |  25
-rw-r--r--  arch/x86/lib/insn-eval.c               |  46
-rw-r--r--  arch/x86/lib/insn.c                    |  96
-rw-r--r--  arch/x86/lib/iomap_copy_64.S           |  15
-rw-r--r--  arch/x86/lib/iomem.c                   |  10
-rw-r--r--  arch/x86/lib/kaslr.c                   |   2
-rw-r--r--  arch/x86/lib/memcpy_32.c               | 187
-rw-r--r--  arch/x86/lib/memcpy_64.S               |  45
-rw-r--r--  arch/x86/lib/memmove_32.S              | 200
-rw-r--r--  arch/x86/lib/memmove_64.S              |  22
-rw-r--r--  arch/x86/lib/memset_64.S               |  57
-rw-r--r--  arch/x86/lib/misc.c                    |   4
-rw-r--r--  arch/x86/lib/msr-reg.S                 |   3
-rw-r--r--  arch/x86/lib/msr-smp.c                 |  28
-rw-r--r--  arch/x86/lib/msr.c                     |  52
-rw-r--r--  arch/x86/lib/putuser.S                 | 116
-rw-r--r--  arch/x86/lib/retpoline.S               | 369
-rw-r--r--  arch/x86/lib/string_32.c               |  17
-rw-r--r--  arch/x86/lib/strstr_32.c               |   6
-rw-r--r--  arch/x86/lib/usercopy.c                |   5
-rw-r--r--  arch/x86/lib/usercopy_32.c             |  18
-rw-r--r--  arch/x86/lib/usercopy_64.c             |  58
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt        | 324
42 files changed, 1936 insertions, 1242 deletions
diff --git a/arch/x86/lib/.gitignore b/arch/x86/lib/.gitignore
index 8ae0f93ecbfd..ec2131c9fd20 100644
--- a/arch/x86/lib/.gitignore
+++ b/arch/x86/lib/.gitignore
@@ -1,2 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+# This now-removed directory used to contain generated files.
+/crypto/
+
inat-tables.c
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f76747862bd2..2dba7f83ef97 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -14,19 +14,6 @@ ifdef CONFIG_KCSAN
CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
endif
-# Early boot use of cmdline; don't instrument it
-ifdef CONFIG_AMD_MEM_ENCRYPT
-KCOV_INSTRUMENT_cmdline.o := n
-KASAN_SANITIZE_cmdline.o := n
-KCSAN_SANITIZE_cmdline.o := n
-
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_cmdline.o = -pg
-endif
-
-CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
-endif
-
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
@@ -49,7 +36,7 @@ lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
-lib-$(CONFIG_RETPOLINE) += retpoline.o
+lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
obj-y += iomem.o
@@ -60,14 +47,18 @@ ifeq ($(CONFIG_X86_32),y)
lib-y += checksum_32.o
lib-y += strstr_32.o
lib-y += string_32.o
-ifneq ($(CONFIG_X86_CMPXCHG64),y)
- lib-y += cmpxchg8b_emu.o atomic64_386_32.o
+ lib-y += memmove_32.o
+ lib-y += cmpxchg8b_emu.o
+ifneq ($(CONFIG_X86_CX8),y)
+ lib-y += atomic64_386_32.o
endif
else
- obj-y += iomap_copy_64.o
+ifneq ($(CONFIG_GENERIC_CSUM),y)
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
+endif
lib-y += clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
- lib-y += copy_user_64.o
- lib-y += cmpxchg16b_emu.o
+ lib-y += copy_user_64.o copy_user_uncached_64.o
+ lib-y += cmpxchg16b_emu.o
+ lib-y += bhi.o
endif
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 90afb488b396..b2eff07d65e4 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -16,6 +16,11 @@
cmpxchg8b (\reg)
.endm
+.macro read64_nonatomic reg
+ movl (\reg), %eax
+ movl 4(\reg), %edx
+.endm
+
SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
RET
@@ -51,7 +56,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
movl %edx, %edi
movl %ecx, %ebp
- read64 %ecx
+ read64_nonatomic %ecx
1:
movl %eax, %ebx
movl %edx, %ecx
@@ -79,7 +84,7 @@ addsub_return sub sub sbb
SYM_FUNC_START(atomic64_\func\()_return_cx8)
pushl %ebx
- read64 %esi
+ read64_nonatomic %esi
1:
movl %eax, %ebx
movl %edx, %ecx
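
The add/sub return paths can use the cheaper non-atomic read because the value only seeds the cmpxchg8b retry loop that follows; a torn read merely costs one extra iteration. A minimal C model of that pattern (a sketch, not the kernel's code; the GCC __atomic builtin stands in for the cmpxchg8b loop):

static long long atomic64_add_return_model(long long delta, long long *v)
{
	/* Possibly-torn initial read: the two 32-bit halves load separately. */
	long long old = *v;
	long long new;

	do {
		new = old + delta;
		/* On failure the builtin reloads 'old' with the current value,
		 * exactly like looping back around a cmpxchg8b mismatch. */
	} while (!__atomic_compare_exchange_n(v, &old, new, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	return new;
}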
diff --git a/arch/x86/lib/bhi.S b/arch/x86/lib/bhi.S
new file mode 100644
index 000000000000..58891681261b
--- /dev/null
+++ b/arch/x86/lib/bhi.S
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/unwind_hints.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * Notably, the FineIBT preamble calling these will have ZF set and r10 zero.
+ *
+ * The very last element is in fact larger than 32 bytes, but since it's the
+ * last element, this does not matter.
+ *
+ * There are 2 #UD sites, located between 0,1-2,3 and 4,5-6,7 such that they
+ * can be reached using Jcc.d8; these elements (1 and 5) have sufficiently
+ * big alignment holes for this to not stagger the array.
+ */
+
+.pushsection .noinstr.text, "ax"
+
+ .align 32
+SYM_CODE_START(__bhi_args)
+
+#ifdef CONFIG_FINEIBT_BHI
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_0, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_1, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ cmovne %r10, %rdi
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 8
+ ANNOTATE_REACHABLE
+.Lud_1: ud2
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_2, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_3, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_4, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_5, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ cmovne %r10, %r8
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 8
+ ANNOTATE_REACHABLE
+.Lud_2: ud2
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_6, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ cmovne %r10, %r8
+ cmovne %r10, %r9
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_7, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ cmovne %r10, %r8
+ cmovne %r10, %r9
+ cmovne %r10, %rsp
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+#endif /* CONFIG_FINEIBT_BHI */
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
+ nop /* Work around toolchain+objtool quirk */
+SYM_CODE_END(__bhi_args)
+
+.popsection
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 7c48ff4ae8d1..c5c60d07308c 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <asm/paravirt.h>
#include <linux/smp.h>
#include <linux/export.h>
@@ -13,9 +14,31 @@ void wbinvd_on_cpu(int cpu)
}
EXPORT_SYMBOL(wbinvd_on_cpu);
-int wbinvd_on_all_cpus(void)
+void wbinvd_on_all_cpus(void)
{
on_each_cpu(__wbinvd, NULL, 1);
- return 0;
}
EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+ on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbinvd_on_cpus_mask);
+
+static void __wbnoinvd(void *dummy)
+{
+ wbnoinvd();
+}
+
+void wbnoinvd_on_all_cpus(void)
+{
+ on_each_cpu(__wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_all_cpus);
+
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+ on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_cpus_mask);
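
A usage sketch for the new mask-based helpers added above (the callers and the 'dirty_cpus' mask are illustrative, not part of this patch; the declarations come from the x86 headers):

#include <linux/cpumask.h>

/* Write back and invalidate caches only on the CPUs that touched the data. */
static void example_flush_for_device(struct cpumask *dirty_cpus)
{
	wbinvd_on_cpus_mask(dirty_cpus);
}

/* Write back without invalidating, when those CPUs keep using the lines. */
static void example_writeback_only(struct cpumask *dirty_cpus)
{
	wbnoinvd_on_cpus_mask(dirty_cpus);
}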
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 23318c338db0..68f7fa3e1322 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -21,10 +21,10 @@
* converted to pure assembler
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/nospec-branch.h>
/*
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index fe59b8ac4fcc..a508e4a8c66a 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
+#include <linux/cfi_types.h>
+#include <linux/objtool.h>
+#include <asm/asm.h>
/*
* Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -13,7 +16,7 @@
* Zero a page.
* %rdi - page
*/
-SYM_FUNC_START(clear_page_rep)
+SYM_TYPED_FUNC_START(clear_page_rep)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
@@ -21,7 +24,7 @@ SYM_FUNC_START(clear_page_rep)
SYM_FUNC_END(clear_page_rep)
EXPORT_SYMBOL_GPL(clear_page_rep)
-SYM_FUNC_START(clear_page_orig)
+SYM_TYPED_FUNC_START(clear_page_orig)
xorl %eax,%eax
movl $4096/64,%ecx
.p2align 4
@@ -43,10 +46,99 @@ SYM_FUNC_START(clear_page_orig)
SYM_FUNC_END(clear_page_orig)
EXPORT_SYMBOL_GPL(clear_page_orig)
-SYM_FUNC_START(clear_page_erms)
+SYM_TYPED_FUNC_START(clear_page_erms)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
RET
SYM_FUNC_END(clear_page_erms)
EXPORT_SYMBOL_GPL(clear_page_erms)
+
+/*
+ * Default clear user-space.
+ * Input:
+ * rdi destination
+ * rcx count
+ * rax is zero
+ *
+ * Output:
+ * rcx: uncleared bytes or 0 if successful.
+ */
+SYM_FUNC_START(rep_stos_alternative)
+ ANNOTATE_NOENDBR
+ cmpq $64,%rcx
+ jae .Lunrolled
+
+ cmp $8,%ecx
+ jae .Lword
+
+ testl %ecx,%ecx
+ je .Lexit
+
+.Lclear_user_tail:
+0: movb %al,(%rdi)
+ inc %rdi
+ dec %rcx
+ jnz .Lclear_user_tail
+.Lexit:
+ RET
+
+ _ASM_EXTABLE_UA( 0b, .Lexit)
+
+.Lword:
+1: movq %rax,(%rdi)
+ addq $8,%rdi
+ sub $8,%ecx
+ je .Lexit
+ cmp $8,%ecx
+ jae .Lword
+ jmp .Lclear_user_tail
+
+ .p2align 4
+.Lunrolled:
+10: movq %rax,(%rdi)
+11: movq %rax,8(%rdi)
+12: movq %rax,16(%rdi)
+13: movq %rax,24(%rdi)
+14: movq %rax,32(%rdi)
+15: movq %rax,40(%rdi)
+16: movq %rax,48(%rdi)
+17: movq %rax,56(%rdi)
+ addq $64,%rdi
+ subq $64,%rcx
+ cmpq $64,%rcx
+ jae .Lunrolled
+ cmpl $8,%ecx
+ jae .Lword
+ testl %ecx,%ecx
+ jne .Lclear_user_tail
+ RET
+
+ /*
+ * If we take an exception on any of the
+ * word stores, we know that %rcx isn't zero,
+ * so we can just go to the tail clearing to
+ * get the exact count.
+ *
+ * The unrolled case might end up clearing
+ * some bytes twice. Don't care.
+ *
+ * We could use the value in %rdi to avoid
+ * a second fault on the exact count case,
+ * but do we really care? No.
+ *
+ * Finally, we could try to align %rdi at the
+ * top of the unrolling. But unaligned stores
+ * just aren't that common or expensive.
+ */
+ _ASM_EXTABLE_UA( 1b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(10b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(11b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(12b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(13b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(14b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(15b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(16b, .Lclear_user_tail)
+ _ASM_EXTABLE_UA(17b, .Lclear_user_tail)
+SYM_FUNC_END(rep_stos_alternative)
+EXPORT_SYMBOL(rep_stos_alternative)
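
The register contract above (count in %rcx on entry, uncleared bytes in %rcx on exit) mirrors what clear_user() exposes at the C level. A byte-at-a-time C model of just that contract (illustrative only, not how the kernel implements it):

#include <linux/uaccess.h>

static unsigned long clear_user_model(void __user *dst, unsigned long len)
{
	char __user *p = dst;

	while (len) {
		if (put_user(0, p))	/* fault: report what is left */
			break;
		p++;
		len--;
	}
	return len;			/* 0 means everything was cleared */
}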
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index b6da09339308..c65cd5550454 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -6,17 +6,21 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
+
#include <asm/setup.h>
+#include <asm/cmdline.h>
+#include <asm/bug.h>
static inline int myisspace(u8 c)
{
return c <= ' '; /* Close enough approximation */
}
-/**
+/*
* Find a boolean option (like quiet,noapic,nosmp....)
*
* @cmdline: the cmdline string
+ * @max_cmdline_size: the maximum size of cmdline
* @option: option string to look for
*
* Returns the position of that @option (starts counting with 1)
@@ -203,12 +207,29 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
- return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+ int ret;
+
+ ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+ if (ret > 0)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+ return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option);
+
+ return ret;
}
int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
int bufsize)
{
- return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
- buffer, bufsize);
+ int ret;
+
+ ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+ if (ret > 0)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+ return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+
+ return ret;
}
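
With the fallback to the built-in command line handled inside the helpers, callers stay unchanged. A usage sketch (the option names, buffer size and the __init caller are illustrative):

#include <linux/init.h>
#include <linux/string.h>
#include <asm/cmdline.h>

static bool __init example_parse_flags(void)
{
	char buf[16];

	/* Boolean switch: returns the 1-based position, or 0 if absent. */
	if (cmdline_find_option_bool(boot_command_line, "noexample"))
		return false;

	/* Valued option "example=...": copies the value, returns its length. */
	if (cmdline_find_option(boot_command_line, "example", buf, sizeof(buf)) > 0)
		return strcmp(buf, "off") != 0;

	return true;
}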
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 33c70c0160ea..4fb44894ad87 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -1,47 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
#include <asm/percpu.h>
+#include <asm/processor-flags.h>
.text
/*
+ * Emulate 'cmpxchg16b %gs:(%rsi)'
+ *
* Inputs:
* %rsi : memory location to compare
* %rax : low 64 bits of old value
* %rdx : high 64 bits of old value
* %rbx : low 64 bits of new value
* %rcx : high 64 bits of new value
- * %al : Operation successful
+ *
+ * Notably this is not LOCK prefixed and is not safe against NMIs
*/
SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
-#
-# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
-# via the ZF. Caller will access %al to get result.
-#
-# Note that this is only useful for a cpuops operation. Meaning that we
-# do *not* have a fully atomic operation but just an operation that is
-# *atomic* on a single cpu (as provided by the this_cpu_xx class of
-# macros).
-#
pushfq
cli
- cmpq PER_CPU_VAR((%rsi)), %rax
- jne .Lnot_same
- cmpq PER_CPU_VAR(8(%rsi)), %rdx
- jne .Lnot_same
+ /* if (*ptr == old) */
+ cmpq __percpu (%rsi), %rax
+ jne .Lnot_same
+ cmpq __percpu 8(%rsi), %rdx
+ jne .Lnot_same
- movq %rbx, PER_CPU_VAR((%rsi))
- movq %rcx, PER_CPU_VAR(8(%rsi))
+ /* *ptr = new */
+ movq %rbx, __percpu (%rsi)
+ movq %rcx, __percpu 8(%rsi)
+
+ /* set ZF in EFLAGS to indicate success */
+ orl $X86_EFLAGS_ZF, (%rsp)
popfq
- mov $1, %al
RET
.Lnot_same:
+ /* *ptr != old */
+
+ /* old = *ptr */
+ movq __percpu (%rsi), %rax
+ movq __percpu 8(%rsi), %rdx
+
+ /* clear ZF in EFLAGS to indicate failure */
+ andl $(~X86_EFLAGS_ZF), (%rsp)
+
popfq
- xor %al,%al
RET
SYM_FUNC_END(this_cpu_cmpxchg16b_emu)
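
For readers following the switch to a ZF-based result, a C model of the emulation's semantics (a sketch only: local_irq_save() stands in for the pushfq/cli window, and the 128-bit value is modelled as two longs):

#include <linux/irqflags.h>
#include <linux/types.h>

struct u128_halves { unsigned long lo, hi; };

static bool this_cpu_cmpxchg16b_model(struct u128_halves *ptr,
				      struct u128_halves *old,
				      struct u128_halves new_val)
{
	unsigned long flags;
	bool ok;

	local_irq_save(flags);		/* local CPU only, not LOCK-atomic */
	ok = ptr->lo == old->lo && ptr->hi == old->hi;
	if (ok)
		*ptr = new_val;		/* success: the real code sets ZF */
	else
		*old = *ptr;		/* failure: hand back the observed value */
	local_irq_restore(flags);

	return ok;
}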
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 6a912d58fecc..d4bb24347ff8 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -1,11 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
.text
+#ifndef CONFIG_X86_CX8
+
/*
+ * Emulate 'cmpxchg8b (%esi)' on UP
+ *
* Inputs:
* %esi : memory location to compare
* %eax : low 32 bits of old value
@@ -15,32 +21,77 @@
*/
SYM_FUNC_START(cmpxchg8b_emu)
-#
-# Emulate 'cmpxchg8b (%esi)' on UP except we don't
-# set the whole ZF thing (caller will just compare
-# eax:edx with the expected value)
-#
pushfl
cli
- cmpl (%esi), %eax
- jne .Lnot_same
- cmpl 4(%esi), %edx
- jne .Lhalf_same
+ cmpl (%esi), %eax
+ jne .Lnot_same
+ cmpl 4(%esi), %edx
+ jne .Lnot_same
+
+ movl %ebx, (%esi)
+ movl %ecx, 4(%esi)
- movl %ebx, (%esi)
- movl %ecx, 4(%esi)
+ orl $X86_EFLAGS_ZF, (%esp)
popfl
RET
.Lnot_same:
- movl (%esi), %eax
-.Lhalf_same:
- movl 4(%esi), %edx
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+
+ andl $(~X86_EFLAGS_ZF), (%esp)
popfl
RET
SYM_FUNC_END(cmpxchg8b_emu)
EXPORT_SYMBOL(cmpxchg8b_emu)
+
+#endif
+
+#ifndef CONFIG_UML
+
+/*
+ * Emulate 'cmpxchg8b %fs:(%rsi)'
+ *
+ * Inputs:
+ * %esi : memory location to compare
+ * %eax : low 32 bits of old value
+ * %edx : high 32 bits of old value
+ * %ebx : low 32 bits of new value
+ * %ecx : high 32 bits of new value
+ *
+ * Notably this is not LOCK prefixed and is not safe against NMIs
+ */
+SYM_FUNC_START(this_cpu_cmpxchg8b_emu)
+
+ pushfl
+ cli
+
+ cmpl __percpu (%esi), %eax
+ jne .Lnot_same2
+ cmpl __percpu 4(%esi), %edx
+ jne .Lnot_same2
+
+ movl %ebx, __percpu (%esi)
+ movl %ecx, __percpu 4(%esi)
+
+ orl $X86_EFLAGS_ZF, (%esp)
+
+ popfl
+ RET
+
+.Lnot_same2:
+ movl __percpu (%esi), %eax
+ movl __percpu 4(%esi), %edx
+
+ andl $(~X86_EFLAGS_ZF), (%esp)
+
+ popfl
+ RET
+
+SYM_FUNC_END(this_cpu_cmpxchg8b_emu)
+
+#endif
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index 80efd45a7761..97e88e58567b 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -4,6 +4,7 @@
#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
+#include <linux/instrumented.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -61,32 +62,44 @@ unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned
*/
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
- if (copy_mc_fragile_enabled)
- return copy_mc_fragile(dst, src, len);
- if (static_cpu_has(X86_FEATURE_ERMS))
- return copy_mc_enhanced_fast_string(dst, src, len);
+ unsigned long ret;
+
+ if (copy_mc_fragile_enabled) {
+ instrument_memcpy_before(dst, src, len);
+ ret = copy_mc_fragile(dst, src, len);
+ instrument_memcpy_after(dst, src, len, ret);
+ return ret;
+ }
+ if (static_cpu_has(X86_FEATURE_ERMS)) {
+ instrument_memcpy_before(dst, src, len);
+ ret = copy_mc_enhanced_fast_string(dst, src, len);
+ instrument_memcpy_after(dst, src, len, ret);
+ return ret;
+ }
memcpy(dst, src, len);
return 0;
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
+unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
unsigned long ret;
if (copy_mc_fragile_enabled) {
+ instrument_copy_to_user(dst, src, len);
__uaccess_begin();
- ret = copy_mc_fragile(dst, src, len);
+ ret = copy_mc_fragile((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
if (static_cpu_has(X86_FEATURE_ERMS)) {
+ instrument_copy_to_user(dst, src, len);
__uaccess_begin();
- ret = copy_mc_enhanced_fast_string(dst, src, len);
+ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
- return copy_user_generic(dst, src, len);
+ return copy_user_generic((__force void *)dst, src, len);
}
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 30ea644bf446..d8e87fedc20d 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
+#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
/*
* Some CPUs run faster using the string copy instructions (sane microcode).
@@ -13,7 +14,7 @@
* prefetch distance based on SMP/UP.
*/
ALIGN
-SYM_FUNC_START(copy_page)
+SYM_TYPED_FUNC_START(copy_page)
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 9dec1b38a98f..06296eb69fd4 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -6,405 +6,107 @@
* Functions to copy from and to user space.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/current.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
+#include <linux/cfi_types.h>
+#include <linux/objtool.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm.h>
-#include <asm/smap.h>
-#include <asm/export.h>
-#include <asm/trapnr.h>
-
-.macro ALIGN_DESTINATION
- /* check for bad alignment of destination */
- movl %edi,%ecx
- andl $7,%ecx
- jz 102f /* already aligned */
- subl $8,%ecx
- negl %ecx
- subl %ecx,%edx
-100: movb (%rsi),%al
-101: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz 100b
-102:
-
- _ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align)
- _ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align)
-.endm
-
-/*
- * copy_user_generic_unrolled - memory copy with exception handling.
- * This version is for CPUs like P4 that don't have efficient micro
- * code for rep movsq
- *
- * Input:
- * rdi destination
- * rsi source
- * rdx count
- *
- * Output:
- * eax uncopied bytes or 0 if successful.
- */
-SYM_FUNC_START(copy_user_generic_unrolled)
- ASM_STAC
- cmpl $8,%edx
- jb .Lcopy_user_short_string_bytes
- ALIGN_DESTINATION
- movl %edx,%ecx
- andl $63,%edx
- shrl $6,%ecx
- jz copy_user_short_string
-1: movq (%rsi),%r8
-2: movq 1*8(%rsi),%r9
-3: movq 2*8(%rsi),%r10
-4: movq 3*8(%rsi),%r11
-5: movq %r8,(%rdi)
-6: movq %r9,1*8(%rdi)
-7: movq %r10,2*8(%rdi)
-8: movq %r11,3*8(%rdi)
-9: movq 4*8(%rsi),%r8
-10: movq 5*8(%rsi),%r9
-11: movq 6*8(%rsi),%r10
-12: movq 7*8(%rsi),%r11
-13: movq %r8,4*8(%rdi)
-14: movq %r9,5*8(%rdi)
-15: movq %r10,6*8(%rdi)
-16: movq %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
- decl %ecx
- jnz 1b
- jmp copy_user_short_string
-
-30: shll $6,%ecx
- addl %ecx,%edx
- jmp .Lcopy_user_handle_tail
-
- _ASM_EXTABLE_CPY(1b, 30b)
- _ASM_EXTABLE_CPY(2b, 30b)
- _ASM_EXTABLE_CPY(3b, 30b)
- _ASM_EXTABLE_CPY(4b, 30b)
- _ASM_EXTABLE_CPY(5b, 30b)
- _ASM_EXTABLE_CPY(6b, 30b)
- _ASM_EXTABLE_CPY(7b, 30b)
- _ASM_EXTABLE_CPY(8b, 30b)
- _ASM_EXTABLE_CPY(9b, 30b)
- _ASM_EXTABLE_CPY(10b, 30b)
- _ASM_EXTABLE_CPY(11b, 30b)
- _ASM_EXTABLE_CPY(12b, 30b)
- _ASM_EXTABLE_CPY(13b, 30b)
- _ASM_EXTABLE_CPY(14b, 30b)
- _ASM_EXTABLE_CPY(15b, 30b)
- _ASM_EXTABLE_CPY(16b, 30b)
-SYM_FUNC_END(copy_user_generic_unrolled)
-EXPORT_SYMBOL(copy_user_generic_unrolled)
-
-/* Some CPUs run faster using the string copy instructions.
- * This is also a lot simpler. Use them when possible.
- *
- * Only 4GB of copy is supported. This shouldn't be a problem
- * because the kernel normally only writes from/to page sized chunks
- * even if user space passed a longer buffer.
- * And more would be dangerous because both Intel and AMD have
- * errata with rep movsq > 4GB. If someone feels the need to fix
- * this please consider this.
- *
- * Input:
- * rdi destination
- * rsi source
- * rdx count
- *
- * Output:
- * eax uncopied bytes or 0 if successful.
- */
-SYM_FUNC_START(copy_user_generic_string)
- ASM_STAC
- cmpl $8,%edx
- jb 2f /* less than 8 bytes, go to byte copy loop */
- ALIGN_DESTINATION
- movl %edx,%ecx
- shrl $3,%ecx
- andl $7,%edx
-1: rep movsq
-2: movl %edx,%ecx
-3: rep movsb
- xorl %eax,%eax
- ASM_CLAC
- RET
-
-11: leal (%rdx,%rcx,8),%ecx
-12: movl %ecx,%edx /* ecx is zerorest also */
- jmp .Lcopy_user_handle_tail
-
- _ASM_EXTABLE_CPY(1b, 11b)
- _ASM_EXTABLE_CPY(3b, 12b)
-SYM_FUNC_END(copy_user_generic_string)
-EXPORT_SYMBOL(copy_user_generic_string)
-
-/*
- * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
- * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
- *
- * Input:
- * rdi destination
- * rsi source
- * rdx count
- *
- * Output:
- * eax uncopied bytes or 0 if successful.
- */
-SYM_FUNC_START(copy_user_enhanced_fast_string)
- ASM_STAC
- /* CPUs without FSRM should avoid rep movsb for short copies */
- ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
- movl %edx,%ecx
-1: rep movsb
- xorl %eax,%eax
- ASM_CLAC
- RET
-
-12: movl %ecx,%edx /* ecx is zerorest also */
- jmp .Lcopy_user_handle_tail
-
- _ASM_EXTABLE_CPY(1b, 12b)
-SYM_FUNC_END(copy_user_enhanced_fast_string)
-EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- * Don't try to copy the tail if machine check happened
+ * rep_movs_alternative - memory copy with exception handling.
+ * This version is for CPUs that don't have FSRM (Fast Short Rep Movs)
*
* Input:
- * eax trap number written by ex_handler_copy()
* rdi destination
* rsi source
- * rdx count
+ * rcx count
*
* Output:
- * eax uncopied bytes or 0 if successful.
- */
-SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
- cmp $X86_TRAP_MC,%eax
- je 3f
-
- movl %edx,%ecx
-1: rep movsb
-2: mov %ecx,%eax
- ASM_CLAC
- RET
-
-3:
- movl %edx,%eax
- ASM_CLAC
- RET
-
- _ASM_EXTABLE_CPY(1b, 2b)
-
-.Lcopy_user_handle_align:
- addl %ecx,%edx /* ecx is zerorest also */
- jmp .Lcopy_user_handle_tail
-
-SYM_CODE_END(.Lcopy_user_handle_tail)
-
-/*
- * Finish memcpy of less than 64 bytes. #AC should already be set.
+ * rcx uncopied bytes or 0 if successful.
*
- * Input:
- * rdi destination
- * rsi source
- * rdx count (< 64)
- *
- * Output:
- * eax uncopied bytes or 0 if successful.
+ * NOTE! The calling convention is very intentionally the same as
+ * for 'rep movs', so that we can rewrite the function call with
+ * just a plain 'rep movs' on machines that have FSRM. But to make
+ * it simpler for us, we can clobber rsi/rdi and rax freely.
*/
-SYM_CODE_START_LOCAL(copy_user_short_string)
- movl %edx,%ecx
- andl $7,%edx
- shrl $3,%ecx
- jz .Lcopy_user_short_string_bytes
-18: movq (%rsi),%r8
-19: movq %r8,(%rdi)
- leaq 8(%rsi),%rsi
- leaq 8(%rdi),%rdi
- decl %ecx
- jnz 18b
-.Lcopy_user_short_string_bytes:
- andl %edx,%edx
- jz 23f
- movl %edx,%ecx
-21: movb (%rsi),%al
-22: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz 21b
-23: xor %eax,%eax
- ASM_CLAC
+SYM_FUNC_START(rep_movs_alternative)
+ ANNOTATE_NOENDBR
+ cmpq $64,%rcx
+ jae .Llarge
+
+ cmp $8,%ecx
+ jae .Lword
+
+ testl %ecx,%ecx
+ je .Lexit
+
+.Lcopy_user_tail:
+0: movb (%rsi),%al
+1: movb %al,(%rdi)
+ inc %rdi
+ inc %rsi
+ dec %rcx
+ jne .Lcopy_user_tail
+.Lexit:
RET
-40: leal (%rdx,%rcx,8),%edx
- jmp 60f
-50: movl %ecx,%edx /* ecx is zerorest also */
-60: jmp .Lcopy_user_handle_tail
-
- _ASM_EXTABLE_CPY(18b, 40b)
- _ASM_EXTABLE_CPY(19b, 40b)
- _ASM_EXTABLE_CPY(21b, 50b)
- _ASM_EXTABLE_CPY(22b, 50b)
-SYM_CODE_END(copy_user_short_string)
-
-/*
- * copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination out of cache for more performance.
- *
- * Note: Cached memory copy is used when destination or size is not
- * naturally aligned. That is:
- * - Require 8-byte alignment when size is 8 bytes or larger.
- * - Require 4-byte alignment when size is 4 bytes.
- */
-SYM_FUNC_START(__copy_user_nocache)
- ASM_STAC
-
- /* If size is less than 8 bytes, go to 4-byte copy */
- cmpl $8,%edx
- jb .L_4b_nocache_copy_entry
-
- /* If destination is not 8-byte aligned, "cache" copy to align it */
- ALIGN_DESTINATION
-
- /* Set 4x8-byte copy count and remainder */
- movl %edx,%ecx
- andl $63,%edx
- shrl $6,%ecx
- jz .L_8b_nocache_copy_entry /* jump if count is 0 */
-
- /* Perform 4x8-byte nocache loop-copy */
-.L_4x8b_nocache_copy_loop:
-1: movq (%rsi),%r8
-2: movq 1*8(%rsi),%r9
-3: movq 2*8(%rsi),%r10
-4: movq 3*8(%rsi),%r11
-5: movnti %r8,(%rdi)
-6: movnti %r9,1*8(%rdi)
-7: movnti %r10,2*8(%rdi)
-8: movnti %r11,3*8(%rdi)
-9: movq 4*8(%rsi),%r8
-10: movq 5*8(%rsi),%r9
-11: movq 6*8(%rsi),%r10
-12: movq 7*8(%rsi),%r11
-13: movnti %r8,4*8(%rdi)
-14: movnti %r9,5*8(%rdi)
-15: movnti %r10,6*8(%rdi)
-16: movnti %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
- decl %ecx
- jnz .L_4x8b_nocache_copy_loop
-
- /* Set 8-byte copy count and remainder */
-.L_8b_nocache_copy_entry:
- movl %edx,%ecx
- andl $7,%edx
- shrl $3,%ecx
- jz .L_4b_nocache_copy_entry /* jump if count is 0 */
-
- /* Perform 8-byte nocache loop-copy */
-.L_8b_nocache_copy_loop:
-20: movq (%rsi),%r8
-21: movnti %r8,(%rdi)
- leaq 8(%rsi),%rsi
- leaq 8(%rdi),%rdi
- decl %ecx
- jnz .L_8b_nocache_copy_loop
-
- /* If no byte left, we're done */
-.L_4b_nocache_copy_entry:
- andl %edx,%edx
- jz .L_finish_copy
-
- /* If destination is not 4-byte aligned, go to byte copy: */
- movl %edi,%ecx
- andl $3,%ecx
- jnz .L_1b_cache_copy_entry
-
- /* Set 4-byte copy count (1 or 0) and remainder */
- movl %edx,%ecx
- andl $3,%edx
- shrl $2,%ecx
- jz .L_1b_cache_copy_entry /* jump if count is 0 */
-
- /* Perform 4-byte nocache copy: */
-30: movl (%rsi),%r8d
-31: movnti %r8d,(%rdi)
- leaq 4(%rsi),%rsi
- leaq 4(%rdi),%rdi
-
- /* If no bytes left, we're done: */
- andl %edx,%edx
- jz .L_finish_copy
-
- /* Perform byte "cache" loop-copy for the remainder */
-.L_1b_cache_copy_entry:
- movl %edx,%ecx
-.L_1b_cache_copy_loop:
-40: movb (%rsi),%al
-41: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_1b_cache_copy_loop
-
- /* Finished copying; fence the prior stores */
-.L_finish_copy:
- xorl %eax,%eax
- ASM_CLAC
- sfence
+ _ASM_EXTABLE_UA( 0b, .Lexit)
+ _ASM_EXTABLE_UA( 1b, .Lexit)
+
+ .p2align 4
+.Lword:
+2: movq (%rsi),%rax
+3: movq %rax,(%rdi)
+ addq $8,%rsi
+ addq $8,%rdi
+ sub $8,%ecx
+ je .Lexit
+ cmp $8,%ecx
+ jae .Lword
+ jmp .Lcopy_user_tail
+
+ _ASM_EXTABLE_UA( 2b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA( 3b, .Lcopy_user_tail)
+
+.Llarge:
+0: ALTERNATIVE "jmp .Llarge_movsq", "rep movsb", X86_FEATURE_ERMS
+1: RET
+
+ _ASM_EXTABLE_UA( 0b, 1b)
+
+.Llarge_movsq:
+ /* Do the first possibly unaligned word */
+0: movq (%rsi),%rax
+1: movq %rax,(%rdi)
+
+ _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
+
+ /* What would be the offset to the aligned destination? */
+ leaq 8(%rdi),%rax
+ andq $-8,%rax
+ subq %rdi,%rax
+
+ /* .. and update pointers and count to match */
+ addq %rax,%rdi
+ addq %rax,%rsi
+ subq %rax,%rcx
+
+ /* make %rcx contain the number of words, %rax the remainder */
+ movq %rcx,%rax
+ shrq $3,%rcx
+ andl $7,%eax
+0: rep movsq
+ movl %eax,%ecx
+ testl %ecx,%ecx
+ jne .Lcopy_user_tail
RET
-.L_fixup_4x8b_copy:
- shll $6,%ecx
- addl %ecx,%edx
- jmp .L_fixup_handle_tail
-.L_fixup_8b_copy:
- lea (%rdx,%rcx,8),%rdx
- jmp .L_fixup_handle_tail
-.L_fixup_4b_copy:
- lea (%rdx,%rcx,4),%rdx
- jmp .L_fixup_handle_tail
-.L_fixup_1b_copy:
- movl %ecx,%edx
-.L_fixup_handle_tail:
- sfence
- jmp .Lcopy_user_handle_tail
+1: leaq (%rax,%rcx,8),%rcx
+ jmp .Lcopy_user_tail
- _ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy)
- _ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy)
- _ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy)
- _ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy)
- _ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
- _ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
- _ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
-SYM_FUNC_END(__copy_user_nocache)
-EXPORT_SYMBOL(__copy_user_nocache)
+ _ASM_EXTABLE_UA( 0b, 1b)
+SYM_FUNC_END(rep_movs_alternative)
+EXPORT_SYMBOL(rep_movs_alternative)
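
The point of keeping the 'rep movs' register convention is that a caller can patch between the bare instruction and the call. A sketch of such a wrapper, close to what the x86 uaccess headers do (the function name and the exact constraint list here are assumptions):

#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/cpufeatures.h>
#include <asm/smap.h>

static inline unsigned long
copy_user_sketch(void *to, const void *from, unsigned long len)
{
	stac();				/* open the user-access window */
	asm volatile(
		"1:\n\t"
		/* FSRM CPUs run the plain instruction, others call the helper. */
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative",
			    ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;			/* uncopied bytes, 0 on success */
}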
diff --git a/arch/x86/lib/copy_user_uncached_64.S b/arch/x86/lib/copy_user_uncached_64.S
new file mode 100644
index 000000000000..18350b343c2a
--- /dev/null
+++ b/arch/x86/lib/copy_user_uncached_64.S
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Linus Torvalds <torvalds@linux-foundation.org>
+ */
+
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <linux/objtool.h>
+#include <asm/asm.h>
+
+/*
+ * copy_user_nocache - Uncached memory copy with exception handling
+ *
+ * This copies from user space into kernel space, but the kernel
+ * space accesses can take a machine check exception, so they too
+ * need exception handling.
+ *
+ * Note: only 32-bit and 64-bit stores have non-temporal versions,
+ * and we only use aligned versions. Any unaligned parts at the
+ * start or end of the copy will be done using normal cached stores.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * edx count
+ *
+ * Output:
+ * rax uncopied bytes or 0 if successful.
+ */
+SYM_FUNC_START(__copy_user_nocache)
+ ANNOTATE_NOENDBR
+ /* If destination is not 7-byte aligned, we'll have to align it */
+ testb $7,%dil
+ jne .Lalign
+
+.Lis_aligned:
+ cmp $64,%edx
+ jb .Lquadwords
+
+ .p2align 4,0x90
+.Lunrolled:
+10: movq (%rsi),%r8
+11: movq 8(%rsi),%r9
+12: movq 16(%rsi),%r10
+13: movq 24(%rsi),%r11
+20: movnti %r8,(%rdi)
+21: movnti %r9,8(%rdi)
+22: movnti %r10,16(%rdi)
+23: movnti %r11,24(%rdi)
+30: movq 32(%rsi),%r8
+31: movq 40(%rsi),%r9
+32: movq 48(%rsi),%r10
+33: movq 56(%rsi),%r11
+40: movnti %r8,32(%rdi)
+41: movnti %r9,40(%rdi)
+42: movnti %r10,48(%rdi)
+43: movnti %r11,56(%rdi)
+
+ addq $64,%rsi
+ addq $64,%rdi
+ sub $64,%edx
+ cmp $64,%edx
+ jae .Lunrolled
+
+/*
+ * First set of user mode loads have been done
+ * without any stores, so if they fail, we can
+ * just try the non-unrolled loop.
+ */
+_ASM_EXTABLE_UA(10b, .Lquadwords)
+_ASM_EXTABLE_UA(11b, .Lquadwords)
+_ASM_EXTABLE_UA(12b, .Lquadwords)
+_ASM_EXTABLE_UA(13b, .Lquadwords)
+
+/*
+ * The second set of user mode loads have been
+ * done with 32 bytes stored to the destination,
+ * so we need to take that into account before
+ * falling back to the non-unrolled loop.
+ */
+_ASM_EXTABLE_UA(30b, .Lfixup32)
+_ASM_EXTABLE_UA(31b, .Lfixup32)
+_ASM_EXTABLE_UA(32b, .Lfixup32)
+_ASM_EXTABLE_UA(33b, .Lfixup32)
+
+/*
+ * An exception on a write means that we're
+ * done, but we need to update the count
+ * depending on where in the unrolled loop
+ * we were.
+ */
+_ASM_EXTABLE_UA(20b, .Ldone0)
+_ASM_EXTABLE_UA(21b, .Ldone8)
+_ASM_EXTABLE_UA(22b, .Ldone16)
+_ASM_EXTABLE_UA(23b, .Ldone24)
+_ASM_EXTABLE_UA(40b, .Ldone32)
+_ASM_EXTABLE_UA(41b, .Ldone40)
+_ASM_EXTABLE_UA(42b, .Ldone48)
+_ASM_EXTABLE_UA(43b, .Ldone56)
+
+.Lquadwords:
+ cmp $8,%edx
+ jb .Llong
+50: movq (%rsi),%rax
+51: movnti %rax,(%rdi)
+ addq $8,%rsi
+ addq $8,%rdi
+ sub $8,%edx
+ jmp .Lquadwords
+
+/*
+ * If we fail on the last full quadword, we will
+ * not try to do any byte-wise cached accesses.
+ * We will try to do one more 4-byte uncached
+ * one, though.
+ */
+_ASM_EXTABLE_UA(50b, .Llast4)
+_ASM_EXTABLE_UA(51b, .Ldone0)
+
+.Llong:
+ test $4,%dl
+ je .Lword
+60: movl (%rsi),%eax
+61: movnti %eax,(%rdi)
+ addq $4,%rsi
+ addq $4,%rdi
+ sub $4,%edx
+.Lword:
+ sfence
+ test $2,%dl
+ je .Lbyte
+70: movw (%rsi),%ax
+71: movw %ax,(%rdi)
+ addq $2,%rsi
+ addq $2,%rdi
+ sub $2,%edx
+.Lbyte:
+ test $1,%dl
+ je .Ldone
+80: movb (%rsi),%al
+81: movb %al,(%rdi)
+ dec %edx
+.Ldone:
+ mov %edx,%eax
+ RET
+
+/*
+ * If we fail on the last four bytes, we won't
+ * bother with any fixups. It's dead, Jim. Note
+ * that there's no need for 'sfence' for any
+ * of this, since the exception will have been
+ * serializing.
+ */
+_ASM_EXTABLE_UA(60b, .Ldone)
+_ASM_EXTABLE_UA(61b, .Ldone)
+_ASM_EXTABLE_UA(70b, .Ldone)
+_ASM_EXTABLE_UA(71b, .Ldone)
+_ASM_EXTABLE_UA(80b, .Ldone)
+_ASM_EXTABLE_UA(81b, .Ldone)
+
+/*
+ * This is the "head needs aligning" case when
+ * the destination isn't 8-byte aligned. The
+ * 4-byte case can be done uncached, but any
+ * smaller alignment is done with regular stores.
+ */
+.Lalign:
+ test $1,%dil
+ je .Lalign_word
+ test %edx,%edx
+ je .Ldone
+90: movb (%rsi),%al
+91: movb %al,(%rdi)
+ inc %rsi
+ inc %rdi
+ dec %edx
+.Lalign_word:
+ test $2,%dil
+ je .Lalign_long
+ cmp $2,%edx
+ jb .Lbyte
+92: movw (%rsi),%ax
+93: movw %ax,(%rdi)
+ addq $2,%rsi
+ addq $2,%rdi
+ sub $2,%edx
+.Lalign_long:
+ test $4,%dil
+ je .Lis_aligned
+ cmp $4,%edx
+ jb .Lword
+94: movl (%rsi),%eax
+95: movnti %eax,(%rdi)
+ addq $4,%rsi
+ addq $4,%rdi
+ sub $4,%edx
+ jmp .Lis_aligned
+
+/*
+ * If we fail on the initial alignment accesses,
+ * we're all done. Again, no point in trying to
+ * do byte-by-byte probing if the 4-byte load
+ * fails - we're not doing any uncached accesses
+ * any more.
+ */
+_ASM_EXTABLE_UA(90b, .Ldone)
+_ASM_EXTABLE_UA(91b, .Ldone)
+_ASM_EXTABLE_UA(92b, .Ldone)
+_ASM_EXTABLE_UA(93b, .Ldone)
+_ASM_EXTABLE_UA(94b, .Ldone)
+_ASM_EXTABLE_UA(95b, .Ldone)
+
+/*
+ * Exception table fixups for faults in the middle
+ */
+.Ldone56: sub $8,%edx
+.Ldone48: sub $8,%edx
+.Ldone40: sub $8,%edx
+.Ldone32: sub $8,%edx
+.Ldone24: sub $8,%edx
+.Ldone16: sub $8,%edx
+.Ldone8: sub $8,%edx
+.Ldone0:
+ mov %edx,%eax
+ RET
+
+.Lfixup32:
+ addq $32,%rsi
+ addq $32,%rdi
+ sub $32,%edx
+ jmp .Lquadwords
+
+.Llast4:
+52: movl (%rsi),%eax
+53: movnti %eax,(%rdi)
+ sfence
+ sub $4,%edx
+ mov %edx,%eax
+ RET
+_ASM_EXTABLE_UA(52b, .Ldone0)
+_ASM_EXTABLE_UA(53b, .Ldone0)
+
+SYM_FUNC_END(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)
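
Usage sketch: the rewritten routine no longer executes STAC/CLAC itself, so the caller opens the user-access window. The wrapper name, error handling and the exact C prototype are illustrative:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_pull_uncached(void *dst, const void __user *src,
				 unsigned int len)
{
	unsigned long left;

	if (!user_access_begin(src, len))	/* access_ok() + STAC */
		return -EFAULT;
	left = __copy_user_nocache(dst, src, len);
	user_access_end();			/* CLAC */

	return left ? -EFAULT : 0;
}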
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 50734a23034c..c9dae65ac01b 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -5,20 +5,29 @@
* This file contains network checksum routines that are better done
* in an architecture-specific manner due to speed.
*/
-
+
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
-static inline unsigned short from32to16(unsigned a)
+static inline __wsum csum_finalize_sum(u64 temp64)
+{
+ return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
+}
+
+static inline unsigned long update_csum_40b(unsigned long sum, const unsigned long m[5])
{
- unsigned short b = a >> 16;
- asm("addw %w2,%w0\n\t"
- "adcw $0,%w0\n"
- : "=r" (b)
- : "0" (b), "r" (a));
- return b;
+ asm("addq %1,%0\n\t"
+ "adcq %2,%0\n\t"
+ "adcq %3,%0\n\t"
+ "adcq %4,%0\n\t"
+ "adcq %5,%0\n\t"
+ "adcq $0,%0"
+ :"+r" (sum)
+ :"m" (m[0]), "m" (m[1]), "m" (m[2]),
+ "m" (m[3]), "m" (m[4]));
+ return sum;
}
/*
@@ -35,33 +44,32 @@ static inline unsigned short from32to16(unsigned a)
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
u64 temp64 = (__force u64)sum;
- unsigned odd, result;
- odd = 1 & (unsigned long) buff;
- if (unlikely(odd)) {
- if (unlikely(len == 0))
- return sum;
- temp64 = ror32((__force u32)sum, 8);
- temp64 += (*(unsigned char *)buff << 8);
- len--;
- buff++;
+ /* Do two 40-byte chunks in parallel to get better ILP */
+ if (likely(len >= 80)) {
+ u64 temp64_2 = 0;
+ do {
+ temp64 = update_csum_40b(temp64, buff);
+ temp64_2 = update_csum_40b(temp64_2, buff + 40);
+ buff += 80;
+ len -= 80;
+ } while (len >= 80);
+
+ asm("addq %1,%0\n\t"
+ "adcq $0,%0"
+ :"+r" (temp64): "r" (temp64_2));
}
- while (unlikely(len >= 64)) {
- asm("addq 0*8(%[src]),%[res]\n\t"
- "adcq 1*8(%[src]),%[res]\n\t"
- "adcq 2*8(%[src]),%[res]\n\t"
- "adcq 3*8(%[src]),%[res]\n\t"
- "adcq 4*8(%[src]),%[res]\n\t"
- "adcq 5*8(%[src]),%[res]\n\t"
- "adcq 6*8(%[src]),%[res]\n\t"
- "adcq 7*8(%[src]),%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
- buff += 64;
- len -= 64;
+ /*
+ * len == 40 is the hot case due to IPv6 headers, so return
+ * early for that exact case without checking the tail bytes.
+ */
+ if (len >= 40) {
+ temp64 = update_csum_40b(temp64, buff);
+ len -= 40;
+ if (!len)
+ return csum_finalize_sum(temp64);
+ buff += 40;
}
if (len & 32) {
@@ -70,45 +78,37 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ : [res] "+r"(temp64)
+ : [src] "r"(buff), "m"(*(const char(*)[32])buff));
buff += 32;
}
if (len & 16) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ : [res] "+r"(temp64)
+ : [src] "r"(buff), "m"(*(const char(*)[16])buff));
buff += 16;
}
if (len & 8) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ : [res] "+r"(temp64)
+ : [src] "r"(buff), "m"(*(const char(*)[8])buff));
buff += 8;
}
if (len & 7) {
- unsigned int shift = (8 - (len & 7)) * 8;
+ unsigned int shift = (-len << 3) & 63;
unsigned long trail;
trail = (load_unaligned_zeropad(buff) << shift) >> shift;
asm("addq %[trail],%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [trail] "r" (trail));
- }
- result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
- if (unlikely(odd)) {
- result = from32to16(result);
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+ : [res] "+r"(temp64)
+ : [trail] "r"(trail));
}
- return (__force __wsum)result;
+ return csum_finalize_sum(temp64);
}
EXPORT_SYMBOL(csum_partial);
@@ -118,6 +118,6 @@ EXPORT_SYMBOL(csum_partial);
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold(csum_partial(buff,len,0));
+ return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);
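
The new csum_finalize_sum() folds the 64-bit accumulator in one step: adding the value to itself rotated by 32 leaves "high half + low half + end-around carry" in the upper 32 bits. A standalone user-space check of that identity (not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t fold64(uint64_t sum)
{
	uint64_t rot = (sum >> 32) | (sum << 32);	/* ror64(sum, 32) */
	return (uint32_t)((sum + rot) >> 32);
}

int main(void)
{
	uint64_t sum = 0xffff0001deadbeefULL;
	uint32_t lo = (uint32_t)sum, hi = (uint32_t)(sum >> 32);
	uint64_t wide = (uint64_t)lo + hi;		/* up to 33 bits */
	uint32_t folded = (uint32_t)wide + (uint32_t)(wide >> 32);

	/* Both lines print the same 32-bit folded checksum. */
	printf("%#x\n%#x\n", fold64(sum), folded);
	return 0;
}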
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 145f9a0bde29..f4df4d241526 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -14,8 +14,6 @@
* @src: source address (user space)
* @dst: destination address
* @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
- * @errp: set to -EFAULT for an bad source address.
*
* Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits.
@@ -38,8 +36,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
* @src: source address
* @dst: destination address (user space)
* @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
- * @errp: set to -EFAULT for an bad destination address.
*
* Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits.
@@ -62,7 +58,6 @@ csum_and_copy_to_user(const void *src, void __user *dst, int len)
* @src: source address
* @dst: destination address
* @len: number of bytes to be copied.
- * @sum: initial sum that is added into the result (32bit unfolded)
*
* Returns an 32bit unfolded checksum of the buffer.
*/
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 0e65d00e2339..eb2d2e1cbddd 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -75,7 +75,7 @@ static void delay_tsc(u64 cycles)
/* Allow RT tasks to run */
preempt_enable();
- rep_nop();
+ native_pause();
preempt_disable();
/*
@@ -128,10 +128,10 @@ static void delay_halt_mwaitx(u64 unused, u64 cycles)
delay = min_t(u64, MWAITX_MAX_WAIT_CYCLES, cycles);
/*
- * Use cpu_tss_rw as a cacheline-aligned, seldomly accessed per-cpu
+ * Use cpu_tss_rw as a cacheline-aligned, seldom accessed per-cpu
* variable as the monitor target.
*/
- __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
+ __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
/*
* AMD, like Intel, supports the EAX hint and EAX=0xf means, do not
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index 1e3de0769b81..b5a6d83106bc 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -11,6 +11,7 @@ asm(
".text\n"
".type just_return_func, @function\n"
".globl just_return_func\n"
+ ASM_FUNC_ALIGN
"just_return_func:\n"
ANNOTATE_NOENDBR
ASM_RET
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index b70d98d79a9d..9d5654b8a72a 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -26,35 +26,44 @@
* as they get called from within inline assembly.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
-#include <asm/export.h>
+#include <asm/runtime-const.h>
#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC
-#ifdef CONFIG_X86_5LEVEL
-#define LOAD_TASK_SIZE_MINUS_N(n) \
- ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rdx), \
- __stringify(mov $((1 << 56) - 4096 - (n)),%rdx), X86_FEATURE_LA57
-#else
-#define LOAD_TASK_SIZE_MINUS_N(n) \
- mov $(TASK_SIZE_MAX - (n)),%_ASM_DX
-#endif
+.macro check_range size:req
+.if IS_ENABLED(CONFIG_X86_64)
+ RUNTIME_CONST_PTR USER_PTR_MAX, rdx
+ cmp %rdx, %rax
+ cmova %rdx, %rax
+.else
+ cmp $TASK_SIZE_MAX-\size+1, %eax
+ jae .Lbad_get_user
+ sbb %edx, %edx /* array_index_mask_nospec() */
+ and %edx, %eax
+.endif
+.endm
+
+.macro UACCESS op src dst
+1: \op \src,\dst
+ _ASM_EXTABLE_UA(1b, __get_user_handle_exception)
+.endm
+
.text
SYM_FUNC_START(__get_user_1)
- LOAD_TASK_SIZE_MINUS_N(0)
- cmp %_ASM_DX,%_ASM_AX
- jae bad_get_user
- sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
- and %_ASM_DX, %_ASM_AX
+ ANNOTATE_NOENDBR
+ check_range size=1
ASM_STAC
-1: movzbl (%_ASM_AX),%edx
+ UACCESS movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -62,13 +71,10 @@ SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
SYM_FUNC_START(__get_user_2)
- LOAD_TASK_SIZE_MINUS_N(1)
- cmp %_ASM_DX,%_ASM_AX
- jae bad_get_user
- sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
- and %_ASM_DX, %_ASM_AX
+ ANNOTATE_NOENDBR
+ check_range size=2
ASM_STAC
-2: movzwl (%_ASM_AX),%edx
+ UACCESS movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -76,13 +82,10 @@ SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
SYM_FUNC_START(__get_user_4)
- LOAD_TASK_SIZE_MINUS_N(3)
- cmp %_ASM_DX,%_ASM_AX
- jae bad_get_user
- sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
- and %_ASM_DX, %_ASM_AX
+ ANNOTATE_NOENDBR
+ check_range size=4
ASM_STAC
-3: movl (%_ASM_AX),%edx
+ UACCESS movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -90,38 +93,30 @@ SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8)
-#ifdef CONFIG_X86_64
- LOAD_TASK_SIZE_MINUS_N(7)
- cmp %_ASM_DX,%_ASM_AX
- jae bad_get_user
- sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
- and %_ASM_DX, %_ASM_AX
+ ANNOTATE_NOENDBR
+#ifndef CONFIG_X86_64
+ xor %ecx,%ecx
+#endif
+ check_range size=8
ASM_STAC
-4: movq (%_ASM_AX),%rdx
- xor %eax,%eax
- ASM_CLAC
- RET
+#ifdef CONFIG_X86_64
+ UACCESS movq (%_ASM_AX),%rdx
#else
- LOAD_TASK_SIZE_MINUS_N(7)
- cmp %_ASM_DX,%_ASM_AX
- jae bad_get_user_8
- sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
- and %_ASM_DX, %_ASM_AX
- ASM_STAC
-4: movl (%_ASM_AX),%edx
-5: movl 4(%_ASM_AX),%ecx
+ UACCESS movl (%_ASM_AX),%edx
+ UACCESS movl 4(%_ASM_AX),%ecx
+#endif
xor %eax,%eax
ASM_CLAC
RET
-#endif
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
/* .. and the same for __get_user, just without the range checks */
SYM_FUNC_START(__get_user_nocheck_1)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
-6: movzbl (%_ASM_AX),%edx
+ UACCESS movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -129,9 +124,10 @@ SYM_FUNC_END(__get_user_nocheck_1)
EXPORT_SYMBOL(__get_user_nocheck_1)
SYM_FUNC_START(__get_user_nocheck_2)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
-7: movzwl (%_ASM_AX),%edx
+ UACCESS movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -139,9 +135,10 @@ SYM_FUNC_END(__get_user_nocheck_2)
EXPORT_SYMBOL(__get_user_nocheck_2)
SYM_FUNC_START(__get_user_nocheck_4)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
-8: movl (%_ASM_AX),%edx
+ UACCESS movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
RET
@@ -149,13 +146,15 @@ SYM_FUNC_END(__get_user_nocheck_4)
EXPORT_SYMBOL(__get_user_nocheck_4)
SYM_FUNC_START(__get_user_nocheck_8)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
#ifdef CONFIG_X86_64
-9: movq (%_ASM_AX),%rdx
+ UACCESS movq (%_ASM_AX),%rdx
#else
-9: movl (%_ASM_AX),%edx
-10: movl 4(%_ASM_AX),%ecx
+ xor %ecx,%ecx
+ UACCESS movl (%_ASM_AX),%edx
+ UACCESS movl 4(%_ASM_AX),%ecx
#endif
xor %eax,%eax
ASM_CLAC
@@ -164,43 +163,10 @@ SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)
-SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
+SYM_CODE_START_LOCAL(__get_user_handle_exception)
ASM_CLAC
-bad_get_user:
+.Lbad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
RET
-SYM_CODE_END(.Lbad_get_user_clac)
-
-#ifdef CONFIG_X86_32
-SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac)
- ASM_CLAC
-bad_get_user_8:
- xor %edx,%edx
- xor %ecx,%ecx
- mov $(-EFAULT),%_ASM_AX
- RET
-SYM_CODE_END(.Lbad_get_user_8_clac)
-#endif
-
-/* get_user */
- _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)
- _ASM_EXTABLE_UA(2b, .Lbad_get_user_clac)
- _ASM_EXTABLE_UA(3b, .Lbad_get_user_clac)
-#ifdef CONFIG_X86_64
- _ASM_EXTABLE_UA(4b, .Lbad_get_user_clac)
-#else
- _ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac)
- _ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac)
-#endif
-
-/* __get_user */
- _ASM_EXTABLE_UA(6b, .Lbad_get_user_clac)
- _ASM_EXTABLE_UA(7b, .Lbad_get_user_clac)
- _ASM_EXTABLE_UA(8b, .Lbad_get_user_clac)
-#ifdef CONFIG_X86_64
- _ASM_EXTABLE_UA(9b, .Lbad_get_user_clac)
-#else
- _ASM_EXTABLE_UA(9b, .Lbad_get_user_8_clac)
- _ASM_EXTABLE_UA(10b, .Lbad_get_user_8_clac)
-#endif
+SYM_CODE_END(__get_user_handle_exception)
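
On 64-bit, check_range no longer branches on a bad pointer: it clamps the address to the USER_PTR_MAX runtime constant so an out-of-range access hits the guard value and faults, leaving no speculation window to mask. The same idea in C (illustrative; the assembly does it branchlessly with cmp + cmova):

static inline unsigned long clamp_user_addr(unsigned long addr,
					    unsigned long user_ptr_max)
{
	/* Anything past the limit is redirected to the guard address,
	 * which is chosen so that the subsequent access faults. */
	return addr > user_ptr_max ? user_ptr_max : addr;
}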
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 12c16c6aa44a..edbeb3ecad38 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
+#include <linux/objtool.h>
#include <asm/asm.h>
@@ -9,6 +10,7 @@
* %rdi: w
*/
SYM_FUNC_START(__sw_hweight32)
+ ANNOTATE_NOENDBR
#ifdef CONFIG_X86_64
movl %edi, %eax # w
@@ -36,8 +38,13 @@ SYM_FUNC_START(__sw_hweight32)
SYM_FUNC_END(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
-SYM_FUNC_START(__sw_hweight64)
+/*
+ * No 32-bit variant, because it's implemented as an inline wrapper
+ * on top of __arch_hweight32():
+ */
#ifdef CONFIG_X86_64
+SYM_FUNC_START(__sw_hweight64)
+ ANNOTATE_NOENDBR
pushq %rdi
pushq %rdx
@@ -66,18 +73,6 @@ SYM_FUNC_START(__sw_hweight64)
popq %rdx
popq %rdi
RET
-#else /* CONFIG_X86_32 */
- /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
- pushl %ecx
-
- call __sw_hweight32
- movl %eax, %ecx # stash away result
- movl %edx, %eax # second part of input
- call __sw_hweight32
- addl %ecx, %eax # result
-
- popl %ecx
- RET
-#endif
SYM_FUNC_END(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)
+#endif
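
For reference, the classic SWAR reduction that the __sw_hweight32 fallback implements in assembly, written as a standalone C function (user-space sketch, not the kernel source):

#include <stdint.h>

static uint32_t sw_hweight32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;				/* count bit pairs */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;			/* 8-bit sums */
	return (w * 0x01010101) >> 24;				/* add the four bytes */
}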
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 21104c41cba0..4e385cbfd444 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -13,6 +13,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/ldt.h>
+#include <asm/msr.h>
#include <asm/vm86.h>
#undef pr_fmt
@@ -631,14 +632,21 @@ static bool get_desc(struct desc_struct *out, unsigned short sel)
/* Bits [15:3] contain the index of the desired entry. */
sel >>= 3;
- mutex_lock(&current->active_mm->context.lock);
- ldt = current->active_mm->context.ldt;
+ /*
+ * If we're not in a valid context with a real (not just lazy)
+ * user mm, then don't even try.
+ */
+ if (!nmi_uaccess_okay())
+ return false;
+
+ mutex_lock(&current->mm->context.lock);
+ ldt = current->mm->context.ldt;
if (ldt && sel < ldt->nr_entries) {
*out = ldt->entries[sel];
success = true;
}
- mutex_unlock(&current->active_mm->context.lock);
+ mutex_unlock(&current->mm->context.lock);
return success;
}
@@ -702,16 +710,16 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
unsigned long base;
if (seg_reg_idx == INAT_SEG_REG_FS) {
- rdmsrl(MSR_FS_BASE, base);
+ rdmsrq(MSR_FS_BASE, base);
} else if (seg_reg_idx == INAT_SEG_REG_GS) {
/*
* swapgs was called at the kernel entry point. Thus,
* MSR_KERNEL_GS_BASE will have the user-space GS base.
*/
if (user_mode(regs))
- rdmsrl(MSR_KERNEL_GS_BASE, base);
+ rdmsrq(MSR_KERNEL_GS_BASE, base);
else
- rdmsrl(MSR_GS_BASE, base);
+ rdmsrq(MSR_GS_BASE, base);
} else {
base = 0;
}
@@ -1129,15 +1137,15 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
* get_eff_addr_sib() - Obtain referenced effective address via SIB
* @insn: Instruction. Must be valid.
* @regs: Register values as seen when entering kernel mode
- * @regoff: Obtained operand offset, in pt_regs, associated with segment
+ * @base_offset: Obtained operand offset, in pt_regs, associated with segment
* @eff_addr: Obtained effective address
*
* Obtain the effective address referenced by the SIB byte of @insn. After
* identifying the registers involved in the indexed, register-indirect memory
* reference, its value is obtained from the operands in @regs. The computed
* address is stored @eff_addr. Also, the register operand that indicates the
- * associated segment is stored in @regoff, this parameter can later be used to
- * determine such segment.
+ * associated segment is stored in @base_offset; this parameter can later be
+ * used to determine such segment.
*
* Returns:
*
@@ -1595,16 +1603,16 @@ bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
* Returns:
*
* Type of the instruction. Size of the memory operand is stored in
- * @bytes. If decode failed, MMIO_DECODE_FAILED returned.
+ * @bytes. If decode failed, INSN_MMIO_DECODE_FAILED returned.
*/
-enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
+enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
{
- enum mmio_type type = MMIO_DECODE_FAILED;
+ enum insn_mmio_type type = INSN_MMIO_DECODE_FAILED;
*bytes = 0;
if (insn_get_opcode(insn))
- return MMIO_DECODE_FAILED;
+ return INSN_MMIO_DECODE_FAILED;
switch (insn->opcode.bytes[0]) {
case 0x88: /* MOV m8,r8 */
@@ -1613,7 +1621,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
case 0x89: /* MOV m16/m32/m64, r16/m32/m64 */
if (!*bytes)
*bytes = insn->opnd_bytes;
- type = MMIO_WRITE;
+ type = INSN_MMIO_WRITE;
break;
case 0xc6: /* MOV m8, imm8 */
@@ -1622,7 +1630,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
case 0xc7: /* MOV m16/m32/m64, imm16/imm32/imm64 */
if (!*bytes)
*bytes = insn->opnd_bytes;
- type = MMIO_WRITE_IMM;
+ type = INSN_MMIO_WRITE_IMM;
break;
case 0x8a: /* MOV r8, m8 */
@@ -1631,7 +1639,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
case 0x8b: /* MOV r16/r32/r64, m16/m32/m64 */
if (!*bytes)
*bytes = insn->opnd_bytes;
- type = MMIO_READ;
+ type = INSN_MMIO_READ;
break;
case 0xa4: /* MOVS m8, m8 */
@@ -1640,7 +1648,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
case 0xa5: /* MOVS m16/m32/m64, m16/m32/m64 */
if (!*bytes)
*bytes = insn->opnd_bytes;
- type = MMIO_MOVS;
+ type = INSN_MMIO_MOVS;
break;
case 0x0f: /* Two-byte instruction */
@@ -1651,7 +1659,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
case 0xb7: /* MOVZX r32/r64, m16 */
if (!*bytes)
*bytes = 2;
- type = MMIO_READ_ZERO_EXTEND;
+ type = INSN_MMIO_READ_ZERO_EXTEND;
break;
case 0xbe: /* MOVSX r16/r32/r64, m8 */
@@ -1660,7 +1668,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
case 0xbf: /* MOVSX r32/r64, m16 */
if (!*bytes)
*bytes = 2;
- type = MMIO_READ_SIGN_EXTEND;
+ type = INSN_MMIO_READ_SIGN_EXTEND;
break;
}
break;
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 55e371cc69fd..149a57e334ab 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -13,7 +13,7 @@
#endif
#include <asm/inat.h> /*__ignore_sync_check__ */
#include <asm/insn.h> /* __ignore_sync_check__ */
-#include <asm/unaligned.h> /* __ignore_sync_check__ */
+#include <linux/unaligned.h> /* __ignore_sync_check__ */
#include <linux/errno.h>
#include <linux/kconfig.h>
@@ -71,7 +71,7 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
insn->kaddr = kaddr;
insn->end_kaddr = kaddr + buf_len;
insn->next_byte = kaddr;
- insn->x86_64 = x86_64 ? 1 : 0;
+ insn->x86_64 = x86_64;
insn->opnd_bytes = 4;
if (x86_64)
insn->addr_bytes = 8;
@@ -185,6 +185,17 @@ found:
if (X86_REX_W(b))
/* REX.W overrides opnd_size */
insn->opnd_bytes = 8;
+ } else if (inat_is_rex2_prefix(attr)) {
+ insn_set_byte(&insn->rex_prefix, 0, b);
+ b = peek_nbyte_next(insn_byte_t, insn, 1);
+ insn_set_byte(&insn->rex_prefix, 1, b);
+ insn->rex_prefix.nbytes = 2;
+ insn->next_byte += 2;
+ if (X86_REX_W(b))
+ /* REX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ insn->rex_prefix.got = 1;
+ goto vex_end;
}
}
insn->rex_prefix.got = 1;
@@ -268,11 +279,9 @@ int insn_get_opcode(struct insn *insn)
if (opcode->got)
return 0;
- if (!insn->prefixes.got) {
- ret = insn_get_prefixes(insn);
- if (ret)
- return ret;
- }
+ ret = insn_get_prefixes(insn);
+ if (ret)
+ return ret;
/* Get first opcode */
op = get_next(insn_byte_t, insn);
@@ -285,6 +294,10 @@ int insn_get_opcode(struct insn *insn)
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
insn->attr = inat_get_avx_attribute(op, m, p);
+ /* SCALABLE EVEX uses p bits to encode operand size */
+ if (inat_evex_scalable(insn->attr) && !insn_vex_w_bit(insn) &&
+ p == INAT_PFX_OPNDSZ)
+ insn->opnd_bytes = 2;
if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
(!inat_accept_vex(insn->attr) &&
!inat_is_group(insn->attr))) {
@@ -296,7 +309,26 @@ int insn_get_opcode(struct insn *insn)
goto end;
}
+ /* Check if there is REX2 prefix or not */
+ if (insn_is_rex2(insn)) {
+ if (insn_rex2_m_bit(insn)) {
+ /* map 1 is escape 0x0f */
+ insn_attr_t esc_attr = inat_get_opcode_attribute(0x0f);
+
+ pfx_id = insn_last_prefix_id(insn);
+ insn->attr = inat_get_escape_attribute(op, pfx_id, esc_attr);
+ } else {
+ insn->attr = inat_get_opcode_attribute(op);
+ }
+ goto end;
+ }
+
insn->attr = inat_get_opcode_attribute(op);
+ if (insn->x86_64 && inat_is_invalid64(insn->attr)) {
+ /* This instruction is invalid, like UD2. Stop decoding. */
+ insn->attr &= INAT_INV64;
+ }
+
while (inat_is_escape(insn->attr)) {
/* Get escaped opcode */
op = get_next(insn_byte_t, insn);
@@ -310,6 +342,7 @@ int insn_get_opcode(struct insn *insn)
insn->attr = 0;
return -EINVAL;
}
+
end:
opcode->got = 1;
return 0;
@@ -339,11 +372,9 @@ int insn_get_modrm(struct insn *insn)
if (modrm->got)
return 0;
- if (!insn->opcode.got) {
- ret = insn_get_opcode(insn);
- if (ret)
- return ret;
- }
+ ret = insn_get_opcode(insn);
+ if (ret)
+ return ret;
if (inat_has_modrm(insn->attr)) {
mod = get_next(insn_byte_t, insn);
@@ -386,11 +417,9 @@ int insn_rip_relative(struct insn *insn)
if (!insn->x86_64)
return 0;
- if (!modrm->got) {
- ret = insn_get_modrm(insn);
- if (ret)
- return 0;
- }
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return 0;
/*
* For rip-relative instructions, the mod field (top 2 bits)
* is zero and the r/m field (bottom 3 bits) is 0x5.
@@ -417,11 +446,9 @@ int insn_get_sib(struct insn *insn)
if (insn->sib.got)
return 0;
- if (!insn->modrm.got) {
- ret = insn_get_modrm(insn);
- if (ret)
- return ret;
- }
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (insn->modrm.nbytes) {
modrm = insn->modrm.bytes[0];
@@ -460,11 +487,9 @@ int insn_get_displacement(struct insn *insn)
if (insn->displacement.got)
return 0;
- if (!insn->sib.got) {
- ret = insn_get_sib(insn);
- if (ret)
- return ret;
- }
+ ret = insn_get_sib(insn);
+ if (ret)
+ return ret;
if (insn->modrm.nbytes) {
/*
@@ -628,11 +653,9 @@ int insn_get_immediate(struct insn *insn)
if (insn->immediate.got)
return 0;
- if (!insn->displacement.got) {
- ret = insn_get_displacement(insn);
- if (ret)
- return ret;
- }
+ ret = insn_get_displacement(insn);
+ if (ret)
+ return ret;
if (inat_has_moffset(insn->attr)) {
if (!__get_moffset(insn))
@@ -641,7 +664,6 @@ int insn_get_immediate(struct insn *insn)
}
if (!inat_has_immediate(insn->attr))
- /* no immediates */
goto done;
switch (inat_immediate_size(insn->attr)) {
@@ -703,11 +725,9 @@ int insn_get_length(struct insn *insn)
if (insn->length)
return 0;
- if (!insn->immediate.got) {
- ret = insn_get_immediate(insn);
- if (ret)
- return ret;
- }
+ ret = insn_get_immediate(insn);
+ if (ret)
+ return ret;
insn->length = (unsigned char)((unsigned long)insn->next_byte
- (unsigned long)insn->kaddr);
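
The guard removals repeated through this file rely on every insn_get_*() stage being idempotent: each one returns immediately once its 'got' flag is set, so a caller can invoke the previous stage unconditionally instead of testing the flag itself. A minimal, self-contained C model of that pattern (not the kernel's actual structures):

#include <stdbool.h>

struct dec {
	bool prefixes_got;
	bool opcode_got;
};

static int get_prefixes(struct dec *d)
{
	if (d->prefixes_got)
		return 0;		/* already decoded: cheap early return */
	/* ... decode prefixes ... */
	d->prefixes_got = true;
	return 0;
}

static int get_opcode(struct dec *d)
{
	int ret;

	if (d->opcode_got)
		return 0;
	ret = get_prefixes(d);		/* unconditional: the guard lives in the callee */
	if (ret)
		return ret;
	/* ... decode the opcode ... */
	d->opcode_got = true;
	return 0;
}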
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
deleted file mode 100644
index a1f9416bf67a..000000000000
--- a/arch/x86/lib/iomap_copy_64.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2006 PathScale, Inc. All Rights Reserved.
- */
-
-#include <linux/linkage.h>
-
-/*
- * override generic version in lib/iomap_copy.c
- */
-SYM_FUNC_START(__iowrite32_copy)
- movl %edx,%ecx
- rep movsd
- RET
-SYM_FUNC_END(__iowrite32_copy)
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 3e2f33fc33de..c20e04764edc 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -1,6 +1,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/kmsan-checks.h>
#define movs(type,to,from) \
asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
@@ -9,7 +10,7 @@
static __always_inline void rep_movs(void *to, const void *from, size_t n)
{
unsigned long d0, d1, d2;
- asm volatile("rep ; movsl\n\t"
+ asm volatile("rep movsl\n\t"
"testb $2,%b4\n\t"
"je 1f\n\t"
"movsw\n"
@@ -24,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{
+ const void *orig_to = to;
+ const size_t orig_n = n;
+
if (unlikely(!n))
return;
@@ -37,6 +41,8 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
n-=2;
}
rep_movs(to, (const void *)from, n);
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(orig_to, orig_n);
}
static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
@@ -44,6 +50,8 @@ static void string_memcpy_toio(volatile void __iomem *to, const void *from, size
if (unlikely(!n))
return;
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(from, n);
/* Align any unaligned destination IO */
if (unlikely(1 & (unsigned long)to)) {
movs("b", to, from);
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index a58f451a7dd3..b5893928d55c 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -8,7 +8,7 @@
*/
#include <asm/asm.h>
#include <asm/kaslr.h>
-#include <asm/msr.h>
+#include <asm/tsc.h>
#include <asm/archrandom.h>
#include <asm/e820/api.h>
#include <asm/shared/io.h>
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index ef3af7ff2c8a..a29b64befb93 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -17,190 +17,3 @@ __visible void *memset(void *s, int c, size_t count)
return __memset(s, c, count);
}
EXPORT_SYMBOL(memset);
-
-__visible void *memmove(void *dest, const void *src, size_t n)
-{
- int d0,d1,d2,d3,d4,d5;
- char *ret = dest;
-
- __asm__ __volatile__(
- /* Handle more 16 bytes in loop */
- "cmp $0x10, %0\n\t"
- "jb 1f\n\t"
-
- /* Decide forward/backward copy mode */
- "cmp %2, %1\n\t"
- "jb 2f\n\t"
-
- /*
- * movs instruction have many startup latency
- * so we handle small size by general register.
- */
- "cmp $680, %0\n\t"
- "jb 3f\n\t"
- /*
- * movs instruction is only good for aligned case.
- */
- "mov %1, %3\n\t"
- "xor %2, %3\n\t"
- "and $0xff, %3\n\t"
- "jz 4f\n\t"
- "3:\n\t"
- "sub $0x10, %0\n\t"
-
- /*
- * We gobble 16 bytes forward in each loop.
- */
- "3:\n\t"
- "sub $0x10, %0\n\t"
- "mov 0*4(%1), %3\n\t"
- "mov 1*4(%1), %4\n\t"
- "mov %3, 0*4(%2)\n\t"
- "mov %4, 1*4(%2)\n\t"
- "mov 2*4(%1), %3\n\t"
- "mov 3*4(%1), %4\n\t"
- "mov %3, 2*4(%2)\n\t"
- "mov %4, 3*4(%2)\n\t"
- "lea 0x10(%1), %1\n\t"
- "lea 0x10(%2), %2\n\t"
- "jae 3b\n\t"
- "add $0x10, %0\n\t"
- "jmp 1f\n\t"
-
- /*
- * Handle data forward by movs.
- */
- ".p2align 4\n\t"
- "4:\n\t"
- "mov -4(%1, %0), %3\n\t"
- "lea -4(%2, %0), %4\n\t"
- "shr $2, %0\n\t"
- "rep movsl\n\t"
- "mov %3, (%4)\n\t"
- "jmp 11f\n\t"
- /*
- * Handle data backward by movs.
- */
- ".p2align 4\n\t"
- "6:\n\t"
- "mov (%1), %3\n\t"
- "mov %2, %4\n\t"
- "lea -4(%1, %0), %1\n\t"
- "lea -4(%2, %0), %2\n\t"
- "shr $2, %0\n\t"
- "std\n\t"
- "rep movsl\n\t"
- "mov %3,(%4)\n\t"
- "cld\n\t"
- "jmp 11f\n\t"
-
- /*
- * Start to prepare for backward copy.
- */
- ".p2align 4\n\t"
- "2:\n\t"
- "cmp $680, %0\n\t"
- "jb 5f\n\t"
- "mov %1, %3\n\t"
- "xor %2, %3\n\t"
- "and $0xff, %3\n\t"
- "jz 6b\n\t"
-
- /*
- * Calculate copy position to tail.
- */
- "5:\n\t"
- "add %0, %1\n\t"
- "add %0, %2\n\t"
- "sub $0x10, %0\n\t"
-
- /*
- * We gobble 16 bytes backward in each loop.
- */
- "7:\n\t"
- "sub $0x10, %0\n\t"
-
- "mov -1*4(%1), %3\n\t"
- "mov -2*4(%1), %4\n\t"
- "mov %3, -1*4(%2)\n\t"
- "mov %4, -2*4(%2)\n\t"
- "mov -3*4(%1), %3\n\t"
- "mov -4*4(%1), %4\n\t"
- "mov %3, -3*4(%2)\n\t"
- "mov %4, -4*4(%2)\n\t"
- "lea -0x10(%1), %1\n\t"
- "lea -0x10(%2), %2\n\t"
- "jae 7b\n\t"
- /*
- * Calculate copy position to head.
- */
- "add $0x10, %0\n\t"
- "sub %0, %1\n\t"
- "sub %0, %2\n\t"
-
- /*
- * Move data from 8 bytes to 15 bytes.
- */
- ".p2align 4\n\t"
- "1:\n\t"
- "cmp $8, %0\n\t"
- "jb 8f\n\t"
- "mov 0*4(%1), %3\n\t"
- "mov 1*4(%1), %4\n\t"
- "mov -2*4(%1, %0), %5\n\t"
- "mov -1*4(%1, %0), %1\n\t"
-
- "mov %3, 0*4(%2)\n\t"
- "mov %4, 1*4(%2)\n\t"
- "mov %5, -2*4(%2, %0)\n\t"
- "mov %1, -1*4(%2, %0)\n\t"
- "jmp 11f\n\t"
-
- /*
- * Move data from 4 bytes to 7 bytes.
- */
- ".p2align 4\n\t"
- "8:\n\t"
- "cmp $4, %0\n\t"
- "jb 9f\n\t"
- "mov 0*4(%1), %3\n\t"
- "mov -1*4(%1, %0), %4\n\t"
- "mov %3, 0*4(%2)\n\t"
- "mov %4, -1*4(%2, %0)\n\t"
- "jmp 11f\n\t"
-
- /*
- * Move data from 2 bytes to 3 bytes.
- */
- ".p2align 4\n\t"
- "9:\n\t"
- "cmp $2, %0\n\t"
- "jb 10f\n\t"
- "movw 0*2(%1), %%dx\n\t"
- "movw -1*2(%1, %0), %%bx\n\t"
- "movw %%dx, 0*2(%2)\n\t"
- "movw %%bx, -1*2(%2, %0)\n\t"
- "jmp 11f\n\t"
-
- /*
- * Move data for 1 byte.
- */
- ".p2align 4\n\t"
- "10:\n\t"
- "cmp $1, %0\n\t"
- "jb 11f\n\t"
- "movb (%1), %%cl\n\t"
- "movb %%cl, (%2)\n\t"
- ".p2align 4\n\t"
- "11:"
- : "=&c" (d0), "=&S" (d1), "=&D" (d2),
- "=r" (d3),"=r" (d4), "=r"(d5)
- :"0" (n),
- "1" (src),
- "2" (dest)
- :"memory");
-
- return ret;
-
-}
-EXPORT_SYMBOL(memmove);
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index d0d7b9bc6cad..12a23fa7c44c 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,20 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */
+#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
-.pushsection .noinstr.text, "ax"
-
-/*
- * We build a jump to memcpy_orig by default which gets NOPped out on
- * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
- * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
- * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
- */
+.section .noinstr.text, "ax"
/*
* memcpy - Copy a memory block.
@@ -26,36 +20,30 @@
*
* Output:
* rax original destination
+ *
+ * The FSRM alternative should be done inline (avoiding the call and
+ * the disgusting return handling), but that would require some help
+ * from the compiler for better calling conventions.
+ *
+ * The 'rep movsb' itself is small enough to replace the call, but the
+ * two register moves blow up the code. And one of them is "needed"
+ * only for the return value that is the same as the source input,
+ * which the compiler could/should do much better anyway.
*/
-SYM_FUNC_START(__memcpy)
- ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
- "jmp memcpy_erms", X86_FEATURE_ERMS
+SYM_TYPED_FUNC_START(__memcpy)
+ ALTERNATIVE "jmp memcpy_orig", "", X86_FEATURE_FSRM
movq %rdi, %rax
movq %rdx, %rcx
- shrq $3, %rcx
- andl $7, %edx
- rep movsq
- movl %edx, %ecx
rep movsb
RET
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
-SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
+SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
+SYM_PIC_ALIAS(memcpy)
EXPORT_SYMBOL(memcpy)
-/*
- * memcpy_erms() - enhanced fast string memcpy. This is faster and
- * simpler than memcpy. Use memcpy_erms when possible.
- */
-SYM_FUNC_START_LOCAL(memcpy_erms)
- movq %rdi, %rax
- movq %rdx, %rcx
- rep movsb
- RET
-SYM_FUNC_END(memcpy_erms)
-
SYM_FUNC_START_LOCAL(memcpy_orig)
movq %rdi, %rax
@@ -183,4 +171,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
RET
SYM_FUNC_END(memcpy_orig)
-.popsection
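
As a rough illustration of the 'do it inline' idea the new comment alludes to, this is what an open-coded FSRM-style copy looks like with GCC/Clang inline assembly; a userspace sketch under the assumption of an FSRM-capable CPU, not the kernel's implementation:

#include <stddef.h>

/* Copy n bytes with a single 'rep movsb'.  The extra move only preserves
 * the memcpy-style return value, which is the overhead the comment above
 * complains about having to carry. */
static void *rep_movsb_copy(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (n)
		     : : "memory");
	return ret;
}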
diff --git a/arch/x86/lib/memmove_32.S b/arch/x86/lib/memmove_32.S
new file mode 100644
index 000000000000..35010ba3dd6f
--- /dev/null
+++ b/arch/x86/lib/memmove_32.S
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/export.h>
+#include <linux/linkage.h>
+
+SYM_FUNC_START(memmove)
+/*
+ * void *memmove(void *dest_in, const void *src_in, size_t n)
+ * -mregparm=3 passes these in registers:
+ * dest_in: %eax
+ * src_in: %edx
+ * n: %ecx
+ * See also: arch/x86/entry/calling.h for description of the calling convention.
+ *
+ * n can remain in %ecx, but for `rep movsl`, we'll need dest in %edi and src
+ * in %esi.
+ */
+.set dest_in, %eax
+.set dest, %edi
+.set src_in, %edx
+.set src, %esi
+.set n, %ecx
+.set tmp0, %edx
+.set tmp0w, %dx
+.set tmp1, %ebx
+.set tmp1w, %bx
+.set tmp2, %eax
+.set tmp3b, %cl
+
+/*
+ * Save all callee-saved registers, because this function is going to clobber
+ * all of them:
+ */
+ pushl %ebp
+ movl %esp, %ebp // set standard frame pointer
+
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax // save 'dest_in' parameter [eax] as the return value
+
+ movl src_in, src
+ movl dest_in, dest
+
+	/* Handle copies of 16 bytes or more in the loops below */
+ cmpl $0x10, n
+ jb .Lmove_16B
+
+ /* Decide forward/backward copy mode */
+ cmpl dest, src
+ jb .Lbackwards_header
+
+ /*
+	 * The movs instruction has a high startup latency,
+	 * so we handle small sizes with general-purpose registers.
+ */
+ cmpl $680, n
+ jb .Ltoo_small_forwards
+	/* The movs instruction is only good for the aligned case. */
+ movl src, tmp0
+ xorl dest, tmp0
+ andl $0xff, tmp0
+ jz .Lforward_movs
+.Ltoo_small_forwards:
+ subl $0x10, n
+
+ /* We gobble 16 bytes forward in each loop. */
+.Lmove_16B_forwards_loop:
+ subl $0x10, n
+ movl 0*4(src), tmp0
+ movl 1*4(src), tmp1
+ movl tmp0, 0*4(dest)
+ movl tmp1, 1*4(dest)
+ movl 2*4(src), tmp0
+ movl 3*4(src), tmp1
+ movl tmp0, 2*4(dest)
+ movl tmp1, 3*4(dest)
+ leal 0x10(src), src
+ leal 0x10(dest), dest
+ jae .Lmove_16B_forwards_loop
+ addl $0x10, n
+ jmp .Lmove_16B
+
+ /* Handle data forward by movs. */
+.p2align 4
+.Lforward_movs:
+ movl -4(src, n), tmp0
+ leal -4(dest, n), tmp1
+ shrl $2, n
+ rep movsl
+ movl tmp0, (tmp1)
+ jmp .Ldone
+
+ /* Handle data backward by movs. */
+.p2align 4
+.Lbackwards_movs:
+ movl (src), tmp0
+ movl dest, tmp1
+ leal -4(src, n), src
+ leal -4(dest, n), dest
+ shrl $2, n
+ std
+ rep movsl
+ movl tmp0,(tmp1)
+ cld
+ jmp .Ldone
+
+ /* Start to prepare for backward copy. */
+.p2align 4
+.Lbackwards_header:
+ cmpl $680, n
+ jb .Ltoo_small_backwards
+ movl src, tmp0
+ xorl dest, tmp0
+ andl $0xff, tmp0
+ jz .Lbackwards_movs
+
+ /* Calculate copy position to tail. */
+.Ltoo_small_backwards:
+ addl n, src
+ addl n, dest
+ subl $0x10, n
+
+ /* We gobble 16 bytes backward in each loop. */
+.Lmove_16B_backwards_loop:
+ subl $0x10, n
+
+ movl -1*4(src), tmp0
+ movl -2*4(src), tmp1
+ movl tmp0, -1*4(dest)
+ movl tmp1, -2*4(dest)
+ movl -3*4(src), tmp0
+ movl -4*4(src), tmp1
+ movl tmp0, -3*4(dest)
+ movl tmp1, -4*4(dest)
+ leal -0x10(src), src
+ leal -0x10(dest), dest
+ jae .Lmove_16B_backwards_loop
+ /* Calculate copy position to head. */
+ addl $0x10, n
+ subl n, src
+ subl n, dest
+
+ /* Move data from 8 bytes to 15 bytes. */
+.p2align 4
+.Lmove_16B:
+ cmpl $8, n
+ jb .Lmove_8B
+ movl 0*4(src), tmp0
+ movl 1*4(src), tmp1
+ movl -2*4(src, n), tmp2
+ movl -1*4(src, n), src
+
+ movl tmp0, 0*4(dest)
+ movl tmp1, 1*4(dest)
+ movl tmp2, -2*4(dest, n)
+ movl src, -1*4(dest, n)
+ jmp .Ldone
+
+ /* Move data from 4 bytes to 7 bytes. */
+.p2align 4
+.Lmove_8B:
+ cmpl $4, n
+ jb .Lmove_4B
+ movl 0*4(src), tmp0
+ movl -1*4(src, n), tmp1
+ movl tmp0, 0*4(dest)
+ movl tmp1, -1*4(dest, n)
+ jmp .Ldone
+
+ /* Move data from 2 bytes to 3 bytes. */
+.p2align 4
+.Lmove_4B:
+ cmpl $2, n
+ jb .Lmove_1B
+ movw 0*2(src), tmp0w
+ movw -1*2(src, n), tmp1w
+ movw tmp0w, 0*2(dest)
+ movw tmp1w, -1*2(dest, n)
+ jmp .Ldone
+
+ /* Move data for 1 byte. */
+.p2align 4
+.Lmove_1B:
+ cmpl $1, n
+ jb .Ldone
+ movb (src), tmp3b
+ movb tmp3b, (dest)
+.p2align 4
+.Ldone:
+ popl dest_in // restore 'dest_in' [eax] as the return value
+ /* Restore all callee-saved registers: */
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+
+ RET
+SYM_FUNC_END(memmove)
+EXPORT_SYMBOL(memmove)
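
The up-front direction choice in this new file (copy forward unless the source starts below the destination) is the standard memmove overlap rule; a byte-wise C model of just that decision, for reference rather than as a replacement for the optimized routine:

#include <stddef.h>

void *memmove_model(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (s >= d) {		/* forward copy cannot clobber unread source bytes */
		while (n--)
			*d++ = *s++;
	} else {		/* source below destination: copy backward */
		while (n--)
			d[n] = s[n];
	}
	return dest;
}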
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 724bbf83eb5b..aa1f92ee6b2e 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,13 +6,16 @@
* This assembly file is re-written from memmove_64.c file.
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/
+#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
#undef memmove
+.section .noinstr.text, "ax"
+
/*
* Implement memmove(). This can handle overlap between src and dst.
*
@@ -24,7 +27,7 @@
* Output:
* rax: dest
*/
-SYM_FUNC_START(__memmove)
+SYM_TYPED_FUNC_START(__memmove)
mov %rdi, %rax
@@ -36,10 +39,12 @@ SYM_FUNC_START(__memmove)
cmp %rdi, %r8
jg 2f
- /* FSRM implies ERMS => no length checks, do the copy directly */
+#define CHECK_LEN cmp $0x20, %rdx; jb 1f
+#define MEMMOVE_BYTES movq %rdx, %rcx; rep movsb; RET
.Lmemmove_begin_forward:
- ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
- ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS
+ ALTERNATIVE_2 __stringify(CHECK_LEN), \
+ __stringify(CHECK_LEN; MEMMOVE_BYTES), X86_FEATURE_ERMS, \
+ __stringify(MEMMOVE_BYTES), X86_FEATURE_FSRM
/*
* movsq instruction have many startup latency
@@ -205,13 +210,8 @@ SYM_FUNC_START(__memmove)
movb %r11b, (%rdi)
13:
RET
-
-.Lmemmove_erms:
- movq %rdx, %rcx
- rep movsb
- RET
SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)
-SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
+SYM_FUNC_ALIAS_MEMFUNC(memmove, __memmove)
EXPORT_SYMBOL(memmove)
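
The ALTERNATIVE_2 above is resolved by boot-time patching, but its effect reads most easily as a three-way dispatch. A small, self-contained model of which path the forward-copy entry takes, with the CPU feature flags passed as plain parameters purely for illustration:

#include <stddef.h>

enum fwd_path { PATH_REP_MOVSB, PATH_SMALL_COPY, PATH_MOVSQ_LOOP };

/* FSRM: 'rep movsb' for any length.  ERMS: 'rep movsb' only for copies of
 * at least 32 bytes, shorter ones take the small-copy tail.  Baseline:
 * short copies take the tail, long ones the original movsq loop. */
static enum fwd_path forward_copy_path(size_t len, int has_fsrm, int has_erms)
{
	if (has_fsrm)
		return PATH_REP_MOVSB;		/* MEMMOVE_BYTES only */
	if (len < 0x20)
		return PATH_SMALL_COPY;		/* CHECK_LEN branch taken */
	return has_erms ? PATH_REP_MOVSB : PATH_MOVSQ_LOOP;
}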
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index fc9ffd3ff3b2..fb5a03cf5ab7 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,10 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */
+#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
+
+.section .noinstr.text, "ax"
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
@@ -16,56 +19,32 @@
* rdx count (bytes)
*
* rax original destination
+ *
+ * The FSRS alternative should be done inline (avoiding the call and
+ * the disgusting return handling), but that would require some help
+ * from the compiler for better calling conventions.
+ *
+ * The 'rep stosb' itself is small enough to replace the call, but all
+ * the register moves blow up the code. And two of them are "needed"
+ * only for the return value that is the same as the source input,
+ * which the compiler could/should do much better anyway.
*/
-SYM_FUNC_START(__memset)
- /*
- * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
- * to use it when possible. If not available, use fast string instructions.
- *
- * Otherwise, use original memset function.
- */
- ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
- "jmp memset_erms", X86_FEATURE_ERMS
+SYM_TYPED_FUNC_START(__memset)
+ ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
movq %rdi,%r9
+ movb %sil,%al
movq %rdx,%rcx
- andl $7,%edx
- shrq $3,%rcx
- /* expand byte value */
- movzbl %sil,%esi
- movabs $0x0101010101010101,%rax
- imulq %rsi,%rax
- rep stosq
- movl %edx,%ecx
rep stosb
movq %r9,%rax
RET
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
-SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
+SYM_PIC_ALIAS(memset)
EXPORT_SYMBOL(memset)
-/*
- * ISO C memset - set a memory block to a byte value. This function uses
- * enhanced rep stosb to override the fast string function.
- * The code is simpler and shorter than the fast string function as well.
- *
- * rdi destination
- * rsi value (char)
- * rdx count (bytes)
- *
- * rax original destination
- */
-SYM_FUNC_START_LOCAL(memset_erms)
- movq %rdi,%r9
- movb %sil,%al
- movq %rdx,%rcx
- rep stosb
- movq %r9,%rax
- RET
-SYM_FUNC_END(memset_erms)
-
SYM_FUNC_START_LOCAL(memset_orig)
movq %rdi,%r10
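
The removed byte-expansion (movzbl plus the 0x0101010101010101 multiply) is unnecessary once the fill goes through 'rep stosb', which takes the byte directly in %al. A userspace inline-asm sketch of that FSRS-style fill, kept deliberately minimal:

#include <stddef.h>

static void *rep_stosb_fill(void *dst, int c, size_t n)
{
	void *ret = dst;

	/* 'rep stosb' stores AL n times; no 8-byte pattern expansion needed */
	asm volatile("rep stosb"
		     : "+D" (dst), "+c" (n)
		     : "a" (c)
		     : "memory");
	return ret;
}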
diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c
index a018ec4fba53..40b81c338ae5 100644
--- a/arch/x86/lib/misc.c
+++ b/arch/x86/lib/misc.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <asm/misc.h>
+
/*
* Count the digits of @val including a possible sign.
*
@@ -6,7 +8,7 @@
*/
int num_digits(int val)
{
- int m = 10;
+ long long m = 10;
int d = 1;
if (val < 0) {
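
The widening of 'm' matters because counting a 10-digit value needs m to reach 10^10, which overflows a 32-bit int. A userspace model of the fixed function, assuming the usual "while (val >= m) m *= 10" loop since the rest of the body is not visible in this hunk:

#include <stdio.h>

static int num_digits_model(int val)
{
	long long m = 10;	/* 64-bit so m can legally reach 10^10 */
	int d = 1;

	if (val < 0) {
		d++;		/* count the sign */
		val = -val;	/* note: this sketch is not valid for INT_MIN */
	}
	while (val >= m) {
		m *= 10;
		d++;
	}
	return d;
}

int main(void)
{
	printf("%d\n", num_digits_model(2147483647));	/* 10 */
	printf("%d\n", num_digits_model(-54321));	/* 6, including the sign */
	return 0;
}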
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index ebd259f31496..5ef8494896e8 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/errno.h>
+#include <linux/cfi_types.h>
#include <asm/asm.h>
#include <asm/msr.h>
@@ -12,7 +13,7 @@
*
*/
.macro op_safe_regs op
-SYM_FUNC_START(\op\()_safe_regs)
+SYM_TYPED_FUNC_START(\op\()_safe_regs)
pushq %rbx
pushq %r12
movq %rdi, %r10 /* Save pointer */
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 40bbe56bde32..b8f63419e6ae 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -9,10 +9,9 @@ static void __rdmsr_on_cpu(void *info)
{
struct msr_info *rv = info;
struct msr *reg;
- int this_cpu = raw_smp_processor_id();
if (rv->msrs)
- reg = per_cpu_ptr(rv->msrs, this_cpu);
+ reg = this_cpu_ptr(rv->msrs);
else
reg = &rv->reg;
@@ -23,10 +22,9 @@ static void __wrmsr_on_cpu(void *info)
{
struct msr_info *rv = info;
struct msr *reg;
- int this_cpu = raw_smp_processor_id();
if (rv->msrs)
- reg = per_cpu_ptr(rv->msrs, this_cpu);
+ reg = this_cpu_ptr(rv->msrs);
else
reg = &rv->reg;
@@ -49,7 +47,7 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
}
EXPORT_SYMBOL(rdmsr_on_cpu);
-int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
int err;
struct msr_info rv;
@@ -62,7 +60,7 @@ int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
return err;
}
-EXPORT_SYMBOL(rdmsrl_on_cpu);
+EXPORT_SYMBOL(rdmsrq_on_cpu);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
@@ -80,7 +78,7 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
}
EXPORT_SYMBOL(wrmsr_on_cpu);
-int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
int err;
struct msr_info rv;
@@ -94,10 +92,10 @@ int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
return err;
}
-EXPORT_SYMBOL(wrmsrl_on_cpu);
+EXPORT_SYMBOL(wrmsrq_on_cpu);
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
- struct msr *msrs,
+ struct msr __percpu *msrs,
void (*msr_func) (void *info))
{
struct msr_info rv;
@@ -124,7 +122,7 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
* @msrs: array of MSR values
*
*/
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
@@ -138,7 +136,7 @@ EXPORT_SYMBOL(rdmsr_on_cpus);
* @msrs: array of MSR values
*
*/
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
@@ -206,7 +204,7 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
-int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
int err;
struct msr_info rv;
@@ -220,9 +218,9 @@ int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
return err ? err : rv.err;
}
-EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+EXPORT_SYMBOL(wrmsrq_safe_on_cpu);
-int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
u32 low, high;
int err;
@@ -232,7 +230,7 @@ int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
return err;
}
-EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+EXPORT_SYMBOL(rdmsrq_safe_on_cpu);
/*
* These variants are significantly slower, but allows control over
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index b09cd2ad426c..4ef7c6dcbea6 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -6,9 +6,9 @@
#define CREATE_TRACE_POINTS
#include <asm/msr-trace.h>
-struct msr *msrs_alloc(void)
+struct msr __percpu *msrs_alloc(void)
{
- struct msr *msrs = NULL;
+ struct msr __percpu *msrs = NULL;
msrs = alloc_percpu(struct msr);
if (!msrs) {
@@ -20,28 +20,28 @@ struct msr *msrs_alloc(void)
}
EXPORT_SYMBOL(msrs_alloc);
-void msrs_free(struct msr *msrs)
+void msrs_free(struct msr __percpu *msrs)
{
free_percpu(msrs);
}
EXPORT_SYMBOL(msrs_free);
/**
- * Read an MSR with error handling
- *
+ * msr_read - Read an MSR with error handling
* @msr: MSR to read
* @m: value to read into
*
* It returns read data only on success, otherwise it doesn't change the output
* argument @m.
*
+ * Return: %0 for success, otherwise an error code
*/
static int msr_read(u32 msr, struct msr *m)
{
int err;
u64 val;
- err = rdmsrl_safe(msr, &val);
+ err = rdmsrq_safe(msr, &val);
if (!err)
m->q = val;
@@ -49,14 +49,16 @@ static int msr_read(u32 msr, struct msr *m)
}
/**
- * Write an MSR with error handling
+ * msr_write - Write an MSR with error handling
*
* @msr: MSR to write
* @m: value to write
+ *
+ * Return: %0 for success, otherwise an error code
*/
static int msr_write(u32 msr, struct msr *m)
{
- return wrmsrl_safe(msr, m->q);
+ return wrmsrq_safe(msr, m->q);
}
static inline int __flip_bit(u32 msr, u8 bit, bool set)
@@ -88,49 +90,55 @@ static inline int __flip_bit(u32 msr, u8 bit, bool set)
}
/**
- * Set @bit in a MSR @msr.
+ * msr_set_bit - Set @bit in a MSR @msr.
+ * @msr: MSR to write
+ * @bit: bit number to set
*
- * Retval:
- * < 0: An error was encountered.
- * = 0: Bit was already set.
- * > 0: Hardware accepted the MSR write.
+ * Return:
+ * * < 0: An error was encountered.
+ * * = 0: Bit was already set.
+ * * > 0: Hardware accepted the MSR write.
*/
int msr_set_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, true);
}
+EXPORT_SYMBOL_GPL(msr_set_bit);
/**
- * Clear @bit in a MSR @msr.
+ * msr_clear_bit - Clear @bit in a MSR @msr.
+ * @msr: MSR to write
+ * @bit: bit number to clear
*
- * Retval:
- * < 0: An error was encountered.
- * = 0: Bit was already cleared.
- * > 0: Hardware accepted the MSR write.
+ * Return:
+ * * < 0: An error was encountered.
+ * * = 0: Bit was already cleared.
+ * * > 0: Hardware accepted the MSR write.
*/
int msr_clear_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, false);
}
+EXPORT_SYMBOL_GPL(msr_clear_bit);
#ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned int msr, u64 val, int failed)
+void do_trace_write_msr(u32 msr, u64 val, int failed)
{
trace_write_msr(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_write_msr);
EXPORT_TRACEPOINT_SYMBOL(write_msr);
-void do_trace_read_msr(unsigned int msr, u64 val, int failed)
+void do_trace_read_msr(u32 msr, u64 val, int failed)
{
trace_read_msr(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_read_msr);
EXPORT_TRACEPOINT_SYMBOL(read_msr);
-void do_trace_rdpmc(unsigned counter, u64 val, int failed)
+void do_trace_rdpmc(u32 msr, u64 val, int failed)
{
- trace_rdpmc(counter, val, failed);
+ trace_rdpmc(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_rdpmc);
EXPORT_TRACEPOINT_SYMBOL(rdpmc);
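
With msr_set_bit()/msr_clear_bit() now exported (GPL), modules can use them as well; a hedged usage sketch following the documented tri-state return convention (MSR_SOME_FEATURE and SOME_BIT are placeholder names, not real definitions):

	int ret = msr_set_bit(MSR_SOME_FEATURE, SOME_BIT);

	if (ret < 0)
		pr_warn("MSR write failed: %d\n", ret);
	else if (ret == 0)
		pr_debug("bit was already set\n");
	else
		pr_debug("bit set, MSR write accepted\n");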
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index b7dfd60243b7..46d9e9b98a61 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -11,13 +11,13 @@
* return an error value in addition to the "real"
* return value.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>
-#include <asm/export.h>
-
/*
* __put_user_X
@@ -33,22 +33,21 @@
* as they get called from within inline assembly.
*/
-#ifdef CONFIG_X86_5LEVEL
-#define LOAD_TASK_SIZE_MINUS_N(n) \
- ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rbx), \
- __stringify(mov $((1 << 56) - 4096 - (n)),%rbx), X86_FEATURE_LA57
-#else
-#define LOAD_TASK_SIZE_MINUS_N(n) \
- mov $(TASK_SIZE_MAX - (n)),%_ASM_BX
-#endif
+.macro check_range size:req
+.if IS_ENABLED(CONFIG_X86_64)
+ mov %rcx, %rbx
+ sar $63, %rbx
+ or %rbx, %rcx
+.else
+ cmp $TASK_SIZE_MAX-\size+1, %ecx
+ jae .Lbad_put_user
+.endif
+.endm
.text
SYM_FUNC_START(__put_user_1)
- LOAD_TASK_SIZE_MINUS_N(0)
- cmp %_ASM_BX,%_ASM_CX
- jae .Lbad_put_user
-SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
- ENDBR
+ ANNOTATE_NOENDBR
+ check_range size=1
ASM_STAC
1: movb %al,(%_ASM_CX)
xor %ecx,%ecx
@@ -56,67 +55,102 @@ SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
RET
SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
+
+SYM_FUNC_START(__put_user_nocheck_1)
+ ANNOTATE_NOENDBR
+ ASM_STAC
+2: movb %al,(%_ASM_CX)
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_1)
EXPORT_SYMBOL(__put_user_nocheck_1)
SYM_FUNC_START(__put_user_2)
- LOAD_TASK_SIZE_MINUS_N(1)
- cmp %_ASM_BX,%_ASM_CX
- jae .Lbad_put_user
-SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
- ENDBR
+ ANNOTATE_NOENDBR
+ check_range size=2
ASM_STAC
-2: movw %ax,(%_ASM_CX)
+3: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
+
+SYM_FUNC_START(__put_user_nocheck_2)
+ ANNOTATE_NOENDBR
+ ASM_STAC
+4: movw %ax,(%_ASM_CX)
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_2)
EXPORT_SYMBOL(__put_user_nocheck_2)
SYM_FUNC_START(__put_user_4)
- LOAD_TASK_SIZE_MINUS_N(3)
- cmp %_ASM_BX,%_ASM_CX
- jae .Lbad_put_user
-SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
- ENDBR
+ ANNOTATE_NOENDBR
+ check_range size=4
ASM_STAC
-3: movl %eax,(%_ASM_CX)
+5: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
+
+SYM_FUNC_START(__put_user_nocheck_4)
+ ANNOTATE_NOENDBR
+ ASM_STAC
+6: movl %eax,(%_ASM_CX)
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_4)
EXPORT_SYMBOL(__put_user_nocheck_4)
SYM_FUNC_START(__put_user_8)
- LOAD_TASK_SIZE_MINUS_N(7)
- cmp %_ASM_BX,%_ASM_CX
- jae .Lbad_put_user
-SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
- ENDBR
+ ANNOTATE_NOENDBR
+ check_range size=8
ASM_STAC
-4: mov %_ASM_AX,(%_ASM_CX)
+7: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
-5: movl %edx,4(%_ASM_CX)
+8: movl %edx,4(%_ASM_CX)
#endif
xor %ecx,%ecx
ASM_CLAC
RET
SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
+
+SYM_FUNC_START(__put_user_nocheck_8)
+ ANNOTATE_NOENDBR
+ ASM_STAC
+9: mov %_ASM_AX,(%_ASM_CX)
+#ifdef CONFIG_X86_32
+10: movl %edx,4(%_ASM_CX)
+#endif
+ xor %ecx,%ecx
+ ASM_CLAC
+ RET
+SYM_FUNC_END(__put_user_nocheck_8)
EXPORT_SYMBOL(__put_user_nocheck_8)
-SYM_CODE_START_LOCAL(.Lbad_put_user_clac)
+SYM_CODE_START_LOCAL(__put_user_handle_exception)
ASM_CLAC
.Lbad_put_user:
movl $-EFAULT,%ecx
RET
-SYM_CODE_END(.Lbad_put_user_clac)
+SYM_CODE_END(__put_user_handle_exception)
- _ASM_EXTABLE_UA(1b, .Lbad_put_user_clac)
- _ASM_EXTABLE_UA(2b, .Lbad_put_user_clac)
- _ASM_EXTABLE_UA(3b, .Lbad_put_user_clac)
- _ASM_EXTABLE_UA(4b, .Lbad_put_user_clac)
+ _ASM_EXTABLE_UA(1b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(2b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(3b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(4b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(5b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(6b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(7b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(9b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
- _ASM_EXTABLE_UA(5b, .Lbad_put_user_clac)
+ _ASM_EXTABLE_UA(8b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(10b, __put_user_handle_exception)
#endif
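
On 64-bit, check_range no longer compares against TASK_SIZE_MAX at all: it replicates bit 63 of the user pointer across the register and ORs it back in, so any kernel-half address becomes an all-ones, non-canonical pointer that faults instead of ever touching kernel memory. A small userspace model of that address transformation, for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint64_t force_user_ptr(uint64_t ptr)
{
	/* 0 for user-half pointers, all-ones for kernel-half pointers */
	uint64_t sign = (uint64_t)((int64_t)ptr >> 63);

	return ptr | sign;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)force_user_ptr(0x00007fffdeadbeefULL)); /* unchanged */
	printf("%#llx\n", (unsigned long long)force_user_ptr(0xffff888012345678ULL)); /* ~0 */
	return 0;
}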
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 073289a55f84..d78d769a02bd 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -1,28 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
+#include <asm/asm-offsets.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
+#include <asm/percpu.h>
#include <asm/frame.h>
+#include <asm/nops.h>
- .section .text.__x86.indirect_thunk
+ .section .text..__x86.indirect_thunk
-.macro RETPOLINE reg
+
+.macro POLINE reg
ANNOTATE_INTRA_FUNCTION_CALL
call .Ldo_rop_\@
-.Lspec_trap_\@:
- UNWIND_HINT_EMPTY
- pause
- lfence
- jmp .Lspec_trap_\@
+ int3
.Ldo_rop_\@:
mov %\reg, (%_ASM_SP)
UNWIND_HINT_FUNC
+.endm
+
+.macro RETPOLINE reg
+ POLINE \reg
RET
.endm
@@ -30,12 +34,13 @@
.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
- UNWIND_HINT_EMPTY
+ UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
ALTERNATIVE_2 __stringify(RETPOLINE \reg), \
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_LFENCE, \
__stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), ALT_NOT(X86_FEATURE_RETPOLINE)
+SYM_PIC_ALIAS(__x86_indirect_thunk_\reg)
.endm
@@ -52,7 +57,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
*/
#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
-#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)
@@ -64,46 +68,225 @@ SYM_CODE_START(__x86_indirect_thunk_array)
.align RETPOLINE_THUNK_SIZE
SYM_CODE_END(__x86_indirect_thunk_array)
-#define GEN(reg) EXPORT_THUNK(reg)
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
+.macro CALL_THUNK reg
+ .align RETPOLINE_THUNK_SIZE
+
+SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+
+ CALL_DEPTH_ACCOUNT
+ POLINE \reg
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+.endm
+
+ .align RETPOLINE_THUNK_SIZE
+SYM_CODE_START(__x86_indirect_call_thunk_array)
+
+#define GEN(reg) CALL_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+ .align RETPOLINE_THUNK_SIZE
+SYM_CODE_END(__x86_indirect_call_thunk_array)
+
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_call_thunk_ ## reg)
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+.macro JUMP_THUNK reg
+ .align RETPOLINE_THUNK_SIZE
+
+SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+ POLINE \reg
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+.endm
+
+ .align RETPOLINE_THUNK_SIZE
+SYM_CODE_START(__x86_indirect_jump_thunk_array)
+
+#define GEN(reg) JUMP_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN
+ .align RETPOLINE_THUNK_SIZE
+SYM_CODE_END(__x86_indirect_jump_thunk_array)
+
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_jump_thunk_ ## reg)
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+#endif
+
+#ifdef CONFIG_MITIGATION_RETHUNK
+
/*
- * This function name is magical and is used by -mfunction-return=thunk-extern
- * for the compiler to generate JMPs to it.
+ * Be careful here: that label cannot really be removed because in
+ * some configurations and toolchains, the JMP __x86_return_thunk the
+ * compiler issues is either a short one or the compiler doesn't use
+ * relocations for same-section JMPs and that breaks the returns
+ * detection logic in apply_returns() and in objtool.
*/
-#ifdef CONFIG_RETHUNK
+ .section .text..__x86.return_thunk
+
+#ifdef CONFIG_MITIGATION_SRSO
+
+/*
+ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+ * special addresses:
+ *
+ * - srso_alias_untrain_ret() is 2M aligned
+ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+ * and 20 in its virtual address are set (while those bits in the
+ * srso_alias_untrain_ret() function are cleared).
+ *
+ * This guarantees that those two addresses will alias in the branch
+ * target buffer of Zen3/4 generations, leading to any potential
+ * poisoned entries at that BTB slot to get evicted.
+ *
+ * As a result, srso_alias_safe_ret() becomes a safe return.
+ */
+ .pushsection .text..__x86.rethunk_untrain
+SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ ASM_NOP2
+ lfence
+ jmp srso_alias_return_thunk
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
+ .popsection
+
+ .pushsection .text..__x86.rethunk_safe
+SYM_CODE_START_NOALIGN(srso_alias_safe_ret)
+ lea 8(%_ASM_SP), %_ASM_SP
+ UNWIND_HINT_FUNC
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_FUNC_END(srso_alias_safe_ret)
- .section .text.__x86.return_thunk
+SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ call srso_alias_safe_ret
+ ud2
+SYM_CODE_END(srso_alias_return_thunk)
+ .popsection
+
+/*
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+ .align 64
+ .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_CODE_START_LOCAL_NOALIGN(srso_untrain_ret)
+ ANNOTATE_NOENDBR
+ .byte 0x48, 0xb8
+
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+ lea 8(%_ASM_SP), %_ASM_SP
+ ret
+ int3
+ int3
+ /* end of movabs */
+ lfence
+ call srso_safe_ret
+ ud2
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+
+SYM_CODE_START(srso_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ call srso_safe_ret
+ ud2
+SYM_CODE_END(srso_return_thunk)
+
+#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
+#else /* !CONFIG_MITIGATION_SRSO */
+/* Dummy for the alternative in CALL_UNTRAIN_RET. */
+SYM_CODE_START(srso_alias_untrain_ret)
+ ANNOTATE_UNRET_SAFE
+ ANNOTATE_NOENDBR
+ ret
+ int3
+SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
+#define JMP_SRSO_UNTRAIN_RET "ud2"
+#endif /* CONFIG_MITIGATION_SRSO */
+
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
+
+/*
+ * Some generic notes on the untraining sequences:
+ *
+ * They are interchangeable when it comes to flushing potentially wrong
+ * RET predictions from the BTB.
+ *
+ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
+ * Retbleed sequence because the return sequence done there
+ * (srso_safe_ret()) is longer and the return sequence must fully nest
+ * (end before) the untraining sequence. Therefore, the untraining
+ * sequence must fully overlap the return sequence.
+ *
+ * Regarding alignment - the instructions which need to be untrained,
+ * must all start at a cacheline boundary for Zen1/2 generations. That
+ * is, instruction sequences starting at srso_safe_ret() and
+ * the respective instruction sequences at retbleed_return_thunk()
+ * must start at a cacheline boundary.
+ */
/*
* Safety details here pertain to the AMD Zen{1,2} microarchitecture:
- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
* alignment within the BTB.
- * 2) The instruction at zen_untrain_ret must contain, and not
+ * 2) The instruction at retbleed_untrain_ret must contain, and not
* end with, the 0xc3 byte of the RET.
* 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
* from re-poisioning the BTB prediction.
*/
.align 64
- .skip 63, 0xcc
-SYM_FUNC_START_NOALIGN(zen_untrain_ret);
-
+ .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
+SYM_CODE_START_LOCAL_NOALIGN(retbleed_untrain_ret)
+ ANNOTATE_NOENDBR
/*
- * As executed from zen_untrain_ret, this is:
+ * As executed from retbleed_untrain_ret, this is:
*
* TEST $0xcc, %bl
* LFENCE
- * JMP __x86_return_thunk
+ * JMP retbleed_return_thunk
*
* Executing the TEST instruction has a side effect of evicting any BTB
* prediction (potentially attacker controlled) attached to the RET, as
- * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
+ * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
*/
.byte 0xf6
/*
- * As executed from __x86_return_thunk, this is a plain RET.
+ * As executed from retbleed_return_thunk, this is a plain RET.
*
* As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
*
@@ -115,13 +298,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret);
* With SMT enabled and STIBP active, a sibling thread cannot poison
* RET's prediction to a type of its choice, but can evict the
* prediction due to competitive sharing. If the prediction is
- * evicted, __x86_return_thunk will suffer Straight Line Speculation
+ * evicted, retbleed_return_thunk will suffer Straight Line Speculation
* which will be contained safely by the INT3.
*/
-SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL)
+SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
ret
int3
-SYM_CODE_END(__x86_return_thunk)
+SYM_CODE_END(retbleed_return_thunk)
/*
* Ensure the TEST decoding / BTB invalidation is complete.
@@ -132,11 +315,135 @@ SYM_CODE_END(__x86_return_thunk)
* Jump back and execute the RET in the middle of the TEST instruction.
* INT3 is for SLS protection.
*/
- jmp __x86_return_thunk
+ jmp retbleed_return_thunk
+ int3
+SYM_FUNC_END(retbleed_untrain_ret)
+
+#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
+#else /* !CONFIG_MITIGATION_UNRET_ENTRY */
+#define JMP_RETBLEED_UNTRAIN_RET "ud2"
+#endif /* CONFIG_MITIGATION_UNRET_ENTRY */
+
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
+
+SYM_FUNC_START(entry_untrain_ret)
+ ANNOTATE_NOENDBR
+ ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
+#endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_MITIGATION_SRSO */
+
+#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
+
+ .align 64
+SYM_FUNC_START(call_depth_return_thunk)
+ ANNOTATE_NOENDBR
+ /*
+ * Keep the hotpath in a 16byte I-fetch for the non-debug
+ * case.
+ */
+ CALL_THUNKS_DEBUG_INC_RETS
+ shlq $5, PER_CPU_VAR(__x86_call_depth)
+ jz 1f
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+1:
+ CALL_THUNKS_DEBUG_INC_STUFFS
+ .rept 16
+ ANNOTATE_INTRA_FUNCTION_CALL
+ call 2f
int3
-SYM_FUNC_END(zen_untrain_ret)
-__EXPORT_THUNK(zen_untrain_ret)
+2:
+ .endr
+ add $(8*16), %rsp
+
+ CREDIT_CALL_DEPTH
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_FUNC_END(call_depth_return_thunk)
+
+#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
+
+#ifdef CONFIG_MITIGATION_ITS
+
+.macro ITS_THUNK reg
+
+/*
+ * If CFI paranoid is used then the ITS thunk starts with opcodes (0xea; jne 1b)
+ * that complete the fineibt_paranoid caller sequence.
+ */
+1: .byte 0xea
+SYM_INNER_LABEL(__x86_indirect_paranoid_thunk_\reg, SYM_L_GLOBAL)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+ jne 1b
+SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%\reg
+ int3
+ .align 32, 0xcc /* fill to the end of the line */
+ .skip 32 - (__x86_indirect_its_thunk_\reg - 1b), 0xcc /* skip to the next upper half */
+.endm
+
+/* ITS mitigation requires thunks be aligned to upper half of cacheline */
+.align 64, 0xcc
+.skip 29, 0xcc
+
+#define GEN(reg) ITS_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+ .align 64, 0xcc
+SYM_FUNC_ALIAS(__x86_indirect_its_thunk_array, __x86_indirect_its_thunk_rax)
+SYM_CODE_END(__x86_indirect_its_thunk_array)
+
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ *
+ * This code is only used during kernel boot or module init. All
+ * 'JMP __x86_return_thunk' sites are changed to something else by
+ * apply_returns().
+ *
+ * The ALTERNATIVE below adds a really loud warning to catch the case
+ * where the insufficient default return thunk ends up getting used for
+ * whatever reason like miscompilation or failure of
+ * objtool/alternatives/etc to patch all the return sites.
+ */
+SYM_CODE_START(__x86_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
+ defined(CONFIG_MITIGATION_SRSO) || \
+ defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
+ ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
+ "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
+#else
+ ANNOTATE_UNRET_SAFE
+ ret
+#endif
+ int3
+SYM_CODE_END(__x86_return_thunk)
+SYM_PIC_ALIAS(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
-#endif /* CONFIG_RETHUNK */
+#endif /* CONFIG_MITIGATION_RETHUNK */
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 53b3f202267c..f87ec24fa579 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -40,8 +40,7 @@ char *strncpy(char *dest, const char *src, size_t count)
"stosb\n\t"
"testb %%al,%%al\n\t"
"jne 1b\n\t"
- "rep\n\t"
- "stosb\n"
+ "rep stosb\n"
"2:"
: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
: "0" (src), "1" (dest), "2" (count) : "memory");
@@ -54,8 +53,7 @@ EXPORT_SYMBOL(strncpy);
char *strcat(char *dest, const char *src)
{
int d0, d1, d2, d3;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"decl %1\n"
"1:\tlodsb\n\t"
"stosb\n\t"
@@ -72,8 +70,7 @@ EXPORT_SYMBOL(strcat);
char *strncat(char *dest, const char *src, size_t count)
{
int d0, d1, d2, d3;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"decl %1\n\t"
"movl %8,%3\n"
"1:\tdecl %3\n\t"
@@ -167,8 +164,7 @@ size_t strlen(const char *s)
{
int d0;
size_t res;
- asm volatile("repne\n\t"
- "scasb"
+ asm volatile("repne scasb"
: "=c" (res), "=&D" (d0)
: "1" (s), "a" (0), "0" (0xffffffffu)
: "memory");
@@ -184,8 +180,7 @@ void *memchr(const void *cs, int c, size_t count)
void *res;
if (!count)
return NULL;
- asm volatile("repne\n\t"
- "scasb\n\t"
+ asm volatile("repne scasb\n\t"
"je 1f\n\t"
"movl $1,%0\n"
"1:\tdecl %0"
@@ -202,7 +197,7 @@ void *memscan(void *addr, int c, size_t size)
{
if (!size)
return addr;
- asm volatile("repnz; scasb\n\t"
+ asm volatile("repnz scasb\n\t"
"jnz 1f\n\t"
"dec %%edi\n"
"1:"
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 38f37df056f7..28267985e85f 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -8,16 +8,14 @@ int d0, d1;
register char *__res;
__asm__ __volatile__(
"movl %6,%%edi\n\t"
- "repne\n\t"
- "scasb\n\t"
+ "repne scasb\n\t"
"notl %%ecx\n\t"
"decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
"movl %%ecx,%%edx\n"
"1:\tmovl %6,%%edi\n\t"
"movl %%esi,%%eax\n\t"
"movl %%edx,%%ecx\n\t"
- "repe\n\t"
- "cmpsb\n\t"
+ "repe cmpsb\n\t"
"je 2f\n\t" /* also works for empty string, see above */
"xchgl %%eax,%%esi\n\t"
"incl %%esi\n\t"
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ad0139d25401..24b48af27417 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -6,6 +6,7 @@
#include <linux/uaccess.h>
#include <linux/export.h>
+#include <linux/instrumented.h>
#include <asm/tlbflush.h>
@@ -44,7 +45,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
* called from other contexts.
*/
pagefault_disable();
- ret = __copy_from_user_inatomic(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
+ ret = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, ret);
pagefault_enable();
return ret;
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 422257c350c6..f6f436f1d573 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -38,9 +38,9 @@ do { \
might_fault(); \
__asm__ __volatile__( \
ASM_STAC "\n" \
- "0: rep; stosl\n" \
+ "0: rep stosl\n" \
" movl %2,%0\n" \
- "1: rep; stosb\n" \
+ "1: rep stosb\n" \
"2: " ASM_CLAC "\n" \
_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %2) \
_ASM_EXTABLE_UA(1b, 2b) \
@@ -140,9 +140,9 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
- "99: rep; movsl\n"
+ "99: rep movsl\n"
"36: movl %%eax, %0\n"
- "37: rep; movsb\n"
+ "37: rep movsb\n"
"100:\n"
_ASM_EXTABLE_UA(1b, 100b)
_ASM_EXTABLE_UA(2b, 100b)
@@ -242,9 +242,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
" shrl $2, %0\n"
" andl $3, %%eax\n"
" cld\n"
- "6: rep; movsl\n"
+ "6: rep movsl\n"
" movl %%eax,%0\n"
- "7: rep; movsb\n"
+ "7: rep movsb\n"
"8:\n"
_ASM_EXTABLE_UA(0b, 8b)
_ASM_EXTABLE_UA(1b, 8b)
@@ -293,14 +293,14 @@ do { \
" negl %0\n" \
" andl $7,%0\n" \
" subl %0,%3\n" \
- "4: rep; movsb\n" \
+ "4: rep movsb\n" \
" movl %3,%0\n" \
" shrl $2,%0\n" \
" andl $3,%3\n" \
" .align 2,0x90\n" \
- "0: rep; movsl\n" \
+ "0: rep movsl\n" \
" movl %3,%0\n" \
- "1: rep; movsb\n" \
+ "1: rep movsb\n" \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(4b, 2b, EX_TYPE_UCOPY_LEN1, %3) \
_ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %3) \
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0ae6cf804197..654280aaa3e9 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,55 +9,16 @@
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
+#include <linux/libnvdimm.h>
/*
* Zero Userspace
*/
-unsigned long __clear_user(void __user *addr, unsigned long size)
-{
- long __d0;
- might_fault();
- /* no memory constraint because it doesn't change any memory gcc knows
- about */
- stac();
- asm volatile(
- " testq %[size8],%[size8]\n"
- " jz 4f\n"
- " .align 16\n"
- "0: movq $0,(%[dst])\n"
- " addq $8,%[dst]\n"
- " decl %%ecx ; jnz 0b\n"
- "4: movq %[size1],%%rcx\n"
- " testl %%ecx,%%ecx\n"
- " jz 2f\n"
- "1: movb $0,(%[dst])\n"
- " incq %[dst]\n"
- " decl %%ecx ; jnz 1b\n"
- "2:\n"
-
- _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN8, %[size1])
- _ASM_EXTABLE_UA(1b, 2b)
-
- : [size8] "=&c"(size), [dst] "=&D" (__d0)
- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
- clac();
- return size;
-}
-EXPORT_SYMBOL(__clear_user);
-
-unsigned long clear_user(void __user *to, unsigned long n)
-{
- if (access_ok(to, n))
- return __clear_user(to, n);
- return n;
-}
-EXPORT_SYMBOL(clear_user);
-
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* clean_cache_range - write back a cache range with CLWB
- * @vaddr: virtual start address
+ * @addr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
@@ -85,7 +46,11 @@ EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
unsigned long flushed, dest = (unsigned long) dst;
- long rc = __copy_user_nocache(dst, src, size, 0);
+ long rc;
+
+ stac();
+ rc = __copy_user_nocache(dst, src, size);
+ clac();
/*
* __copy_user_nocache() uses non-temporal stores for the bulk
@@ -176,13 +141,4 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
-
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
- size_t len)
-{
- char *from = kmap_atomic(page);
-
- memcpy_flushcache(to, from + offset, len);
- kunmap_atomic(from);
-}
#endif
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index d12d1358f96d..262f7ca1fb95 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -23,6 +23,7 @@
#
# AVX Superscripts
# (ev): this opcode requires EVEX prefix.
+# (es): this opcode requires EVEX prefix and is SCALABLE.
# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
# (v): this opcode requires VEX prefix.
# (v1): this opcode only supports 128bit VEX.
@@ -33,6 +34,10 @@
# - (F2): the last prefix is 0xF2
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+#
+# REX2 Prefix Superscripts
+# - (!REX2): REX2 is not allowed
+# - (REX2): REX2 variant e.g. JMPABS
Table: one byte opcode
Referrer:
@@ -142,13 +147,13 @@ AVXcode:
# 0x60 - 0x6f
60: PUSHA/PUSHAD (i64)
61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix),(o64)
63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
64: SEG=FS (Prefix)
65: SEG=GS (Prefix)
66: Operand-Size (Prefix)
67: Address-Size (Prefix)
-68: PUSH Iz (d64)
+68: PUSH Iz
69: IMUL Gv,Ev,Iz
6a: PUSH Ib (d64)
6b: IMUL Gv,Ev,Ib
@@ -157,22 +162,22 @@ AVXcode:
6e: OUTS/OUTSB DX,Xb
6f: OUTS/OUTSW/OUTSD DX,Xz
# 0x70 - 0x7f
-70: JO Jb
-71: JNO Jb
-72: JB/JNAE/JC Jb
-73: JNB/JAE/JNC Jb
-74: JZ/JE Jb
-75: JNZ/JNE Jb
-76: JBE/JNA Jb
-77: JNBE/JA Jb
-78: JS Jb
-79: JNS Jb
-7a: JP/JPE Jb
-7b: JNP/JPO Jb
-7c: JL/JNGE Jb
-7d: JNL/JGE Jb
-7e: JLE/JNG Jb
-7f: JNLE/JG Jb
+70: JO Jb (!REX2)
+71: JNO Jb (!REX2)
+72: JB/JNAE/JC Jb (!REX2)
+73: JNB/JAE/JNC Jb (!REX2)
+74: JZ/JE Jb (!REX2)
+75: JNZ/JNE Jb (!REX2)
+76: JBE/JNA Jb (!REX2)
+77: JNBE/JA Jb (!REX2)
+78: JS Jb (!REX2)
+79: JNS Jb (!REX2)
+7a: JP/JPE Jb (!REX2)
+7b: JNP/JPO Jb (!REX2)
+7c: JL/JNGE Jb (!REX2)
+7d: JNL/JGE Jb (!REX2)
+7e: JLE/JNG Jb (!REX2)
+7f: JNLE/JG Jb (!REX2)
# 0x80 - 0x8f
80: Grp1 Eb,Ib (1A)
81: Grp1 Ev,Iz (1A)
@@ -208,24 +213,24 @@ AVXcode:
9e: SAHF
9f: LAHF
# 0xa0 - 0xaf
-a0: MOV AL,Ob
-a1: MOV rAX,Ov
-a2: MOV Ob,AL
-a3: MOV Ov,rAX
-a4: MOVS/B Yb,Xb
-a5: MOVS/W/D/Q Yv,Xv
-a6: CMPS/B Xb,Yb
-a7: CMPS/W/D Xv,Yv
-a8: TEST AL,Ib
-a9: TEST rAX,Iz
-aa: STOS/B Yb,AL
-ab: STOS/W/D/Q Yv,rAX
-ac: LODS/B AL,Xb
-ad: LODS/W/D/Q rAX,Xv
-ae: SCAS/B AL,Yb
+a0: MOV AL,Ob (!REX2)
+a1: MOV rAX,Ov (!REX2) | JMPABS O (REX2),(o64)
+a2: MOV Ob,AL (!REX2)
+a3: MOV Ov,rAX (!REX2)
+a4: MOVS/B Yb,Xb (!REX2)
+a5: MOVS/W/D/Q Yv,Xv (!REX2)
+a6: CMPS/B Xb,Yb (!REX2)
+a7: CMPS/W/D Xv,Yv (!REX2)
+a8: TEST AL,Ib (!REX2)
+a9: TEST rAX,Iz (!REX2)
+aa: STOS/B Yb,AL (!REX2)
+ab: STOS/W/D/Q Yv,rAX (!REX2)
+ac: LODS/B AL,Xb (!REX2)
+ad: LODS/W/D/Q rAX,Xv (!REX2)
+ae: SCAS/B AL,Yb (!REX2)
# Note: The May 2011 Intel manual shows Xv for the second parameter of the
# next instruction but Yv is correct
-af: SCAS/W/D/Q rAX,Yv
+af: SCAS/W/D/Q rAX,Yv (!REX2)
# 0xb0 - 0xbf
b0: MOV AL/R8L,Ib
b1: MOV CL/R9L,Ib
@@ -248,8 +253,8 @@ c0: Grp2 Eb,Ib (1A)
c1: Grp2 Ev,Ib (1A)
c2: RETN Iw (f64)
c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix),(o64)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix),(o64)
c6: Grp11A Eb,Ib (1A)
c7: Grp11B Ev,Iz (1A)
c8: ENTER Iw,Ib
@@ -266,7 +271,7 @@ d1: Grp2 Ev,1 (1A)
d2: Grp2 Eb,CL (1A)
d3: Grp2 Ev,CL (1A)
d4: AAM Ib (i64)
-d5: AAD Ib (i64)
+d5: AAD Ib (i64) | REX2 (Prefix),(o64)
d6:
d7: XLAT/XLATB
d8: ESC
@@ -281,26 +286,26 @@ df: ESC
# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64)
-e1: LOOPE/LOOPZ Jb (f64)
-e2: LOOP Jb (f64)
-e3: JrCXZ Jb (f64)
-e4: IN AL,Ib
-e5: IN eAX,Ib
-e6: OUT Ib,AL
-e7: OUT Ib,eAX
+e0: LOOPNE/LOOPNZ Jb (f64),(!REX2)
+e1: LOOPE/LOOPZ Jb (f64),(!REX2)
+e2: LOOP Jb (f64),(!REX2)
+e3: JrCXZ Jb (f64),(!REX2)
+e4: IN AL,Ib (!REX2)
+e5: IN eAX,Ib (!REX2)
+e6: OUT Ib,AL (!REX2)
+e7: OUT Ib,eAX (!REX2)
# With 0x66 prefix in 64-bit mode, for AMD CPUs immediate offset
# in "near" jumps and calls is 16-bit. For CALL,
# push of return address is 16-bit wide, RSP is decremented by 2
# but is not truncated to 16 bits, unlike RIP.
-e8: CALL Jz (f64)
-e9: JMP-near Jz (f64)
-ea: JMP-far Ap (i64)
-eb: JMP-short Jb (f64)
-ec: IN AL,DX
-ed: IN eAX,DX
-ee: OUT DX,AL
-ef: OUT DX,eAX
+e8: CALL Jz (f64),(!REX2)
+e9: JMP-near Jz (f64),(!REX2)
+ea: JMP-far Ap (i64),(!REX2)
+eb: JMP-short Jb (f64),(!REX2)
+ec: IN AL,DX (!REX2)
+ed: IN eAX,DX (!REX2)
+ee: OUT DX,AL (!REX2)
+ef: OUT DX,eAX (!REX2)
# 0xf0 - 0xff
f0: LOCK (Prefix)
f1:
@@ -386,14 +391,14 @@ AVXcode: 1
2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
# 0x0f 0x30-0x3f
-30: WRMSR
-31: RDTSC
-32: RDMSR
-33: RDPMC
-34: SYSENTER
-35: SYSEXIT
+30: WRMSR (!REX2)
+31: RDTSC (!REX2)
+32: RDMSR (!REX2)
+33: RDPMC (!REX2)
+34: SYSENTER (!REX2)
+35: SYSEXIT (!REX2)
36:
-37: GETSEC
+37: GETSEC (!REX2)
38: escape # 3-byte escape 1
39:
3a: escape # 3-byte escape 2
@@ -473,22 +478,22 @@ AVXcode: 1
7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
# 0x0f 0x80-0x8f
# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64)
-81: JNO Jz (f64)
-82: JB/JC/JNAE Jz (f64)
-83: JAE/JNB/JNC Jz (f64)
-84: JE/JZ Jz (f64)
-85: JNE/JNZ Jz (f64)
-86: JBE/JNA Jz (f64)
-87: JA/JNBE Jz (f64)
-88: JS Jz (f64)
-89: JNS Jz (f64)
-8a: JP/JPE Jz (f64)
-8b: JNP/JPO Jz (f64)
-8c: JL/JNGE Jz (f64)
-8d: JNL/JGE Jz (f64)
-8e: JLE/JNG Jz (f64)
-8f: JNLE/JG Jz (f64)
+80: JO Jz (f64),(!REX2)
+81: JNO Jz (f64),(!REX2)
+82: JB/JC/JNAE Jz (f64),(!REX2)
+83: JAE/JNB/JNC Jz (f64),(!REX2)
+84: JE/JZ Jz (f64),(!REX2)
+85: JNE/JNZ Jz (f64),(!REX2)
+86: JBE/JNA Jz (f64),(!REX2)
+87: JA/JNBE Jz (f64),(!REX2)
+88: JS Jz (f64),(!REX2)
+89: JNS Jz (f64),(!REX2)
+8a: JP/JPE Jz (f64),(!REX2)
+8b: JNP/JPO Jz (f64),(!REX2)
+8c: JL/JNGE Jz (f64),(!REX2)
+8d: JNL/JGE Jz (f64),(!REX2)
+8e: JLE/JNG Jz (f64),(!REX2)
+8f: JNLE/JG Jz (f64),(!REX2)
# 0x0f 0x90-0x9f
90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
@@ -698,17 +703,17 @@ AVXcode: 2
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-50: vpdpbusd Vx,Hx,Wx (66),(ev)
-51: vpdpbusds Vx,Hx,Wx (66),(ev)
-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+50: vpdpbusd Vx,Hx,Wx (66) | vpdpbssd Vx,Hx,Wx (F2),(v) | vpdpbsud Vx,Hx,Wx (F3),(v) | vpdpbuud Vx,Hx,Wx (v)
+51: vpdpbusds Vx,Hx,Wx (66) | vpdpbssds Vx,Hx,Wx (F2),(v) | vpdpbsuds Vx,Hx,Wx (F3),(v) | vpdpbuuds Vx,Hx,Wx (v)
+52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
54: vpopcntb/w Vx,Wx (66),(ev)
55: vpopcntd/q Vx,Wx (66),(ev)
58: vpbroadcastd Vx,Wx (66),(v)
59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-5c: TDPBF16PS Vt,Wt,Ht (F3),(v1)
+5c: TDPBF16PS Vt,Wt,Ht (F3),(v1) | TDPFP16PS Vt,Wt,Ht (F2),(v1),(o64)
# Skip 0x5d
5e: TDPBSSD Vt,Wt,Ht (F2),(v1) | TDPBSUD Vt,Wt,Ht (F3),(v1) | TDPBUSD Vt,Wt,Ht (66),(v1) | TDPBUUD Vt,Wt,Ht (v1)
# Skip 0x5f-0x61
@@ -718,10 +723,12 @@ AVXcode: 2
65: vblendmps/d Vx,Hx,Wx (66),(ev)
66: vpblendmb/w Vx,Hx,Wx (66),(ev)
68: vp2intersectd/q Kx,Hx,Wx (F2),(ev)
-# Skip 0x69-0x6f
+# Skip 0x69-0x6b
+6c: TCMMIMFP16PS Vt,Wt,Ht (66),(v1),(o64) | TCMMRLFP16PS Vt,Wt,Ht (v1),(o64)
+# Skip 0x6d-0x6f
70: vpshldvw Vx,Hx,Wx (66),(ev)
71: vpshldvd/q Vx,Hx,Wx (66),(ev)
-72: vcvtne2ps2bf16 Vx,Hx,Wx (F2),(ev) | vcvtneps2bf16 Vx,Wx (F3),(ev) | vpshrdvw Vx,Hx,Wx (66),(ev)
+72: vcvtne2ps2bf16 Vx,Hx,Wx (F2),(ev) | vcvtneps2bf16 Vx,Wx (F3) | vpshrdvw Vx,Hx,Wx (66),(ev)
73: vpshrdvd/q Vx,Hx,Wx (66),(ev)
75: vpermi2b/w Vx,Hx,Wx (66),(ev)
76: vpermi2d/q Vx,Hx,Wx (66),(ev)
@@ -777,8 +784,10 @@ ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
-b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
+b0: vcvtneebf162ps Vx,Mx (F3),(!11B),(v) | vcvtneeph2ps Vx,Mx (66),(!11B),(v) | vcvtneobf162ps Vx,Mx (F2),(!11B),(v) | vcvtneoph2ps Vx,Mx (!11B),(v)
+b1: vbcstnebf162ps Vx,Mw (F3),(!11B),(v) | vbcstnesh2ps Vx,Mw (66),(!11B),(v)
+b4: vpmadd52luq Vx,Hx,Wx (66)
+b5: vpmadd52huq Vx,Hx,Wx (66)
b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
@@ -796,15 +805,35 @@ c7: Grp19 (1A)
c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
c9: sha1msg1 Vdq,Wdq
ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
-cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
-cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
-cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev) | vsha512rnds2 Vqq,Hqq,Udq (F2),(11B),(v)
+cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev) | vsha512msg1 Vqq,Udq (F2),(11B),(v)
+cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev) | vsha512msg2 Vqq,Uqq (F2),(11B),(v)
cf: vgf2p8mulb Vx,Wx (66)
+d2: vpdpwsud Vx,Hx,Wx (F3),(v) | vpdpwusd Vx,Hx,Wx (66),(v) | vpdpwuud Vx,Hx,Wx (v)
+d3: vpdpwsuds Vx,Hx,Wx (F3),(v) | vpdpwusds Vx,Hx,Wx (66),(v) | vpdpwuuds Vx,Hx,Wx (v)
+d8: AESENCWIDE128KL Qpi (F3),(000),(00B) | AESENCWIDE256KL Qpi (F3),(000),(10B) | AESDECWIDE128KL Qpi (F3),(000),(01B) | AESDECWIDE256KL Qpi (F3),(000),(11B)
+da: vsm3msg1 Vdq,Hdq,Udq (v1) | vsm3msg2 Vdq,Hdq,Udq (66),(v1) | vsm4key4 Vx,Hx,Wx (F3),(v) | vsm4rnds4 Vx,Hx,Wx (F2),(v)
db: VAESIMC Vdq,Wdq (66),(v1)
-dc: vaesenc Vx,Hx,Wx (66)
-dd: vaesenclast Vx,Hx,Wx (66)
-de: vaesdec Vx,Hx,Wx (66)
-df: vaesdeclast Vx,Hx,Wx (66)
+dc: vaesenc Vx,Hx,Wx (66) | LOADIWKEY Vx,Hx (F3) | AESENC128KL Vpd,Qpi (F3)
+dd: vaesenclast Vx,Hx,Wx (66) | AESDEC128KL Vpd,Qpi (F3)
+de: vaesdec Vx,Hx,Wx (66) | AESENC256KL Vpd,Qpi (F3)
+df: vaesdeclast Vx,Hx,Wx (66) | AESDEC256KL Vpd,Qpi (F3)
+e0: CMPOXADD My,Gy,By (66),(v1),(o64)
+e1: CMPNOXADD My,Gy,By (66),(v1),(o64)
+e2: CMPBXADD My,Gy,By (66),(v1),(o64)
+e3: CMPNBXADD My,Gy,By (66),(v1),(o64)
+e4: CMPZXADD My,Gy,By (66),(v1),(o64)
+e5: CMPNZXADD My,Gy,By (66),(v1),(o64)
+e6: CMPBEXADD My,Gy,By (66),(v1),(o64)
+e7: CMPNBEXADD My,Gy,By (66),(v1),(o64)
+e8: CMPSXADD My,Gy,By (66),(v1),(o64)
+e9: CMPNSXADD My,Gy,By (66),(v1),(o64)
+ea: CMPPXADD My,Gy,By (66),(v1),(o64)
+eb: CMPNPXADD My,Gy,By (66),(v1),(o64)
+ec: CMPLXADD My,Gy,By (66),(v1),(o64)
+ed: CMPNLXADD My,Gy,By (66),(v1),(o64)
+ee: CMPLEXADD My,Gy,By (66),(v1),(o64)
+ef: CMPNLEXADD My,Gy,By (66),(v1),(o64)
f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
f2: ANDN Gy,By,Ey (v)
@@ -812,8 +841,11 @@ f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) | WRUSSD/Q My,Gy (66)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) | WRSSD/Q My,Gy
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
-f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3) | URDMSR Rq,Gq (F2),(11B) | UWRMSR Gq,Rq (F3),(11B)
f9: MOVDIRI My,Gy
+fa: ENCODEKEY128 Ew,Ew (F3)
+fb: ENCODEKEY256 Ew,Ew (F3)
+fc: AADD My,Gy | AAND My,Gy (66) | AOR My,Gy (F2) | AXOR My,Gy (F3)
EndTable

Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -893,10 +925,103 @@ c2: vcmpph Vx,Hx,Wx,Ib (ev) | vcmpsh Vx,Hx,Wx,Ib (F3),(ev)
cc: sha1rnds4 Vdq,Wdq,Ib
ce: vgf2p8affineqb Vx,Wx,Ib (66)
cf: vgf2p8affineinvqb Vx,Wx,Ib (66)
+de: vsm3rnds2 Vdq,Hdq,Wdq,Ib (66),(v1)
df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
f0: RORX Gy,Ey,Ib (F2),(v) | HRESET Gv,Ib (F3),(000),(11B)
EndTable

+Table: EVEX map 4
+Referrer:
+AVXcode: 4
+00: ADD Eb,Gb (ev)
+01: ADD Ev,Gv (es) | ADD Ev,Gv (66),(es)
+02: ADD Gb,Eb (ev)
+03: ADD Gv,Ev (es) | ADD Gv,Ev (66),(es)
+08: OR Eb,Gb (ev)
+09: OR Ev,Gv (es) | OR Ev,Gv (66),(es)
+0a: OR Gb,Eb (ev)
+0b: OR Gv,Ev (es) | OR Gv,Ev (66),(es)
+10: ADC Eb,Gb (ev)
+11: ADC Ev,Gv (es) | ADC Ev,Gv (66),(es)
+12: ADC Gb,Eb (ev)
+13: ADC Gv,Ev (es) | ADC Gv,Ev (66),(es)
+18: SBB Eb,Gb (ev)
+19: SBB Ev,Gv (es) | SBB Ev,Gv (66),(es)
+1a: SBB Gb,Eb (ev)
+1b: SBB Gv,Ev (es) | SBB Gv,Ev (66),(es)
+20: AND Eb,Gb (ev)
+21: AND Ev,Gv (es) | AND Ev,Gv (66),(es)
+22: AND Gb,Eb (ev)
+23: AND Gv,Ev (es) | AND Gv,Ev (66),(es)
+24: SHLD Ev,Gv,Ib (es) | SHLD Ev,Gv,Ib (66),(es)
+28: SUB Eb,Gb (ev)
+29: SUB Ev,Gv (es) | SUB Ev,Gv (66),(es)
+2a: SUB Gb,Eb (ev)
+2b: SUB Gv,Ev (es) | SUB Gv,Ev (66),(es)
+2c: SHRD Ev,Gv,Ib (es) | SHRD Ev,Gv,Ib (66),(es)
+30: XOR Eb,Gb (ev)
+31: XOR Ev,Gv (es) | XOR Ev,Gv (66),(es)
+32: XOR Gb,Eb (ev)
+33: XOR Gv,Ev (es) | XOR Gv,Ev (66),(es)
+# CCMPSCC instructions are: CCOMB, CCOMBE, CCOMF, CCOML, CCOMLE, CCOMNB, CCOMNBE, CCOMNL, CCOMNLE,
+# CCOMNO, CCOMNS, CCOMNZ, CCOMO, CCOMS, CCOMT, CCOMZ
+38: CCMPSCC Eb,Gb (ev)
+39: CCMPSCC Ev,Gv (es) | CCMPSCC Ev,Gv (66),(es)
+3a: CCMPSCC Gv,Ev (ev)
+3b: CCMPSCC Gv,Ev (es) | CCMPSCC Gv,Ev (66),(es)
+40: CMOVO Gv,Ev (es) | CMOVO Gv,Ev (66),(es) | CFCMOVO Ev,Ev (es) | CFCMOVO Ev,Ev (66),(es) | SETO Eb (F2),(ev)
+41: CMOVNO Gv,Ev (es) | CMOVNO Gv,Ev (66),(es) | CFCMOVNO Ev,Ev (es) | CFCMOVNO Ev,Ev (66),(es) | SETNO Eb (F2),(ev)
+42: CMOVB Gv,Ev (es) | CMOVB Gv,Ev (66),(es) | CFCMOVB Ev,Ev (es) | CFCMOVB Ev,Ev (66),(es) | SETB Eb (F2),(ev)
+43: CMOVNB Gv,Ev (es) | CMOVNB Gv,Ev (66),(es) | CFCMOVNB Ev,Ev (es) | CFCMOVNB Ev,Ev (66),(es) | SETNB Eb (F2),(ev)
+44: CMOVZ Gv,Ev (es) | CMOVZ Gv,Ev (66),(es) | CFCMOVZ Ev,Ev (es) | CFCMOVZ Ev,Ev (66),(es) | SETZ Eb (F2),(ev)
+45: CMOVNZ Gv,Ev (es) | CMOVNZ Gv,Ev (66),(es) | CFCMOVNZ Ev,Ev (es) | CFCMOVNZ Ev,Ev (66),(es) | SETNZ Eb (F2),(ev)
+46: CMOVBE Gv,Ev (es) | CMOVBE Gv,Ev (66),(es) | CFCMOVBE Ev,Ev (es) | CFCMOVBE Ev,Ev (66),(es) | SETBE Eb (F2),(ev)
+47: CMOVNBE Gv,Ev (es) | CMOVNBE Gv,Ev (66),(es) | CFCMOVNBE Ev,Ev (es) | CFCMOVNBE Ev,Ev (66),(es) | SETNBE Eb (F2),(ev)
+48: CMOVS Gv,Ev (es) | CMOVS Gv,Ev (66),(es) | CFCMOVS Ev,Ev (es) | CFCMOVS Ev,Ev (66),(es) | SETS Eb (F2),(ev)
+49: CMOVNS Gv,Ev (es) | CMOVNS Gv,Ev (66),(es) | CFCMOVNS Ev,Ev (es) | CFCMOVNS Ev,Ev (66),(es) | SETNS Eb (F2),(ev)
+4a: CMOVP Gv,Ev (es) | CMOVP Gv,Ev (66),(es) | CFCMOVP Ev,Ev (es) | CFCMOVP Ev,Ev (66),(es) | SETP Eb (F2),(ev)
+4b: CMOVNP Gv,Ev (es) | CMOVNP Gv,Ev (66),(es) | CFCMOVNP Ev,Ev (es) | CFCMOVNP Ev,Ev (66),(es) | SETNP Eb (F2),(ev)
+4c: CMOVL Gv,Ev (es) | CMOVL Gv,Ev (66),(es) | CFCMOVL Ev,Ev (es) | CFCMOVL Ev,Ev (66),(es) | SETL Eb (F2),(ev)
+4d: CMOVNL Gv,Ev (es) | CMOVNL Gv,Ev (66),(es) | CFCMOVNL Ev,Ev (es) | CFCMOVNL Ev,Ev (66),(es) | SETNL Eb (F2),(ev)
+4e: CMOVLE Gv,Ev (es) | CMOVLE Gv,Ev (66),(es) | CFCMOVLE Ev,Ev (es) | CFCMOVLE Ev,Ev (66),(es) | SETLE Eb (F2),(ev)
+4f: CMOVNLE Gv,Ev (es) | CMOVNLE Gv,Ev (66),(es) | CFCMOVNLE Ev,Ev (es) | CFCMOVNLE Ev,Ev (66),(es) | SETNLE Eb (F2),(ev)
+60: MOVBE Gv,Ev (es) | MOVBE Gv,Ev (66),(es)
+61: MOVBE Ev,Gv (es) | MOVBE Ev,Gv (66),(es)
+65: WRUSSD Md,Gd (66),(ev) | WRUSSQ Mq,Gq (66),(ev)
+66: ADCX Gy,Ey (66),(ev) | ADOX Gy,Ey (F3),(ev) | WRSSD Md,Gd (ev) | WRSSQ Mq,Gq (66),(ev)
+69: IMUL Gv,Ev,Iz (es) | IMUL Gv,Ev,Iz (66),(es)
+6b: IMUL Gv,Ev,Ib (es) | IMUL Gv,Ev,Ib (66),(es)
+80: Grp1 Eb,Ib (1A),(ev)
+81: Grp1 Ev,Iz (1A),(es)
+83: Grp1 Ev,Ib (1A),(es)
+# CTESTSCC instructions are: CTESTB, CTESTBE, CTESTF, CTESTL, CTESTLE, CTESTNB, CTESTNBE, CTESTNL,
+# CTESTNLE, CTESTNO, CTESTNS, CTESTNZ, CTESTO, CTESTS, CTESTT, CTESTZ
+84: CTESTSCC Eb,Gb (ev)
+85: CTESTSCC Ev,Gv (es) | CTESTSCC Ev,Gv (66),(es)
+88: POPCNT Gv,Ev (es) | POPCNT Gv,Ev (66),(es)
+8f: POP2 Bq,Rq (000),(11B),(ev)
+a5: SHLD Ev,Gv,CL (es) | SHLD Ev,Gv,CL (66),(es)
+ad: SHRD Ev,Gv,CL (es) | SHRD Ev,Gv,CL (66),(es)
+af: IMUL Gv,Ev (es) | IMUL Gv,Ev (66),(es)
+c0: Grp2 Eb,Ib (1A),(ev)
+c1: Grp2 Ev,Ib (1A),(es)
+d0: Grp2 Eb,1 (1A),(ev)
+d1: Grp2 Ev,1 (1A),(es)
+d2: Grp2 Eb,CL (1A),(ev)
+d3: Grp2 Ev,CL (1A),(es)
+f0: CRC32 Gy,Eb (es) | INVEPT Gq,Mdq (F3),(ev)
+f1: CRC32 Gy,Ey (es) | CRC32 Gy,Ey (66),(es) | INVVPID Gy,Mdq (F3),(ev)
+f2: INVPCID Gy,Mdq (F3),(ev)
+f4: TZCNT Gv,Ev (es) | TZCNT Gv,Ev (66),(es)
+f5: LZCNT Gv,Ev (es) | LZCNT Gv,Ev (66),(es)
+f6: Grp3_1 Eb (1A),(ev)
+f7: Grp3_2 Ev (1A),(es)
+f8: MOVDIR64B Gv,Mdqq (66),(ev) | ENQCMD Gv,Mdqq (F2),(ev) | ENQCMDS Gv,Mdqq (F3),(ev) | URDMSR Rq,Gq (F2),(11B),(ev) | UWRMSR Gq,Rq (F3),(11B),(ev)
+f9: MOVDIRI My,Gy (ev)
+fe: Grp4 (1A),(ev)
+ff: Grp5 (1A),(es) | PUSH2 Bq,Rq (110),(11B),(ev)
+EndTable
+
Table: EVEX map 5
Referrer:
AVXcode: 5
@@ -975,6 +1100,12 @@ d6: vfcmulcph Vx,Hx,Wx (F2),(ev) | vfmulcph Vx,Hx,Wx (F3),(ev)
d7: vfcmulcsh Vx,Hx,Wx (F2),(ev) | vfmulcsh Vx,Hx,Wx (F3),(ev)
EndTable

+Table: VEX map 7
+Referrer:
+AVXcode: 7
+f8: URDMSR Rq,Id (F2),(v1),(11B) | UWRMSR Id,Rq (F3),(v1),(11B)
+EndTable
+
GrpTable: Grp1
0: ADD
1: OR
@@ -1047,11 +1178,12 @@ GrpTable: Grp6
3: LTR Ew
4: VERR Ew
5: VERW Ew
+6: LKGS Ew (F2)
EndTable

GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) | WRMSRNS (110),(11B) | RDMSRLIST (F2),(110),(11B) | WRMSRLIST (F3),(110),(11B) | PBNDKB (111),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B) | ERETU (F3),(010),(11B) | ERETS (F2),(010),(11B)
2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
@@ -1136,6 +1268,8 @@ GrpTable: Grp16
1: prefetch T0
2: prefetch T1
3: prefetch T2
+6: prefetch IT1
+7: prefetch IT0
EndTable

GrpTable: Grp17