Diffstat
 -rw-r--r--   arch/x86/kernel/cpu/Makefile           |   2
 -rw-r--r--   arch/x86/kernel/cpu/bugs.c             | 159
 -rw-r--r--   arch/x86/kernel/cpu/common.c           |  99
 -rw-r--r--   arch/x86/kernel/cpu/cpu.h              |  18
 -rw-r--r--   arch/x86/kernel/cpu/intel.c            |   5
 -rw-r--r--   arch/x86/kernel/cpu/resctrl/rdtgroup.c |   4
 -rw-r--r--   arch/x86/kernel/cpu/tsx.c              | 140
 7 files changed, 384 insertions, 43 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index d7a1e5a9331c..890f60083eca 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PROC_FS)	+= proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 
 ifdef CONFIG_CPU_SUP_INTEL
-obj-y			+= intel.o intel_pconfig.o
+obj-y			+= intel.o intel_pconfig.o tsx.o
 obj-$(CONFIG_PM)	+= intel_epb.o
 endif
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 91c2561b905f..4c7b0fa15a19 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
+static void __init taa_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
 u64 x86_spec_ctrl_base;
@@ -105,6 +106,7 @@ void __init check_bugs(void)
 	ssb_select_mitigation();
 	l1tf_select_mitigation();
 	mds_select_mitigation();
+	taa_select_mitigation();
 
 	arch_smt_update();
 
@@ -269,6 +271,100 @@ static int __init mds_cmdline(char *str)
 early_param("mds", mds_cmdline);
 
 #undef pr_fmt
+#define pr_fmt(fmt)	"TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+	[TAA_MITIGATION_OFF]		= "Vulnerable",
+	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
+	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
+	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+	u64 ia32_cap;
+
+	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+		taa_mitigation = TAA_MITIGATION_OFF;
+		return;
+	}
+
+	/* TSX previously disabled by tsx=off */
+	if (!boot_cpu_has(X86_FEATURE_RTM)) {
+		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+		goto out;
+	}
+
+	if (cpu_mitigations_off()) {
+		taa_mitigation = TAA_MITIGATION_OFF;
+		return;
+	}
+
+	/* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
+	if (taa_mitigation == TAA_MITIGATION_OFF)
+		goto out;
+
+	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+		taa_mitigation = TAA_MITIGATION_VERW;
+	else
+		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+	/*
+	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+	 * A microcode update fixes this behavior to clear CPU buffers. It also
+	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+	 * ARCH_CAP_TSX_CTRL_MSR bit.
+	 *
+	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+	 * update is required.
+	 */
+	ia32_cap = x86_read_arch_cap_msr();
+	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+	/*
+	 * TSX is enabled, select alternate mitigation for TAA which is
+	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
+	 *
+	 * For guests that can't determine whether the correct microcode is
+	 * present on host, enable the mitigation for UCODE_NEEDED as well.
+	 */
+	static_branch_enable(&mds_user_clear);
+
+	if (taa_nosmt || cpu_mitigations_auto_nosmt())
+		cpu_smt_disable(false);
+
+out:
+	pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+	if (!boot_cpu_has_bug(X86_BUG_TAA))
+		return 0;
+
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off")) {
+		taa_mitigation = TAA_MITIGATION_OFF;
+	} else if (!strcmp(str, "full")) {
+		taa_mitigation = TAA_MITIGATION_VERW;
+	} else if (!strcmp(str, "full,nosmt")) {
+		taa_mitigation = TAA_MITIGATION_VERW;
+		taa_nosmt = true;
+	}
+
+	return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 
 enum spectre_v1_mitigation {
@@ -786,13 +882,10 @@ static void update_mds_branch_idle(void)
 }
 
 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
 
 void cpu_bugs_smt_update(void)
 {
-	/* Enhanced IBRS implies STIBP. No update required. */
-	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
-		return;
-
 	mutex_lock(&spec_ctrl_mutex);
 
 	switch (spectre_v2_user) {
@@ -819,6 +912,17 @@ void cpu_bugs_smt_update(void)
 		break;
 	}
 
+	switch (taa_mitigation) {
+	case TAA_MITIGATION_VERW:
+	case TAA_MITIGATION_UCODE_NEEDED:
+		if (sched_smt_active())
+			pr_warn_once(TAA_MSG_SMT);
+		break;
+	case TAA_MITIGATION_TSX_DISABLED:
+	case TAA_MITIGATION_OFF:
+		break;
+	}
+
 	mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -1149,6 +1253,9 @@ void x86_spec_ctrl_setup_ap(void)
 		x86_amd_ssb_disable();
 }
 
+bool itlb_multihit_kvm_mitigation;
+EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"L1TF: " fmt
 
@@ -1304,11 +1411,24 @@ static ssize_t l1tf_show_state(char *buf)
 		       l1tf_vmx_states[l1tf_vmx_mitigation],
 		       sched_smt_active() ? "vulnerable" : "disabled");
 }
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+	if (itlb_multihit_kvm_mitigation)
+		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+	else
+		return sprintf(buf, "KVM: Vulnerable\n");
+}
 #else
 static ssize_t l1tf_show_state(char *buf)
 {
 	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
 }
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+	return sprintf(buf, "Processor vulnerable\n");
+}
 #endif
 
 static ssize_t mds_show_state(char *buf)
@@ -1328,6 +1448,21 @@ static ssize_t mds_show_state(char *buf)
 		       sched_smt_active() ? "vulnerable" : "disabled");
 }
 
+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+	    (taa_mitigation == TAA_MITIGATION_OFF))
+		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		return sprintf(buf, "%s; SMT Host state unknown\n",
+			       taa_strings[taa_mitigation]);
+	}
+
+	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+		       sched_smt_active() ? "vulnerable" : "disabled");
+}
+
 static char *stibp_state(void)
 {
 	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1398,6 +1533,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_MDS:
 		return mds_show_state(buf);
 
+	case X86_BUG_TAA:
+		return tsx_async_abort_show_state(buf);
+
+	case X86_BUG_ITLB_MULTIHIT:
+		return itlb_multihit_show_state(buf);
+
 	default:
 		break;
 	}
@@ -1434,4 +1575,14 @@ ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *bu
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
 }
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
+
+ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+}
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9ae7d1bcd4f4..fffe21945374 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1016,13 +1016,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-#define NO_SPECULATION	BIT(0)
-#define NO_MELTDOWN	BIT(1)
-#define NO_SSB		BIT(2)
-#define NO_L1TF		BIT(3)
-#define NO_MDS		BIT(4)
-#define MSBDS_ONLY	BIT(5)
-#define NO_SWAPGS	BIT(6)
+#define NO_SPECULATION		BIT(0)
+#define NO_MELTDOWN		BIT(1)
+#define NO_SSB			BIT(2)
+#define NO_L1TF			BIT(3)
+#define NO_MDS			BIT(4)
+#define MSBDS_ONLY		BIT(5)
+#define NO_SWAPGS		BIT(6)
+#define NO_ITLB_MULTIHIT	BIT(7)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)	\
 	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1043,27 +1044,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
 
 	/* Intel Family 6 */
-	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
-	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
-	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
-	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
-	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
-
-	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
 	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
 
-	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS),
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
 	/*
 	 * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1073,15 +1074,17 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	 * good enough for our purposes.
 	 */
 
+	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),
+
 	/* AMD Family 0xf - 0x12 */
-	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
-	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
 	{}
 };
 
@@ -1092,19 +1095,30 @@ static bool __init cpu_matches(unsigned long which)
 	return m && !!(m->driver_data & which);
 }
 
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
 {
 	u64 ia32_cap = 0;
 
+	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+	u64 ia32_cap = x86_read_arch_cap_msr();
+
+	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+	if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
 	if (cpu_matches(NO_SPECULATION))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
 	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1121,6 +1135,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (!cpu_matches(NO_SWAPGS))
 		setup_force_cpu_bug(X86_BUG_SWAPGS);
 
+	/*
+	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
+	 *	- TSX is supported or
+	 *	- TSX_CTRL is present
+	 *
+	 * TSX_CTRL check is needed for cases when TSX could be disabled before
+	 * the kernel boot e.g. kexec.
+	 * TSX_CTRL check alone is not sufficient for cases when the microcode
+	 * update is not present or running as guest that don't get TSX_CTRL.
+	 */
+	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	    (cpu_has(c, X86_FEATURE_RTM) ||
+	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+		setup_force_cpu_bug(X86_BUG_TAA);
+
 	if (cpu_matches(NO_MELTDOWN))
 		return;
 
@@ -1554,6 +1583,8 @@ void __init identify_boot_cpu(void)
 #endif
 	cpu_detect_tlb(&boot_cpu_data);
 	setup_cr_pinning();
+
+	tsx_init();
 }
 
 void identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index c0e2407abdd6..38ab6e115eac 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -44,6 +44,22 @@ struct _tlb_table {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
 			    *const __x86_cpu_dev_end[];
 
+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+	TSX_CTRL_ENABLE,
+	TSX_CTRL_DISABLE,
+	TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -62,4 +78,6 @@ unsigned int aperfmperf_get_khz(int cpu);
 
 extern void x86_spec_ctrl_setup_ap(void);
 
+extern u64 x86_read_arch_cap_msr(void);
+
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c2fdc00df163..11d5c5950e2d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -762,6 +762,11 @@ static void init_intel(struct cpuinfo_x86 *c)
 		detect_tme(c);
 
 	init_intel_misc_features(c);
+
+	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+		tsx_enable();
+	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+		tsx_disable();
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index a46dee8e78db..2e3b06d6bbc6 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -461,10 +461,8 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 	}
 
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
-	rdt_last_cmd_clear();
 	if (!rdtgrp) {
 		ret = -ENOENT;
-		rdt_last_cmd_puts("Directory was removed\n");
 		goto unlock;
 	}
 
@@ -2648,10 +2646,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 	int ret;
 
 	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
-	rdt_last_cmd_clear();
 	if (!prdtgrp) {
 		ret = -ENODEV;
-		rdt_last_cmd_puts("Directory was removed\n");
 		goto out_unlock;
 	}
 
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
new file mode 100644
index 000000000000..3e20d322bc98
--- /dev/null
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ *	Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+	u64 tsx;
+
+	rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+	/* Force all transactions to immediately abort */
+	tsx |= TSX_CTRL_RTM_DISABLE;
+
+	/*
+	 * Ensure TSX support is not enumerated in CPUID.
+	 * This is visible to userspace and will ensure they
+	 * do not waste resources trying TSX transactions that
+	 * will always abort.
+	 */
+	tsx |= TSX_CTRL_CPUID_CLEAR;
+
+	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+	u64 tsx;
+
+	rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+	/* Enable the RTM feature in the cpu */
+	tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+	/*
+	 * Ensure TSX support is enumerated in CPUID.
+	 * This is visible to userspace and will ensure they
+	 * can enumerate and use the TSX feature.
+	 */
+	tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+	u64 ia32_cap = x86_read_arch_cap_msr();
+
+	/*
+	 * TSX is controlled via MSR_IA32_TSX_CTRL.  However, support for this
+	 * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+	 *
+	 * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+	 * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+	 * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+	 * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+	 * tsx= cmdline requests will do nothing on CPUs without
+	 * MSR_IA32_TSX_CTRL support.
+	 */
+	return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+	if (boot_cpu_has_bug(X86_BUG_TAA))
+		return TSX_CTRL_DISABLE;
+
+	return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+	char arg[5] = {};
+	int ret;
+
+	if (!tsx_ctrl_is_supported())
+		return;
+
+	ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+	if (ret >= 0) {
+		if (!strcmp(arg, "on")) {
+			tsx_ctrl_state = TSX_CTRL_ENABLE;
+		} else if (!strcmp(arg, "off")) {
+			tsx_ctrl_state = TSX_CTRL_DISABLE;
+		} else if (!strcmp(arg, "auto")) {
+			tsx_ctrl_state = x86_get_tsx_auto_mode();
+		} else {
+			tsx_ctrl_state = TSX_CTRL_DISABLE;
+			pr_err("tsx: invalid option, defaulting to off\n");
+		}
+	} else {
+		/* tsx= not provided */
+		if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+			tsx_ctrl_state = x86_get_tsx_auto_mode();
+		else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+			tsx_ctrl_state = TSX_CTRL_DISABLE;
+		else
+			tsx_ctrl_state = TSX_CTRL_ENABLE;
+	}
+
+	if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+		tsx_disable();
+
+		/*
+		 * tsx_disable() will change the state of the
+		 * RTM CPUID bit.  Clear it here since it is now
+		 * expected to be not set.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_RTM);
+	} else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+		/*
+		 * HW defaults TSX to be enabled at bootup.
+		 * We may still need the TSX enable support
+		 * during init for special cases like
+		 * kexec after TSX is disabled.
+		 */
+		tsx_enable();
+
+		/*
+		 * tsx_enable() will change the state of the
+		 * RTM CPUID bit.  Force it here since it is now
+		 * expected to be set.
+		 */
+		setup_force_cpu_cap(X86_FEATURE_RTM);
+	}
+}
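Not part of the patch above: the cpu_show_tsx_async_abort() and cpu_show_itlb_multihit() hooks added in bugs.c back the standard sysfs vulnerability files, so the new mitigation state is visible from userspace under /sys/devices/system/cpu/vulnerabilities/. A minimal userspace sketch that reads those two files follows; the paths are derived from the attribute names in this diff, and everything else (file handling, output format) is illustrative only.

/* Illustrative only, not part of the patch: dump the sysfs files backed by
 * cpu_show_tsx_async_abort() and cpu_show_itlb_multihit() from the diff above.
 * On kernels without this series (or on non-x86) the files simply don't exist. */
#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/sys/devices/system/cpu/vulnerabilities/tsx_async_abort",
		"/sys/devices/system/cpu/vulnerabilities/itlb_multihit",
	};
	char line[256];
	unsigned int i;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f) {
			printf("%s: not present\n", files[i]);
			continue;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);
		fclose(f);
	}
	return 0;
}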
