author    David S. Miller <davem@davemloft.net>  2015-07-31 23:52:20 -0700
committer David S. Miller <davem@davemloft.net>  2015-07-31 23:52:20 -0700
commit    5510b3c2a173921374ec847848fb20b98e1c698a (patch)
tree      c9e185281ef17280ce0dc30be7923124874736b0 /arch/x86
parent    Merge branch 'ipv6-auto-flow-labels' (diff)
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	arch/s390/net/bpf_jit_comp.c
	drivers/net/ethernet/ti/netcp_ethss.c
	net/bridge/br_multicast.c
	net/ipv4/ip_fragment.c

All four conflicts were cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/entry/entry_64_compat.S	14
-rw-r--r--	arch/x86/include/uapi/asm/kvm.h	4
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_cqm.c	8
-rw-r--r--	arch/x86/kernel/fpu/init.c	6
-rw-r--r--	arch/x86/kvm/lapic.c	2
-rw-r--r--	arch/x86/kvm/mtrr.c	40
-rw-r--r--	arch/x86/kvm/svm.c	2
-rw-r--r--	arch/x86/kvm/vmx.c	5
-rw-r--r--	arch/x86/kvm/x86.h	5
-rw-r--r--	arch/x86/mm/ioremap.c	23
-rw-r--r--	arch/x86/mm/mmap.c	7
-rw-r--r--	arch/x86/mm/mpx.c	24
-rw-r--r--	arch/x86/mm/tlb.c	2
-rw-r--r--	arch/x86/net/bpf_jit_comp.c	8
14 files changed, 87 insertions(+), 63 deletions(-)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..5a1844765a7a 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
movl RDX(%rsp), %edx /* arg3 */
movl RSI(%rsp), %ecx /* arg4 */
movl RDI(%rsp), %r8d /* arg5 */
- movl %ebp, %r9d /* arg6 */
.endm
.macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
sysenter_auditsys:
auditsys_entry_common
+ movl %ebp, %r9d /* reload 6th syscall arg */
jmp sysenter_dispatch
sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
* 32-bit zero extended:
*/
ASM_STAC
-1: movl (%r8), %ebp
+1: movl (%r8), %r9d
_ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
cstar_do_call:
/* 32-bit syscall -> 64-bit C ABI argument conversion */
movl %edi, %r8d /* arg5 */
- movl %ebp, %r9d /* arg6 */
+ /* r9 already loaded */ /* arg6 */
xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
movl %ebx, %edi /* arg1 */
movl %edx, %edx /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
call *ia32_sys_call_table(, %rax, 8)
movq %rax, RAX(%rsp)
1:
- movl RCX(%rsp), %ebp
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:
#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
+ movl %r9d, R9(%rsp) /* register to be clobbered by call */
auditsys_entry_common
+ movl R9(%rsp), %r9d /* reload 6th syscall arg */
jmp cstar_dispatch
sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jz cstar_auditsys
#endif
+ xchgl %r9d, %ebp
SAVE_EXTRA_REGS
xorl %eax, %eax /* Do not leak kernel information */
movq %rax, R11(%rsp)
movq %rax, R10(%rsp)
- movq %rax, R9(%rsp)
+ movq %r9, R9(%rsp)
movq %rax, R8(%rsp)
movq %rsp, %rdi /* &pt_regs -> arg1 */
call syscall_trace_enter
+ movl R9(%rsp), %r9d
/* Reload arg registers from stack. (see sysenter_tracesys) */
movl RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS
+ xchgl %ebp, %r9d
jmp cstar_do_call
END(entry_SYSCALL_compat)
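The entry_64_compat.S changes stop using %ebp as the carrier for the sixth syscall argument on the compat SYSCALL path, loading it straight into %r9d instead, so user-visible %ebp is no longer clobbered across the syscall. For reference, the full 32-bit-to-64-bit argument conversion performed at cstar_do_call, summarized from the mov/xchg sequence in the hunks above:

/*
 * 32-bit SYSCALL -> 64-bit C ABI argument mapping (cstar_do_call):
 *
 *   arg1: ebx    -> edi
 *   arg2: ecx    -> esi   (half of the xchg %ecx, %esi)
 *   arg3: edx    -> edx   (zero-extended in place)
 *   arg4: esi    -> ecx   (other half of the xchg)
 *   arg5: edi    -> r8d
 *   arg6: *(%r8) -> r9d   (loaded directly at entry, not via %ebp)
 */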
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
struct kvm_sync_regs {
};
-#define KVM_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..63eb68b73589 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
return 0;
/*
+ * Getting up-to-date values requires an SMP IPI which is not
+ * possible if we're being called in interrupt context. Return
+ * the cached values instead.
+ */
+ if (unlikely(in_interrupt()))
+ goto out;
+
+ /*
* Notice that we don't perform the reading of an RMID
* atomically, because we can't hold a spin lock across the
* IPIs.
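The new early-out in intel_cqm_event_count() exists because refreshing the RMID count requires an SMP IPI, which cannot be sent from interrupt context. A minimal sketch of the guard pattern, with hypothetical read_cached()/read_via_ipi() stand-ins rather than the driver's real helpers:

#include <linux/hardirq.h>	/* in_interrupt() */

static u64 read_counter(struct counter *c)
{
	/*
	 * An SMP IPI cannot be sent from interrupt context, so fall
	 * back to the value cached by the last successful refresh.
	 */
	if (unlikely(in_interrupt()))
		return read_cached(c);		/* hypothetical helper */

	return read_via_ipi(c);			/* hypothetical helper */
}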
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 0b39173dd971..1e173f6285c7 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
setup_clear_cpu_cap(X86_FEATURE_AVX);
setup_clear_cpu_cap(X86_FEATURE_AVX2);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
return 1;
}
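x86_noxsave_setup() now also clears the feature bits that are unusable without XSAVE (XSAVEC, the AVX-512 flavors, MPX) instead of leaving them visible after "noxsave". A sketch of how such an early boot parameter is wired up, assuming the standard __setup() registration this file uses:

/* Registration sketch: the early-param parser runs the handler when
 * "noxsave" appears on the command line; returning 1 marks it handled. */
static int __init x86_noxsave_setup(char *s)
{
	/* clear XSAVE itself plus every feature that depends on it */
	return 1;
}
__setup("noxsave", x86_noxsave_setup);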
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_update_lvtt(apic);
- if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..dc0a84a6f309 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
+static u8 mtrr_disabled_type(void)
+{
+ /*
+ * Intel SDM 11.11.2.2: all MTRRs are disabled when
+ * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+ * memory type is applied to all of physical memory.
+ */
+ return MTRR_TYPE_UNCACHABLE;
+}
+
/*
* Three terms are used in the following code:
* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
/* output fields. */
int mem_type;
+ /* mtrr is completely disabled? */
+ bool mtrr_disabled;
/* [start, end) is not fully covered in MTRRs? */
bool partial_map;
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
static void mtrr_lookup_start(struct mtrr_iter *iter)
{
if (!mtrr_is_enabled(iter->mtrr_state)) {
- iter->partial_map = true;
+ iter->mtrr_disabled = true;
return;
}
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
iter->mtrr_state = mtrr_state;
iter->start = start;
iter->end = end;
+ iter->mtrr_disabled = false;
iter->partial_map = false;
iter->fixed = false;
iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
return MTRR_TYPE_WRBACK;
}
- /* It is not covered by MTRRs. */
- if (iter.partial_map) {
- /*
- * We just check one page, partially covered by MTRRs is
- * impossible.
- */
- WARN_ON(type != -1);
- type = mtrr_default_type(mtrr_state);
- }
+ if (iter.mtrr_disabled)
+ return mtrr_disabled_type();
+
+ /*
+ * We just check one page, partially covered by MTRRs is
+ * impossible.
+ */
+ WARN_ON(iter.partial_map);
+
+ /* not contained in any MTRRs. */
+ if (type == -1)
+ return mtrr_default_type(mtrr_state);
+
return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
return false;
}
+ if (iter.mtrr_disabled)
+ return true;
+
if (!iter.partial_map)
return true;
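Net effect of the mtrr.c hunks: "MTRRs disabled" is now a first-class lookup outcome instead of being folded into partial_map, so a single page resolves in a fixed order. An illustrative restatement of kvm_mtrr_get_guest_memory_type()'s tail, not additional kernel code:

/* Illustrative decision order after this patch. */
static u8 resolve_type(struct mtrr_iter *iter, struct kvm_mtrr *state, int type)
{
	if (iter->mtrr_disabled)
		return mtrr_disabled_type();	/* SDM 11.11.2.2: UC everywhere */

	WARN_ON(iter->partial_map);		/* one page cannot be partial */

	if (type == -1)				/* no fixed/variable range hit */
		return mtrr_default_type(state);

	return type;				/* type accumulated from MTRRs */
}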
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bbc678a66b18..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
* does not do it - this results in some delay at
* reboot
*/
- if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4e9384717a..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
ipat = VMX_EPT_IPAT_BIT;
- cache = MTRR_TYPE_UNCACHABLE;
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ cache = MTRR_TYPE_WRBACK;
+ else
+ cache = MTRR_TYPE_UNCACHABLE;
goto exit;
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
return kvm_register_write(vcpu, reg, val);
}
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+ return !(kvm->arch.disabled_quirks & quirk);
+}
+
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
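Note the polarity of the new helper: quirks are tracked as disabled bits, so kvm_check_has_quirk() returns true while a quirk is still active. That keeps the updated call sites positive, as in the lapic.c hunk above:

/* From lapic.c above: LINT0 gets ExtINT delivery at reset only while
 * userspace has not disabled the LINT0_REENABLED quirk. */
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
	apic_set_reg(apic, APIC_LVT0,
		     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));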
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
!PageReserved(pfn_to_page(start_pfn + i)))
return 1;
- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-
return 0;
}
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
pgprot_t prot;
int retval;
void __iomem *ret_addr;
- int ram_region;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- /* First check if whole region can be identified as RAM or not */
- ram_region = region_is_ram(phys_addr, size);
- if (ram_region > 0) {
- WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
- (unsigned long int)phys_addr,
- (unsigned long int)last_addr);
+ pfn = phys_addr >> PAGE_SHIFT;
+ last_pfn = last_addr >> PAGE_SHIFT;
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
return NULL;
}
- /* If could not be identified(-1), check page by page */
- if (ram_region < 0) {
- pfn = phys_addr >> PAGE_SHIFT;
- last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
- __ioremap_check_ram) == 1)
- return NULL;
- }
/*
* Mappings have to be page-aligned
*/
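The ioremap.c rewrite drops the region_is_ram() fast path and its page-by-page fallback, and always defers to walk_system_ram_range(), which invokes the check once per chunk of "System RAM" resource overlapping the range; a nonzero callback return stops the walk and is propagated. A sketch of the callback contract, shaped like the __ioremap_check_ram() at the top of this file's diff:

/* walk_system_ram_range() callback sketch: called per System RAM chunk;
 * a nonzero return aborts the walk and becomes the walk's return value. */
static int check_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;	/* real RAM in range: refuse ioremap */
	return 0;
}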
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_MPX)
+ return "[mpx]";
+ return NULL;
+}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
- return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
- .name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
- return (vma->vm_ops == &mpx_vma_ops);
-}
-
static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
/*
* This is really a simplified "vm_mmap". it only handles MPX
* bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
*/
static unsigned long mpx_mmap(unsigned long len)
{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
ret = -ENOMEM;
goto out;
}
- vma->vm_ops = &mpx_vma_ops;
if (vm_flags & VM_LOCKED) {
up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
* so stop immediately and return an error. This
* probably results in a SIGSEGV.
*/
- if (!is_mpx_vma(vma))
+ if (!(vma->vm_flags & VM_MPX))
return -EINVAL;
len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
* lots of tables even though we have no actual table
* entries in use.
*/
- while (next && is_mpx_vma(next))
+ while (next && (next->vm_flags & VM_MPX))
next = next->vm_next;
- while (prev && is_mpx_vma(prev))
+ while (prev && (prev->vm_flags & VM_MPX))
prev = prev->vm_prev;
/*
* We know 'start' and 'end' lie within an area controlled
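Together, the mmap.c and mpx.c hunks move MPX VMA identification from a private vm_ops pointer to the VM_MPX vm_flags bit, which is also what lets the new arch_vma_name() report "[mpx]" without per-VMA ops. The removed is_mpx_vma() helper reduces to a flag test, restated here for clarity (the patch open-codes it at each call site):

/* One-line equivalent of the deleted is_mpx_vma(), now that the
 * information lives in vm_flags rather than vm_ops. */
static inline bool is_mpx_vma(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_MPX);
}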
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
} else {
unsigned long addr;
unsigned long nr_pages =
- f->flush_end - f->flush_start / PAGE_SIZE;
+ (f->flush_end - f->flush_start) / PAGE_SIZE;
addr = f->flush_start;
while (addr < f->flush_end) {
__flush_tlb_single(addr);
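The tlb.c change is a pure operator-precedence fix: / binds tighter than -, so the old expression subtracted (flush_start / PAGE_SIZE) from flush_end instead of dividing the byte length by the page size. With made-up addresses:

/* PAGE_SIZE = 4096, flush_start = 0x10000, flush_end = 0x12000 (2 pages):
 *
 *   old:  0x12000 - 0x10000 / 4096   ==  0x12000 - 0x10  ==  0x11ff0
 *   new: (0x12000 - 0x10000) / 4096  ==  0x2000 / 4096   ==  2
 */
unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE;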
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index c08000b850ef..ec5214f39aa8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
offsetof(struct bpf_array, map.max_entries));
EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
-#define OFFSET1 44 /* number of bytes to jump */
+#define OFFSET1 47 /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 33
+#define OFFSET2 36
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
/* prog = array->prog[index]; */
- EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */
- EMIT1(offsetof(struct bpf_array, prog));
+ EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
+ offsetof(struct bpf_array, prog));
EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
/* if (prog == NULL)
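The two offset bumps in the tail-call emitter (OFFSET1 44 -> 47, OFFSET2 33 -> 36) fall out of the lea re-encoding at the end of the hunk: offsetof(struct bpf_array, prog) is no longer assumed to fit in a signed byte, so the 8-bit-displacement form gives way to the 32-bit one and the instruction grows by 3 bytes. Both forward jumps land past that instruction, so both distances grow by the same 3 bytes:

/* Encoding sizes behind the +3 (bytes as emitted above):
 *   old: 48 8D 44 D6 ii           lea rax, [rsi + rdx*8 + disp8]   (5 bytes)
 *   new: 48 8D 84 D6 ii ii ii ii  lea rax, [rsi + rdx*8 + disp32]  (8 bytes)
 *
 *   OFFSET1 = 44 + 3 = 47,  OFFSET2 = 33 + 3 = 36
 */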