Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/bitrev.h            20
-rw-r--r--  arch/arm/include/asm/compiler.h          15
-rw-r--r--  arch/arm/include/asm/insn.h              29
-rw-r--r--  arch/arm/include/asm/kprobes.h           33
-rw-r--r--  arch/arm/include/asm/kvm_asm.h            1
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h       15
-rw-r--r--  arch/arm/include/asm/kvm_host.h           9
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h           1
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h           98
-rw-r--r--  arch/arm/include/asm/mach/irda.h         20
-rw-r--r--  arch/arm/include/asm/mach/pci.h           6
-rw-r--r--  arch/arm/include/asm/outercache.h         3
-rw-r--r--  arch/arm/include/asm/patch.h             17
-rw-r--r--  arch/arm/include/asm/pci.h                7
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h     3
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h     7
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h      4
-rw-r--r--  arch/arm/include/asm/pgtable.h           20
-rw-r--r--  arch/arm/include/asm/probes.h            15
-rw-r--r--  arch/arm/include/asm/thread_info.h        4
-rw-r--r--  arch/arm/include/asm/uaccess.h           96
-rw-r--r--  arch/arm/include/asm/xen/page.h           2
22 files changed, 299 insertions, 126 deletions
diff --git a/arch/arm/include/asm/bitrev.h b/arch/arm/include/asm/bitrev.h
new file mode 100644
index 000000000000..ec291c350ea3
--- /dev/null
+++ b/arch/arm/include/asm/bitrev.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ __asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
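
For reference, the rbit instruction does in one step what a portable fallback needs a handful of shift/mask passes for. A minimal sketch of the generic equivalent (the classic divide-and-conquer bit swap; the kernel's actual fallback in lib/bitrev.c is table-based):

    #include <stdint.h>

    /* Portable 32-bit bit reversal: swap groups of progressively larger width. */
    static uint32_t bitrev32_generic(uint32_t x)
    {
            x = ((x & 0x55555555) << 1) | ((x >> 1) & 0x55555555); /* swap odd/even bits */
            x = ((x & 0x33333333) << 2) | ((x >> 2) & 0x33333333); /* swap bit pairs */
            x = ((x & 0x0f0f0f0f) << 4) | ((x >> 4) & 0x0f0f0f0f); /* swap nibbles */
            return (x << 24) | ((x & 0xff00) << 8) |               /* reverse bytes */
                   ((x >> 8) & 0xff00) | (x >> 24);
    }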
diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h
index 8155db2f7fa1..29fe85e59439 100644
--- a/arch/arm/include/asm/compiler.h
+++ b/arch/arm/include/asm/compiler.h
@@ -8,8 +8,21 @@
* This string is meant to be concatenated with the inline asm string and
* will cause compilation to stop on mismatch.
* (for details, see gcc PR 15089)
+ * For compatibility with clang, we have to specifically take the equivalence
+ * of 'r11' <-> 'fp' and 'r12' <-> 'ip' into account as well.
*/
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#define __asmeq(x, y) \
+ ".ifnc " x "," y "; " \
+ ".ifnc " x y ",fpr11; " \
+ ".ifnc " x y ",r11fp; " \
+ ".ifnc " x y ",ipr12; " \
+ ".ifnc " x y ",r12ip; " \
+ ".err; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif; " \
+ ".endif\n\t"
#endif /* __ASM_ARM_COMPILER_H */
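
The extra .ifnc pairs matter because clang may print a register operand by its alias ("fp" for r11, "ip" for r12), so a plain string comparison would reject correct code. A hedged sketch of how __asmeq is typically used (modelled on the __get_user_x macro later in this series; 'some_ptr' is illustrative):

    register const void *ptr asm("r0") = some_ptr;
    register unsigned int val asm("r2");

    asm volatile(
            __asmeq("%0", "r2")     /* build fails if %0 was not allocated to r2 */
            __asmeq("%1", "r0")
            "ldr    %0, [%1]"
            : "=r" (val)
            : "r" (ptr));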
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
new file mode 100644
index 000000000000..e96065da4dae
--- /dev/null
+++ b/arch/arm/include/asm/insn.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ return 0xf3af8000; /* nop.w */
+#else
+ return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, false);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, true);
+}
+
+#endif
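
For ARM (non-Thumb) mode, __arm_gen_branch() presumably emits the standard B/BL encoding: a signed 24-bit word offset relative to pc + 8. An illustrative sketch of that encoding, not the in-kernel implementation (which lives in arch/arm/kernel/insn.c and also handles Thumb-2):

    /* Illustrative ARM-mode branch encoder; reachable range is +/-32 MB. */
    static unsigned long arm_branch_sketch(unsigned long pc, unsigned long addr,
                                           bool link)
    {
            unsigned long opcode = link ? 0xeb000000UL : 0xea000000UL;
            long offset = (long)addr - (long)(pc + 8);      /* pc reads 8 ahead */

            if (offset < -33554432 || offset > 33554428)
                    return 0;       /* out of range: caller must give up */

            return opcode | (((unsigned long)offset >> 2) & 0x00ffffff);
    }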
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 49fa0dfaad33..3ea9be559726 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -22,7 +22,6 @@
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
-#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
@@ -51,5 +50,37 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
+/* optinsn template addresses */
+extern __visible kprobe_opcode_t optprobe_template_entry;
+extern __visible kprobe_opcode_t optprobe_template_val;
+extern __visible kprobe_opcode_t optprobe_template_call;
+extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_sub_sp;
+extern __visible kprobe_opcode_t optprobe_template_add_sp;
+extern __visible kprobe_opcode_t optprobe_template_restore_begin;
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
+extern __visible kprobe_opcode_t optprobe_template_restore_end;
+
+#define MAX_OPTIMIZED_LENGTH 4
+#define MAX_OPTINSN_SIZE \
+ ((unsigned long)&optprobe_template_end - \
+ (unsigned long)&optprobe_template_entry)
+#define RELATIVEJUMP_SIZE 4
+
+struct arch_optimized_insn {
+ /*
+ * copy of the original instructions.
+ * Unlike x86, ARM's kprobe_opcode_t is u32.
+ */
+#define MAX_COPIED_INSN DIV_ROUND_UP(RELATIVEJUMP_SIZE, sizeof(kprobe_opcode_t))
+ kprobe_opcode_t copied_insn[MAX_COPIED_INSN];
+ /* detour code buffer */
+ kprobe_opcode_t *insn;
+ /*
+ * We always copy one instruction on ARM,
+ * so size will always be 4, and unlike x86, there is no
+ * need for a size field.
+ */
+};
#endif /* _ARM_KPROBES_H */
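
The optprobe_template_* symbols bracket a block of assembly (added as arch/arm/probes/kprobes/opt-arm.S in this series), and MAX_OPTINSN_SIZE is simply the span between entry and end. A rough, hypothetical sketch of how arch code might size and fill a detour buffer from the template:

    /* Rough sketch only; the real setup also fixes up the val/call slots. */
    kprobe_opcode_t *code = get_optinsn_slot();     /* MAX_OPTINSN_SIZE bytes */
    size_t size = (unsigned long)&optprobe_template_end -
                  (unsigned long)&optprobe_template_entry;

    memcpy(code, &optprobe_template_entry, size);
    /* ... patch the value/callback slots inside 'code', then flush icache. */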
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 3a67bec72d0c..25410b2d8bc1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -96,6 +96,7 @@ extern char __kvm_hyp_code_end[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..a9c80a2ea1a7 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -23,6 +23,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
+#include <asm/cputype.h>
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
@@ -38,6 +39,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr = HCR_GUEST_MASK;
}
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr = hcr;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
@@ -167,9 +178,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cp15[c0_MPIDR];
+ return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..41008cd7c53f 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -68,6 +68,7 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
+ int max_vcpus;
};
#define KVM_NR_MEM_OBJS 40
@@ -125,9 +126,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest on this vcpu */
bool pause;
@@ -147,6 +145,7 @@ struct kvm_vm_stat {
};
struct kvm_vcpu_stat {
+ u32 halt_successful_poll;
u32 halt_wakeup;
};
@@ -234,6 +233,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic)
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
+
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index adcc0d7d3175..3f83db2f6cf0 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -37,6 +37,7 @@ struct kvm_exit_mmio {
u8 data[8];
u32 len;
bool is_write;
+ void *private;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..37ca2a4c6f09 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
@@ -114,6 +115,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= L_PMD_S2_RDWR;
}
+static inline void kvm_set_s2pte_readonly(pte_t *pte)
+{
+ pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
+}
+
+static inline bool kvm_s2pte_readonly(pte_t *pte)
+{
+ return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
+}
+
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+{
+ pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
+}
+
+static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+{
+ return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
+}
+
+
/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end) \
({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
@@ -161,13 +183,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
-
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +198,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
*
* VIVT caches are tagged using both the ASID and the VMID and don't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ *
+ * We need to do this through a kernel mapping (using the
+ * user-space mapping has proved to be the wrong
+ * solution). For that, we need to kmap one page at a time,
+ * and iterate over the range.
*/
- if (icache_is_pipt()) {
- __cpuc_coherent_user_range(hva, hva + size);
- } else if (!icache_is_vivt_asid_tagged()) {
+
+ bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+ VM_BUG_ON(size & ~PAGE_MASK);
+
+ if (!need_flush && !icache_is_pipt())
+ goto vipt_cache;
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ if (need_flush)
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ if (icache_is_pipt())
+ __cpuc_coherent_user_range((unsigned long)va,
+ (unsigned long)va + PAGE_SIZE);
+
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
+
+vipt_cache:
+ if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ void *va = kmap_atomic(pte_page(pte));
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ unsigned long size = PMD_SIZE;
+ pfn_t pfn = pmd_pfn(pmd);
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ pfn++;
+ size -= PAGE_SIZE;
+
+ kunmap_atomic(va);
+ }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* !__ASSEMBLY__ */
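
The new kvm_set_s2pte_readonly()/kvm_s2pte_readonly() helpers back kvm_mmu_wp_memory_region() (declared in kvm_host.h above), which write-protects a memslot so dirty pages can be caught via stage-2 permission faults. A simplified sketch of the kind of PTE walker that would use them:

    /* Sketch: write-protect every present stage-2 PTE in [addr, end). */
    static void stage2_wp_ptes_sketch(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
    {
            pte_t *pte = pte_offset_kernel(pmd, addr);

            do {
                    if (!pte_none(*pte) && !kvm_s2pte_readonly(pte))
                            kvm_set_s2pte_readonly(pte);
            } while (pte++, addr += PAGE_SIZE, addr != end);
    }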
diff --git a/arch/arm/include/asm/mach/irda.h b/arch/arm/include/asm/mach/irda.h
deleted file mode 100644
index 38f77b5e56cf..000000000000
--- a/arch/arm/include/asm/mach/irda.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/include/asm/mach/irda.h
- *
- * Copyright (C) 2004 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_MACH_IRDA_H
-#define __ASM_ARM_MACH_IRDA_H
-
-struct irda_platform_data {
- int (*startup)(struct device *);
- void (*shutdown)(struct device *);
- int (*set_power)(struct device *, unsigned int state);
- void (*set_speed)(struct device *, unsigned int speed);
-};
-
-#endif
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 8292b5f81e23..28b9bb35949e 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -19,9 +19,6 @@ struct pci_bus;
struct device;
struct hw_pci {
-#ifdef CONFIG_PCI_DOMAINS
- int domain;
-#endif
#ifdef CONFIG_PCI_MSI
struct msi_controller *msi_ctrl;
#endif
@@ -45,9 +42,6 @@ struct hw_pci {
* Per-controller structure
*/
struct pci_sys_data {
-#ifdef CONFIG_PCI_DOMAINS
- int domain;
-#endif
#ifdef CONFIG_PCI_MSI
struct msi_controller *msi_ctrl;
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 891a56b35bcf..563b92fc2f41 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -23,6 +23,8 @@
#include <linux/types.h>
+struct l2x0_regs;
+
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
@@ -36,6 +38,7 @@ struct outer_cache_fns {
/* This is an ARM L2C thing */
void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
};
extern struct outer_cache_fns outer_cache;
diff --git a/arch/arm/include/asm/patch.h b/arch/arm/include/asm/patch.h
new file mode 100644
index 000000000000..77e054c2f6cd
--- /dev/null
+++ b/arch/arm/include/asm/patch.h
@@ -0,0 +1,17 @@
+#ifndef _ARM_KERNEL_PATCH_H
+#define _ARM_KERNEL_PATCH_H
+
+void patch_text(void *addr, unsigned int insn);
+void __patch_text_real(void *addr, unsigned int insn, bool remap);
+
+static inline void __patch_text(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, true);
+}
+
+static inline void __patch_text_early(void *addr, unsigned int insn)
+{
+ __patch_text_real(addr, insn, false);
+}
+
+#endif
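
Together with insn.h above, the usual pattern is to generate an instruction and then patch it into live kernel text; dynamic ftrace is the canonical user. A hedged usage sketch:

    /* Sketch: turn the instruction at 'addr' into a nop, ftrace-style. */
    static void nop_out_sketch(void *addr)
    {
            /* patch_text() presumably serializes other CPUs and syncs I/D caches */
            patch_text(addr, arm_gen_nop());
    }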
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 7e95d8535e24..585dc33a7a24 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void)
}
#ifdef CONFIG_PCI_DOMAINS
-static inline int pci_domain_nr(struct pci_bus *bus)
-{
- struct pci_sys_data *root = bus->sysdata;
-
- return root->domain;
-}
-
static inline int pci_proc_domain(struct pci_bus *bus)
{
return pci_domain_nr(bus);
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f0279411847d..bfd662e49a25 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -10,6 +10,8 @@
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
+#define __PAGETABLE_PMD_FOLDED
+
/*
* Hardware-wise, we have a two level page table structure, where the first
* level has 4096 entries, and the second level has 256 entries. Each entry
@@ -118,7 +120,6 @@
#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1)
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7)
#define L_PTE_USER (_AT(pteval_t, 1) << 8)
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a31ecdad4b59..a745a2a53853 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -77,7 +77,6 @@
*/
#define L_PTE_VALID (_AT(pteval_t, 1) << 0) /* Valid */
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
-#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
@@ -130,6 +129,7 @@
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
@@ -258,7 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
-#define pmd_mknotpresent(pmd) (__pmd(0))
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ return __pmd(0);
+}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 0642228ff785..add094d09e3e 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -54,8 +54,6 @@
typedef pte_t *pte_addr_t;
-static inline int pte_file(pte_t pte) { return 0; }
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -87,7 +85,7 @@ extern unsigned int kobjsize(const void *objp);
#define VMALLOC_START 0UL
#define VMALLOC_END 0xffffffffUL
-#define FIRST_USER_ADDRESS (0)
+#define FIRST_USER_ADDRESS 0UL
#include <asm-generic/pgtable.h>
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index d5cac545ba33..f40354198bad 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -318,12 +318,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset ----------------------> < type -> 0 0 0
+ * <--------------- offset ------------------------> < type -> 0 0
*
- * This gives us up to 31 swap files and 64GB per swap file. Note that
+ * This gives us up to 31 swap files and 128GB per swap file. Note that
* the offset field is always non-zero.
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
@@ -342,20 +342,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/*
- * Encode and decode a file entry. File entries are stored in the Linux
- * page tables as follows:
- *
- * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <----------------------- offset ------------------------> 1 0 0
- */
-#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 29
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
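
The arithmetic behind the updated comment: with __SWP_TYPE_SHIFT dropping from 3 to 2 (bit 2 is free now that L_PTE_FILE is gone) and 5 type bits, the offset occupies bits 31..7, i.e. 25 bits, and 2^25 pages x 4 KB = 128 GB per swap file. A sketch of the encode/decode this implies (mirroring the __swp_* macros that sit just outside this hunk's context):

    /* type lives in bits 6..2, offset in bits 31..7; bits 1..0 stay zero */
    #define __swp_type(x)     (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
    #define __swp_offset(x)   ((x).val >> __SWP_OFFSET_SHIFT)
    #define __swp_entry(type, offset) \
            ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | \
                             ((offset) << __SWP_OFFSET_SHIFT) })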
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index 806cfe622a9e..1e5b9bb92270 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -19,6 +19,8 @@
#ifndef _ASM_PROBES_H
#define _ASM_PROBES_H
+#ifndef __ASSEMBLY__
+
typedef u32 probes_opcode_t;
struct arch_probes_insn;
@@ -38,6 +40,19 @@ struct arch_probes_insn {
probes_check_cc *insn_check_cc;
probes_insn_singlestep_t *insn_singlestep;
probes_insn_fn_t *insn_fn;
+ int stack_space;
+ unsigned long register_usage_flags;
+ bool kprobe_direct_exec;
};
+#endif /* __ASSEMBLY__ */
+
+/*
+ * We assume one instruction can consume at most 64 bytes of stack, which is
+ * 'push {r0-r15}' (16 registers x 4 bytes). Instructions that consume more
+ * stack space, or an unknown amount, such as 'str r0, [sp, #-80]' and
+ * 'str r0, [sp, r1]', should be prohibited from being probed.
+ * Both kprobes and jprobes use this macro.
+ */
+#define MAX_STACK_SIZE 64
+
#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d890e41f5520..72812a1f3d1c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -68,7 +68,6 @@ struct thread_info {
#ifdef CONFIG_ARM_THUMBEE
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
- struct restart_block restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -81,9 +80,6 @@ struct thread_info {
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 4767eb9caa78..ce0786efd26c 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs)
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
-#define segment_eq(a,b) ((a) == (b))
+#define segment_eq(a, b) ((a) == (b))
#define __addr_ok(addr) ({ \
unsigned long flag; \
@@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs)
(flag == 0); })
/* We use 33-bit arithmetic here... */
-#define __range_ok(addr,size) ({ \
+#define __range_ok(addr, size) ({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
@@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *);
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8 "lr", "cc"
-#define __get_user_x(__r2,__p,__e,__l,__s) \
+#define __get_user_x(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
__asmeq("%3", "r1") \
@@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *);
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
-#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
+#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
@@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *);
#endif
-#define __get_user_check(x,p) \
+#define __get_user_check(x, p) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
@@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *);
__e; \
})
-#define get_user(x,p) \
+#define get_user(x, p) \
({ \
might_fault(); \
- __get_user_check(x,p); \
+ __get_user_check(x, p); \
})
extern int __put_user_1(void *, unsigned int);
@@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__l,__s) \
+#define __put_user_x(__r2, __p, __e, __l, __s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
__asmeq("%3", "r1") \
@@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long);
: "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
-#define __put_user_check(x,p) \
+#define __put_user_check(x, p) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
const typeof(*(p)) __user *__tmp_p = (p); \
@@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long);
__e; \
})
-#define put_user(x,p) \
+#define put_user(x, p) \
({ \
might_fault(); \
- __put_user_check(x,p); \
+ __put_user_check(x, p); \
})
#else /* CONFIG_MMU */
@@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long);
*/
#define USER_DS KERNEL_DS
-#define segment_eq(a,b) (1)
-#define __addr_ok(addr) ((void)(addr),1)
-#define __range_ok(addr,size) ((void)(addr),0)
+#define segment_eq(a, b) (1)
+#define __addr_ok(addr) ((void)(addr), 1)
+#define __range_ok(addr, size) ((void)(addr), 0)
#define get_fs() (KERNEL_DS)
static inline void set_fs(mm_segment_t fs)
{
}
-#define get_user(x,p) __get_user(x,p)
-#define put_user(x,p) __put_user(x,p)
+#define get_user(x, p) __get_user(x, p)
+#define put_user(x, p) __put_user(x, p)
#endif /* CONFIG_MMU */
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
#define user_addr_max() \
(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
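
None of the get_user/put_user hunks in this file change behaviour; they only normalize comma spacing in the macro definitions. The caller-facing pattern stays the familiar one (a trivial sketch, not part of this patch):

    /* Minimal sketch of the caller-facing API. */
    static int copy_flag(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))        /* -EFAULT on a bad user pointer */
                    return -EFAULT;

            return put_user(val | 1, uptr);
    }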
@@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs)
* error occurs, and leave it unchanged on success. Note that these
* versions are void (ie, don't return a value as such).
*/
-#define __get_user(x,ptr) \
+#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
- __get_user_err((x),(ptr),__gu_err); \
+ __get_user_err((x), (ptr), __gu_err); \
__gu_err; \
})
-#define __get_user_error(x,ptr,err) \
+#define __get_user_error(x, ptr, err) \
({ \
- __get_user_err((x),(ptr),err); \
+ __get_user_err((x), (ptr), err); \
(void) 0; \
})
-#define __get_user_err(x,ptr,err) \
+#define __get_user_err(x, ptr, err) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
might_fault(); \
switch (sizeof(*(ptr))) { \
- case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
- case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
- case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
+ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
+ case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
+ case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_asm_byte(x,addr,err) \
+#define __get_user_asm_byte(x, addr, err) \
__asm__ __volatile__( \
"1: " TUSER(ldrb) " %1,[%2],#0\n" \
"2:\n" \
@@ -330,7 +330,7 @@ do { \
: "cc")
#ifndef __ARMEB__
-#define __get_user_asm_half(x,__gu_addr,err) \
+#define __get_user_asm_half(x, __gu_addr, err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
@@ -338,7 +338,7 @@ do { \
(x) = __b1 | (__b2 << 8); \
})
#else
-#define __get_user_asm_half(x,__gu_addr,err) \
+#define __get_user_asm_half(x, __gu_addr, err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
@@ -347,7 +347,7 @@ do { \
})
#endif
-#define __get_user_asm_word(x,addr,err) \
+#define __get_user_asm_word(x, addr, err) \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1,[%2],#0\n" \
"2:\n" \
@@ -365,35 +365,35 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-#define __put_user(x,ptr) \
+#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
- __put_user_err((x),(ptr),__pu_err); \
+ __put_user_err((x), (ptr), __pu_err); \
__pu_err; \
})
-#define __put_user_error(x,ptr,err) \
+#define __put_user_error(x, ptr, err) \
({ \
- __put_user_err((x),(ptr),err); \
+ __put_user_err((x), (ptr), err); \
(void) 0; \
})
-#define __put_user_err(x,ptr,err) \
+#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
- case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
- case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
- case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
+ case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
+ case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
+ case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
+ case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
} while (0)
-#define __put_user_asm_byte(x,__pu_addr,err) \
+#define __put_user_asm_byte(x, __pu_addr, err) \
__asm__ __volatile__( \
"1: " TUSER(strb) " %1,[%2],#0\n" \
"2:\n" \
@@ -411,22 +411,22 @@ do { \
: "cc")
#ifndef __ARMEB__
-#define __put_user_asm_half(x,__pu_addr,err) \
+#define __put_user_asm_half(x, __pu_addr, err) \
({ \
- unsigned long __temp = (unsigned long)(x); \
+ unsigned long __temp = (__force unsigned long)(x); \
__put_user_asm_byte(__temp, __pu_addr, err); \
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
-#define __put_user_asm_half(x,__pu_addr,err) \
+#define __put_user_asm_half(x, __pu_addr, err) \
({ \
- unsigned long __temp = (unsigned long)(x); \
+ unsigned long __temp = (__force unsigned long)(x); \
__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif
-#define __put_user_asm_word(x,__pu_addr,err) \
+#define __put_user_asm_word(x, __pu_addr, err) \
__asm__ __volatile__( \
"1: " TUSER(str) " %1,[%2],#0\n" \
"2:\n" \
@@ -451,7 +451,7 @@ do { \
#define __reg_oper1 "%R2"
#endif
-#define __put_user_asm_dword(x,__pu_addr,err) \
+#define __put_user_asm_dword(x, __pu_addr, err) \
__asm__ __volatile__( \
ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
@@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
-#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
-#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
+#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 68c739b3fdf4..2f7e6ff67d51 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);