Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/acpi.h | 4
-rw-r--r--  arch/arm64/include/asm/alternative.h | 4
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 32
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 2
-rw-r--r--  arch/arm64/include/asm/assembler.h | 36
-rw-r--r--  arch/arm64/include/asm/atomic.h | 237
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h | 28
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 38
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 60
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 10
-rw-r--r--  arch/arm64/include/asm/cputype.h | 9
-rw-r--r--  arch/arm64/include/asm/daifflags.h | 61
-rw-r--r--  arch/arm64/include/asm/device.h | 3
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 9
-rw-r--r--  arch/arm64/include/asm/efi.h | 11
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 6
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 31
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 5
-rw-r--r--  arch/arm64/include/asm/io.h | 1
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 100
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 12
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 75
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 13
-rw-r--r--  arch/arm64/include/asm/kvm_ras.h | 25
-rw-r--r--  arch/arm64/include/asm/memblock.h | 21
-rw-r--r--  arch/arm64/include/asm/memory.h | 20
-rw-r--r--  arch/arm64/include/asm/mmu.h | 1
-rw-r--r--  arch/arm64/include/asm/neon-intrinsics.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r--  arch/arm64/include/asm/processor.h | 3
-rw-r--r--  arch/arm64/include/asm/ptdump.h | 9
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 26
-rw-r--r--  arch/arm64/include/asm/sync_bitops.h | 16
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 7
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 2
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 1
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 37
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 99
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 76
42 files changed, 784 insertions, 359 deletions
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 2def77ec14be..7628efbe6c12 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -18,6 +18,7 @@
#include <asm/cputype.h>
#include <asm/io.h>
+#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -110,9 +111,10 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
-
+int apei_claim_sea(struct pt_regs *regs);
#else
static inline void acpi_init_cpus(void) { }
+static inline int apei_claim_sea(struct pt_regs *regs) { return -ENOENT; }
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
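The new apei_claim_sea() prototype comes with a !CONFIG_ACPI stub so fault-handling code can call it unconditionally. A hedged sketch of that calling pattern (illustrative only; example_handle_sea is a made-up name, not the kernel's fault handler):

	static int example_handle_sea(struct pt_regs *regs)
	{
		if (apei_claim_sea(regs) == 0)
			return 0;	/* firmware-first RAS notification, already handled */

		return -ENOENT;		/* not claimed: fall back to normal fault handling */
	}
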
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4b650ec1d7dd..b9f8d787eea9 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -14,8 +14,6 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
-extern int alternatives_applied;
-
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
@@ -27,7 +25,9 @@ struct alt_instr {
typedef void (*alternative_cb_t)(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
+void __init apply_boot_alternatives(void);
void __init apply_alternatives_all(void);
+bool alternative_is_applied(u16 cpufeature);
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length);
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index e278f94df0c9..14b41ddc68ba 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -22,6 +22,7 @@
#ifndef __ASSEMBLY__
+#include <linux/irqchip/arm-gic-common.h>
#include <linux/stringify.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
@@ -114,6 +115,21 @@ static inline void gic_write_bpr1(u32 val)
write_sysreg_s(val, SYS_ICC_BPR1_EL1);
}
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg_s(SYS_ICC_PMR_EL1);
+}
+
+static inline void gic_write_pmr(u32 val)
+{
+ write_sysreg_s(val, SYS_ICC_PMR_EL1);
+}
+
+static inline u32 gic_read_rpr(void)
+{
+ return read_sysreg_s(SYS_ICC_RPR_EL1);
+}
+
#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)
#define gic_read_lpir(c) readq_relaxed(c)
@@ -140,5 +156,21 @@ static inline void gic_write_bpr1(u32 val)
#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
#define gits_read_vpendbaser(c) readq_relaxed(c)
+static inline bool gic_prio_masking_enabled(void)
+{
+ return system_uses_irq_prio_masking();
+}
+
+static inline void gic_pmr_mask_irqs(void)
+{
+ BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF);
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+}
+
+static inline void gic_arch_enable_irqs(void)
+{
+ asm volatile ("msr daifclr, #2" : : : "memory");
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
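For context, these helpers are the building blocks of pseudo-NMI support: normal IRQs get masked at the GIC priority level (ICC_PMR_EL1) while PSTATE.I is cleared, so interrupts configured with a priority above the mask can still be delivered as NMIs. A minimal sketch of that sequence, using a made-up function name rather than the actual GICv3 driver code:

	static void pseudo_nmi_setup_sketch(void)
	{
		if (!gic_prio_masking_enabled())
			return;			/* priority masking not in use on this system */

		gic_pmr_mask_irqs();		/* mask normal IRQs via PMR */
		gic_arch_enable_irqs();		/* clear PSTATE.I so higher-priority IRQs (NMIs) get through */
	}
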
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 4128bec033f6..f74909ba29bd 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -24,7 +24,7 @@
.endm
.macro __uaccess_ttbr0_enable, tmp1, tmp2
- get_thread_info \tmp1
+ get_current_task \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
mrs \tmp2, ttbr1_el1
extr \tmp2, \tmp2, \tmp1, #48
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 4feb6119c3c9..c5308d01e228 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
@@ -62,16 +63,8 @@
.endm
/*
- * Enable and disable interrupts.
+ * Save/restore interrupts.
*/
- .macro disable_irq
- msr daifset, #2
- .endm
-
- .macro enable_irq
- msr daifclr, #2
- .endm
-
.macro save_and_disable_irq, flags
mrs \flags, daif
msr daifset, #2
@@ -536,9 +529,9 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
- * Return the current thread_info.
+ * Return the current task_struct.
*/
- .macro get_thread_info, rd
+ .macro get_current_task, rd
mrs \rd, sp_el0
.endm
@@ -604,6 +597,25 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#endif
.endm
+/*
+ * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
+ */
+ .macro tcr_clear_errata_bits, tcr, tmp1, tmp2
+#ifdef CONFIG_FUJITSU_ERRATUM_010001
+ mrs \tmp1, midr_el1
+
+ mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
+ and \tmp1, \tmp1, \tmp2
+ mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001
+ cmp \tmp1, \tmp2
+ b.ne 10f
+
+ mov_q \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
+ bic \tcr, \tcr, \tmp2
+10:
+#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
+ .endm
+
/**
* Errata workaround prior to disable MMU. Insert an ISB immediately prior
* to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
@@ -721,7 +733,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.macro if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
- get_thread_info x0
+ get_current_task x0
ldr x0, [x0, #TSK_TI_PREEMPT]
sub x0, x0, #PREEMPT_DISABLE_OFFSET
cbz x0, .Lyield_\@
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9bca54dda75c..1f4e9ee641c9 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -42,124 +42,131 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return_acquire
-#define atomic_add_return_release atomic_add_return_release
-#define atomic_add_return atomic_add_return
-
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#define atomic_sub_return_release atomic_sub_return_release
-#define atomic_sub_return atomic_sub_return
-
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#define atomic_fetch_add_release atomic_fetch_add_release
-#define atomic_fetch_add atomic_fetch_add
-
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#define atomic_fetch_sub atomic_fetch_sub
-
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#define atomic_fetch_and_release atomic_fetch_and_release
-#define atomic_fetch_and atomic_fetch_and
-
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#define atomic_fetch_andnot atomic_fetch_andnot
-
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#define atomic_fetch_or_release atomic_fetch_or_release
-#define atomic_fetch_or atomic_fetch_or
-
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#define atomic_fetch_xor atomic_fetch_xor
-
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
-#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new))
-#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new))
-#define atomic_xchg(v, new) xchg(&((v)->counter), (new))
-
-#define atomic_cmpxchg_relaxed(v, old, new) \
- cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_acquire(v, old, new) \
- cmpxchg_acquire(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_release(v, old, new) \
- cmpxchg_release(&((v)->counter), (old), (new))
-#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
-
-#define atomic_andnot atomic_andnot
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#define arch_atomic_add_return arch_atomic_add_return
+
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#define arch_atomic_sub_return arch_atomic_sub_return
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg_acquire(v, new) \
+ arch_xchg_acquire(&((v)->counter), (new))
+#define arch_atomic_xchg_release(v, new) \
+ arch_xchg_release(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+ arch_xchg(&((v)->counter), (new))
+
+#define arch_atomic_cmpxchg_relaxed(v, old, new) \
+ arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_acquire(v, old, new) \
+ arch_cmpxchg_acquire(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_release(v, old, new) \
+ arch_cmpxchg_release(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg(v, old, new) \
+ arch_cmpxchg(&((v)->counter), (old), (new))
+
+#define arch_atomic_andnot arch_atomic_andnot
/*
- * 64-bit atomic operations.
+ * 64-bit arch_atomic operations.
*/
-#define ATOMIC64_INIT ATOMIC_INIT
-#define atomic64_read atomic_read
-#define atomic64_set atomic_set
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#define atomic64_add_return_release atomic64_add_return_release
-#define atomic64_add_return atomic64_add_return
-
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#define atomic64_sub_return_release atomic64_sub_return_release
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#define atomic64_fetch_add atomic64_fetch_add
-
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#define atomic64_fetch_sub atomic64_fetch_sub
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#define atomic64_fetch_and atomic64_fetch_and
-
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#define atomic64_fetch_or atomic64_fetch_or
-
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#define atomic64_fetch_xor atomic64_fetch_xor
-
-#define atomic64_xchg_relaxed atomic_xchg_relaxed
-#define atomic64_xchg_acquire atomic_xchg_acquire
-#define atomic64_xchg_release atomic_xchg_release
-#define atomic64_xchg atomic_xchg
-
-#define atomic64_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic64_cmpxchg_release atomic_cmpxchg_release
-#define atomic64_cmpxchg atomic_cmpxchg
-
-#define atomic64_andnot atomic64_andnot
-
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define ATOMIC64_INIT ATOMIC_INIT
+#define arch_atomic64_read arch_atomic_read
+#define arch_atomic64_set arch_atomic_set
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#define arch_atomic64_add_return arch_atomic64_add_return
+
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+
+#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic64_xchg_release arch_atomic_xchg_release
+#define arch_atomic64_xchg arch_atomic_xchg
+
+#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic64_cmpxchg arch_atomic_cmpxchg
+
+#define arch_atomic64_andnot arch_atomic64_andnot
+
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+
+#include <asm-generic/atomic-instrumented.h>
#endif
#endif
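The atomic_*() to arch_atomic_*() rename is what allows the newly included <asm-generic/atomic-instrumented.h> to generate the public atomic API on top of the arch primitives, adding an instrumentation check before each access. A simplified sketch of the kind of wrapper that header emits (illustrative; the real header is generated, and its check hooks differ between kernel versions):

	static __always_inline int atomic_add_return(int i, atomic_t *v)
	{
		kasan_check_write(v, sizeof(*v));	/* let KASAN see the access */
		return arch_atomic_add_return(i, v);	/* arch-specific atomic op defined above */
	}
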
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index af7b99005453..e321293e0c89 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -39,7 +39,7 @@
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int val, result; \
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
#define ATOMIC_OPS(...) \
ATOMIC_OP(__VA_ARGS__) \
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
{ \
long result; \
unsigned long tmp; \
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{ \
long result; \
unsigned long tmp; \
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
long result, val; \
unsigned long tmp; \
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
\
return result; \
} \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
#define ATOMIC64_OPS(...) \
ATOMIC64_OP(__VA_ARGS__) \
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
#undef ATOMIC64_OP
__LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
long result;
unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
return result;
}
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
__LL_SC_INLINE u##sz \
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a424355240c5..9256a3921e4b 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -25,9 +25,9 @@
#error "please don't include this file directly"
#endif
-#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
+#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op)
#define ATOMIC_OP(op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
-static inline int atomic_add_return##name(int i, atomic_t *v) \
+static inline int arch_atomic_add_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC_OP_ADD_RETURN
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
}
#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
-static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND
-static inline void atomic_sub(int i, atomic_t *v)
+static inline void arch_atomic_sub(int i, atomic_t *v)
{
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
}
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
-static inline int atomic_sub_return##name(int i, atomic_t *v) \
+static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC_OP_SUB_RETURN
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
-static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
-#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
+#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op)
#define ATOMIC64_OP(op, asm_op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
-static inline long atomic64_add_return##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
#undef ATOMIC64_OP_ADD_RETURN
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
-static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
#undef ATOMIC64_FETCH_OP_AND
-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
-static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC64_FETCH_OP_SUB
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
register long x0 asm ("x0") = (long)v;
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3f9376f1c409..e6ea0f42e097 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
})
/* xchg */
-#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
-#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
-#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
-#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
+#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
+#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
+#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
})
/* cmpxchg */
-#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
-#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
-#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
-#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
-#define cmpxchg_local cmpxchg_relaxed
+#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
+#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define arch_cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define arch_cmpxchg_local arch_cmpxchg_relaxed
/* cmpxchg64 */
-#define cmpxchg64_relaxed cmpxchg_relaxed
-#define cmpxchg64_acquire cmpxchg_acquire
-#define cmpxchg64_release cmpxchg_release
-#define cmpxchg64 cmpxchg
-#define cmpxchg64_local cmpxchg_local
+#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg_acquire
+#define arch_cmpxchg64_release arch_cmpxchg_release
+#define arch_cmpxchg64 arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double() 1
@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb)
VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})
-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
- int __ret;\
- __cmpxchg_double_check(ptr1, ptr2); \
- __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
- (unsigned long)(n1), (unsigned long)(n2), \
- ptr1); \
- __ret; \
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ __cmpxchg_double_check(ptr1, ptr2); \
+ __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2), \
+ ptr1); \
+ __ret; \
})
-#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
-({\
- int __ret;\
- __cmpxchg_double_check(ptr1, ptr2); \
- __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
- (unsigned long)(n1), (unsigned long)(n2), \
- ptr1); \
- __ret; \
+#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ __cmpxchg_double_check(ptr1, ptr2); \
+ __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2), \
+ ptr1); \
+ __ret; \
})
#define __CMPWAIT_CASE(w, sfx, sz) \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 82e9099834ae..f6a76e43f39e 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -60,7 +60,8 @@
#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
#define ARM64_HAS_GENERIC_AUTH_ARCH 40
#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
+#define ARM64_HAS_IRQ_PRIO_MASKING 42
-#define ARM64_NCAPS 42
+#define ARM64_NCAPS 43
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index dfcfba725d72..e505e1fbd2b9 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -391,6 +391,10 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
+/* ARM64 CAPS + alternative_cb */
+#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
+extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
#define for_each_available_cap(cap) \
for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
@@ -612,6 +616,12 @@ static inline bool system_supports_generic_auth(void)
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
}
+static inline bool system_uses_irq_prio_masking(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+ cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+}
+
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 951ed1a4e5c9..2afb1338b48a 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -76,6 +76,7 @@
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_IMP_NVIDIA 0x4E
+#define ARM_CPU_IMP_FUJITSU 0x46
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -104,6 +105,8 @@
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
+#define FUJITSU_CPU_PART_A64FX 0x001
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -122,6 +125,12 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+
+/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
+#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_VARIANT(1))
+#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 8d91f2233135..db452aa9e651 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -18,8 +18,11 @@
#include <linux/irqflags.h>
+#include <asm/cpufeature.h>
+
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
@@ -36,31 +39,61 @@ static inline unsigned long local_daif_save(void)
{
unsigned long flags;
- flags = arch_local_save_flags();
+ flags = read_sysreg(daif);
+
+ if (system_uses_irq_prio_masking()) {
+ /* If IRQs are masked with PMR, reflect it in the flags */
+ if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
+ flags |= PSR_I_BIT;
+ }
local_daif_mask();
return flags;
}
-static inline void local_daif_unmask(void)
-{
- trace_hardirqs_on();
- asm volatile(
- "msr daifclr, #0xf // local_daif_unmask"
- :
- :
- : "memory");
-}
-
static inline void local_daif_restore(unsigned long flags)
{
- if (!arch_irqs_disabled_flags(flags))
+ bool irq_disabled = flags & PSR_I_BIT;
+
+ if (!irq_disabled) {
trace_hardirqs_on();
- arch_local_irq_restore(flags);
+ if (system_uses_irq_prio_masking())
+ arch_local_irq_enable();
+ } else if (!(flags & PSR_A_BIT)) {
+ /*
+ * If interrupts are disabled but we can take
+ * asynchronous errors, we can take NMIs
+ */
+ if (system_uses_irq_prio_masking()) {
+ flags &= ~PSR_I_BIT;
+ /*
+ * There has been concern that the write to daif
+ * might be reordered before this write to PMR.
+ * From the ARM ARM DDI 0487D.a, section D1.7.1
+ * "Accessing PSTATE fields":
+ * Writes to the PSTATE fields have side-effects on
+ * various aspects of the PE operation. All of these
+ * side-effects are guaranteed:
+ * - Not to be visible to earlier instructions in
+ * the execution stream.
+ * - To be visible to later instructions in the
+ * execution stream
+ *
+ * Also, writes to PMR are self-synchronizing, so no
+ * interrupts with a lower priority than PMR is signaled
+ * to the PE after the write.
+ *
+ * So we don't need additional synchronization here.
+ */
+ arch_local_irq_disable();
+ }
+ }
+
+ write_sysreg(flags, daif);
- if (arch_irqs_disabled_flags(flags))
+ if (irq_disabled)
trace_hardirqs_off();
}
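Callers of local_daif_save()/local_daif_restore() keep the same bracket-style usage whether or not priority masking is in effect; the saved flags now also reflect the PMR state. A hedged usage sketch (illustrative only, not kernel code):

	static void example_error_path(void)
	{
		unsigned long flags;

		flags = local_daif_save();	/* mask D, A, I, F; flags also records PMR masking */
		/* ... work that must not be interrupted, e.g. RAS handling ... */
		local_daif_restore(flags);	/* put back exactly what was enabled before */
	}
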
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 3dd3d664c5c5..4658c937e173 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -20,9 +20,6 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
-#ifdef CONFIG_XEN
- const struct dma_map_ops *dev_dma_ops;
-#endif
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 95dbf3ef735a..de98191e4c7d 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -29,15 +29,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return NULL;
}
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
-#define arch_setup_dma_ops arch_setup_dma_ops
-
-#ifdef CONFIG_IOMMU_DMA
-void arch_teardown_dma_ops(struct device *dev);
-#define arch_teardown_dma_ops arch_teardown_dma_ops
-#endif
-
/*
* Do not use this function in a driver, it is only provided for
* arch/arm/mm/xen.c, which is used by arm64 as well.
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed320895d1f..c9e9a6978e73 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+/*
+ * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not.
+ * And EFI shouldn't really play around with priority masking as it is not aware
+ * which priorities the OS has assigned to its interrupts.
+ */
+#define arch_efi_save_flags(state_flags) \
+ ((void)((state_flags) = read_sysreg(daif)))
+
+#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif)
+
+
/* arch specific definitions used by the stub code */
/*
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index ec1e6d6fa14c..f987b8a8f325 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -55,7 +55,11 @@ enum fixed_addresses {
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
FIX_APEI_GHES_IRQ,
- FIX_APEI_GHES_NMI,
+ FIX_APEI_GHES_SEA,
+#ifdef CONFIG_ARM_SDE_INTERFACE
+ FIX_APEI_GHES_SDEI_NORMAL,
+ FIX_APEI_GHES_SDEI_CRITICAL,
+#endif
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 1473fc2f7ab7..89691c86640a 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -17,8 +17,12 @@
#define __ASM_HARDIRQ_H
#include <linux/cache.h>
+#include <linux/percpu.h>
#include <linux/threads.h>
+#include <asm/barrier.h>
#include <asm/irq.h>
+#include <asm/kvm_arm.h>
+#include <asm/sysreg.h>
#define NR_IPI 7
@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+struct nmi_ctx {
+ u64 hcr;
+};
+
+DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
+
+#define arch_nmi_enter() \
+ do { \
+ if (is_kernel_in_hyp_mode()) { \
+ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
+ nmi_ctx->hcr = read_sysreg(hcr_el2); \
+ if (!(nmi_ctx->hcr & HCR_TGE)) { \
+ write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
+ isb(); \
+ } \
+ } \
+ } while (0)
+
+#define arch_nmi_exit() \
+ do { \
+ if (is_kernel_in_hyp_mode()) { \
+ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
+ if (!(nmi_ctx->hcr & HCR_TGE)) \
+ write_sysreg(nmi_ctx->hcr, hcr_el2); \
+ } \
+ } while (0)
+
static inline void ack_bad_irq(unsigned int irq)
{
extern unsigned long irq_err_count;
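These hooks are intended to run from the generic NMI entry/exit path: with VHE the kernel runs at EL2, and forcing HCR_EL2.TGE on for the duration of a pseudo-NMI keeps host exception routing correct even if the NMI arrives while a guest context is loaded. A hedged sketch of the resulting bracket (illustrative; example_pseudo_nmi is a made-up name):

	static void example_pseudo_nmi(void)
	{
		arch_nmi_enter();	/* VHE: save HCR_EL2 and force TGE on */
		/* ... handle the NMI ... */
		arch_nmi_exit();	/* VHE: restore the saved HCR_EL2 value */
	}
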
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index fb6609875455..c6a07a3b433e 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -20,6 +20,11 @@
#include <asm/page.h>
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+extern bool arch_hugetlb_migration_supported(struct hstate *h);
+#endif
+
#define __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index ee723835c1f4..8bb7210ac286 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,6 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
: "memory"); \
})
+#define __io_par(v) __iormb(v)
#define __iowmb() wmb()
#define mmiowb() do { } while (0)
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 24692edf1a69..43d8366c1e87 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,9 @@
#ifdef __KERNEL__
+#include <asm/alternative.h>
#include <asm/ptrace.h>
+#include <asm/sysreg.h>
/*
* Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -36,33 +38,27 @@
/*
* CPU interrupt mask handling.
*/
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- asm volatile(
- "mrs %0, daif // arch_local_irq_save\n"
- "msr daifset, #2"
- : "=r" (flags)
- :
- : "memory");
- return flags;
-}
-
static inline void arch_local_irq_enable(void)
{
- asm volatile(
- "msr daifclr, #2 // arch_local_irq_enable"
- :
+ asm volatile(ALTERNATIVE(
+ "msr daifclr, #2 // arch_local_irq_enable\n"
+ "nop",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+ "dsb sy",
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
+ : "r" ((unsigned long) GIC_PRIO_IRQON)
: "memory");
}
static inline void arch_local_irq_disable(void)
{
- asm volatile(
- "msr daifset, #2 // arch_local_irq_disable"
- :
+ asm volatile(ALTERNATIVE(
+ "msr daifset, #2 // arch_local_irq_disable",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0",
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
+ : "r" ((unsigned long) GIC_PRIO_IRQOFF)
: "memory");
}
@@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
+ unsigned long daif_bits;
unsigned long flags;
- asm volatile(
- "mrs %0, daif // arch_local_save_flags"
- : "=r" (flags)
- :
+
+ daif_bits = read_sysreg(daif);
+
+ /*
+ * The asm is logically equivalent to:
+ *
+ * if (system_uses_irq_prio_masking())
+ * flags = (daif_bits & PSR_I_BIT) ?
+ * GIC_PRIO_IRQOFF :
+ * read_sysreg_s(SYS_ICC_PMR_EL1);
+ * else
+ * flags = daif_bits;
+ */
+ asm volatile(ALTERNATIVE(
+ "mov %0, %1\n"
+ "nop\n"
+ "nop",
+ "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n"
+ "ands %1, %1, " __stringify(PSR_I_BIT) "\n"
+ "csel %0, %0, %2, eq",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (flags), "+r" (daif_bits)
+ : "r" ((unsigned long) GIC_PRIO_IRQOFF)
: "memory");
+
+ return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+
+ arch_local_irq_disable();
+
return flags;
}
@@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile(
- "msr daif, %0 // arch_local_irq_restore"
- :
- : "r" (flags)
- : "memory");
+ asm volatile(ALTERNATIVE(
+ "msr daif, %0\n"
+ "nop",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
+ "dsb sy",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "+r" (flags)
+ :
+ : "memory");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return flags & PSR_I_BIT;
+ int res;
+
+ asm volatile(ALTERNATIVE(
+ "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+ "nop",
+ "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+ "cset %w0, ls",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (res)
+ : "r" ((int) flags)
+ : "memory");
+
+ return res;
}
#endif
#endif
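After this change the value carried in the irq flags is overloaded: without ARM64_HAS_IRQ_PRIO_MASKING it is still the DAIF register, with it it is a PMR value (forced to GIC_PRIO_IRQOFF when PSTATE.I was set). A plain-C model of the arch_irqs_disabled_flags() test above (illustrative only; irqs_disabled_flags_model is a made-up name):

	static inline int irqs_disabled_flags_model(unsigned long flags, int prio_masking)
	{
		if (!prio_masking)
			return flags & PSR_I_BIT;	/* flags holds a DAIF value */

		/* flags holds a PMR value: at or below GIC_PRIO_IRQOFF means masked */
		return flags <= GIC_PRIO_IRQOFF;
	}
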
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 506386a3edde..d3842791e1c4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -77,6 +77,10 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
*/
if (!vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID3;
+
+ if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
+ vcpu_el1_is_32bit(vcpu))
+ vcpu->arch.hcr_el2 |= HCR_TID2;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -331,6 +335,14 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
return ESR_ELx_SYS64_ISS_RT(esr);
}
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+
+ return kvm_vcpu_dabt_iswrite(vcpu);
+}
+
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7732d0ba4e60..a01fe087e022 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,12 +24,14 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/smp_plat.h>
#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -48,6 +50,7 @@
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
@@ -56,16 +59,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
-struct kvm_arch {
+struct kvm_vmid {
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
+};
+
+struct kvm_arch {
+ struct kvm_vmid vmid;
/* stage2 entry level table */
pgd_t *pgd;
+ phys_addr_t pgd_phys;
- /* VTTBR value associated with above pgd and vmid */
- u64 vttbr;
/* VTCR_EL2 value for this VM */
u64 vtcr;
@@ -208,6 +214,13 @@ struct kvm_cpu_context {
typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct vcpu_reset_state {
+ unsigned long pc;
+ unsigned long r0;
+ bool be;
+ bool reset;
+};
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
@@ -297,6 +310,9 @@ struct kvm_vcpu_arch {
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
+ /* Additional reset state */
+ struct vcpu_reset_state reset_state;
+
/* True when deferrable sysregs are loaded on the physical CPU,
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
bool sysregs_loaded_on_cpu;
@@ -370,7 +386,36 @@ void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
u64 __kvm_call_hyp(void *hypfn, ...);
-#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
+/*
+ * The couple of isb() below are there to guarantee the same behaviour
+ * on VHE as on !VHE, where the eret to EL1 acts as a context
+ * synchronization event.
+ */
+#define kvm_call_hyp(f, ...) \
+ do { \
+ if (has_vhe()) { \
+ f(__VA_ARGS__); \
+ isb(); \
+ } else { \
+ __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+ } \
+ } while(0)
+
+#define kvm_call_hyp_ret(f, ...) \
+ ({ \
+ typeof(f(__VA_ARGS__)) ret; \
+ \
+ if (has_vhe()) { \
+ ret = f(__VA_ARGS__); \
+ isb(); \
+ } else { \
+ ret = __kvm_call_hyp(kvm_ksym_ref(f), \
+ ##__VA_ARGS__); \
+ } \
+ \
+ ret; \
+ })
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -389,6 +434,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+ int cpu)
+{
+ /* The host's MPIDR is immutable, so let's set it up at boot time */
+ cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+}
+
void __kvm_enable_ssbs(void);
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
@@ -474,10 +526,25 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
static inline void kvm_arm_vhe_guest_enter(void)
{
local_daif_mask();
+
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal the CPU of interrupts of lower priority, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ */
+ if (system_uses_irq_prio_masking()) {
+ gic_write_pmr(GIC_PRIO_IRQON);
+ dsb(sy);
+ }
}
static inline void kvm_arm_vhe_guest_exit(void)
{
+ /*
+ * local_daif_restore() takes care to properly restore PSTATE.DAIF
+ * and the GIC PMR if the host is using IRQ priorities.
+ */
local_daif_restore(DAIF_PROCCTX_NOIRQ);
/*
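The kvm_call_hyp()/kvm_call_hyp_ret() split lets callers pick a variant based on whether the hyp function returns a value, with the VHE path getting the extra isb() described in the comment above. A hedged sketch of call sites (hyp_fn_void and hyp_fn_ret are made-up names, not real hyp functions):

	static void example_call_sites(struct kvm_vcpu *vcpu)
	{
		u64 val;

		kvm_call_hyp(hyp_fn_void, vcpu);	/* no return value needed */
		val = kvm_call_hyp_ret(hyp_fn_ret);	/* result wanted back */
	}
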
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index a80a7ef57325..4da765f2cca5 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -21,6 +21,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
+#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace
@@ -163,7 +164,7 @@ void __noreturn __hyp_do_panic(unsigned long, ...);
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{
write_sysreg(kvm->arch.vtcr, vtcr_el2);
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
/*
* ARM erratum 1165522 requires the actual execution of the above
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 8af4b1befa42..b0742a16c6c9 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -138,7 +138,8 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
})
/*
- * We currently only support a 40bit IPA.
+ * We currently support using a VM-specified IPA size. For backward
+ * compatibility, the default IPA size is fixed to 40bits.
*/
#define KVM_PHYS_SHIFT (40)
@@ -591,9 +592,15 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}
-static inline bool kvm_cpu_has_cnp(void)
+static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
- return system_supports_cnp();
+ struct kvm_vmid *vmid = &kvm->arch.vmid;
+ u64 vmid_field, baddr;
+ u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
+
+ baddr = kvm->arch.pgd_phys;
+ vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+ return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h
new file mode 100644
index 000000000000..8ac6ee77437c
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_ras.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - Arm Ltd */
+
+#ifndef __ARM64_KVM_RAS_H__
+#define __ARM64_KVM_RAS_H__
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <asm/acpi.h>
+
+/*
+ * Was this synchronous external abort a RAS notification?
+ * Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
+ */
+static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
+{
+ /* apei_claim_sea(NULL) expects to mask interrupts itself */
+ lockdep_assert_irqs_enabled();
+
+ return apei_claim_sea(NULL);
+}
+
+#endif /* __ARM64_KVM_RAS_H__ */
diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h
deleted file mode 100644
index 6afeed2467f1..000000000000
--- a/arch/arm64/include/asm/memblock.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_MEMBLOCK_H
-#define __ASM_MEMBLOCK_H
-
-extern void arm64_memblock_init(void);
-
-#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e1ec947e7c0c..290195168bb3 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -80,11 +80,7 @@
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
-#ifdef CONFIG_KASAN_EXTRA
-#define KASAN_THREAD_SHIFT 2
-#else
#define KASAN_THREAD_SHIFT 1
-#endif /* CONFIG_KASAN_EXTRA */
#else
#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0
@@ -316,8 +312,9 @@ static inline void *phys_to_virt(phys_addr_t x)
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
- __addr = __tag_set(__addr, page_kasan_tag(page)); \
- ((void *)__addr); \
+ unsigned long __addr_tag = \
+ __tag_set(__addr, page_kasan_tag(page)); \
+ ((void *)__addr_tag); \
})
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
@@ -332,6 +329,17 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_addr_valid(kaddr) \
(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with a LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
+
#include <asm-generic/memory_model.h>
#endif
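As a worked example of the INIT_MEMBLOCK_RESERVED_REGIONS override: assuming the generic default of INIT_MEMBLOCK_REGIONS == 128 and an arm64 config with NR_CPUS == 256 (both values are assumptions here), the static reserved-region array grows to 128 + 256 + 1 = 385 entries, giving enough headroom for the per-CPU LPI tables that must be re-reserved across kexec.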
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3e8063f4f9d3..67ef25d037ea 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -129,6 +129,7 @@ static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
static inline void arm64_apply_bp_hardening(void) { }
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h
index 2ba6c6b9541f..71abfc7612b2 100644
--- a/arch/arm64/include/asm/neon-intrinsics.h
+++ b/arch/arm64/include/asm/neon-intrinsics.h
@@ -36,4 +36,8 @@
#include <arm_neon.h>
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
+#endif
+
#endif /* __ASM_NEON_INTRINSICS_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e9b0a7d75184..a69259cc1f16 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -302,6 +302,7 @@
#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+#define TCR_NFD0 (UL(1) << 53)
#define TCR_NFD1 (UL(1) << 54)
/*
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f1a7ab18faf3..5d9ce62bdebd 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -191,6 +191,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
memset(regs, 0, sizeof(*regs));
forget_syscall(regs);
regs->pc = pc;
+
+ if (system_uses_irq_prio_masking())
+ regs->pmr_save = GIC_PRIO_IRQON;
}
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 6afd8476c60c..9e948a93d26c 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -34,13 +34,10 @@ struct ptdump_info {
void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
-int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
-static inline int ptdump_debugfs_register(struct ptdump_info *info,
- const char *name)
-{
- return 0;
-}
+static inline void ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name) { }
#endif
void ptdump_check_wx(void);
#endif /* CONFIG_ARM64_PTDUMP_CORE */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index fce22c4b2f73..ec60174c8c18 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -19,12 +19,26 @@
#ifndef __ASM_PTRACE_H
#define __ASM_PTRACE_H
+#include <asm/cpufeature.h>
+
#include <uapi/asm/ptrace.h>
/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
+/*
+ * PMR values used to mask/unmask interrupts.
+ *
+ * GIC priority masking works as follows: if an IRQ's priority is a higher value
+ * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
+ * means masking more IRQs (or at least that the same IRQs remain masked).
+ *
+ * To mask interrupts, we clear the most significant bit of PMR.
+ */
+#define GIC_PRIO_IRQON 0xf0
+#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+
/* Additional SPSR bits not exposed in the UABI */
#define PSR_IL_BIT (1 << 20)
@@ -167,7 +181,8 @@ struct pt_regs {
#endif
u64 orig_addr_limit;
- u64 unused; // maintain 16 byte alignment
+ /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+ u64 pmr_save;
u64 stackframe[2];
};
@@ -202,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs)
#define processor_mode(regs) \
((regs)->pstate & PSR_MODE_MASK)
-#define interrupts_enabled(regs) \
- (!((regs)->pstate & PSR_I_BIT))
+#define irqs_priority_unmasked(regs) \
+ (system_uses_irq_prio_masking() ? \
+ (regs)->pmr_save == GIC_PRIO_IRQON : \
+ true)
+
+#define interrupts_enabled(regs) \
+ (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
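
The reworked interrupts_enabled() check above can be modelled outside the kernel. The sketch below is a userspace model under stated assumptions (struct fake_regs and model_interrupts_enabled() are invented stand-ins, not kernel API); it shows that an exception frame now counts as masked either when PSTATE.I is set or when the saved PMR is not GIC_PRIO_IRQON.

#include <stdbool.h>
#include <stdio.h>

#define PSR_I_BIT       (1u << 7)
#define GIC_PRIO_IRQON  0xf0
#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)

/* Invented stand-in for struct pt_regs, for illustration only. */
struct fake_regs {
	unsigned long pstate;
	unsigned long pmr_save;
};

static bool model_interrupts_enabled(const struct fake_regs *regs,
				     bool prio_masking)
{
	bool prio_unmasked = prio_masking ?
			     regs->pmr_save == GIC_PRIO_IRQON : true;

	return !(regs->pstate & PSR_I_BIT) && prio_unmasked;
}

int main(void)
{
	struct fake_regs masked   = { .pstate = 0, .pmr_save = GIC_PRIO_IRQOFF };
	struct fake_regs unmasked = { .pstate = 0, .pmr_save = GIC_PRIO_IRQON  };

	/* Prints 0 then 1: the saved PMR alone is enough to report IRQs as masked. */
	printf("%d %d\n", model_interrupts_enabled(&masked, true),
			  model_interrupts_enabled(&unmasked, true));
	return 0;
}
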
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h
index eee31a9f72a5..e9c1a02c2154 100644
--- a/arch/arm64/include/asm/sync_bitops.h
+++ b/arch/arm64/include/asm/sync_bitops.h
@@ -15,13 +15,13 @@
* ops which are SMP safe even on a UP kernel.
*/
-#define sync_set_bit(nr, p) set_bit(nr, p)
-#define sync_clear_bit(nr, p) clear_bit(nr, p)
-#define sync_change_bit(nr, p) change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
-#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define sync_set_bit(nr, p) set_bit(nr, p)
+#define sync_clear_bit(nr, p) clear_bit(nr, p)
+#define sync_change_bit(nr, p) change_bit(nr, p)
+#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p)
+#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
+#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
+#define arch_sync_cmpxchg arch_cmpxchg
#endif
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 72dc4c011014..5b267dec6194 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -361,6 +361,7 @@
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
+#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
@@ -392,6 +393,10 @@
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
+#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
+
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
@@ -426,7 +431,7 @@
#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
-#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 0e2a0ecaf484..32693f34f431 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -46,8 +46,6 @@ extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-int handle_guest_sea(phys_addr_t addr, unsigned int esr);
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index bbca68b54732..eb3ef73e07cf 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -79,7 +79,6 @@ void arch_release_task_struct(struct task_struct *tsk);
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
- * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
*/
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 547d7a0c9d05..e5d5f31c6d36 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -34,7 +34,6 @@
#include <asm/memory.h>
#include <asm/extable.h>
-#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
@@ -268,7 +267,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT))
-#define __get_user_err(x, ptr, err) \
+#define __raw_get_user(x, ptr, err) \
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
@@ -297,28 +296,22 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_check(x, ptr, err) \
-({ \
+#define __get_user_error(x, ptr, err) \
+do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __get_user_err((x), __p, (err)); \
+ __raw_get_user((x), __p, (err)); \
} else { \
(x) = 0; (err) = -EFAULT; \
} \
-})
-
-#define __get_user_error(x, ptr, err) \
-({ \
- __get_user_check((x), (ptr), (err)); \
- (void)0; \
-})
+} while (0)
#define __get_user(x, ptr) \
({ \
int __gu_err = 0; \
- __get_user_check((x), (ptr), __gu_err); \
+ __get_user_error((x), (ptr), __gu_err); \
__gu_err; \
})
@@ -338,7 +331,7 @@ do { \
: "+r" (err) \
: "r" (x), "r" (addr), "i" (-EFAULT))
-#define __put_user_err(x, ptr, err) \
+#define __raw_put_user(x, ptr, err) \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
@@ -366,28 +359,22 @@ do { \
uaccess_disable_not_uao(); \
} while (0)
-#define __put_user_check(x, ptr, err) \
-({ \
+#define __put_user_error(x, ptr, err) \
+do { \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __put_user_err((x), __p, (err)); \
+ __raw_put_user((x), __p, (err)); \
} else { \
(err) = -EFAULT; \
} \
-})
-
-#define __put_user_error(x, ptr, err) \
-({ \
- __put_user_check((x), (ptr), (err)); \
- (void)0; \
-})
+} while (0)
#define __put_user(x, ptr) \
({ \
int __pu_err = 0; \
- __put_user_check((x), (ptr), __pu_err); \
+ __put_user_error((x), (ptr), __pu_err); \
__pu_err; \
})
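
The consolidation above turns __get_user_error()/__put_user_error() into plain do/while statements that deposit -EFAULT on failure rather than evaluating to a value. A minimal userspace sketch of that calling convention follows; model_get_user_error() and its NULL-pointer check are invented for illustration and do not reflect the real access_ok()/uaccess machinery.

#include <errno.h>
#include <stdio.h>

/* Invented stand-in mimicking the statement-style __get_user_error():
 * writes the value through on success, zeroes the destination and sets
 * err to -EFAULT when the (simulated) access check fails. */
#define model_get_user_error(x, ptr, err)		\
do {							\
	if ((ptr) != NULL) {				\
		(x) = *(ptr);				\
	} else {					\
		(x) = 0;				\
		(err) = -EFAULT;			\
	}						\
} while (0)

int main(void)
{
	int src = 42, dst = 0, err = 0;

	model_get_user_error(dst, &src, err);
	printf("dst=%d err=%d\n", dst, err);		/* dst=42 err=0 */

	model_get_user_error(dst, (int *)NULL, err);
	printf("dst=%d err=%d\n", dst, err);		/* dst=0 err=-14 */
	return 0;
}
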
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index a7b1fc58ffdf..d1dd93436e1e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 400
+#define __NR_compat_syscalls 424
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 04ee190b90fe..5590f2623690 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -270,7 +270,7 @@ __SYSCALL(__NR_uname, sys_newuname)
/* 123 was sys_modify_ldt */
__SYSCALL(123, sys_ni_syscall)
#define __NR_adjtimex 124
-__SYSCALL(__NR_adjtimex, compat_sys_adjtimex)
+__SYSCALL(__NR_adjtimex, sys_adjtimex_time32)
#define __NR_mprotect 125
__SYSCALL(__NR_mprotect, sys_mprotect)
#define __NR_sigprocmask 126
@@ -344,9 +344,9 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 160
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
#define __NR_sched_rr_get_interval 161
-__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval)
+__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32)
#define __NR_nanosleep 162
-__SYSCALL(__NR_nanosleep, compat_sys_nanosleep)
+__SYSCALL(__NR_nanosleep, sys_nanosleep_time32)
#define __NR_mremap 163
__SYSCALL(__NR_mremap, sys_mremap)
#define __NR_setresuid 164
@@ -376,7 +376,7 @@ __SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 176
__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
#define __NR_rt_sigtimedwait 177
-__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait)
+__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#define __NR_rt_sigqueueinfo 178
__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigsuspend 179
@@ -502,7 +502,7 @@ __SYSCALL(__NR_tkill, sys_tkill)
#define __NR_sendfile64 239
__SYSCALL(__NR_sendfile64, sys_sendfile64)
#define __NR_futex 240
-__SYSCALL(__NR_futex, compat_sys_futex)
+__SYSCALL(__NR_futex, sys_futex_time32)
#define __NR_sched_setaffinity 241
__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
#define __NR_sched_getaffinity 242
@@ -512,7 +512,7 @@ __SYSCALL(__NR_io_setup, compat_sys_io_setup)
#define __NR_io_destroy 244
__SYSCALL(__NR_io_destroy, sys_io_destroy)
#define __NR_io_getevents 245
-__SYSCALL(__NR_io_getevents, compat_sys_io_getevents)
+__SYSCALL(__NR_io_getevents, sys_io_getevents_time32)
#define __NR_io_submit 246
__SYSCALL(__NR_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 247
@@ -538,21 +538,21 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_timer_create 257
__SYSCALL(__NR_timer_create, compat_sys_timer_create)
#define __NR_timer_settime 258
-__SYSCALL(__NR_timer_settime, compat_sys_timer_settime)
+__SYSCALL(__NR_timer_settime, sys_timer_settime32)
#define __NR_timer_gettime 259
-__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime)
+__SYSCALL(__NR_timer_gettime, sys_timer_gettime32)
#define __NR_timer_getoverrun 260
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
#define __NR_timer_delete 261
__SYSCALL(__NR_timer_delete, sys_timer_delete)
#define __NR_clock_settime 262
-__SYSCALL(__NR_clock_settime, compat_sys_clock_settime)
+__SYSCALL(__NR_clock_settime, sys_clock_settime32)
#define __NR_clock_gettime 263
-__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime)
+__SYSCALL(__NR_clock_gettime, sys_clock_gettime32)
#define __NR_clock_getres 264
-__SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
+__SYSCALL(__NR_clock_getres, sys_clock_getres_time32)
#define __NR_clock_nanosleep 265
-__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
+__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep_time32)
#define __NR_statfs64 266
__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
#define __NR_fstatfs64 267
@@ -560,7 +560,7 @@ __SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
#define __NR_tgkill 268
__SYSCALL(__NR_tgkill, sys_tgkill)
#define __NR_utimes 269
-__SYSCALL(__NR_utimes, compat_sys_utimes)
+__SYSCALL(__NR_utimes, sys_utimes_time32)
#define __NR_arm_fadvise64_64 270
__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
#define __NR_pciconfig_iobase 271
@@ -574,9 +574,9 @@ __SYSCALL(__NR_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 275
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
#define __NR_mq_timedsend 276
-__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend)
+__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend_time32)
#define __NR_mq_timedreceive 277
-__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive)
+__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive_time32)
#define __NR_mq_notify 278
__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 279
@@ -622,7 +622,7 @@ __SYSCALL(__NR_semop, sys_semop)
#define __NR_semget 299
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 300
-__SYSCALL(__NR_semctl, compat_sys_semctl)
+__SYSCALL(__NR_semctl, compat_sys_old_semctl)
#define __NR_msgsnd 301
__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
#define __NR_msgrcv 302
@@ -630,7 +630,7 @@ __SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
#define __NR_msgget 303
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 304
-__SYSCALL(__NR_msgctl, compat_sys_msgctl)
+__SYSCALL(__NR_msgctl, compat_sys_old_msgctl)
#define __NR_shmat 305
__SYSCALL(__NR_shmat, compat_sys_shmat)
#define __NR_shmdt 306
@@ -638,7 +638,7 @@ __SYSCALL(__NR_shmdt, sys_shmdt)
#define __NR_shmget 307
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 308
-__SYSCALL(__NR_shmctl, compat_sys_shmctl)
+__SYSCALL(__NR_shmctl, compat_sys_old_shmctl)
#define __NR_add_key 309
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 310
@@ -646,7 +646,7 @@ __SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 311
__SYSCALL(__NR_keyctl, compat_sys_keyctl)
#define __NR_semtimedop 312
-__SYSCALL(__NR_semtimedop, compat_sys_semtimedop)
+__SYSCALL(__NR_semtimedop, sys_semtimedop_time32)
#define __NR_vserver 313
__SYSCALL(__NR_vserver, sys_ni_syscall)
#define __NR_ioprio_set 314
@@ -674,7 +674,7 @@ __SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_fchownat 325
__SYSCALL(__NR_fchownat, sys_fchownat)
#define __NR_futimesat 326
-__SYSCALL(__NR_futimesat, compat_sys_futimesat)
+__SYSCALL(__NR_futimesat, sys_futimesat_time32)
#define __NR_fstatat64 327
__SYSCALL(__NR_fstatat64, sys_fstatat64)
#define __NR_unlinkat 328
@@ -692,9 +692,9 @@ __SYSCALL(__NR_fchmodat, sys_fchmodat)
#define __NR_faccessat 334
__SYSCALL(__NR_faccessat, sys_faccessat)
#define __NR_pselect6 335
-__SYSCALL(__NR_pselect6, compat_sys_pselect6)
+__SYSCALL(__NR_pselect6, compat_sys_pselect6_time32)
#define __NR_ppoll 336
-__SYSCALL(__NR_ppoll, compat_sys_ppoll)
+__SYSCALL(__NR_ppoll, compat_sys_ppoll_time32)
#define __NR_unshare 337
__SYSCALL(__NR_unshare, sys_unshare)
#define __NR_set_robust_list 338
@@ -718,7 +718,7 @@ __SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
#define __NR_kexec_load 347
__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
#define __NR_utimensat 348
-__SYSCALL(__NR_utimensat, compat_sys_utimensat)
+__SYSCALL(__NR_utimensat, sys_utimensat_time32)
#define __NR_signalfd 349
__SYSCALL(__NR_signalfd, compat_sys_signalfd)
#define __NR_timerfd_create 350
@@ -728,9 +728,9 @@ __SYSCALL(__NR_eventfd, sys_eventfd)
#define __NR_fallocate 352
__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
#define __NR_timerfd_settime 353
-__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
+__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime32)
#define __NR_timerfd_gettime 354
-__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime)
+__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime32)
#define __NR_signalfd4 355
__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
#define __NR_eventfd2 356
@@ -752,7 +752,7 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
#define __NR_perf_event_open 364
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_recvmmsg 365
-__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg)
+__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg_time32)
#define __NR_accept4 366
__SYSCALL(__NR_accept4, sys_accept4)
#define __NR_fanotify_init 367
@@ -766,7 +766,7 @@ __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 371
__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
#define __NR_clock_adjtime 372
-__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime)
+__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime32)
#define __NR_syncfs 373
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_sendmmsg 374
@@ -821,6 +821,51 @@ __SYSCALL(__NR_statx, sys_statx)
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_io_pgetevents 399
__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_migrate_pages 400
+__SYSCALL(__NR_migrate_pages, compat_sys_migrate_pages)
+#define __NR_kexec_file_load 401
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+/* 402 is unused */
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index b3ef061d8b74..d88e56b90b93 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
#include <xen/arm/page-coherent.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+ return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+ dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+
+ if (pfn_valid(pfn))
+ dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+ else
+ __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ if (pfn_valid(pfn))
+ dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+ else
+ __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
+ if (local)
+ dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
+ else
+ __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ unsigned long pfn = PFN_DOWN(handle);
+ /*
+ * Dom0 is mapped 1:1, and while a Linux page can span across
+ * multiple Xen pages, it is not possible to have a mix of local
+ * and foreign Xen pages. Since Dom0 is mapped 1:1, calling
+ * pfn_valid on a foreign mfn will always return false. If the
+ * page is local we can safely call the native dma_ops function,
+ * otherwise we call the xen specific function.
+ */
+ if (pfn_valid(pfn))
+ dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+ else
+ __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
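
To make the local-vs-foreign test in xen_dma_map_page() above easier to follow, here is a small standalone model under stated assumptions (4 KiB Xen granule, one Xen pfn per Linux page, an order-2 compound page); model_is_local() and the constants are invented for illustration, not kernel or Xen API.

#include <stdbool.h>
#include <stdio.h>

#define XEN_PAGE_SHIFT    12   /* assumed 4 KiB Xen page size */
#define XEN_PFN_PER_PAGE   1   /* assumed PAGE_SIZE == XEN_PAGE_SIZE */

/* Invented model: does dev_addr fall inside the compound page starting
 * at page_pfn, i.e. a local Dom0 1:1 mapping rather than a foreign
 * grant mapping? */
static bool model_is_local(unsigned long page_pfn, unsigned long dev_addr,
			   unsigned int compound_order)
{
	unsigned long dev_pfn = dev_addr >> XEN_PAGE_SHIFT;
	unsigned long compound_pages =
		(1UL << compound_order) * XEN_PFN_PER_PAGE;

	return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_pages;
}

int main(void)
{
	/* An order-2 compound page starting at pfn 0x1000 covers pfns
	 * 0x1000..0x1003; pfn 0x1004 lies outside and is treated as foreign. */
	printf("%d %d\n",
	       model_is_local(0x1000, 0x1002UL << XEN_PAGE_SHIFT, 2),
	       model_is_local(0x1000, 0x1004UL << XEN_PAGE_SHIFT, 2));
	return 0;
}
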