path: root/arch/arm64/include
author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-28 13:29:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-28 13:29:36 -0700
commit     3d9d7405c0699ade882fec0c1cc6685cd5742ab3 (patch)
tree       0f6e4683cf2fe354d972144a92c09d24ed648d2e /arch/arm64/include
parent     Merge tag 'powerpc-4.13-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux (diff)
parent     arm64: mmu: Place guard page after mapping of kernel image (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "I'd been collecting these whilst we debugged a CPU hotplug failure,
  but we ended up diagnosing that one to tglx, who has taken a fix via
  the -tip tree separately. We're seeing some NFS issues that we haven't
  gotten to the bottom of yet, and we've uncovered some issues with our
  backtracing too so there might be another fixes pull before we're
  done.

  Summary:

   - Ensure we have a guard page after the kernel image in vmalloc

   - Fix incorrect prefetch stride in copy_page

   - Ensure irqs are disabled in die()

   - Fix for event group validation in QCOM L2 PMU driver

   - Fix requesting of PMU IRQs on AMD Seattle

   - Minor cleanups and fixes"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mmu: Place guard page after mapping of kernel image
  drivers/perf: arm_pmu: Request PMU SPIs with IRQF_PER_CPU
  arm64: sysreg: Fix unprotected macro argmuent in write_sysreg
  perf: qcom_l2: fix column exclusion check
  arm64/lib: copy_page: use consistent prefetch stride
  arm64/numa: Drop duplicate message
  perf: Convert to using %pOF instead of full_name
  arm64: Convert to using %pOF instead of full_name
  arm64: traps: disable irq in die()
  arm64: atomics: Remove '&' from '+&' asm constraint in lse atomics
  arm64: uaccess: Remove redundant __force from addr cast in __range_ok
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h  2
-rw-r--r--  arch/arm64/include/asm/sysreg.h      4
-rw-r--r--  arch/arm64/include/asm/uaccess.h     2
3 files changed, 4 insertions, 4 deletions
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 99fa69c9c3cf..9ef0797380cb 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
" sub x30, x30, %[ret]\n"
" cbnz x30, 1b\n"
"2:")
- : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ : [ret] "+r" (x0), [v] "+Q" (v->counter)
:
: __LL_SC_CLOBBERS, "cc", "memory");
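This hunk is the "Remove '&' from '+&' asm constraint in lse atomics" change from the shortlog: an operand tied with "+" is already both read and written, so the compiler must keep it live across the asm and the early-clobber modifier "&" adds nothing. The snippet below is a minimal, hypothetical sketch of that constraint (not kernel code); it assumes an AArch64 toolchain.

	#include <stdio.h>

	/* Hypothetical helper: x is a read-write ("+r") operand, so it is
	 * allocated to one register for both input and output; writing the
	 * constraint as "+&r" would be redundant rather than a fix. */
	static inline long add_one(long x)
	{
		asm("add %0, %0, #1"	/* x = x + 1 */
		    : "+r" (x));	/* "+&r" conveys nothing extra here */
		return x;
	}

	int main(void)
	{
		printf("%ld\n", add_one(41));	/* prints 42 */
		return 0;
	}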
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 16e44fa9b3b6..248339e4aaf5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -492,7 +492,7 @@ asm(
* the "%x0" template means XZR.
*/
#define write_sysreg(v, r) do { \
- u64 __val = (u64)v; \
+ u64 __val = (u64)(v); \
asm volatile("msr " __stringify(r) ", %x0" \
: : "rZ" (__val)); \
} while (0)
@@ -508,7 +508,7 @@ asm(
})
#define write_sysreg_s(v, r) do { \
- u64 __val = (u64)v; \
+ u64 __val = (u64)(v); \
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
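The two hunks above add the parentheses that the "Fix unprotected macro argmuent in write_sysreg" entry refers to: a cast binds only to the first token of an unparenthesized macro argument, so an expression passed as v can parse differently from what the caller wrote. A standalone sketch of the difference, using hypothetical macros rather than the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	#define TO_U64_UNPROTECTED(v)	((u64)v)	/* cast grabs only the first operand */
	#define TO_U64_PROTECTED(v)	((u64)(v))	/* cast covers the whole argument */

	int main(void)
	{
		uint32_t x = 0xffff0000u;

		/* Unprotected: expands to ((u64)x << 16), shifted in 64 bits.	*/
		/* Protected:   expands to ((u64)(x << 16)); the 32-bit shift	*/
		/*              wraps to 0 before the widening cast.		*/
		printf("unprotected: %#llx\n",
		       (unsigned long long)TO_U64_UNPROTECTED(x << 16));	/* 0xffff00000000 */
		printf("protected:   %#llx\n",
		       (unsigned long long)TO_U64_PROTECTED(x << 16));		/* 0 */
		return 0;
	}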
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8f0a1de11e4a..fab46a0ea223 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -69,7 +69,7 @@ static inline void set_fs(mm_segment_t fs)
*/
#define __range_ok(addr, size) \
({ \
- unsigned long __addr = (unsigned long __force)(addr); \
+ unsigned long __addr = (unsigned long)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \