Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/Kbuild                    |  3
-rw-r--r--  arch/arc/include/asm/arcregs.h                 |  4
-rw-r--r--  arch/arc/include/asm/atomic.h                  |  4
-rw-r--r--  arch/arc/include/asm/atomic64-arcv2.h          | 15
-rw-r--r--  arch/arc/include/asm/bitops.h                  |  4
-rw-r--r--  arch/arc/include/asm/bug.h                     |  4
-rw-r--r--  arch/arc/include/asm/cache.h                   |  4
-rw-r--r--  arch/arc/include/asm/cachetype.h               |  5
-rw-r--r--  arch/arc/include/asm/cmpxchg.h                 |  6
-rw-r--r--  arch/arc/include/asm/current.h                 |  4
-rw-r--r--  arch/arc/include/asm/dsp-impl.h                |  2
-rw-r--r--  arch/arc/include/asm/dsp.h                     |  6
-rw-r--r--  arch/arc/include/asm/dwarf.h                   |  4
-rw-r--r--  arch/arc/include/asm/entry-compact.h           | 10
-rw-r--r--  arch/arc/include/asm/entry.h                   |  8
-rw-r--r--  arch/arc/include/asm/fb.h                      |  8
-rw-r--r--  arch/arc/include/asm/hugepage.h                |  2
-rw-r--r--  arch/arc/include/asm/io.h                      |  5
-rw-r--r--  arch/arc/include/asm/irq.h                     |  2
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h          |  4
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h        |  6
-rw-r--r--  arch/arc/include/asm/jump_label.h              |  4
-rw-r--r--  arch/arc/include/asm/linkage.h                 |  6
-rw-r--r--  arch/arc/include/asm/mmu-arcv2.h               |  6
-rw-r--r--  arch/arc/include/asm/mmu.h                     |  3
-rw-r--r--  arch/arc/include/asm/mmu_context.h             |  2
-rw-r--r--  arch/arc/include/asm/page.h                    |  4
-rw-r--r--  arch/arc/include/asm/pgalloc.h                 |  9
-rw-r--r--  arch/arc/include/asm/pgtable-bits-arcv2.h      |  8
-rw-r--r--  arch/arc/include/asm/pgtable-levels.h          |  6
-rw-r--r--  arch/arc/include/asm/pgtable.h                 |  4
-rw-r--r--  arch/arc/include/asm/processor.h               |  4
-rw-r--r--  arch/arc/include/asm/ptrace.h                  |  6
-rw-r--r--  arch/arc/include/asm/shmparam.h                |  2
-rw-r--r--  arch/arc/include/asm/smp.h                     |  4
-rw-r--r--  arch/arc/include/asm/switch_to.h               |  2
-rw-r--r--  arch/arc/include/asm/syscall.h                 | 25
-rw-r--r--  arch/arc/include/asm/thread_info.h             |  6
-rw-r--r--  arch/arc/include/asm/unaligned.h               | 27
-rw-r--r--  arch/arc/include/asm/unistd.h                  | 14
-rw-r--r--  arch/arc/include/uapi/asm/Kbuild               |  2
-rw-r--r--  arch/arc/include/uapi/asm/page.h               |  7
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h             |  4
-rw-r--r--  arch/arc/include/uapi/asm/swab.h               |  2
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h             | 44
45 files changed, 132 insertions(+), 179 deletions(-)
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 3c1afa524b9c..4c69522e0328 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h
+generic-y += text-patching.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 4b13f60fe7ca..a31bbf5c8bbc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -144,9 +144,9 @@
#define ARC_AUX_AGU_MOD2 0x5E2
#define ARC_AUX_AGU_MOD3 0x5E3
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>
/* Helpers */
#define TO_KB(bytes) ((bytes) >> 10)
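
The hunk above is part of a move, repeated throughout this diff, from the kernel's hand-rolled __ASSEMBLY__ guard to the compiler-provided __ASSEMBLER__ macro. A minimal sketch of the pattern (demo names, not taken from this tree): GCC and Clang predefine __ASSEMBLER__ when preprocessing .S files, so a shared header can expose #defines to both C and assembly while hiding C-only declarations from the assembler.

/* Illustrative header pattern only, not from this patch. */
#define DEMO_REG_BASE	0x5e0			/* visible to .c and .S files */

#ifndef __ASSEMBLER__
/* C-only part: the assembler never sees the inline helper below. */
static inline unsigned int demo_reg(unsigned int n)
{
	return DEMO_REG_BASE + n;
}
#endif /* !__ASSEMBLER__ */
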
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 592d7fffc223..e615c42b93ba 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
@@ -31,6 +31,6 @@
#include <asm/atomic64-arcv2.h>
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index 9b5791b85471..73080a664369 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -137,12 +137,9 @@ ATOMIC64_OPS(xor, xor, xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64
-arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+static inline u64 __arch_cmpxchg64_relaxed(volatile void *ptr, u64 old, u64 new)
{
- s64 prev;
-
- smp_mb();
+ u64 prev;
__asm__ __volatile__(
"1: llockd %0, [%1] \n"
@@ -152,14 +149,12 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
" bnz 1b \n"
"2: \n"
: "=&r"(prev)
- : "r"(ptr), "ir"(expected), "r"(new)
- : "cc"); /* memory clobber comes from smp_mb() */
-
- smp_mb();
+ : "r"(ptr), "ir"(old), "r"(new)
+ : "memory", "cc");
return prev;
}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#define arch_cmpxchg64_relaxed __arch_cmpxchg64_relaxed
static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
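
The hunk above stops open-coding the smp_mb() pair and exports only a relaxed 64-bit cmpxchg; the generic fallback layer can then wrap it with whatever ordering a caller asks for. A runnable userspace sketch of that layering, using GCC __atomic builtins rather than the kernel primitives (an assumption for illustration, not the kernel code):

#include <stdint.h>

/* Relaxed CAS: returns the value observed at *p (the builtin writes the
 * observed value back into 'old' when the compare fails). */
static inline uint64_t cmpxchg64_relaxed_demo(uint64_t *p, uint64_t old, uint64_t new)
{
	__atomic_compare_exchange_n(p, &old, new, 0,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	return old;
}

/* Fully ordered variant built on top of the relaxed one -- the same kind of
 * layering the generic fallbacks perform around arch_cmpxchg64_relaxed(). */
static inline uint64_t cmpxchg64_demo(uint64_t *p, uint64_t old, uint64_t new)
{
	uint64_t prev;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* like smp_mb() before */
	prev = cmpxchg64_relaxed_demo(p, old, new);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* like smp_mb() after  */
	return prev;
}
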
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index f5a936496f06..5340c2871392 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -10,7 +10,7 @@
#error only <linux/bitops.h> can be included directly
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
@@ -192,6 +192,6 @@ static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 4c453ba96c51..171c16021f70 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_BUG_H
#define _ASM_ARC_BUG_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/ptrace.h>
@@ -29,6 +29,6 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
#include <asm-generic/bug.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f0f1fc5d62b6..040a97f4dd82 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -23,7 +23,7 @@
*/
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/build_bug.h>
@@ -65,7 +65,7 @@
extern int ioc_enable;
extern unsigned long perip_base, perip_end;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
index 05fc7ed59712..acd3b6cb4bf5 100644
--- a/arch/arc/include/asm/cachetype.h
+++ b/arch/arc/include/asm/cachetype.h
@@ -2,8 +2,7 @@
#ifndef __ASM_ARC_CACHETYPE_H
#define __ASM_ARC_CACHETYPE_H
-#include <linux/types.h>
-
-#define cpu_dcache_is_aliasing() true
+#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() true
#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index e138fde067de..76f43db0890f 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -8,6 +8,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
+#include <linux/cmpxchg-emu.h>
#include <asm/barrier.h>
#include <asm/smp.h>
@@ -46,6 +47,9 @@
__typeof__(*(ptr)) _prev_; \
\
switch(sizeof((_p_))) { \
+ case 1: \
+ _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+ break; \
case 4: \
_prev_ = __cmpxchg(_p_, _o_, _n_); \
break; \
@@ -65,8 +69,6 @@
__typeof__(*(ptr)) _prev_; \
unsigned long __flags; \
\
- BUILD_BUG_ON(sizeof(_p_) != 4); \
- \
/* \
* spin lock/unlock provide the needed smp_mb() before/after \
*/ \
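
The new case 1 above routes single-byte cmpxchg() through the generic cmpxchg_emu_u8() helper from linux/cmpxchg-emu.h, which emulates a byte-wide CAS on top of the native word-wide one. A self-contained userspace sketch of the same idea, assuming a little-endian byte layout and GCC builtins; illustrative only, not the kernel's implementation:

#include <stdint.h>

static uint8_t cmpxchg_u8_emulated(volatile uint8_t *p, uint8_t old, uint8_t new)
{
	/* CAS on the aligned 32-bit word containing the target byte. */
	volatile uint32_t *word = (volatile uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
	unsigned int shift = ((uintptr_t)p & 3) * 8;	/* byte lane within the word */
	uint32_t mask = (uint32_t)0xff << shift;
	uint32_t cur, want;

	do {
		cur = *word;
		if (((cur & mask) >> shift) != old)
			return (cur & mask) >> shift;	/* mismatch: report current byte */
		want = (cur & ~mask) | ((uint32_t)new << shift);
	} while (!__atomic_compare_exchange_n(word, &cur, want, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	return old;					/* success: previous value */
}
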
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
index 06be89f6f2f0..03ffd005f3fa 100644
--- a/arch/arc/include/asm/current.h
+++ b/arch/arc/include/asm/current.h
@@ -9,7 +9,7 @@
#ifndef _ASM_ARC_CURRENT_H
#define _ASM_ARC_CURRENT_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_ARC_CURR_IN_REG
@@ -20,6 +20,6 @@ register struct task_struct *curr_arc asm("gp");
#include <asm-generic/current.h>
#endif /* ! CONFIG_ARC_CURR_IN_REG */
-#endif /* ! __ASSEMBLY__ */
+#endif /* ! __ASSEMBLER__ */
#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/dsp-impl.h b/arch/arc/include/asm/dsp-impl.h
index cd5636dfeb6f..fd5fdaad90c1 100644
--- a/arch/arc/include/asm/dsp-impl.h
+++ b/arch/arc/include/asm/dsp-impl.h
@@ -11,7 +11,7 @@
#define DSP_CTRL_DISABLED_ALL 0
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* clobbers r5 register */
.macro DSP_EARLY_INIT
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
index 202c78e56704..eeaaf4e4eabd 100644
--- a/arch/arc/include/asm/dsp.h
+++ b/arch/arc/include/asm/dsp.h
@@ -7,12 +7,12 @@
#ifndef __ASM_ARC_DSP_H
#define __ASM_ARC_DSP_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure fields name must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {
@@ -24,6 +24,6 @@ struct dsp_callee_regs {
#endif
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_ARC_DSP_H */
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
index a0d5ebe1bc3f..1524c5cf8b59 100644
--- a/arch/arc/include/asm/dwarf.h
+++ b/arch/arc/include/asm/dwarf.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_DWARF_H
#define _ASM_ARC_DWARF_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef ARC_DW2_UNWIND_AS_CFI
@@ -38,6 +38,6 @@
#endif /* !ARC_DW2_UNWIND_AS_CFI */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_ARC_DWARF_H */
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 92c3e9f13252..00946fe04c9b 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
- /* With current tsk in r9, get it's kernel mode stack base */
+ /* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index cf1ba376e992..f453af251a1a 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -7,13 +7,13 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/mmu.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef CONFIG_ISA_ARCOMPACT
#include <asm/entry-compact.h> /* ISA specific bits */
@@ -56,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
@@ -146,7 +146,7 @@
#endif /* CONFIG_ARC_CURR_IN_REG */
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
extern void do_signal(struct pt_regs *);
extern void do_notify_resume(struct pt_regs *);
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
deleted file mode 100644
index 9c2383d29cbb..000000000000
--- a/arch/arc/include/asm/fb.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <asm-generic/fb.h>
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 8a2441670a8f..7765dc105d54 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -40,8 +40,6 @@ static inline pmd_t pte_pmd(pte_t pte)
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
-#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
-
#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 4fdb7350636c..00171a212b3c 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@@ -42,9 +42,6 @@ static inline void ioport_unmap(void __iomem *addr)
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
-/* Change struct page to physical address */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c574712ad865..9cd79263acba 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to same value
*/
#define NR_IRQS 512
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index fb3c21f1a238..30aea562f8aa 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -50,7 +50,7 @@
#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \
(ARCV2_IRQ_DEF_PRIO << 1))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Save IRQ state and disable IRQs
@@ -170,6 +170,6 @@ static inline void arc_softirq_clear(int irq)
seti
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 0d63e568d64c..85c2f6bcde0c 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -40,13 +40,13 @@
#define ISA_INIT_STATUS_BITS STATUS_IE_MASK
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/******************************************************************
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
@@ -196,6 +196,6 @@ static inline int arch_irqs_disabled(void)
flag \scratch
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
index a339223d9e05..66ead75784d9 100644
--- a/arch/arc/include/asm/jump_label.h
+++ b/arch/arc/include/asm/jump_label.h
@@ -2,7 +2,7 @@
#ifndef _ASM_ARC_JUMP_LABEL_H
#define _ASM_ARC_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/stringify.h>
#include <linux/types.h>
@@ -68,5 +68,5 @@ struct jump_entry {
jump_label_t key;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 8a3fb71e9cfa..ba3cb65b5eaa 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -12,7 +12,7 @@
#define __ALIGN .align 4
#define __ALIGN_STR __stringify(__ALIGN)
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro ST2 e, o, off
#ifdef CONFIG_ARC_HAS_LL64
@@ -61,7 +61,7 @@
CFI_ENDPROC ASM_NL \
.size name, .-name
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
#ifdef CONFIG_ARC_HAS_ICCM
#define __arcfp_code __section(".text.arcfp")
@@ -75,6 +75,6 @@
#define __arcfp_data __section(".data")
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/mmu-arcv2.h b/arch/arc/include/asm/mmu-arcv2.h
index ed9036d4ede3..5e5482026ac9 100644
--- a/arch/arc/include/asm/mmu-arcv2.h
+++ b/arch/arc/include/asm/mmu-arcv2.h
@@ -9,6 +9,8 @@
#ifndef _ASM_ARC_MMU_ARCV2_H
#define _ASM_ARC_MMU_ARCV2_H
+#include <soc/arc/arc_aux.h>
+
/*
* TLB Management regs
*/
@@ -67,7 +69,7 @@
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct mm_struct;
extern int pae40_exist_but_not_enab(void);
@@ -98,6 +100,6 @@ static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
sr \reg, [ARC_REG_PID]
.endm
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 9febf5bc3de6..e3b35ceab582 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/threads.h> /* NR_CPUS */
@@ -14,6 +14,7 @@ typedef struct {
unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
+struct pt_regs;
extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index dda471f5f05b..9963bb1a5733 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index def0dfb95b43..9720fe6b2c24 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -19,7 +19,7 @@
#endif /* CONFIG_ARC_HAS_PAE40 */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
@@ -136,6 +136,6 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
#include <asm-generic/getorder.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 096b8ef58edb..dfae070fe8d5 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -53,19 +53,14 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ pgd_t *ret = __pgd_alloc(mm, 0);
if (ret) {
int num, num2;
- num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
- memzero(ret, num * sizeof(pgd_t));
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
num2 = VMALLOC_SIZE / PGDIR_SIZE;
memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
-
- memzero(ret + num + num2,
- (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
-
}
return ret;
}
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index f3eea3f30b2e..4630c5acca05 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -66,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
+ * can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
@@ -75,7 +75,7 @@
* This is to enable COW mechanism
*/
/* xwr */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
@@ -130,7 +130,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
@@ -142,6 +142,6 @@ PTE_BIT_FUNC(swp_clear_exclusive, &= ~(_PAGE_SWP_EXCLUSIVE));
#include <asm/hugepage.h>
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index 86e148226463..c8f9273372c0 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -85,7 +85,7 @@
#define PTRS_PER_PTE BIT(PMD_SHIFT - PAGE_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#if CONFIG_PGTABLE_LEVELS > 3
#include <asm-generic/pgtable-nop4d.h>
@@ -142,7 +142,6 @@
#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#endif
@@ -177,12 +176,11 @@
#define set_pte(ptep, pte) ((*(ptep)) = (pte))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#ifdef CONFIG_ISA_ARCV2
#define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ)
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 4cf45a99fd79..bd580e2b62d7 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -19,7 +19,7 @@
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
@@ -29,6 +29,6 @@ extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index d606658e2fe7..7f7901ac6643 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -11,7 +11,7 @@
#ifndef __ASM_ARC_PROCESSOR_H
#define __ASM_ARC_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/ptrace.h>
#include <asm/dsp.h>
@@ -66,7 +66,7 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc,
extern unsigned int __get_wchan(struct task_struct *p);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* Default System Memory Map on ARC
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 00b9318e551e..f6c052af8f4d 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -10,7 +10,7 @@
#include <uapi/asm/ptrace.h>
#include <linux/compiler.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
typedef union {
struct {
@@ -169,9 +169,9 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-extern int syscall_trace_entry(struct pt_regs *);
+extern int syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_exit(struct pt_regs *);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_PTRACE_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index 8b0251464ffd..719112af0f41 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index e0913f52c2cd..990f834909f0 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index 1f85de8288b1..5806106a65f9 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_SWITCH_TO_H
#define _ASM_ARC_SWITCH_TO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <asm/dsp-impl.h>
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 9709256e31c8..728d625a10f1 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -24,6 +24,17 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
}
static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->r8 = nr;
+}
+
+static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->r0 = regs->orig_r0;
@@ -67,6 +78,20 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
}
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned long *inside_ptregs = &regs->r0;
+ unsigned int n = 6;
+ unsigned int i = 0;
+
+ while (n--) {
+ *inside_ptregs = args[i++];
+ inside_ptregs--;
+ }
+}
+
static inline int
syscall_get_arch(struct task_struct *task)
{
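
syscall_set_arguments() added above is the write-side mirror of the existing syscall_get_arguments() whose tail appears in the hunk context: ARC passes syscall arguments in r0..r5, and struct pt_regs lays those registers out so that decrementing a pointer from &regs->r0 walks r0, r1, ... r5. A standalone illustration with a mock register frame (simplified layout assumed for the example, not the real pt_regs):

#include <stdio.h>

struct mock_pt_regs {
	unsigned long r5, r4, r3, r2, r1, r0;	/* r0 at the highest address */
};

static void mock_set_args(struct mock_pt_regs *regs, const unsigned long *args)
{
	unsigned long *inside = &regs->r0;
	unsigned int n = 6, i = 0;

	while (n--) {
		*inside = args[i++];
		inside--;			/* step to r1, r2, ... r5 */
	}
}

int main(void)
{
	struct mock_pt_regs regs = { 0 };
	unsigned long args[6] = { 10, 11, 12, 13, 14, 15 };

	mock_set_args(&regs, args);
	printf("r0=%lu r5=%lu\n", regs.r0, regs.r5);	/* r0=10 r5=15 */
	return 0;
}
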
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 4c530cf131f3..255d2c774219 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -24,7 +24,7 @@
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_SHIFT (PAGE_SHIFT << THREAD_SIZE_ORDER)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/thread_info.h>
@@ -38,7 +38,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
unsigned long ksp; /* kernel mode stack top in __switch_to */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
struct task_struct *task; /* main task structure */
@@ -62,7 +62,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* thread information flags
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
deleted file mode 100644
index cf5a02382e0e..000000000000
--- a/arch/arc/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_ARC_UNALIGNED_H
-#define _ASM_ARC_UNALIGNED_H
-
-/* ARC700 can't handle unaligned Data accesses. */
-
-#include <asm-generic/unaligned.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_ARC_EMUL_UNALIGNED
-int misaligned_fixup(unsigned long address, struct pt_regs *regs,
- struct callee_regs *cregs);
-#else
-static inline int
-misaligned_fixup(unsigned long address, struct pt_regs *regs,
- struct callee_regs *cregs)
-{
- /* Not fixed */
- return 1;
-}
-#endif
-
-#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unistd.h b/arch/arc/include/asm/unistd.h
new file mode 100644
index 000000000000..211c230d88d6
--- /dev/null
+++ b/arch/arc/include/asm/unistd.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_ARC_UNISTD_H
+#define _ASM_ARC_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define NR_syscalls __NR_syscalls
+
+#endif
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 7fd9e741b527..4606a326af5c 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -14,7 +14,7 @@
/* PAGE_SHIFT determines the page size */
#ifdef __KERNEL__
-#define PAGE_SHIFT CONFIG_PAGE_SHIFT
+#include <vdso/page.h>
#else
/*
* Default 8k
@@ -24,11 +24,10 @@
* not available
*/
#define PAGE_SHIFT 13
+#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
+#define PAGE_MASK (~(PAGE_SIZE-1))
#endif
-#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
#define PAGE_OFFSET _AC(0x80000000, UL) /* Kernel starts at 2G onwrds */
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
#endif /* _UAPI__ASM_ARC_PAGE_H */
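
With this change, kernel builds take PAGE_SHIFT (and the derived PAGE_SIZE/PAGE_MASK) from <vdso/page.h>, while the userspace-visible branch keeps the 8 KiB default and now derives the other two macros locally. A small standalone example of that derivation and of what the mask does (demo macro names, not the kernel ones):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT	13
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)	/* 8192 */
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	uintptr_t addr = 0x80003abcUL;

	printf("page base   = %#lx\n", (unsigned long)(addr & DEMO_PAGE_MASK));	/* 0x80002000 */
	printf("page offset = %#lx\n", (unsigned long)(addr & ~DEMO_PAGE_MASK));	/* 0x1abc */
	return 0;
}
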
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 2a6eff57f6dd..3ae832db278c 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -14,7 +14,7 @@
#define PTRACE_GET_THREAD_AREA 25
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Userspace ABI: Register state needed by
* -ptrace (gdbserver)
@@ -53,6 +53,6 @@ struct user_regs_arcv2 {
unsigned long r30, r58, r59;
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 02109cd48ee1..8d1f1ef44ba7 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -62,7 +62,7 @@
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
- * (1) It is portable to any architecure
+ * (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index fa2713ae6bea..cb2905c7c5da 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -7,46 +7,4 @@
* published by the Free Software Foundation.
*/
-/******** no-legacy-syscalls-ABI *******/
-
-/*
- * Non-typical guard macro to enable inclusion twice in ARCH sys.c
- * That is how the Generic syscall wrapper generator works
- */
-#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
-#define _UAPI_ASM_ARC_UNISTD_H
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_TIME32_SYSCALLS
-
-#define sys_mmap2 sys_mmap_pgoff
-
-#include <asm-generic/unistd.h>
-
-#define NR_syscalls __NR_syscalls
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
-
-/* ARC specific syscall */
-#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
-#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
-#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
-#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
-
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
-__SYSCALL(__NR_arc_settls, sys_arc_settls)
-__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
-__SYSCALL(__NR_sysfs, sys_sysfs)
-
-#undef __SYSCALL
-
-#endif
+#include <asm/unistd_32.h>
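
The hand-maintained uapi header above is replaced by the generated <asm/unistd_32.h>, produced from the syscall table via the syscall-y rules added to the two Kbuild files earlier in this diff, while a subset of the __ARCH_WANT_* selections moves to the new kernel-side asm/unistd.h. Userspace keeps consuming the numbers exactly as before, e.g. via the libc syscall(2) wrapper; a minimal example:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Illustrative only: invoke a syscall by its generated number. SYS_getpid
 * resolves to the __NR_getpid value from the arch's unistd header. */
int main(void)
{
	long pid = syscall(SYS_getpid);

	printf("pid=%ld\n", pid);
	return 0;
}
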