Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/Kbuild |   3
-rw-r--r--  arch/arc/include/asm/arcregs.h | 103
-rw-r--r--  arch/arc/include/asm/atomic-llsc.h |   6
-rw-r--r--  arch/arc/include/asm/atomic-spinlock.h |   9
-rw-r--r--  arch/arc/include/asm/atomic.h |  28
-rw-r--r--  arch/arc/include/asm/atomic64-arcv2.h |  38
-rw-r--r--  arch/arc/include/asm/bitops.h |   8
-rw-r--r--  arch/arc/include/asm/bug.h |   4
-rw-r--r--  arch/arc/include/asm/cache.h |   4
-rw-r--r--  arch/arc/include/asm/cacheflush.h |  58
-rw-r--r--  arch/arc/include/asm/cachetype.h |   8
-rw-r--r--  arch/arc/include/asm/cmpxchg.h |  10
-rw-r--r--  arch/arc/include/asm/current.h |   6
-rw-r--r--  arch/arc/include/asm/dma.h |   5
-rw-r--r--  arch/arc/include/asm/dsp-impl.h |   2
-rw-r--r--  arch/arc/include/asm/dsp.h |   6
-rw-r--r--  arch/arc/include/asm/dwarf.h |  36
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h |  98
-rw-r--r--  arch/arc/include/asm/entry-compact.h | 149
-rw-r--r--  arch/arc/include/asm/entry.h | 224
-rw-r--r--  arch/arc/include/asm/fb.h |  20
-rw-r--r--  arch/arc/include/asm/hugepage.h |  11
-rw-r--r--  arch/arc/include/asm/io.h |  12
-rw-r--r--  arch/arc/include/asm/irq.h |   3
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h |   4
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h |   6
-rw-r--r--  arch/arc/include/asm/jump_label.h |   8
-rw-r--r--  arch/arc/include/asm/kprobes.h |   3
-rw-r--r--  arch/arc/include/asm/linkage.h |  14
-rw-r--r--  arch/arc/include/asm/mmu-arcv2.h |   6
-rw-r--r--  arch/arc/include/asm/mmu.h |   5
-rw-r--r--  arch/arc/include/asm/mmu_context.h |   2
-rw-r--r--  arch/arc/include/asm/page.h |  28
-rw-r--r--  arch/arc/include/asm/pgalloc.h |   9
-rw-r--r--  arch/arc/include/asm/pgtable-bits-arcv2.h |  64
-rw-r--r--  arch/arc/include/asm/pgtable-levels.h |  11
-rw-r--r--  arch/arc/include/asm/pgtable.h |   4
-rw-r--r--  arch/arc/include/asm/processor.h |  14
-rw-r--r--  arch/arc/include/asm/ptrace.h |  81
-rw-r--r--  arch/arc/include/asm/setup.h |   8
-rw-r--r--  arch/arc/include/asm/shmparam.h |   2
-rw-r--r--  arch/arc/include/asm/smp.h |   6
-rw-r--r--  arch/arc/include/asm/switch_to.h |   2
-rw-r--r--  arch/arc/include/asm/syscall.h |  25
-rw-r--r--  arch/arc/include/asm/thread_info.h |  16
-rw-r--r--  arch/arc/include/asm/uaccess.h |  21
-rw-r--r--  arch/arc/include/asm/unaligned.h |  27
-rw-r--r--  arch/arc/include/asm/unistd.h |  14
-rw-r--r--  arch/arc/include/uapi/asm/Kbuild |   2
-rw-r--r--  arch/arc/include/uapi/asm/page.h |  11
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h |   4
-rw-r--r--  arch/arc/include/uapi/asm/swab.h |   2
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h |  44
53 files changed, 581 insertions, 713 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 3c1afa524b9c..4c69522e0328 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h
+generic-y += text-patching.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 2162023195c5..a31bbf5c8bbc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -23,7 +23,7 @@
#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
-#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_MPY_BCR 0x7b
#define ARC_REG_SWAP_BCR 0x7c
#define ARC_REG_NORM_BCR 0x7d
#define ARC_REG_MIXMAX_BCR 0x7e
@@ -144,9 +144,9 @@
#define ARC_AUX_AGU_MOD2 0x5E2
#define ARC_AUX_AGU_MOD3 0x5E3
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>
/* Helpers */
#define TO_KB(bytes) ((bytes) >> 10)
@@ -177,7 +177,7 @@ struct bcr_isa_arcv2 {
#endif
};
-struct bcr_uarch_build_arcv2 {
+struct bcr_uarch_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, prod:8, maj:8, min:8;
#else
@@ -185,6 +185,59 @@ struct bcr_uarch_build_arcv2 {
#endif
};
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_4 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
+ n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
+#else
+ /* DTLB ITLB JES JE JA */
+ unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
+ pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_slc_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:24, way:2, lsz:2, sz:4;
+#else
+ unsigned int sz:4, lsz:2, way:2, pad:24;
+#endif
+};
+
+struct bcr_clust_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
+#else
+ unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
+#endif
+};
+
+struct bcr_volatile {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:4, limit:4, pad:22, order:1, disable:1;
+#else
+ unsigned int disable:1, order:1, pad:22, limit:4, start:4;
+#endif
+};
+
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
@@ -302,48 +355,6 @@ struct bcr_generic {
#endif
};
-/*
- *******************************************************************
- * Generic structures to hold build configuration used at runtime
- */
-
-struct cpuinfo_arc_mmu {
- unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
- unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
-};
-
-struct cpuinfo_arc_cache {
- unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
-};
-
-struct cpuinfo_arc_bpu {
- unsigned int ver, full, num_cache, num_pred, ret_stk;
-};
-
-struct cpuinfo_arc_ccm {
- unsigned int base_addr, sz;
-};
-
-struct cpuinfo_arc {
- struct cpuinfo_arc_cache icache, dcache, slc;
- struct cpuinfo_arc_mmu mmu;
- struct cpuinfo_arc_bpu bpu;
- struct bcr_identity core;
- struct bcr_isa_arcv2 isa;
- const char *release, *name;
- unsigned int vec_base;
- struct cpuinfo_arc_ccm iccm, dccm;
- struct {
- unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
- fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
- ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
- timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
- } extn;
- struct bcr_mpy extn_mpy;
-};
-
-extern struct cpuinfo_arc cpuinfo_arc700[];
-
static inline int is_isa_arcv2(void)
{
return IS_ENABLED(CONFIG_ISA_ARCV2);
diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 1b0ffaeee16d..5258cb81a16b 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return val; \
}
@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return orig; \
}
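
The three hunks above drop the reliance on smp_mb()'s clobber and put an explicit "memory" clobber inside the relaxed LLSC loops themselves. A minimal sketch of the pattern, mirroring the macro bodies above (sketch_llsc_add is a made-up name, not the kernel macro):

    static inline void sketch_llsc_add(int i, int *counter)
    {
            unsigned int val;

            __asm__ __volatile__(
            "1:     llock   %[val], [%[ctr]]        \n"
            "       add     %[val], %[val], %[i]    \n"
            "       scond   %[val], [%[ctr]]        \n"
            "       bnz     1b                      \n"
            : [val] "=&r" (val)        /* early clobber: no reg reuse */
            : [ctr] "r" (counter),     /* reg-direct only: llock has no "m" mode */
              [i] "ir" (i)
            : "cc", "memory");         /* compiler barrier: drop cached *counter */
    }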
diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
index 2c830347bfb4..89d12a60f84c 100644
--- a/arch/arc/include/asm/atomic-spinlock.h
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -81,6 +81,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -92,7 +97,11 @@ ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
#define arch_atomic_andnot arch_atomic_andnot
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 52ee51e1ff7c..e615c42b93ba 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
@@ -22,30 +22,6 @@
#include <asm/atomic-spinlock.h>
#endif
-#define arch_atomic_cmpxchg(v, o, n) \
-({ \
- arch_cmpxchg(&((v)->counter), (o), (n)); \
-})
-
-#ifdef arch_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_relaxed(v, o, n) \
-({ \
- arch_cmpxchg_relaxed(&((v)->counter), (o), (n)); \
-})
-#endif
-
-#define arch_atomic_xchg(v, n) \
-({ \
- arch_xchg(&((v)->counter), (n)); \
-})
-
-#ifdef arch_xchg_relaxed
-#define arch_atomic_xchg_relaxed(v, n) \
-({ \
- arch_xchg_relaxed(&((v)->counter), (n)); \
-})
-#endif
-
/*
* 64-bit atomics
*/
@@ -55,6 +31,6 @@
#include <asm/atomic64-arcv2.h>
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index c5a8010fdc97..73080a664369 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); \
+ : "cc", "memory"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return val; \
}
@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return orig; \
}
@@ -137,12 +137,9 @@ ATOMIC64_OPS(xor, xor, xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64
-arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+static inline u64 __arch_cmpxchg64_relaxed(volatile void *ptr, u64 old, u64 new)
{
- s64 prev;
-
- smp_mb();
+ u64 prev;
__asm__ __volatile__(
"1: llockd %0, [%1] \n"
@@ -152,13 +149,12 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
" bnz 1b \n"
"2: \n"
: "=&r"(prev)
- : "r"(ptr), "ir"(expected), "r"(new)
- : "cc"); /* memory clobber comes from smp_mb() */
-
- smp_mb();
+ : "r"(ptr), "ir"(old), "r"(new)
+ : "memory", "cc");
return prev;
}
+#define arch_cmpxchg64_relaxed __arch_cmpxchg64_relaxed
static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
@@ -179,14 +175,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
return prev;
}
-
-/**
- * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
+#define arch_atomic64_xchg arch_atomic64_xchg
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
@@ -212,15 +201,6 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if it was not @u.
- * Returns the old value of @v
- */
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index bdb7e190a294..5340c2871392 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -10,7 +10,7 @@
#error only <linux/bitops.h> can be included directly
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
@@ -82,7 +82,7 @@ static inline __attribute__ ((const)) int fls(unsigned int x)
/*
* __fls: Similar to fls, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __fls(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
{
if (!x)
return 0;
@@ -131,7 +131,7 @@ static inline __attribute__ ((const)) int fls(unsigned int x)
/*
* __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
*/
-static inline __attribute__ ((const)) int __fls(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
{
/* FLS insn has exactly same semantics as the API */
return __builtin_arc_fls(x);
@@ -192,6 +192,6 @@ static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
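
Beyond the __ASSEMBLER__ rename, __fls() changes return type from int to unsigned long to match the asm-generic prototype. For orientation, the two helpers differ only in indexing; a hedged self-check sketch (fls_selftest_sketch is hypothetical):

    #include <linux/bitops.h>
    #include <linux/bug.h>

    static void fls_selftest_sketch(void)
    {
            WARN_ON(fls(1) != 1);                   /* fls() is 1-based */
            WARN_ON(fls(0x80000000) != 32);
            WARN_ON(__fls(1UL) != 0);               /* __fls() is 0-based */
            WARN_ON(__fls(0x80000000UL) != 31);
    }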
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 4c453ba96c51..171c16021f70 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_BUG_H
#define _ASM_ARC_BUG_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/ptrace.h>
@@ -29,6 +29,6 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
#include <asm-generic/bug.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f0f1fc5d62b6..040a97f4dd82 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -23,7 +23,7 @@
*/
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/build_bug.h>
@@ -65,7 +65,7 @@
extern int ioc_enable;
extern unsigned long perip_base, perip_end;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index e201b4b1655a..329c94cd45d8 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -18,24 +18,18 @@
#include <linux/mm.h>
#include <asm/shmparam.h>
-/*
- * Semantically we need this because icache doesn't snoop dcache/dma.
- * However ARC Cache flush requires paddr as well as vaddr, latter not available
- * in the flush_icache_page() API. So we no-op it but do the equivalent work
- * in update_mmu_cache()
- */
-#define flush_icache_page(vma, page)
-
void flush_cache_all(void);
void flush_icache_range(unsigned long kstart, unsigned long kend);
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
-void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
-void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
@@ -46,35 +40,15 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
/* TBD: optimize this */
#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
-#else /* VIPT aliasing dcache */
-
-/* To clear out stale userspace mappings */
-void flush_cache_mm(struct mm_struct *mm);
-void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start,unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma,
- unsigned long user_addr, unsigned long page);
-
-/*
- * To make sure that userspace mapping is flushed to memory before
- * get_user_pages() uses a kernel mapping to access the page
- */
-#define ARCH_HAS_FLUSH_ANON_PAGE
-void flush_anon_page(struct vm_area_struct *vma,
- struct page *page, unsigned long u_vaddr);
-
-#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
-
/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page
@@ -82,28 +56,6 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
-#define CACHE_COLORS_NUM 4
-#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
-#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
-
-/*
- * Simple wrapper over config option
- * Bootup code ensures that hardware matches kernel configuration
- */
-static inline int cache_is_vipt_aliasing(void)
-{
- return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-}
-
-/*
- * checks if two addresses (after page aligning) index into same cache set
- */
-#define addr_not_cache_congruent(addr1, addr2) \
-({ \
- cache_is_vipt_aliasing() ? \
- (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
-})
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
new file mode 100644
index 000000000000..acd3b6cb4bf5
--- /dev/null
+++ b/arch/arc/include/asm/cachetype.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARC_CACHETYPE_H
+#define __ASM_ARC_CACHETYPE_H
+
+#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() true
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index c5b544a5fe81..76f43db0890f 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -8,6 +8,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
+#include <linux/cmpxchg-emu.h>
#include <asm/barrier.h>
#include <asm/smp.h>
@@ -46,6 +47,9 @@
__typeof__(*(ptr)) _prev_; \
\
switch(sizeof((_p_))) { \
+ case 1: \
+ _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+ break; \
case 4: \
_prev_ = __cmpxchg(_p_, _o_, _n_); \
break; \
@@ -65,8 +69,6 @@
__typeof__(*(ptr)) _prev_; \
unsigned long __flags; \
\
- BUILD_BUG_ON(sizeof(_p_) != 4); \
- \
/* \
* spin lock/unlock provide the needed smp_mb() before/after \
*/ \
@@ -85,7 +87,7 @@
*/
#ifdef CONFIG_ARC_HAS_LLSC
-#define __xchg(ptr, val) \
+#define __arch_xchg(ptr, val) \
({ \
__asm__ __volatile__( \
" ex %0, [%1] \n" /* set new value */ \
@@ -102,7 +104,7 @@
\
switch(sizeof(*(_p_))) { \
case 4: \
- _val_ = __xchg(_p_, _val_); \
+ _val_ = __arch_xchg(_p_, _val_); \
break; \
default: \
BUILD_BUG(); \
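
The new case 1 routes byte-wide cmpxchg through cmpxchg_emu_u8() from <linux/cmpxchg-emu.h> (backed by lib/cmpxchg-emu.c), which emulates it on top of the native 32-bit primitive applied to the containing aligned word. A rough sketch of that technique, assuming little-endian byte lanes (cmpxchg_u8_sketch is a made-up name, not the library routine):

    static u8 cmpxchg_u8_sketch(volatile u8 *p, u8 old, u8 new)
    {
            volatile u32 *word = (volatile u32 *)((unsigned long)p & ~3UL);
            unsigned int shift = ((unsigned long)p & 3UL) * 8;  /* LE lane */
            u32 mask = 0xffU << shift;
            u32 cur, n32;

            do {
                    cur = READ_ONCE(*word);
                    if (((cur & mask) >> shift) != old)
                            return (cur & mask) >> shift;   /* compare failed */
                    n32 = (cur & ~mask) | ((u32)new << shift);
            } while (cmpxchg(word, cur, n32) != cur);       /* neighbour raced: retry */

            return old;
    }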
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
index 9b9bdd3e6538..03ffd005f3fa 100644
--- a/arch/arc/include/asm/current.h
+++ b/arch/arc/include/asm/current.h
@@ -9,17 +9,17 @@
#ifndef _ASM_ARC_CURRENT_H
#define _ASM_ARC_CURRENT_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_ARC_CURR_IN_REG
-register struct task_struct *curr_arc asm("r25");
+register struct task_struct *curr_arc asm("gp");
#define current (curr_arc)
#else
#include <asm-generic/current.h>
#endif /* ! CONFIG_ARC_CURR_IN_REG */
-#endif /* ! __ASSEMBLY__ */
+#endif /* ! __ASSEMBLER__ */
#endif /* _ASM_ARC_CURRENT_H */
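
With this change, CONFIG_ARC_CURR_IN_REG dedicates gp (r26) instead of r25 to cache the current task pointer, returning r25 to the callee-saved pool (the entry-macro changes further down follow from this). The construct is GCC's global register variable; a stand-alone illustration (get_current_sketch is hypothetical):

    struct task_struct;
    register struct task_struct *curr asm("gp");    /* reserved file-wide */

    static inline struct task_struct *get_current_sketch(void)
    {
            return curr;    /* a plain register move, no memory access */
    }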
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
index 5b744f4b10a7..02431027ed2f 100644
--- a/arch/arc/include/asm/dma.h
+++ b/arch/arc/include/asm/dma.h
@@ -7,10 +7,5 @@
#define ASM_ARC_DMA_H
#define MAX_DMA_ADDRESS 0xC0000000
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy 0
-#endif
#endif
diff --git a/arch/arc/include/asm/dsp-impl.h b/arch/arc/include/asm/dsp-impl.h
index cd5636dfeb6f..fd5fdaad90c1 100644
--- a/arch/arc/include/asm/dsp-impl.h
+++ b/arch/arc/include/asm/dsp-impl.h
@@ -11,7 +11,7 @@
#define DSP_CTRL_DISABLED_ALL 0
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/* clobbers r5 register */
.macro DSP_EARLY_INIT
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
index 202c78e56704..eeaaf4e4eabd 100644
--- a/arch/arc/include/asm/dsp.h
+++ b/arch/arc/include/asm/dsp.h
@@ -7,12 +7,12 @@
#ifndef __ASM_ARC_DSP_H
#define __ASM_ARC_DSP_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* DSP-related saved registers - need to be saved only when you are
* scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure fields name must correspond to aux register definitions for
* automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
*/
struct dsp_callee_regs {
@@ -24,6 +24,6 @@ struct dsp_callee_regs {
#endif
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_ARC_DSP_H */
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
index 5f4de05bd4ee..1524c5cf8b59 100644
--- a/arch/arc/include/asm/dwarf.h
+++ b/arch/arc/include/asm/dwarf.h
@@ -6,30 +6,38 @@
#ifndef _ASM_ARC_DWARF_H
#define _ASM_ARC_DWARF_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef ARC_DW2_UNWIND_AS_CFI
-#define CFI_STARTPROC .cfi_startproc
-#define CFI_ENDPROC .cfi_endproc
-#define CFI_DEF_CFA .cfi_def_cfa
-#define CFI_REGISTER .cfi_register
-#define CFI_REL_OFFSET .cfi_rel_offset
-#define CFI_UNDEFINED .cfi_undefined
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_UNDEFINED .cfi_undefined
#else
#define CFI_IGNORE #
-#define CFI_STARTPROC CFI_IGNORE
-#define CFI_ENDPROC CFI_IGNORE
-#define CFI_DEF_CFA CFI_IGNORE
-#define CFI_REGISTER CFI_IGNORE
-#define CFI_REL_OFFSET CFI_IGNORE
-#define CFI_UNDEFINED CFI_IGNORE
+#define CFI_STARTPROC CFI_IGNORE
+#define CFI_ENDPROC CFI_IGNORE
+#define CFI_DEF_CFA CFI_IGNORE
+#define CFI_DEF_CFA_OFFSET CFI_IGNORE
+#define CFI_DEF_CFA_REGISTER CFI_IGNORE
+#define CFI_OFFSET CFI_IGNORE
+#define CFI_REL_OFFSET CFI_IGNORE
+#define CFI_REGISTER CFI_IGNORE
+#define CFI_RESTORE CFI_IGNORE
+#define CFI_UNDEFINED CFI_IGNORE
#endif /* !ARC_DW2_UNWIND_AS_CFI */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_ARC_DWARF_H */
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 0ff4c0610561..3802a2daaf86 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -18,7 +18,6 @@
* | orig_r0 |
* | event/ECR |
* | bta |
- * | user_r25 |
* | gp |
* | fp |
* | sp |
@@ -49,14 +48,18 @@
/*------------------------------------------------------------------------*/
.macro INTERRUPT_PROLOGUE
- ; (A) Before jumping to Interrupt Vector, hardware micro-ops did following:
+ ; Before jumping to Interrupt Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
; 3. Auto save: (mandatory) Push PC and STAT32 on stack
; hardware does even if CONFIG_ARC_IRQ_NO_AUTOSAVE
- ; 4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
+ ; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
;
- ; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair
+ ; Now
+ ; 4b. If Auto-save (optional) not enabled in hw, manually save them
+ ; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair
+ ;
+ ; At the end, SP points to pt_regs
#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
; carve pt_regs on stack (case #3), PC/STAT32 already on stack
@@ -72,15 +75,16 @@
.endm
/*------------------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
+.macro EXCEPTION_PROLOGUE_KEEP_AE
- ; (A) Before jumping to Exception Vector, hardware micro-ops did following:
+ ; Before jumping to Exception Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
;
- ; (B) Manually save the complete reg file below
+ ; Now manually save rest of reg file
+ ; At the end, SP points to pt_regs
- sub sp, sp, SZ_PT_REGS ; carve pt_regs
+ sub sp, sp, SZ_PT_REGS ; carve space for pt_regs
; _HARD saves r10 clobbered by _SOFT as scratch hence comes first
@@ -100,6 +104,16 @@
; OUTPUT: r10 has ECR expected by EV_Trap
.endm
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
+.endm
+
/*------------------------------------------------------------------------
* This macro saves the registers manually which would normally be autosaved
* by hardware on taken interrupts. It is used by
@@ -135,10 +149,10 @@
*/
.macro __SAVE_REGFILE_SOFT
- ST2 gp, fp, PT_r26 ; gp (r26), fp (r27)
-
- st r12, [sp, PT_sp + 4]
- st r30, [sp, PT_sp + 8]
+ st fp, [sp, PT_fp] ; r27
+ st r30, [sp, PT_r30]
+ st r12, [sp, PT_r12]
+ st r26, [sp, PT_r26] ; gp
; Saving pt_regs->sp correctly requires some extra work due to the way
; Auto stack switch works
@@ -153,30 +167,30 @@
; ISA requires ADD.nz to have same dest and src reg operands
mov.nz r10, sp
- add.nz r10, r10, SZ_PT_REGS ; K mode SP
+ add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP
st r10, [sp, PT_sp] ; SP (pt_regs->sp)
-#ifdef CONFIG_ARC_CURR_IN_REG
- st r25, [sp, PT_user_r25]
- GET_CURR_TASK_ON_CPU r25
-#endif
-
#ifdef CONFIG_ARC_HAS_ACCL_REGS
ST2 r58, r59, PT_r58
#endif
/* clobbers r10, r11 registers pair */
DSP_SAVE_REGFILE_IRQ
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ GET_CURR_TASK_ON_CPU gp
+#endif
+
.endm
/*------------------------------------------------------------------------*/
.macro __RESTORE_REGFILE_SOFT
- LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
-
- ld r12, [sp, PT_r12]
+ ld fp, [sp, PT_fp]
ld r30, [sp, PT_r30]
+ ld r12, [sp, PT_r12]
+ ld r26, [sp, PT_r26]
; Restore SP (into AUX_USER_SP) only if returning to U mode
; - for K mode, it will be implicitly restored as stack is unwound
@@ -188,10 +202,6 @@
sr r10, [AUX_USER_SP]
1:
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, PT_user_r25]
-#endif
-
/* clobbers r10, r11 registers pair */
DSP_RESTORE_REGFILE_IRQ
@@ -249,7 +259,7 @@
btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP
- ld r10, [sp, PT_event + 4]
+ ld r10, [sp, PT_bta]
sr r10, [erbta]
LD2 r10, r11, PT_ret
@@ -264,8 +274,8 @@
.macro FAKE_RET_FROM_EXCPN
lr r9, [status32]
- bic r9, r9, STATUS_AE_MASK
- or r9, r9, STATUS_IE_MASK
+ bclr r9, r9, STATUS_AE_BIT
+ bset r9, r9, STATUS_IE_BIT
kflag r9
.endm
@@ -281,4 +291,36 @@
/* M = 8-1 N = 8 */
.endm
+.macro SAVE_ABI_CALLEE_REGS
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r18
+ push r19
+ push r20
+ push r21
+ push r22
+ push r23
+ push r24
+ push r25
+.endm
+
+.macro RESTORE_ABI_CALLEE_REGS
+ pop r25
+ pop r24
+ pop r23
+ pop r22
+ pop r21
+ pop r20
+ pop r19
+ pop r18
+ pop r17
+ pop r16
+ pop r15
+ pop r14
+ pop r13
+.endm
+
#endif
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 5aab4f93ab8a..00946fe04c9b 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -7,7 +7,7 @@
* Stack switching code can no longer reliably rely on the fact that
* if we are NOT in user mode, stack is switched to kernel mode.
* e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -21,7 +21,7 @@
* r25 contains the kernel current task ptr
* - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
* - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
- * address Write back load ld.ab instead of seperate ld/add instn
+ * address Write back load ld.ab instead of separate ld/add instn
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
@@ -33,6 +33,91 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+.macro PUSHAX aux
+ lr r9, [\aux]
+ push r9
+.endm
+
+.macro POPAX aux
+ pop r9
+ sr r9, [\aux]
+.endm
+
+.macro SAVE_R0_TO_R12
+ push r0
+ push r1
+ push r2
+ push r3
+ push r4
+ push r5
+ push r6
+ push r7
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ pop r7
+ pop r6
+ pop r5
+ pop r4
+ pop r3
+ pop r2
+ pop r1
+ pop r0
+.endm
+
+.macro SAVE_ABI_CALLEE_REGS
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r18
+ push r19
+ push r20
+ push r21
+ push r22
+ push r23
+ push r24
+ push r25
+.endm
+
+.macro RESTORE_ABI_CALLEE_REGS
+ pop r25
+ pop r24
+ pop r23
+ pop r22
+ pop r21
+ pop r20
+ pop r19
+ pop r18
+ pop r17
+ pop r16
+ pop r15
+ pop r14
+ pop r13
+.endm
+
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
@@ -58,7 +143,7 @@
* 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
* 3. But before it could switch SP from USER to KERNEL stack
* a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
* not switched.
* To handle this, we may need to switch stack even if in kernel mode
* provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -88,7 +173,7 @@
GET_CURR_TASK_ON_CPU r9
- /* With current tsk in r9, get it's kernel mode stack base */
+ /* With current tsk in r9, get its kernel mode stack base */
GET_TSK_STACK_BASE r9, r9
/* save U mode SP @ pt_regs->sp */
@@ -140,7 +225,7 @@
*
* After this it is safe to call the "C" handlers
*-------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
+.macro EXCEPTION_PROLOGUE_KEEP_AE
/* Need at least 1 reg to code the early exception prologue */
PROLOG_FREEUP_REG r9, @ex_saved_reg1
@@ -151,14 +236,6 @@
/* ARC700 doesn't provide auto-stack switching */
SWITCH_TO_KERNEL_STK
-#ifdef CONFIG_ARC_CURR_IN_REG
- /* Treat r25 as scratch reg (save on stack) and load with "current" */
- PUSH r25
- GET_CURR_TASK_ON_CPU r25
-#else
- sub sp, sp, 4
-#endif
-
st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */
sub sp, sp, 4 /* skip pt_regs->sp, already saved above */
@@ -178,7 +255,23 @@
PUSHAX erbta
lr r10, [ecr]
- st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */
+ st r10, [sp, PT_event]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
+ ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
.endm
/*--------------------------------------------------------------
@@ -189,7 +282,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro EXCEPTION_EPILOGUE
@@ -208,11 +301,8 @@
POP gp
RESTORE_R12_TO_R0
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
-#endif
ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
+ /* orig_r0, ECR skipped automatically */
.endm
/* Dummy ECR values for Interrupts */
@@ -229,15 +319,8 @@
SWITCH_TO_KERNEL_STK
-#ifdef CONFIG_ARC_CURR_IN_REG
- /* Treat r25 as scratch reg (save on stack) and load with "current" */
- PUSH r25
- GET_CURR_TASK_ON_CPU r25
-#else
- sub sp, sp, 4
-#endif
- PUSH 0x003\LVL\()abcd /* Dummy ECR */
+ st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
skip pt_regs->sp, already saved above */
@@ -255,6 +338,10 @@
PUSHAX lp_start
PUSHAX bta_l\LVL\()
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
.endm
/*--------------------------------------------------------------
@@ -263,7 +350,7 @@
* NOTE:
*
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
* by hardware and that is not good.
*-------------------------------------------------------------*/
.macro INTERRUPT_EPILOGUE LVL
@@ -282,11 +369,7 @@
POP gp
RESTORE_R12_TO_R0
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
-#endif
- ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
+ ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */
.endm
/* Get thread_info of "current" tsk */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index fcdd59d77f42..f453af251a1a 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -7,193 +7,45 @@
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/mmu.h>
+#ifdef __ASSEMBLER__
+
#ifdef CONFIG_ISA_ARCOMPACT
#include <asm/entry-compact.h> /* ISA specific bits */
#else
#include <asm/entry-arcv2.h>
#endif
-/* Note on the LD/ST addr modes with addr reg wback
- *
- * LD.a same as LD.aw
- *
- * LD.a reg1, [reg2, x] => Pre Incr
- * Eff Addr for load = [reg2 + x]
- *
- * LD.ab reg1, [reg2, x] => Post Incr
- * Eff Addr for load = [reg2]
+/*
+ * save user mode callee regs as struct callee_regs
+ * - needed by fork/do_signal/unaligned-access-emulation.
*/
-
-.macro PUSH reg
- st.a \reg, [sp, -4]
-.endm
-
-.macro PUSHAX aux
- lr r9, [\aux]
- PUSH r9
-.endm
-
-.macro POP reg
- ld.ab \reg, [sp, 4]
-.endm
-
-.macro POPAX aux
- POP r9
- sr r9, [\aux]
-.endm
-
-/*--------------------------------------------------------------
- * Helpers to save/restore Scratch Regs:
- * used by Interrupt/Exception Prologue/Epilogue
- *-------------------------------------------------------------*/
-.macro SAVE_R0_TO_R12
- PUSH r0
- PUSH r1
- PUSH r2
- PUSH r3
- PUSH r4
- PUSH r5
- PUSH r6
- PUSH r7
- PUSH r8
- PUSH r9
- PUSH r10
- PUSH r11
- PUSH r12
-.endm
-
-.macro RESTORE_R12_TO_R0
- POP r12
- POP r11
- POP r10
- POP r9
- POP r8
- POP r7
- POP r6
- POP r5
- POP r4
- POP r3
- POP r2
- POP r1
- POP r0
-
-.endm
-
-/*--------------------------------------------------------------
- * Helpers to save/restore callee-saved regs:
- * used by several macros below
- *-------------------------------------------------------------*/
-.macro SAVE_R13_TO_R24
- PUSH r13
- PUSH r14
- PUSH r15
- PUSH r16
- PUSH r17
- PUSH r18
- PUSH r19
- PUSH r20
- PUSH r21
- PUSH r22
- PUSH r23
- PUSH r24
-.endm
-
-.macro RESTORE_R24_TO_R13
- POP r24
- POP r23
- POP r22
- POP r21
- POP r20
- POP r19
- POP r18
- POP r17
- POP r16
- POP r15
- POP r14
- POP r13
-.endm
-
-/*--------------------------------------------------------------
- * Collect User Mode callee regs as struct callee_regs - needed by
- * fork/do_signal/unaligned-access-emulation.
- * (By default only scratch regs are saved on entry to kernel)
- *
- * Special handling for r25 if used for caching Task Pointer.
- * It would have been saved in task->thread.user_r25 already, but to keep
- * the interface same it is copied into regular r25 placeholder in
- * struct callee_regs.
- *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER
+ SAVE_ABI_CALLEE_REGS
+.endm
- mov r12, sp ; save SP as ref to pt_regs
- SAVE_R13_TO_R24
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- ; Retrieve orig r25 and save it with rest of callee_regs
- ld r12, [r12, PT_user_r25]
- PUSH r12
-#else
- PUSH r25
-#endif
-
+/*
+ * restore user mode callee regs as struct callee_regs
+ * - could have been changed by ptrace tracer or unaligned-access fixup
+ */
+.macro RESTORE_CALLEE_SAVED_USER
+ RESTORE_ABI_CALLEE_REGS
.endm
-/*--------------------------------------------------------------
- * Save kernel Mode callee regs at the time of Contect Switch.
- *
- * Special handling for r25 if used for caching Task Pointer.
- * Kernel simply skips saving it since it will be loaded with
- * incoming task pointer anyways
- *-------------------------------------------------------------*/
+/*
+ * save/restore kernel mode callee regs at the time of context switch
+ */
.macro SAVE_CALLEE_SAVED_KERNEL
-
- SAVE_R13_TO_R24
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- sub sp, sp, 4
-#else
- PUSH r25
-#endif
+ SAVE_ABI_CALLEE_REGS
.endm
-/*--------------------------------------------------------------
- * Opposite of SAVE_CALLEE_SAVED_KERNEL
- *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- add sp, sp, 4 /* skip usual r25 placeholder */
-#else
- POP r25
-#endif
- RESTORE_R24_TO_R13
-.endm
-
-/*--------------------------------------------------------------
- * Opposite of SAVE_CALLEE_SAVED_USER
- *
- * ptrace tracer or unaligned-access fixup might have changed a user mode
- * callee reg which is saved back to usual r25 storage location
- *-------------------------------------------------------------*/
-.macro RESTORE_CALLEE_SAVED_USER
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- POP r12
-#else
- POP r25
-#endif
- RESTORE_R24_TO_R13
-
- ; SP is back to start of pt_regs
-#ifdef CONFIG_ARC_CURR_IN_REG
- st r12, [sp, PT_user_r25]
-#endif
+ RESTORE_ABI_CALLEE_REGS
.endm
/*--------------------------------------------------------------
@@ -204,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
@@ -229,10 +81,10 @@
#ifdef CONFIG_SMP
-/*-------------------------------------------------
+/*
* Retrieve the current running task on this CPU
- * 1. Determine curr CPU id.
- * 2. Use it to index into _current_task[ ]
+ * - loads it from backing _current_task[] (and can't use the
+ * caching reg for current task
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
@@ -254,7 +106,7 @@
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
- mov r25, \tsk
+ mov gp, \tsk
#endif
.endm
@@ -269,21 +121,20 @@
.macro SET_CURR_TASK_ON_CPU tsk, tmp
st \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
- mov r25, \tsk
+ mov gp, \tsk
#endif
.endm
#endif /* SMP / UNI */
-/* ------------------------------------------------------------------
+/*
* Get the ptr to some field of Current Task at @off in task struct
- * -Uses r25 for Current task ptr if that is enabled
+ * - Uses current task cached in reg if enabled
*/
-
#ifdef CONFIG_ARC_CURR_IN_REG
.macro GET_CURR_TASK_FIELD_PTR off, reg
- add \reg, r25, \off
+ add \reg, gp, \off
.endm
#else
@@ -295,4 +146,23 @@
#endif /* CONFIG_ARC_CURR_IN_REG */
+#else /* !__ASSEMBLER__ */
+
+extern void do_signal(struct pt_regs *);
+extern void do_notify_resume(struct pt_regs *);
+extern int do_privilege_fault(unsigned long, struct pt_regs *);
+extern int do_extension_fault(unsigned long, struct pt_regs *);
+extern int insterror_is_error(unsigned long, struct pt_regs *);
+extern int do_memory_error(unsigned long, struct pt_regs *);
+extern int trap_is_brkpt(unsigned long, struct pt_regs *);
+extern int do_misaligned_error(unsigned long, struct pt_regs *);
+extern int do_trap5_error(unsigned long, struct pt_regs *);
+extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *);
+extern void do_machine_check_fault(unsigned long, struct pt_regs *);
+extern void do_non_swi_trap(unsigned long, struct pt_regs *);
+extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *);
+extern void do_page_fault(unsigned long, struct pt_regs *);
+
+#endif
+
#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
deleted file mode 100644
index dc2e303cdbbb..000000000000
--- a/arch/arc/include/asm/fb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-
-#include <linux/fb.h>
-#include <linux/fs.h>
-#include <asm/page.h>
-
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
-{
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-}
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 5001b796fb8d..7765dc105d54 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -10,6 +10,13 @@
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>
+/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
@@ -21,7 +28,7 @@ static inline pmd_t pte_pmd(pte_t pte)
}
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
@@ -33,8 +40,6 @@ static inline pmd_t pte_pmd(pte_t pte)
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
-#define mk_pmd(page, prot) pte_pmd(mk_pte(page, prot))
-
#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
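
The new HPAGE_* constants are pure derivations from PMD_SHIFT; a worked example, assuming for illustration PMD_SHIFT == 21 (a 2 MiB huge page):

    HPAGE_SHIFT = 21
    HPAGE_SIZE  = 1UL << 21         = 0x00200000  (2 MiB)
    HPAGE_MASK  = ~(HPAGE_SIZE - 1) = 0xffe00000

so (addr & HPAGE_MASK) rounds an address down to its huge-page base and (addr & ~HPAGE_MASK) is the offset within it.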
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 8f777d6441a5..00171a212b3c 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@@ -21,8 +21,9 @@
#endif
extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
- unsigned long flags);
+#define ioremap ioremap
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return (void __iomem *)port;
@@ -32,8 +33,6 @@ static inline void ioport_unmap(void __iomem *addr)
{
}
-extern void iounmap(const void __iomem *addr);
-
/*
* io{read,write}{16,32}be() macros
*/
@@ -43,9 +42,6 @@ extern void iounmap(const void __iomem *addr);
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
-/* Change struct page to physical address */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 0309cb405cfb..9cd79263acba 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -10,7 +10,7 @@
* ARCv2 can support 240 interrupts in the core interrupts controllers and
* 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
* configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to same value
*/
#define NR_IRQS 512
@@ -25,5 +25,6 @@
#include <asm-generic/irq.h>
extern void arc_init_IRQ(void);
+extern void arch_do_IRQ(unsigned int, struct pt_regs *);
#endif
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index fb3c21f1a238..30aea562f8aa 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -50,7 +50,7 @@
#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \
(ARCV2_IRQ_DEF_PRIO << 1))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Save IRQ state and disable IRQs
@@ -170,6 +170,6 @@ static inline void arc_softirq_clear(int irq)
seti
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 0d63e568d64c..85c2f6bcde0c 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -40,13 +40,13 @@
#define ISA_INIT_STATUS_BITS STATUS_IE_MASK
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/******************************************************************
* IRQ Control Macros
*
* All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
@@ -196,6 +196,6 @@ static inline int arch_irqs_disabled(void)
flag \scratch
.endm
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
index 9d9618079739..66ead75784d9 100644
--- a/arch/arc/include/asm/jump_label.h
+++ b/arch/arc/include/asm/jump_label.h
@@ -2,7 +2,7 @@
#ifndef _ASM_ARC_JUMP_LABEL_H
#define _ASM_ARC_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/stringify.h>
#include <linux/types.h>
@@ -31,7 +31,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
"1: \n"
"nop \n"
".pushsection __jump_table, \"aw\" \n"
@@ -47,7 +47,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
"1: \n"
"b %l[l_yes] \n"
".pushsection __jump_table, \"aw\" \n"
@@ -68,5 +68,5 @@ struct jump_entry {
jump_label_t key;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
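
These hunks track the treewide rename of asm_volatile_goto() to plain asm goto. For context, the two arch primitives above back the generic static-branch API; a hedged usage sketch (my_feature_key and the message are made up):

    #include <linux/jump_label.h>
    #include <linux/printk.h>

    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    void hot_path(void)
    {
            /* emits the 4-byte nop from arch_static_branch(); patched to
             * a branch only after static_branch_enable(&my_feature_key) */
            if (static_branch_unlikely(&my_feature_key))
                    pr_info("slow feature path\n");
    }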
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index de1566e32cb8..68e8301c0df2 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -32,9 +32,6 @@ struct kprobe;
void arch_remove_kprobe(struct kprobe *p);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
-
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index c9434ff3aa4c..ba3cb65b5eaa 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -8,7 +8,11 @@
#include <asm/dwarf.h>
-#ifdef __ASSEMBLY__
+#define ASM_NL ` /* use '`' to mark new line in macro */
+#define __ALIGN .align 4
+#define __ALIGN_STR __stringify(__ALIGN)
+
+#ifdef __ASSEMBLER__
.macro ST2 e, o, off
#ifdef CONFIG_ARC_HAS_LL64
@@ -28,10 +32,6 @@
#endif
.endm
-#define ASM_NL ` /* use '`' to mark new line in macro */
-#define __ALIGN .align 4
-#define __ALIGN_STR __stringify(__ALIGN)
-
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm
#ifdef CONFIG_ARC_HAS_DCCM
@@ -61,7 +61,7 @@
CFI_ENDPROC ASM_NL \
.size name, .-name
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
#ifdef CONFIG_ARC_HAS_ICCM
#define __arcfp_code __section(".text.arcfp")
@@ -75,6 +75,6 @@
#define __arcfp_data __section(".data")
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/mmu-arcv2.h b/arch/arc/include/asm/mmu-arcv2.h
index ed9036d4ede3..5e5482026ac9 100644
--- a/arch/arc/include/asm/mmu-arcv2.h
+++ b/arch/arc/include/asm/mmu-arcv2.h
@@ -9,6 +9,8 @@
#ifndef _ASM_ARC_MMU_ARCV2_H
#define _ASM_ARC_MMU_ARCV2_H
+#include <soc/arc/arc_aux.h>
+
/*
* TLB Management regs
*/
@@ -67,7 +69,7 @@
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct mm_struct;
extern int pae40_exist_but_not_enab(void);
@@ -98,6 +100,6 @@ static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
sr \reg, [ARC_REG_PID]
.endm
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index ca427c30f70e..e3b35ceab582 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/threads.h> /* NR_CPUS */
@@ -14,6 +14,9 @@ typedef struct {
unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
+struct pt_regs;
+extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
+
#endif
#include <asm/mmu-arcv2.h>
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index dda471f5f05b..9963bb1a5733 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 9a62e1d87967..9720fe6b2c24 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -19,7 +19,7 @@
#endif /* CONFIG_ARC_HAS_PAE40 */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
@@ -85,15 +85,6 @@ typedef struct {
typedef struct page *pgtable_t;
/*
- * Use virt_to_pfn with caution:
- * If used in pte or paddr related macros, it could cause truncation
- * in PAE40 builds
- * As a rule of thumb, only use it in helpers starting with virt_
- * You have been warned !
- */
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-
-/*
* When HIGHMEM is enabled we have holes in the memory map so we need
* pfn_valid() that takes into account the actual extents of the physical
* memory
@@ -108,8 +99,7 @@ extern int pfn_valid(unsigned long pfn);
#else /* CONFIG_HIGHMEM */
-#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_RAM_BASE)
-#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define ARCH_PFN_OFFSET virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE)
#endif /* CONFIG_HIGHMEM */
@@ -123,6 +113,18 @@ extern int pfn_valid(unsigned long pfn);
#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned !
+ */
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
@@ -134,6 +136,6 @@ extern int pfn_valid(unsigned long pfn);
#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
#include <asm-generic/getorder.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
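
virt_to_pfn() becomes a typed static inline (following the asm-generic conversion), which is why ARCH_PFN_OFFSET above gains a (void *) cast: the prototype now rejects bare integers. A hedged illustration (example_pfn is a made-up name):

    static unsigned long example_pfn(void *kaddr)
    {
            return virt_to_pfn(kaddr);      /* ok: pointer argument */
    }

    /* virt_to_pfn(CONFIG_LINUX_RAM_BASE) would now warn (-Wint-conversion),
     * hence: virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE) */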
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 096b8ef58edb..dfae070fe8d5 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -53,19 +53,14 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ pgd_t *ret = __pgd_alloc(mm, 0);
if (ret) {
int num, num2;
- num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
- memzero(ret, num * sizeof(pgd_t));
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
num2 = VMALLOC_SIZE / PGDIR_SIZE;
memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
-
- memzero(ret + num + num2,
- (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
-
}
return ret;
}
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index 183d23bc1e00..4630c5acca05 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -26,6 +26,9 @@
#define _PAGE_GLOBAL (1 << 8) /* ASID agnostic (H) */
#define _PAGE_PRESENT (1 << 9) /* PTE/TLB Valid (H) */
+/* We borrow bit 5 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
+
#ifdef CONFIG_ARC_MMU_V4
#define _PAGE_HW_SZ (1 << 10) /* Normal/super (H) */
#else
@@ -63,7 +66,7 @@
* Other rules which cause the divergence from 1:1 mapping
*
* 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
+ * can be tracked independently of X/W unlike some other CPUs), still to
* keep things consistent with other archs:
* -Write implies Read: W => R
* -Execute implies Read: X => R
@@ -72,25 +75,7 @@
* This is to enable COW mechanism
*/
/* xwr */
-#define __P000 PAGE_U_NONE
-#define __P001 PAGE_U_R
-#define __P010 PAGE_U_R /* Pvt-W => !W */
-#define __P011 PAGE_U_R /* Pvt-W => !W */
-#define __P100 PAGE_U_X_R /* X => R */
-#define __P101 PAGE_U_X_R
-#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
-#define __P111 PAGE_U_X_R /* Pvt-W => !W */
-
-#define __S000 PAGE_U_NONE
-#define __S001 PAGE_U_R
-#define __S010 PAGE_U_W_R /* W => R */
-#define __S011 PAGE_U_W_R
-#define __S100 PAGE_U_X_R /* X => R */
-#define __S101 PAGE_U_X_R
-#define __S110 PAGE_U_X_W_R /* X => R */
-#define __S111 PAGE_U_X_W_R
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
@@ -102,7 +87,7 @@
PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
-PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite_novma, |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
@@ -115,18 +100,25 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- set_pte(ptep, pteval);
-}
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr);
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep);
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-/* Encode swap {type,off} tuple into PTE
- * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
- * PAGE_PRESENT is zero in a PTE holding swap "identifier"
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <-------------- offset -------------> <--- zero --> E < type ->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * The zero'ed bits include _PAGE_PRESENT.
*/
#define __swp_entry(type, off) ((swp_entry_t) \
{ ((type) & 0x1f) | ((off) << 13) })
@@ -138,12 +130,18 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define kern_addr_valid(addr) (1)
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+PTE_BIT_FUNC(swp_mkexclusive, |= (_PAGE_SWP_EXCLUSIVE));
+PTE_BIT_FUNC(swp_clear_exclusive, &= ~(_PAGE_SWP_EXCLUSIVE));
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
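
The swap-PTE layout documented above can be sanity-checked with plain bit arithmetic; a minimal stand-alone sketch mirroring __swp_entry()/__swp_type()/__swp_offset(), assuming the bit positions shown in the diagram (type in bits 4:0, marker E in bit 5, bits 12:6 zero, offset from bit 13):

/* Stand-alone sketch of the swap-PTE encoding above; bit positions are
 * taken from the diagram, including _PAGE_PRESENT at bit 9. */
#include <assert.h>

static unsigned long swp_encode(unsigned long type, unsigned long off)
{
	return (type & 0x1f) | (off << 13);	/* mirrors __swp_entry() */
}

int main(void)
{
	unsigned long e = swp_encode(3, 0x1234);

	assert((e & 0x1f) == 3);	/* __swp_type()           */
	assert((e >> 13) == 0x1234);	/* __swp_offset()         */
	assert(!(e & (1UL << 9)));	/* pte_present() is false */

	e |= 1UL << 5;			/* pte_swp_mkexclusive()  */
	assert((e & 0x1f) == 3);	/* type survives marker E */
	return 0;
}
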
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index 64ca25d199be..c8f9273372c0 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -85,7 +85,7 @@
#define PTRS_PER_PTE BIT(PMD_SHIFT - PAGE_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#if CONFIG_PGTABLE_LEVELS > 3
#include <asm-generic/pgtable-nop4d.h>
@@ -142,7 +142,6 @@
#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#endif
@@ -159,9 +158,9 @@
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_pfn(pmd) ((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
#define set_pmd(pmdp, pmd) (*(pmdp) = pmd)
-#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page(pmd))
/*
* 4th level paging: pte
@@ -169,6 +168,7 @@
#define pte_ERROR(e) \
pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define PFN_PTE_SHIFT PAGE_SHIFT
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,ptep) set_pte_at(mm, addr, ptep, __pte(0))
@@ -176,12 +176,11 @@
#define set_pte(ptep, pte) ((*(ptep)) = (pte))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#ifdef CONFIG_ISA_ARCV2
#define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ)
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 4cf45a99fd79..bd580e2b62d7 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -19,7 +19,7 @@
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
@@ -29,6 +29,6 @@ extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 54db9d7bb562..7f7901ac6643 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -11,7 +11,7 @@
#ifndef __ASM_ARC_PROCESSOR_H
#define __ASM_ARC_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/ptrace.h>
#include <asm/dsp.h>
@@ -22,7 +22,6 @@
* struct thread_info
*/
struct thread_struct {
- unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
@@ -33,9 +32,7 @@ struct thread_struct {
#endif
};
-#define INIT_THREAD { \
- .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
-}
+#define INIT_THREAD { }
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -43,9 +40,6 @@ struct task_struct;
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
-/* Free all resources held by a thread */
-#define release_thread(thread) do { } while (0)
-
/*
* A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
* get optimised away by gcc
@@ -59,7 +53,7 @@ struct task_struct;
 * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.

* Look in process.c for details of kernel stack layout
*/
-#define TSK_K_ESP(tsk) (tsk->thread.ksp)
+#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp)
#define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
@@ -72,7 +66,7 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc,
extern unsigned int __get_wchan(struct task_struct *p);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* Default System Memory Map on ARC
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 5869a74c0db2..f6c052af8f4d 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -10,7 +10,18 @@
#include <uapi/asm/ptrace.h>
#include <linux/compiler.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
+
+typedef union {
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned long state:8, vec:8, cause:8, param:8;
+#else
+ unsigned long param:8, cause:8, vec:8, state:8;
+#endif
+ };
+ unsigned long full;
+} ecr_reg;
/* THE pt_regs: Defines how regs are saved during entry into kernel */
@@ -40,23 +51,14 @@ struct pt_regs {
* Last word used by Linux for extra state mgmt (syscall-restart)
* For interrupts, use artificial ECR values to note current prio-level
*/
- union {
- struct {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned long state:8, ecr_vec:8,
- ecr_cause:8, ecr_param:8;
-#else
- unsigned long ecr_param:8, ecr_cause:8,
- ecr_vec:8, state:8;
-#endif
- };
- unsigned long event;
- };
+ ecr_reg ecr;
+};
- unsigned long user_r25;
+struct callee_regs {
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
-#define MAX_REG_OFFSET offsetof(struct pt_regs, user_r25)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
@@ -64,28 +66,14 @@ struct pt_regs {
unsigned long orig_r0;
- union {
- struct {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned long state:8, ecr_vec:8,
- ecr_cause:8, ecr_param:8;
-#else
- unsigned long ecr_param:8, ecr_cause:8,
- ecr_vec:8, state:8;
-#endif
- };
- unsigned long event;
- };
-
- unsigned long bta; /* bta_l1, bta_l2, erbta */
+ ecr_reg ecr; /* Exception Cause Reg */
- unsigned long user_r25;
+ unsigned long bta; /* erbta */
- unsigned long r26; /* gp */
unsigned long fp;
- unsigned long sp; /* user/kernel sp depending on where we came from */
-
- unsigned long r12, r30;
+ unsigned long r30;
+ unsigned long r12;
+ unsigned long r26; /* gp */
#ifdef CONFIG_ARC_HAS_ACCL_REGS
unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
@@ -94,6 +82,8 @@ struct pt_regs {
unsigned long DSP_CTRL;
#endif
+ unsigned long sp; /* user/kernel sp depending on entry */
+
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
@@ -106,16 +96,14 @@ struct pt_regs {
unsigned long status32;
};
-#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
-
-#endif
-
-/* Callee saved registers - need to be saved only when you are scheduled out */
-
struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
+#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
+
+#endif
+
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
@@ -134,13 +122,13 @@ struct callee_regs {
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
-#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
-#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
+#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param)
+#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param)
#define STATE_SCALL_RESTARTED 0x01
-#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
-#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
+#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \
({ \
@@ -181,6 +169,9 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
-#endif /* !__ASSEMBLY__ */
+extern int syscall_trace_enter(struct pt_regs *);
+extern void syscall_trace_exit(struct pt_regs *);
+
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_PTRACE_H */
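
The new ecr_reg union lets ECR be read whole (.full) or per field, which is what the in_syscall()/in_brkpt_trap() rewrites below it rely on. A stand-alone sketch of the field access, assuming little-endian field order and a hypothetical stand-in for the real ECR_V_TRAP value from arcregs.h:

/* Stand-alone sketch of per-field ECR access via a union like ecr_reg.
 * ECR_V_TRAP_EX is a hypothetical stand-in for the real vector number. */
#define ECR_V_TRAP_EX	0x25

typedef union {
	struct {
		unsigned int param:8, cause:8, vec:8, state:8;
	};
	unsigned int full;
} ecr_ex;

static int in_syscall_ex(ecr_ex ecr)
{
	/* mirrors: (regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param */
	return ecr.vec == ECR_V_TRAP_EX && ecr.param == 0;
}

int main(void)
{
	ecr_ex e = { .full = ECR_V_TRAP_EX << 16 };	/* vec field only */
	return !in_syscall_ex(e);
}
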
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 028a8cf76206..1c6db599e1fc 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -35,11 +35,11 @@ long __init arc_get_mem_sz(void);
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
extern void arc_mmu_init(void);
-extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
-extern void read_decode_mmu_bcr(void);
+extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
extern void arc_cache_init(void);
-extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
-extern void read_decode_cache_bcr(void);
+extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+
+extern void __init handle_uboot_args(void);
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index 8b0251464ffd..719112af0f41 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -6,7 +6,7 @@
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index d856491606ac..990f834909f0 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -29,6 +29,8 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void __init smp_init_cpus(void);
extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
+extern void arc_platform_smp_wait_to_boot(int);
+extern void start_kernel_secondary(void);
/*
* API expected BY platform smp code (FROM arch smp code)
@@ -75,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -84,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* support needed.
*
* (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
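
A minimal sketch of the IRQ-disabling fallback the comment describes (simplified kernel-style code; on SMP a spinlock additionally wraps the critical section, as in atomic-spinlock.h):

/* Sketch: without LLOCK/SCOND, a read-modify-write is made atomic on UP
 * by masking interrupts around it (simplified, not the kernel's code). */
#include <linux/irqflags.h>

static inline void atomic_add_fallback(int i, volatile int *v)
{
	unsigned long flags;

	local_irq_save(flags);		/* no IRQ can interleave the RMW */
	*v += i;
	local_irq_restore(flags);
}
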
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index 1f85de8288b1..5806106a65f9 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -6,7 +6,7 @@
#ifndef _ASM_ARC_SWITCH_TO_H
#define _ASM_ARC_SWITCH_TO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <asm/dsp-impl.h>
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 9709256e31c8..728d625a10f1 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -24,6 +24,17 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
}
static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->r8 = nr;
+}
+
+static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->r0 = regs->orig_r0;
@@ -67,6 +78,20 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
}
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned long *inside_ptregs = &regs->r0;
+ unsigned int n = 6;
+ unsigned int i = 0;
+
+ while (n--) {
+ *inside_ptregs = args[i++];
+ inside_ptregs--;
+ }
+}
+
static inline int
syscall_get_arch(struct task_struct *task)
{
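
syscall_set_arguments() walks six slots downward from &regs->r0, mirroring syscall_get_arguments(). The decrement matches a GPR block declared in descending register order, as in the ARCompact pt_regs, where r1 sits at the address just below r0. A toy struct (not the kernel's) showing the same pointer walk:

/* Toy illustration of why "inside_ptregs--" visits r0, r1, r2, ... when
 * the GPR block is declared high-register-first. Illustrative only. */
struct toy_gprs {
	unsigned long r3, r2, r1, r0;	/* declared high reg first */
};

static void toy_set_args(struct toy_gprs *g, const unsigned long *args,
			 unsigned int n)	/* n <= 4 for this toy */
{
	unsigned long *slot = &g->r0;	/* last member = highest address */
	unsigned int i = 0;

	while (n--)
		*slot-- = args[i++];	/* r0, then r1 one slot below, ... */
}
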
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 6ba7fe417095..255d2c774219 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -24,7 +24,7 @@
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_SHIFT (PAGE_SHIFT << THREAD_SIZE_ORDER)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/thread_info.h>
@@ -37,16 +37,16 @@
*/
struct thread_info {
unsigned long flags; /* low level flags */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
- struct task_struct *task; /* main task structure */
- __u32 cpu; /* current CPU */
+ unsigned long ksp; /* kernel mode stack top in __switch_to */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
+ int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
+ struct task_struct *task; /* main task structure */
};
/*
- * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
+ * initilaize thread_info for any @tsk
+ * - this is not related to init_task per se
*/
#define INIT_THREAD_INFO(tsk) \
{ \
@@ -62,7 +62,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* thread information flags
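
current_thread_info() relies on thread_info living at the base of the THREAD_SIZE-aligned kernel stack, so any in-stack address can be masked down to it. A stand-alone sketch with an illustrative size:

/* Sketch of the masking trick in current_thread_info(): round any
 * in-stack address down to the stack base. Size below is illustrative;
 * the real value is PAGE_SIZE << THREAD_SIZE_ORDER. */
#define THREAD_SIZE_EX	8192UL

static unsigned long thread_info_base(unsigned long sp)
{
	return sp & ~(THREAD_SIZE_EX - 1);
}
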
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 99712471c96a..1e8809ea000a 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -146,8 +146,9 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
if (n == 0)
return 0;
- /* unaligned */
- if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+ /* fallback for unaligned access when hardware doesn't support */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@@ -373,8 +374,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
if (n == 0)
return 0;
- /* unaligned */
- if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+ /* fallback for unaligned access when hardware doesn't support */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@@ -584,7 +586,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
-static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
long res = n;
unsigned char *d_char = to;
@@ -626,17 +628,10 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
return res;
}
-#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
-#define __clear_user(d, n) __arc_clear_user(d, n)
-#else
-extern unsigned long arc_clear_user_noinline(void __user *to,
- unsigned long n);
-#define __clear_user(d, n) arc_clear_user_noinline(d, n)
-#endif
+#define __clear_user __clear_user
#include <asm-generic/uaccess.h>
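
The trailing "#define __clear_user __clear_user" is the usual opt-out knob: asm-generic/uaccess.h only supplies its fallback when the macro is still undefined. Roughly, in simplified form (__user/__must_check annotations dropped):

/* Sketch of the self-#define override honored by <asm-generic/uaccess.h>. */
#include <string.h>

#ifndef __clear_user
static inline unsigned long __clear_user(void *to, unsigned long n)
{
	memset(to, 0, n);	/* generic byte-fill fallback */
	return 0;
}
#endif
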
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
deleted file mode 100644
index cf5a02382e0e..000000000000
--- a/arch/arc/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_ARC_UNALIGNED_H
-#define _ASM_ARC_UNALIGNED_H
-
-/* ARC700 can't handle unaligned Data accesses. */
-
-#include <asm-generic/unaligned.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_ARC_EMUL_UNALIGNED
-int misaligned_fixup(unsigned long address, struct pt_regs *regs,
- struct callee_regs *cregs);
-#else
-static inline int
-misaligned_fixup(unsigned long address, struct pt_regs *regs,
- struct callee_regs *cregs)
-{
- /* Not fixed */
- return 1;
-}
-#endif
-
-#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unistd.h b/arch/arc/include/asm/unistd.h
new file mode 100644
index 000000000000..211c230d88d6
--- /dev/null
+++ b/arch/arc/include/asm/unistd.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_ARC_UNISTD_H
+#define _ASM_ARC_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define NR_syscalls __NR_syscalls
+
+#endif
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index e78470141932..2501e82a1a0a 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += unistd_32.h
+
generic-y += ucontext.h
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 2a4ad619abfb..4606a326af5c 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -13,10 +13,8 @@
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
-#if defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define PAGE_SHIFT 14
-#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define PAGE_SHIFT 12
+#ifdef __KERNEL__
+#include <vdso/page.h>
#else
/*
* Default 8k
@@ -26,11 +24,10 @@
* not available
*/
#define PAGE_SHIFT 13
+#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
+#define PAGE_MASK (~(PAGE_SIZE-1))
#endif
-#define PAGE_SIZE _BITUL(PAGE_SHIFT) /* Default 8K */
#define PAGE_OFFSET _AC(0x80000000, UL) /* Kernel starts at 2G onwards */
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
#endif /* _UAPI__ASM_ARC_PAGE_H */
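
With the userspace fallback PAGE_SHIFT of 13 kept above, the derived constants work out to 8K pages (_BITUL(n) expands to 1UL << n). A quick stand-alone check:

/* Sketch: deriving PAGE_SIZE/PAGE_MASK from the fallback PAGE_SHIFT. */
#include <assert.h>

int main(void)
{
	unsigned long page_size = 1UL << 13;		/* PAGE_SIZE */
	unsigned long page_mask = ~(page_size - 1);	/* PAGE_MASK */

	assert(page_size == 8192);
	assert((0x80001fffUL & page_mask) == 0x80000000UL);
	return 0;
}
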
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 2a6eff57f6dd..3ae832db278c 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -14,7 +14,7 @@
#define PTRACE_GET_THREAD_AREA 25
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Userspace ABI: Register state needed by
* -ptrace (gdbserver)
@@ -53,6 +53,6 @@ struct user_regs_arcv2 {
unsigned long r30, r58, r59;
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 02109cd48ee1..8d1f1ef44ba7 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -62,7 +62,7 @@
* 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
*
* Joern suggested a better "C" algorithm which is great since
- * (1) It is portable to any architecure
+ * (1) It is portable to any architecture
* (2) At the same time it takes advantage of ARC ISA (rotate intrns)
*/
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index fa2713ae6bea..cb2905c7c5da 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -7,46 +7,4 @@
* published by the Free Software Foundation.
*/
-/******** no-legacy-syscalls-ABI *******/
-
-/*
- * Non-typical guard macro to enable inclusion twice in ARCH sys.c
- * That is how the Generic syscall wrapper generator works
- */
-#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
-#define _UAPI_ASM_ARC_UNISTD_H
-
-#define __ARCH_WANT_RENAMEAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_SYS_CLONE
-#define __ARCH_WANT_SYS_CLONE3
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_TIME32_SYSCALLS
-
-#define sys_mmap2 sys_mmap_pgoff
-
-#include <asm-generic/unistd.h>
-
-#define NR_syscalls __NR_syscalls
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
-
-/* ARC specific syscall */
-#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
-#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
-#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
-#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
-
-__SYSCALL(__NR_cacheflush, sys_cacheflush)
-__SYSCALL(__NR_arc_settls, sys_arc_settls)
-__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
-__SYSCALL(__NR_sysfs, sys_sysfs)
-
-#undef __SYSCALL
-
-#endif
+#include <asm/unistd_32.h>
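
The removed "non-typical guard" deserves a note: the header admitted a second inclusion whenever __SYSCALL was defined, which is how the generic wrapper generator stamped out the syscall table from the same list of numbers. A stand-alone sketch of that pattern, with illustrative names rather than the kernel's:

/* Sketch of the double-inclusion guard the old header used: pass 1 only
 * defines the __NR_* numbers; a wrapper may define __SYSCALL(nr, entry)
 * and include the header again to emit table entries. */
#if !defined(_EXAMPLE_UNISTD_H) || defined(__SYSCALL)
#define _EXAMPLE_UNISTD_H

#ifndef __SYSCALL
#define __SYSCALL(nr, entry)	/* pass 1: numbers only, no table */
#endif

#define __NR_example 0
__SYSCALL(__NR_example, sys_example)

#undef __SYSCALL

#endif
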