Diffstat (limited to 'arch/arc/include')
 -rw-r--r--  arch/arc/include/asm/atomic.h          | 83
 -rw-r--r--  arch/arc/include/asm/barrier.h         | 12
 -rw-r--r--  arch/arc/include/asm/bitops.h          | 60
 -rw-r--r--  arch/arc/include/asm/clk.h             | 22
 -rw-r--r--  arch/arc/include/asm/cmpxchg.h         | 76
 -rw-r--r--  arch/arc/include/asm/entry-compact.h   |  6
 -rw-r--r--  arch/arc/include/asm/irq.h             | 13
 -rw-r--r--  arch/arc/include/asm/page.h            |  4
 -rw-r--r--  arch/arc/include/asm/pgtable.h         |  2
 -rw-r--r--  arch/arc/include/asm/processor.h       | 51
 -rw-r--r--  arch/arc/include/asm/setup.h           |  4
 -rw-r--r--  arch/arc/include/asm/spinlock.h        | 14
 -rw-r--r--  arch/arc/include/uapi/asm/byteorder.h  |  2
 13 files changed, 286 insertions(+), 63 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 7730d302cadb..5f3dcbbc0cc9 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,6 +17,8 @@
#include <asm/barrier.h>
#include <asm/smp.h>
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
#define atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
@@ -180,13 +182,88 @@ ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline int atomic_read(const atomic_t *v)
+{
+ int temp;
+
+ __asm__ __volatile__(
+ " ld.di %0, [%1]"
+ : "=r"(temp)
+ : "r"(&v->counter)
+ : "memory");
+ return temp;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__(
+ " st.di %0,[%1]"
+ :
+ : "r"(i), "r"(&v->counter)
+ : "memory");
+}
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ : \
+ : "r"(i), "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned int temp = i; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(temp) \
+ : "r"(&v->counter), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ temp c_op i; \
+ \
+ return temp; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
+#define atomic_sub(i, v) atomic_add(-(i), (v))
+#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
+
+ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
+ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
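[Note: for readability, the new EZNPS ATOMIC_OP()/ATOMIC_OP_RETURN() macros above expand, for the "add" case, roughly as follows. This is a hand expansion for illustration only; CTOP_INST_AADD_DI_R2_R2_R3 is the .word-encoded NPS atomic-add instruction provided by plat/ctop.h.]

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
	"	mov r2, %0\n"		/* operand */
	"	mov r3, %1\n"		/* address of v->counter */
	"	.word %2\n"		/* aadd.di r2, r2, r3 */
	:
	: "r"(i), "r"(&v->counter), "i"(CTOP_INST_AADD_DI_R2_R2_R3)
	: "r2", "r3", "memory");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int temp = i;

	smp_mb();			/* full barrier before the atomic op */

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"		/* hardware leaves the old value in r2 */
	: "+r"(temp)
	: "r"(&v->counter), "i"(CTOP_INST_AADD_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();			/* full barrier after the atomic op */

	temp += i;			/* old value + i == new value to return */

	return temp;
}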
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
index a7209983ee64..b1e327495c7d 100644
--- a/arch/arc/include/asm/barrier.h
+++ b/arch/arc/include/asm/barrier.h
@@ -30,9 +30,7 @@
#define rmb() asm volatile("dmb 1\n" : : : "memory")
#define wmb() asm volatile("dmb 2\n" : : : "memory")
-#endif
-
-#ifdef CONFIG_ISA_ARCOMPACT
+#elif !defined(CONFIG_ARC_PLAT_EZNPS) /* CONFIG_ISA_ARCOMPACT */
/*
* ARCompact based cores (ARC700) only have SYNC instruction which is super
@@ -41,6 +39,14 @@
*/
#define mb() asm volatile("sync\n" : : : "memory")
+
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+#include <plat/ctop.h>
+
+#define mb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+#define rmb() asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory")
+
#endif
#include <asm-generic/barrier.h>
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 0352fb8d21b9..8da87feec59a 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -22,7 +22,7 @@
#include <asm/smp.h>
#endif
-#if defined(CONFIG_ARC_HAS_LLSC)
+#ifdef CONFIG_ARC_HAS_LLSC
/*
* Hardware assisted Atomic-R-M-W
@@ -88,7 +88,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
return (old & (1 << nr)) != 0; \
}
-#else /* !CONFIG_ARC_HAS_LLSC */
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
/*
* Non hardware assisted Atomic-R-M-W
@@ -139,7 +139,55 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
return (old & (1UL << (nr & 0x1f))) != 0; \
}
-#endif /* CONFIG_ARC_HAS_LLSC */
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+#define BIT_OP(op, c_op, asm_op) \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ m += nr >> 5; \
+ \
+ nr = (1UL << (nr & 0x1f)); \
+ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \
+ nr = ~nr; \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ : \
+ : "r"(nr), "r"(m), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+}
+
+#define TEST_N_BIT_OP(op, c_op, asm_op) \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long old; \
+ \
+ m += nr >> 5; \
+ \
+ nr = old = (1UL << (nr & 0x1f)); \
+ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \
+ old = ~old; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(old) \
+ : "r"(m), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ return (old & nr) != 0; \
+}
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
/***************************************
* Non atomic variants
@@ -181,9 +229,15 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
__TEST_N_BIT_OP(op, c_op, asm_op)
+#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
+#else
+BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
+BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
+BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
+#endif
/*
* This routine doesn't need to be atomic.
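[Note: one easy-to-miss detail of the EZNPS BIT_OP() flavour above: the NPS atomic-AND instruction ANDs memory with the operand, so clear_bit() must pass the inverted mask; the `if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)` test is folded away at compile time because asm_op is a constant. Hand-expanded for the clear case, illustration only:]

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	m += nr >> 5;			/* word that holds bit 'nr' */

	nr = ~(1UL << (nr & 0x1f));	/* inverted mask for the atomic AND */

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	:
	: "r"(nr), "r"(m), "i"(CTOP_INST_AAND_DI_R2_R2_R3)
	: "r2", "r3", "memory");
}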
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
deleted file mode 100644
index bf9d29f5bd53..000000000000
--- a/arch/arc/include/asm/clk.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_CLK_H
-#define _ASM_ARC_CLK_H
-
-/* Although we can't really hide core_freq, the accessor is still better way */
-extern unsigned long core_freq;
-
-static inline unsigned long arc_get_core_freq(void)
-{
- return core_freq;
-}
-
-extern int arc_set_core_freq(unsigned long);
-
-#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index a444be67cd53..d819de1c5d10 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
return prev;
}
-#else
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
@@ -64,23 +64,48 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
return prev;
}
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ /*
+ * Explicit full memory barrier needed before/after
+ */
+ smp_mb();
+
+ write_aux_reg(CTOP_AUX_GPA1, expected);
+
+ __asm__ __volatile__(
+ " mov r2, %0\n"
+ " mov r3, %1\n"
+ " .word %2\n"
+ " mov %0, r2"
+ : "+r"(new)
+ : "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
+ : "r2", "r3", "memory");
+
+ smp_mb();
+
+ return new;
+}
+
#endif /* CONFIG_ARC_HAS_LLSC */
#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n)))
/*
- * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
- * just to gaurantee semantics.
- * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
- * which also happens to be atomic_ops_lock.
- *
- * Thus despite semantically being different, implementation of atomic_cmpxchg()
- * is same as cmpxchg().
+ * atomic_cmpxchg is same as cmpxchg
+ * LLSC: only different in data-type, semantics are exactly same
+ * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
+ * semantics, and this lock also happens to be used by atomic_*()
*/
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
/*
* xchg (reg with memory) based on "Native atomic" EX insn
*/
@@ -143,6 +168,41 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#endif
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ /*
+ * Explicit full memory barrier needed before/after
+ */
+ smp_mb();
+
+ __asm__ __volatile__(
+ " mov r2, %0\n"
+ " mov r3, %1\n"
+ " .word %2\n"
+ " mov %0, r2\n"
+ : "+r"(val)
+ : "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
+ : "r2", "r3", "memory");
+
+ smp_mb();
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
/*
* "atomic" variant of xchg()
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
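[Note: the cmpxchg() contract is the same for all three variants above: atomically compare *ptr against the expected value, store the new value only on a match, and return whatever was in *ptr beforehand. Callers retry in a loop until the swap succeeds; a minimal sketch of such a loop using only atomic_read()/atomic_cmpxchg() from these headers -- the helper name is made up for illustration:]

static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)		/* nobody raced with us, the add happened */
			return 1;
		c = old;		/* lost the race, retry with the fresh value */
	}
	return 0;			/* *v == u, so no add was done */
}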
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 1d8f57cd6057..e0e1faf03c50 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -36,6 +36,10 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#include <plat/ctop.h>
+#endif
+
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
@@ -296,11 +300,13 @@
bic \reg, sp, (THREAD_SIZE - 1)
.endm
+#ifndef CONFIG_ARC_PLAT_EZNPS
/* Get CPU-ID of this core */
.macro GET_CPU_ID reg
lr \reg, [identity]
lsr \reg, \reg, 8
bmsk \reg, \reg, 7
.endm
+#endif
#endif /* __ASM_ARC_ENTRY_COMPACT_H */
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 49014f0ef36d..c0fa0d2de400 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -13,21 +13,14 @@
#define NR_IRQS 128 /* allow some CPU external IRQ handling */
/* Platform Independent IRQs */
-#ifdef CONFIG_ISA_ARCOMPACT
-#define TIMER0_IRQ 3
-#define TIMER1_IRQ 4
-#else
-#define TIMER0_IRQ 16
-#define TIMER1_IRQ 17
+#ifdef CONFIG_ISA_ARCV2
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
#endif
#include <linux/interrupt.h>
#include <asm-generic/irq.h>
extern void arc_init_IRQ(void);
-void arc_local_timer_setup(void);
-void arc_request_percpu_irq(int irq, int cpu,
- irqreturn_t (*isr)(int irq, void *dev),
- const char *irq_nm, void *percpu_dev);
#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 0d53854884d0..296c3426a6ad 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -31,7 +31,11 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
* These are used to make use of C type-checking..
*/
typedef struct {
+#ifdef CONFIG_ARC_HAS_PAE40
+ unsigned long long pte;
+#else
unsigned long pte;
+#endif
} pte_t;
typedef struct {
unsigned long pgd;
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 10d4b8b8e545..034bbdc0ff61 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -217,7 +217,7 @@
#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD (32 - PGDIR_SHIFT)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
+#define PGDIR_SIZE _BITUL(PGDIR_SHIFT) /* vaddr span, not PDG sz */
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PTE _BITUL(BITS_FOR_PTE)
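[Note: PGDIR_SIZE is switched from an open-coded shift to the _BITUL() helper from include/uapi/linux/const.h, which is (modulo the _AC() wrapper) just a 1UL shift, so the value and type of PGDIR_SIZE are unchanged and the definition now matches the PTRS_PER_PTE/PTRS_PER_PGD lines next to it:]

#define _BITUL(x)	(_AC(1, UL) << (x))	/* i.e. (1UL << (x)) */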
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 1d694c1ef6d6..f9048994b22f 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -57,9 +57,19 @@ struct task_struct;
* A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
* get optimised away by gcc
*/
-#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#ifndef CONFIG_EZNPS_MTM_EXT
-#define cpu_relax_lowlatency() cpu_relax()
+#define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+
+#else
+
+#define cpu_relax() \
+ __asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+
+#define cpu_relax_lowlatency() barrier()
+
+#endif
#define copy_segments(tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
@@ -97,7 +107,7 @@ extern unsigned int get_wchan(struct task_struct *p);
#endif /* !__ASSEMBLY__ */
/*
- * System Memory Map on ARC
+ * Default System Memory Map on ARC
*
* ---------------------------- (lower 2G, Translated) -------------------------
* 0x0000_0000 0x5FFF_FFFF (user vaddr: TASK_SIZE)
@@ -109,20 +119,37 @@ extern unsigned int get_wchan(struct task_struct *p);
* 0xC000_0000 0xFFFF_FFFF (peripheral uncached space)
* -----------------------------------------------------------------------------
*/
-#define VMALLOC_START 0x70000000
-/*
- * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
- * See asm/highmem.h for details
- */
-#define VMALLOC_SIZE (PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
-#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+#define TASK_SIZE 0x60000000
-#define USER_KERNEL_GUTTER 0x10000000
+#define VMALLOC_START (PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20))
-#define TASK_SIZE (VMALLOC_START - USER_KERNEL_GUTTER)
+/* 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter (see asm/highmem.h) */
+#define VMALLOC_SIZE ((CONFIG_ARC_KVADDR_SIZE << 20) - PGDIR_SIZE * 4)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+#define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE)
+
+#ifdef CONFIG_ARC_PLAT_EZNPS
+/* NPS architecture defines special window of 129M in user address space for
+ * special memory areas, when accessing this window the MMU do not use TLB.
+ * Instead MMU direct the access to:
+ * 0x57f00000:0x57ffffff -- 1M of closely coupled memory (aka CMEM)
+ * 0x58000000:0x5fffffff -- 16 huge pages, 8M each, with fixed map (aka FMTs)
+ *
+ * CMEM - is the fastest memory we got and its size is 16K.
+ * FMT - is used to map either to internal/external memory.
+ * Internal memory is the second fast memory and its size is 16M
+ * External memory is the biggest memory (16G) and also the slowest.
+ *
+ * STACK_TOP need to be PMD align (21bit) that is why we supply 0x57e00000.
+ */
+#define STACK_TOP 0x57e00000
+#else
#define STACK_TOP TASK_SIZE
+#endif
+
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
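[Note: to see that the CONFIG_ARC_KVADDR_SIZE based layout above reproduces the hard-coded numbers it replaces, here is the arithmetic with the usual ARC defaults -- PAGE_OFFSET of 0x8000_0000, CONFIG_ARC_KVADDR_SIZE of 256 (MB) and a 2 MB PGDIR_SIZE; these defaults are assumptions for the example, not something the patch dictates:]

	VMALLOC_START      = 0x8000_0000 - (256 << 20)      = 0x7000_0000  /* old hard-coded value */
	VMALLOC_SIZE       = (256 << 20) - 4 * 0x0020_0000  = 0x0F80_0000  /* 256 MB minus 8 MB for fixmap/pkmap/gutter */
	VMALLOC_END        = 0x7000_0000 + 0x0F80_0000      = 0x7F80_0000
	USER_KERNEL_GUTTER = 0x7000_0000 - 0x6000_0000      = 0x1000_0000  /* old hard-coded value */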
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 307846691be6..48b37c693db3 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -12,7 +12,11 @@
#include <linux/types.h>
#include <uapi/asm/setup.h>
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#define COMMAND_LINE_SIZE 2048
+#else
#define COMMAND_LINE_SIZE 256
+#endif
/*
* Data structure to map a ID to string
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index db8c59d1eaeb..800e7c430ca5 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -610,7 +610,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
+ unsigned long flags;
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
/*
@@ -623,6 +625,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
smp_mb();
return ret;
@@ -632,7 +635,9 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
+ unsigned long flags;
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
/*
@@ -646,6 +651,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
ret = 1;
}
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
return ret;
}
@@ -664,16 +670,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
rw->counter++;
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
}
#endif
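[Note: the local_irq_save()/local_irq_restore() pairs added around the rwlock slow paths matter because these rwlocks are emulated on top of rw->lock_mutex, and rwlocks -- unlike that internal mutex -- may legitimately be taken from interrupt context. Without masking interrupts, the following single-CPU sequence would spin forever; an illustrative scenario, not code from the patch:]

	/*
	 * CPU0: arch_read_unlock(rw)
	 *           arch_spin_lock(&rw->lock_mutex)	<-- mutex now held
	 *               <interrupt>
	 *                   handler: read_lock(rw)
	 *                       arch_read_trylock(rw)
	 *                           arch_spin_lock(&rw->lock_mutex)	<-- spins forever
	 */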
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
index 9da71d415c38..ea5ca444c7e3 100644
--- a/arch/arc/include/uapi/asm/byteorder.h
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -9,7 +9,7 @@
#ifndef __ASM_ARC_BYTEORDER_H
#define __ASM_ARC_BYTEORDER_H
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#ifdef __BIG_ENDIAN__
#include <linux/byteorder/big_endian.h>
#else
#include <linux/byteorder/little_endian.h>