Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/atomic.h                | 154
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-mpi-defs.h  | 328
-rw-r--r--  arch/mips/include/asm/pgalloc.h               |   6
-rw-r--r--  arch/mips/include/asm/pgtable.h               |  10
-rw-r--r--  arch/mips/include/asm/spinlock.h              |  19
5 files changed, 163 insertions(+), 354 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 835b402e4574..0ab176bdb8e8 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
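
For reference, a minimal usage sketch of the semantics the new fetch ops
provide: atomic_fetch_*() returns the counter's value from before the
operation, while atomic_*_return() returns the value after it (the values
below are illustrative, not part of the patch):

	atomic_t v = ATOMIC_INIT(5);
	int old = atomic_fetch_add(3, &v);	/* old == 5,  v == 8  */
	int new = atomic_add_return(3, &v);	/* new == 11, v == 11 */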
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
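
The explicit smp_mb__before_llsc()/smp_llsc_mb() calls can be dropped from
these routines because, when an architecture supplies only the _relaxed
variants, the generic <linux/atomic.h> layer builds the fully ordered
versions around them, roughly as below (a sketch of the generic wrapper,
not MIPS code):

	#define __atomic_op_fence(op, args...)				\
	({								\
		typeof(op##_relaxed(args)) __ret;			\
		smp_mb__before_atomic();				\
		__ret = op##_relaxed(args);				\
		smp_mb__after_atomic();					\
		__ret;							\
	})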
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op)
+ ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
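
For illustration, a sketch of what ATOMIC64_FETCH_OP(or, |=, or) expands to
on the !kernel_uses_llsc fallback path, following the macro body above:

	static __inline__ long atomic64_fetch_or_relaxed(long i, atomic64_t *v)
	{
		unsigned long flags;
		long result;

		raw_local_irq_save(flags);
		result = v->counter;	/* fetch the old value... */
		v->counter |= i;	/* ...then apply the operation */
		raw_local_irq_restore(flags);

		return result;
	}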
diff --git a/arch/mips/include/asm/octeon/cvmx-mpi-defs.h b/arch/mips/include/asm/octeon/cvmx-mpi-defs.h
deleted file mode 100644
index 4615b102625b..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-mpi-defs.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_MPI_DEFS_H__
-#define __CVMX_MPI_DEFS_H__
-
-#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
-#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
-#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
-#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
-
-union cvmx_mpi_cfg {
- uint64_t u64;
- struct cvmx_mpi_cfg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t csena3:1;
- uint64_t csena2:1;
- uint64_t csena1:1;
- uint64_t csena0:1;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t csena0:1;
- uint64_t csena1:1;
- uint64_t csena2:1;
- uint64_t csena3:1;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } s;
- struct cvmx_mpi_cfg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_12_15:4;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t reserved_12_15:4;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn30xx;
- struct cvmx_mpi_cfg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_11_15:5;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t reserved_11_15:5;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn31xx;
- struct cvmx_mpi_cfg_cn30xx cn50xx;
- struct cvmx_mpi_cfg_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_14_15:2;
- uint64_t csena1:1;
- uint64_t csena0:1;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t reserved_6_6:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t reserved_6_6:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t csena0:1;
- uint64_t csena1:1;
- uint64_t reserved_14_15:2;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn61xx;
- struct cvmx_mpi_cfg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t csena3:1;
- uint64_t csena2:1;
- uint64_t reserved_12_13:2;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t reserved_6_6:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t reserved_6_6:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t reserved_12_13:2;
- uint64_t csena2:1;
- uint64_t csena3:1;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn66xx;
- struct cvmx_mpi_cfg_cn61xx cnf71xx;
-};
-
-union cvmx_mpi_datx {
- uint64_t u64;
- struct cvmx_mpi_datx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t data:8;
-#else
- uint64_t data:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_mpi_datx_s cn30xx;
- struct cvmx_mpi_datx_s cn31xx;
- struct cvmx_mpi_datx_s cn50xx;
- struct cvmx_mpi_datx_s cn61xx;
- struct cvmx_mpi_datx_s cn66xx;
- struct cvmx_mpi_datx_s cnf71xx;
-};
-
-union cvmx_mpi_sts {
- uint64_t u64;
- struct cvmx_mpi_sts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_13_63:51;
- uint64_t rxnum:5;
- uint64_t reserved_1_7:7;
- uint64_t busy:1;
-#else
- uint64_t busy:1;
- uint64_t reserved_1_7:7;
- uint64_t rxnum:5;
- uint64_t reserved_13_63:51;
-#endif
- } s;
- struct cvmx_mpi_sts_s cn30xx;
- struct cvmx_mpi_sts_s cn31xx;
- struct cvmx_mpi_sts_s cn50xx;
- struct cvmx_mpi_sts_s cn61xx;
- struct cvmx_mpi_sts_s cn66xx;
- struct cvmx_mpi_sts_s cnf71xx;
-};
-
-union cvmx_mpi_tx {
- uint64_t u64;
- struct cvmx_mpi_tx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_22_63:42;
- uint64_t csid:2;
- uint64_t reserved_17_19:3;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_19:3;
- uint64_t csid:2;
- uint64_t reserved_22_63:42;
-#endif
- } s;
- struct cvmx_mpi_tx_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn30xx;
- struct cvmx_mpi_tx_cn30xx cn31xx;
- struct cvmx_mpi_tx_cn30xx cn50xx;
- struct cvmx_mpi_tx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t csid:1;
- uint64_t reserved_17_19:3;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_19:3;
- uint64_t csid:1;
- uint64_t reserved_21_63:43;
-#endif
- } cn61xx;
- struct cvmx_mpi_tx_s cn66xx;
- struct cvmx_mpi_tx_cn61xx cnf71xx;
-};
-
-#endif
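
For context, these register unions were typically consumed as in the sketch
below; cvmx_read_csr()/cvmx_write_csr() are the usual OCTEON CSR accessors,
and the field values chosen here are illustrative only:

	union cvmx_mpi_cfg cfg;

	cfg.u64 = cvmx_read_csr(CVMX_MPI_CFG);	/* read current config */
	cfg.s.clkdiv = 16;			/* illustrative clock divider */
	cfg.s.enable = 1;
	cvmx_write_csr(CVMX_MPI_CFG, cfg.u64);	/* write it back */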
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index b336037e8768..93c079a1cfc8 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+ pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
return pmd;
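
Without __GFP_REPEAT these page-table allocations may now fail rather than
retry aggressively; a caller-side sketch (illustrative, assuming the usual
-ENOMEM convention):

	pmd_t *pmd = pmd_alloc_one(mm, addr);
	if (!pmd)
		return -ENOMEM;		/* failure is reported, not retried */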
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index a6b611f1da43..7d44e888134f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -24,7 +24,7 @@ struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
- _CACHE_CACHABLE_NONCOHERENT)
+ _page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
- pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}
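
The effect of masking newprot with ~_PAGE_CHG_MASK can be seen in a small
worked example (8-bit values and hypothetical masks, for illustration):

	/*
	 * pte            = 0xB5  (1011 0101)
	 * _PAGE_CHG_MASK = 0xF0  (bits that must survive pte_modify())
	 * newprot        = 0x5A  (0101 1010)
	 *
	 * old: (pte & MASK) |  newprot          = 0xB0 | 0x5A = 0xFA
	 *      newprot bits inside MASK leak in and clobber preserved state
	 * new: (pte & MASK) | (newprot & ~MASK) = 0xB0 | 0x0A = 0xBA
	 *      only bits outside MASK are taken from newprot
	 */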
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196bebe849..f485afe51514 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <asm/barrier.h>
+#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ u16 owner = READ_ONCE(lock->h.serving_now);
+ smp_rmb();
+ for (;;) {
+ arch_spinlock_t tmp = READ_ONCE(*lock);
+
+ if (tmp.h.serving_now == tmp.h.ticket ||
+ tmp.h.serving_now != owner)
+ break;
+
+ cpu_relax();
+ }
+ smp_acquire__after_ctrl_dep();
+}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
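
The wait loop added above reads the two ticket halves through lock->h; a
sketch of the layout it assumes (actual field order is endian-dependent,
see arch/mips/include/asm/spinlock_types.h):

	typedef union {
		u32 lock;
		struct {
			u16 serving_now;	/* ticket now being served */
			u16 ticket;		/* next ticket to hand out */
		} h;
	} arch_spinlock_t;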