author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-17 15:50:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-17 15:50:54 -0400
commit     bfaf245022b4b8661af2e35f467cf0e91943c24c (patch)
tree       b5a6ee49a047557a791eb897c8c9545a155e36b7 /arch/mips/mm
parent     Merge tag 'xtensa-20150416' of git://github.com/czankel/xtensa-linux (diff)
parent     Merge branch '4.0-fixes' into mips-for-linux-next (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "This is the main pull request for MIPS for Linux 4.1.  Most noteworthy:

   - Add more Octeon-optimized crypto functions
   - Octeon crypto preemption and locking fixes
   - Little endian support for Octeon
   - Use correct CSR to soft reset Octeons
   - Support LEDs on the Octeon-based DSR-1000N
   - Fix PCI interrupt mapping for the Octeon-based DSR-1000N
   - Mark prom_free_prom_memory() as __init for a number of systems
   - Support for Imagination's Pistachio SOC.  This includes arch and
     CLK bits.  I'd like to merge pinctrl bits later
   - Improve parallelism of csum_partial for certain pipelines
   - Organize DTB files in subdirs like other architectures
   - Implement read_sched_clock for all MIPS platforms other than Octeon
   - Massive series of 38 fixes and cleanups for the FPU emulator /
     kernel
   - Further FPU emulator work to support new features.  This sits on a
     separate branch which also has been pulled into the 4.1 KVM branch
   - Clean up and fixes for the SEAD3 eval board; remove unused file
   - Various updates for Netlogic platforms
   - A number of small updates for Loongson 3 platforms
   - Increase the memory limit for ATH79 platforms to 256MB
   - A fair number of fixes and updates for BCM47xx platforms
   - Finish the implementation of XPA support
   - MIPS FDC support.  No, not floppy controller but Fast Debug Channel :)
   - Detect the R16000 used in SGI legacy platforms
   - Fix Kconfig dependencies for the SSB bus support"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (265 commits)
  MIPS: Makefile: Fix MIPS ASE detection code
  MIPS: asm: elf: Set O32 default FPU flags
  MIPS: BCM47XX: Fix detecting Microsoft MN-700 & Asus WL500G
  MIPS: Kconfig: Disable SMP/CPS for 64-bit
  MIPS: Hibernate: flush TLB entries earlier
  MIPS: smp-cps: cpu_set FPU mask if FPU present
  MIPS: lose_fpu(): Disable FPU when MSA enabled
  MIPS: ralink: add missing symbol for RALINK_ILL_ACC
  MIPS: ralink: Fix bad config symbol in PCI makefile.
  SSB: fix Kconfig dependencies
  MIPS: Malta: Detect and fix bad memsize values
  Revert "MIPS: Avoid pipeline stalls on some MIPS32R2 cores."
  MIPS: Octeon: Delete override of cpu_has_mips_r2_exec_hazard.
  MIPS: Fix cpu_has_mips_r2_exec_hazard.
  MIPS: kernel: entry.S: Set correct ISA level for mips_ihb
  MIPS: asm: spinlock: Fix addiu instruction for R10000_LLSC_WAR case
  MIPS: r4kcache: Use correct base register for MIPS R6 cache flushes
  MIPS: Kconfig: Fix typo for the r2-to-r6 emulator kernel parameter
  MIPS: unaligned: Fix regular load/store instruction emulation for EVA
  MIPS: unaligned: Surround load/store macros in do {} while statements
  ...
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c         33
-rw-r--r--  arch/mips/mm/cache.c         39
-rw-r--r--  arch/mips/mm/dma-default.c    4
-rw-r--r--  arch/mips/mm/init.c           7
-rw-r--r--  arch/mips/mm/page.c           1
-rw-r--r--  arch/mips/mm/tlb-r4k.c       15
-rw-r--r--  arch/mips/mm/tlbex.c        116
7 files changed, 148 insertions(+), 67 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 3f8059602765..0dbb65a51ce5 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -430,6 +430,7 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
/*
* These caches are inclusive caches, that is, if something
* is not cached in the S-cache, we know it also won't be
@@ -506,7 +507,7 @@ static inline void local_r4k_flush_cache_mm(void * args)
/*
* Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
- * only flush the primary caches but R10000 and R12000 behave sane ...
+ * only flush the primary caches but R1x000 behave sane ...
* R4000SC and R4400SC indexed S-cache ops also invalidate primary
* caches, so we can bail out early.
*/
@@ -888,33 +889,39 @@ static inline void rm7k_erratum31(void)
}
}
-static inline void alias_74k_erratum(struct cpuinfo_mips *c)
+static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
unsigned int imp = c->processor_id & PRID_IMP_MASK;
unsigned int rev = c->processor_id & PRID_REV_MASK;
+ int present = 0;
/*
* Early versions of the 74K do not update the cache tags on a
* vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
- * aliases. In this case it is better to treat the cache as always
- * having aliases.
+ * aliases. In this case it is better to treat the cache as always
+ * having aliases. Also disable the synonym tag update feature
+ * where available. In this case no opportunistic tag update will
+ * happen where a load causes a virtual address miss but a physical
+ * address hit during a D-cache look-up.
*/
switch (imp) {
case PRID_IMP_74K:
if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
- c->dcache.flags |= MIPS_CACHE_VTAG;
+ present = 1;
if (rev == PRID_REV_ENCODE_332(2, 4, 0))
write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
break;
case PRID_IMP_1074K:
if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
- c->dcache.flags |= MIPS_CACHE_VTAG;
+ present = 1;
write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
}
break;
default:
BUG();
}
+
+ return present;
}
static void b5k_instruction_hazard(void)
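Why the return type changes from void to int: the erratum now has to veto the Config7.AR "physically indexed" shortcut in the probe_pcache() hunks further down, not just set MIPS_CACHE_VTAG locally. A minimal sketch of the aliasing condition being guarded, assuming only PAGE_SIZE from asm/page.h (illustrative, not the kernel's exact helpers):

	/*
	 * A VIPT D-cache can alias once a single way spans more than
	 * one page: two virtual mappings of the same physical page may
	 * then select different cache sets.  An afflicted 74K/1074K
	 * must be treated as aliasing regardless.
	 */
	static inline int dcache_can_alias(unsigned long waysize,
					   int has_74k_erratum)
	{
		return has_74k_erratum || waysize > PAGE_SIZE;
	}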
@@ -938,6 +945,7 @@ static void probe_pcache(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
unsigned int prid = read_c0_prid();
+ int has_74k_erratum = 0;
unsigned long config1;
unsigned int lsize;
@@ -1012,6 +1020,7 @@ static void probe_pcache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
c->icache.linesz = 64;
c->icache.ways = 2;
@@ -1223,8 +1232,8 @@ static void probe_pcache(void)
dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
/*
- * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
- * 2-way virtually indexed so normally would suffer from aliases. So
+ * R1x000 P-caches are odd in a positive way. They're 32kB 2-way
+ * virtually indexed so normally would suffer from aliases. So
* normally they'd suffer from aliases but magic in the hardware deals
* with that for us so we don't need to take care ourselves.
*/
@@ -1240,11 +1249,12 @@ static void probe_pcache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
break;
case CPU_74K:
case CPU_1074K:
- alias_74k_erratum(c);
+ has_74k_erratum = alias_74k_erratum(c);
/* Fall through. */
case CPU_M14KC:
case CPU_M14KEC:
@@ -1259,7 +1269,7 @@ static void probe_pcache(void)
if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
(c->icache.waysize > PAGE_SIZE))
c->icache.flags |= MIPS_CACHE_ALIASES;
- if (read_c0_config7() & MIPS_CONF7_AR) {
+ if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
/*
* Effectively physically indexed dcache,
* thus no virtual aliases.
@@ -1268,7 +1278,7 @@ static void probe_pcache(void)
break;
}
default:
- if (c->dcache.waysize > PAGE_SIZE)
+ if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
c->dcache.flags |= MIPS_CACHE_ALIASES;
}
@@ -1438,6 +1448,7 @@ static void setup_scache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
c->scache.linesz = 64 << ((config >> 13) & 1);
c->scache.ways = 2;
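A quick worked example for the S-cache sizing above: with an R10K_CONF_SS field value of 2, scache_size = 0x80000 << 2 = 2 MB, while a field of 0 yields the minimum 512 KB; the line size is 64 or 128 bytes depending on Config bit 13.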
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7e3ea7766822..77d96db8253c 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
EXPORT_SYMBOL(__flush_anon_page);
-static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
+void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long addr;
+
+ if (PageHighMem(page))
+ return;
+
+ addr = (unsigned long) page_address(page);
+ flush_data_cache_page(addr);
+}
+EXPORT_SYMBOL_GPL(__flush_icache_page);
+
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte)
{
struct page *page;
- unsigned long pfn = pte_pfn(pteval);
+ unsigned long pfn, addr;
+ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+ pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
-
page = pfn_to_page(pfn);
if (page_mapping(page) && Page_dcache_dirty(page)) {
- unsigned long page_addr = (unsigned long) page_address(page);
-
- if (!cpu_has_ic_fills_f_dc ||
- pages_do_alias(page_addr, address & PAGE_MASK))
- flush_data_cache_page(page_addr);
+ addr = (unsigned long) page_address(page);
+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ flush_data_cache_page(addr);
ClearPageDcacheDirty(page);
}
}
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
- if (pte_present(pteval))
- mips_flush_dcache_from_pte(pteval, addr);
- }
-
- set_pte(ptep, pteval);
-}
-
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
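With the set_pte_at() override gone, the flush moves into __update_cache(), which the generic MM calls as PTEs are installed, and the work is keyed off a per-page dirty mark. A hedged sketch of the writer side of that protocol, assuming flush_dcache_page() defers in the usual MIPS way (SetPageDcacheDirty() does not appear in this diff and is shown on that assumption):

	/*
	 * Writers defer the expensive flush by tagging the page; the
	 * __update_cache() above then pays the debt once a user
	 * mapping goes live, and clears the tag.
	 */
	static void mark_dcache_dirty_sketch(struct page *page)
	{
		if (page_mapping(page))
			SetPageDcacheDirty(page);	/* flush deferred */
		else
			flush_data_cache_page(
				(unsigned long)page_address(page));
	}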
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index af5f046e627e..609d1241b0c4 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -258,7 +258,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
dma_addr & ~PAGE_MASK, size, direction);
-
+ plat_post_dma_flush(dev);
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
@@ -312,6 +312,7 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_handle),
dma_handle & ~PAGE_MASK, size, direction);
+ plat_post_dma_flush(dev);
}
static void mips_dma_sync_single_for_device(struct device *dev,
@@ -331,6 +332,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
for (i = 0; i < nelems; i++, sg++)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+ plat_post_dma_flush(dev);
}
static void mips_dma_sync_sg_for_device(struct device *dev,
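Each CPU-bound sync path now finishes with plat_post_dma_flush(dev); the hook's definitions live in the per-platform dma-coherence.h headers, which this diff does not touch. A plausible default, offered only as an assumption-labelled sketch:

	/*
	 * Sketch of a no-op default hook: fully coherent platforms need
	 * nothing here, while a platform with buffering between the CPU
	 * and memory could issue a barrier so the CPU observes fresh
	 * DMA data.
	 */
	static inline void plat_post_dma_flush(struct device *dev)
	{
	}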
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 448cde372af0..faa5c9822ecc 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
- entrylo = pte.pte_high;
+ entrylo = pte_to_entrylo(pte.pte_high);
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
+#ifdef CONFIG_XPA
+ entrylo = (pte.pte_low & _PFNX_MASK);
+ writex_c0_entrylo0(entrylo);
+ writex_c0_entrylo1(entrylo);
+#endif
tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
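Under XPA the EntryLo registers are wider than 32 bits, and a 32-bit kernel reaches the upper half with the MTHC0 instruction; that is what the writex_c0_entrylo*() accessors used above wrap. A sketch of such an accessor (the kernel's real ones are macro-generated in asm/mipsregs.h; this inline asm is illustrative):

	static inline void writex_c0_entrylo0_sketch(unsigned int val)
	{
		__asm__ __volatile__(
		"	.set	push			\n"
		"	.set	mips32r2		\n"
		"	.set	xpa			\n"
		"	mthc0	%0, $2			\n" /* EntryLo0 = CP0 reg 2 */
		"	.set	pop			\n"
		: /* no outputs */
		: "r" (val));
	}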
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 3f85f921801b..885d73ffd6fb 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -157,6 +157,7 @@ static void set_prefetch_parameters(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
/*
* Those values have been experimentally tuned for an
* Origin 200.
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index b2afa49beab0..a27a088e6f9f 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ptep = pte_offset_map(pmdp, address);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+ write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+ writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+ ptep++;
+ write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+ writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
write_c0_entrylo0(ptep->pte_high);
ptep++;
write_c0_entrylo1(ptep->pte_high);
+#endif
#else
write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
+#ifdef CONFIG_XPA
+ panic("Broken for XPA kernels");
+#else
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
+#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -477,7 +489,8 @@ static void r4k_tlb_configure(void)
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||
- current_cpu_type() == CPU_R14000)
+ current_cpu_type() == CPU_R14000 ||
+ current_cpu_type() == CPU_R16000)
write_c0_framemask(0);
if (cpu_has_rixi) {
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index d75ff73a2012..97c87027c17f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,17 @@
#include <asm/uasm.h>
#include <asm/setup.h>
+static int __cpuinitdata mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+ mips_xpa_disabled = 1;
+
+ return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
/*
* TLB load/store/modify handlers.
*
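Usage note: booting with "noxpa" on the kernel command line (for example a bootloader bootargs of "console=ttyS0,115200 noxpa") sets mips_xpa_disabled before config_xpa_params(), added further down in this file, runs; XPA then stays off and the kernel prints "Extended Physical Addressing (XPA) disabled" instead of setting PG_ELPA. The __setup() handler returns 1 so the option is treated as consumed.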
@@ -231,14 +242,14 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
#endif
+#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
-#endif
-#ifdef _PAGE_NO_READ_SHIFT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
#endif
}
+#endif
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
@@ -501,26 +512,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
- if (cpu_has_mips_r2_exec_hazard) {
- /*
- * The architecture spec says an ehb is required here,
- * but a number of cores do not have the hazard and
- * using an ehb causes an expensive pipeline stall.
- */
- switch (current_cpu_type()) {
- case CPU_M14KC:
- case CPU_74K:
- case CPU_1074K:
- case CPU_PROAPTIV:
- case CPU_P5600:
- case CPU_M5150:
- case CPU_QEMU_GENERIC:
- break;
-
- default:
+ if (cpu_has_mips_r2_r6) {
+ if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(p);
- break;
- }
tlbw(p);
return;
}
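The per-core whitelist is dropped here because the decision moves into cpu_has_mips_r2_exec_hazard itself; the companion changes in this pull ("MIPS: Fix cpu_has_mips_r2_exec_hazard" and the revert of "MIPS: Avoid pipeline stalls on some MIPS32R2 cores.") make that predicate reflect which cores genuinely need the barrier, so the ehb is emitted only where the hazard actually exists.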
@@ -569,6 +563,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
+ case CPU_R16000:
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
@@ -1027,12 +1022,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
} else {
int pte_off_even = sizeof(pte_t) / 2;
int pte_off_odd = pte_off_even + sizeof(pte_t);
+#ifdef CONFIG_XPA
+ const int scratch = 1; /* Our extra working register */
- /* The pte entries are pre-shifted */
- uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+ uasm_i_addu(p, scratch, 0, ptep);
+#endif
+ uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+ uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+#ifdef CONFIG_XPA
+ uasm_i_lw(p, tmp, 0, scratch);
+ uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
+ uasm_i_lui(p, scratch, 0xff);
+ uasm_i_ori(p, scratch, scratch, 0xffff);
+ uasm_i_and(p, tmp, scratch, tmp);
+ uasm_i_and(p, ptep, scratch, ptep);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+ uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+#endif
}
#else
UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
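Rendered as C, the XPA path above does roughly the following for the even/odd PTE pair; rotr32() and the pte field accesses are illustrative stand-ins for the emitted uasm instructions:

	/*
	 * Sketch of the emitted refill fragment.  PTEs are no longer
	 * pre-shifted, so the global bit is first rotated into the
	 * architected EntryLo position; the low PTE words then supply
	 * the extra physical-address (PFNX) bits via MTHC0.
	 */
	static void load_entrylo_pair_xpa(const pte_t *ptep)
	{
		write_c0_entrylo0(rotr32(ptep[0].pte_high,
					 ilog2(_PAGE_GLOBAL)));
		write_c0_entrylo1(rotr32(ptep[1].pte_high,
					 ilog2(_PAGE_GLOBAL)));
		/* lui 0xff; ori 0xffff builds the 0x00ffffff PFNX mask */
		writex_c0_entrylo0(ptep[0].pte_low & 0x00ffffff);
		writex_c0_entrylo1(ptep[1].pte_low & 0x00ffffff);
	}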
@@ -1533,8 +1543,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
+ if (!cpu_has_64bits) {
+ const int scratch = 1; /* Our extra working register */
+
+ uasm_i_lui(p, scratch, (mode >> 16));
+ uasm_i_or(p, pte, pte, scratch);
+ } else
+#endif
uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
@@ -1598,15 +1614,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
} else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 3);
+ uasm_i_xori(p, t, t, 3);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
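The immediates change because, with XPA, the software flag bits migrate into the upper half of the PTE, out of reach of a 16-bit andi; the tests therefore shift the PTE down by _PAGE_PRESENT_SHIFT first and check small relative masks (1 = present, 3 = present|read, 5 = present|write). Equivalent C for the readable case, as a sketch:

	/*
	 * After the shift, bit 0 is _PAGE_PRESENT and bit 1 is
	 * _PAGE_READ, so "present and readable" is ((t & 3) ^ 3) == 0
	 * -- exactly the srl/andi/xori/bnez sequence above.
	 */
	static int pte_present_and_readable(unsigned long pte)
	{
		unsigned long t = pte >> _PAGE_PRESENT_SHIFT;

		return ((t & 3) ^ 3) == 0;
	}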
@@ -1635,8 +1653,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
{
int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 5);
+ uasm_i_xori(p, t, t, 5);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -1672,7 +1691,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
uasm_i_nop(p);
} else {
int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_WRITE);
+ uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+ uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -2285,6 +2305,11 @@ static void config_htw_params(void)
pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+
+ /* If XPA has been enabled, PTEs are 64-bit in size. */
+ if (read_c0_pagegrain() & PG_ELPA)
+ pwsize |= 1;
+
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
@@ -2298,6 +2323,28 @@ static void config_htw_params(void)
print_htw_config();
}
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+ unsigned int pagegrain;
+
+ if (mips_xpa_disabled) {
+ pr_info("Extended Physical Addressing (XPA) disabled\n");
+ return;
+ }
+
+ pagegrain = read_c0_pagegrain();
+ write_c0_pagegrain(pagegrain | PG_ELPA);
+ back_to_back_c0_hazard();
+ pagegrain = read_c0_pagegrain();
+
+ if (pagegrain & PG_ELPA)
+ pr_info("Extended Physical Addressing (XPA) enabled\n");
+ else
+ panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
void build_tlb_refill_handler(void)
{
/*
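config_xpa_params() uses a set-and-verify idiom worth noting: write PG_ELPA, let the CP0 write retire behind back_to_back_c0_hazard(), then read PageGrain back; on a core without XPA the bit reads as zero, turning a silent misconfiguration into an explicit panic. Condensed as a sketch (PageGrain-specific here, but the pattern generalizes to other optional CP0 bits):

	/*
	 * Set-and-verify for an optional CP0 bit: writes to
	 * unimplemented bits read back as zero.
	 */
	static int enable_pagegrain_bit(unsigned int bit)
	{
		write_c0_pagegrain(read_c0_pagegrain() | bit);
		back_to_back_c0_hazard();	/* order write vs. read-back */
		return !!(read_c0_pagegrain() & bit);	/* 1 = bit took */
	}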
@@ -2362,8 +2409,9 @@ void build_tlb_refill_handler(void)
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
+ if (cpu_has_xpa)
+ config_xpa_params();
if (cpu_has_htw)
config_htw_params();
-
}
}