Diffstat (limited to 'arch/mips/mm/sc-rm7k.c')
 arch/mips/mm/sc-rm7k.c | 163 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 130 insertions(+), 33 deletions(-)
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index de69bfbf506e..1ef75cd80a0d 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -16,6 +16,7 @@
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
+#include <asm/sections.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/* Primary cache parameters. */
@@ -25,11 +26,15 @@
/* Secondary cache parameters. */
#define scache_size (256*1024) /* Fixed to 256KiB on RM7000 */
+/* Tertiary cache parameters */
+#define tc_lsize 32
+
extern unsigned long icache_way_size, dcache_way_size;
+unsigned long tcache_size;
#include <asm/r4kcache.h>
-static int rm7k_tcache_enabled;
+static int rm7k_tcache_init;
/*
* Writeback and invalidate the primary cache dcache before DMA.
@@ -46,7 +51,7 @@ static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
blast_scache_range(addr, addr + size);
- if (!rm7k_tcache_enabled)
+ if (!rm7k_tcache_init)
return;
a = addr & ~(tc_pagesize - 1);
@@ -70,7 +75,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
blast_inv_scache_range(addr, addr + size);
- if (!rm7k_tcache_enabled)
+ if (!rm7k_tcache_init)
return;
a = addr & ~(tc_pagesize - 1);
@@ -83,6 +88,45 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
}
}
+static void blast_rm7k_tcache(void)
+{
+ unsigned long start = CKSEG0ADDR(0);
+ unsigned long end = start + tcache_size;
+
+ write_c0_taglo(0);
+
+ while (start < end) {
+ cache_op(Page_Invalidate_T, start);
+ start += tc_pagesize;
+ }
+}
+
+/*
+ * This function is executed in uncached address space.
+ */
+static __cpuinit void __rm7k_tc_enable(void)
+{
+ int i;
+
+ set_c0_config(RM7K_CONF_TE);
+
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (i = 0; i < tcache_size; i += tc_lsize)
+ cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
+}
+
+static __cpuinit void rm7k_tc_enable(void)
+{
+ if (read_c0_config() & RM7K_CONF_TE)
+ return;
+
+ BUG_ON(tcache_size == 0);
+
+ run_uncached(__rm7k_tc_enable);
+}
+
/*
* This function is executed in uncached address space.
*/
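
The tag-initialization helpers above are handed to run_uncached() because a cache cannot safely be enabled and initialized by code that is being fetched through it. A rough sketch of the underlying idea, with a hypothetical helper name (the in-tree run_uncached() does more than this, but the principle is the same):

#include <asm/addrspace.h>

/*
 * Illustration only: the same physical memory is visible through the
 * cached KSEG0 window and the uncached KSEG1 window, so a routine can
 * be entered through its KSEG1 alias while the cache it is setting up
 * is still in an undefined state.
 */
static void call_via_uncached_alias(void (*fn)(void))
{
	/* CKSEG1ADDR() rebases an address into the uncached KSEG1 window */
	void (*uncached_fn)(void) = (void (*)(void))CKSEG1ADDR((unsigned long)fn);

	uncached_fn();		/* instruction fetches now bypass the caches */
}
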
@@ -95,16 +139,8 @@ static __cpuinit void __rm7k_sc_enable(void)
write_c0_taglo(0);
write_c0_taghi(0);
- for (i = 0; i < scache_size; i += sc_lsize) {
- __asm__ __volatile__ (
- ".set noreorder\n\t"
- ".set mips3\n\t"
- "cache %1, (%0)\n\t"
- ".set mips0\n\t"
- ".set reorder"
- :
- : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
- }
+ for (i = 0; i < scache_size; i += sc_lsize)
+ cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
static __cpuinit void rm7k_sc_enable(void)
@@ -112,13 +148,29 @@ static __cpuinit void rm7k_sc_enable(void)
if (read_c0_config() & RM7K_CONF_SE)
return;
- printk(KERN_INFO "Enabling secondary cache...\n");
+ pr_info("Enabling secondary cache...\n");
run_uncached(__rm7k_sc_enable);
+
+ if (rm7k_tcache_init)
+ rm7k_tc_enable();
+}
+
+static void rm7k_tc_disable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_rm7k_tcache();
+ clear_c0_config(RM7K_CONF_TE);
+ local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
{
clear_c0_config(RM7K_CONF_SE);
+
+ if (rm7k_tcache_init)
+ rm7k_tc_disable();
}
static struct bcache_ops rm7k_sc_ops = {
@@ -128,6 +180,52 @@ static struct bcache_ops rm7k_sc_ops = {
.bc_inv = rm7k_sc_inv
};
+/*
+ * This is a probing function like the one found in c-r4k.c; we look for the
+ * address offset at which accesses wrap around in the tertiary cache.
+ */
+static __cpuinit void __probe_tcache(void)
+{
+ unsigned long flags, addr, begin, end, pow2;
+
+ begin = (unsigned long) &_stext;
+ begin &= ~((8 * 1024 * 1024) - 1);
+ end = begin + (8 * 1024 * 1024);
+
+ local_irq_save(flags);
+
+ set_c0_config(RM7K_CONF_TE);
+
+ /* Fill lines at power-of-two spaced addresses with a valid tag */
+ pow2 = (256 * 1024);
+ for (addr = begin; addr <= end; addr = (begin + pow2)) {
+ unsigned long *p = (unsigned long *) addr;
+ __asm__ __volatile__("nop" : : "r" (*p));
+ pow2 <<= 1;
+ }
+
+ /* Load first line with a 0 tag, to check after */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+ cache_op(Index_Store_Tag_T, begin);
+
+ /* Look for the wrap-around */
+ pow2 = (512 * 1024);
+ for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
+ cache_op(Index_Load_Tag_T, addr);
+ if (!read_c0_taglo())
+ break;
+ pow2 <<= 1;
+ }
+
+ addr -= begin;
+ tcache_size = addr;
+
+ clear_c0_config(RM7K_CONF_TE);
+
+ local_irq_restore(flags);
+}
+
void __cpuinit rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
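
__probe_tcache() above recovers the tertiary cache size by finding the first power-of-two offset that aliases back onto the line planted at begin. A stand-alone, user-space illustration of that wrap-around trick (hypothetical names throughout; the array below merely simulates a direct-mapped tag RAM of a size the probe is meant to recover):

#include <stdio.h>

#define SIM_TCACHE_SIZE	(2 * 1024 * 1024)	/* the "unknown" size to recover */
#define SIM_LINE_SIZE	32
#define NLINES		(SIM_TCACHE_SIZE / SIM_LINE_SIZE)

static unsigned long tag[NLINES];		/* simulated direct-mapped tag RAM */

static unsigned long index_of(unsigned long addr)
{
	return (addr / SIM_LINE_SIZE) % NLINES;	/* address bits select the line */
}

int main(void)
{
	unsigned long begin = 0, end = 8 * 1024 * 1024, addr, pow2;

	/* Fill: touching begin, begin + 256K, 512K, 1M, ... plants non-zero tags */
	for (pow2 = 256 * 1024, addr = begin; addr <= end; addr = begin + pow2, pow2 <<= 1)
		tag[index_of(addr)] = addr | 1;	/* "| 1" keeps the tag non-zero */

	/* Plant a zero tag on the line that begin maps to */
	tag[index_of(begin)] = 0;

	/* Probe: the first offset whose line reads back zero has wrapped around */
	for (pow2 = 512 * 1024, addr = begin + pow2; addr <= end; addr = begin + pow2) {
		if (tag[index_of(addr)] == 0)
			break;
		pow2 <<= 1;
	}

	printf("probed cache size: %luK\n", (addr - begin) >> 10);	/* prints 2048K */
	return 0;
}
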
@@ -147,27 +245,26 @@ void __cpuinit rm7k_sc_init(void)
if (!(config & RM7K_CONF_SE))
rm7k_sc_enable();
+ bcops = &rm7k_sc_ops;
+
/*
* While we're at it let's deal with the tertiary cache.
*/
- if (!(config & RM7K_CONF_TC)) {
-
- /*
- * We can't enable the L3 cache yet. There may be board-specific
- * magic necessary to turn it on, and blindly asking the CPU to
- * start using it would may give cache errors.
- *
- * Also, board-specific knowledge may allow us to use the
- * CACHE Flash_Invalidate_T instruction if the tag RAM supports
- * it, and may specify the size of the L3 cache so we don't have
- * to probe it.
- */
- printk(KERN_INFO "Tertiary cache present, %s enabled\n",
- (config & RM7K_CONF_TE) ? "already" : "not (yet)");
-
- if ((config & RM7K_CONF_TE))
- rm7k_tcache_enabled = 1;
- }
- bcops = &rm7k_sc_ops;
+ rm7k_tcache_init = 0;
+ tcache_size = 0;
+
+ if (config & RM7K_CONF_TC)
+ return;
+
+ /*
+ * There is no efficient way to ask the hardware for the size of the tcache,
+ * so we must probe for it.
+ */
+ run_uncached(__probe_tcache);
+ rm7k_tc_enable();
+ rm7k_tcache_init = 1;
+ c->tcache.linesz = tc_lsize;
+ c->tcache.ways = 1;
+ pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}
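
Once rm7k_sc_init() has pointed bcops at rm7k_sc_ops, the rest of the MIPS cache code reaches the RM7000 secondary (and now tertiary) cache only through that ops vector. A minimal sketch of the indirection, assuming the usual <asm/bcache.h> wrappers (the caller below is purely illustrative):

#include <asm/bcache.h>

/*
 * bc_wback_inv() expands to bcops->bc_wback_inv(), i.e. to
 * rm7k_sc_wback_inv() once rm7k_sc_init() has installed rm7k_sc_ops.
 */
static void flush_board_cache_before_dma(unsigned long addr, unsigned long size)
{
	bc_wback_inv(addr, size);
}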