summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authormiod <miod@openbsd.org>2015-08-15 22:31:38 +0000
committermiod <miod@openbsd.org>2015-08-15 22:31:38 +0000
commitf8fa4920b8f2ba8b0c10836c6de848ecc5d934e1 (patch)
tree1adac282ca813956a9186665bee98d583d8d2d46
parenttypo (diff)
downloadwireguard-openbsd-f8fa4920b8f2ba8b0c10836c6de848ecc5d934e1.tar.xz
wireguard-openbsd-f8fa4920b8f2ba8b0c10836c6de848ecc5d934e1.zip
Some bits for Loongson 3A support.
-rw-r--r--sys/arch/loongson/conf/files.loongson15
-rw-r--r--sys/arch/loongson/include/cpu.h23
-rw-r--r--sys/arch/loongson/loongson/bus_dma.c10
-rw-r--r--sys/arch/loongson/loongson/loongson3_machdep.c75
-rw-r--r--sys/arch/mips64/conf/files.mips645
-rw-r--r--sys/arch/mips64/include/cache.h12
-rw-r--r--sys/arch/mips64/mips64/cache_loongson3.c132
-rw-r--r--sys/arch/mips64/mips64/cache_mips64r2.c474
8 files changed, 738 insertions, 8 deletions
diff --git a/sys/arch/loongson/conf/files.loongson b/sys/arch/loongson/conf/files.loongson
index 70ce4a6ea79..8a8e1dd7066 100644
--- a/sys/arch/loongson/conf/files.loongson
+++ b/sys/arch/loongson/conf/files.loongson
@@ -1,4 +1,4 @@
-# $OpenBSD: files.loongson,v 1.19 2015/07/08 13:37:31 dlg Exp $
+# $OpenBSD: files.loongson,v 1.20 2015/08/15 22:31:38 miod Exp $
# Standard stanzas config(8) can't run without
maxpartitions 16
@@ -17,17 +17,19 @@ file arch/loongson/loongson/bus_dma.c
file arch/loongson/loongson/bus_space.c
file arch/loongson/loongson/conf.c
file arch/loongson/loongson/disksubr.c disk
-file arch/loongson/loongson/gdium_machdep.c
-file arch/loongson/loongson/generic2e_machdep.c
+file arch/loongson/loongson/gdium_machdep.c cpu_loongson2
+file arch/loongson/loongson/generic2e_machdep.c cpu_loongson2
file arch/loongson/loongson/isa_machdep.c isa
+file arch/loongson/loongson/lemote3a_machdep.c cpu_loongson3
file arch/loongson/loongson/loongson2_machdep.c
+file arch/loongson/loongson/loongson3_machdep.c cpu_loongson3
file arch/loongson/loongson/machdep.c
file arch/loongson/loongson/hibernate_machdep.c hibernate
file arch/loongson/loongson/pciide_machdep.c pciide
file arch/loongson/loongson/pmon.c
file arch/loongson/loongson/pmon32.S
file arch/loongson/loongson/wscons_machdep.c wsdisplay
-file arch/loongson/loongson/yeeloong_machdep.c
+file arch/loongson/loongson/yeeloong_machdep.c cpu_loongson2
include "dev/ata/files.ata"
include "dev/atapiscsi/files.atapiscsi"
@@ -113,6 +115,11 @@ device sisfb: wsemuldisplaydev, rasops8, rasops15, rasops16, rasops32
attach sisfb at pci
file arch/loongson/dev/sisfb.c sisfb needs-flag
+# Radeon frame buffer
+device radeonfb: wsemuldisplaydev, rasops16
+attach radeonfb at pci
+file arch/loongson/dev/radeonfb.c radeonfb needs-flag
+
device apm
attach apm at mainbus
file arch/loongson/dev/apm.c apm needs-flag
diff --git a/sys/arch/loongson/include/cpu.h b/sys/arch/loongson/include/cpu.h
index 9d3214ac08d..51d004f955c 100644
--- a/sys/arch/loongson/include/cpu.h
+++ b/sys/arch/loongson/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.4 2014/03/31 20:21:18 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.5 2015/08/15 22:31:38 miod Exp $ */
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
@@ -43,6 +43,7 @@
#ifdef _KERNEL
+#if defined(CPU_LOONGSON2) && !defined(CPU_LOONGSON3)
#define Mips_SyncCache(ci) \
Loongson2_SyncCache((ci))
#define Mips_InvalidateICache(ci, va, l) \
@@ -59,6 +60,26 @@
Loongson2_IOSyncDCache((ci), (va), (l), (h))
#define Mips_HitInvalidateDCache(ci, va, l) \
Loongson2_HitInvalidateDCache((ci), (va), (l))
+#endif
+
+#if defined(CPU_LOONGSON3) && !defined(CPU_LOONGSON2)
+#define Mips_SyncCache(ci) \
+ Loongson3_SyncCache((ci))
+#define Mips_InvalidateICache(ci, va, l) \
+ Loongson3_InvalidateICache((ci), (va), (l))
+#define Mips_InvalidateICachePage(ci, va) \
+ Loongson3_InvalidateICachePage((ci), (va))
+#define Mips_SyncICache(ci) \
+ Loongson3_SyncICache((ci))
+#define Mips_SyncDCachePage(ci, va, pa) \
+ Loongson3_SyncDCachePage((ci), (va), (pa))
+#define Mips_HitSyncDCache(ci, va, l) \
+ Loongson3_HitSyncDCache((ci), (va), (l))
+#define Mips_IOSyncDCache(ci, va, l, h) \
+ Loongson3_IOSyncDCache((ci), (va), (l), (h))
+#define Mips_HitInvalidateDCache(ci, va, l) \
+ Loongson3_HitInvalidateDCache((ci), (va), (l))
+#endif
#endif /* _KERNEL */
diff --git a/sys/arch/loongson/loongson/bus_dma.c b/sys/arch/loongson/loongson/bus_dma.c
index 33b0a265ae1..b60d72c1175 100644
--- a/sys/arch/loongson/loongson/bus_dma.c
+++ b/sys/arch/loongson/loongson/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.18 2014/11/16 12:30:57 deraadt Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.19 2015/08/15 22:31:38 miod Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -434,6 +434,14 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
int curseg, error, pmap_flags;
const struct kmem_dyn_mode *kd;
+#ifdef CPU_LOONGSON3
+ /*
+ * Loongson 3 caches are coherent.
+ */
+ if (loongson_ver >= 0x3a)
+ flags &= ~BUS_DMA_COHERENT;
+#endif
+
if (nsegs == 1) {
pa = (*t->_device_to_pa)(segs[0].ds_addr);
if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
diff --git a/sys/arch/loongson/loongson/loongson3_machdep.c b/sys/arch/loongson/loongson/loongson3_machdep.c
new file mode 100644
index 00000000000..4935616cb2b
--- /dev/null
+++ b/sys/arch/loongson/loongson/loongson3_machdep.c
@@ -0,0 +1,75 @@
+/* $OpenBSD: loongson3_machdep.c,v 1.1 2015/08/15 22:31:38 miod Exp $ */
+
+/*
+ * Copyright (c) 2009, 2010, 2014 Miodrag Vallat.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/autoconf.h>
+#include <machine/cpu.h>
+#include <machine/memconf.h>
+#include <machine/pmon.h>
+
+#ifdef HIBERNATE
+#include <machine/hibernate_var.h>
+#endif /* HIBERNATE */
+
+extern struct phys_mem_desc mem_layout[MAXMEMSEGS];
+
+void loongson3a_setup(u_long, u_long);
+
+#if 0
+/* PCI view of CPU memory */
+paddr_t loongson_dma_base = 0;
+#endif
+
+#define MEMLO_BASE 0x00000000UL
+#define MEMHI_BASE 0x90000000UL /* 2G + 256MB */
+
+/*
+ * Setup memory mappings for Loongson 3A processors.
+ */
+
+void
+loongson3a_setup(u_long memlo, u_long memhi)
+{
+ physmem = memlo + memhi + 16; /* in MB so far */
+
+ memlo = atop(memlo << 20);
+ memhi = atop(memhi << 20);
+ physmem = memlo + memhi + atop(16 << 20);
+
+ /* do NOT stomp on exception area */
+ mem_layout[0].mem_first_page = atop(MEMLO_BASE) + 1;
+ mem_layout[0].mem_last_page = atop(MEMLO_BASE) + memlo;
+#ifdef HIBERNATE
+ mem_layout[0].mem_first_page += HIBERNATE_RESERVED_PAGES;
+#endif
+
+ if (memhi != 0) {
+#ifdef notyet
+ mem_layout[1].mem_first_page = atop(MEMHI_BASE);
+ mem_layout[1].mem_last_page = atop(MEMHI_BASE) +
+ memhi;
+#endif
+ }
+}
diff --git a/sys/arch/mips64/conf/files.mips64 b/sys/arch/mips64/conf/files.mips64
index 08d2e367d72..d5b3f02ecad 100644
--- a/sys/arch/mips64/conf/files.mips64
+++ b/sys/arch/mips64/conf/files.mips64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.mips64,v 1.24 2015/07/17 22:52:28 tedu Exp $
+# $OpenBSD: files.mips64,v 1.25 2015/08/15 22:31:38 miod Exp $
file arch/mips64/mips64/arcbios.c arcbios
file arch/mips64/mips64/clock.c clock
@@ -16,6 +16,8 @@ file arch/mips64/mips64/vm_machdep.c
file arch/mips64/mips64/mutex.c
file arch/mips64/mips64/cache_loongson2.c cpu_loongson2
+file arch/mips64/mips64/cache_loongson3.c cpu_loongson3
+file arch/mips64/mips64/cache_mips64r2.c cpu_mips64r2
file arch/mips64/mips64/cache_octeon.c cpu_octeon
file arch/mips64/mips64/cache_r4k.c cpu_r4000
file arch/mips64/mips64/cache_r5k.c cpu_r5000 | cpu_rm7000
@@ -29,6 +31,7 @@ file arch/mips64/mips64/exception_tfp.S cpu_r8000
file arch/mips64/mips64/fp_emulate.c
file arch/mips64/mips64/lcore_access.S
file arch/mips64/mips64/lcore_float.S
+file arch/mips64/mips64/mips64r2.S cpu_mips64r2
file arch/mips64/mips64/r4000_errata.c cpu_r4000
file arch/mips64/mips64/tlbhandler.S !cpu_r8000
file arch/mips64/mips64/tlb_tfp.S cpu_r8000
diff --git a/sys/arch/mips64/include/cache.h b/sys/arch/mips64/include/cache.h
index e28d69edc8e..2b9ece4037c 100644
--- a/sys/arch/mips64/include/cache.h
+++ b/sys/arch/mips64/include/cache.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cache.h,v 1.6 2014/03/31 20:21:19 miod Exp $ */
+/* $OpenBSD: cache.h,v 1.7 2015/08/15 22:31:38 miod Exp $ */
/*
* Copyright (c) 2012 Miodrag Vallat.
@@ -60,6 +60,11 @@ CACHE_PROTOS(Octeon)
CACHE_PROTOS(Loongson2)
/*
+ * Loongson 3A and 2Gq.
+ */
+CACHE_PROTOS(Loongson3)
+
+/*
* MIPS R4000 and R4400.
*/
CACHE_PROTOS(Mips4k)
@@ -80,6 +85,11 @@ CACHE_PROTOS(tfp)
CACHE_PROTOS(Mips10k)
/*
+ * mips64r2-compliant processors.
+ */
+CACHE_PROTOS(mips64r2)
+
+/*
* Values used by the IOSyncDCache routine [which acts as the backend of
* bus_dmamap_sync()].
*/
diff --git a/sys/arch/mips64/mips64/cache_loongson3.c b/sys/arch/mips64/mips64/cache_loongson3.c
new file mode 100644
index 00000000000..1914f1d00bc
--- /dev/null
+++ b/sys/arch/mips64/mips64/cache_loongson3.c
@@ -0,0 +1,132 @@
+/* $OpenBSD: cache_loongson3.c,v 1.1 2015/08/15 22:31:38 miod Exp $ */
+
+/*
+ * Copyright (c) 2014 Miodrag Vallat.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Cache handling code for Loongson 3A and compatible processors
+ * (including Loongson 2Gq)
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <mips64/cache.h>
+#include <machine/cpu.h>
+
+#include <uvm/uvm_extern.h>
+
+void
+Loongson3_ConfigCache(struct cpu_info *ci)
+{
+ mips64r2_ConfigCache(ci);
+
+ ci->ci_SyncCache = Loongson3_SyncCache;
+ ci->ci_InvalidateICache = Loongson3_InvalidateICache;
+ ci->ci_InvalidateICachePage = Loongson3_InvalidateICachePage;
+ ci->ci_SyncICache = Loongson3_SyncICache;
+ ci->ci_SyncDCachePage = Loongson3_SyncDCachePage;
+ ci->ci_HitSyncDCache = Loongson3_HitSyncDCache;
+ ci->ci_HitInvalidateDCache = Loongson3_HitInvalidateDCache;
+ ci->ci_IOSyncDCache = Loongson3_IOSyncDCache;
+}
+
+/*
+ * Writeback and invalidate all caches.
+ */
+void
+Loongson3_SyncCache(struct cpu_info *ci)
+{
+ mips_sync();
+}
+
+/*
+ * Invalidate I$ for the given range.
+ */
+void
+Loongson3_InvalidateICache(struct cpu_info *ci, vaddr_t va, size_t sz)
+{
+ /* nothing to do */
+}
+
+/*
+ * Register a given page for I$ invalidation.
+ */
+void
+Loongson3_InvalidateICachePage(struct cpu_info *ci, vaddr_t va)
+{
+ /* nothing to do */
+}
+
+/*
+ * Perform postponed I$ invalidation.
+ */
+void
+Loongson3_SyncICache(struct cpu_info *ci)
+{
+ /* nothing to do */
+}
+
+/*
+ * Writeback D$ for the given page.
+ */
+void
+Loongson3_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa)
+{
+ /* nothing to do */
+}
+
+/*
+ * Writeback D$ for the given range. Range is expected to be currently
+ * mapped, allowing the use of `Hit' operations. This is less aggressive
+ * than using `Index' operations.
+ */
+
+void
+Loongson3_HitSyncDCache(struct cpu_info *ci, vaddr_t va, size_t sz)
+{
+ /* nothing to do */
+}
+
+/*
+ * Invalidate D$ for the given range. Range is expected to be currently
+ * mapped, allowing the use of `Hit' operations. This is less aggressive
+ * than using `Index' operations.
+ */
+
+void
+Loongson3_HitInvalidateDCache(struct cpu_info *ci, vaddr_t va, size_t sz)
+{
+ /* nothing to do */
+}
+
+/*
+ * Backend for bus_dmamap_sync(). Enforce coherency of the given range
+ * by performing the necessary cache writeback and/or invalidate
+ * operations.
+ */
+void
+Loongson3_IOSyncDCache(struct cpu_info *ci, vaddr_t va, size_t sz, int how)
+{
+ switch (how) {
+ case CACHE_SYNC_R:
+ break;
+ case CACHE_SYNC_X:
+ case CACHE_SYNC_W:
+ mips_sync(); /* XXX necessary? */
+ break;
+ }
+}
diff --git a/sys/arch/mips64/mips64/cache_mips64r2.c b/sys/arch/mips64/mips64/cache_mips64r2.c
new file mode 100644
index 00000000000..950ee4e80d5
--- /dev/null
+++ b/sys/arch/mips64/mips64/cache_mips64r2.c
@@ -0,0 +1,474 @@
+/* $OpenBSD: cache_mips64r2.c,v 1.1 2015/08/15 22:31:38 miod Exp $ */
+
+/*
+ * Copyright (c) 2014 Miodrag Vallat.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Cache handling code for mips64r2 compatible processors
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <mips64/cache.h>
+#include <machine/cpu.h>
+
+#include <uvm/uvm_extern.h>
+
+#define IndexInvalidate_I 0x00
+#define IndexWBInvalidate_D 0x01
+#define IndexWBInvalidate_T 0x02
+#define IndexWBInvalidate_S 0x03
+
+#define HitInvalidate_D 0x11
+#define HitInvalidate_T 0x12
+#define HitInvalidate_S 0x13
+
+#define HitWBInvalidate_D 0x15
+#define HitWBInvalidate_T 0x16
+#define HitWBInvalidate_S 0x17
+
+#define cache(op,addr) \
+ __asm__ __volatile__ \
+ ("cache %0, 0(%1)" :: "i"(op), "r"(addr) : "memory")
+
+static __inline__ void mips64r2_hitinv_primary(vaddr_t, vsize_t, vsize_t);
+static __inline__ void mips64r2_hitinv_secondary(vaddr_t, vsize_t, vsize_t);
+static __inline__ void mips64r2_hitinv_ternary(vaddr_t, vsize_t, vsize_t);
+static __inline__ void mips64r2_hitwbinv_primary(vaddr_t, vsize_t, vsize_t);
+static __inline__ void mips64r2_hitwbinv_secondary(vaddr_t, vsize_t, vsize_t);
+static __inline__ void mips64r2_hitwbinv_ternary(vaddr_t, vsize_t, vsize_t);
+
+void
+mips64r2_ConfigCache(struct cpu_info *ci)
+{
+ uint32_t cfg, valias_mask;
+ uint32_t s, l, a;
+
+ cfg = cp0_get_config();
+ if ((cfg & 0x80000000) == 0)
+ panic("no M bit in cfg0.0");
+
+ cfg = cp0_get_config_1();
+
+ a = 1 + ((cfg >> 7) & 0x07);
+ l = (cfg >> 10) & 0x07;
+ s = (cfg >> 13) & 0x07;
+ ci->ci_l1data.linesize = 2 << l;
+ ci->ci_l1data.setsize = (64 << s) * ci->ci_l1data.linesize;
+ ci->ci_l1data.sets = a;
+ ci->ci_l1data.size = ci->ci_l1data.sets * ci->ci_l1data.setsize;
+
+ a = 1 + ((cfg >> 16) & 0x07);
+ l = (cfg >> 19) & 0x07;
+ s = (cfg >> 22) & 0x07;
+ ci->ci_l1inst.linesize = 2 << l;
+ ci->ci_l1inst.setsize = (64 << s) * ci->ci_l1inst.linesize;
+ ci->ci_l1inst.sets = a;
+ ci->ci_l1inst.size = ci->ci_l1inst.sets * ci->ci_l1inst.setsize;
+
+ memset(&ci->ci_l2, 0, sizeof(struct cache_info));
+ memset(&ci->ci_l3, 0, sizeof(struct cache_info));
+
+ if ((cfg & 0x80000000) != 0) {
+ cfg = cp0_get_config_2();
+
+ a = 1 + ((cfg >> 0) & 0x0f);
+ l = (cfg >> 4) & 0x0f;
+ s = (cfg >> 8) & 0x0f;
+ if (l != 0) {
+ ci->ci_l2.linesize = 2 << l;
+ ci->ci_l2.setsize = (64 << s) * ci->ci_l2.linesize;
+ ci->ci_l2.sets = a;
+ ci->ci_l2.size = ci->ci_l2.sets * ci->ci_l2.setsize;
+ }
+
+ a = 1 + ((cfg >> 16) & 0x0f);
+ l = (cfg >> 20) & 0x0f;
+ s = (cfg >> 24) & 0x0f;
+ if (l != 0) {
+ ci->ci_l3.linesize = 2 << l;
+ ci->ci_l3.setsize = (64 << s) * ci->ci_l3.linesize;
+ ci->ci_l3.sets = a;
+ ci->ci_l3.size = ci->ci_l3.sets * ci->ci_l3.setsize;
+ }
+ }
+
+ valias_mask = (max(ci->ci_l1inst.setsize, ci->ci_l1data.setsize) - 1) &
+ ~PAGE_MASK;
+
+ if (valias_mask != 0) {
+ valias_mask |= PAGE_MASK;
+#ifdef MULTIPROCESSOR
+ if (valias_mask > cache_valias_mask) {
+#endif
+ cache_valias_mask = valias_mask;
+ pmap_prefer_mask = valias_mask;
+#ifdef MULTIPROCESSOR
+ }
+#endif
+ }
+
+ ci->ci_SyncCache = mips64r2_SyncCache;
+ ci->ci_InvalidateICache = mips64r2_InvalidateICache;
+ ci->ci_InvalidateICachePage = mips64r2_InvalidateICachePage;
+ ci->ci_SyncICache = mips64r2_SyncICache;
+ ci->ci_SyncDCachePage = mips64r2_SyncDCachePage;
+ ci->ci_HitSyncDCache = mips64r2_HitSyncDCache;
+ ci->ci_HitInvalidateDCache = mips64r2_HitInvalidateDCache;
+ ci->ci_IOSyncDCache = mips64r2_IOSyncDCache;
+}
+
+static __inline__ void
+mips64r2_hitwbinv_primary(vaddr_t va, vsize_t sz, vsize_t line)
+{
+ vaddr_t eva;
+
+ eva = va + sz;
+ while (va != eva) {
+ cache(HitWBInvalidate_D, va);
+ va += line;
+ }
+}
+
+static __inline__ void
+mips64r2_hitwbinv_secondary(vaddr_t va, vsize_t sz, vsize_t line)
+{
+ vaddr_t eva;
+
+ eva = va + sz;
+ while (va != eva) {
+ cache(HitWBInvalidate_S, va);
+ va += line;
+ }
+}
+
+static __inline__ void
+mips64r2_hitwbinv_ternary(vaddr_t va, vsize_t sz, vsize_t line)
+{
+ vaddr_t eva;
+
+ eva = va + sz;
+ while (va != eva) {
+ cache(HitWBInvalidate_T, va);
+ va += line;
+ }
+}
+
+static __inline__ void
+mips64r2_hitinv_primary(vaddr_t va, vsize_t sz, vsize_t line)
+{
+ vaddr_t eva;
+
+ eva = va + sz;
+ while (va != eva) {
+ cache(HitInvalidate_D, va);
+ va += line;
+ }
+}
+
+static __inline__ void
+mips64r2_hitinv_secondary(vaddr_t va, vsize_t sz, vsize_t line)
+{
+ vaddr_t eva;
+
+ eva = va + sz;
+ while (va != eva) {
+ cache(HitInvalidate_S, va);
+ va += line;
+ }
+}
+
+static __inline__ void
+mips64r2_hitinv_ternary(vaddr_t va, vsize_t sz, vsize_t line)
+{
+ vaddr_t eva;
+
+ eva = va + sz;
+ while (va != eva) {
+ cache(HitInvalidate_T, va);
+ va += line;
+ }
+}
+
+/*
+ * Writeback and invalidate all caches.
+ */
+void
+mips64r2_SyncCache(struct cpu_info *ci)
+{
+ vaddr_t sva, eva;
+
+ sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
+	eva = sva + ci->ci_l1inst.size;
+ while (sva != eva) {
+ cache(IndexInvalidate_I, sva);
+ sva += ci->ci_l1inst.linesize;
+ }
+
+ sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
+	eva = sva + ci->ci_l1data.size;
+ while (sva != eva) {
+ cache(IndexWBInvalidate_D, sva);
+ sva += ci->ci_l1data.linesize;
+ }
+
+ if (ci->ci_l2.size != 0) {
+ sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
+ eva = sva + ci->ci_l2.size;
+ while (sva != eva) {
+ cache(IndexWBInvalidate_S, sva);
+ sva += ci->ci_l2.linesize;
+ }
+ }
+
+ if (ci->ci_l3.size != 0) {
+ sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
+ eva = sva + ci->ci_l3.size;
+ while (sva != eva) {
+ cache(IndexWBInvalidate_T, sva);
+ sva += ci->ci_l3.linesize;
+ }
+ }
+}
+
+/*
+ * Invalidate I$ for the given range.
+ */
+void
+mips64r2_InvalidateICache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
+{
+ vaddr_t va, sva, eva, iva;
+ vsize_t sz, offs;
+ uint set, nsets;
+
+ /* extend the range to integral cache lines */
+ va = _va & ~(ci->ci_l1inst.linesize - 1);
+ sz = ((_va + _sz + ci->ci_l1inst.linesize - 1) & ~(ci->ci_l1inst.linesize - 1)) - va;
+
+ sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
+ offs = ci->ci_l1inst.setsize;
+ nsets = ci->ci_l1inst.sets;
+ /* keep only the index bits */
+ sva |= va & (offs - 1);
+ eva = sva + sz;
+
+ while (sva != eva) {
+ for (set = nsets, iva = sva; set != 0; set--, iva += offs)
+ cache(IndexInvalidate_I, iva);
+ sva += ci->ci_l1inst.linesize;
+ }
+}
+
+/*
+ * Register a given page for I$ invalidation.
+ */
+void
+mips64r2_InvalidateICachePage(struct cpu_info *ci, vaddr_t va)
+{
+ /* this code is too generic to allow for lazy I$ invalidates, yet */
+ mips64r2_InvalidateICache(ci, va, PAGE_SIZE);
+}
+
+/*
+ * Perform postponed I$ invalidation.
+ */
+void
+mips64r2_SyncICache(struct cpu_info *ci)
+{
+}
+
+/*
+ * Writeback D$ for the given page.
+ */
+void
+mips64r2_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa)
+{
+ vaddr_t sva, eva, iva;
+ vsize_t line, offs;
+ uint set, nsets;
+
+ line = ci->ci_l1data.linesize;
+ sva = PHYS_TO_XKPHYS(0, CCA_CACHED);
+ offs = ci->ci_l1data.setsize;
+ nsets = ci->ci_l1data.sets;
+ /* keep only the index bits */
+ sva += va & (offs - 1);
+ eva = sva + PAGE_SIZE;
+ while (sva != eva) {
+ for (set = nsets, iva = sva; set != 0; set--, iva += offs)
+ cache(IndexWBInvalidate_D, iva);
+ sva += ci->ci_l1data.linesize;
+ }
+}
+
+/*
+ * Writeback D$ for the given range. Range is expected to be currently
+ * mapped, allowing the use of `Hit' operations. This is less aggressive
+ * than using `Index' operations.
+ */
+
+void
+mips64r2_HitSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
+{
+ vaddr_t va;
+ vsize_t sz;
+
+ /* extend the range to integral cache lines */
+ va = _va & ~(ci->ci_l1data.linesize - 1);
+ sz = ((_va + _sz + ci->ci_l1data.linesize - 1) & ~(ci->ci_l1data.linesize - 1)) - va;
+ mips64r2_hitwbinv_primary(va, sz, ci->ci_l1data.linesize);
+}
+
+/*
+ * Invalidate D$ for the given range. Range is expected to be currently
+ * mapped, allowing the use of `Hit' operations. This is less aggressive
+ * than using `Index' operations.
+ */
+
+void
+mips64r2_HitInvalidateDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz)
+{
+ vaddr_t va;
+ vsize_t sz;
+
+ /* extend the range to integral cache lines */
+ va = _va & ~(ci->ci_l1data.linesize - 1);
+ sz = ((_va + _sz + ci->ci_l1data.linesize - 1) & ~(ci->ci_l1data.linesize - 1)) - va;
+ mips64r2_hitinv_primary(va, sz, ci->ci_l1data.linesize);
+}
+
+/*
+ * Backend for bus_dmamap_sync(). Enforce coherency of the given range
+ * by performing the necessary cache writeback and/or invalidate
+ * operations.
+ */
+void
+mips64r2_IOSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz, int how)
+{
+ vaddr_t va;
+ vsize_t sz;
+ int partial_start, partial_end;
+
+ /*
+ * L1
+ */
+
+ /* extend the range to integral cache lines */
+ va = _va & ~(ci->ci_l1data.linesize - 1);
+ sz = ((_va + _sz + ci->ci_l1data.linesize - 1) & ~(ci->ci_l1data.linesize - 1)) - va;
+
+ switch (how) {
+ case CACHE_SYNC_R:
+ /* writeback partial cachelines */
+ if (((_va | _sz) & (ci->ci_l1data.linesize - 1)) != 0) {
+ partial_start = va != _va;
+ partial_end = va + sz != _va + _sz;
+ } else {
+ partial_start = partial_end = 0;
+ }
+ if (partial_start) {
+ cache(HitWBInvalidate_D, va);
+ va += ci->ci_l1data.linesize;
+ sz -= ci->ci_l1data.linesize;
+ }
+ if (sz != 0 && partial_end) {
+ sz -= ci->ci_l1data.linesize;
+ cache(HitWBInvalidate_D, va + sz);
+ }
+ if (sz != 0)
+ mips64r2_hitinv_primary(va, sz, ci->ci_l1data.linesize);
+ break;
+ case CACHE_SYNC_X:
+ case CACHE_SYNC_W:
+ mips64r2_hitwbinv_primary(va, sz, ci->ci_l1data.linesize);
+ break;
+ }
+
+ /*
+ * L2
+ */
+
+ if (ci->ci_l2.size != 0) {
+ /* extend the range to integral cache lines */
+ va = _va & ~(ci->ci_l2.linesize - 1);
+ sz = ((_va + _sz + ci->ci_l2.linesize - 1) & ~(ci->ci_l2.linesize - 1)) - va;
+
+ switch (how) {
+ case CACHE_SYNC_R:
+ /* writeback partial cachelines */
+ if (((_va | _sz) & (ci->ci_l2.linesize - 1)) != 0) {
+ partial_start = va != _va;
+ partial_end = va + sz != _va + _sz;
+ } else {
+ partial_start = partial_end = 0;
+ }
+ if (partial_start) {
+ cache(HitWBInvalidate_S, va);
+ va += ci->ci_l2.linesize;
+ sz -= ci->ci_l2.linesize;
+ }
+ if (sz != 0 && partial_end) {
+ sz -= ci->ci_l2.linesize;
+ cache(HitWBInvalidate_S, va + sz);
+ }
+ if (sz != 0)
+ mips64r2_hitinv_secondary(va, sz, ci->ci_l2.linesize);
+ break;
+ case CACHE_SYNC_X:
+ case CACHE_SYNC_W:
+ mips64r2_hitwbinv_secondary(va, sz, ci->ci_l2.linesize);
+ break;
+ }
+ }
+
+ /*
+ * L3
+ */
+
+ if (ci->ci_l3.size != 0) {
+ /* extend the range to integral cache lines */
+ va = _va & ~(ci->ci_l3.linesize - 1);
+ sz = ((_va + _sz + ci->ci_l3.linesize - 1) & ~(ci->ci_l3.linesize - 1)) - va;
+
+ switch (how) {
+ case CACHE_SYNC_R:
+ /* writeback partial cachelines */
+ if (((_va | _sz) & (ci->ci_l3.linesize - 1)) != 0) {
+ partial_start = va != _va;
+ partial_end = va + sz != _va + _sz;
+ } else {
+ partial_start = partial_end = 0;
+ }
+ if (partial_start) {
+				cache(HitWBInvalidate_T, va);
+ va += ci->ci_l3.linesize;
+ sz -= ci->ci_l3.linesize;
+ }
+ if (sz != 0 && partial_end) {
+ sz -= ci->ci_l3.linesize;
+				cache(HitWBInvalidate_T, va + sz);
+ }
+ if (sz != 0)
+ mips64r2_hitinv_ternary(va, sz, ci->ci_l3.linesize);
+ break;
+ case CACHE_SYNC_X:
+ case CACHE_SYNC_W:
+ mips64r2_hitwbinv_ternary(va, sz, ci->ci_l3.linesize);
+ break;
+ }
+ }
+}