summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorart <art@openbsd.org>2000-11-08 16:00:54 +0000
committerart <art@openbsd.org>2000-11-08 16:00:54 +0000
commitaed035ab2761cb28eae778ad6c65cdb5676b2612 (patch)
treebf91ff8cf83181f7b5c80707ff18fbe9af804cd7
parentRemove kernel configs that will stop working soon. (diff)
downloadwireguard-openbsd-aed035ab2761cb28eae778ad6c65cdb5676b2612.tar.xz
wireguard-openbsd-aed035ab2761cb28eae778ad6c65cdb5676b2612.zip
Merge in big portions of the improvements NetBSD did to their alpha port.
Highlights: UVM, PMAP_NEW, bus_dma (only on some buses for now), new hardware support, possibility for ELF, etc, etc. Too much to mention. This is still work in progress. Video consoles might be broken; otherwise we have basically the same functionality as before, plus more.
-rw-r--r--sys/arch/alpha/alpha/autoconf.c13
-rw-r--r--sys/arch/alpha/alpha/clock.c5
-rw-r--r--sys/arch/alpha/alpha/cpu.c688
-rw-r--r--sys/arch/alpha/alpha/cpuconf.c313
-rw-r--r--sys/arch/alpha/alpha/db_disasm.c1320
-rw-r--r--sys/arch/alpha/alpha/db_instruction.h727
-rw-r--r--sys/arch/alpha/alpha/db_interface.c638
-rw-r--r--sys/arch/alpha/alpha/db_trace.c521
-rw-r--r--sys/arch/alpha/alpha/debug.s114
-rw-r--r--sys/arch/alpha/alpha/dec_2100_a50.c152
-rw-r--r--sys/arch/alpha/alpha/dec_3000_300.c193
-rw-r--r--sys/arch/alpha/alpha/dec_3000_500.c226
-rw-r--r--sys/arch/alpha/alpha/dec_550.c312
-rw-r--r--sys/arch/alpha/alpha/dec_axppci_33.c182
-rw-r--r--sys/arch/alpha/alpha/dec_eb164.c187
-rw-r--r--sys/arch/alpha/alpha/dec_kn20aa.c128
-rw-r--r--sys/arch/alpha/alpha/genassym.c291
-rw-r--r--sys/arch/alpha/alpha/interrupt.c284
-rw-r--r--sys/arch/alpha/alpha/locore.s1020
-rw-r--r--sys/arch/alpha/alpha/machdep.c1251
-rw-r--r--sys/arch/alpha/alpha/mainbus.c124
-rw-r--r--sys/arch/alpha/alpha/mem.c75
-rw-r--r--sys/arch/alpha/alpha/multiproc.s84
-rw-r--r--sys/arch/alpha/alpha/pal.s158
-rw-r--r--sys/arch/alpha/alpha/pmap.c6115
-rw-r--r--sys/arch/alpha/alpha/process_machdep.c4
-rw-r--r--sys/arch/alpha/alpha/prom.c243
-rw-r--r--sys/arch/alpha/alpha/prom_disp.s4
-rw-r--r--sys/arch/alpha/alpha/trap.c733
-rw-r--r--sys/arch/alpha/alpha/vm_machdep.c192
-rw-r--r--sys/arch/alpha/common/bus_dma.c673
-rw-r--r--sys/arch/alpha/common/sgmap_common.c224
-rw-r--r--sys/arch/alpha/common/sgmap_typedep.c330
-rw-r--r--sys/arch/alpha/common/sgmap_typedep.h58
-rw-r--r--sys/arch/alpha/common/sgmapvar.h95
-rw-r--r--sys/arch/alpha/conf/GENERIC4
-rw-r--r--sys/arch/alpha/conf/files.alpha26
-rw-r--r--sys/arch/alpha/dev/shared_intr.c86
-rw-r--r--sys/arch/alpha/include/alpha.h112
-rw-r--r--sys/arch/alpha/include/alpha_cpu.h289
-rw-r--r--sys/arch/alpha/include/asm.h152
-rw-r--r--sys/arch/alpha/include/atomic.h182
-rw-r--r--sys/arch/alpha/include/autoconf.h117
-rw-r--r--sys/arch/alpha/include/bus.h457
-rw-r--r--sys/arch/alpha/include/bwx.h117
-rw-r--r--sys/arch/alpha/include/cpu.h206
-rw-r--r--sys/arch/alpha/include/cpuconf.h105
-rw-r--r--sys/arch/alpha/include/db_machdep.h228
-rw-r--r--sys/arch/alpha/include/intr.h93
-rw-r--r--sys/arch/alpha/include/kcore.h8
-rw-r--r--sys/arch/alpha/include/pal.h92
-rw-r--r--sys/arch/alpha/include/param.h57
-rw-r--r--sys/arch/alpha/include/pmap.h322
-rw-r--r--sys/arch/alpha/include/proc.h3
-rw-r--r--sys/arch/alpha/include/prom.h51
-rw-r--r--sys/arch/alpha/include/pte.h118
-rw-r--r--sys/arch/alpha/include/rpb.h273
-rw-r--r--sys/arch/alpha/include/vmparam.h81
-rw-r--r--sys/arch/alpha/isa/isa_machdep.h7
-rw-r--r--sys/arch/alpha/pci/apecs.c6
-rw-r--r--sys/arch/alpha/pci/cia.c334
-rw-r--r--sys/arch/alpha/pci/cia_bwx_bus_io.c57
-rw-r--r--sys/arch/alpha/pci/cia_bwx_bus_mem.c57
-rw-r--r--sys/arch/alpha/pci/cia_dma.c544
-rw-r--r--sys/arch/alpha/pci/cia_pci.c147
-rw-r--r--sys/arch/alpha/pci/ciareg.h230
-rw-r--r--sys/arch/alpha/pci/ciavar.h32
-rw-r--r--sys/arch/alpha/pci/lca.c6
-rw-r--r--sys/arch/alpha/pci/pci_550.c450
-rw-r--r--sys/arch/alpha/pci/pci_550.h30
-rw-r--r--sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c698
-rw-r--r--sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c720
-rw-r--r--sys/arch/alpha/pci/pci_sgmap_pte64.c54
-rw-r--r--sys/arch/alpha/pci/pci_sgmap_pte64.h57
-rw-r--r--sys/arch/alpha/pci/sio.c3
-rw-r--r--sys/arch/alpha/pci/sio_pic.c13
-rw-r--r--sys/arch/alpha/pci/siovar.h3
-rw-r--r--sys/arch/alpha/stand/Makefile4
-rw-r--r--sys/arch/alpha/stand/Makefile.inc4
-rw-r--r--sys/arch/alpha/stand/boot/boot.c39
-rw-r--r--sys/arch/alpha/stand/loadfile.c6
-rw-r--r--sys/arch/alpha/tc/tcasic.c6
-rw-r--r--sys/arch/alpha/tc/tcds_dma.c4
83 files changed, 17809 insertions, 6558 deletions
diff --git a/sys/arch/alpha/alpha/autoconf.c b/sys/arch/alpha/alpha/autoconf.c
index ad094f3ed04..321bcf3feb1 100644
--- a/sys/arch/alpha/alpha/autoconf.c
+++ b/sys/arch/alpha/alpha/autoconf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: autoconf.c,v 1.9 1999/07/30 19:05:49 deraadt Exp $ */
+/* $OpenBSD: autoconf.c,v 1.10 2000/11/08 16:00:54 art Exp $ */
/* $NetBSD: autoconf.c,v 1.16 1996/11/13 21:13:04 cgd Exp $ */
/*
@@ -87,6 +87,7 @@ static int getstr __P((char *cp, int size));
void
configure()
{
+ extern int cold;
parse_prom_bootdev();
@@ -101,6 +102,8 @@ configure()
panic("no mainbus found");
(void)spl0();
+ hwrpb_restart_setup();
+
if (booted_device == NULL)
printf("WARNING: can't figure what device matches \"%s\"\n",
boot_dev);
@@ -525,8 +528,7 @@ parse_prom_bootdev()
booted_partition = 0;
bootdev_data = NULL;
- prom_getenv(PROM_E_BOOTED_DEV, boot_dev, sizeof(boot_dev));
- bcopy(boot_dev, hacked_boot_dev, sizeof hacked_boot_dev);
+ bcopy(bootinfo.booted_dev, hacked_boot_dev, sizeof hacked_boot_dev);
#if 0
printf("parse_prom_bootdev: boot dev = \"%s\"\n", boot_dev);
#endif
@@ -603,8 +605,6 @@ device_register(dev, aux)
struct device *dev;
void *aux;
{
- extern const struct cpusw *cpu_fn_switch;
-
if (bootdev_data == NULL) {
/*
* There is no hope.
@@ -613,5 +613,6 @@ device_register(dev, aux)
return;
}
- (*cpu_fn_switch->device_register)(dev, aux);
+ if (platform.device_register)
+ (*platform.device_register)(dev, aux);
}
diff --git a/sys/arch/alpha/alpha/clock.c b/sys/arch/alpha/alpha/clock.c
index 84e5e8ee106..5cace47155d 100644
--- a/sys/arch/alpha/alpha/clock.c
+++ b/sys/arch/alpha/alpha/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.9 1999/09/25 16:23:49 pjanzen Exp $ */
+/* $OpenBSD: clock.c,v 1.10 2000/11/08 16:00:56 art Exp $ */
/* $NetBSD: clock.c,v 1.14 1996/11/23 06:31:57 cgd Exp $ */
/*
@@ -50,6 +50,7 @@
#include <machine/rpb.h>
#include <machine/autoconf.h>
+#include <machine/cpuconf.h>
#include <alpha/alpha/clockvar.h>
@@ -132,7 +133,7 @@ cpu_initclocks()
* hardclock, which would then fall over because p->p_stats
* isn't set at that time.
*/
- set_clockintr();
+ platform.clockintr = hardclock;
schedhz = 16;
/*
diff --git a/sys/arch/alpha/alpha/cpu.c b/sys/arch/alpha/alpha/cpu.c
index dccffa17760..47cab329dd0 100644
--- a/sys/arch/alpha/alpha/cpu.c
+++ b/sys/arch/alpha/alpha/cpu.c
@@ -1,5 +1,41 @@
-/* $OpenBSD: cpu.c,v 1.6 1997/01/24 19:56:20 niklas Exp $ */
-/* $NetBSD: cpu.c,v 1.16 1996/12/05 01:39:27 cgd Exp $ */
+/* $NetBSD: cpu.c,v 1.44 2000/05/23 05:12:53 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -28,20 +64,53 @@
* rights to redistribute these changes.
*/
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
+#include <sys/proc.h>
+#include <sys/user.h>
+
+#include <vm/vm.h>
+#include <machine/atomic.h>
#include <machine/autoconf.h>
+#include <machine/cpu.h>
#include <machine/rpb.h>
+#include <machine/prom.h>
+#include <machine/alpha.h>
+
+#if defined(MULTIPROCESSOR)
+#include <sys/malloc.h>
+#include <sys/kthread.h>
+
+/*
+ * Array of CPU info structures. Must be statically-allocated because
+ * curproc, etc. are used early.
+ */
+struct cpu_info cpu_info[ALPHA_MAXPROCS];
+
+/* Bitmask of CPUs currently running. */
+__volatile u_long cpus_running;
+
+void cpu_boot_secondary __P((struct cpu_info *));
+#else /* MULTIPROCESSOR */
+struct cpu_info cpu_info_store;
+#endif /* MULTIPROCESSOR */
+
+/*
+ * The Implementation Version and the Architecture Mask must be
+ * consistent across all CPUs in the system, so we set it for the
+ * primary and announce the AMASK extensions if they exist.
+ *
+ * Note, we invert the AMASK so that if a bit is set, it means "has
+ * extension".
+ */
+u_long cpu_implver, cpu_amask;
/* Definition of the driver for autoconfig. */
-#ifdef __BROKEN_INDIRECT_CONFIG
int cpumatch(struct device *, void *, void *);
-#else
-int cpumatch(struct device *, struct cfdata *, void *);
-#endif
-void cpuattach __P((struct device *, struct device *, void *));
+void cpuattach(struct device *, struct device *, void *);
struct cfattach cpu_ca = {
sizeof(struct device), cpumatch, cpuattach
@@ -51,22 +120,85 @@ struct cfdriver cpu_cd = {
NULL, "cpu", DV_DULL
};
+extern struct cfdriver cpu_cd;
+
+static char *ev4minor[] = {
+ "pass 2 or 2.1", "pass 3", 0
+}, *lcaminor[] = {
+ "",
+ "21066 pass 1 or 1.1", "21066 pass 2",
+ "21068 pass 1 or 1.1", "21068 pass 2",
+ "21066A pass 1", "21068A pass 1", 0
+}, *ev5minor[] = {
+ "", "pass 2, rev BA or 2.2, rev CA", "pass 2.3, rev DA or EA",
+ "pass 3", "pass 3.2", "pass 4", 0
+}, *ev45minor[] = {
+ "", "pass 1", "pass 1.1", "pass 2", 0
+}, *ev56minor[] = {
+ "", "pass 1", "pass 2", 0
+}, *ev6minor[] = {
+ "pass 1", "pass 2", "pass 2.2", "pass 2.3", "pass 3", 0
+}, *pca56minor[] = {
+ "", "pass 1", 0
+};
+
+struct cputable_struct {
+ int cpu_major_code;
+ char *cpu_major_name;
+ char **cpu_minor_names;
+} cpunametable[] = {
+ { PCS_PROC_EV3, "EV3", 0 },
+ { PCS_PROC_EV4, "21064", ev4minor },
+ { PCS_PROC_SIMULATION, "Sim", 0 },
+ { PCS_PROC_LCA4, "LCA", lcaminor },
+ { PCS_PROC_EV5, "21164", ev5minor },
+ { PCS_PROC_EV45, "21064A", ev45minor },
+ { PCS_PROC_EV56, "21164A", ev56minor },
+ { PCS_PROC_EV6, "21264", ev6minor },
+ { PCS_PROC_PCA56, "PCA56", pca56minor }
+};
+
+/*
+ * The following is an attempt to map out how booting secondary CPUs
+ * works.
+ *
+ * As we find processors during the autoconfiguration sequence, all
+ * processors have idle stacks and PCBs created for them, including
+ * the primary (although the primary idles on proc0's PCB until its
+ * idle PCB is created).
+ *
+ * Right before calling uvm_scheduler(), main() calls, on proc0's
+ * context, cpu_boot_secondary_processors(). This is our key to
+ * actually spin up the additional processor's we've found. We
+ * run through our cpu_info[] array looking for secondary processors
+ * with idle PCBs, and spin them up.
+ *
+ * The spinup involves switching the secondary processor to the
+ * OSF/1 PALcode, setting the entry point to cpu_spinup_trampoline(),
+ * and sending a "START" message to the secondary's console.
+ *
+ * Upon successful processor bootup, the cpu_spinup_trampoline will call
+ * cpu_hatch(), which will print a message indicating that the processor
+ * is running, and will set the "hatched" flag in its softc. At the end
+ * of cpu_hatch() is a spin-forever loop; we do not yet attempt to schedule
+ * anything on secondary CPUs.
+ */
+
int
cpumatch(parent, cfdata, aux)
struct device *parent;
-#ifdef __BROKEN_INDIRECT_CONFIG
void *cfdata;
-#else
- struct cfdata *cfdata;
-#endif
void *aux;
{
- struct confargs *ca = aux;
+ struct mainbus_attach_args *ma = aux;
/* make sure that we're looking for a CPU. */
- if (strcmp(ca->ca_name, cpu_cd.cd_name) != 0)
+ if (strcmp(ma->ma_name, cpu_cd.cd_name) != 0)
return (0);
+ /* XXX CHECK SLOT? */
+ /* XXX CHECK PRIMARY? */
+
return (1);
}
@@ -76,174 +208,61 @@ cpuattach(parent, dev, aux)
struct device *dev;
void *aux;
{
- struct pcs *p;
+ struct mainbus_attach_args *ma = aux;
+ int i;
+ char **s;
+ struct pcs *p;
+#ifdef DEBUG
int needcomma;
+#endif
u_int32_t major, minor;
+#if defined(MULTIPROCESSOR)
+ extern paddr_t avail_start, avail_end;
+ struct pcb *pcb;
+ struct cpu_info *ci;
+ struct pglist mlist;
+ int error;
+#endif
- p = (struct pcs*)((char *)hwrpb + hwrpb->rpb_pcs_off +
- (dev->dv_unit * hwrpb->rpb_pcs_size));
- major = (p->pcs_proc_type & PCS_PROC_MAJOR) >> PCS_PROC_MAJORSHIFT;
- minor = (p->pcs_proc_type & PCS_PROC_MINOR) >> PCS_PROC_MINORSHIFT;
-
- printf(": ");
- switch (major) {
- case PCS_PROC_EV3:
- printf("EV3 (minor type 0x%x)", minor);
- break;
-
- case PCS_PROC_EV4:
- printf("21064 ");
- switch (minor) {
- case 0:
- printf("(pass 2 or 2.1)");
- break;
- case 1:
- printf("(pass 3)");
- break;
- default:
- printf("(unknown minor type 0x%x)", minor);
- break;
- }
- break;
-
- case PCS_PROC_SIMULATION:
- printf("simulation (minor type 0x%x)", minor);
- break;
-
- case PCS_PROC_LCA4:
- switch (minor) {
- case 0:
- printf("LCA family (reserved minor type)");
- break;
- case 1:
- printf("21066 (pass 1 or 1.1)");
- break;
- case 2:
- printf("21066 (pass 2)");
- break;
- case 3:
- printf("21068 (pass 1 or 1.1)");
- break;
- case 4:
- printf("21068 (pass 2)");
- break;
- case 5:
- printf("21066A (pass 1)");
- break;
- case 6:
- printf("21068A (pass 1)");
- break;
- default:
- printf("LCA family (unknown minor type 0x%x)", minor);
- break;
- }
- break;
-
- case PCS_PROC_EV5:
- printf("21164 ");
- switch (minor) {
- case 0:
- printf("(reserved minor type/pass 1)");
- break;
- case 1:
- printf("(pass 2 or 2.2)");
- break;
- case 2:
- printf("(pass 2.3)");
- break;
- case 3:
- printf("(pass 3)");
- break;
- case 4:
- printf("(pass 3.2)");
- break;
- case 5:
- printf("(pass 4)");
- break;
- default:
- printf("(unknown minor type 0x%x)", minor);
- break;
- }
- break;
+ p = LOCATE_PCS(hwrpb, ma->ma_slot);
+ major = PCS_CPU_MAJORTYPE(p);
+ minor = PCS_CPU_MINORTYPE(p);
- case PCS_PROC_EV45:
- printf("21064A ");
- switch (minor) {
- case 0:
- printf("(reserved minor type)");
- break;
- case 1:
- printf("(pass 1)");
- break;
- case 2:
- printf("(pass 1.1)");
- break;
- case 3:
- printf("(pass 2)");
- break;
- default:
- printf("(unknown minor type 0x%x)", minor);
- break;
- }
- break;
+ printf(": ID %d%s, ", ma->ma_slot,
+ ma->ma_slot == hwrpb->rpb_primary_cpu_id ? " (primary)" : "");
- case PCS_PROC_EV56:
- printf("21164A ");
- switch (minor) {
- case 0:
- printf("(reserved minor type)");
- break;
- case 1:
- printf("(pass 1)");
- break;
- case 2:
- printf("(pass 2)");
- break;
- default:
- printf("(unknown minor type 0x%x)", minor);
- break;
+ for(i = 0; i < sizeof cpunametable / sizeof cpunametable[0]; ++i) {
+ if (cpunametable[i].cpu_major_code == major) {
+ printf("%s-%d", cpunametable[i].cpu_major_name, minor);
+ s = cpunametable[i].cpu_minor_names;
+ for(i = 0; s && s[i]; ++i) {
+ if (i == minor && strlen(s[i]) != 0) {
+ printf(" (%s)\n", s[i]);
+ goto recognized;
+ }
+ }
+ printf(" (unknown minor type %d)\n", minor);
+ goto recognized;
}
- break;
+ }
+ printf("UNKNOWN CPU TYPE (%d:%d)", major, minor);
- case PCS_PROC_EV6:
- printf("21264 ");
- switch (minor) {
- case 0:
- printf("(reserved minor type)");
- break;
- case 1:
- printf("(pass 1)");
- break;
- default:
- printf("(unknown minor type 0x%x)", minor);
- break;
- }
- break;
+recognized:
- case PCS_PROC_PCA56:
- printf("21164PC ");
- switch (minor) {
- case 0:
- printf("(reserved minor type)");
- break;
- case 1:
- printf("(pass 1)");
- break;
- default:
- printf("(unknown minor type 0x%x)", minor);
- break;
+ if (ma->ma_slot == hwrpb->rpb_primary_cpu_id) {
+ cpu_implver = alpha_implver();
+ if (cpu_implver >= ALPHA_IMPLVER_EV5)
+ cpu_amask =
+ (~alpha_amask(ALPHA_AMASK_ALL)) & ALPHA_AMASK_ALL;
+ if (cpu_amask) {
+ printf("%s: Architecture extensions: %b\n",
+ dev->dv_xname, cpu_amask, ALPHA_AMASK_BITS);
}
- break;
-
- default:
- printf("UNKNOWN CPU TYPE (0x%x:0x%x)", major, minor);
- break;
}
- printf("\n");
- /* XXX SHOULD CHECK ARCHITECTURE MASK, TOO */
+#ifdef DEBUG
if (p->pcs_proc_var != 0) {
- printf("cpu%d: ", dev->dv_unit);
+ printf("%s: ", dev->dv_xname);
needcomma = 0;
if (p->pcs_proc_var & PCS_VAR_VAXFP) {
@@ -263,6 +282,19 @@ cpuattach(parent, dev, aux)
p->pcs_proc_var & PCS_VAR_RESERVED);
printf("\n");
}
+#endif
+
+#if defined(MULTIPROCESSOR)
+ if (ma->ma_slot > ALPHA_WHAMI_MAXID) {
+ printf("%s: procssor ID too large, ignoring\n", dev->dv_xname);
+ return;
+ }
+
+ ci = &cpu_info[ma->ma_slot];
+ simple_lock_init(&ci->ci_slock);
+ ci->ci_cpuid = ma->ma_slot;
+ ci->ci_dev = dev;
+#endif /* MULTIPROCESSOR */
/*
* Though we could (should?) attach the LCA cpus' PCI
@@ -270,4 +302,322 @@ cpuattach(parent, dev, aux)
* the bus attachment code is easier to understand
* and more compact if done the 'normal' way.
*/
+
+#if defined(MULTIPROCESSOR)
+ /*
+ * Make sure the processor is available for use.
+ */
+ if ((p->pcs_flags & PCS_PA) == 0) {
+ if (ma->ma_slot == hwrpb->rpb_primary_cpu_id)
+ panic("cpu_attach: primary not available?!");
+ printf("%s: processor not available for use\n", dev->dv_xname);
+ return;
+ }
+
+ /* Make sure the processor has valid PALcode. */
+ if ((p->pcs_flags & PCS_PV) == 0) {
+ if (ma->ma_slot == hwrpb->rpb_primary_cpu_id)
+ panic("cpu_attach: primary has invalid PALcode?!");
+ printf("%s: PALcode not valid\n", ci->ci_dev->dv_xname);
+ return;
+ }
+
+ /*
+ * Allocate UPAGES contiguous pages for the idle PCB and stack.
+ */
+ TAILQ_INIT(&mlist);
+ error = uvm_pglistalloc(USPACE, avail_start, avail_end, 0, 0,
+ &mlist, 1, 1);
+ if (error != 0) {
+ if (ma->ma_slot == hwrpb->rpb_primary_cpu_id) {
+ panic("cpu_attach: unable to allocate idle stack for"
+ " primary");
+ }
+ printf("%s: unable to allocate idle stack\n", dev->dv_xname);
+ return;
+ }
+
+ ci->ci_idle_pcb_paddr = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
+ pcb = ci->ci_idle_pcb = (struct pcb *)
+ ALPHA_PHYS_TO_K0SEG(ci->ci_idle_pcb_paddr);
+ memset(pcb, 0, USPACE);
+
+ /*
+ * Initialize the idle stack pointer, reserving space for an
+ * (empty) trapframe (XXX is the trapframe really necessary?)
+ */
+ pcb->pcb_hw.apcb_ksp =
+ (u_int64_t)pcb + USPACE - sizeof(struct trapframe);
+
+ /*
+ * Initialize the idle PCB.
+ */
+ pcb->pcb_hw.apcb_backup_ksp = pcb->pcb_hw.apcb_ksp;
+ pcb->pcb_hw.apcb_asn = proc0.p_addr->u_pcb.pcb_hw.apcb_asn;
+ pcb->pcb_hw.apcb_ptbr = proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr;
+#if 0
+ printf("%s: hwpcb ksp = 0x%lx\n", sc->sc_dev.dv_xname,
+ pcb->pcb_hw.apcb_ksp);
+ printf("%s: hwpcb ptbr = 0x%lx\n", sc->sc_dev.dv_xname,
+ pcb->pcb_hw.apcb_ptbr);
+#endif
+
+ /*
+ * If we're the primary CPU, no more work to do; we're already
+ * running!
+ */
+ if (ma->ma_slot == hwrpb->rpb_primary_cpu_id) {
+ ci->ci_flags |= CPUF_PRIMARY;
+ atomic_setbits_ulong(&cpus_running, (1UL << ma->ma_slot));
+ }
+#endif /* MULTIPROCESSOR */
+}
+
+#if defined(MULTIPROCESSOR)
+void
+cpu_boot_secondary_processors()
+{
+ struct cpu_info *ci;
+ u_long i;
+
+ for (i = 0; i < ALPHA_MAXPROCS; i++) {
+ ci = &cpu_info[i];
+ if (ci->ci_idle_pcb == NULL)
+ continue;
+ if (ci->ci_flags & CPUF_PRIMARY)
+ continue;
+
+ /* This processor is all set up; boot it! */
+ cpu_boot_secondary(ci);
+ }
+}
+
+void
+cpu_boot_secondary(ci)
+ struct cpu_info *ci;
+{
+ long timeout;
+ struct pcs *pcsp, *primary_pcsp;
+ struct pcb *pcb;
+ u_long cpumask;
+
+ pcb = ci->ci_idle_pcb;
+ primary_pcsp = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
+ pcsp = LOCATE_PCS(hwrpb, ci->ci_cpuid);
+ cpumask = (1UL << ci->ci_cpuid);
+
+ /*
+ * Set up the PCS's HWPCB to match ours.
+ */
+ memcpy(pcsp->pcs_hwpcb, &pcb->pcb_hw, sizeof(pcb->pcb_hw));
+
+ /*
+ * Set up the HWRPB to restart the secondary processor
+ * with our spin-up trampoline.
+ */
+ hwrpb->rpb_restart = (u_int64_t) cpu_spinup_trampoline;
+ hwrpb->rpb_restart_val = (u_int64_t) ci;
+ hwrpb->rpb_checksum = hwrpb_checksum();
+
+ /*
+ * Configure the CPU to start in OSF/1 PALcode by copying
+ * the primary CPU's PALcode revision info to the secondary
+ * CPUs PCS.
+ */
+
+ /*
+ * XXX Until I can update the boot block on my test system.
+ * XXX --thorpej
+ */
+#if 0
+ memcpy(&pcsp->pcs_pal_rev, &primary_pcsp->pcs_pal_rev,
+ sizeof(pcsp->pcs_pal_rev));
+#else
+ memcpy(&pcsp->pcs_pal_rev, &pcsp->pcs_palrevisions[PALvar_OSF1],
+ sizeof(pcsp->pcs_pal_rev));
+#endif
+ pcsp->pcs_flags |= (PCS_CV|PCS_RC);
+ pcsp->pcs_flags &= ~PCS_BIP;
+
+ /* Make sure the secondary console sees all this. */
+ alpha_mb();
+
+ /* Send a "START" command to the secondary CPU's console. */
+ if (cpu_iccb_send(ci->ci_cpuid, "START\r\n")) {
+ printf("%s: unable to issue `START' command\n",
+ ci->ci_dev->dv_xname);
+ return;
+ }
+
+ /* Wait for the processor to boot. */
+ for (timeout = 10000; timeout != 0; timeout--) {
+ alpha_mb();
+ if (pcsp->pcs_flags & PCS_BIP)
+ break;
+ delay(1000);
+ }
+ if (timeout == 0)
+ printf("%s: processor failed to boot\n", ci->ci_dev->dv_xname);
+
+ /*
+ * ...and now wait for verification that it's running kernel
+ * code.
+ */
+ for (timeout = 10000; timeout != 0; timeout--) {
+ alpha_mb();
+ if (cpus_running & cpumask)
+ break;
+ delay(1000);
+ }
+ if (timeout == 0)
+ printf("%s: processor failed to hatch\n", ci->ci_dev->dv_xname);
+}
+
+void
+cpu_halt_secondary(cpu_id)
+ u_long cpu_id;
+{
+ long timeout;
+ u_long cpumask = (1UL << cpu_id);
+
+#ifdef DIAGNOSTIC
+ if (cpu_id >= hwrpb->rpb_pcs_cnt ||
+ cpu_info[cpu_id].ci_dev == NULL)
+ panic("cpu_halt_secondary: bogus cpu_id");
+#endif
+
+ alpha_mb();
+ if ((cpus_running & cpumask) == 0) {
+ /* Processor not running. */
+ return;
+ }
+
+ /* Send the HALT IPI to the secondary. */
+ alpha_send_ipi(cpu_id, ALPHA_IPI_HALT);
+
+ /* ...and wait for it to shut down. */
+ for (timeout = 10000; timeout != 0; timeout--) {
+ alpha_mb();
+ if ((cpus_running & cpumask) == 0)
+ return;
+ delay(1000);
+ }
+
+ /* Erk, secondary failed to halt. */
+ printf("WARNING: %s (ID %lu) failed to halt\n",
+ cpu_info[cpu_id].ci_dev->dv_xname, cpu_id);
+}
+
+void
+cpu_hatch(ci)
+ struct cpu_info *ci;
+{
+ u_long cpumask = (1UL << ci->ci_cpuid);
+
+ /* Set our `curpcb' to reflect our context. */
+ curpcb = ci->ci_idle_pcb_paddr;
+
+ /* Mark the kernel pmap active on this processor. */
+ atomic_setbits_ulong(&pmap_kernel()->pm_cpus, cpumask);
+
+ /* Initialize trap vectors for this processor. */
+ trap_init();
+
+ /* Yahoo! We're running kernel code! Announce it! */
+ printf("%s: processor ID %lu running\n", ci->ci_dev->dv_xname,
+ alpha_pal_whami());
+ atomic_setbits_ulong(&cpus_running, cpumask);
+
+ /*
+ * Lower interrupt level so that we can get IPIs. Don't use
+ * spl0() because we don't want to hassle w/ software interrupts
+ * right now. Note that interrupt() prevents the secondaries
+ * from servicing DEVICE and CLOCK interrupts.
+ */
+ (void) alpha_pal_swpipl(ALPHA_PSL_IPL_0);
+
+ /* Ok, so all we do is spin for now... */
+ for (;;)
+ /* nothing */ ;
+}
+
+int
+cpu_iccb_send(cpu_id, msg)
+ long cpu_id;
+ const char *msg;
+{
+ struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id);
+ int timeout;
+ u_long cpumask = (1UL << cpu_id);
+
+ /* Wait for the ICCB to become available. */
+ for (timeout = 10000; timeout != 0; timeout--) {
+ alpha_mb();
+ if ((hwrpb->rpb_rxrdy & cpumask) == 0)
+ break;
+ delay(1000);
+ }
+ if (timeout == 0)
+ return (EIO);
+
+ /*
+ * Copy the message into the ICCB, and tell the secondary console
+ * that it's there. The atomic operation performs a memory barrier.
+ */
+ strcpy(pcsp->pcs_iccb.iccb_rxbuf, msg);
+ pcsp->pcs_iccb.iccb_rxlen = strlen(msg);
+ atomic_setbits_ulong(&hwrpb->rpb_rxrdy, cpumask);
+
+ /* Wait for the message to be received. */
+ for (timeout = 10000; timeout != 0; timeout--) {
+ alpha_mb();
+ if ((hwrpb->rpb_rxrdy & cpumask) == 0)
+ break;
+ delay(1000);
+ }
+ if (timeout == 0)
+ return (EIO);
+
+ return (0);
+}
+
+void
+cpu_iccb_receive()
+{
+#if 0 /* Don't bother... we don't get any important messages anyhow. */
+ u_int64_t txrdy;
+ char *cp1, *cp2, buf[80];
+ struct pcs *pcsp;
+ u_int cnt;
+ long cpu_id;
+
+ txrdy = hwrpb->rpb_txrdy;
+
+ for (cpu_id = 0; cpu_id < hwrpb->rpb_pcs_cnt; cpu_id++) {
+ if (txrdy & (1UL << cpu_id)) {
+ pcsp = LOCATE_PCS(hwrpb, cpu_id);
+ printf("Inter-console message from CPU %lu "
+ "HALT REASON = 0x%lx, FLAGS = 0x%lx\n",
+ cpu_id, pcsp->pcs_halt_reason, pcsp->pcs_flags);
+
+ cnt = pcsp->pcs_iccb.iccb_txlen;
+ if (cnt >= 80) {
+ printf("Malformed inter-console message\n");
+ continue;
+ }
+ cp1 = pcsp->pcs_iccb.iccb_txbuf;
+ cp2 = buf;
+ while (cnt--) {
+ if (*cp1 != '\r' && *cp1 != '\n')
+ *cp2++ = *cp1;
+ cp1++;
+ }
+ *cp2 = '\0';
+ printf("Message from CPU %lu: %s\n", cpu_id, buf);
+ }
+ }
+#endif /* 0 */
+ hwrpb->rpb_txrdy = 0;
+ alpha_mb();
}
+#endif /* MULTIPROCESSOR */
diff --git a/sys/arch/alpha/alpha/cpuconf.c b/sys/arch/alpha/alpha/cpuconf.c
index 0aa986ce2fe..9a99f36ff34 100644
--- a/sys/arch/alpha/alpha/cpuconf.c
+++ b/sys/arch/alpha/alpha/cpuconf.c
@@ -1,5 +1,40 @@
-/* $OpenBSD: cpuconf.c,v 1.1 1997/01/24 19:56:21 niklas Exp $ */
-/* $NetBSD: cpuconf.c,v 1.2 1996/11/13 23:42:55 cgd Exp $ */
+/* $NetBSD: cpuconf.c,v 1.27 2000/06/26 02:42:04 enami Exp $ */
+
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
@@ -31,84 +66,206 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*
- * CPU (machine) type configuration switch.
- *
- * This table should probably go at the end of conf.c, but
- * I didn't want to make conf.c "different."
- */
-
#include <sys/param.h>
#include <sys/device.h>
+#include <sys/systm.h>
#include <machine/cpuconf.h>
+#include <machine/rpb.h>
+
+#ifdef DEC_3000_500
+extern void dec_3000_500_init __P((void));
+#else
+#define dec_3000_500_init platform_not_configured
+#endif
+
+#ifdef DEC_3000_300
+extern void dec_3000_300_init __P((void));
+#else
+#define dec_3000_300_init platform_not_configured
+#endif
+
+#ifdef DEC_AXPPCI_33
+extern void dec_axppci_33_init __P((void));
+#else
+#define dec_axppci_33_init platform_not_configured
+#endif
-#undef DEC_2100_A50 /* config 'option' with flag brokenness */
-#include "dec_2100_a50.h"
-cpu_decl(dec_2100_a50);
-
-#undef DEC_21000 /* config 'option' with flag brokenness */
-#include "dec_21000.h"
-cpu_decl(dec_21000);
-
-#undef DEC_3000_300 /* config 'option' with flag brokenness */
-#include "dec_3000_300.h"
-cpu_decl(dec_3000_300);
-
-#undef DEC_3000_500 /* config 'option' with flag brokenness */
-#include "dec_3000_500.h"
-cpu_decl(dec_3000_500);
-
-#undef DEC_AXPPCI_33 /* config 'option' with flag brokenness */
-#include "dec_axppci_33.h"
-cpu_decl(dec_axppci_33);
-
-#undef DEC_EB164 /* config 'option' with flag brokenness */
-#include "dec_eb164.h"
-cpu_decl(dec_eb164);
-
-#undef DEC_KN20AA /* config 'option' with flag brokenness */
-#include "dec_kn20aa.h"
-cpu_decl(dec_kn20aa);
-
-const struct cpusw cpusw[] = {
- cpu_unknown(), /* 0: ??? */
- cpu_notdef("Alpha Demonstration Unit"), /* 1: ST_ADU */
- cpu_notdef("DEC 4000 (\"Cobra\")"), /* 2: ST_DEC_4000 */
- cpu_notdef("DEC 7000 (\"Ruby\")"), /* 3: ST_DEC_7000 */
- cpu_init("DEC 3000/500 (\"Flamingo\")",DEC_3000_500,dec_3000_500),
- /* 4: ST_DEC_3000_500 */
- cpu_unknown(), /* 5: ??? */
- cpu_notdef("DEC 2000/300 (\"Jensen\")"),
- /* 6: ST_DEC_2000_300 */
- cpu_init("DEC 3000/300 (\"Pelican\")",DEC_3000_300,dec_3000_300),
- /* 7: ST_DEC_3000_300 */
- cpu_unknown(), /* 8: ??? */
- cpu_notdef("DEC 2100/A500 (\"Sable\")"),
- /* 9: ST_DEC_2100_A500 */
- cpu_notdef("AXPvme 64"), /* 10: ST_DEC_APXVME_64 */
- cpu_init("DEC AXPpci",DEC_AXPPCI_33,dec_axppci_33),
- /* 11: ST_DEC_AXPPCI_33 */
- cpu_init("DEC 21000",DEC_21000,dec_21000),
- /* 12: ST_DEC_21000 */
- cpu_init("AlphaStation 200/400 (\"Avanti\")",DEC_2100_A50,dec_2100_a50),
- /* 13: ST_DEC_2100_A50 */
- cpu_notdef("Mustang"), /* 14: ST_DEC_MUSTANG */
- cpu_init("AlphaStation 600 (KN20AA)",DEC_KN20AA,dec_kn20aa),
- /* 15: ST_DEC_KN20AA */
- cpu_unknown(), /* 16: ??? */
- cpu_notdef("DEC 1000 (\"Mikasa\")"), /* 17: ST_DEC_1000 */
- cpu_unknown(), /* 18: ??? */
- cpu_notdef("EB66"), /* 19: ST_EB66 */
- cpu_notdef("EB64+"), /* 20: ST_EB64P */
- cpu_unknown(), /* 21: ??? */
- cpu_notdef("DEC 4100 (\"Rawhide\")"), /* 22: ST_DEC_4100 */
- cpu_notdef("??? (\"Lego\")"), /* 23: ST_DEC_EV45_PBP */
- cpu_notdef("DEC 2100A/A500 (\"Lynx\")"),
- /* 24: ST_DEC_2100A_A500 */
- cpu_unknown(), /* 25: ??? */
- cpu_init("EB164",DEC_EB164,dec_eb164), /* 26: ST_EB164 */
- cpu_notdef("DEC 1000A (\"Noritake\")"), /* 27: ST_DEC_1000A */
- cpu_notdef("AlphaVME 224 (\"Cortex\")"),
- /* 28: ST_DEC_ALPHAVME_224 */
+#ifdef DEC_KN8AE
+extern void dec_kn8ae_init __P((void));
+#else
+#define dec_kn8ae_init platform_not_configured
+#endif
+
+#ifdef DEC_2100_A50
+extern void dec_2100_a50_init __P((void));
+#else
+#define dec_2100_a50_init platform_not_configured
+#endif
+
+#ifdef DEC_KN20AA
+extern void dec_kn20aa_init __P((void));
+#else
+#define dec_kn20aa_init platform_not_configured
+#endif
+
+#ifdef DEC_EB64PLUS
+extern void dec_eb64plus_init __P((void));
+#else
+#define dec_eb64plus_init platform_not_configured
+#endif
+
+#ifdef DEC_EB164
+extern void dec_eb164_init __P((void));
+#else
+#define dec_eb164_init platform_not_configured
+#endif
+
+#ifdef AVALON_A12
+extern void avalon_a12_init __P((void));
+#else
+#define avalon_a12_init platform_not_configured
+#endif
+
+#ifdef DEC_KN300
+extern void dec_kn300_init __P((void));
+#else
+#define dec_kn300_init platform_not_configured
+#endif
+
+#ifdef DEC_550
+extern void dec_550_init __P((void));
+#else
+#define dec_550_init platform_not_configured
+#endif
+
+#if defined(DEC_1000) || defined(DEC_1000A)
+extern void _dec_1000a_init __P((void));
+#endif
+#ifdef DEC_1000A
+#define dec_1000a_init _dec_1000a_init
+#else
+#define dec_1000a_init platform_not_configured
+#endif
+#ifdef DEC_1000
+#define dec_1000_init _dec_1000a_init
+#else
+#define dec_1000_init platform_not_configured
+#endif
+
+#ifdef DEC_ALPHABOOK1
+extern void dec_alphabook1_init __P((void));
+#else
+#define dec_alphabook1_init platform_not_configured
+#endif
+
+#ifdef DEC_EB66
+extern void dec_eb66_init __P((void));
+#else
+#define dec_eb66_init platform_not_configured
+#endif
+
+#ifdef DEC_6600
+extern void dec_6600_init __P((void));
+#else
+#define dec_6600_init platform_not_configured
+#endif
+
+#if defined(DEC_2100_A500) || defined(DEC_2100A_A500)
+extern void _dec_2100_a500_init __P((void));
+#endif
+#ifdef DEC_2100_A500
+#define dec_2100_a500_init _dec_2100_a500_init
+#else
+#define dec_2100_a500_init platform_not_configured
+#endif
+#ifdef DEC_2100A_A500
+#define dec_2100a_a500_init _dec_2100_a500_init
+#else
+#define dec_2100a_a500_init platform_not_configured
+#endif
+
+#ifdef API_UP1000
+extern void api_up1000_init __P((void));
+#else
+#define api_up1000_init platform_not_configured
+#endif
+
+#undef DEC_2000_300 /* XXX - why is it defined in config? */
+#ifdef DEC_2000_300
+extern void dec_2000_300_init __P((void));
+#else
+#define dec_2000_300_init platform_not_configured
+#endif
+
+static const struct cpuinit cpuinit[] = {
+ cpu_notsupp(ST_ADU, "Alpha Demo Unit"),
+ cpu_notsupp(ST_DEC_4000, "DEC 4000 (``Cobra'')"),
+ cpu_notsupp(ST_DEC_7000, "DEC 7000 (``Ruby'')"),
+ cpu_init(ST_DEC_3000_500, dec_3000_500_init, "DEC_3000_500"),
+ cpu_init(ST_DEC_2000_300, dec_2000_300_init, "DEC_2000_300"),
+ cpu_init(ST_DEC_3000_300, dec_3000_300_init, "DEC_3000_300"),
+ cpu_init(ST_AVALON_A12, avalon_a12_init, "AVALON_A12"),
+ cpu_init(ST_DEC_2100_A500, dec_2100_a500_init, "DEC_2100_A500"),
+ cpu_notsupp(ST_DEC_APXVME_64, "AXPvme 64"),
+ cpu_init(ST_DEC_AXPPCI_33, dec_axppci_33_init, "DEC_AXPPCI_33"),
+ cpu_init(ST_DEC_21000, dec_kn8ae_init, "DEC_KN8AE"),
+ cpu_init(ST_DEC_2100_A50, dec_2100_a50_init, "DEC_2100_A50"),
+ cpu_notsupp(ST_DEC_MUSTANG, "Mustang"),
+ cpu_init(ST_DEC_KN20AA, dec_kn20aa_init, "DEC_KN20AA"),
+ cpu_init(ST_DEC_1000, dec_1000_init, "DEC_1000"),
+ cpu_init(ST_EB66, dec_eb66_init, "DEC_EB66"),
+ cpu_init(ST_EB64P, dec_eb64plus_init, "DEC_EB64PLUS"),
+ cpu_init(ST_ALPHABOOK1, dec_alphabook1_init, "DEC_ALPHABOOK1"),
+ cpu_init(ST_DEC_4100, dec_kn300_init, "DEC_KN300"),
+ cpu_notsupp(ST_DEC_EV45_PBP, "EV45 Passive Backplane Board"),
+ cpu_init(ST_DEC_2100A_A500, dec_2100a_a500_init, "DEC_2100A_A500"),
+ cpu_init(ST_EB164, dec_eb164_init, "DEC_EB164"),
+ cpu_init(ST_DEC_1000A, dec_1000a_init, "DEC_1000A"),
+ cpu_notsupp(ST_DEC_ALPHAVME_224, "AlphaVME 224"),
+ cpu_init(ST_DEC_550, dec_550_init, "DEC_550"),
+ cpu_notsupp(ST_DEC_EV56_PBP, "EV56 Passive Backplane Board"),
+ cpu_notsupp(ST_DEC_ALPHAVME_320, "AlphaVME 320"),
+ cpu_init(ST_DEC_6600, dec_6600_init, "DEC_6600"),
+ cpu_init(ST_API_NAUTILUS, api_up1000_init, "API_UP1000"),
};
-const int ncpusw = sizeof (cpusw) / sizeof (cpusw[0]);
+static const int ncpuinit = (sizeof(cpuinit) / sizeof(cpuinit[0]));
+
+const struct cpuinit *
+platform_lookup(int systype)
+{
+ const struct cpuinit *c;
+ int i;
+
+ for (i = 0; i < ncpuinit; i++) {
+ c = &cpuinit[i];
+ if (c->systype == systype)
+ return (c);
+ }
+ return (NULL);
+}
+
+void
+platform_not_configured()
+{
+ const struct cpuinit *c = platform_lookup(cputype);
+
+ printf("\n");
+ printf("Support for system type %d is not present in this kernel.\n",
+ cputype);
+ printf("Please build a kernel with \"options %s\" and reboot.\n",
+ c->option);
+ printf("\n");
+ panic("platform not configured\n");
+}
+
+void
+platform_not_supported()
+{
+ const struct cpuinit *c = platform_lookup(cputype);
+
+ printf("\n");
+ printf("OpenBSD does not yet support system type %d (%s).\n", cputype,
+ (c != NULL) ? c->option : "???");
+ printf("\n");
+ panic("platform not supported");
+}
diff --git a/sys/arch/alpha/alpha/db_disasm.c b/sys/arch/alpha/alpha/db_disasm.c
index 5420767c7d7..d59babfd349 100644
--- a/sys/arch/alpha/alpha/db_disasm.c
+++ b/sys/arch/alpha/alpha/db_disasm.c
@@ -1,304 +1,1104 @@
-/* $OpenBSD: db_disasm.c,v 1.12 1997/11/06 23:48:53 deraadt Exp $ */
+/* $NetBSD: db_disasm.c,v 1.8 2000/05/25 19:57:30 jhawk Exp $ */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
/*
- * Copyright (c) 1997 Niklas Hallqvist. All rights reserverd.
- * Copyright (c) 1997 Theo de Raadt. All rights reserved.
+ * File: db_disasm.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/91
+ *
+ * Disassembler for Alpha
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Niklas Hallqvist and
- * Theo de Raadt.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * Modified for NetBSD/alpha by:
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Christopher G. Demetriou, Carnegie Mellon University
+ *
+ * Jason R. Thorpe, Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
*/
#include <sys/param.h>
#include <sys/systm.h>
-
-#include <vm/vm.h>
-
+#include <sys/proc.h>
#include <machine/db_machdep.h>
-#include <machine/frame.h>
+#include <alpha/alpha/db_instruction.h>
-#include <ddb/db_interface.h>
-#include <ddb/db_variables.h>
-#include <ddb/db_output.h>
+#include <machine/pal.h>
+
+#include <ddb/db_access.h>
#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+#include <ddb/db_interface.h>
-struct opcode opcode[] = {
- { OPC_PAL, "call_pal", 0 }, /* 00 */
- { OPC_RES, "opc01", 0 }, /* 01 */
- { OPC_RES, "opc02", 0 }, /* 02 */
- { OPC_RES, "opc03", 0 }, /* 03 */
- { OPC_RES, "opc04", 0 }, /* 04 */
- { OPC_RES, "opc05", 0 }, /* 05 */
- { OPC_RES, "opc06", 0 }, /* 06 */
- { OPC_RES, "opc07", 0 }, /* 07 */
- { OPC_MEM, "lda", 1 }, /* 08 */
- { OPC_MEM, "ldah", 1 }, /* 09 */
- { OPC_RES, "opc0a", 0 }, /* 0A */
- { OPC_MEM, "ldq_u", 1 }, /* 0B */
- { OPC_RES, "opc0c", 0 }, /* 0C */
- { OPC_RES, "opc0d", 0 }, /* 0D */
- { OPC_RES, "opc0e", 0 }, /* 0E */
- { OPC_MEM, "stq_u", 1 }, /* 0F */
- { OPC_OP, "inta", 0 }, /* 10 */
- { OPC_OP, "intl", 0 }, /* 11 */
- { OPC_OP, "ints", 0 }, /* 12 */
- { OPC_OP, "intm", 0 }, /* 13 */
- { OPC_RES, "opc14", 0 }, /* 14 */
- { OPC_OP, "fltv", 1 }, /* 15 */
- { OPC_OP, "flti", 1 }, /* 16 */
- { OPC_OP, "fltl", 1 }, /* 17 */
- { OPC_MEM, "misc", 0 }, /* 18 */
- { OPC_PAL, "pal19", 0 }, /* 19 */
- { OPC_MEM, "jsr", 0 }, /* 1A */
- { OPC_PAL, "pal1b", 0 }, /* 1B */
- { OPC_RES, "opc1c", 0 }, /* 1C */
- { OPC_PAL, "pal1d", 0 }, /* 1D */
- { OPC_PAL, "pal1e", 0 }, /* 1E */
- { OPC_PAL, "pal1f", 0 }, /* 1F */
- { OPC_MEM, "ldf", 1 }, /* 20 */
- { OPC_MEM, "ldg", 1 }, /* 21 */
- { OPC_MEM, "lds", 1 }, /* 22 */
- { OPC_MEM, "ldt", 1 }, /* 23 */
- { OPC_MEM, "stf", 1 }, /* 24 */
- { OPC_MEM, "stg", 1 }, /* 25 */
- { OPC_MEM, "sts", 1 }, /* 26 */
- { OPC_MEM, "stt", 1 }, /* 27 */
- { OPC_MEM, "ldl", 1 }, /* 28 */
- { OPC_MEM, "ldq", 1 }, /* 29 */
- { OPC_MEM, "ldl_l", 1 }, /* 2A */
- { OPC_MEM, "ldq_l", 1 }, /* 2B */
- { OPC_MEM, "stl", 1 }, /* 2C */
- { OPC_MEM, "stq", 1 }, /* 2D */
- { OPC_MEM, "stl_c", 1 }, /* 2E */
- { OPC_MEM, "stq_c", 1 }, /* 2F */
- { OPC_BR, "br", 1 }, /* 30 */
- { OPC_BR, "fbeq", 1 }, /* 31 */
- { OPC_BR, "fblt", 1 }, /* 32 */
- { OPC_BR, "fble", 1 }, /* 33 */
- { OPC_BR, "bsr", 1 }, /* 34 */
- { OPC_BR, "fbne", 1 }, /* 35 */
- { OPC_BR, "fbge", 1 }, /* 36 */
- { OPC_BR, "fbgt", 1 }, /* 37 */
- { OPC_BR, "blbc", 1 }, /* 38 */
- { OPC_BR, "beq", 1 }, /* 39 */
- { OPC_BR, "blt", 1 }, /* 3A */
- { OPC_BR, "ble", 1 }, /* 3B */
- { OPC_BR, "blbs", 1 }, /* 3C */
- { OPC_BR, "bne", 1 }, /* 3D */
- { OPC_BR, "bge", 1 }, /* 3E */
- { OPC_BR, "bgt", 1 }, /* 3F */
+/*
+ * This would belong in a header file, except noone else needs it
+ *
+ * XXX THESE SHOULD BE CONVERTED TO ra, rb, rc FORMAT.
+ */
+typedef union {
+ /*
+ * All instructions are 32 bits wide, PAL included
+ */
+ unsigned int bits;
+
+ /*
+ * Internal processor register access instrs
+ * specify the IPR index, doubly specify the
+ * (same) GP register as src/dest, and qualifiers
+ * for the IPR set involved (abox/ibox/tmp)
+ */
+ struct {
+ unsigned index : 5,
+ regset : 3, /* a,i,p */
+ xxx : 8,
+ rs : 5,
+ rd : 5,
+ opcode : 6;
+ } mXpr_format;
+
+ /*
+ * Load/store instructions have a 12 bit displacement,
+ * and two register specifiers just as normal ld/st.
+ * Four bits have special meanings:
+ * phy: bypass the MMU (physical access)
+ * alt: use mode in ALT register for checks,
+ * or if PHY is also on locked/linked access
+ * rwc: read-with-write-check (probew)
+ * qw: quadword access
+ */
+ struct {
+ signed int displacement : 12;
+ unsigned qw : 1,
+ qualif : 3,
+ rs : 5,
+ rd : 5,
+ opcode : 6;
+ } mem_format;
+
+ /*
+ * Return from exception or interrupt has
+ * a branch-like encoding, but only one
+ * instantiation is actually usable.
+ */
+ struct {
+ unsigned xxx : 14,
+ zero : 1, /* branch prediction! */
+ one : 1,
+ rb : 5, /* r31 or stall */
+ ra : 5, /* r31 or stall */
+ opcode : 6;
+ } rei_format;
+
+} pal_instruction;
+
+
+/*
+ * Major opcodes
+ */
+static char *op_name[64] = {
+/* 0 */ "call_pal", "op1", "op2", "op3", "op4", "op5", "op6", "op7",
+/* 8 */ "lda", "ldah", "ldbu", "ldq_u","ldwu", "stw", "stb", "stq_u",
+/*16 */ "arit", "logical","bit","mul", "op20", "vaxf", "ieeef","anyf",
+/*24 */ "spec", "hw_mfpr","jump","hw_ld","intmisc","hw_mtpr","hw_rei","hw_st",
+/*32 */ "ldf", "ldg", "lds", "ldt", "stf", "stg", "sts", "stt",
+/*40 */ "ldl", "ldq", "ldl_l","ldq_l","stl", "stq", "stl_c","stq_c",
+/*48 */ "br", "fbeq", "fblt", "fble", "bsr", "fbne", "fbge", "fbgt",
+/*56 */ "blbc", "beq", "blt", "ble", "blbs", "bne", "bge", "bgt"
};
-struct opinstr {
- char *nam;
- u_char opc;
- u_char func;
-} opinstr[] = {
- { "addl",0x10,0x00 }, { "subl",0x10,0x09 }, { "cmpeq",0x10,0x2d },
- { "addl/v",0x10,0x40 }, { "subl/v",0x10,0x49 }, { "cmplt",0x10,0x4d },
- { "addq",0x10,0x20 }, { "subq",0x10,0x29 }, { "cmple",0x10,0x6d },
- { "addq/v",0x10,0x60 }, { "subq/v",0x10,0x69 }, { "cmpult",0x10,0x1d },
- { "cmpule",0x10,0x3d },
- { "cmpbge",0x10,0x0f },
-
- { "s4addl",0x10,0x02 }, { "s4subl",0x10,0x0b }, { "s8addl",0x10,0x12 },
- { "s8subl",0x10,0x1b },
- { "s4addq",0x10,0x22 }, { "s4subq",0x10,0x2b }, { "s8addq",0x10,0x32 },
- { "s8subq",0x10,0x3b },
-
- { "and",0x11,0x00 }, { "bis",0x11,0x20 }, { "xor",0x11,0x40 },
- { "bic",0x11,0x08 }, { "ornot",0x11,0x28 }, { "eqv",0x11,0x48 },
- { "cmovq",0x11,0x24 }, { "cmovlt",0x11,0x44 }, { "cmovle",0x11,0x64 },
- { "cmovne",0x11,0x26 }, { "cmovge",0x11,0x46 }, { "cmovgt",0x11,0x66 },
- { "cmovbs",0x11,0x14 }, { "cmovbc",0x11,0x16 },
-
- { "sll",0x12,0x39 }, { "sra",0x12,0x3c }, { "srl",0x12,0x34 },
- { "extbl",0x12,0x06 }, { "insbl",0x12,0x0b }, { "mskbl",0x12,0x02 },
- { "extwl",0x12,0x16 }, { "inswl",0x12,0x1b }, { "mskwl",0x12,0x12 },
- { "extll",0x12,0x26 }, { "insll",0x12,0x2b }, { "mskll",0x12,0x22 },
- { "extql",0x12,0x36 }, { "insql",0x12,0x3b }, { "mskql",0x12,0x32 },
- { "extwh",0x12,0x5a }, { "inswh",0x12,0x57 }, { "mskwh",0x12,0x52 },
- { "extlh",0x12,0x6a }, { "inslh",0x12,0x67 }, { "msklh",0x12,0x62 },
- { "extqh",0x12,0x7a }, { "insqh",0x12,0x77 }, { "mskqh",0x12,0x72 },
- { "zap",0x12,0x30 },
- { "zapnot",0x12,0x31 },
-
- { "mull",0x13,0x00 }, { "mull/v",0x13,0x40 }, { "mulq",0x13,0x20 },
- { "mulq/v",0x13,0x60 }, { "umulh",0x13,0x30 },
+/*
+ * The function field is too big (7 or 11 bits), so the sub-tables
+ * are addressed in a somewhat complicated manner to save
+ * space. After all, alu operations is what RISCs are good at.
+ */
+
+struct tbl {
+ const char *name;
+ int code;
+};
+
+static const struct tbl pal_op_tbl[] = {
+ /* Common PAL function codes. */
+ { "halt", PAL_halt },
+ { "cflush", PAL_cflush },
+ { "draina", PAL_draina },
+ { "cserve", PAL_cserve, },
+ { "swppal", PAL_swppal },
+ { "ipir", PAL_ipir },
+ { "bpt", PAL_bpt },
+ { "bugchk", PAL_bugchk },
+ { "imb", PAL_imb },
+ { "rdunique", PAL_rdunique },
+ { "wrunique", PAL_wrunique },
+ { "gentrap", PAL_gentrap },
+
+ /* OSF/1 PAL function codes. */
+ { "osf1_rdmces", PAL_OSF1_rdmces },
+ { "osf1_wrmces", PAL_OSF1_wrmces },
+ { "osf1_wrfen", PAL_OSF1_wrfen },
+ { "osf1_wrvptptr", PAL_OSF1_wrvptptr },
+ { "osf1_swpctx", PAL_OSF1_swpctx },
+ { "osf1_wrval", PAL_OSF1_wrval },
+ { "osf1_rdval", PAL_OSF1_rdval },
+ { "osf1_tbi", PAL_OSF1_tbi },
+ { "osf1_wrent", PAL_OSF1_wrent },
+ { "osf1_swpipl", PAL_OSF1_swpipl },
+ { "osf1_rdps", PAL_OSF1_rdps },
+ { "osf1_wrkgp", PAL_OSF1_wrkgp },
+ { "osf1_wrusp", PAL_OSF1_wrusp },
+ { "osf1_wrperfmon", PAL_OSF1_wrperfmon },
+ { "osf1_rdusp", PAL_OSF1_rdusp },
+ { "osf1_whami", PAL_OSF1_whami },
+ { "osf1_retsys", PAL_OSF1_retsys },
+ { "osf1_rti", PAL_OSF1_rti },
+ { "osf1_callsys", PAL_OSF1_callsys },
+
+ { NULL, -1 },
+};
+
+static const char *pal_opname __P((int));
+
+static const char *
+pal_opname(op)
+ int op;
+{
+ static char unk[8];
+ int i;
+
+ for (i = 0; pal_op_tbl[i].name != NULL; i++) {
+ if (pal_op_tbl[i].code == op)
+ return (pal_op_tbl[i].name);
+ }
+
+ sprintf(unk, "0x%x", op);
+ return (unk);
+}
+
+/* HW (PAL) instruction qualifiers, stright tables */
+static const char *mXpr_name[8] = {
+ "", "/i", "/a", "/ai", "/p", "/pi", "/pa", "/pai"
+};
+static const char *hwlds_name[8] = {
+ "", "/r", "/a", "/ar", "/p", "/p?r", "_l-c", "_l-c/?r"
+};
+
+/*
+ * For this one we take the low nibble (valid values 0/2/9/b/d)
+ * and shift it down one to get the row index. Within a row
+ * we can just take the high nibble deprived of the high bit
+ * (valid values 0/1/2/3/4/6). We could have used a flat 64
+ * entry array, but in this way we use just 48 pointers.
+ * BUGFIX: the 'cmpbge 0x0f' opcode fits in here too
+ */
+static const char *arit_c0[8] = {
+ "addl", 0, "addq", 0, "addl/v", 0, "addq/v",
+};
+static const char *arit_c2[8] = {
+ "s4addl", "s8addl", "s4addq", "s8addq",
};
+static const char *arit_c9[8] = {
+ "subl", 0, "subq", 0, "subl/v", 0, "subq/v",
+};
+static const char *arit_cB[8] = {
+ "s4subl", "s8subl", "s4subq", "s8subq",
+};
+static const char *arit_cD[8] = {
+ 0, "cmpult", "cmpeq", "cmpule", "cmplt", 0, "cmple",
+};
+static const char *arit_cF[1] = {
+ "cmpbge"
+};
+static const char **arit_opname[8] = {
+ arit_c0, arit_c2, 0, 0, arit_c9, arit_cB, arit_cD, arit_cF
+};
+
+static __inline const char *arit_name __P((int));
+static __inline const char *
+arit_name(op)
+ int op;
+{
+ static char unk[32];
+ const char *name = NULL;
+
+ if (arit_opname[((op)&0xe)>>1])
+ name = arit_opname[((op)&0xe)>>1][((op)&0x70)>>4];
+
+ if (name != NULL)
+ return (name);
-char *jsrnam[] = {
- "jmp",
- "jsr",
- "ret",
- "jsr_coroutine"
+ sprintf(unk, "?arit 0x%x?", op);
+ return (unk);
+}
+
+/*
+ * Something similar for this one, except there are only
+ * 16 entries so the row indexing is done by enumeration
+ * of the low nibble (valid values 0/4/6/8). Then we can
+ * just shift the high nibble to index inside the row
+ * (valid values are 0/2/4 or 1/2/4/6)
+ *
+ * There are two functions that don't play by these simple rules,
+ * so we special-case them.
+ */
+static const char *logical_c0[4] = {
+ "and", "or", "xor", 0
+};
+static const char *logical_c4[4] = {
+ "cmovlbs", "cmoveq", "cmovlt", "cmovle"
+};
+static const char *logical_c6[4] = {
+ "cmovlbc", "cmovne", "cmovge", "cmovgt"
+};
+static const char *logical_c8[4] = {
+ "andnot", "ornot", "xornot", 0
};
-char *regnam __P((int));
+static __inline const char *logical_name __P((int));
+static __inline const char *
+logical_name(op)
+ int op;
+{
+ static char unk[32];
+ const char *name = NULL;
-char *
-regnam(r)
- int r;
+ if (op == op_amask)
+ return ("amask");
+ else if (op == op_implver)
+ return ("implver");
+
+ switch (op & 0xf) {
+ case 0: name = logical_c0[((op)>>5)&3]; break;
+ case 4: name = logical_c4[((op)>>5)&3]; break;
+ case 6: name = logical_c6[((op)>>5)&3]; break;
+ case 8: name = logical_c8[((op)>>5)&3]; break;
+ }
+
+ if (name != NULL)
+ return (name);
+
+ sprintf(unk, "?logical 0x%x?", op);
+ return (unk);
+}
+
+/*
+ * This is the messy one. First, we single out the dense
+ * case of a 3 in the high nibble (valid values 0/1/2/4/6/9/b/c).
+ * Then the case of a 2 in the low nibble (valid values 0/1/2/5/6/7).
+ * For the remaining codes (6/7/a/b) we do as above: high
+ * nibble has valid values 0/1/2 or 5/6/7. The low nibble
+ * can be used as row index picking bits 0 and 2, for the
+ * high one just the lower two bits.
+ */
+static const char *bitop_c3[8] = {
+ "zapnot", "mskql", "srl", "extql", "sll", "insql", "sra", 0
+};
+static const char *bitop_c2[8] = {
+ "mskbl", "mskwl", "mskll", 0/*mskql*/, 0, "mskwh", "msklh", "mskqh"
+};
+static const char *bitop_c67ab[4][4] = {
+/* a */ { 0, "extwh", "extlh", "extqh"},
+/* b */ { "insbl", "inswl", "insll", 0 },
+/* 6 */ { "extbl", "extwl", "extll", 0 },
+/* 7 */ { 0, "inswh", "inslh", "insqh" },
+};
+
+static __inline const char *bitop_name __P((int));
+static __inline const char *
+bitop_name(op)
+ int op;
{
- extern struct db_variable db_regs[];
+ static char unk[32];
+ const char *name = NULL;
+
+ if ((op & 0x70) == 0x30)
+ name = (op == op_zap) ? "zap" : bitop_c3[((op)&0xe)>>1];
+ else if ((op & 0xf) == 0x02)
+ name = bitop_c2[(op)>>4];
+ else
+ name =
+ bitop_c67ab[(((op)&1)|(((op)&0x4)>>1))][(((op)&0x30)>>4)];
+
+ if (name != NULL)
+ return (name);
- if (r == 31)
- return ("zero");
- return (db_regs[r].name);
+ sprintf(unk, "?bit 0x%x?", op);
+ return (unk);
}
-vm_offset_t
-db_disasm(loc, flag)
- vm_offset_t loc;
- boolean_t flag;
+/*
+ * Only 5 entries in this one
+ */
+static const char *mul_opname[4] = {
+ "mull", "mulq", "mull/v", "mulq/v"
+};
+
+static __inline const char *mul_name __P((int));
+static __inline const char *
+mul_name(op)
+ int op;
{
- char rnam[8];
- u_int32_t ins = *(u_int32_t *)loc;
- int opc = ins >> 26;
- int arg = ins & 0x3ffffff;
- int ra, rb, rc, disp, func, imm;
+ static char unk[32];
+ const char *name = NULL;
+
+ name = (op == op_umulh) ? "umulh" : mul_opname[((op)>>5)&3];
+
+ if (name != NULL)
+ return (name);
+
+ sprintf(unk, "?mul 0x%x?", op);
+ return (unk);
+}
+
+/*
+ * These are few, the high nibble is usually enough to dispatch.
+ * We single out the `f' case to halve the table size, as
+ * well as the cases in which the high nibble isn't enough.
+ */
+static const char *special_opname[8] = {
+ "trapb", 0, "mb", 0, "fetch", "fetch_m", "rpcc", "rc"
+};
+
+static __inline const char *special_name __P((int));
+static __inline const char *
+special_name(op)
+ int op;
+{
+ static char unk[32];
+ const char *name;
+
+ switch (op) {
+ case op_excb: name = "excb"; break;
+ case op_wmb: name = "wmb"; break;
+ case op_ecb: name = "ecb"; break;
+ case op_rs: name = "rs"; break;
+ case op_wh64: name = "wh64"; break;
+ default:
+ name = special_opname[(op)>>13];
+ }
+
+ if (name != NULL)
+ return (name);
+
+ sprintf(unk, "?special 0x%x?", op);
+ return (unk);
+}
+
+/*
+ * This is trivial
+ */
+static const char *jump_opname[4] = {
+ "jmp", "jsr", "ret", "jcr"
+};
+#define jump_name(ix) jump_opname[ix]
+
+/*
+ * For all but 4 of these, we can dispatch on the lower nibble of
+ * the "function".
+ */
+static const char *intmisc_opname_3x[16] = {
+ "ctpop", "perr", "ctlz", "cttz", "unpkbw", "unpkbl", "pkwb",
+ "pklb", "minsb8", "minsw4", "minub8", "minuw4", "maxub8",
+ "maxuw4", "maxsb8", "maxsw4",
+};
+
+static __inline const char *intmisc_name __P((int));
+static __inline const char *
+intmisc_name(op)
+ int op;
+{
+ static char unk[32];
+
+ if ((op & 0xf0) == 0x30)
+ return (intmisc_opname_3x[op & 0x0f]);
+
+ switch (op) {
+ case op_sextb: return ("sextb");
+ case op_sextw: return ("sextw");
+ case op_ftoit: return ("ftoit");
+ case op_ftois: return ("ftois");
+ }
+
+ sprintf(unk, "?intmisc 0x%x?", op);
+ return (unk);
+}
+
+static const char *float_name __P((const struct tbl[], int, const char *type));
+
+static const char *
+float_name(tbl, op, type)
+ const struct tbl tbl[];
+ int op;
+ const char *type;
+{
+ static char unk[32];
int i;
- if (opcode[opc].opc_print)
- db_printf("%s\t", opcode[opc].opc_name);
- switch (opcode[opc].opc_fmt) {
- case OPC_PAL:
- switch (arg) {
- case 0x0000000:
- db_printf("halt");
- break;
- case 0x0000080:
- db_printf("bpt");
- break;
- case 0x0000086:
- db_printf("imb");
- break;
- default:
- db_printf("0x%08x", ins);
+ for (i = 0; tbl[i].name != NULL; i++) {
+ if (tbl[i].code == op)
+ return (tbl[i].name);
+ }
+
+ sprintf(unk, "?%s 0x%x?", type, op);
+ return (unk);
+}
+
+#define vaxf_name(op) float_name(vaxf_tbl, op, "vaxfl")
+#define ieeef_name(op) float_name(ieeef_tbl, op, "ieeefl")
+#define anyf_name(op) float_name(anyf_tbl, op, "anyfl")
+
+static const struct tbl anyf_tbl[] = {
+ { "cvtlq", 0x010},
+ { "cpys", 0x020},
+ { "cpysn", 0x021},
+ { "cpyse", 0x022},
+ { "mt_fpcr", 0x024},
+ { "mf_fpcr", 0x025},
+ { "fcmoveq", 0x02a},
+ { "fcmovne", 0x02b},
+ { "fcmovlt", 0x02c},
+ { "fcmovge", 0x02d},
+ { "fcmovle", 0x02e},
+ { "fcmovgt", 0x02f},
+ { "cvtql", 0x030},
+ { "cvtql/v", 0x130},
+ { "cvtql/sv", 0x330},
+ { 0, 0},
+};
+
+static const struct tbl ieeef_tbl[] = {
+ { "adds/c", 0x000},
+ { "subs/c", 0x001},
+ { "muls/c", 0x002},
+ { "divs/c", 0x003},
+ { "addt/c", 0x020},
+ { "subt/c", 0x021},
+ { "mult/c", 0x022},
+ { "divt/c", 0x023},
+ { "cvtts/c", 0x02c},
+ { "cvttq/c", 0x02f},
+ { "cvtqs/c", 0x03c},
+ { "cvtqt/c", 0x03e},
+ { "adds/m", 0x040},
+ { "subs/m", 0x041},
+ { "muls/m", 0x042},
+ { "divs/m", 0x043},
+ { "addt/m", 0x060},
+ { "subt/m", 0x061},
+ { "mult/m", 0x062},
+ { "divt/m", 0x063},
+ { "cvtts/m", 0x06c},
+ { "cvtqs/m", 0x07c},
+ { "cvtqt/m", 0x07e},
+ { "adds", 0x080},
+ { "subs", 0x081},
+ { "muls", 0x082},
+ { "divs", 0x083},
+ { "addt", 0x0a0},
+ { "subt", 0x0a1},
+ { "mult", 0x0a2},
+ { "divt", 0x0a3},
+ { "cmptun", 0x0a4},
+ { "cmpteq", 0x0a5},
+ { "cmptlt", 0x0a6},
+ { "cmptle", 0x0a7},
+ { "cvtts", 0x0ac},
+ { "cvttq", 0x0af},
+ { "cvtqs", 0x0bc},
+ { "cvtqt", 0x0be},
+ { "adds/d", 0x0c0},
+ { "subs/d", 0x0c1},
+ { "muls/d", 0x0c2},
+ { "divs/d", 0x0c3},
+ { "addt/d", 0x0e0},
+ { "subt/d", 0x0e1},
+ { "mult/d", 0x0e2},
+ { "divt/d", 0x0e3},
+ { "cvtts/d", 0x0ec},
+ { "cvtqs/d", 0x0fc},
+ { "cvtqt/d", 0x0fe},
+ { "adds/uc", 0x100},
+ { "subs/uc", 0x101},
+ { "muls/uc", 0x102},
+ { "divs/uc", 0x103},
+ { "addt/uc", 0x120},
+ { "subt/uc", 0x121},
+ { "mult/uc", 0x122},
+ { "divt/uc", 0x123},
+ { "cvtts/uc", 0x12c},
+ { "cvttq/vc", 0x12f},
+ { "adds/um", 0x140},
+ { "subs/um", 0x141},
+ { "muls/um", 0x142},
+ { "divs/um", 0x143},
+ { "addt/um", 0x160},
+ { "subt/um", 0x161},
+ { "mult/um", 0x162},
+ { "divt/um", 0x163},
+ { "cvtts/um", 0x16c},
+ { "adds/u", 0x180},
+ { "subs/u", 0x181},
+ { "muls/u", 0x182},
+ { "divs/u", 0x183},
+ { "addt/u", 0x1a0},
+ { "subt/u", 0x1a1},
+ { "mult/u", 0x1a2},
+ { "divt/u", 0x1a3},
+ { "cvtts/u", 0x1ac},
+ { "cvttq/v", 0x1af},
+ { "adds/ud", 0x1c0},
+ { "subs/ud", 0x1c1},
+ { "muls/ud", 0x1c2},
+ { "divs/ud", 0x1c3},
+ { "addt/ud", 0x1e0},
+ { "subt/ud", 0x1e1},
+ { "mult/ud", 0x1e2},
+ { "divt/ud", 0x1e3},
+ { "cvtts/ud", 0x1ec},
+ { "adds/suc", 0x500},
+ { "subs/suc", 0x501},
+ { "muls/suc", 0x502},
+ { "divs/suc", 0x503},
+ { "addt/suc", 0x520},
+ { "subt/suc", 0x521},
+ { "mult/suc", 0x522},
+ { "divt/suc", 0x523},
+ { "cvtts/suc", 0x52c},
+ { "cvttq/svc", 0x52f},
+ { "adds/sum", 0x540},
+ { "subs/sum", 0x541},
+ { "muls/sum", 0x542},
+ { "divs/sum", 0x543},
+ { "addt/sum", 0x560},
+ { "subt/sum", 0x561},
+ { "mult/sum", 0x562},
+ { "divt/sum", 0x563},
+ { "cvtts/sum", 0x56c},
+ { "adds/su", 0x580},
+ { "subs/su", 0x581},
+ { "muls/su", 0x582},
+ { "divs/su", 0x583},
+ { "addt/su", 0x5a0},
+ { "subt/su", 0x5a1},
+ { "mult/su", 0x5a2},
+ { "divt/su", 0x5a3},
+ { "cmptun/su", 0x5a4},
+ { "cmpteq/su", 0x5a5},
+ { "cmptlt/su", 0x5a6},
+ { "cmptle/su", 0x5a7},
+ { "cvtts/su", 0x5ac},
+ { "cvttq/sv", 0x5af},
+ { "adds/sud", 0x5c0},
+ { "subs/sud", 0x5c1},
+ { "muls/sud", 0x5c2},
+ { "divs/sud", 0x5c3},
+ { "addt/sud", 0x5e0},
+ { "subt/sud", 0x5e1},
+ { "mult/sud", 0x5e2},
+ { "divt/sud", 0x5e3},
+ { "cvtts/sud", 0x5ec},
+ { "adds/suic", 0x700},
+ { "subs/suic", 0x701},
+ { "muls/suic", 0x702},
+ { "divs/suic", 0x703},
+ { "addt/suic", 0x720},
+ { "subt/suic", 0x721},
+ { "mult/suic", 0x722},
+ { "divt/suic", 0x723},
+ { "cvtts/suic", 0x72c},
+ { "cvttq/svic", 0x72f},
+ { "cvtqs/suic", 0x73c},
+ { "cvtqt/suic", 0x73e},
+ { "adds/suim", 0x740},
+ { "subs/suim", 0x741},
+ { "muls/suim", 0x742},
+ { "divs/suim", 0x743},
+ { "addt/suim", 0x760},
+ { "subt/suim", 0x761},
+ { "mult/suim", 0x762},
+ { "divt/suim", 0x763},
+ { "cvtts/suim", 0x76c},
+ { "cvtqs/suim", 0x77c},
+ { "cvtqt/suim", 0x77e},
+ { "adds/sui", 0x780},
+ { "subs/sui", 0x781},
+ { "muls/sui", 0x782},
+ { "divs/sui", 0x783},
+ { "addt/sui", 0x7a0},
+ { "subt/sui", 0x7a1},
+ { "mult/sui", 0x7a2},
+ { "divt/sui", 0x7a3},
+ { "cvtts/sui", 0x7ac},
+ { "cvttq/svi", 0x7af},
+ { "cvtqs/sui", 0x7bc},
+ { "cvtqt/sui", 0x7be},
+ { "adds/suid", 0x7c0},
+ { "subs/suid", 0x7c1},
+ { "muls/suid", 0x7c2},
+ { "divs/suid", 0x7c3},
+ { "addt/suid", 0x7e0},
+ { "subt/suid", 0x7e1},
+ { "mult/suid", 0x7e2},
+ { "divt/suid", 0x7e3},
+ { "cvtts/suid", 0x7ec},
+ { "cvtqs/suid", 0x7fc},
+ { "cvtqt/suid", 0x7fe},
+ { 0, 0}
+};
+
+static const struct tbl vaxf_tbl[] = {
+ { "addf/c", 0x000},
+ { "subf/c", 0x001},
+ { "mulf/c", 0x002},
+ { "divf/c", 0x003},
+ { "cvtdg/c", 0x01e},
+ { "addg/c", 0x020},
+ { "subg/c", 0x021},
+ { "mulg/c", 0x022},
+ { "divg/c", 0x023},
+ { "cvtgf/c", 0x02c},
+ { "cvtgd/c", 0x02d},
+ { "cvtgq/c", 0x02f},
+ { "cvtqf/c", 0x03c},
+ { "cvtqg/c", 0x03e},
+ { "addf", 0x080},
+ { "subf", 0x081},
+ { "mulf", 0x082},
+ { "divf", 0x083},
+ { "cvtdg", 0x09e},
+ { "addg", 0x0a0},
+ { "subg", 0x0a1},
+ { "mulg", 0x0a2},
+ { "divg", 0x0a3},
+ { "cmpgeq", 0x0a5},
+ { "cmpglt", 0x0a6},
+ { "cmpgle", 0x0a7},
+ { "cvtgf", 0x0ac},
+ { "cvtgd", 0x0ad},
+ { "cvtgq", 0x0af},
+ { "cvtqf", 0x0bc},
+ { "cvtqg", 0x0be},
+ { "addf/uc", 0x100},
+ { "subf/uc", 0x101},
+ { "mulf/uc", 0x102},
+ { "divf/uc", 0x103},
+ { "cvtdg/uc", 0x11e},
+ { "addg/uc", 0x120},
+ { "subg/uc", 0x121},
+ { "mulg/uc", 0x122},
+ { "divg/uc", 0x123},
+ { "cvtgf/uc", 0x12c},
+ { "cvtgd/uc", 0x12d},
+ { "cvtgq/vc", 0x12f},
+ { "addf/u", 0x180},
+ { "subf/u", 0x181},
+ { "mulf/u", 0x182},
+ { "divf/u", 0x183},
+ { "cvtdg/u", 0x19e},
+ { "addg/u", 0x1a0},
+ { "subg/u", 0x1a1},
+ { "mulg/u", 0x1a2},
+ { "divg/u", 0x1a3},
+ { "cvtgf/u", 0x1ac},
+ { "cvtgd/u", 0x1ad},
+ { "cvtgq/v", 0x1af},
+ { "addf/sc", 0x400},
+ { "subf/sc", 0x401},
+ { "mulf/sc", 0x402},
+ { "divf/sc", 0x403},
+ { "cvtdg/sc", 0x41e},
+ { "addg/sc", 0x420},
+ { "subg/sc", 0x421},
+ { "mulg/sc", 0x422},
+ { "divg/sc", 0x423},
+ { "cvtgf/sc", 0x42c},
+ { "cvtgd/sc", 0x42d},
+ { "cvtgq/sc", 0x42f},
+ { "cvtqf/sc", 0x43c},
+ { "cvtqg/sc", 0x43e},
+ { "addf/s", 0x480},
+ { "subf/s", 0x481},
+ { "mulf/s", 0x482},
+ { "divf/s", 0x483},
+ { "cvtdg/s", 0x49e},
+ { "addg/s", 0x4a0},
+ { "subg/s", 0x4a1},
+ { "mulg/s", 0x4a2},
+ { "divg/s", 0x4a3},
+ { "cmpgeq/s", 0x4a5},
+ { "cmpglt/s", 0x4a6},
+ { "cmpgle/s", 0x4a7},
+ { "cvtgf/s", 0x4ac},
+ { "cvtgd/s", 0x4ad},
+ { "cvtgq/s", 0x4af},
+ { "cvtqf/s", 0x4bc},
+ { "cvtqg/s", 0x4be},
+ { "addf/suc", 0x500},
+ { "subf/suc", 0x501},
+ { "mulf/suc", 0x502},
+ { "divf/suc", 0x503},
+ { "cvtdg/suc", 0x51e},
+ { "addg/suc", 0x520},
+ { "subg/suc", 0x521},
+ { "mulg/suc", 0x522},
+ { "divg/suc", 0x523},
+ { "cvtgf/suc", 0x52c},
+ { "cvtgd/suc", 0x52d},
+ { "cvtgq/svc", 0x52f},
+ { "addf/su", 0x580},
+ { "subf/su", 0x581},
+ { "mulf/su", 0x582},
+ { "divf/su", 0x583},
+ { "cvtdg/su", 0x59e},
+ { "addg/su", 0x5a0},
+ { "subg/su", 0x5a1},
+ { "mulg/su", 0x5a2},
+ { "divg/su", 0x5a3},
+ { "cvtgf/su", 0x5ac},
+ { "cvtgd/su", 0x5ad},
+ { "cvtgq/sv", 0x5af},
+ { 0, 0}
+};
+
+/*
+ * General purpose registers
+ */
+static const char *name_of_register[32] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"
+};
+
+static int regcount; /* how many regs used in this inst */
+static int regnum[3]; /* which regs used in this inst */
+
+static const char *register_name __P((int));
+
+static const char *
+register_name (ireg)
+ int ireg;
+{
+ int i;
+
+ for (i = 0; i < regcount; i++)
+ if (regnum[i] == ireg)
break;
- }
+ if (i >= regcount)
+ regnum[regcount++] = ireg;
+ return (name_of_register[ireg]);
+}
+
+/*
+ * Disassemble instruction at 'loc'. 'altfmt' specifies an
+ * (optional) alternate format. Return address of start of
+ * next instruction.
+ */
+int alpha_print_instruction __P((db_addr_t, alpha_instruction, boolean_t));
+
+db_addr_t
+db_disasm(loc, altfmt)
+ db_addr_t loc;
+ boolean_t altfmt;
+{
+ alpha_instruction inst;
+
+ inst.bits = db_get_value(loc, 4, 0);
+
+ loc += alpha_print_instruction(loc, inst, altfmt);
+ return (loc);
+}
+
+int
+alpha_print_instruction(iadr, i, showregs)
+ db_addr_t iadr;
+ alpha_instruction i;
+ boolean_t showregs;
+{
+ const char *opcode;
+ int ireg;
+ long signed_immediate;
+ boolean_t fstore;
+ pal_instruction p;
+
+ regcount = 0;
+ fstore = FALSE;
+ opcode = op_name[i.mem_format.opcode];
+
+ /*
+ * Dispatch directly on the opcode, save code
+ * duplication sometimes via "harmless gotos".
+ */
+ switch (i.mem_format.opcode) {
+ case op_pal:
+ /* "call_pal" is a long string; just use a space. */
+ db_printf("%s %s", opcode, pal_opname(i.pal_format.function));
break;
- case OPC_RES:
- db_printf("0x%08x", ins);
+ case op_lda:
+ case op_ldah:
+ case op_ldbu:
+ case op_ldq_u:
+ case op_ldwu:
+ case op_stw:
+ case op_stb:
+ case op_stq_u:
+ /*
+ * These loadstores are here to make compiling the
+ * switch a bit easier. Could embellish the output
+ * someday, too.
+ */
+ goto loadstore;
break;
- case OPC_MEM:
- ra = arg >> 21;
- rb = (arg >> 16) & 0x1f;
- disp = arg & 0xffff;
- switch (opc) {
- case 0x18:
- /* Memory fmt with a function code */
- switch (disp) {
- case 0x0000:
- db_printf("trapb");
- break;
- case 0x4000:
- db_printf("mb");
- break;
- case 0x8000:
- db_printf("fetch\t0(%s)", regnam(rb));
- break;
- case 0xa000:
- db_printf("fetch_m\t0($s)", regnam(rb));
- break;
- case 0xc000:
- db_printf("rpcc\t%s", regnam(ra));
+ case op_arit:
+ /*
+ * For this and the following three groups we
+ * just need different opcode strings
+ */
+ opcode = arit_name(i.operate_lit_format.function);
+ goto operate;
+ break;
+ case op_logical:
+ opcode = logical_name(i.operate_lit_format.function);
+ goto operate;
+ break;
+ case op_bit:
+ opcode = bitop_name(i.operate_lit_format.function);
+ goto operate;
+ break;
+ case op_mul:
+ opcode = mul_name(i.operate_lit_format.function);
+operate:
+ /*
+ * Nice and uniform, just check for literals
+ */
+ db_printf("%s\t%s,", opcode,
+ register_name(i.operate_lit_format.ra));
+ if (i.operate_lit_format.one)
+ db_printf("#0x%x", i.operate_lit_format.literal);
+ else
+ db_printf("%s", register_name(i.operate_reg_format.rb));
+ db_printf(",%s", register_name(i.operate_lit_format.rc));
+ break;
+ case op_vax_float:
+ /*
+ * The three floating point groups are even simpler
+ */
+ opcode = vaxf_name(i.float_format.function);
+ goto foperate;
+ break;
+ case op_ieee_float:
+ opcode = ieeef_name(i.float_format.function);
+ goto foperate;
+ break;
+ case op_any_float:
+ opcode = anyf_name(i.float_format.function);
+foperate:
+ db_printf("%s\tf%d,f%d,f%d", opcode,
+ i.float_format.fa,
+ i.float_format.fb,
+ i.float_format.fc);
+ break;
+ case op_special:
+ /*
+ * Miscellaneous.
+ */
+ {
+ register unsigned int code;
+
+ code = (i.mem_format.displacement)&0xffff;
+ opcode = special_name(code);
+
+ switch (code) {
+ case op_ecb:
+ db_printf("%s\t(%s)", opcode,
+ register_name(i.mem_format.rb));
break;
- case 0xe000:
- db_printf("rc\t%s", regnam(ra));
+ case op_fetch:
+ case op_fetch_m:
+ db_printf("%s\t0(%s)", opcode,
+ register_name(i.mem_format.rb));
break;
- case 0xf000:
- db_printf("rs\t%s", regnam(ra));
+ case op_rpcc:
+ case op_rc:
+ case op_rs:
+ db_printf("%s\t%s", opcode,
+ register_name(i.mem_format.ra));
break;
default:
- db_printf("0x%08x", ins);
- break;
- }
- break;
- case 0x1a:
- db_printf("%s\t\t%s,(%s),0x%x", jsrnam[disp >> 14],
- regnam(ra), regnam(rb), disp & 0x3fff);
- break;
- default:
- db_printf("\t%s,0x%x(%s)", regnam(ra), disp,
- regnam(rb));
+ db_printf("%s", opcode);
break;
+ }
}
break;
- case OPC_OP:
- ra = arg >> 21;
- rb = (arg >> 16) & 0x1f;
- func = (arg >> 5) & 0x7f;
- imm = (arg >> 5) & 0x80;
- rc = arg & 0x1f;
-
- switch (opc) {
- case 0x11:
- if (func == 0x20 && imm == 0 && ra == 31 &&
- rb == 31 && rc == 31) {
- db_printf("nop");
- break;
- }
- /*FALLTHROUGH*/
- case 0x10:
- case 0x12:
- case 0x13:
- if (imm) /* literal */
- sprintf(rnam, "0x%x", (arg >> 13) & 0xff);
- else
- sprintf(rnam, "%s", regnam(rb));
-
- for (i = 0; i < sizeof opinstr/sizeof(opinstr[0]); i++)
- if (opinstr[i].opc == opc &&
- opinstr[i].func == func)
- break;
- if (i != sizeof opinstr/sizeof(opinstr[0]))
- db_printf("%s\t\t%s,%s,%s",
- opinstr[i].nam, regnam(ra), rnam,
- regnam(rc));
- else
- db_printf("%s\t\t0x%03x,%s,%s,%s",
- opcode[opc].opc_name, func,
- regnam(ra), rnam, regnam(rc));
+ case op_j:
+ /*
+ * Jump instructions really are of two sorts,
+ * depending on the use of the hint info.
+ */
+ opcode = jump_name(i.jump_format.action);
+ switch (i.jump_format.action) {
+ case op_jmp:
+ case op_jsr:
+ db_printf("%s\t%s,(%s),", opcode,
+ register_name(i.jump_format.ra),
+ register_name(i.jump_format.rb));
+ signed_immediate = i.jump_format.hint;
+ goto branch_displacement;
break;
- default:
- db_printf("0x%03x,%s,%s,%s", func, regnam(ra),
- regnam(rb), regnam(rc));
+ case op_ret:
+ case op_jcr:
+ db_printf("%s\t%s,(%s)", opcode,
+ register_name(i.jump_format.ra),
+ register_name(i.jump_format.rb));
break;
}
break;
- case OPC_BR:
- ra = arg >> 21;
- disp = arg & 0x1fffff;
- db_printf("\t%s,0x%x [", regnam(ra), disp);
- disp = (disp & 0x100000) ? -((-disp) & 0xfffff) << 2 :
- (disp & 0xfffff) << 2;
- db_printsym(loc + sizeof (int) + disp, DB_STGY_PROC);
- db_printf("]");
+ case op_intmisc:
+ /*
+ * These are just in "operate" format.
+ */
+ opcode = intmisc_name(i.operate_lit_format.function);
+ goto operate;
+ break;
+ /* HW instructions, possibly chip-specific XXXX */
+ case op_pal19: /* "hw_mfpr" */
+ case op_pal1d: /* "hw_mtpr" */
+ p.bits = i.bits;
+ db_printf("\t%s%s\t%s, %d", opcode,
+ mXpr_name[p.mXpr_format.regset],
+ register_name(p.mXpr_format.rd),
+ p.mXpr_format.index);
+ break;
+ case op_pal1b: /* "hw_ld" */
+ case op_pal1f: /* "hw_st" */
+ p.bits = i.bits;
+ db_printf("\t%s%c%s\t%s,", opcode,
+ (p.mem_format.qw) ? 'q' : 'l',
+ hwlds_name[p.mem_format.qualif],
+ register_name(p.mem_format.rd));
+ signed_immediate = (long)p.mem_format.displacement;
+ goto loadstore_address;
+
+ case op_pal1e: /* "hw_rei" */
+ db_printf("\t%s", opcode);
break;
+
+ case op_ldf:
+ case op_ldg:
+ case op_lds:
+ case op_ldt:
+ case op_stf:
+ case op_stg:
+ case op_sts:
+ case op_stt:
+ fstore = TRUE;
+ /* fall through */
+ case op_ldl:
+ case op_ldq:
+ case op_ldl_l:
+ case op_ldq_l:
+ case op_stl:
+ case op_stq:
+ case op_stl_c:
+ case op_stq_c:
+ /*
+ * Memory operations, including floats
+ */
+loadstore:
+ if (fstore)
+ db_printf("%s\tf%d,", opcode, i.mem_format.ra);
+ else
+ db_printf("%s\t%s,", opcode,
+ register_name(i.mem_format.ra));
+ signed_immediate = (long)i.mem_format.displacement;
+loadstore_address:
+	db_printf("%ld(%s)", signed_immediate,
+ register_name(i.mem_format.rb));
+ /*
+ * For convenience, do the address computation
+ */
+ if (showregs) {
+ if (i.mem_format.opcode == op_ldah)
+ signed_immediate <<= 16;
+ db_printf(" <0x%lx>", signed_immediate +
+ db_register_value(DDB_REGS, i.mem_format.rb));
+ }
+ break;
+ case op_br:
+ case op_fbeq:
+ case op_fblt:
+ case op_fble:
+ case op_bsr:
+ case op_fbne:
+ case op_fbge:
+ case op_fbgt:
+ case op_blbc:
+ case op_beq:
+ case op_blt:
+ case op_ble:
+ case op_blbs:
+ case op_bne:
+ case op_bge:
+ case op_bgt:
+ /*
+ * We want to know where we are branching to
+ */
+ signed_immediate = (long)i.branch_format.displacement;
+ db_printf("%s\t%s,", opcode,
+ register_name(i.branch_format.ra));
+branch_displacement:
+ db_printsym(iadr + sizeof(alpha_instruction) +
+ (signed_immediate << 2), DB_STGY_PROC);
+ break;
+ default:
+ /*
+ * Shouldn't happen
+ */
+ db_printf("? 0x%x ?", i.bits);
+ }
+
+ /*
+ * Print out the registers used in this instruction
+ */
+ if (showregs && regcount > 0) {
+ db_printf("\t<");
+ for (ireg = 0; ireg < regcount; ireg++) {
+ if (ireg != 0)
+ db_printf(",");
+ db_printf("%s=0x%lx",
+ name_of_register[regnum[ireg]],
+ db_register_value(DDB_REGS, regnum[ireg]));
+ }
+ db_printf(">");
}
db_printf("\n");
- return (loc + sizeof (int));
+ return (sizeof(alpha_instruction));
}
diff --git a/sys/arch/alpha/alpha/db_instruction.h b/sys/arch/alpha/alpha/db_instruction.h
new file mode 100644
index 00000000000..1a1dbbefab7
--- /dev/null
+++ b/sys/arch/alpha/alpha/db_instruction.h
@@ -0,0 +1,727 @@
+/* $NetBSD: db_instruction.h,v 1.6 2000/03/20 02:54:45 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1999 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * File: alpha_instruction.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/91
+ *
+ * Alpha Instruction set definition
+ *
+ * Reference: "Alpha System Reference Manual", V4.0, April 1991
+ *
+ */
+
+#ifndef _ALPHA_INSTRUCTION_H_
+#define _ALPHA_INSTRUCTION_H_ 1
+
+#if !defined(ASSEMBLER)
+
+/*
+ * All instructions are in one of five formats:
+ * Memory, Branch, Operate, Floating-point Operate, PAL
+ *
+ * The original Mach sources attempted to use 'smarter' names
+ * for registers, which reflected source and destination. These
+ * definitions use the names from the Architecture Reference Manual,
+ * both for clarity and because you can't differentiate between
+ * 'source' and 'destinations' for some types of instructions (loads
+ * and stores; they'd be correct for one, but swapped for the other).
+ */
+
+
+typedef union {
+ /*
+ * All instructions are 32 bits wide
+ */
+ unsigned int bits;
+
+ /*
+ * Generic instruction pseudo format; look at
+ * opcode to see how to interpret the rest.
+ */
+ struct {
+ unsigned bits:26,
+ opcode:6;
+ } generic_format;
+
+ /*
+ * Memory instructions contain a 16 bit
+ * signed immediate value and two register
+ * specifiers
+ */
+ struct {
+ signed short displacement;
+ unsigned rb : 5,
+ ra : 5,
+ opcode : 6;
+ } mem_format;
+
+ /*
+ * Branch instruction contain a 21 bit offset,
+ * which is sign-extended, shifted and combined
+ * with the PC to form a 64 bit destination address.
+ *
+ * In computed jump instructions the opcode is further
+ * specified in the offset field, the rest of it is
+ * used as branch target hint. The destination of the
+ * jump is the source register.
+ */
+ struct {
+ signed int displacement : 21;
+ unsigned ra : 5,
+ opcode : 6;
+ } branch_format;
+
+ struct {
+ signed int hint : 14;
+ unsigned action : 2,
+ rb : 5,
+ ra : 5,
+ opcode : 6;
+ } jump_format;
+
+
+ /*
+ * Operate instructions are of two types, with
+ * a second source register or with a literal
+ * specifier. Bit 12 sez which is which.
+ */
+ struct {
+ unsigned rc : 5,
+ function : 7,
+ is_lit : 1,
+ sbz_or_litlo : 3,
+ rb_or_lithi : 5,
+ ra : 5,
+ opcode : 6;
+ } operate_generic_format;
+
+ struct {
+ unsigned rc : 5,
+ function : 7,
+ zero : 1,
+ sbz : 3,
+ rb : 5,
+ ra : 5,
+ opcode : 6;
+ } operate_reg_format;
+
+ struct {
+ unsigned rc : 5,
+ function : 7,
+ one : 1,
+ literal : 8,
+ ra : 5,
+ opcode : 6;
+ } operate_lit_format;
+
+
+ /*
+ * Floating point operate instruction are quite
+ * uniform in the encoding. As for the semantics..
+ */
+ struct {
+ unsigned fc : 5,
+ function : 11,
+ fb : 5,
+ fa : 5,
+ opcode : 6;
+ } float_format;
+
+
+ /*
+ * PAL instructions just define the major opcode
+ */
+
+ struct {
+ unsigned function : 26,
+ opcode : 6;
+ } pal_format;
+
+} alpha_instruction;
+
+#endif /* !defined(ASSEMBLER) */
+
+/*
+ *
+ * Encoding of regular instructions (Appendix C op cit)
+ *
+ */
+
+ /* OPCODE, bits 26..31 */
+
+#define op_pal 0x00 /* see PAL sub-table */
+ /* 1..7 reserved */
+#define op_lda 0x08
+#define op_ldah 0x09
+#define op_ldbu 0x0a
+#define op_ldq_u 0x0b
+#define op_ldwu 0x0c
+#define op_stw 0x0d
+#define op_stb 0x0e
+#define op_stq_u 0x0f
+
+#define op_arit 0x10 /* see ARIT sub-table */
+#define op_logical 0x11 /* see LOGICAL sub-table */
+#define op_bit 0x12 /* see BIT sub-table */
+#define op_mul 0x13 /* see MUL sub-table */
+ /* reserved */
+#define op_vax_float 0x15 /* see FLOAT sub-table */
+#define op_ieee_float 0x16 /* see FLOAT sub-table */
+#define op_any_float 0x17 /* see FLOAT sub-table */
+
+#define op_special 0x18 /* see SPECIAL sub-table */
+#define op_pal19 0x19 /* reserved for pal code */
+#define op_j 0x1a /* see JUMP sub-table */
+#define op_pal1b 0x1b /* reserved for pal code */
+#define op_intmisc 0x1c /* see INTMISC sub-table */
+#define op_pal1d 0x1d /* reserved for pal code */
+#define op_pal1e 0x1e /* reserved for pal code */
+#define op_pal1f 0x1f /* reserved for pal code */
+
+#define op_ldf 0x20
+#define op_ldg 0x21
+#define op_lds 0x22
+#define op_ldt 0x23
+#define op_stf 0x24
+#define op_stg 0x25
+#define op_sts 0x26
+#define op_stt 0x27
+#define op_ldl 0x28
+#define op_ldq 0x29
+#define op_ldl_l 0x2a
+#define op_ldq_l 0x2b
+#define op_stl 0x2c
+#define op_stq 0x2d
+#define op_stl_c 0x2e
+#define op_stq_c 0x2f
+#define op_br 0x30
+#define op_fbeq 0x31
+#define op_fblt 0x32
+#define op_fble 0x33
+#define op_bsr 0x34
+#define op_fbne 0x35
+#define op_fbge 0x36
+#define op_fbgt 0x37
+#define op_blbc 0x38
+#define op_beq 0x39
+#define op_blt 0x3a
+#define op_ble 0x3b
+#define op_blbs 0x3c
+#define op_bne 0x3d
+#define op_bge 0x3e
+#define op_bgt 0x3f
+
+
+ /* PAL, "function" opcodes (bits 0..25) */
+/*
+ * What we will implement is TBD. These are the unprivileged ones
+ * that we probably have to support for compat reasons.
+ */
+
+/* See <machine/pal.h> */
+
+ /* ARIT, "function" opcodes (bits 5..11) */
+
+#define op_addl 0x00
+#define op_s4addl 0x02
+#define op_subl 0x09
+#define op_s4subl 0x0b
+#define op_cmpbge 0x0f
+#define op_s8addl 0x12
+#define op_s8subl 0x1b
+#define op_cmpult 0x1d
+#define op_addq 0x20
+#define op_s4addq 0x22
+#define op_subq 0x29
+#define op_s4subq 0x2b
+#define op_cmpeq 0x2d
+#define op_s8addq 0x32
+#define op_s8subq 0x3b
+#define op_cmpule 0x3d
+#define op_addl_v 0x40
+#define op_subl_v 0x49
+#define op_cmplt 0x4d
+#define op_addq_v 0x60
+#define op_subq_v 0x69
+#define op_cmple 0x6d
+
+
+ /* LOGICAL, "function" opcodes (bits 5..11) */
+
+#define op_and 0x00
+#define op_andnot 0x08 /* bic */
+#define op_cmovlbs 0x14
+#define op_cmovlbc 0x16
+#define op_or 0x20 /* bis */
+#define op_cmoveq 0x24
+#define op_cmovne 0x26
+#define op_ornot 0x28
+#define op_xor 0x40
+#define op_cmovlt 0x44
+#define op_cmovge 0x46
+#define op_xornot 0x48 /* eqv */
+#define op_amask 0x61
+#define op_cmovle 0x64
+#define op_cmovgt 0x66
+#define op_implver 0x6c
+
+ /* BIT, "function" opcodes (bits 5..11) */
+
+#define op_mskbl 0x02
+#define op_extbl 0x06
+#define op_insbl 0x0b
+#define op_mskwl 0x12
+#define op_extwl 0x16
+#define op_inswl 0x1b
+#define op_mskll 0x22
+#define op_extll 0x26
+#define op_insll 0x2b
+#define op_zap 0x30
+#define op_zapnot 0x31
+#define op_mskql 0x32
+#define op_srl 0x34
+#define op_extql 0x36
+#define op_sll 0x39
+#define op_insql 0x3b
+#define op_sra 0x3c
+#define op_mskwh 0x52
+#define op_inswh 0x57
+#define op_extwh 0x5a
+#define op_msklh 0x62
+#define op_inslh 0x67
+#define op_extlh 0x6a
+#define op_extqh 0x7a
+#define op_insqh 0x77
+#define op_mskqh 0x72
+
+ /* MUL, "function" opcodes (bits 5..11) */
+
+#define op_mull 0x00
+#define op_mulq_v 0x60
+#define op_mull_v 0x40
+#define op_umulh 0x30
+#define op_mulq 0x20
+
+
+ /* SPECIAL, "displacement" opcodes (bits 0..15) */
+
+#define op_trapb 0x0000
+#define op_excb 0x0400
+#define op_mb 0x4000
+#define op_wmb 0x4400
+#define op_fetch 0x8000
+#define op_fetch_m 0xa000
+#define op_rpcc 0xc000
+#define op_rc 0xe000
+#define op_ecb 0xe800
+#define op_rs 0xf000
+#define op_wh64 0xf800
+
+ /* JUMP, "action" opcodes (bits 14..15) */
+
+#define op_jmp 0x0
+#define op_jsr 0x1
+#define op_ret 0x2
+#define op_jcr 0x3
+
+ /* INTMISC, "function" opcodes (operate format) */
+
+#define op_sextb 0x00
+#define op_sextw 0x01
+#define op_ctpop 0x30
+#define op_perr 0x31
+#define op_ctlz 0x32
+#define op_cttz 0x33
+#define op_unpkbw 0x34
+#define op_unpkbl 0x35
+#define op_pkwb 0x36
+#define op_pklb 0x37
+#define op_minsb8 0x38
+#define op_minsw4 0x39
+#define op_minub8 0x3a
+#define op_minuw4 0x3b
+#define op_maxub8 0x3c
+#define op_maxuw4 0x3d
+#define op_maxsb8 0x3e
+#define op_maxsw4 0x3f
+#define op_ftoit 0x70
+#define op_ftois 0x78
+
+/*
+ *
+ * Encoding of floating point instructions (pagg. C-5..6 op cit)
+ *
+ * Load and store operations use opcodes op_ldf..op_stt
+ */
+
+ /* any FLOAT, "function" opcodes (bits 5..11) */
+
+#define op_cvtlq 0x010
+#define op_cpys 0x020
+#define op_cpysn 0x021
+#define op_cpyse 0x022
+#define op_mt_fpcr 0x024
+#define op_mf_fpcr 0x025
+#define op_fcmoveq 0x02a
+#define op_fcmovne 0x02b
+#define op_fcmovlt 0x02c
+#define op_fcmovge 0x02d
+#define op_fcmovle 0x02e
+#define op_fcmovgt 0x02f
+#define op_cvtql 0x030
+#define op_cvtql_v 0x130
+#define op_cvtql_sv 0x330
+
+
+ /* ieee FLOAT, "function" opcodes (bits 5..11) */
+
+#define op_adds_c 0x000
+#define op_subs_c 0x001
+#define op_muls_c 0x002
+#define op_divs_c 0x003
+#define op_addt_c 0x020
+#define op_subt_c 0x021
+#define op_mult_c 0x022
+#define op_divt_c 0x023
+#define op_cvtts_c 0x02c
+#define op_cvttq_c 0x02f
+#define op_cvtqs_c 0x03c
+#define op_cvtqt_c 0x03e
+#define op_adds_m 0x040
+#define op_subs_m 0x041
+#define op_muls_m 0x042
+#define op_divs_m 0x043
+#define op_addt_m 0x060
+#define op_subt_m 0x061
+#define op_mult_m 0x062
+#define op_divt_m 0x063
+#define op_cvtts_m 0x06c
+#define op_cvtqs_m 0x07c
+#define op_cvtqt_m 0x07e
+#define op_adds 0x080
+#define op_subs 0x081
+#define op_muls 0x082
+#define op_divs 0x083
+#define op_addt 0x0a0
+#define op_subt 0x0a1
+#define op_mult 0x0a2
+#define op_divt 0x0a3
+#define op_cmptun 0x0a4
+#define op_cmpteq 0x0a5
+#define op_cmptlt 0x0a6
+#define op_cmptle 0x0a7
+#define op_cvtts 0x0ac
+#define op_cvttq 0x0af
+#define op_cvtqs 0x0bc
+#define op_cvtqt 0x0be
+#define op_adds_d 0x0c0
+#define op_subs_d 0x0c1
+#define op_muls_d 0x0c2
+#define op_divs_d 0x0c3
+#define op_addt_d 0x0e0
+#define op_subt_d 0x0e1
+#define op_mult_d 0x0e2
+#define op_divt_d 0x0e3
+#define op_cvtts_d 0x0ec
+#define op_cvtqs_d 0x0fc
+#define op_cvtqt_d 0x0fe
+#define op_adds_uc 0x100
+#define op_subs_uc 0x101
+#define op_muls_uc 0x102
+#define op_divs_uc 0x103
+#define op_addt_uc 0x120
+#define op_subt_uc 0x121
+#define op_mult_uc 0x122
+#define op_divt_uc 0x123
+#define op_cvtts_uc 0x12c
+#define op_cvttq_vc 0x12f
+#define op_adds_um 0x140
+#define op_subs_um 0x141
+#define op_muls_um 0x142
+#define op_divs_um 0x143
+#define op_addt_um 0x160
+#define op_subt_um 0x161
+#define op_mult_um 0x162
+#define op_divt_um 0x163
+#define op_cvtts_um 0x16c
+#define op_adds_u 0x180
+#define op_subs_u 0x181
+#define op_muls_u 0x182
+#define op_divs_u 0x183
+#define op_addt_u 0x1a0
+#define op_subt_u 0x1a1
+#define op_mult_u 0x1a2
+#define op_divt_u 0x1a3
+#define op_cvtts_u 0x1ac
+#define op_cvttq_v 0x1af
+#define op_adds_ud 0x1c0
+#define op_subs_ud 0x1c1
+#define op_muls_ud 0x1c2
+#define op_divs_ud 0x1c3
+#define op_addt_ud 0x1e0
+#define op_subt_ud 0x1e1
+#define op_mult_ud 0x1e2
+#define op_divt_ud 0x1e3
+#define op_cvtts_ud 0x1ec
+#define op_adds_suc 0x500
+#define op_subs_suc 0x501
+#define op_muls_suc 0x502
+#define op_divs_suc 0x503
+#define op_addt_suc 0x520
+#define op_subt_suc 0x521
+#define op_mult_suc 0x522
+#define op_divt_suc 0x523
+#define op_cvtts_suc 0x52c
+#define op_cvttq_svc 0x52f
+#define op_adds_sum 0x540
+#define op_subs_sum 0x541
+#define op_muls_sum 0x542
+#define op_divs_sum 0x543
+#define op_addt_sum 0x560
+#define op_subt_sum 0x561
+#define op_mult_sum 0x562
+#define op_divt_sum 0x563
+#define op_cvtts_sum 0x56c
+#define op_adds_su 0x580
+#define op_subs_su 0x581
+#define op_muls_su 0x582
+#define op_divs_su 0x583
+#define op_addt_su 0x5a0
+#define op_subt_su 0x5a1
+#define op_mult_su 0x5a2
+#define op_divt_su 0x5a3
+#define op_cmptun_su 0x5a4
+#define op_cmpteq_su 0x5a5
+#define op_cmptlt_su 0x5a6
+#define op_cmptle_su 0x5a7
+#define op_cvtts_su 0x5ac
+#define op_cvttq_sv 0x5af
+#define op_adds_sud 0x5c0
+#define op_subs_sud 0x5c1
+#define op_muls_sud 0x5c2
+#define op_divs_sud 0x5c3
+#define op_addt_sud 0x5e0
+#define op_subt_sud 0x5e1
+#define op_mult_sud 0x5e2
+#define op_divt_sud 0x5e3
+#define op_cvtts_sud 0x5ec
+#define op_adds_suic 0x700
+#define op_subs_suic 0x701
+#define op_muls_suic 0x702
+#define op_divs_suic 0x703
+#define op_addt_suic 0x720
+#define op_subt_suic 0x721
+#define op_mult_suic 0x722
+#define op_divt_suic 0x723
+#define op_cvtts_suic 0x72c
+#define op_cvttq_svic 0x72f
+#define op_cvtqs_suic 0x73c
+#define op_cvtqt_suic 0x73e
+#define op_adds_suim 0x740
+#define op_subs_suim 0x741
+#define op_muls_suim 0x742
+#define op_divs_suim 0x743
+#define op_addt_suim 0x760
+#define op_subt_suim 0x761
+#define op_mult_suim 0x762
+#define op_divt_suim 0x763
+#define op_cvtts_suim 0x76c
+#define op_cvtqs_suim 0x77c
+#define op_cvtqt_suim 0x77e
+#define op_adds_sui 0x780
+#define op_subs_sui 0x781
+#define op_muls_sui 0x782
+#define op_divs_sui 0x783
+#define op_addt_sui 0x7a0
+#define op_subt_sui 0x7a1
+#define op_mult_sui 0x7a2
+#define op_divt_sui 0x7a3
+#define op_cvtts_sui 0x7ac
+#define op_cvttq_svi 0x7af
+#define op_cvtqs_sui 0x7bc
+#define op_cvtqt_sui 0x7be
+#define op_adds_suid 0x7c0
+#define op_subs_suid 0x7c1
+#define op_muls_suid 0x7c2
+#define op_divs_suid 0x7c3
+#define op_addt_suid 0x7e0
+#define op_subt_suid 0x7e1
+#define op_mult_suid 0x7e2
+#define op_divt_suid 0x7e3
+#define op_cvtts_suid 0x7ec
+#define op_cvtqs_suid 0x7fc
+#define op_cvtqt_suid 0x7fe
+
+
+ /* vax FLOAT, "function" opcodes (bits 5..11) */
+
+#define op_addf_c 0x000
+#define op_subf_c 0x001
+#define op_mulf_c 0x002
+#define op_divf_c 0x003
+#define op_cvtdg_c 0x01e
+#define op_addg_c 0x020
+#define op_subg_c 0x021
+#define op_mulg_c 0x022
+#define op_divg_c 0x023
+#define op_cvtgf_c 0x02c
+#define op_cvtgd_c 0x02d
+#define op_cvtgqg_c 0x02f
+#define op_cvtqf_c 0x03c
+#define op_cvtqg_c 0x03e
+#define op_addf 0x080
+#define op_subf 0x081
+#define op_mulf 0x082
+#define op_divf 0x083
+#define op_cvtdg 0x09e
+#define op_addg 0x0a0
+#define op_subg 0x0a1
+#define op_mulg 0x0a2
+#define op_divg 0x0a3
+#define op_cmpgeq 0x0a5
+#define op_cmpglt 0x0a6
+#define op_cmpgle 0x0a7
+#define op_cvtgf 0x0ac
+#define op_cvtgd 0x0ad
+#define op_cvtgq 0x0af
+#define op_cvtqf 0x0bc
+#define op_cvtqg 0x0be
+#define op_addf_uc 0x100
+#define op_subf_uc 0x101
+#define op_mulf_uc 0x102
+#define op_divf_uc 0x103
+#define op_cvtdg_uc 0x11e
+#define op_addg_uc 0x120
+#define op_subg_uc 0x121
+#define op_mulg_uc 0x122
+#define op_divg_uc 0x123
+#define op_cvtgf_uc 0x12c
+#define op_cvtgd_uc 0x12d
+#define op_cvtgqg_vc 0x12f
+#define op_addf_u 0x180
+#define op_subf_u 0x181
+#define op_mulf_u 0x182
+#define op_divf_u 0x183
+#define op_cvtdg_u 0x19e
+#define op_addg_u 0x1a0
+#define op_subg_u 0x1a1
+#define op_mulg_u 0x1a2
+#define op_divg_u 0x1a3
+#define op_cvtgf_u 0x1ac
+#define op_cvtgd_u 0x1ad
+#define op_cvtgqg_v 0x1af
+#define op_addf_sc 0x400
+#define op_subf_sc 0x401
+#define op_mulf_sc 0x402
+#define op_divf_sc 0x403
+#define op_cvtdg_sc 0x41e
+#define op_addg_sc 0x420
+#define op_subg_sc 0x421
+#define op_mulg_sc 0x422
+#define op_divg_sc 0x423
+#define op_cvtgf_sc 0x42c
+#define op_cvtgd_sc 0x42d
+#define op_cvtgqg_sc 0x42f
+#define op_cvtqf_sc 0x43c
+#define op_cvtqg_sc 0x43e
+#define op_addf_s 0x480
+#define op_subf_s 0x481
+#define op_mulf_s 0x482
+#define op_divf_s 0x483
+#define op_cvtdg_s 0x49e
+#define op_addg_s 0x4a0
+#define op_subg_s 0x4a1
+#define op_mulg_s 0x4a2
+#define op_divg_s 0x4a3
+#define op_cmpgeq_s 0x4a5
+#define op_cmpglt_s 0x4a6
+#define op_cmpgle_s 0x4a7
+#define op_cvtgf_s 0x4ac
+#define op_cvtgd_s 0x4ad
+#define op_cvtgqg_s 0x4af
+#define op_cvtqf_s 0x4bc
+#define op_cvtqg_s 0x4be
+#define op_addf_suc 0x500
+#define op_subf_suc 0x501
+#define op_mulf_suc 0x502
+#define op_divf_suc 0x503
+#define op_cvtdg_suc 0x51e
+#define op_addg_suc 0x520
+#define op_subg_suc 0x521
+#define op_mulg_suc 0x522
+#define op_divg_suc 0x523
+#define op_cvtgf_suc 0x52c
+#define op_cvtgd_suc 0x52d
+#define op_cvtgqg_svc 0x52f
+#define op_addf_su 0x580
+#define op_subf_su 0x581
+#define op_mulf_su 0x582
+#define op_divf_su 0x583
+#define op_cvtdg_su 0x59e
+#define op_addg_su 0x5a0
+#define op_subg_su 0x5a1
+#define op_mulg_su 0x5a2
+#define op_divg_su 0x5a3
+#define op_cvtgf_su 0x5ac
+#define op_cvtgd_su 0x5ad
+#define op_cvtgqg_sv 0x5af
+
+
+#endif /* _ALPHA_INSTRUCTION_H_ */
diff --git a/sys/arch/alpha/alpha/db_interface.c b/sys/arch/alpha/alpha/db_interface.c
index b1d95f08cd2..d647b619ae1 100644
--- a/sys/arch/alpha/alpha/db_interface.c
+++ b/sys/arch/alpha/alpha/db_interface.c
@@ -1,107 +1,182 @@
-/* $OpenBSD: db_interface.c,v 1.9 2000/01/03 19:19:41 deraadt Exp $ */
+/* $NetBSD: db_interface.c,v 1.8 1999/10/12 17:08:57 jdolecek Exp $ */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS ``AS IS''
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
+ */
/*
- * Copyright (c) 1997 Niklas Hallqvist. All rights reserverd.
+ * Parts of this file are derived from Mach 3:
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Niklas Hallqvist.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * File: alpha_instruction.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ */
+
+/*
+ * Interface to DDB.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Modified for NetBSD/alpha by:
+ *
+ * Christopher G. Demetriou, Carnegie Mellon University
+ *
+ * Jason R. Thorpe, Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center
*/
-#include <sys/types.h>
#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/reboot.h>
#include <sys/systm.h>
#include <vm/vm.h>
+#include <dev/cons.h>
+
#include <machine/db_machdep.h>
-#include <machine/frame.h>
+#include <machine/pal.h>
+#include <machine/prom.h>
-#include <ddb/db_access.h>
+#include <alpha/alpha/db_instruction.h>
+
+#include <ddb/db_sym.h>
#include <ddb/db_command.h>
+#include <ddb/db_extern.h>
+#include <ddb/db_access.h>
#include <ddb/db_output.h>
-#include <ddb/db_run.h>
-#include <ddb/db_sym.h>
-#include <ddb/db_var.h>
#include <ddb/db_variables.h>
-#include <ddb/db_extern.h>
+#include <ddb/db_interface.h>
-#include <dev/cons.h>
-extern label_t *db_recover;
-extern char *trap_type[];
-extern int trap_types;
+extern label_t *db_recover;
-void kdbprinttrap __P((int, int));
+#if 0
+extern char *trap_type[];
+extern int trap_types;
+#endif
-/*
- * These entries must be in the same order as the CPU registers.
- * You can add things at the end.
- */
-struct db_variable db_regs[] = {
- { "v0", (long *)&ddb_regs.tf_regs[FRAME_V0], FCN_NULL, }, /*0*/
- { "t0", (long *)&ddb_regs.tf_regs[FRAME_T0], FCN_NULL, }, /*1*/
- { "t1", (long *)&ddb_regs.tf_regs[FRAME_T1], FCN_NULL, }, /*2*/
- { "t2", (long *)&ddb_regs.tf_regs[FRAME_T2], FCN_NULL, }, /*3*/
- { "t3", (long *)&ddb_regs.tf_regs[FRAME_T3], FCN_NULL, }, /*4*/
- { "t4", (long *)&ddb_regs.tf_regs[FRAME_T4], FCN_NULL, }, /*5*/
- { "t5", (long *)&ddb_regs.tf_regs[FRAME_T5], FCN_NULL, }, /*6*/
- { "t6", (long *)&ddb_regs.tf_regs[FRAME_T6], FCN_NULL, }, /*7*/
- { "t7", (long *)&ddb_regs.tf_regs[FRAME_T7], FCN_NULL, }, /*8*/
- { "s0", (long *)&ddb_regs.tf_regs[FRAME_S0], FCN_NULL, }, /*9*/
- { "s1", (long *)&ddb_regs.tf_regs[FRAME_S1], FCN_NULL, }, /*10*/
- { "s2", (long *)&ddb_regs.tf_regs[FRAME_S2], FCN_NULL, }, /*11*/
- { "s3", (long *)&ddb_regs.tf_regs[FRAME_S3], FCN_NULL, }, /*12*/
- { "s4", (long *)&ddb_regs.tf_regs[FRAME_S4], FCN_NULL, }, /*13*/
- { "s5", (long *)&ddb_regs.tf_regs[FRAME_S5], FCN_NULL, }, /*14*/
- { "s6", (long *)&ddb_regs.tf_regs[FRAME_S6], FCN_NULL, }, /*15*/
- { "a0", (long *)&ddb_regs.tf_regs[FRAME_A0], FCN_NULL, }, /*16*/
- { "a1", (long *)&ddb_regs.tf_regs[FRAME_A1], FCN_NULL, }, /*17*/
- { "a2", (long *)&ddb_regs.tf_regs[FRAME_A2], FCN_NULL, }, /*18*/
- { "a3", (long *)&ddb_regs.tf_regs[FRAME_A3], FCN_NULL, }, /*19*/
- { "a4", (long *)&ddb_regs.tf_regs[FRAME_A4], FCN_NULL, }, /*20*/
- { "a5", (long *)&ddb_regs.tf_regs[FRAME_A5], FCN_NULL, }, /*21*/
- { "t8", (long *)&ddb_regs.tf_regs[FRAME_T8], FCN_NULL, }, /*22*/
- { "t9", (long *)&ddb_regs.tf_regs[FRAME_T9], FCN_NULL, }, /*23*/
- { "t10", (long *)&ddb_regs.tf_regs[FRAME_T10], FCN_NULL, }, /*24*/
- { "t11", (long *)&ddb_regs.tf_regs[FRAME_T11], FCN_NULL, }, /*25*/
- { "ra", (long *)&ddb_regs.tf_regs[FRAME_RA], FCN_NULL, }, /*26*/
- { "t12", (long *)&ddb_regs.tf_regs[FRAME_T12], FCN_NULL, }, /*27*/
- { "at", (long *)&ddb_regs.tf_regs[FRAME_AT], FCN_NULL, }, /*28*/
- { "gp", (long *)&ddb_regs.tf_regs[FRAME_GP], FCN_NULL, }, /*29*/
- { "sp", (long *)&ddb_regs.tf_regs[FRAME_SP], FCN_NULL, }, /*30*/
- { "pc", (long *)&ddb_regs.tf_regs[FRAME_PC], FCN_NULL, }, /*not*/
- { "ps", (long *)&ddb_regs.tf_regs[FRAME_PS], FCN_NULL, }, /*not*/
+int db_active = 0;
+
+void db_mach_halt __P((db_expr_t, int, db_expr_t, char *));
+void db_mach_reboot __P((db_expr_t, int, db_expr_t, char *));
+
+struct db_command db_machine_cmds[] = {
+ { "halt", db_mach_halt, 0, 0 },
+ { "reboot", db_mach_reboot, 0, 0 },
+ { (char *)0, },
};
+struct db_variable db_regs[] = {
+ { "v0", &ddb_regs.tf_regs[FRAME_V0], FCN_NULL },
+ { "t0", &ddb_regs.tf_regs[FRAME_T0], FCN_NULL },
+ { "t1", &ddb_regs.tf_regs[FRAME_T1], FCN_NULL },
+ { "t2", &ddb_regs.tf_regs[FRAME_T2], FCN_NULL },
+ { "t3", &ddb_regs.tf_regs[FRAME_T3], FCN_NULL },
+ { "t4", &ddb_regs.tf_regs[FRAME_T4], FCN_NULL },
+ { "t5", &ddb_regs.tf_regs[FRAME_T5], FCN_NULL },
+ { "t6", &ddb_regs.tf_regs[FRAME_T6], FCN_NULL },
+ { "t7", &ddb_regs.tf_regs[FRAME_T7], FCN_NULL },
+ { "s0", &ddb_regs.tf_regs[FRAME_S0], FCN_NULL },
+ { "s1", &ddb_regs.tf_regs[FRAME_S1], FCN_NULL },
+ { "s2", &ddb_regs.tf_regs[FRAME_S2], FCN_NULL },
+ { "s3", &ddb_regs.tf_regs[FRAME_S3], FCN_NULL },
+ { "s4", &ddb_regs.tf_regs[FRAME_S4], FCN_NULL },
+ { "s5", &ddb_regs.tf_regs[FRAME_S5], FCN_NULL },
+ { "s6", &ddb_regs.tf_regs[FRAME_S6], FCN_NULL },
+ { "a0", &ddb_regs.tf_regs[FRAME_A0], FCN_NULL },
+ { "a1", &ddb_regs.tf_regs[FRAME_A1], FCN_NULL },
+ { "a2", &ddb_regs.tf_regs[FRAME_A2], FCN_NULL },
+ { "a3", &ddb_regs.tf_regs[FRAME_A3], FCN_NULL },
+ { "a4", &ddb_regs.tf_regs[FRAME_A4], FCN_NULL },
+ { "a5", &ddb_regs.tf_regs[FRAME_A5], FCN_NULL },
+ { "t8", &ddb_regs.tf_regs[FRAME_T8], FCN_NULL },
+ { "t9", &ddb_regs.tf_regs[FRAME_T9], FCN_NULL },
+ { "t10", &ddb_regs.tf_regs[FRAME_T10], FCN_NULL },
+ { "t11", &ddb_regs.tf_regs[FRAME_T11], FCN_NULL },
+ { "ra", &ddb_regs.tf_regs[FRAME_RA], FCN_NULL },
+ { "t12", &ddb_regs.tf_regs[FRAME_T12], FCN_NULL },
+ { "at", &ddb_regs.tf_regs[FRAME_AT], FCN_NULL },
+ { "gp", &ddb_regs.tf_regs[FRAME_GP], FCN_NULL },
+ { "sp", &ddb_regs.tf_regs[FRAME_SP], FCN_NULL },
+ { "pc", &ddb_regs.tf_regs[FRAME_PC], FCN_NULL },
+ { "ps", &ddb_regs.tf_regs[FRAME_PS], FCN_NULL },
+ { "ai", &ddb_regs.tf_regs[FRAME_T11], FCN_NULL },
+ { "pv", &ddb_regs.tf_regs[FRAME_T12], FCN_NULL },
+};
struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]);
-int db_active = 0;
-void
-Debugger()
+/*
+ * ddb_trap - field a kernel trap
+ */
+int
+ddb_trap(a0, a1, a2, entry, regs)
+ unsigned long a0, a1, a2, entry;
+ db_regs_t *regs;
{
- __asm__ ("bpt");
+ int s;
+
+ if (entry != ALPHA_KENTRY_IF ||
+ (a0 != ALPHA_IF_CODE_BPT && a0 != ALPHA_IF_CODE_BUGCHK)) {
+ if (db_recover != 0) {
+ /* This will longjmp back into db_command_loop() */
+ db_error("Caught exception in ddb.\n");
+ /* NOTREACHED */
+ }
+
+ /*
+ * Tell caller "We did NOT handle the trap."
+ * Caller should panic, or whatever.
+ */
+ return (0);
+ }
+
+ /*
+ * alpha_debug() switches us to the debugger stack.
+ */
+
+ ddb_regs = *regs;
+
+ s = splhigh();
+
+ db_active++;
+ cnpollc(TRUE); /* Set polling mode, unblank video */
+
+ db_trap(entry, a0); /* Where the work happens */
+
+ cnpollc(FALSE); /* Resume interrupt mode */
+ db_active--;
+
+ splx(s);
+
+ *regs = ddb_regs;
+
+ /*
+ * Tell caller "We HAVE handled the trap."
+ */
+ return (1);
}
/*
@@ -109,16 +184,15 @@ Debugger()
*/
void
db_read_bytes(addr, size, data)
- vm_offset_t addr;
- size_t size;
- char *data;
+ vaddr_t addr;
+ register size_t size;
+ register char *data;
{
- char *src = (char*)addr;
+ register char *src;
- while (size > 0) {
- --size;
+ src = (char *)addr;
+ while (size-- > 0)
*data++ = *src++;
- }
}
/*
@@ -126,163 +200,335 @@ db_read_bytes(addr, size, data)
*/
void
db_write_bytes(addr, size, data)
- vm_offset_t addr;
- size_t size;
- char *data;
+ vaddr_t addr;
+ register size_t size;
+ register char *data;
{
- char *dst = (char *)addr;
+ register char *dst;
- while (size > 0) {
- --size;
+ dst = (char *)addr;
+ while (size-- > 0)
*dst++ = *data++;
- }
alpha_pal_imb();
}
+void
+Debugger()
+{
+
+ __asm __volatile("call_pal 0x81"); /* bugchk */
+}
+
/*
- * Print trap reason.
+ * This is called before ddb_init() to install the
+ * machine-specific command table. (see machdep.c)
*/
void
-kdbprinttrap(type, code)
- int type, code;
+db_machine_init()
{
- db_printf("kernel: ");
- if (type >= trap_types || type < 0)
- db_printf("type %d", type);
- else
- db_printf("%s", trap_type[type]);
- db_printf(" trap, code=%x\n", code);
+
+ db_machine_commands_install(db_machine_cmds);
}
/*
- * kdb_trap - field a BPT trap
+ * Alpha-specific ddb commands:
+ *
+ * halt set halt bit in rpb and halt
+ * reboot set reboot bit in rpb and halt
*/
-int
-kdb_trap(type, code, regs)
- int type, code;
- db_regs_t *regs;
+
+void
+db_mach_halt(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
{
- int s;
- switch (type) {
- case -1: /* keyboard interrupt */
- break;
- case ALPHA_KENTRY_IF: /* breakpoint */
- if (code == ALPHA_IF_CODE_BPT)
- break;
- default:
- if (!db_panic)
- return (0);
+ prom_halt(1);
+}
- kdbprinttrap(type, code);
- if (db_recover != 0) {
- db_error("Faulted in DDB; continuing...\n");
- /*NOTREACHED*/
- }
+void
+db_mach_reboot(addr, have_addr, count, modif)
+ db_expr_t addr;
+ int have_addr;
+ db_expr_t count;
+ char * modif;
+{
+
+ prom_halt(0);
+}
+
+/*
+ * Map Alpha register numbers to trapframe/db_regs_t offsets.
+ */
+static int reg_to_frame[32] = {
+ FRAME_V0,
+ FRAME_T0,
+ FRAME_T1,
+ FRAME_T2,
+ FRAME_T3,
+ FRAME_T4,
+ FRAME_T5,
+ FRAME_T6,
+ FRAME_T7,
+
+ FRAME_S0,
+ FRAME_S1,
+ FRAME_S2,
+ FRAME_S3,
+ FRAME_S4,
+ FRAME_S5,
+ FRAME_S6,
+
+ FRAME_A0,
+ FRAME_A1,
+ FRAME_A2,
+ FRAME_A3,
+ FRAME_A4,
+ FRAME_A5,
+
+ FRAME_T8,
+ FRAME_T9,
+ FRAME_T10,
+ FRAME_T11,
+ FRAME_RA,
+ FRAME_T12,
+ FRAME_AT,
+ FRAME_GP,
+ FRAME_SP,
+ -1, /* zero */
+};
+
+u_long
+db_register_value(regs, regno)
+ db_regs_t *regs;
+ int regno;
+{
+
+ if (regno > 31 || regno < 0) {
+ db_printf(" **** STRANGE REGISTER NUMBER %d **** ", regno);
+ return (0);
}
- /* XXX Should switch to kdb`s own stack here. */
+ if (regno == 31)
+ return (0);
- ddb_regs = *regs;
+ return (regs->tf_regs[reg_to_frame[regno]]);
+}
- s = splhigh();
- db_active++;
- cnpollc(TRUE);
- db_trap(type, code);
- cnpollc(FALSE);
- db_active--;
- splx(s);
+/*
+ * Support functions for software single-step.
+ */
- *regs = ddb_regs;
- return (1);
+boolean_t
+db_inst_call(ins)
+ int ins;
+{
+ alpha_instruction insn;
+
+ insn.bits = ins;
+ return ((insn.branch_format.opcode == op_bsr) ||
+ ((insn.jump_format.opcode == op_j) &&
+ (insn.jump_format.action & 1)));
}
-register_t
-getreg_val(regs, reg)
- db_regs_t *regs;
- int reg;
+boolean_t
+db_inst_return(ins)
+ int ins;
{
- return ((register_t)*db_regs[reg].valuep);
+ alpha_instruction insn;
+
+ insn.bits = ins;
+ return ((insn.jump_format.opcode == op_j) &&
+ (insn.jump_format.action == op_ret));
}
-/* XXX Where do jsr_coroutine fit in? We do not use it anyhow so... */
-int
-inst_call(ins)
- u_int ins;
+boolean_t
+db_inst_trap_return(ins)
+ int ins;
{
- return ((ins & 0xfc000000) == 0xd0000000 || /* bsr */
- (ins & 0xfc00c000) == 0x68004000); /* jsr */
+ alpha_instruction insn;
+
+ insn.bits = ins;
+ return ((insn.pal_format.opcode == op_pal) &&
+ (insn.pal_format.function == PAL_OSF1_rti));
}
-int
-inst_branch(ins)
- u_int ins;
+boolean_t
+db_inst_branch(ins)
+ int ins;
{
- return ((ins & 0xc0000000) == 0xc0000000 && /* 30 - 3F */
- !((ins & 0xfc000000) == 0xd0000000 || /* but !34 (bsr) */
- (ins & 0xfc00c000) == 0x68000000)); /* nor jmp */
+ alpha_instruction insn;
+
+ insn.bits = ins;
+ switch (insn.branch_format.opcode) {
+ case op_j:
+ case op_br:
+ case op_fbeq:
+ case op_fblt:
+ case op_fble:
+ case op_fbne:
+ case op_fbge:
+ case op_fbgt:
+ case op_blbc:
+ case op_beq:
+ case op_blt:
+ case op_ble:
+ case op_blbs:
+ case op_bne:
+ case op_bge:
+ case op_bgt:
+ return (TRUE);
+ }
+
+ return (FALSE);
}
-int
-inst_load(ins)
- u_int ins;
+boolean_t
+db_inst_unconditional_flow_transfer(ins)
+ int ins;
{
- char *nm = opcode[ins >> 26].opc_name;
+ alpha_instruction insn;
+
+ insn.bits = ins;
+ switch (insn.branch_format.opcode) {
+ case op_j:
+ case op_br:
+ return (TRUE);
+
+ case op_pal:
+ switch (insn.pal_format.function) {
+ case PAL_OSF1_retsys:
+ case PAL_OSF1_rti:
+ case PAL_OSF1_callsys:
+ return (TRUE);
+ }
+ }
- return (nm[0] == 'l' && nm[1] == 'd');
+ return (FALSE);
}
-int
-inst_store(ins)
- u_int ins;
+#if 0
+boolean_t
+db_inst_spill(ins, regn)
+ int ins, regn;
{
- char *nm = opcode[ins >> 26].opc_name;
+ alpha_instruction insn;
- return (nm[0] == 's' && nm[1] == 't');
+ insn.bits = ins;
+ return ((insn.mem_format.opcode == op_stq) &&
+ (insn.mem_format.rd == regn));
}
+#endif
-db_addr_t
-branch_taken(ins, pc, getreg, regs)
- u_int ins;
- db_addr_t pc;
- register_t (*getreg) __P((db_regs_t *, int));
- db_regs_t *regs;
+boolean_t
+db_inst_load(ins)
+ int ins;
{
- int offset;
-
- if (opcode[ins >> 26].opc_fmt == OPC_BR) {
- offset = ins & 0xfffff;
- if (offset & 0x80000)
- offset = offset - 0x100000;
- return (pc + sizeof(int) + offset * sizeof(int));
- } else
- return (db_addr_t)(*getreg)(regs, (ins >> 16) & 0x1f);
+ alpha_instruction insn;
+
+ insn.bits = ins;
+
+ /* Loads. */
+ if (insn.mem_format.opcode == op_ldbu ||
+ insn.mem_format.opcode == op_ldq_u ||
+ insn.mem_format.opcode == op_ldwu)
+ return (TRUE);
+ if ((insn.mem_format.opcode >= op_ldf) &&
+ (insn.mem_format.opcode <= op_ldt))
+ return (TRUE);
+ if ((insn.mem_format.opcode >= op_ldl) &&
+ (insn.mem_format.opcode <= op_ldq_l))
+ return (TRUE);
+
+ /* Prefetches. */
+ if (insn.mem_format.opcode == op_special) {
+ /* Note: MB is treated as a store. */
+ if ((insn.mem_format.displacement == (short)op_fetch) ||
+ (insn.mem_format.displacement == (short)op_fetch_m))
+ return (TRUE);
+ }
+
+ return (FALSE);
}
-db_addr_t
-next_instr_address(pc, branch)
- db_addr_t pc;
- int branch;
+boolean_t
+db_inst_store(ins)
+ int ins;
{
- if (!branch)
- return (pc + sizeof(int));
- return (branch_taken(*(u_int *)pc, pc, getreg_val, DDB_REGS));
+ alpha_instruction insn;
+
+ insn.bits = ins;
+
+ /* Stores. */
+ if (insn.mem_format.opcode == op_stw ||
+ insn.mem_format.opcode == op_stb ||
+ insn.mem_format.opcode == op_stq_u)
+ return (TRUE);
+ if ((insn.mem_format.opcode >= op_stf) &&
+ (insn.mem_format.opcode <= op_stt))
+ return (TRUE);
+ if ((insn.mem_format.opcode >= op_stl) &&
+ (insn.mem_format.opcode <= op_stq_c))
+ return (TRUE);
+
+ /* Barriers. */
+ if (insn.mem_format.opcode == op_special) {
+ if (insn.mem_format.displacement == op_mb)
+ return (TRUE);
+ }
+
+ return (FALSE);
}
-/*
- * Validate an address for use as a breakpoint. We cannot let some
- * addresses have breakpoints as the ddb code itself uses that codepath.
- * Recursion and kernel stack space exhaustion will follow.
- */
-int
-db_valid_breakpoint(addr)
- db_addr_t addr;
+db_addr_t
+db_branch_taken(ins, pc, regs)
+ int ins;
+ db_addr_t pc;
+ db_regs_t *regs;
{
- char *name;
- db_expr_t offset;
+ long signed_immediate;
+ alpha_instruction insn;
+ db_addr_t newpc;
+
+ insn.bits = ins;
+ switch (insn.branch_format.opcode) {
+ /*
+ * Jump format: target PC is (contents of instruction's "RB") & ~3.
+ */
+ case op_j:
+ newpc = db_register_value(regs, insn.jump_format.rb) & ~3;
+ break;
- db_find_sym_and_offset(addr, &name, &offset);
- if (name && strcmp(name, "alpha_pal_swpipl") == 0)
- return (0);
- return (1);
+ /*
+ * Branch format: target PC is
+ * (new PC) + (4 * sign-ext(displacement)).
+ */
+ case op_br:
+ case op_fbeq:
+ case op_fblt:
+ case op_fble:
+ case op_bsr:
+ case op_fbne:
+ case op_fbge:
+ case op_fbgt:
+ case op_blbc:
+ case op_beq:
+ case op_blt:
+ case op_ble:
+ case op_blbs:
+ case op_bne:
+ case op_bge:
+ case op_bgt:
+ signed_immediate = insn.branch_format.displacement;
+ newpc = (pc + 4) + (signed_immediate << 2);
+ break;
+
+ default:
+ printf("DDB: db_inst_branch_taken on non-branch!\n");
+ newpc = pc; /* XXX */
+ }
+
+ return (newpc);
}
diff --git a/sys/arch/alpha/alpha/db_trace.c b/sys/arch/alpha/alpha/db_trace.c
index 265c418887c..d80e4b967fd 100644
--- a/sys/arch/alpha/alpha/db_trace.c
+++ b/sys/arch/alpha/alpha/db_trace.c
@@ -1,8 +1,15 @@
-/* $OpenBSD: db_trace.c,v 1.4 1997/07/23 23:29:45 niklas Exp $ */
+/* $NetBSD: db_trace.c,v 1.6 2000/05/26 03:34:24 jhawk Exp $ */
-/*
- * Copyright (c) 1997 Niklas Hallqvist. All rights reserverd.
- * Copyright (c) 1997 Theo de Raadt. All rights reserverd.
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Ross Harvey.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,236 +21,352 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
- * This product includes software developed by Niklas Hallqvist and
- * Theo de Raadt.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
-
-#include <vm/vm.h>
-
+#include <sys/proc.h>
+#include <sys/user.h>
#include <machine/db_machdep.h>
-#include <machine/frame.h>
+#include <alpha/alpha/db_instruction.h>
+
+#include <ddb/db_sym.h>
#include <ddb/db_access.h>
-#include <ddb/db_command.h>
-#include <ddb/db_output.h>
-#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
-#include <ddb/db_extern.h>
+#include <ddb/db_output.h>
#include <ddb/db_interface.h>
-extern int etext;
-
-static __inline int sext __P((u_int));
-static __inline int rega __P((u_int));
-static __inline int regb __P((u_int));
-static __inline int regc __P((u_int));
-static __inline int disp __P((u_int));
-
-static __inline int
-sext(x)
- u_int x;
-{
- return ((x & 0x8000) ? -(-x & 0xffff) : (x & 0xffff));
-}
-
-static __inline int
-rega(x)
- u_int x;
-{
- return ((x >> 21) & 0x1f);
-}
-
-static __inline int
-regb(x)
- u_int x;
-{
- return ((x >> 16) & 0x1f);
-}
-
-static __inline int
-regc(x)
- u_int x;
-{
- return (x & 0x1f);
-}
-
-static __inline int
-disp(x)
- u_int x;
-{
- return (sext(x & 0xffff));
-}
+/*
+ * Information about the `standard' Alpha function prologue.
+ */
+struct prologue_info {
+ int pi_reg_offset[32]; /* offset of registers in stack frame */
+ u_int32_t pi_regmask; /* which registers are in frame */
+ int pi_frame_size; /* frame size */
+};
/*
- * XXX There are a couple of problems with this code:
- *
- * The argument list printout code is likely to get confused.
- *
- * It relies on the conventions of gcc code generation.
+ * We use several symbols to take special action:
*
- * It uses heuristics to calculate the framesize, and might get it wrong.
+ * Trap vectors, which use a different (fixed-size) stack frame:
*
- * It doesn't yet use the framepointer if available.
- *
- * The address argument can only be used for pointing at trapframes
- * since a frame pointer of its own serves no good on the alpha,
- * you need a pc value too.
- *
- * The heuristics used for tracing through a trap relies on having
- * symbols available.
+ * XentArith
+ * XentIF
+ * XentInt
+ * XentMM
+ * XentSys
+ * XentUna
*/
+
+static struct special_symbol {
+ void (*ss_val) __P((void));
+ const char *ss_note;
+} special_symbols[] = {
+ { (void (*)(void))&XentArith, "arithmetic trap" },
+ { (void (*)(void))&XentIF, "instruction fault" },
+ { (void (*)(void))&XentInt, "interrupt" },
+ { (void (*)(void))&XentMM, "memory management fault" },
+ { (void (*)(void))&XentSys, "syscall" },
+ { (void (*)(void))&XentUna, "unaligned access fault" },
+ { (void (*)(void))&XentRestart, "console restart" },
+ { NULL }
+};
+
+static void decode_prologue __P((db_addr_t, db_addr_t, struct prologue_info *,
+ int (*)(const char *, ...)));
+static void decode_syscall __P((int, struct proc *,
+ int (*)(const char *, ...)));
+static int sym_is_trapsymbol __P((void *));
+
void
db_stack_trace_cmd(addr, have_addr, count, modif)
- db_expr_t addr;
- int have_addr;
- db_expr_t count;
- char *modif;
+ db_expr_t addr;
+ boolean_t have_addr;
+ db_expr_t count;
+ char *modif;
{
- u_long *frame;
- int i, framesize;
- db_addr_t pc, ra;
- u_int inst;
- char *name;
- db_expr_t offset;
- db_regs_t *regs;
- u_long *slot[32];
-
- bzero(slot, sizeof(slot));
- if (count == -1)
- count = 65535;
-
- regs = have_addr ? (db_regs_t *)addr : DDB_REGS;
-trapframe:
- /* remember where various registers are stored */
- for (i = 0; i < 31; i++)
- slot[i] = &regs->tf_regs[0] +
- ((u_long *)db_regs[i].valuep - &ddb_regs.tf_regs[0]);
- frame = (u_long *)regs->tf_regs[FRAME_SP];
- pc = (db_addr_t)regs->tf_regs[FRAME_PC];
- ra = (db_addr_t)regs->tf_regs[FRAME_RA];
-
- while (count-- && pc >= (db_addr_t)KERNBASE && pc < (db_addr_t)&etext) {
- db_find_sym_and_offset(pc, &name, &offset);
- if (!name) {
- name = "?";
- /* Limit the search for procedure start */
- offset = 65536;
+ db_addr_t callpc, frame, symval;
+ struct prologue_info pi;
+ void *symval_f;
+ db_expr_t diff;
+ db_sym_t sym;
+ int i;
+ u_long tfps;
+ char *symname;
+ struct pcb *pcbp;
+ char c, *cp = modif;
+ struct trapframe *tf;
+ boolean_t ra_from_tf;
+ boolean_t ra_from_pcb;
+ u_long last_ipl = ~0L;
+ struct proc *p = NULL;
+ boolean_t trace_thread = FALSE;
+ boolean_t have_trapframe = FALSE;
+
+ while ((c = *cp++) != 0)
+ trace_thread |= c == 't';
+
+ if (!have_addr) {
+ p = curproc;
+ addr = DDB_REGS->tf_regs[FRAME_SP] - FRAME_SIZE * 8;
+ tf = (struct trapframe *)addr;
+ have_trapframe = 1;
+ } else {
+ if (trace_thread) {
+ db_printf("trace: pid %d ", (int)addr);
+ p = pfind(addr);
+ if (p == NULL) {
+ db_printf("not found\n");
+ return;
+ }
+ if ((p->p_flag & P_INMEM) == 0) {
+ db_printf("swapped out\n");
+ return;
+ }
+ pcbp = &p->p_addr->u_pcb;
+ addr = (db_expr_t)pcbp->pcb_hw.apcb_ksp;
+ callpc = pcbp->pcb_context[7];
+ db_printf("at 0x%lx\n", addr);
+ } else {
+ db_printf("alpha trace requires known PC =eject=\n");
+ return;
}
- db_printf("%s(", name);
+ frame = addr;
+ }
- framesize = 0;
- for (i = sizeof (int); i <= offset; i += sizeof (int)) {
- inst = *(u_int *)(pc - i);
-
- /*
- * If by chance we don't have any symbols we have to
- * get out somehow anyway. Check for the preceding
- * procedure return in that case.
- */
- if (name[0] == '?' && inst_return(inst))
- break;
+ while (count--) {
+ if (have_trapframe) {
+ frame = (db_addr_t)tf + FRAME_SIZE * 8;
+ callpc = tf->tf_regs[FRAME_PC];
+ ra_from_tf = TRUE;
+ have_trapframe = 0;
+ }
+ sym = db_search_symbol(callpc, DB_STGY_ANY, &diff);
+ if (sym == DB_SYM_NULL)
+ break;
- /*
- * Disassemble to get the needed info for the frame.
- */
- if ((inst & 0xffff0000) == 0x23de0000)
- /* lda sp,n(sp) */
- framesize -= disp(inst) / sizeof (u_long);
- else if ((inst & 0xfc1f0000) == 0xb41e0000)
- /* stq X,n(sp) */
- slot[rega(inst)] =
- frame + disp(inst) / sizeof (u_long);
- else if ((inst & 0xfc000fe0) == 0x44000400 &&
- rega(inst) == regb(inst)) {
- /* bis X,X,Y (aka mov X,Y) */
- /* zero is hardwired */
- if (rega(inst) != 31)
- slot[rega(inst)] = slot[regc(inst)];
- slot[regc(inst)] = 0;
- /*
- * XXX In here we might special case a frame
- * pointer setup, i.e. mov sp, fp.
- */
- } else if (inst_load(inst))
- /* clobbers a register */
- slot[rega(inst)] = 0;
- else if (opcode[inst >> 26].opc_fmt == OPC_OP)
- /* clobbers a register */
- slot[regc(inst)] = 0;
- /*
- * XXX Recognize more reg clobbering instructions and
- * set slot[reg] = 0 then too.
- */
+ db_symbol_values(sym, &symname, (db_expr_t *)&symval);
+ symval_f = (void *)symval;
+
+ if (callpc < symval) {
+ db_printf("symbol botch: callpc 0x%lx < "
+ "func 0x%lx (%s)\n", callpc, symval, symname);
+ return;
}
/*
- * Try to print the 6 quads that might hold the args.
- * We print 6 of them even if there are fewer, cause we don't
- * know the number. Maybe we could skip the last ones
- * that never got used. If we cannot know the value, print
- * a question mark.
+ * XXX Printing out arguments is Hard. We'd have to
+ * keep lots of state as we traverse the frame, figuring
+ * out where the arguments to the function are stored
+ * on the stack.
+ *
+ * Even worse, they may be stored to the stack _after_
+ * being modified in place; arguments are passed in
+ * registers.
+ *
+ * So, in order for this to work reliably, we pretty much
+ * have to have a kernel built with `cc -g':
+ *
+ * - The debugging symbols would tell us where the
+ * arguments are, how many there are, if there were
+ * any passed on the stack, etc.
+ *
+ * - Presumably, the compiler would be careful to
+ * store the argument registers on the stack before
+ * modifying the registers, so that a debugger could
+ * know what those values were upon procedure entry.
+ *
+ * Because of this, we don't bother. We've got most of the
+ * benefit of back tracking without the arguments, and we
+ * could get the arguments if we use a remote source-level
+ * debugger (for serious debugging).
*/
- for (i = 0; i < 6; i++) {
- if (i > 0)
- db_printf(", ");
- if (slot[16 + i])
- db_printf("%lx", *slot[16 + i]);
- else
- db_printf("?");
- }
+ db_printf("%s() at ", symname);
+ db_printsym(callpc, DB_STGY_PROC);
+ db_printf("\n");
-#if 0
/*
- * XXX This will go eventually when I trust the argument
- * printout heuristics.
- *
- * Print the stack frame contents.
+ * If we are in a trap vector, frame points to a
+ * trapframe.
*/
- db_printf(") [%p: ", frame);
- if (framesize > 1) {
- for (i = 0; i < framesize - 1; i++)
- db_printf("%lx, ", frame[i]);
- db_printf("%lx", frame[i]);
+ if (sym_is_trapsymbol(symval_f)) {
+ tf = (struct trapframe *)frame;
+
+ for (i = 0; special_symbols[i].ss_val != NULL; ++i)
+ if (symval_f == special_symbols[i].ss_val)
+ db_printf("--- %s",
+ special_symbols[i].ss_note);
+
+ tfps = tf->tf_regs[FRAME_PS];
+ if (symval_f == &XentSys)
+ decode_syscall(tf->tf_regs[FRAME_V0], p, db_printf);
+ if ((tfps & ALPHA_PSL_IPL_MASK) != last_ipl) {
+ last_ipl = tfps & ALPHA_PSL_IPL_MASK;
+ if (symval_f != &XentSys)
+ db_printf(" (from ipl %ld)", last_ipl);
+ }
+ db_printf(" ---\n");
+ if (tfps & ALPHA_PSL_USERMODE) {
+ db_printf("--- user mode ---\n");
+ break; /* Terminate search. */
+ }
+ have_trapframe = 1;
+ continue;
}
- db_printf("] at ");
-#else
- db_printf(") at ");
-#endif
- db_printsym(pc, DB_STGY_PROC);
- db_printf("\n");
/*
- * If we are looking at a Xent* routine we are in a trap
- * context.
+ * This is a bit trickier; we must decode the function
+ * prologue to find the saved RA.
+ *
+ * XXX How does this interact w/ alloca()?!
*/
- if (strncmp(name, "Xent", sizeof("Xent") - 1) == 0) {
- regs = (db_regs_t *)frame;
- goto trapframe;
+ decode_prologue(callpc, symval, &pi, db_printf);
+ if ((pi.pi_regmask & (1 << 26)) == 0) {
+ /*
+ * No saved RA found. We might have RA from
+ * the trap frame, however (e.g trap occurred
+ * in a leaf call). If not, we've found the
+ * root of the call graph.
+ */
+ if (ra_from_tf)
+ callpc = tf->tf_regs[FRAME_RA];
+ else {
+ db_printf("--- root of call graph ---\n");
+ break;
+ }
+ } else
+ callpc = *(u_long *)(frame + pi.pi_reg_offset[26]);
+ ra_from_tf = ra_from_pcb = FALSE;
+#if 0
+ /*
+ * The call was actually made at RA - 4; the PC is
+ * updated before being stored in RA.
+ */
+ callpc -= 4;
+#endif
+ frame += pi.pi_frame_size;
+ }
+}
+
+/*
+ * Decode the function prologue for the function we're in, and note
+ * which registers are stored where, and how large the stack frame is.
+ */
+static void
+decode_prologue(callpc, func, pi, pr)
+ db_addr_t callpc, func;
+ struct prologue_info *pi;
+ int (*pr) __P((const char *, ...));
+{
+ long signed_immediate;
+ alpha_instruction ins;
+ db_expr_t pc;
+
+ pi->pi_regmask = 0;
+ pi->pi_frame_size = 0;
+
+#define CHECK_FRAMESIZE \
+do { \
+ if (pi->pi_frame_size != 0) { \
+ (*pr)("frame size botch: adjust register offsets?\n"); \
+ } \
+} while (0)
+
+ for (pc = func; pc < callpc; pc += sizeof(alpha_instruction)) {
+ ins.bits = *(unsigned int *)pc;
+
+ if (ins.mem_format.opcode == op_lda &&
+ ins.mem_format.ra == 30 &&
+ ins.mem_format.rb == 30) {
+ /*
+ * GCC 2.7-style stack adjust:
+ *
+ * lda sp, -64(sp)
+ */
+ signed_immediate = (long)ins.mem_format.displacement;
+#if 1
+ if (signed_immediate > 0)
+ (*pr)("prologue botch: displacement %ld\n",
+ signed_immediate);
+#endif
+ CHECK_FRAMESIZE;
+ pi->pi_frame_size += -signed_immediate;
+ } else if (ins.operate_lit_format.opcode == op_arit &&
+ ins.operate_lit_format.function == op_subq &&
+ ins.operate_lit_format.ra == 30 &&
+ ins.operate_lit_format.rc == 30) {
+ /*
+ * EGCS-style stack adjust:
+ *
+ * subq sp, 64, sp
+ */
+ CHECK_FRAMESIZE;
+ pi->pi_frame_size += ins.operate_lit_format.literal;
+ } else if (ins.mem_format.opcode == op_stq &&
+ ins.mem_format.rb == 30 &&
+ ins.mem_format.ra != 31) {
+ /* Store of (non-zero) register onto the stack. */
+ signed_immediate = (long)ins.mem_format.displacement;
+ pi->pi_regmask |= 1 << ins.mem_format.ra;
+ pi->pi_reg_offset[ins.mem_format.ra] = signed_immediate;
}
+ }
+}
- /* Look for the return address if recorded. */
- if (slot[26])
- ra = *(db_addr_t *)slot[26];
+static int
+sym_is_trapsymbol(v)
+ void *v;
+{
+ int i;
+
+ for (i = 0; special_symbols[i].ss_val != NULL; ++i)
+ if (v == special_symbols[i].ss_val)
+ return 1;
+ return 0;
+}
+
+static void
+decode_syscall(number, p, pr)
+ int number;
+ struct proc *p;
+ int (*pr) __P((const char *, ...));
+{
+ db_sym_t sym;
+ db_expr_t diff;
+ char *symname, *ename;
+ int (*f) __P((struct proc *, void *, register_t *));
- /* Advance to the next frame. */
- frame += framesize;
- pc = ra;
+ (*pr)(" (%d", number); /* ) */
+ if (!p)
+ goto out;
+ if (0 <= number && number < p->p_emul->e_nsysent) {
+ ename = p->p_emul->e_name;
+ f = p->p_emul->e_sysent[number].sy_call;
+ sym = db_search_symbol((db_addr_t)f, DB_STGY_ANY, &diff);
+ if (sym == DB_SYM_NULL || diff != 0)
+ goto out;
+ db_symbol_values(sym, &symname, NULL);
+ (*pr)(", %s.%s", ename, symname);
}
+out:
+ (*pr)(")");
+ return;
}
diff --git a/sys/arch/alpha/alpha/debug.s b/sys/arch/alpha/alpha/debug.s
new file mode 100644
index 00000000000..5fb2f12106d
--- /dev/null
+++ b/sys/arch/alpha/alpha/debug.s
@@ -0,0 +1,114 @@
+/* $NetBSD: debug.s,v 1.5 1999/06/18 18:11:56 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Debugger glue.
+ */
+
+ .text
+inc6: .stabs __FILE__,132,0,0,inc6; .loc 1 __LINE__
+
+/*
+ * Debugger stack.
+ */
+BSS(debug_stack_bottom, NBPG)
+ABS(debug_stack_top, debug_stack_bottom + NBPG)
+
+/*
+ * alpha_debug:
+ *
+ * Single debugger entry point, handling the housekeeping
+ * chores we need to deal with.
+ *
+ * Arguments are:
+ *
+ * a0 a0 from trap
+ * a1 a1 from trap
+ * a2 a2 from trap
+ * a3 kernel trap entry point
+ * a4 frame pointer
+ */
+NESTED_NOPROFILE(alpha_debug, 5, 32, ra, IM_RA|IM_S0, 0)
+ br pv, 1f
+1: LDGP(pv)
+ lda t0, FRAME_SIZE*8(a4) /* what would sp have been? */
+ stq t0, FRAME_SP*8(a4) /* belatedly save sp for ddb view */
+ lda sp, -32(sp) /* set up stack frame */
+ stq ra, (32-8)(sp) /* save ra */
+ stq s0, (32-16)(sp) /* save s0 */
+
+ /* Remember our current stack pointer. */
+ mov sp, s0
+
+#if defined(MULTIPROCESSOR)
+ /*
+ * XXX PAUSE ALL OTHER CPUs.
+ */
+#endif
+
+ /*
+ * Switch to the debug stack if we're not on it already.
+ */
+ lda t0, debug_stack_bottom
+ cmpule sp, t0, t1 /* sp <= debug_stack_bottom */
+ bne t1, 2f /* yes, switch now */
+
+ lda t0, debug_stack_top
+ cmpule t0, sp, t1 /* debug_stack_top <= sp? */
+ bne t1, 3f /* yes, we're on the debug stack */
+
+2: lda sp, debug_stack_top /* sp <- debug_stack_top */
+
+3: /* Dispatch to the debugger - arguments are already in place. */
+ CALL(ddb_trap)
+
+ /* Debugger return value in v0; switch back to our previous stack. */
+ mov s0, sp
+
+#if defined(MULTIPROCESSOR)
+ /*
+ * XXX RESUME ALL OTHER CPUs.
+ */
+#endif
+
+ ldq ra, (32-8)(sp) /* restore ra */
+ ldq s0, (32-16)(sp) /* restore s0 */
+ lda sp, 32(sp) /* pop stack frame */
+ RET
+ END(alpha_debug)
diff --git a/sys/arch/alpha/alpha/dec_2100_a50.c b/sys/arch/alpha/alpha/dec_2100_a50.c
index d684c8559a9..7b0b62bebce 100644
--- a/sys/arch/alpha/alpha/dec_2100_a50.c
+++ b/sys/arch/alpha/alpha/dec_2100_a50.c
@@ -1,8 +1,7 @@
-/* $OpenBSD: dec_2100_a50.c,v 1.8 1999/01/11 05:10:58 millert Exp $ */
-/* $NetBSD: dec_2100_a50.c,v 1.18 1996/11/25 03:59:19 cgd Exp $ */
+/* $NetBSD: dec_2100_a50.c,v 1.43 2000/05/22 20:13:31 thorpej Exp $ */
/*
- * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * Copyright (c) 1995, 1996, 1997 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
@@ -27,20 +26,30 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/termios.h>
#include <dev/cons.h>
+#include <sys/conf.h>
#include <machine/rpb.h>
#include <machine/autoconf.h>
+#include <machine/bus.h>
#include <machine/cpuconf.h>
-#include <dev/isa/isavar.h>
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#ifdef notyet
+#include <dev/ic/pckbcvar.h>
+#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -50,41 +59,53 @@
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
-cpu_decl(dec_2100_a50);
-
-const char *
-dec_2100_a50_model_name()
-{
- static char s[80];
-
- switch (hwrpb->rpb_variation & SV_ST_MASK) {
- case SV_ST_AVANTI:
- case SV_ST_AVANTI_XXX: /* XXX apparently the same? */
- return "AlphaStation 400 4/233 (\"Avanti\")";
-
- case SV_ST_MUSTANG2_4_166:
- return "AlphaStation 200 4/166 (\"Mustang II\")";
-
- case SV_ST_MUSTANG2_4_233:
- return "AlphaStation 200 4/233 (\"Mustang II\")";
+#ifdef notyet
+#include "pckbd.h"
+#endif
- case 0x2000:
- return "AlphaStation 250 4/266";
+#ifndef CONSPEED
+#define CONSPEED TTYDEF_SPEED
+#endif
+static int comcnrate = CONSPEED;
+
+void dec_2100_a50_init __P((void));
+static void dec_2100_a50_cons_init __P((void));
+static void dec_2100_a50_device_register __P((struct device *, void *));
+
+const struct alpha_variation_table dec_2100_a50_variations[] = {
+ { SV_ST_AVANTI, "AlphaStation 400 4/233 (\"Avanti\")" },
+ { SV_ST_MUSTANG2_4_166, "AlphaStation 200 4/166 (\"Mustang II\")" },
+ { SV_ST_MUSTANG2_4_233, "AlphaStation 200 4/233 (\"Mustang II\")" },
+ { SV_ST_AVANTI_4_266, "AlphaStation 250 4/266" },
+ { SV_ST_MUSTANG2_4_100, "AlphaStation 200 4/100 (\"Mustang II\")" },
+ { SV_ST_AVANTI_4_233, "AlphaStation 255/233" },
+ { 0, NULL },
+};
- case SV_ST_MUSTANG2_4_100:
- return "AlphaStation 200 4/100 (\"Mustang II\")";
+void
+dec_2100_a50_init()
+{
+ u_int64_t variation;
- case 0xa800:
- return "AlphaStation 255/233";
+ platform.family = "AlphaStation 200/400 (\"Avanti\")";
- default:
- sprintf(s, "DEC 2100/A50 (\"Avanti\") family, variation %lx",
- hwrpb->rpb_variation & SV_ST_MASK);
- return s;
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ variation = hwrpb->rpb_variation & SV_ST_MASK;
+ if (variation == SV_ST_AVANTI_XXX) {
+ /* XXX apparently the same? */
+ variation = SV_ST_AVANTI;
+ }
+ if ((platform.model = alpha_variation_name(variation,
+ dec_2100_a50_variations)) == NULL)
+ platform.model = alpha_unknown_sysname();
}
+
+ platform.iobus = "apecs";
+ platform.cons_init = dec_2100_a50_cons_init;
+ platform.device_register = dec_2100_a50_device_register;
}
-void
+static void
dec_2100_a50_cons_init()
{
struct ctb *ctb;
@@ -101,54 +122,49 @@ dec_2100_a50_cons_init()
/* serial console ... */
/* XXX */
{
- static struct consdev comcons = { NULL, NULL,
- comcngetc, comcnputc, comcnpollc, NODEV, 1 };
-
- /* Delay to allow PROM putchars to complete */
- DELAY(10000);
-
- comconsaddr = 0x3f8;
- comconsinit = 0;
- comconsiot = acp->ac_iot;
- if (bus_space_map(comconsiot, comconsaddr, COM_NPORTS,
- 0, &comconsioh))
- panic("can't map serial console I/O ports");
- comconscflag = (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8;
- cominit(comconsiot, comconsioh, comdefaultrate);
-
- cn_tab = &comcons;
- comcons.cn_dev = makedev(26, 0); /* XXX */
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time,
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / comcnrate);
+
+ if(comcnattach(acp->ac_iot, 0x3f8, comcnrate,
+ COM_FREQ,
+ (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+
break;
}
case 3:
+#if NPCKBD > 0
/* display console ... */
/* XXX */
- if (ctb->ctb_turboslot == 0)
- isa_display_console(acp->ac_iot, acp->ac_memt);
+ (void) pckbc_cnattach(&acp->ac_iot, IO_KBD, PCKBC_KBD_SLOT);
+
+ if (CTB_TURBOSLOT_TYPE(ctb->ctb_turboslot) ==
+ CTB_TURBOSLOT_TYPE_ISA)
+ isa_display_console(&acp->ac_iot, &acp->ac_memt);
else
- pci_display_console(acp->ac_iot, acp->ac_memt,
- &acp->ac_pc, (ctb->ctb_turboslot >> 8) & 0xff,
- ctb->ctb_turboslot & 0xff, 0);
+ pci_display_console(&acp->ac_iot, &acp->ac_memt,
+ &acp->ac_pc, CTB_TURBOSLOT_BUS(ctb->ctb_turboslot),
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot), 0);
+#else
+ panic("not configured to use display && keyboard console");
+#endif
break;
default:
printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
- panic("consinit: unknown console type %d",
+ panic("consinit: unknown console type %ld\n",
ctb->ctb_term_type);
}
}
-const char *
-dec_2100_a50_iobus_name()
-{
-
- return ("apecs");
-}
-
-void
+static void
dec_2100_a50_device_register(dev, aux)
struct device *dev;
void *aux;
@@ -159,13 +175,15 @@ dec_2100_a50_device_register(dev, aux)
struct device *parent = dev->dv_parent;
struct cfdata *cf = dev->dv_cfdata;
struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
if (found)
return;
if (!initted) {
scsiboot = (strcmp(b->protocol, "SCSI") == 0);
- netboot = (strcmp(b->protocol, "BOOTP") == 0);
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
#if 0
printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
#endif
@@ -178,7 +196,7 @@ dec_2100_a50_device_register(dev, aux)
else {
struct pcibus_attach_args *pba = aux;
- if (b->bus != pba->pba_bus)
+ if ((b->slot / 1000) != pba->pba_bus)
return;
pcidev = dev;
@@ -195,7 +213,7 @@ dec_2100_a50_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
@@ -250,7 +268,7 @@ dec_2100_a50_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
diff --git a/sys/arch/alpha/alpha/dec_3000_300.c b/sys/arch/alpha/alpha/dec_3000_300.c
index 7b2ee0de67a..e1297486dff 100644
--- a/sys/arch/alpha/alpha/dec_3000_300.c
+++ b/sys/arch/alpha/alpha/dec_3000_300.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: dec_3000_300.c,v 1.5 1997/01/24 19:56:24 niklas Exp $ */
-/* $NetBSD: dec_3000_300.c,v 1.10 1996/11/12 05:14:30 cgd Exp $ */
+/* $NetBSD: dec_3000_300.c,v 1.30 2000/05/22 20:13:32 thorpej Exp $ */
/*
* Copyright (c) 1995, 1996 Carnegie-Mellon University.
@@ -28,111 +27,207 @@
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
+
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
-#include <machine/rpb.h>
+#include <sys/termios.h>
+#include <sys/conf.h>
+#include <machine/rpb.h>
#include <machine/autoconf.h>
#include <machine/cpuconf.h>
#include <dev/tc/tcvar.h>
-
#include <alpha/tc/tcdsvar.h>
+#include <alpha/tc/tc_3000_300.h>
+#ifndef NEW_SCC_DRIVER
+#include <alpha/tc/sccvar.h>
+#endif
+
+#if 0
+#include <machine/z8530var.h>
+#include <dev/dec/zskbdvar.h>
+#endif
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
-char *dec_3000_300_modelname __P((void));
-void dec_3000_300_cons_init __P((void));
-const char *dec_3000_300_iobus_name __P((void));
-void dec_3000_300_device_register __P((struct device *, void *));
+#include "wsdisplay.h"
+
+void dec_3000_300_init __P((void));
+static void dec_3000_300_cons_init __P((void));
+static void dec_3000_300_device_register __P((struct device *, void *));
-cpu_decl(dec_3000_300);
+const struct alpha_variation_table dec_3000_300_variations[] = {
+ { SV_ST_PELICAN, "DEC 3000/300 (\"Pelican\")" },
+ { SV_ST_PELICA, "DEC 3000/300L (\"Pelica\")" },
+ { SV_ST_PELICANPLUS, "DEC 3000/300X (\"Pelican+\")" },
+ { SV_ST_PELICAPLUS, "DEC 3000/300LX (\"Pelica+\")" },
+ { 0, NULL },
+};
-const char *
-dec_3000_300_model_name()
+void
+dec_3000_300_init()
{
+ u_int64_t variation;
- switch (hwrpb->rpb_variation & SV_ST_MASK) {
- case SV_ST_PELICAN:
- return "DEC 3000/300 (\"Pelican\")";
+ platform.family = "DEC 3000/300 (\"Pelican\")";
- case SV_ST_PELICA:
- return "DEC 3000/300L (\"Pelica\")";
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ variation = hwrpb->rpb_variation & SV_ST_MASK;
+ if ((platform.model = alpha_variation_name(variation,
+ dec_3000_300_variations)) == NULL)
+ platform.model = alpha_unknown_sysname();
+ }
- case SV_ST_PELICANPLUS:
- return "DEC 3000/300X (\"Pelican+\")";
+ platform.iobus = "tcasic";
+ platform.cons_init = dec_3000_300_cons_init;
+ platform.device_register = dec_3000_300_device_register;
+}
- case SV_ST_PELICAPLUS:
- return "DEC 3000/300LX (\"Pelica+\")";
+static void
+dec_3000_300_cons_init()
+{
+ struct ctb *ctb;
+ ctb = (struct ctb *)(((caddr_t)hwrpb) + hwrpb->rpb_ctb_off);
+
+#ifndef NEW_SCC_DRIVER
+ switch (ctb->ctb_term_type) {
+ case CTB_GRAPHICS:
+#if 0
+ alpha_donot_kludge_scc = 1;
+#endif
+ return;
+ case CTB_PRINTERPORT:
+ return;
default:
- printf("unknown system variation %lx\n",
- hwrpb->rpb_variation & SV_ST_MASK);
- return NULL;
+ goto badconsole;
}
-}
+#else
+ switch (ctb->ctb_term_type) {
+ case CTB_GRAPHICS:
+#if NWSDISPLAY > 0
+ /* display console ... */
+ if (zs_ioasic_lk201_cnattach(0x1a0000000, 0x00180000, 0) == 0 &&
+ tc_3000_300_fb_cnattach(
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot)) == 0) {
+ break;
+ }
+#endif
+ printf("consinit: Unable to init console on keyboard and ");
+ printf("TURBOchannel slot 0x%lx.\n", ctb->ctb_turboslot);
+ printf("Using serial console.\n");
+ /* FALLTHROUGH */
-void
-dec_3000_300_cons_init()
-{
-}
+ case CTB_PRINTERPORT:
+ /* serial console ... */
+ /*
+ * XXX This could stand some cleanup...
+ */
+ {
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time.
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / 9600); /* XXX */
-const char *
-dec_3000_300_iobus_name()
-{
- return ("tcasic");
+ /*
+ * Console is channel B of the first SCC.
+ * XXX Should use ctb_line_off to get the
+ * XXX line parameters.
+ */
+ if (zs_ioasic_cnattach(0x1a0000000, 0x00100000, 1,
+ 9600, (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+ break;
+ }
+
+ default:
+ goto badconsole;
+ }
+#endif
+ return;
+badconsole:
+ printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
+ printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
+
+ panic("consinit: unknown console type %lu\n",
+ ctb->ctb_term_type);
}
-void
+static void
dec_3000_300_device_register(dev, aux)
struct device *dev;
void *aux;
{
static int found, initted, scsiboot, netboot;
static struct device *scsidev;
+ static struct device *tcdsdev;
struct bootdev_data *b = bootdev_data;
struct device *parent = dev->dv_parent;
struct cfdata *cf = dev->dv_cfdata;
struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
if (found)
return;
if (!initted) {
scsiboot = (strcmp(b->protocol, "SCSI") == 0);
- netboot = (strcmp(b->protocol, "BOOTP") == 0);
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
#if 0
printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
#endif
- initted =1;
+ initted = 1;
}
- if (scsiboot && (strcmp(cd->cd_name, "esp") == 0)) {
- if (b->slot == 4 &&
- strcmp(parent->dv_cfdata->cf_driver->cd_name, "tcds")
- == 0) {
- struct tcdsdev_attach_args *tcdsdev = aux;
+ /*
+ * for scsi boot, we look for "tcds", make sure it has the
+ * right slot number, then find the "asc" on this tcds that
+ * as the right channel. then we find the actual scsi
+ * device we came from. note: no SCSI LUN support (yet).
+ */
+ if (scsiboot && (strcmp(cd->cd_name, "tcds") == 0)) {
+ struct tc_attach_args *tcargs = aux;
+
+ if (b->slot != tcargs->ta_slot)
+ return;
- if (tcdsdev->tcdsda_slot == b->channel) {
- scsidev = dev;
+ tcdsdev = dev;
#if 0
- printf("\nscsidev = %s\n", dev->dv_xname);
+ printf("\ntcdsdev = %s\n", dev->dv_xname);
+#endif
+ }
+ if (scsiboot && tcdsdev &&
+ (strcmp(cd->cd_name, "asc") == 0)) {
+ struct tcdsdev_attach_args *ta = aux;
+
+ if (parent != (struct device *)tcdsdev)
+ return;
+
+ if (ta->tcdsda_slot != b->channel)
+ return;
+
+ scsidev = dev;
+#if 0
+ printf("\nscsidev = %s\n", dev->dv_xname);
#endif
- }
- }
}
- if (scsiboot &&
+ if (scsiboot && scsidev &&
(strcmp(cd->cd_name, "sd") == 0 ||
strcmp(cd->cd_name, "st") == 0 ||
strcmp(cd->cd_name, "cd") == 0)) {
struct scsibus_attach_args *sa = aux;
- if (scsidev == NULL)
- return;
-
if (parent->dv_parent != scsidev)
return;
diff --git a/sys/arch/alpha/alpha/dec_3000_500.c b/sys/arch/alpha/alpha/dec_3000_500.c
index 1479933f501..b8b46ec8820 100644
--- a/sys/arch/alpha/alpha/dec_3000_500.c
+++ b/sys/arch/alpha/alpha/dec_3000_500.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: dec_3000_500.c,v 1.5 1997/01/24 19:56:25 niklas Exp $ */
-/* $NetBSD: dec_3000_500.c,v 1.9 1996/11/12 05:14:31 cgd Exp $ */
+/* $NetBSD: dec_3000_500.c,v 1.29 2000/05/22 20:13:32 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -27,130 +26,227 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
-#include <machine/rpb.h>
+#include <sys/termios.h>
+#include <dev/cons.h>
+#include <sys/conf.h>
+#include <machine/rpb.h>
#include <machine/autoconf.h>
#include <machine/cpuconf.h>
#include <dev/tc/tcvar.h>
-
#include <alpha/tc/tcdsvar.h>
+#include <alpha/tc/tc_3000_500.h>
+#ifndef NEW_SCC_DRIVER
+#include <alpha/tc/sccvar.h>
+#endif
+
+#if 0
+#include <machine/z8530var.h>
+#include <dev/dec/zskbdvar.h>
+#endif
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
-char *dec_3000_500_modelname __P((void));
-void dec_3000_500_cons_init __P((void));
-const char *dec_3000_500_iobus_name __P((void));
-void dec_3000_500_device_register __P((struct device *, void *));
-
-cpu_decl(dec_3000_500);
+#include "wsdisplay.h"
-const char *
-dec_3000_500_model_name()
-{
+void dec_3000_500_init __P((void));
+static void dec_3000_500_cons_init __P((void));
+static void dec_3000_500_device_register __P((struct device *, void *));
- switch (hwrpb->rpb_variation & SV_ST_MASK) {
- case SV_ST_SANDPIPER:
-systype_sandpiper:
- return "DEC 3000/400 (\"Sandpiper\")";
+static const char dec_3000_500_sp[] = "DEC 3000/400 (\"Sandpiper\")";
+static const char dec_3000_500_sf[] = "DEC 3000/500 (\"Flamingo\")";
- case SV_ST_FLAMINGO:
-systype_flamingo:
- return "DEC 3000/500 (\"Flamingo\")";
+const struct alpha_variation_table dec_3000_500_variations[] = {
+ { SV_ST_SANDPIPER, dec_3000_500_sp },
+ { SV_ST_FLAMINGO, dec_3000_500_sf },
+ { SV_ST_HOTPINK, "DEC 3000/500X (\"Hot Pink\")" },
+ { SV_ST_FLAMINGOPLUS, "DEC 3000/800 (\"Flamingo+\")" },
+ { SV_ST_SANDPLUS, "DEC 3000/600 (\"Sandpiper+\")" },
+ { SV_ST_SANDPIPER45, "DEC 3000/700 (\"Sandpiper45\")" },
+ { SV_ST_FLAMINGO45, "DEC 3000/900 (\"Flamingo45\")" },
+ { 0, NULL },
+};
- case SV_ST_HOTPINK:
- return "DEC 3000/500X (\"Hot Pink\")";
+void
+dec_3000_500_init()
+{
+ u_int64_t variation;
- case SV_ST_FLAMINGOPLUS:
- case SV_ST_ULTRA:
- return "DEC 3000/800 (\"Flamingo+\")";
+ platform.family = "DEC 3000/500 (\"Flamingo\")";
- case SV_ST_SANDPLUS:
- return "DEC 3000/600 (\"Sandpiper+\")";
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ variation = hwrpb->rpb_variation & SV_ST_MASK;
+ if (variation == SV_ST_ULTRA) {
+ /* These are really the same. */
+ variation = SV_ST_FLAMINGOPLUS;
+ }
+ if ((platform.model = alpha_variation_name(variation,
+ dec_3000_500_variations)) == NULL) {
+ /*
+ * This is how things used to be done.
+ */
+ if (variation == SV_ST_RESERVED) {
+ if (hwrpb->rpb_variation & SV_GRAPHICS)
+ platform.model = dec_3000_500_sf;
+ else
+ platform.model = dec_3000_500_sp;
+ } else
+ platform.model = alpha_unknown_sysname();
+ }
+ }
- case SV_ST_SANDPIPER45:
- return "DEC 3000/700 (\"Sandpiper45\")";
+ platform.iobus = "tcasic";
+ platform.cons_init = dec_3000_500_cons_init;
+ platform.device_register = dec_3000_500_device_register;
+}
- case SV_ST_FLAMINGO45:
- return "DEC 3000/900 (\"Flamingo45\")";
+static void
+dec_3000_500_cons_init()
+{
+ struct ctb *ctb;
- case SV_ST_RESERVED: /* this is how things used to be done */
- if (hwrpb->rpb_variation & SV_GRAPHICS)
- goto systype_flamingo;
- else
- goto systype_sandpiper;
+ ctb = (struct ctb *)(((caddr_t)hwrpb) + hwrpb->rpb_ctb_off);
+#ifndef NEW_SCC_DRIVER
+ switch (ctb->ctb_term_type) {
+ case CTB_GRAPHICS:
+#ifdef notyet
+ alpha_donot_kludge_scc = 1;
+#endif
+ return;
+ case CTB_PRINTERPORT:
+ return;
default:
- printf("unknown system variation %lx\n",
- hwrpb->rpb_variation & SV_ST_MASK);
- return NULL;
+ goto badconsole;
}
-}
+#else
+
+ switch (ctb->ctb_term_type) {
+ case CTB_GRAPHICS:
+#if NWSDISPLAY > 0
+ /* display console ... */
+ if (zs_ioasic_lk201_cnattach(0x1e0000000, 0x00180000, 0) == 0 &&
+ tc_3000_500_fb_cnattach(
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot)) == 0) {
+ break;
+ }
+#endif
+ printf("consinit: Unable to init console on keyboard and ");
+ printf("TURBOchannel slot 0x%lx.\n", ctb->ctb_turboslot);
+ printf("Using serial console.\n");
+ /* FALLTHROUGH */
-void
-dec_3000_500_cons_init()
-{
-}
+ case CTB_PRINTERPORT:
+ /* serial console ... */
+ /*
+ * XXX This could stand some cleanup...
+ */
+ {
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time,
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / 9600); /* XXX */
-const char *
-dec_3000_500_iobus_name()
-{
- return ("tcasic");
+ /*
+ * Console is channel B of the second SCC.
+ * XXX Should use ctb_line_off to get the
+ * XXX line parameters--these are the defaults.
+ */
+ if (zs_ioasic_cnattach(0x1e0000000, 0x00180000, 1,
+ 9600, (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+ break;
+ }
+ default:
+ goto badconsole;
+ }
+#endif
+ return;
+badconsole:
+ printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
+ printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
+
+ panic("consinit: unknown console type %lu\n",
+ ctb->ctb_term_type);
}
-void
+static void
dec_3000_500_device_register(dev, aux)
struct device *dev;
void *aux;
{
static int found, initted, scsiboot, netboot;
static struct device *scsidev;
+ static struct device *tcdsdev;
struct bootdev_data *b = bootdev_data;
struct device *parent = dev->dv_parent;
struct cfdata *cf = dev->dv_cfdata;
struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
if (found)
return;
if (!initted) {
scsiboot = (strcmp(b->protocol, "SCSI") == 0);
- netboot = (strcmp(b->protocol, "BOOTP") == 0);
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
#if 0
printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
#endif
- initted =1;
+ initted = 1;
}
- if (scsiboot && (strcmp(cd->cd_name, "esp") == 0)) {
- if (b->slot == 6 &&
- strcmp(parent->dv_cfdata->cf_driver->cd_name, "tcds")
- == 0) {
- struct tcdsdev_attach_args *tcdsdev = aux;
+ /*
+ * for scsi boot, we look for "tcds", make sure it has the
+ * right slot number, then find the "asc" on this tcds that
+ * as the right channel. then we find the actual scsi
+ * device we came from. note: no SCSI LUN support (yet).
+ */
+ if (scsiboot && (strcmp(cd->cd_name, "tcds") == 0)) {
+ struct tc_attach_args *tcargs = aux;
+
+ if (b->slot != tcargs->ta_slot)
+ return;
- if (tcdsdev->tcdsda_slot == b->channel) {
- scsidev = dev;
+ tcdsdev = dev;
#if 0
- printf("\nscsidev = %s\n", dev->dv_xname);
+ printf("\ntcdsdev = %s\n", dev->dv_xname);
+#endif
+ }
+ if (scsiboot && tcdsdev &&
+ (strcmp(cd->cd_name, "asc") == 0)) {
+ struct tcdsdev_attach_args *ta = aux;
+
+ if (parent != (struct device *)tcdsdev)
+ return;
+
+ if (ta->tcdsda_slot != b->channel)
+ return;
+
+ scsidev = dev;
+#if 0
+ printf("\nscsidev = %s\n", dev->dv_xname);
#endif
- }
- }
}
- if (scsiboot &&
+ if (scsiboot && scsidev &&
(strcmp(cd->cd_name, "sd") == 0 ||
strcmp(cd->cd_name, "st") == 0 ||
strcmp(cd->cd_name, "cd") == 0)) {
struct scsibus_attach_args *sa = aux;
- if (scsidev == NULL)
- return;
-
if (parent->dv_parent != scsidev)
return;
diff --git a/sys/arch/alpha/alpha/dec_550.c b/sys/arch/alpha/alpha/dec_550.c
new file mode 100644
index 00000000000..276f1e1de47
--- /dev/null
+++ b/sys/arch/alpha/alpha/dec_550.c
@@ -0,0 +1,312 @@
+/* $NetBSD: dec_550.c,v 1.10 2000/06/20 03:48:53 matt Exp $ */
+
+/*
+ * Copyright (c) 1995, 1996, 1997 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/termios.h>
+#include <dev/cons.h>
+#include <sys/conf.h>
+
+#include <machine/rpb.h>
+#include <machine/autoconf.h>
+#include <machine/cpuconf.h>
+#include <machine/bus.h>
+
+#include <dev/ic/comreg.h>
+#include <dev/ic/comvar.h>
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#include <dev/ic/i8042reg.h>
+#ifdef notyet
+#include <dev/ic/pckbcvar.h>
+#endif
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <alpha/pci/ciareg.h>
+#include <alpha/pci/ciavar.h>
+
+#include <scsi/scsi_all.h>
+#include <scsi/scsiconf.h>
+#include <dev/ata/atavar.h>
+
+/* Write this to Pyxis General Purpose Output to turn off the power. */
+#define DEC_550_PYXIS_GPO_POWERDOWN 0x00000400
+
+#ifdef notyet
+#include "pckbd.h"
+#endif
+
+#ifndef CONSPEED
+#define CONSPEED TTYDEF_SPEED
+#endif
+static int comcnrate = CONSPEED;
+
+#define DR_VERBOSE(f) while (0)
+
+void dec_550_init __P((void));
+static void dec_550_cons_init __P((void));
+static void dec_550_device_register __P((struct device *, void *));
+static void dec_550_powerdown __P((void));
+
+void
+dec_550_init()
+{
+
+ platform.family = "Digital Personal Workstation";
+
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ /* XXX Don't know the system variations, yet. */
+ platform.model = alpha_unknown_sysname();
+ }
+
+ platform.iobus = "cia";
+ platform.cons_init = dec_550_cons_init;
+ platform.device_register = dec_550_device_register;
+ platform.powerdown = dec_550_powerdown;
+}
+
+static void
+dec_550_cons_init()
+{
+ struct ctb *ctb;
+ struct cia_config *ccp;
+ extern struct cia_config cia_configuration;
+
+ ccp = &cia_configuration;
+ cia_init(ccp, 0);
+
+ ctb = (struct ctb *)(((caddr_t)hwrpb) + hwrpb->rpb_ctb_off);
+
+ switch (ctb->ctb_term_type) {
+ case 2:
+ /* serial console ... */
+ /* XXX */
+ {
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time,
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / comcnrate);
+
+ if(comcnattach(ccp->cc_iot, 0x3f8, comcnrate,
+ COM_FREQ,
+ (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+
+ break;
+ }
+
+ case 3:
+#if NPCKBD > 0
+ /* display console ... */
+ /* XXX */
+ (void) pckbc_cnattach(&ccp->cc_iot, IO_KBD, KBCMDP,
+ PCKBC_KBD_SLOT);
+
+ if (CTB_TURBOSLOT_TYPE(ctb->ctb_turboslot) ==
+ CTB_TURBOSLOT_TYPE_ISA)
+ isa_display_console(&ccp->cc_iot, &ccp->cc_memt);
+ else
+ pci_display_console(&ccp->cc_iot, &ccp->cc_memt,
+ &ccp->cc_pc, CTB_TURBOSLOT_BUS(ctb->ctb_turboslot),
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot), 0);
+#else
+ panic("not configured to use display && keyboard console");
+#endif
+ break;
+
+ default:
+ printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
+ printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
+
+ panic("consinit: unknown console type %ld\n",
+ ctb->ctb_term_type);
+ }
+}
+
+static void
+dec_550_device_register(dev, aux)
+ struct device *dev;
+ void *aux;
+{
+ static int found, initted, scsiboot, ideboot, netboot;
+ static struct device *pcidev, *scsidev;
+ struct bootdev_data *b = bootdev_data;
+ struct device *parent = dev->dv_parent;
+ struct cfdata *cf = dev->dv_cfdata;
+ struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
+
+ if (found)
+ return;
+
+ if (!initted) {
+ scsiboot = (strcmp(b->protocol, "SCSI") == 0);
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
+ /*
+ * Add an extra check to boot from ide drives:
+ * Newer SRM firmware use the protocol identifier IDE,
+ * older SRM firmware use the protocol identifier SCSI.
+ */
+ ideboot = (strcmp(b->protocol, "IDE") == 0);
+ DR_VERBOSE(printf("scsiboot = %d, ideboot = %d, netboot = %d\n",
+ scsiboot, ideboot, netboot));
+ initted = 1;
+ }
+
+ if (pcidev == NULL) {
+ if (strcmp(cd->cd_name, "pci"))
+ return;
+ else {
+ struct pcibus_attach_args *pba = aux;
+
+ if ((b->slot / 1000) != pba->pba_bus)
+ return;
+
+ pcidev = dev;
+ DR_VERBOSE(printf("\npcidev = %s\n",
+ pcidev->dv_xname));
+ return;
+ }
+ }
+
+ if ((ideboot || scsiboot) && (scsidev == NULL)) {
+ if (parent != pcidev)
+ return;
+ else {
+ struct pci_attach_args *pa = aux;
+
+ if ((b->slot % 1000) != pa->pa_device)
+ return;
+
+ /* XXX function? */
+
+ scsidev = dev;
+ DR_VERBOSE(printf("\nscsidev = %s\n",
+ scsidev->dv_xname));
+ return;
+ }
+ }
+
+ if ((ideboot || scsiboot) &&
+ (!strcmp(cd->cd_name, "sd") ||
+ !strcmp(cd->cd_name, "st") ||
+ !strcmp(cd->cd_name, "cd"))) {
+ struct scsibus_attach_args *sa = aux;
+
+ if (parent->dv_parent != scsidev)
+ return;
+
+ if (b->unit / 100 != sa->sa_sc_link->target)
+ return;
+
+ /* XXX LUN! */
+
+ switch (b->boot_dev_type) {
+ case 0:
+ if (strcmp(cd->cd_name, "sd") &&
+ strcmp(cd->cd_name, "cd"))
+ return;
+ break;
+ case 1:
+ if (strcmp(cd->cd_name, "st"))
+ return;
+ break;
+ default:
+ return;
+ }
+
+ /* we've found it! */
+ booted_device = dev;
+ DR_VERBOSE(printf("\nbooted_device = %s\n",
+ booted_device->dv_xname));
+ found = 1;
+ }
+
+ /*
+ * Support to boot from IDE drives.
+ */
+ if ((ideboot || scsiboot) && !strcmp(cd->cd_name, "wd")) {
+ struct ata_atapi_attach *aa_link = aux;
+ if ((strncmp("pciide", parent->dv_xname, 6) != 0)) {
+ return;
+ } else {
+ if (parent != scsidev)
+ return;
+ }
+ DR_VERBOSE(printf("\nAtapi info: drive: %d, channel %d\n",
+ aa_link->aa_drv_data->drive, aa_link->aa_channel));
+ DR_VERBOSE(printf("Bootdev info: unit: %d, channel: %d\n",
+ b->unit, b->channel));
+ if (b->unit != aa_link->aa_drv_data->drive ||
+ b->channel != aa_link->aa_channel)
+ return;
+
+ /* we've found it! */
+ booted_device = dev;
+ DR_VERBOSE(printf("booted_device = %s\n",
+ booted_device->dv_xname));
+ found = 1;
+ }
+
+ if (netboot) {
+ if (parent != pcidev)
+ return;
+ else {
+ struct pci_attach_args *pa = aux;
+
+ if ((b->slot % 1000) != pa->pa_device)
+ return;
+
+ /* XXX function? */
+
+ booted_device = dev;
+ DR_VERBOSE(printf("\nbooted_device = %s\n",
+ booted_device->dv_xname));
+ found = 1;
+ return;
+ }
+ }
+}
+
+static void
+dec_550_powerdown()
+{
+
+ REGVAL(PYXIS_GPO) = DEC_550_PYXIS_GPO_POWERDOWN;
+ alpha_mb();
+}
diff --git a/sys/arch/alpha/alpha/dec_axppci_33.c b/sys/arch/alpha/alpha/dec_axppci_33.c
index 078abb94a36..427effa90fc 100644
--- a/sys/arch/alpha/alpha/dec_axppci_33.c
+++ b/sys/arch/alpha/alpha/dec_axppci_33.c
@@ -1,8 +1,7 @@
-/* $OpenBSD: dec_axppci_33.c,v 1.8 1999/01/11 05:10:59 millert Exp $ */
-/* $NetBSD: dec_axppci_33.c,v 1.16 1996/11/25 03:59:20 cgd Exp $ */
+/* $NetBSD: dec_axppci_33.c,v 1.44 2000/05/22 20:13:32 thorpej Exp $ */
/*
- * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * Copyright (c) 1995, 1996, 1997 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
@@ -27,20 +26,30 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/termios.h>
#include <dev/cons.h>
+#include <sys/conf.h>
#include <machine/rpb.h>
+#include <machine/alpha.h>
#include <machine/autoconf.h>
#include <machine/cpuconf.h>
-#include <dev/isa/isavar.h>
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#if 0
+#include <dev/ic/pckbcvar.h>
+#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -50,32 +59,96 @@
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
-cpu_decl(dec_axppci_33);
+#if 0
+#include "pckbd.h"
+#endif
+
+#ifndef CONSPEED
+#define CONSPEED TTYDEF_SPEED
+#endif
+static int comcnrate = CONSPEED;
+
+void dec_axppci_33_init __P((void));
+static void dec_axppci_33_cons_init __P((void));
+static void dec_axppci_33_device_register __P((struct device *, void *));
+
+const struct alpha_variation_table dec_axppci_33_variations[] = {
+ { 0, "Alpha PC AXPpci33 (\"NoName\")" },
+ { 0, NULL },
+};
+
+static struct lca_config *lca_preinit __P((void));
-const char *
-dec_axppci_33_model_name()
+static struct lca_config *
+lca_preinit()
{
+ extern struct lca_config lca_configuration;
- switch (hwrpb->rpb_variation & SV_ST_MASK) {
- case 0: /* XXX */
- return "Alpha PC AXPpci33 (\"NoName\")";
-
- default:
- printf("unknown system variation %lx\n",
- hwrpb->rpb_variation & SV_ST_MASK);
- return NULL;
- }
+ lca_init(&lca_configuration, 0);
+ return &lca_configuration;
}
+#define NSIO_PORT 0x26e /* Hardware enabled option: 0x398 */
+#define NSIO_BASE 0
+#define NSIO_INDEX NSIO_BASE
+#define NSIO_DATA 1
+#define NSIO_SIZE 2
+#define NSIO_CFG0 0
+#define NSIO_CFG1 1
+#define NSIO_CFG2 2
+#define NSIO_IDE_ENABLE 0x40
+
void
+dec_axppci_33_init()
+{
+ int cfg0val;
+ u_int64_t variation;
+ bus_space_tag_t iot;
+ struct lca_config *lcp;
+ bus_space_handle_t nsio;
+#define A33_NSIOBARRIER(type) bus_space_barrier(iot, nsio,\
+ NSIO_BASE, NSIO_SIZE, (type))
+
+ platform.family = "DEC AXPpci";
+
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ variation = hwrpb->rpb_variation & SV_ST_MASK;
+ if ((platform.model = alpha_variation_name(variation,
+ dec_axppci_33_variations)) == NULL)
+ platform.model = alpha_unknown_sysname();
+ }
+
+ platform.iobus = "lca";
+ platform.cons_init = dec_axppci_33_cons_init;
+ platform.device_register = dec_axppci_33_device_register;
+
+ lcp = lca_preinit();
+ iot = lcp->lc_iot;
+ if (bus_space_map(iot, NSIO_PORT, NSIO_SIZE, 0, &nsio))
+ return;
+
+ bus_space_write_1(iot, nsio, NSIO_INDEX, NSIO_CFG0);
+ A33_NSIOBARRIER(BUS_BARRIER_READ | BUS_BARRIER_WRITE);
+ cfg0val = bus_space_read_1(iot, nsio, NSIO_DATA);
+
+ cfg0val |= NSIO_IDE_ENABLE;
+
+ bus_space_write_1(iot, nsio, NSIO_INDEX, NSIO_CFG0);
+ A33_NSIOBARRIER(BUS_BARRIER_WRITE);
+ bus_space_write_1(iot, nsio, NSIO_DATA, cfg0val);
+ A33_NSIOBARRIER(BUS_BARRIER_WRITE);
+ bus_space_write_1(iot, nsio, NSIO_DATA, cfg0val);
+
+ /* Leave nsio mapped to catch any accidental port space collisions */
+}
+
+static void
dec_axppci_33_cons_init()
{
struct ctb *ctb;
struct lca_config *lcp;
- extern struct lca_config lca_configuration;
- lcp = &lca_configuration;
- lca_init(lcp, 0);
+ lcp = lca_preinit();
ctb = (struct ctb *)(((caddr_t)hwrpb) + hwrpb->rpb_ctb_off);
@@ -84,56 +157,49 @@ dec_axppci_33_cons_init()
/* serial console ... */
/* XXX */
{
- static struct consdev comcons = { NULL, NULL,
- comcngetc, comcnputc, comcnpollc, NODEV, 1 };
-
- /* Delay to allow PROM putchars to complete */
- DELAY(10000);
-
- comconsaddr = 0x3f8;
- comconsinit = 0;
- comconsiot = lcp->lc_iot;
- if (bus_space_map(comconsiot, comconsaddr, COM_NPORTS,
- 0, &comconsioh))
- panic("can't map serial console I/O ports");
- comconscflag = (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8;
- cominit(comconsiot, comconsioh, comdefaultrate);
-
- cn_tab = &comcons;
- comcons.cn_dev = makedev(26, 0); /* XXX */
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time,
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / comcnrate);
+
+ if(comcnattach(lcp->lc_iot, 0x3f8, comcnrate,
+ COM_FREQ,
+ (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+
break;
}
case 3:
+#if NPCKBD > 0
/* display console ... */
-#if 0
- printf("turboslot 0x%x\n", ctb->ctb_turboslot);
-#endif
- if ((ctb->ctb_turboslot & 0xffff) == 0)
- isa_display_console(lcp->lc_iot, lcp->lc_memt);
+ /* XXX */
+ (void) pckbc_cnattach(&lcp->lc_iot, IO_KBD, PCKBC_KBD_SLOT);
+
+ if (CTB_TURBOSLOT_TYPE(ctb->ctb_turboslot) ==
+ CTB_TURBOSLOT_TYPE_ISA)
+ isa_display_console(&lcp->lc_iot, &lcp->lc_memt);
else
- pci_display_console(lcp->lc_iot, lcp->lc_memt,
- &lcp->lc_pc, (ctb->ctb_turboslot >> 8) & 0xff,
- ctb->ctb_turboslot & 0xff, 0);
+ pci_display_console(&lcp->lc_iot, &lcp->lc_memt,
+ &lcp->lc_pc, CTB_TURBOSLOT_BUS(ctb->ctb_turboslot),
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot), 0);
+#else
+ panic("not configured to use display && keyboard console");
+#endif
break;
default:
printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
- panic("consinit: unknown console type %d",
+ panic("consinit: unknown console type %ld\n",
ctb->ctb_term_type);
}
}
-const char *
-dec_axppci_33_iobus_name()
-{
-
- return ("lca");
-}
-
-void
+static void
dec_axppci_33_device_register(dev, aux)
struct device *dev;
void *aux;
@@ -144,13 +210,15 @@ dec_axppci_33_device_register(dev, aux)
struct device *parent = dev->dv_parent;
struct cfdata *cf = dev->dv_cfdata;
struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
if (found)
return;
if (!initted) {
scsiboot = (strcmp(b->protocol, "SCSI") == 0);
- netboot = (strcmp(b->protocol, "BOOTP") == 0);
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
#if 0
printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
#endif
@@ -163,7 +231,7 @@ dec_axppci_33_device_register(dev, aux)
else {
struct pcibus_attach_args *pba = aux;
- if (b->bus != pba->pba_bus)
+ if ((b->slot / 1000) != pba->pba_bus)
return;
pcidev = dev;
@@ -180,7 +248,7 @@ dec_axppci_33_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
@@ -235,7 +303,7 @@ dec_axppci_33_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
diff --git a/sys/arch/alpha/alpha/dec_eb164.c b/sys/arch/alpha/alpha/dec_eb164.c
index 89c248be0c6..a3470bd44e0 100644
--- a/sys/arch/alpha/alpha/dec_eb164.c
+++ b/sys/arch/alpha/alpha/dec_eb164.c
@@ -1,8 +1,7 @@
-/* $OpenBSD: dec_eb164.c,v 1.3 1999/01/11 05:10:59 millert Exp $ */
-/* $NetBSD: dec_eb164.c,v 1.4 1996/11/25 03:59:21 cgd Exp $ */
+/* $NetBSD: dec_eb164.c,v 1.33 2000/05/22 20:13:32 thorpej Exp $ */
/*
- * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * Copyright (c) 1995, 1996, 1997 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
@@ -27,20 +26,30 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/termios.h>
#include <dev/cons.h>
+#include <sys/conf.h>
#include <machine/rpb.h>
#include <machine/autoconf.h>
#include <machine/cpuconf.h>
+#include <machine/bus.h>
-#include <dev/isa/isavar.h>
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#if 0
+#include <dev/ic/pckbcvar.h>
+#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -49,22 +58,40 @@
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
+#include <dev/ata/atavar.h>
+
+#if 0
+#include "pckbd.h"
+#endif
+
+#ifndef CONSPEED
+#define CONSPEED TTYDEF_SPEED
+#endif
+static int comcnrate = CONSPEED;
+
+#define DR_VERBOSE(f) while (0)
-cpu_decl(dec_eb164);
+void dec_eb164_init __P((void));
+static void dec_eb164_cons_init __P((void));
+static void dec_eb164_device_register __P((struct device *, void *));
-const char *
-dec_eb164_model_name()
+void
+dec_eb164_init()
{
- switch (hwrpb->rpb_variation & SV_ST_MASK) {
- default:
- printf("unknown system variation %lx\n",
- hwrpb->rpb_variation & SV_ST_MASK);
- return NULL;
+ platform.family = "EB164";
+
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ /* XXX Don't know the system variations, yet. */
+ platform.model = alpha_unknown_sysname();
}
+
+ platform.iobus = "cia";
+ platform.cons_init = dec_eb164_cons_init;
+ platform.device_register = dec_eb164_device_register;
}
-void
+static void
dec_eb164_cons_init()
{
struct ctb *ctb;
@@ -81,75 +108,77 @@ dec_eb164_cons_init()
/* serial console ... */
/* XXX */
{
- static struct consdev comcons = { NULL, NULL,
- comcngetc, comcnputc, comcnpollc, NODEV, 1 };
-
- /* Delay to allow PROM putchars to complete */
- DELAY(10000);
-
- comconsaddr = 0x3f8;
- comconsinit = 0;
- comconsiot = ccp->cc_iot;
- if (bus_space_map(comconsiot, comconsaddr, COM_NPORTS,
- 0, &comconsioh))
- panic("can't map serial console I/O ports");
- comconscflag = (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8;
- cominit(comconsiot, comconsioh, comdefaultrate);
-
- cn_tab = &comcons;
- comcons.cn_dev = makedev(26, 0); /* XXX */
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time,
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / comcnrate);
+
+ if(comcnattach(ccp->cc_iot, 0x3f8, comcnrate,
+ COM_FREQ,
+ (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+
break;
}
case 3:
+#if NPCKBD > 0
/* display console ... */
/* XXX */
- if (ctb->ctb_turboslot == 0)
- isa_display_console(ccp->cc_iot, ccp->cc_memt);
+ (void) pckbc_cnattach(&ccp->cc_iot, IO_KBD, PCKBC_KBD_SLOT);
+
+ if (CTB_TURBOSLOT_TYPE(ctb->ctb_turboslot) ==
+ CTB_TURBOSLOT_TYPE_ISA)
+ isa_display_console(&ccp->cc_iot, &ccp->cc_memt);
else
- pci_display_console(ccp->cc_iot, ccp->cc_memt,
- &ccp->cc_pc, (ctb->ctb_turboslot >> 8) & 0xff,
- ctb->ctb_turboslot & 0xff, 0);
+ pci_display_console(&ccp->cc_iot, &ccp->cc_memt,
+ &ccp->cc_pc, CTB_TURBOSLOT_BUS(ctb->ctb_turboslot),
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot), 0);
+#else
+ panic("not configured to use display && keyboard console");
+#endif
break;
default:
printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
- panic("consinit: unknown console type %d",
+ panic("consinit: unknown console type %ld\n",
ctb->ctb_term_type);
}
}
-const char *
-dec_eb164_iobus_name()
-{
-
- return ("cia");
-}
-
-void
+static void
dec_eb164_device_register(dev, aux)
struct device *dev;
void *aux;
{
- static int found, initted, scsiboot, netboot;
+ static int found, initted, scsiboot, ideboot, netboot;
static struct device *pcidev, *scsidev;
struct bootdev_data *b = bootdev_data;
struct device *parent = dev->dv_parent;
struct cfdata *cf = dev->dv_cfdata;
struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
if (found)
return;
if (!initted) {
scsiboot = (strcmp(b->protocol, "SCSI") == 0);
- netboot = (strcmp(b->protocol, "BOOTP") == 0);
-#if 0
- printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
-#endif
- initted =1;
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
+ /*
+ * Add an extra check to boot from ide drives:
+ * Newer SRM firmware use the protocol identifier IDE,
+ * older SRM firmware use the protocol identifier SCSI.
+ */
+ ideboot = (strcmp(b->protocol, "IDE") == 0);
+ DR_VERBOSE(printf("scsiboot = %d, ideboot = %d, netboot = %d\n",
+ scsiboot, ideboot, netboot));
+ initted = 1;
}
if (pcidev == NULL) {
@@ -158,32 +187,30 @@ dec_eb164_device_register(dev, aux)
else {
struct pcibus_attach_args *pba = aux;
- if (b->bus != pba->pba_bus)
+ if ((b->slot / 1000) != pba->pba_bus)
return;
pcidev = dev;
-#if 0
- printf("\npcidev = %s\n", pcidev->dv_xname);
-#endif
+ DR_VERBOSE(printf("\npcidev = %s\n",
+ pcidev->dv_xname));
return;
}
}
- if (scsiboot && (scsidev == NULL)) {
+ if ((ideboot || scsiboot) && (scsidev == NULL)) {
if (parent != pcidev)
return;
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if (b->slot % 1000 / 100 != pa->pa_function)
+ return;
+ if (b->slot % 100 != pa->pa_device)
return;
-
- /* XXX function? */
scsidev = dev;
-#if 0
- printf("\nscsidev = %s\n", scsidev->dv_xname);
-#endif
+ DR_VERBOSE(printf("\nscsidev = %s\n",
+ scsidev->dv_xname));
return;
}
}
@@ -218,9 +245,34 @@ dec_eb164_device_register(dev, aux)
/* we've found it! */
booted_device = dev;
-#if 0
- printf("\nbooted_device = %s\n", booted_device->dv_xname);
-#endif
+ DR_VERBOSE(printf("\nbooted_device = %s\n",
+ booted_device->dv_xname));
+ found = 1;
+ }
+
+ /*
+ * Support to boot from IDE drives.
+ */
+ if ((ideboot || scsiboot) && !strcmp(cd->cd_name, "wd")) {
+ struct ata_atapi_attach *aa_link = aux;
+ if ((strncmp("pciide", parent->dv_xname, 6) != 0)) {
+ return;
+ } else {
+ if (parent != scsidev)
+ return;
+ }
+ DR_VERBOSE(printf("\nAtapi info: drive: %d, channel %d\n",
+ aa_link->aa_drv_data->drive, aa_link->aa_channel));
+ DR_VERBOSE(printf("Bootdev info: unit: %d, channel: %d\n",
+ b->unit, b->channel));
+ if (b->unit != aa_link->aa_drv_data->drive ||
+ b->channel != aa_link->aa_channel)
+ return;
+
+ /* we've found it! */
+ booted_device = dev;
+ DR_VERBOSE(printf("booted_device = %s\n",
+ booted_device->dv_xname));
found = 1;
}
@@ -230,15 +282,14 @@ dec_eb164_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
booted_device = dev;
-#if 0
- printf("\nbooted_device = %s\n", booted_device->dv_xname);
-#endif
+ DR_VERBOSE(printf("\nbooted_device = %s\n",
+ booted_device->dv_xname));
found = 1;
return;
}
diff --git a/sys/arch/alpha/alpha/dec_kn20aa.c b/sys/arch/alpha/alpha/dec_kn20aa.c
index bbb69cd47a4..b87170cccfb 100644
--- a/sys/arch/alpha/alpha/dec_kn20aa.c
+++ b/sys/arch/alpha/alpha/dec_kn20aa.c
@@ -1,8 +1,7 @@
-/* $OpenBSD: dec_kn20aa.c,v 1.7 1999/01/11 05:10:59 millert Exp $ */
-/* $NetBSD: dec_kn20aa.c,v 1.16 1996/11/25 03:59:22 cgd Exp $ */
+/* $NetBSD: dec_kn20aa.c,v 1.42 2000/05/22 20:13:32 thorpej Exp $ */
/*
- * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * Copyright (c) 1995, 1996, 1997 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
@@ -27,20 +26,30 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/termios.h>
#include <dev/cons.h>
+#include <sys/conf.h>
#include <machine/rpb.h>
#include <machine/autoconf.h>
#include <machine/cpuconf.h>
+#include <machine/bus.h>
-#include <dev/isa/isavar.h>
#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>
+
+#include <dev/isa/isareg.h>
+#include <dev/isa/isavar.h>
+#if 0
+#include <dev/ic/pckbcvar.h>
+#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -50,24 +59,44 @@
#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
-cpu_decl(dec_kn20aa);
+#if 0
+#include "pckbd.h"
+#endif
+
+#ifndef CONSPEED
+#define CONSPEED TTYDEF_SPEED
+#endif
+static int comcnrate = CONSPEED;
+
+void dec_kn20aa_init __P((void));
+static void dec_kn20aa_cons_init __P((void));
+static void dec_kn20aa_device_register __P((struct device *, void *));
+
+const struct alpha_variation_table dec_kn20aa_variations[] = {
+ { 0, "AlphaStation 500 or 600 (KN20AA)" },
+ { 0, NULL },
+};
-const char *
-dec_kn20aa_model_name()
+void
+dec_kn20aa_init()
{
+ u_int64_t variation;
- switch (hwrpb->rpb_variation & SV_ST_MASK) {
- case 0:
- return "AlphaStation 600 5/266 (KN20AA)";
-
- default:
- printf("unknown system variation %lx\n",
- hwrpb->rpb_variation & SV_ST_MASK);
- return NULL;
+ platform.family = "AlphaStation 500 or 600 (KN20AA)";
+
+ if ((platform.model = alpha_dsr_sysname()) == NULL) {
+ variation = hwrpb->rpb_variation & SV_ST_MASK;
+ if ((platform.model = alpha_variation_name(variation,
+ dec_kn20aa_variations)) == NULL)
+ platform.model = alpha_unknown_sysname();
}
+
+ platform.iobus = "cia";
+ platform.cons_init = dec_kn20aa_cons_init;
+ platform.device_register = dec_kn20aa_device_register;
}
-void
+static void
dec_kn20aa_cons_init()
{
struct ctb *ctb;
@@ -84,54 +113,49 @@ dec_kn20aa_cons_init()
/* serial console ... */
/* XXX */
{
- static struct consdev comcons = { NULL, NULL,
- comcngetc, comcnputc, comcnpollc, NODEV, 1 };
-
- /* Delay to allow PROM putchars to complete */
- DELAY(10000);
-
- comconsaddr = 0x3f8;
- comconsinit = 0;
- comconsiot = ccp->cc_iot;
- if (bus_space_map(comconsiot, comconsaddr, COM_NPORTS,
- 0, &comconsioh))
- panic("can't map serial console I/O ports");
- comconscflag = (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8;
- cominit(comconsiot, comconsioh, comdefaultrate);
-
- cn_tab = &comcons;
- comcons.cn_dev = makedev(26, 0); /* XXX */
+ /*
+ * Delay to allow PROM putchars to complete.
+ * FIFO depth * character time,
+ * character time = (1000000 / (defaultrate / 10))
+ */
+ DELAY(160000000 / comcnrate);
+
+ if(comcnattach(ccp->cc_iot, 0x3f8, comcnrate,
+ COM_FREQ,
+ (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8))
+ panic("can't init serial console");
+
break;
}
case 3:
+#if NPCKBD > 0
/* display console ... */
/* XXX */
- if (ctb->ctb_turboslot == 0)
- isa_display_console(ccp->cc_iot, ccp->cc_memt);
+ (void) pckbc_cnattach(&ccp->cc_iot, IO_KBD, PCKBC_KBD_SLOT);
+
+ if (CTB_TURBOSLOT_TYPE(ctb->ctb_turboslot) ==
+ CTB_TURBOSLOT_TYPE_ISA)
+ isa_display_console(&ccp->cc_iot, &ccp->cc_memt);
else
- pci_display_console(ccp->cc_iot, ccp->cc_memt,
- &ccp->cc_pc, (ctb->ctb_turboslot >> 8) & 0xff,
- ctb->ctb_turboslot & 0xff, 0);
+ pci_display_console(&ccp->cc_iot, &ccp->cc_memt,
+ &ccp->cc_pc, CTB_TURBOSLOT_BUS(ctb->ctb_turboslot),
+ CTB_TURBOSLOT_SLOT(ctb->ctb_turboslot), 0);
+#else
+ panic("not configured to use display && keyboard console");
+#endif
break;
default:
printf("ctb->ctb_term_type = 0x%lx\n", ctb->ctb_term_type);
printf("ctb->ctb_turboslot = 0x%lx\n", ctb->ctb_turboslot);
- panic("consinit: unknown console type %d",
+ panic("consinit: unknown console type %ld\n",
ctb->ctb_term_type);
}
}
-const char *
-dec_kn20aa_iobus_name()
-{
-
- return ("cia");
-}
-
-void
+static void
dec_kn20aa_device_register(dev, aux)
struct device *dev;
void *aux;
@@ -142,13 +166,15 @@ dec_kn20aa_device_register(dev, aux)
struct device *parent = dev->dv_parent;
struct cfdata *cf = dev->dv_cfdata;
struct cfdriver *cd = cf->cf_driver;
+ extern struct device *booted_device;
if (found)
return;
if (!initted) {
scsiboot = (strcmp(b->protocol, "SCSI") == 0);
- netboot = (strcmp(b->protocol, "BOOTP") == 0);
+ netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
+ (strcmp(b->protocol, "MOP") == 0);
#if 0
printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
#endif
@@ -161,7 +187,7 @@ dec_kn20aa_device_register(dev, aux)
else {
struct pcibus_attach_args *pba = aux;
- if (b->bus != pba->pba_bus)
+ if ((b->slot / 1000) != pba->pba_bus)
return;
pcidev = dev;
@@ -178,7 +204,7 @@ dec_kn20aa_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
@@ -233,7 +259,7 @@ dec_kn20aa_device_register(dev, aux)
else {
struct pci_attach_args *pa = aux;
- if (b->slot != pa->pa_device)
+ if ((b->slot % 1000) != pa->pa_device)
return;
/* XXX function? */
diff --git a/sys/arch/alpha/alpha/genassym.c b/sys/arch/alpha/alpha/genassym.c
index f1d15a97ecc..fae05343977 100644
--- a/sys/arch/alpha/alpha/genassym.c
+++ b/sys/arch/alpha/alpha/genassym.c
@@ -1,7 +1,8 @@
-/* $OpenBSD: genassym.c,v 1.6 1999/09/26 11:07:32 kstailey Exp $ */
-/* $NetBSD: genassym.c,v 1.9 1996/08/20 23:00:24 cgd Exp $ */
+/* $NetBSD: genassym.c,v 1.27 2000/05/26 00:36:42 thorpej Exp $ */
/*
+ * Copyright (c) 1994, 1995 Gordon W. Ross
+ * Copyright (c) 1993 Adam Glass
* Copyright (c) 1982, 1990, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -33,9 +34,33 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)genassym.c 8.3 (Berkeley) 1/4/94
+ * from: @(#)genassym.c 8.3 (Berkeley) 1/4/94
*/
+/*
+ * This program is designed so that it can be both:
+ * (1) Run on the native machine to generate assym.h
+ * (2) Converted to assembly that genassym.awk will
+ * translate into the same assym.h as (1) does.
+ * The second method is done as follows:
+ * m68k-xxx-gcc [options] -S .../genassym.c
+ * awk -f genassym.awk < genassym.s > assym.h
+ *
+ * Using actual C code here (instead of genassym.cf)
+ * has the advantage that "make depend" automatically
+ * tracks dependencies of this C code on the (many)
+ * header files used here. Also, the awk script used
+ * to convert the assembly output to assym.h is much
+ * smaller and simpler than sys/kern/genassym.sh.
+ *
+ * Both this method and the genassym.cf method have the
+ * disadvantage that they depend on gcc-specific features.
+ * This method depends on the format of assembly output for
+ * data, and the genassym.cf method depends on features of
+ * the gcc asm() statement (inline assembly).
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/map.h>
@@ -44,152 +69,184 @@
#include <sys/msgbuf.h>
#include <sys/syscall.h>
#include <sys/user.h>
-#include <sys/syscall.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/frame.h>
#include <machine/rpb.h>
+#include <machine/vmparam.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <err.h>
-
-#ifdef COMPAT_NETBSD
-# include <compat/netbsd/netbsd_syscall.h>
-#endif
-
-void def __P((char *, long));
-int main __P((int argc, char **argv));
+#include <vm/vm.h>
-#define off(what, s, m) def(what, (int)offsetof(s, m))
+/* Note: Avoid /usr/include for cross compilation! */
+extern void printf __P((const char *fmt, ...));
+extern void exit __P((int));
-void
-def(what, val)
- char *what;
- long val;
-{
+#define offsetof(type, member) ((size_t)(&((type *)0)->member))
- if (printf("#define\t%s\t%ld\n", what, val) < 0)
- err(1, "printf");
-}
+#ifdef __STDC__
+#define def(name, value) { #name, value }
+#define def1(name) { #name, name }
+#define off(name, type, member) { #name, offsetof(type, member) }
+#else
+#define def(name, value) { "name", value }
+#define def1(name) { "name", name }
+#define off(name, type, member) { "name", offsetof(type, member) }
+#endif
-int
-main(argc, argv)
- int argc;
- char **argv;
-{
+/*
+ * Note: genassym.awk cares about the form of this structure,
+ * as well as the names and placement of the "asdefs" array
+ * and the "nassyms" variable below.  Clever, but fragile.
+ */
+struct nv {
+ char n[28];
+ long v;
+};
+struct nv assyms[] = {
/* general constants */
- def("NBPG", NBPG);
- def("PGSHIFT", PGSHIFT);
- def("VM_MAX_ADDRESS", VM_MAX_ADDRESS);
+ def1(NBPG),
+ def1(PGSHIFT),
+ def1(VM_MAX_ADDRESS),
/* Register offsets, for stack frames. */
- def("FRAME_V0", FRAME_V0);
- def("FRAME_T0", FRAME_T0);
- def("FRAME_T1", FRAME_T1);
- def("FRAME_T2", FRAME_T2);
- def("FRAME_T3", FRAME_T3);
- def("FRAME_T4", FRAME_T4);
- def("FRAME_T5", FRAME_T5);
- def("FRAME_T6", FRAME_T6);
- def("FRAME_T7", FRAME_T7);
- def("FRAME_S0", FRAME_S0);
- def("FRAME_S1", FRAME_S1);
- def("FRAME_S2", FRAME_S2);
- def("FRAME_S3", FRAME_S3);
- def("FRAME_S4", FRAME_S4);
- def("FRAME_S5", FRAME_S5);
- def("FRAME_S6", FRAME_S6);
- def("FRAME_A3", FRAME_A3);
- def("FRAME_A4", FRAME_A4);
- def("FRAME_A5", FRAME_A5);
- def("FRAME_T8", FRAME_T8);
- def("FRAME_T9", FRAME_T9);
- def("FRAME_T10", FRAME_T10);
- def("FRAME_T11", FRAME_T11);
- def("FRAME_RA", FRAME_RA);
- def("FRAME_T12", FRAME_T12);
- def("FRAME_AT", FRAME_AT);
- def("FRAME_SP", FRAME_SP);
-
- def("FRAME_SW_SIZE", FRAME_SW_SIZE);
-
- def("FRAME_PS", FRAME_PS);
- def("FRAME_PC", FRAME_PC);
- def("FRAME_GP", FRAME_GP);
- def("FRAME_A0", FRAME_A0);
- def("FRAME_A1", FRAME_A1);
- def("FRAME_A2", FRAME_A2);
-
- def("FRAME_SIZE", FRAME_SIZE);
+ def1(FRAME_V0),
+ def1(FRAME_T0),
+ def1(FRAME_T1),
+ def1(FRAME_T2),
+ def1(FRAME_T3),
+ def1(FRAME_T4),
+ def1(FRAME_T5),
+ def1(FRAME_T6),
+ def1(FRAME_T7),
+ def1(FRAME_S0),
+ def1(FRAME_S1),
+ def1(FRAME_S2),
+ def1(FRAME_S3),
+ def1(FRAME_S4),
+ def1(FRAME_S5),
+ def1(FRAME_S6),
+ def1(FRAME_A3),
+ def1(FRAME_A4),
+ def1(FRAME_A5),
+ def1(FRAME_T8),
+ def1(FRAME_T9),
+ def1(FRAME_T10),
+ def1(FRAME_T11),
+ def1(FRAME_RA),
+ def1(FRAME_T12),
+ def1(FRAME_AT),
+ def1(FRAME_SP),
+
+ def1(FRAME_SW_SIZE),
+
+ def1(FRAME_PS),
+ def1(FRAME_PC),
+ def1(FRAME_GP),
+ def1(FRAME_A0),
+ def1(FRAME_A1),
+ def1(FRAME_A2),
+
+ def1(FRAME_SIZE),
/* bits of the PS register */
- def("ALPHA_PSL_USERMODE", ALPHA_PSL_USERMODE);
- def("ALPHA_PSL_IPL_MASK", ALPHA_PSL_IPL_MASK);
- def("ALPHA_PSL_IPL_0", ALPHA_PSL_IPL_0);
- def("ALPHA_PSL_IPL_SOFT", ALPHA_PSL_IPL_SOFT);
- def("ALPHA_PSL_IPL_HIGH", ALPHA_PSL_IPL_HIGH);
+ def1(ALPHA_PSL_USERMODE),
+ def1(ALPHA_PSL_IPL_MASK),
+ def1(ALPHA_PSL_IPL_0),
+ def1(ALPHA_PSL_IPL_SOFT),
+ def1(ALPHA_PSL_IPL_HIGH),
/* pte bits */
- def("ALPHA_PTE_VALID", ALPHA_PTE_VALID);
- def("ALPHA_PTE_ASM", ALPHA_PTE_ASM);
- def("ALPHA_PTE_KR", ALPHA_PTE_KR);
- def("ALPHA_PTE_KW", ALPHA_PTE_KW);
+ def1(ALPHA_PTE_VALID),
+ def1(ALPHA_PTE_ASM),
+ def1(ALPHA_PTE_KR),
+ def1(ALPHA_PTE_KW),
/* Important offsets into the proc struct & associated constants */
- off("P_FORW", struct proc, p_forw);
- off("P_BACK", struct proc, p_back);
- off("P_ADDR", struct proc, p_addr);
- off("P_VMSPACE", struct proc, p_vmspace);
- off("P_MD_FLAGS", struct proc, p_md.md_flags);
- off("P_MD_PCBPADDR", struct proc, p_md.md_pcbpaddr);
- off("PH_LINK", struct prochd, ph_link);
- off("PH_RLINK", struct prochd, ph_rlink);
-
-#ifndef NEW_PMAP
- /* offsets needed by cpu_switch(), et al., to switch mappings. */
- off("VM_PMAP_STPTE", struct vmspace, vm_pmap.pm_stpte);
- def("USTP_OFFSET", kvtol1pte(VM_MIN_ADDRESS) * sizeof(pt_entry_t));
-#else /* NEW_PMAP */
- off("VM_PMAP", struct vmspace, vm_pmap);
-#endif /* NEW_PMAP */
+ off(P_FORW, struct proc, p_forw),
+ off(P_BACK, struct proc, p_back),
+ off(P_ADDR, struct proc, p_addr),
+ off(P_VMSPACE, struct proc, p_vmspace),
+ off(P_STAT, struct proc, p_stat),
+ off(P_MD_FLAGS, struct proc, p_md.md_flags),
+ off(P_MD_PCBPADDR, struct proc, p_md.md_pcbpaddr),
+ off(PH_LINK, struct prochd, ph_link),
+ off(PH_RLINK, struct prochd, ph_rlink),
+
+	/* XXXXX - Extremely bogus! */
+ def(SONPROC, SRUN),
+ /* XXX */
+
+ /* offsets needed by cpu_switch() to switch mappings. */
+ off(VM_MAP_PMAP, struct vmspace, vm_map.pmap),
/* Important offsets into the user struct & associated constants */
- def("UPAGES", UPAGES);
- off("U_PCB", struct user, u_pcb);
- off("PCB_HWPCB", struct pcb, pcb_hw);
- off("PCB_HWPCB_KSP", struct pcb, pcb_hw.apcb_ksp);
- off("PCB_CONTEXT", struct pcb, pcb_context[0]);
- off("PCB_ONFAULT", struct pcb, pcb_onfault);
- off("PCB_ACCESSADDR", struct pcb, pcb_accessaddr);
+ def1(UPAGES),
+ off(U_PCB, struct user, u_pcb),
+ off(U_PCB_HWPCB, struct user, u_pcb.pcb_hw),
+ off(U_PCB_HWPCB_KSP, struct user, u_pcb.pcb_hw.apcb_ksp),
+ off(U_PCB_CONTEXT, struct user, u_pcb.pcb_context[0]),
+ off(U_PCB_ONFAULT, struct user, u_pcb.pcb_onfault),
+ off(U_PCB_ACCESSADDR, struct user, u_pcb.pcb_accessaddr),
/* Offsets into struct fpstate, for save, restore */
- off("FPREG_FPR_REGS", struct fpreg, fpr_regs[0]);
- off("FPREG_FPR_CR", struct fpreg, fpr_cr);
+ off(FPREG_FPR_REGS, struct fpreg, fpr_regs[0]),
+ off(FPREG_FPR_CR, struct fpreg, fpr_cr),
/* Important other addresses */
- def("HWRPB_ADDR", HWRPB_ADDR); /* Restart parameter block */
- def("VPTBASE", VPTBASE); /* Virtual Page Table base */
+ def1(HWRPB_ADDR), /* Restart parameter block */
+ def1(VPTBASE), /* Virtual Page Table base */
+
+ /* Offsets into the HWRPB. */
+ off(RPB_PRIMARY_CPU_ID, struct rpb, rpb_primary_cpu_id),
/* Kernel entries */
- def("ALPHA_KENTRY_ARITH", ALPHA_KENTRY_ARITH);
- def("ALPHA_KENTRY_MM", ALPHA_KENTRY_MM);
- def("ALPHA_KENTRY_IF", ALPHA_KENTRY_IF);
- def("ALPHA_KENTRY_UNA", ALPHA_KENTRY_UNA);
+ def1(ALPHA_KENTRY_ARITH),
+ def1(ALPHA_KENTRY_MM),
+ def1(ALPHA_KENTRY_IF),
+ def1(ALPHA_KENTRY_UNA),
/* errno values */
- def("ENAMETOOLONG", ENAMETOOLONG);
- def("EFAULT", EFAULT);
+ def1(ENAMETOOLONG),
+ def1(EFAULT),
/* Syscalls called from sigreturn. */
- def("SYS_sigreturn", SYS_sigreturn);
- def("SYS_exit", SYS_exit);
-#ifdef COMPAT_NETBSD
- def("NETBSD_SYS___sigreturn14", NETBSD_SYS___sigreturn14);
- def("NETBSD_SYS_exit", NETBSD_SYS_exit);
-#endif
+ def1(SYS_sigreturn),
+ def1(SYS_exit),
+
+ /* CPU info */
+ off(CPU_INFO_CURPROC, struct cpu_info, ci_curproc),
+ off(CPU_INFO_FPCURPROC, struct cpu_info, ci_fpcurproc),
+ off(CPU_INFO_CURPCB, struct cpu_info, ci_curpcb),
+ off(CPU_INFO_IDLE_PCB_PADDR, struct cpu_info, ci_idle_pcb_paddr),
+ off(CPU_INFO_WANT_RESCHED, struct cpu_info, ci_want_resched),
+ off(CPU_INFO_ASTPENDING, struct cpu_info, ci_astpending),
+ def(CPU_INFO_SIZEOF, sizeof(struct cpu_info)),
+};
+int nassyms = sizeof(assyms)/sizeof(assyms[0]);
+
+int main __P((int argc, char **argv));
+
+int
+main(argc, argv)
+ int argc;
+ char **argv;
+{
+ char *name;
+ long i, val;
+
+ for (i = 0; i < nassyms; i++) {
+ name = assyms[i].n;
+ val = assyms[i].v;
+
+ printf("#define\t%s\t", name);
+ /* Hack to make the output easier to verify. */
+ if ((val < 0) || (val > 999))
+ printf("0x%lx\n", val);
+ else
+ printf("%ld\n", val);
+ }
exit(0);
}
diff --git a/sys/arch/alpha/alpha/interrupt.c b/sys/arch/alpha/alpha/interrupt.c
index b1df45d638d..a24f42aa2db 100644
--- a/sys/arch/alpha/alpha/interrupt.c
+++ b/sys/arch/alpha/alpha/interrupt.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: interrupt.c,v 1.8 1999/09/25 16:23:49 pjanzen Exp $ */
-/* $NetBSD: interrupt.c,v 1.14 1996/11/13 22:20:54 cgd Exp $ */
+/* $NetBSD: interrupt.c,v 1.44 2000/05/23 05:12:53 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -27,149 +26,205 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
+/*
+ * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
+ * Redistribute and modify at will, leaving only this additional copyright
+ * notice.
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <vm/vm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/atomic.h>
#include <machine/autoconf.h>
+#include <machine/cpu.h>
#include <machine/reg.h>
+#include <machine/rpb.h>
#include <machine/frame.h>
+#include <machine/cpuconf.h>
+#include <machine/intrcnt.h>
+#include <machine/alpha.h>
-#ifdef EVCNT_COUNTERS
+#if defined(MULTIPROCESSOR)
#include <sys/device.h>
-#else
-#include <machine/intrcnt.h>
#endif
-extern int schedhz;
-
static u_int schedclk2;
-struct logout {
-#define LOGOUT_RETRY 0x1000000000000000 /* Retry bit. */
-#define LOGOUT_LENGTH 0xffff /* Length mask. */
- u_int64_t q1; /* Retry and length */
- /* Unspecified. */
-};
-
-void interrupt __P((u_long, u_long, u_long, struct trapframe *));
-void machine_check __P((struct trapframe *, u_long, u_long));
-void nullintr __P((void *, u_long));
-void real_clockintr __P((void *, u_long));
-
-static void (*iointr) __P((void *, u_long)) = nullintr;
-static void (*clockintr) __P((void *, u_long)) = nullintr;
-static volatile int mc_expected, mc_received;
-
-#ifdef EVCNT_COUNTERS
-struct evcnt clock_intr_evcnt; /* event counter for clock intrs. */
-#endif
-
void
interrupt(a0, a1, a2, framep)
- u_long a0, a1, a2;
+ unsigned long a0, a1, a2;
struct trapframe *framep;
{
struct proc *p;
+#if defined(MULTIPROCESSOR)
+ u_long cpu_id = alpha_pal_whami();
+#endif
+ extern int schedhz;
- if (a0 == 1) { /* clock interrupt */
- cnt.v_intr++;
- (*clockintr)(framep, a1);
- if((++schedclk2 & 0x3f) == 0
- && (p = curproc) != NULL
- && schedhz)
- schedclock(p);
- } else if (a0 == 3) { /* I/O device interrupt */
- cnt.v_intr++;
- (*iointr)(framep, a1);
- } else if (a0 == 2) /* machine check or correctable error */
- machine_check(framep, a1, a2);
- else {
- /*
- * Not expected or handled:
- * 0 Interprocessor interrupt
- * 4 Performance counter
- */
- panic("unexpected interrupt: type 0x%lx, vec 0x%lx",
- a0, a1);
- }
-}
+ switch (a0) {
+ case ALPHA_INTR_XPROC: /* interprocessor interrupt */
+#if defined(MULTIPROCESSOR)
+ {
+ struct cpu_info *ci = &cpu_info[cpu_id];
+ u_long pending_ipis, bit;
-void
-nullintr(framep, vec)
- void *framep;
- u_long vec;
-{
-}
+#if 0
+ printf("CPU %lu got IPI\n", cpu_id);
+#endif
-void
-real_clockintr(framep, vec)
- void *framep;
- u_long vec;
-{
+#ifdef DIAGNOSTIC
+ if (ci->ci_dev == NULL) {
+ /* XXX panic? */
+ printf("WARNING: no device for ID %lu\n", cpu_id);
+ return;
+ }
+#endif
+
+ pending_ipis = atomic_loadlatch_ulong(&ci->ci_ipis, 0);
+ for (bit = 0; bit < ALPHA_NIPIS; bit++)
+ if (pending_ipis & (1UL << bit))
+ (*ipifuncs[bit])();
-#ifdef EVCNT_COUNTERS
- clock_intr_evcnt.ev_count++;
+ /*
+ * Handle inter-console messages if we're the primary
+ * CPU.
+ */
+ if (cpu_id == hwrpb->rpb_primary_cpu_id &&
+ hwrpb->rpb_txrdy != 0)
+ cpu_iccb_receive();
+ }
#else
- intrcnt[INTRCNT_CLOCK]++;
+ printf("WARNING: received interprocessor interrupt!\n");
+#endif /* MULTIPROCESSOR */
+ break;
+
+ case ALPHA_INTR_CLOCK: /* clock interrupt */
+#if defined(MULTIPROCESSOR)
+ /* XXX XXX XXX */
+ if (cpu_id != hwrpb->rpb_primary_cpu_id)
+ return;
#endif
- hardclock(framep);
-}
+ uvmexp.intrs++;
+ intrcnt[INTRCNT_CLOCK]++;
+ if (platform.clockintr) {
+ (*platform.clockintr)((struct clockframe *)framep);
+ if((++schedclk2 & 0x3f) == 0
+ && (p = curproc) != NULL
+ && schedhz)
+ schedclock(p);
+ }
+ break;
-void
-set_clockintr()
-{
+ case ALPHA_INTR_ERROR: /* Machine Check or Correctable Error */
+ a0 = alpha_pal_rdmces();
+ if (platform.mcheck_handler)
+ (*platform.mcheck_handler)(a0, framep, a1, a2);
+ else
+ machine_check(a0, framep, a1, a2);
+ break;
+
+ case ALPHA_INTR_DEVICE: /* I/O device interrupt */
+#if defined(MULTIPROCESSOR)
+ /* XXX XXX XXX */
+ if (cpu_id != hwrpb->rpb_primary_cpu_id)
+ return;
+#endif
+ uvmexp.intrs++;
+ if (platform.iointr)
+ (*platform.iointr)(framep, a1);
+ break;
+
+ case ALPHA_INTR_PERF: /* performance counter interrupt */
+ printf("WARNING: received performance counter interrupt!\n");
+ break;
- if (clockintr != nullintr)
- panic("set clockintr twice");
+ case ALPHA_INTR_PASSIVE:
+#if 0
+ printf("WARNING: received passive release interrupt vec "
+ "0x%lx\n", a1);
+#endif
+ break;
- clockintr = real_clockintr;
+ default:
+ printf("unexpected interrupt: type 0x%lx vec 0x%lx "
+ "a2 0x%lx"
+#if defined(MULTIPROCESSOR)
+ " cpu %lu"
+#endif
+ "\n", a0, a1, a2
+#if defined(MULTIPROCESSOR)
+ , cpu_id
+#endif
+ );
+ panic("interrupt");
+ /* NOTREACHED */
+ }
}
void
set_iointr(niointr)
- void (*niointr) __P((void *, u_long));
+ void (*niointr) __P((void *, unsigned long));
{
-
- if (iointr != nullintr)
+ if (platform.iointr)
panic("set iointr twice");
-
- iointr = niointr;
+ platform.iointr = niointr;
}
+
void
-machine_check(framep, vector, param)
+machine_check(mces, framep, vector, param)
+ unsigned long mces;
struct trapframe *framep;
- u_long vector, param;
+ unsigned long vector, param;
{
- u_long mces;
const char *type;
+ struct mchkinfo *mcp;
- mces = alpha_pal_rdmces();
-
- /* If not a machine check, we have no clue ho we got here. */
- if ((mces & ALPHA_MCES_MIP) == 0) {
+ mcp = &curcpu()->ci_mcinfo;
+ /* Make sure it's an error we know about. */
+ if ((mces & (ALPHA_MCES_MIP|ALPHA_MCES_SCE|ALPHA_MCES_PCE)) == 0) {
type = "fatal machine check or error (unknown type)";
goto fatal;
}
- /* If we weren't expecting it, then we punt. */
- if (!mc_expected) {
- type = "unexpected machine check";
- goto fatal;
+ /* Machine checks. */
+ if (mces & ALPHA_MCES_MIP) {
+ /* If we weren't expecting it, then we punt. */
+ if (!mcp->mc_expected) {
+ type = "unexpected machine check";
+ goto fatal;
+ }
+ mcp->mc_expected = 0;
+ mcp->mc_received = 1;
}
- mc_expected = 0;
- mc_received = 1;
+ /* System correctable errors. */
+ if (mces & ALPHA_MCES_SCE)
+ printf("Warning: received system correctable error.\n");
+
+ /* Processor correctable errors. */
+ if (mces & ALPHA_MCES_PCE)
+ printf("Warning: received processor correctable error.\n");
/* Clear pending machine checks and correctable errors */
alpha_pal_wrmces(mces);
return;
fatal:
+ /* Clear pending machine checks and correctable errors */
+ alpha_pal_wrmces(mces);
+
printf("\n");
printf("%s:\n", type);
printf("\n");
@@ -191,14 +246,25 @@ badaddr(addr, size)
void *addr;
size_t size;
{
+ return(badaddr_read(addr, size, NULL));
+}
+
+int
+badaddr_read(addr, size, rptr)
+ void *addr;
+ size_t size;
+ void *rptr;
+{
+ struct mchkinfo *mcp = &curcpu()->ci_mcinfo;
long rcpt;
+ int rv;
/* Get rid of any stale machine checks that have been waiting. */
alpha_pal_draina();
/* Tell the trap code to expect a machine check. */
- mc_received = 0;
- mc_expected = 1;
+ mcp->mc_received = 0;
+ mcp->mc_expected = 1;
/* Read from the test address, and make sure the read happens. */
alpha_mb();
@@ -220,16 +286,42 @@ badaddr(addr, size)
break;
default:
- panic("badaddr: invalid size (%ld)", size);
+ panic("badaddr: invalid size (%ld)\n", size);
}
alpha_mb();
+ alpha_mb(); /* MAGIC ON SOME SYSTEMS */
/* Make sure we took the machine check, if we caused one. */
alpha_pal_draina();
/* disallow further machine checks */
- mc_expected = 0;
-
+ mcp->mc_expected = 0;
+
+ rv = mcp->mc_received;
+ mcp->mc_received = 0;
+
+ /*
+ * And copy back read results (if no fault occurred).
+ */
+ if (rptr && rv == 0) {
+ switch (size) {
+ case sizeof (u_int8_t):
+ *(volatile u_int8_t *)rptr = rcpt;
+ break;
+
+ case sizeof (u_int16_t):
+ *(volatile u_int16_t *)rptr = rcpt;
+ break;
+
+ case sizeof (u_int32_t):
+ *(volatile u_int32_t *)rptr = rcpt;
+ break;
+
+ case sizeof (u_int64_t):
+ *(volatile u_int64_t *)rptr = rcpt;
+ break;
+ }
+ }
/* Return non-zero (i.e. true) if it's a bad address. */
- return (mc_received);
+ return (rv);
}
diff --git a/sys/arch/alpha/alpha/locore.s b/sys/arch/alpha/alpha/locore.s
index 3ec60e37fec..40c3d64f6eb 100644
--- a/sys/arch/alpha/alpha/locore.s
+++ b/sys/arch/alpha/alpha/locore.s
@@ -1,5 +1,41 @@
-/* $OpenBSD: locore.s,v 1.12 2000/06/05 11:02:54 art Exp $ */
-/* $NetBSD: locore.s,v 1.27 1996/12/03 19:54:16 cgd Exp $ */
+/* $NetBSD: locore.s,v 1.80 2000/09/04 00:31:59 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -28,21 +64,88 @@
* rights to redistribute these changes.
*/
+.stabs __FILE__,100,0,0,kernel_text
+
#include <machine/asm.h>
-#ifndef EVCNT_COUNTERS
-#include <machine/intrcnt.h>
-#endif
+
#include "assym.h"
+.stabs __FILE__,132,0,0,kernel_text
+
+#if defined(MULTIPROCESSOR)
+
+/*
+ * Get various per-cpu values. A pointer to our cpu_info structure
+ * is stored in SysValue. These macros clobber v0, t0, t8..t11.
+ *
+ * All return values are in v0.
+ */
+#define GET_CPUINFO call_pal PAL_OSF1_rdval
+
+#define GET_CURPROC \
+ call_pal PAL_OSF1_rdval ; \
+ addq v0, CPU_INFO_CURPROC, v0
+
+#define GET_FPCURPROC \
+ call_pal PAL_OSF1_rdval ; \
+ addq v0, CPU_INFO_FPCURPROC, v0
+
+#define GET_CURPCB \
+ call_pal PAL_OSF1_rdval ; \
+ addq v0, CPU_INFO_CURPCB, v0
+
+#define GET_IDLE_PCB(reg) \
+ call_pal PAL_OSF1_rdval ; \
+ ldq reg, CPU_INFO_IDLE_PCB_PADDR(v0)
+
+#else /* if not MULTIPROCESSOR... */
+
+IMPORT(cpu_info_store, CPU_INFO_SIZEOF)
+
+#define GET_CPUINFO lda v0, cpu_info_store
+
+#define GET_CURPROC lda v0, cpu_info_store + CPU_INFO_CURPROC
+
+#define GET_FPCURPROC lda v0, cpu_info_store + CPU_INFO_FPCURPROC
+
+#define GET_CURPCB lda v0, cpu_info_store + CPU_INFO_CURPCB
+
+#define GET_IDLE_PCB(reg) \
+ lda reg, cpu_info_store ; \
+ ldq reg, CPU_INFO_IDLE_PCB_PADDR(reg)
+#endif
+
+/*
+ * Perform actions necessary to switch to a new context. The
+ * hwpcb should be in a0. Clobbers v0, t0, t8..t11, a0.
+ */
+#define SWITCH_CONTEXT \
+ /* Make a note of the context we're running on. */ \
+ GET_CURPCB ; \
+ stq a0, 0(v0) ; \
+ \
+ /* Swap in the new context. */ \
+ call_pal PAL_OSF1_swpctx
+
+
/* don't reorder instructions; paranoia. */
.set noreorder
.text
+ .macro bfalse reg, dst
+ beq \reg, \dst
+ .endm
+
+ .macro btrue reg, dst
+ bne \reg, \dst
+ .endm
+
/*
* This is for kvm_mkdb, and should be the address of the beginning
* of the kernel text segment (not necessarily the same as kernbase).
*/
EXPORT(kernel_text)
+.loc 1 __LINE__
kernel_text:
/*
@@ -58,7 +161,8 @@ bootstack:
* Arguments:
* a0 is the first free page frame number (PFN)
* a1 is the page table base register (PTBR)
- * a2 is the end of the symbol table
+ * a2 is the bootinfo magic number
+ * a3 is the pointer to the bootinfo structure
*
* All arguments are passed to alpha_init().
*/
@@ -78,7 +182,7 @@ Lstart1: LDGP(pv)
/*
* Call alpha_init() to do pre-main initialization.
* alpha_init() gets the arguments we were called with,
- * which are already in a0, a1 and a2.
+ * which are already in a0, a1, a2, a3 and a4.
*/
CALL(alpha_init)
@@ -87,28 +191,27 @@ Lstart1: LDGP(pv)
call_pal PAL_OSF1_wrvptptr /* clobbers a0, t0, t8-t11 */
/*
- * Switch to proc0's PCB, which is at U_PCB off of proc0paddr.
+ * Switch to proc0's PCB.
*/
- lda t0,proc0 /* get phys addr of pcb */
- ldq a0,P_MD_PCBPADDR(t0)
- call_pal PAL_OSF1_swpctx
- ldiq a0, -2
- call_pal PAL_OSF1_tbi
+ lda a0, proc0
+ ldq a0, P_MD_PCBPADDR(a0) /* phys addr of PCB */
+ SWITCH_CONTEXT
/*
- * Construct a fake trap frame, so execve() can work normally.
- * Note that setregs() is responsible for setting its contents
- * to 'reasonable' values.
+ * We've switched to a new page table base, so invalidate the TLB
+ * and I-stream. This happens automatically everywhere but here.
*/
- lda sp,-(FRAME_SIZE * 8)(sp) /* space for struct trapframe */
- mov sp, a0 /* main()'s arg is frame ptr */
- CALL(main) /* go to main()! */
+ ldiq a0, -2 /* TBIA */
+ call_pal PAL_OSF1_tbi
+ call_pal PAL_imb
/*
- * Call exception_return, to simulate return from (fake)
- * exception to user-land, running process 1, init!
+ * All ready to go! Call main()!
*/
- jmp zero, exception_return /* "And that's all she wrote." */
+ CALL(main)
+
+ /* This should never happen. */
+ PANIC("main() returned",Lmain_returned_pmsg)
END(__start)
/**************************************************************************/
@@ -129,14 +232,34 @@ Lstart1: LDGP(pv)
/**************************************************************************/
- .data
-EXPORT(cold)
- .long 1 /* cold start flag (.long -> _4_ bytes) */
- .align 3
-EXPORT(esym)
- .quad /* store end of kernel symbol table here */
- .text
+/**************************************************************************/
+
+#if defined(MULTIPROCESSOR)
+/*
+ * Pull in the multiprocssor glue.
+ */
+#include <alpha/alpha/multiproc.s>
+#endif /* MULTIPROCESSOR */
+
+/**************************************************************************/
+
+/**************************************************************************/
+
+#if defined(DDB)
+/*
+ * Pull in debugger glue.
+ */
+#include <alpha/alpha/debug.s>
+#endif /* DDB */
+
+/**************************************************************************/
+
+/**************************************************************************/
+ .text
+.stabs __FILE__,132,0,0,backtolocore1 /* done with includes */
+.loc 1 __LINE__
+backtolocore1:
/**************************************************************************/
/*
@@ -194,49 +317,59 @@ XNESTED(netbsd_esigcode,0)
*/
BSS(ssir, 8)
-IMPORT(astpending, 8)
LEAF(exception_return, 1) /* XXX should be NESTED */
- br pv, Ler1
-Ler1: LDGP(pv)
+ br pv, 1f
+1: LDGP(pv)
+
+#if defined(MULTIPROCESSOR)
+ /* XXX XXX XXX */
+ /*
+ * Check the current processor ID. If we're not the primary
+ * CPU, then just restore registers and bail out.
+ */
+ call_pal PAL_OSF1_whami
+ lda t0, hwrpb
+ ldq t0, 0(t0)
+ ldq t1, RPB_PRIMARY_CPU_ID(t0)
+ cmpeq t1, v0, t0
+ beq t0, 4f /* == 0: bail out now */
+#endif
ldq s1, (FRAME_PS * 8)(sp) /* get the saved PS */
and s1, ALPHA_PSL_IPL_MASK, t0 /* look at the saved IPL */
- bne t0, Lrestoreregs /* != 0: can't do AST or SIR */
+ bne t0, 4f /* != 0: can't do AST or SIR */
/* see if we can do an SIR */
- ldq t1, ssir /* SIR pending? */
- beq t1, Lchkast /* no, try an AST*/
+2: ldq t1, ssir /* SIR pending? */
+ bne t1, 5f /* yes */
+ /* no */
- /* We've got a SIR. */
- CALL(do_sir) /* do the SIR; lowers IPL */
-
-Lchkast:
- ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero*/
- call_pal PAL_OSF1_swpipl
+ /* check for AST */
+3: and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
+ beq t0, 4f /* no: just return */
+ /* yes */
- and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
- beq t0, Lrestoreregs /* no: just return */
+ /* GET_CPUINFO clobbers v0, t0, t8...t11. */
+ GET_CPUINFO
+ ldq t2, CPU_INFO_ASTPENDING(v0) /* AST pending? */
+ bne t2, 6f /* yes */
+ /* no: return & deal with FP */
- ldq t2, astpending /* AST pending? */
- beq t2, Lsetfpenable /* no: return & deal with FP */
-
- /* We've got an AST. Handle it. */
- mov sp, a0 /* only arg is frame */
- CALL(ast)
-
-Lsetfpenable:
- /* enable FPU based on whether the current proc is fpcurproc */
- ldq t0, curproc
- ldq t1, fpcurproc
- cmpeq t0, t1, t0
+ /*
+ * We are going back to usermode. Enable the FPU based on whether
+ * the current proc is fpcurproc. v0 already contains the cpu_info
+ * pointer from above.
+ */
+ ldq t1, CPU_INFO_CURPROC(v0)
+ ldq t2, CPU_INFO_FPCURPROC(v0)
+ cmpeq t1, t2, t1
mov zero, a0
- cmovne t0, 1, a0
+ cmovne t1, 1, a0
call_pal PAL_OSF1_wrfen
-Lrestoreregs:
/* restore the registers, and return */
- bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */
+4: bsr ra, exception_restore_regs /* jmp/CALL trashes pv/t12 */
ldq ra,(FRAME_RA*8)(sp)
.set noat
ldq at_reg,(FRAME_AT*8)(sp)
@@ -244,6 +377,32 @@ Lrestoreregs:
lda sp,(FRAME_SW_SIZE*8)(sp)
call_pal PAL_OSF1_rti
.set at
+ /* NOTREACHED */
+
+ /* We've got a SIR */
+5: ldiq a0, ALPHA_PSL_IPL_SOFT
+ call_pal PAL_OSF1_swpipl
+ mov v0, s2 /* remember old IPL */
+ CALL(do_sir)
+
+ /* SIR handled; restore IPL and check again */
+ mov s2, a0
+ call_pal PAL_OSF1_swpipl
+ br 2b
+
+ /* We've got an AST */
+6: ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */
+ call_pal PAL_OSF1_swpipl
+ mov v0, s2 /* remember old IPL */
+
+ mov sp, a0 /* only arg is frame */
+ CALL(ast)
+
+ /* AST handled; restore IPL and check again */
+ mov s2, a0
+ call_pal PAL_OSF1_swpipl
+ br 3b
+
END(exception_return)
LEAF(exception_save_regs, 0)
@@ -309,17 +468,11 @@ LEAF(exception_restore_regs, 0)
* System arithmetic trap entry point.
*/
-LEAF(XentArith, 2) /* XXX should be NESTED */
- .set noat
- lda sp,-(FRAME_SW_SIZE*8)(sp)
- stq at_reg,(FRAME_AT*8)(sp)
- .set at
- stq ra,(FRAME_RA*8)(sp)
- bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */
+ PALVECT(XentArith) /* setup frame, save registers */
/* a0, a1, & a2 already set up */
ldiq a3, ALPHA_KENTRY_ARITH
- mov sp, a4
+ mov sp, a4 ; .loc 1 __LINE__
CALL(trap)
jmp zero, exception_return
@@ -332,20 +485,13 @@ LEAF(XentArith, 2) /* XXX should be NESTED */
* System instruction fault trap entry point.
*/
-LEAF(XentIF, 1) /* XXX should be NESTED */
- .set noat
- lda sp,-(FRAME_SW_SIZE*8)(sp)
- stq at_reg,(FRAME_AT*8)(sp)
- .set at
- stq ra,(FRAME_RA*8)(sp)
- bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */
+ PALVECT(XentIF) /* setup frame, save registers */
/* a0, a1, & a2 already set up */
ldiq a3, ALPHA_KENTRY_IF
- mov sp, a4
+ mov sp, a4 ; .loc 1 __LINE__
CALL(trap)
-
- jmp zero, exception_return
+ jmp zero, exception_return
END(XentIF)
/**************************************************************************/
@@ -355,18 +501,11 @@ LEAF(XentIF, 1) /* XXX should be NESTED */
* System interrupt entry point.
*/
-LEAF(XentInt, 2) /* XXX should be NESTED */
- .set noat
- lda sp,-(FRAME_SW_SIZE*8)(sp)
- stq at_reg,(FRAME_AT*8)(sp)
- .set at
- stq ra,(FRAME_RA*8)(sp)
- bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */
+ PALVECT(XentInt) /* setup frame, save registers */
/* a0, a1, & a2 already set up */
- mov sp, a3
+ mov sp, a3 ; .loc 1 __LINE__
CALL(interrupt)
-
jmp zero, exception_return
END(XentInt)
@@ -377,17 +516,11 @@ LEAF(XentInt, 2) /* XXX should be NESTED */
* System memory management fault entry point.
*/
-LEAF(XentMM, 3) /* XXX should be NESTED */
- .set noat
- lda sp,-(FRAME_SW_SIZE*8)(sp)
- stq at_reg,(FRAME_AT*8)(sp)
- .set at
- stq ra,(FRAME_RA*8)(sp)
- bsr ra, exception_save_regs /* jmp/CALL trashes pv/t12 */
+ PALVECT(XentMM) /* setup frame, save registers */
/* a0, a1, & a2 already set up */
ldiq a3, ALPHA_KENTRY_MM
- mov sp, a4
+ mov sp, a4 ; .loc 1 __LINE__
CALL(trap)
jmp zero, exception_return
@@ -400,8 +533,8 @@ LEAF(XentMM, 3) /* XXX should be NESTED */
* System call entry point.
*/
-LEAF(XentSys, 0) /* XXX should be NESTED */
- lda sp,-(FRAME_SW_SIZE*8)(sp)
+ ESETUP(XentSys) ; .loc 1 __LINE__
+
stq v0,(FRAME_V0*8)(sp) /* in case we need to restart */
stq s0,(FRAME_S0*8)(sp)
stq s1,(FRAME_S1*8)(sp)
@@ -420,7 +553,7 @@ LEAF(XentSys, 0) /* XXX should be NESTED */
/* syscall number, passed in v0, is first arg, frame pointer second */
mov v0,a0
- mov sp,a1
+ mov sp,a1 ; .loc 1 __LINE__
CALL(syscall)
jmp zero, exception_return
@@ -443,7 +576,7 @@ LEAF(XentUna, 3) /* XXX should be NESTED */
/* a0, a1, & a2 already set up */
ldiq a3, ALPHA_KENTRY_UNA
- mov sp, a4
+ mov sp, a4 ; .loc 1 __LINE__
CALL(trap)
jmp zero, exception_return
@@ -554,7 +687,9 @@ LEAF(restorefpstate, 1)
ldt $f25, (25 * 8)(t1)
ldt $f26, (26 * 8)(t1)
ldt $f27, (27 * 8)(t1)
+ .set noat
ldt $f28, (28 * 8)(t1)
+ .set at
ldt $f29, (29 * 8)(t1)
ldt $f30, (30 * 8)(t1)
@@ -569,10 +704,10 @@ LEAF(restorefpstate, 1)
* Note that savectx() only works for processes other than curproc,
* since cpu_switch will copy over the info saved here. (It _can_
* sanely be used for curproc iff cpu_switch won't be called again, e.g.
- * from if called from boot().)
+ * if called from boot().)
*
* Arguments:
- * a0 'struct pcb *' of the process that needs its context saved
+ * a0 'struct user *' of the process that needs its context saved
*
* Return:
* v0 0. (note that for child processes, it seems
@@ -581,19 +716,19 @@ LEAF(restorefpstate, 1)
*/
LEAF(savectx, 1)
- br pv, Lsavectx1
-Lsavectx1: LDGP(pv)
- stq sp, PCB_HWPCB_KSP(a0) /* store sp */
- stq s0, PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */
- stq s1, PCB_CONTEXT+(1 * 8)(a0)
- stq s2, PCB_CONTEXT+(2 * 8)(a0)
- stq s3, PCB_CONTEXT+(3 * 8)(a0)
- stq s4, PCB_CONTEXT+(4 * 8)(a0)
- stq s5, PCB_CONTEXT+(5 * 8)(a0)
- stq s6, PCB_CONTEXT+(6 * 8)(a0)
- stq ra, PCB_CONTEXT+(7 * 8)(a0) /* store ra */
+ br pv, 1f
+1: LDGP(pv)
+ stq sp, U_PCB_HWPCB_KSP(a0) /* store sp */
+ stq s0, U_PCB_CONTEXT+(0 * 8)(a0) /* store s0 - s6 */
+ stq s1, U_PCB_CONTEXT+(1 * 8)(a0)
+ stq s2, U_PCB_CONTEXT+(2 * 8)(a0)
+ stq s3, U_PCB_CONTEXT+(3 * 8)(a0)
+ stq s4, U_PCB_CONTEXT+(4 * 8)(a0)
+ stq s5, U_PCB_CONTEXT+(5 * 8)(a0)
+ stq s6, U_PCB_CONTEXT+(6 * 8)(a0)
+ stq ra, U_PCB_CONTEXT+(7 * 8)(a0) /* store ra */
call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
- stq v0, PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */
+ stq v0, U_PCB_CONTEXT+(8 * 8)(a0) /* store ps, for ipl */
mov zero, v0
RET
@@ -601,11 +736,7 @@ Lsavectx1: LDGP(pv)
/**************************************************************************/
-BSS(curpcb, 8)
-
IMPORT(whichqs, 4)
-IMPORT(want_resched, 8)
-IMPORT(Lev1map, 8)
/*
* When no processes are on the runq, cpu_switch branches to idle
@@ -614,64 +745,68 @@ IMPORT(Lev1map, 8)
* profiling.
*/
LEAF(idle, 0)
- br pv, Lidle1
-Lidle1: LDGP(pv)
- stq zero, curproc /* curproc <- NULL for stats */
+ br pv, 1f
+1: LDGP(pv)
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ stq zero, 0(v0) /* curproc <- NULL for stats */
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ CALL(sched_unlock_idle) /* release sched_lock */
+#endif
mov zero, a0 /* enable all interrupts */
call_pal PAL_OSF1_swpipl
-Lidle2:
- ldl t0, whichqs /* look for non-empty queue */
- beq t0, Lidle2
+2: ldl t0, whichqs /* look for non-empty queue */
+ beq t0, 2b
ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
call_pal PAL_OSF1_swpipl
- jmp zero, sw1 /* jump back into the fray */
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ CALL(sched_lock_idle) /* acquire sched_lock */
+#endif
+ jmp zero, cpu_switch_queuescan /* jump back into the fire */
END(idle)
/*
* cpu_switch()
* Find the highest priority process and resume it.
- * XXX should optimiize, and not do the switch if switching to curproc
*/
LEAF(cpu_switch, 0)
LDGP(pv)
- /* do an inline savectx(), to save old context */
- ldq a0, curproc
+ /*
+ * do an inline savectx(), to save old context
+ * Note: GET_CURPROC clobbers v0, t0, t8...t11.
+ */
+ GET_CURPROC
+ ldq a0, 0(v0)
ldq a1, P_ADDR(a0)
/* NOTE: ksp is stored by the swpctx */
- stq s0, U_PCB+PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
- stq s1, U_PCB+PCB_CONTEXT+(1 * 8)(a1)
- stq s2, U_PCB+PCB_CONTEXT+(2 * 8)(a1)
- stq s3, U_PCB+PCB_CONTEXT+(3 * 8)(a1)
- stq s4, U_PCB+PCB_CONTEXT+(4 * 8)(a1)
- stq s5, U_PCB+PCB_CONTEXT+(5 * 8)(a1)
- stq s6, U_PCB+PCB_CONTEXT+(6 * 8)(a1)
- stq ra, U_PCB+PCB_CONTEXT+(7 * 8)(a1) /* store ra */
+ stq s0, U_PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
+ stq s1, U_PCB_CONTEXT+(1 * 8)(a1)
+ stq s2, U_PCB_CONTEXT+(2 * 8)(a1)
+ stq s3, U_PCB_CONTEXT+(3 * 8)(a1)
+ stq s4, U_PCB_CONTEXT+(4 * 8)(a1)
+ stq s5, U_PCB_CONTEXT+(5 * 8)(a1)
+ stq s6, U_PCB_CONTEXT+(6 * 8)(a1)
+ stq ra, U_PCB_CONTEXT+(7 * 8)(a1) /* store ra */
call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
- stq v0, U_PCB+PCB_CONTEXT+(8 * 8)(a1) /* store ps, for ipl */
+ stq v0, U_PCB_CONTEXT+(8 * 8)(a1) /* store ps, for ipl */
mov a0, s0 /* save old curproc */
mov a1, s1 /* save old U-area */
-
- ldl t0, whichqs /* look for non-empty queue */
- beq t0, idle /* and if none, go idle */
-
- ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
- call_pal PAL_OSF1_swpipl
-sw1:
- br pv, Lcs1
-Lcs1: LDGP(pv)
- ldl t0, whichqs /* look for non-empty queue */
+
+cpu_switch_queuescan:
+ br pv, 1f
+1: LDGP(pv)
+ ldl t0, whichqs /* look for non-empty queue */
beq t0, idle /* and if none, go idle */
mov t0, t3 /* t3 = saved whichqs */
mov zero, t2 /* t2 = lowest bit set */
- blbs t0, Lcs3 /* if low bit set, done! */
+ blbs t0, 3f /* if low bit set, done! */
-Lcs2: srl t0, 1, t0 /* try next bit */
+2: srl t0, 1, t0 /* try next bit */
addq t2, 1, t2
- blbc t0, Lcs2 /* if clear, try again */
+ blbc t0, 2b /* if clear, try again */
-Lcs3:
- /*
+3: /*
* Remove process from queue
*/
lda t1, qs /* get queues */
@@ -679,92 +814,123 @@ Lcs3:
addq t1, t0, t0 /* t0 = qp = &qs[firstbit] */
ldq t4, PH_LINK(t0) /* t4 = p = highest pri proc */
- ldq t5, P_FORW(t4) /* t5 = p->p_forw */
- bne t4, Lcs4 /* make sure p != NULL */
+ bne t4, 4f /* make sure p != NULL */
PANIC("cpu_switch",Lcpu_switch_pmsg) /* nothing in queue! */
-Lcs4:
+4:
+ ldq t5, P_FORW(t4) /* t5 = p->p_forw */
stq t5, PH_LINK(t0) /* qp->ph_link = p->p_forw */
stq t0, P_BACK(t5) /* p->p_forw->p_back = qp */
stq zero, P_BACK(t4) /* firewall: p->p_back = NULL */
cmpeq t0, t5, t0 /* see if queue is empty */
- beq t0, Lcs5 /* nope, it's not! */
+ beq t0, 5f /* nope, it's not! */
ldiq t0, 1 /* compute bit in whichqs */
sll t0, t2, t0
xor t3, t0, t3 /* clear bit in whichqs */
stl t3, whichqs
-Lcs5:
+5:
+ mov t4, s2 /* save new proc */
+ ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
- * Switch to the new context
+ * Done mucking with the run queues, release the
+ * scheduler lock, but keep interrupts out.
*/
+ CALL(sched_unlock_idle)
+#endif
- /* mark the new curproc, and other globals */
- stq zero, want_resched /* we've rescheduled */
- /* XXX should allocate an ASN, rather than just flushing */
- stq t4, curproc /* curproc = p */
- ldq t5, P_MD_PCBPADDR(t4) /* t5 = p->p_md.md_pcbpaddr */
- stq t5, curpcb /* and store it in curpcb */
-
-#ifndef NEW_PMAP
/*
- * Do the context swap, and invalidate old TLB entries (XXX).
- * XXX should do the ASN thing, and therefore not have to invalidate.
+ * Check to see if we're switching to ourself. If we are,
+ * don't bother loading the new context.
+ *
+ * Note that even if we re-enter cpu_switch() from idle(),
+ * s0 will still contain the old curproc value because any
+ * users of that register between then and now must have
+ * saved it. Also note that switch_exit() ensures that
+ * s0 is clear before jumping here to find a new process.
*/
- ldq t2, P_VMSPACE(t4) /* t2 = p->p_vmspace */
- ldq t2, VM_PMAP_STPTE(t2) /* = p_vmspace.vm_pmap.pm_ste */
- ldq t3, Lev1map /* and store pte into Lev1map */
- stq t2, USTP_OFFSET(t3)
- mov t5, a0 /* swap the context */
- call_pal PAL_OSF1_swpctx
- ldiq a0, -1 /* & invalidate old TLB ents */
- call_pal PAL_OSF1_tbi
+ cmpeq s0, s2, t0 /* oldproc == newproc? */
+ bne t0, 7f /* Yes! Skip! */
/*
- * Now running on the new u struct.
- * Restore registers and return.
+ * Deactivate the old address space before activating the
+ * new one. We need to do this before activating the
+ * new process's address space in the event that new
+ * process is using the same vmspace as the old. If we
+ * do this after we activate, then we might end up
+ * incorrectly marking the pmap inactive!
+ *
+ * We don't deactivate if we came here from switch_exit
+ * (old pmap no longer exists; vmspace has been freed).
+ * oldproc will be NULL in this case. We have actually
+ * taken care of calling pmap_deactivate() in cpu_exit(),
+ * before the vmspace went away.
*/
- ldq t0, curproc
- ldq t0, P_ADDR(t0)
-#else /* NEW_PMAP */
- mov t4, s2 /* save new curproc */
- mov t5, s3 /* save new pcbpaddr */
- ldq s4, P_ADDR(t4) /* load/save new U-AREA */
-
- ldq a0, P_VMSPACE(s2) /* p->p_vmspace */
- lda a1, U_PCB+PCB_HWPCB(s4) /* &hardware PCB */
- mov zero, a2
- lda a0, VM_PMAP(a0) /* &p->p_vmspace->vm_pmap */
+ beq s0, 6f
+
+ mov s0, a0 /* pmap_deactivate(oldproc) */
+ CALL(pmap_deactivate)
+
+6: /*
+ * Activate the new process's address space and perform
+ * the actual context swap.
+ */
+
+ mov s2, a0 /* pmap_activate(p) */
CALL(pmap_activate)
mov s3, a0 /* swap the context */
- call_pal PAL_OSF1_swpctx
- ldiq a0, -2 /* & invalidate old TLB ents */
- call_pal PAL_OSF1_tbi
+ SWITCH_CONTEXT
+
+7: /*
+ * Now that the switch is done, update curproc and other
+ * globals. We must do this even if switching to ourselves
+ * because we might have re-entered cpu_switch() from idle(),
+ * in which case curproc would be NULL.
+ *
+ * Note: GET_CPUINFO clobbers v0, t0, t8...t11.
+ */
+#if 0
+#ifdef __alpha_bwx__
+ ldiq t0, SONPROC /* p->p_stat = SONPROC */
+ stb t0, P_STAT(s2)
+#else
+ addq s2, P_STAT, t3 /* p->p_stat = SONPROC */
+ ldq_u t1, 0(t3)
+ ldiq t0, SONPROC
+ insbl t0, t3, t0
+ mskbl t1, t3, t1
+ or t0, t1, t0
+ stq_u t0, 0(t3)
+#endif /* __alpha_bwx__ */
+#endif
- ldq a0, P_VMSPACE(s0)
- lda a1, U_PCB+PCB_HWPCB(s1)
- mov zero, a2
- lda a0, VM_PMAP(a0)
- CALL(pmap_deactivate)
+ GET_CPUINFO
+ /* p->p_cpu initialized in fork1() for single-processor */
+#if defined(MULTIPROCESSOR)
+ stq v0, P_CPU(s2) /* p->p_cpu = curcpu() */
+#endif
+ stq s2, CPU_INFO_CURPROC(v0) /* curproc = p */
+ stq zero, CPU_INFO_WANT_RESCHED(v0) /* we've rescheduled */
/*
* Now running on the new u struct.
* Restore registers and return.
*/
- mov s4, t0
-#endif /* NEW_PMAP */
+ ldq t0, P_ADDR(s2)
+
/* NOTE: ksp is restored by the swpctx */
- ldq s0, U_PCB+PCB_CONTEXT+(0 * 8)(t0) /* restore s0 - s6 */
- ldq s1, U_PCB+PCB_CONTEXT+(1 * 8)(t0)
- ldq s2, U_PCB+PCB_CONTEXT+(2 * 8)(t0)
- ldq s3, U_PCB+PCB_CONTEXT+(3 * 8)(t0)
- ldq s4, U_PCB+PCB_CONTEXT+(4 * 8)(t0)
- ldq s5, U_PCB+PCB_CONTEXT+(5 * 8)(t0)
- ldq s6, U_PCB+PCB_CONTEXT+(6 * 8)(t0)
- ldq ra, U_PCB+PCB_CONTEXT+(7 * 8)(t0) /* restore ra */
- ldq a0, U_PCB+PCB_CONTEXT+(8 * 8)(t0) /* restore ipl */
+ ldq s0, U_PCB_CONTEXT+(0 * 8)(t0) /* restore s0 - s6 */
+ ldq s1, U_PCB_CONTEXT+(1 * 8)(t0)
+ ldq s2, U_PCB_CONTEXT+(2 * 8)(t0)
+ ldq s3, U_PCB_CONTEXT+(3 * 8)(t0)
+ ldq s4, U_PCB_CONTEXT+(4 * 8)(t0)
+ ldq s5, U_PCB_CONTEXT+(5 * 8)(t0)
+ ldq s6, U_PCB_CONTEXT+(6 * 8)(t0)
+ ldq ra, U_PCB_CONTEXT+(7 * 8)(t0) /* restore ra */
+ ldq a0, U_PCB_CONTEXT+(8 * 8)(t0) /* restore ipl */
and a0, ALPHA_PSL_IPL_MASK, a0
call_pal PAL_OSF1_swpipl
@@ -772,28 +938,31 @@ Lcs5:
RET
END(cpu_switch)
-
/*
* switch_trampoline()
*
- * Arrange for a function to be invoked neatly, after a cpu_switch().
+ * Arrange for a function to be invoked neatly, after a cpu_fork().
*
* Invokes the function specified by the s0 register with the return
- * address specified by the s1 register and with one argument, a
- * pointer to the executing process's proc structure.
+ * address specified by the s1 register and with one argument specified
+ * by the s2 register.
*/
LEAF(switch_trampoline, 0)
+#if defined(MULTIPROCESSOR)
+ CALL(proc_trampoline_mp)
+#endif
mov s0, pv
mov s1, ra
- ldq a0, curproc
+ mov s2, a0
jmp zero, (pv)
END(switch_trampoline)
/*
* switch_exit(struct proc *p)
- * Make a the named process exit. Partially switch to proc0, unmap
- * the old proc's user struct, and jump into the middle of cpu_switch
- * to switch into a few process. MUST BE CALLED AT SPLHIGH.
+ * Make a the named process exit. Partially switch to our idle thread
+ * (we don't update curproc or restore registers), and jump into the middle
+ * of cpu_switch to switch into a few process. The process reaper will
+ * free the dead process's VM resources. MUST BE CALLED AT SPLHIGH.
*/
LEAF(switch_exit, 1)
LDGP(pv)
@@ -801,47 +970,31 @@ LEAF(switch_exit, 1)
/* save the exiting proc pointer */
mov a0, s2
- /* Switch to proc0. */
- lda t4, proc0 /* t4 = &proc0 */
- ldq t5, P_MD_PCBPADDR(t4) /* t5 = p->p_md.md_pcbpaddr */
- stq t5, curpcb /* and store it in curpcb */
-
-#ifndef NEW_PMAP
- mov t4, s0
- ldq s1, P_ADDR(t4)
-#endif
-
- /*
- * Do the context swap, and invalidate old TLB entries (XXX).
- * XXX should do the ASN thing, and therefore not have to invalidate.
- */
-#ifndef NEW_PMAP
- ldq t2, P_VMSPACE(t4) /* t2 = p->p_vmspace */
- ldq t2, VM_PMAP_STPTE(t2) /* = p_vmspace.vm_pmap.pm_ste */
- ldq t3, Lev1map /* and store pte into Lev1map */
- stq t2, USTP_OFFSET(t3)
-#endif /* NEW_PMAP */
- mov t5, a0 /* swap the context */
- call_pal PAL_OSF1_swpctx
-#ifndef NEW_PMAP
- ldiq a0, -1 /* & invalidate old TLB ents */
- call_pal PAL_OSF1_tbi
-#endif /* NEW_PMAP */
+ /* Switch to our idle stack. */
+ GET_IDLE_PCB(a0) /* clobbers v0, t0, t8-t11 */
+ SWITCH_CONTEXT
/*
- * Now running as proc0, except for the value of 'curproc' and
+ * Now running as idle thread, except for the value of 'curproc' and
* the saved regs.
*/
/* Schedule the vmspace and stack to be freed. */
- mov s2, a0
+ mov s2, a0
CALL(exit2)
- /* and jump into the middle of cpu_switch. */
-#ifdef NEW_PMAP
- /* XXX XXX LOSE */
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+ CALL(sched_lock_idle) /* acquire sched_lock */
#endif
- jmp zero, sw1
+
+ /*
+ * Now jump back into the middle of cpu_switch(). Note that
+ * we must clear s0 to guarantee that the check for switching
+ * to ourselves in cpu_switch() will fail. This is safe since
+ * s0 will be restored when a new process is resumed.
+ */
+ mov zero, s0
+ jmp zero, cpu_switch_queuescan
END(switch_exit)
/**************************************************************************/
@@ -856,12 +1009,11 @@ LEAF(copystr, 4)
LDGP(pv)
mov a2, t0 /* t0 = i = len */
- bne a2, Lcopystr1 /* if (len != 0), proceed */
+ bne a2, 1f /* if (len != 0), proceed */
ldiq t1, 1 /* else bail */
- br zero, Lcopystr2
+ br zero, 2f
-Lcopystr1:
- ldq_u t1, 0(a0) /* t1 = *from */
+1: ldq_u t1, 0(a0) /* t1 = *from */
extbl t1, a0, t1
ldq_u t3, 0(a1) /* set up t2 with quad around *to */
insbl t1, a1, t2
@@ -870,70 +1022,77 @@ Lcopystr1:
stq_u t3, 0(a1) /* write out that quad */
subl a2, 1, a2 /* len-- */
- beq t1, Lcopystr2 /* if (*from == 0), bail out */
+ beq t1, 2f /* if (*from == 0), bail out */
addq a1, 1, a1 /* to++ */
addq a0, 1, a0 /* from++ */
- bne a2, Lcopystr1 /* if (len != 0) copy more */
+ bne a2, 1b /* if (len != 0) copy more */
-Lcopystr2:
- beq a3, Lcopystr3 /* if (lenp != NULL) */
+2: beq a3, 3f /* if (lenp != NULL) */
subl t0, a2, t0 /* *lenp = (i - len) */
stq t0, 0(a3)
-Lcopystr3:
- beq t1, Lcopystr4 /* *from == '\0'; leave quietly */
+3: beq t1, 4f /* *from == '\0'; leave quietly */
ldiq v0, ENAMETOOLONG /* *from != '\0'; error. */
RET
-Lcopystr4:
- mov zero, v0 /* return 0. */
+4: mov zero, v0 /* return 0. */
RET
END(copystr)
-NESTED(copyinstr, 4, 16, ra, 0, 0)
+NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
LDGP(pv)
lda sp, -16(sp) /* set up stack frame */
stq ra, (16-8)(sp) /* save ra */
+ stq s0, (16-16)(sp) /* save s0 */
ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, copyerr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, s0
lda v0, copyerr /* set up fault handler. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(s0)
ldq at_reg, P_ADDR(at_reg)
- stq v0, U_PCB+PCB_ONFAULT(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
.set at
CALL(copystr) /* do the copy. */
.set noat
- ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, 0(s0) /* kill the fault handler. */
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
ldq ra, (16-8)(sp) /* restore ra. */
+ ldq s0, (16-16)(sp) /* restore s0. */
lda sp, 16(sp) /* kill stack frame. */
RET /* v0 left over from copystr */
END(copyinstr)
-NESTED(copyoutstr, 4, 16, ra, 0, 0)
+NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
LDGP(pv)
lda sp, -16(sp) /* set up stack frame */
stq ra, (16-8)(sp) /* save ra */
+ stq s0, (16-16)(sp) /* save s0 */
ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */
cmpult a1, t0, t1 /* is in user space. */
beq t1, copyerr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, s0
lda v0, copyerr /* set up fault handler. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(s0)
ldq at_reg, P_ADDR(at_reg)
- stq v0, U_PCB+PCB_ONFAULT(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
.set at
CALL(copystr) /* do the copy. */
.set noat
- ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, 0(s0) /* kill the fault handler. */
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
ldq ra, (16-8)(sp) /* restore ra. */
+ ldq s0, (16-16)(sp) /* restore s0. */
lda sp, 16(sp) /* kill stack frame. */
RET /* v0 left over from copystr */
END(copyoutstr)
@@ -941,18 +1100,15 @@ NESTED(copyoutstr, 4, 16, ra, 0, 0)
/*
* Copy a bytes within the kernel's address space.
*
- * In the kernel, bcopy() doesn't have to handle the overlapping
- * case; that's that ovbcopy() is for. However, it doesn't hurt
- * to do both in bcopy, and it does provide a measure of safety.
+ * Although bcopy() is not specified to handle overlapping regions,
+ * this version does do so.
*
- * void memcpy(char *to, char*from, size_t len);
* void bcopy(char *from, char *to, size_t len);
- * void ovbcopy(char *from, char *to, size_t len);
*/
LEAF(memcpy,3)
- cmoveq zero,a0,t5
- cmoveq zero,a1,a0
- cmoveq zero,t5,a1
+ cmoveq zero,a0,t5
+ cmoveq zero,a1,a0
+ cmoveq zero,t5,a1
XLEAF(bcopy,3)
XLEAF(ovbcopy,3)
@@ -1178,51 +1334,116 @@ bcopy_ov_short:
END(bcopy)
-NESTED(copyin, 3, 16, ra, 0, 0)
+/*
+ * kcopy(const void *src, void *dst, size_t len);
+ *
+ * Copy len bytes from src to dst, aborting if we encounter a fatal
+ * page fault.
+ *
+ * kcopy() _must_ save and restore the old fault handler since it is
+ * called by uiomove(), which may be in the path of servicing a non-fatal
+ * page fault.
+ */
+NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
+ LDGP(pv)
+ lda sp, -32(sp) /* set up stack frame */
+ stq ra, (32-8)(sp) /* save ra */
+ stq s0, (32-16)(sp) /* save s0 */
+ stq s1, (32-24)(sp) /* save s1 */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, s1
+ lda v0, kcopyerr /* set up fault handler. */
+ .set noat
+ ldq at_reg, 0(s1)
+ ldq at_reg, P_ADDR(at_reg)
+ ldq s0, U_PCB_ONFAULT(at_reg) /* save old handler. */
+ stq v0, U_PCB_ONFAULT(at_reg)
+ .set at
+ CALL(bcopy) /* do the copy. */
+ .set noat
+ ldq at_reg, 0(s1) /* restore the old handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq s0, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (32-8)(sp) /* restore ra. */
+ ldq s0, (32-16)(sp) /* restore s0. */
+ ldq s1, (32-24)(sp) /* restore s1. */
+ lda sp, 32(sp) /* kill stack frame. */
+ mov zero, v0 /* return 0. */
+ RET
+ END(kcopy)
+
+LEAF(kcopyerr, 0)
+ LDGP(pv)
+ .set noat
+ ldq at_reg, 0(s1) /* restore the old handler. */
+ ldq at_reg, P_ADDR(at_reg)
+ stq s0, U_PCB_ONFAULT(at_reg)
+ .set at
+ ldq ra, (32-8)(sp) /* restore ra. */
+ ldq s0, (32-16)(sp) /* restore s0. */
+ ldq s1, (32-24)(sp) /* restore s1. */
+ lda sp, 32(sp) /* kill stack frame. */
+ ldiq v0, EFAULT /* return EFAULT. */
+ RET
+END(kcopyerr)
+
+NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
LDGP(pv)
lda sp, -16(sp) /* set up stack frame */
stq ra, (16-8)(sp) /* save ra */
+ stq s0, (16-16)(sp) /* save s0 */
ldiq t0, VM_MAX_ADDRESS /* make sure that src addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, copyerr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, s0
lda v0, copyerr /* set up fault handler. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(s0)
ldq at_reg, P_ADDR(at_reg)
- stq v0, U_PCB+PCB_ONFAULT(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
.set at
CALL(bcopy) /* do the copy. */
.set noat
- ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, 0(s0) /* kill the fault handler. */
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
ldq ra, (16-8)(sp) /* restore ra. */
+ ldq s0, (16-16)(sp) /* restore s0. */
lda sp, 16(sp) /* kill stack frame. */
mov zero, v0 /* return 0. */
RET
END(copyin)
-NESTED(copyout, 3, 16, ra, 0, 0)
+NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
LDGP(pv)
lda sp, -16(sp) /* set up stack frame */
stq ra, (16-8)(sp) /* save ra */
+ stq s0, (16-16)(sp) /* save s0 */
ldiq t0, VM_MAX_ADDRESS /* make sure that dest addr */
cmpult a1, t0, t1 /* is in user space. */
beq t1, copyerr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, s0
lda v0, copyerr /* set up fault handler. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(s0)
ldq at_reg, P_ADDR(at_reg)
- stq v0, U_PCB+PCB_ONFAULT(at_reg)
+ stq v0, U_PCB_ONFAULT(at_reg)
.set at
CALL(bcopy) /* do the copy. */
.set noat
- ldq at_reg, curproc /* kill the fault handler. */
+ ldq at_reg, 0(s0) /* kill the fault handler. */
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
ldq ra, (16-8)(sp) /* restore ra. */
+ ldq s0, (16-16)(sp) /* restore s0. */
lda sp, 16(sp) /* kill stack frame. */
mov zero, v0 /* return 0. */
RET
@@ -1231,6 +1452,7 @@ NESTED(copyout, 3, 16, ra, 0, 0)
LEAF(copyerr, 0)
LDGP(pv)
ldq ra, (16-8)(sp) /* restore ra. */
+ ldq s0, (16-16)(sp) /* restore s0. */
lda sp, 16(sp) /* kill stack frame. */
ldiq v0, EFAULT /* return EFAULT. */
RET
@@ -1244,25 +1466,27 @@ END(copyerr)
* {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
* user data space.
*/
-#ifdef notdef
LEAF(fuword, 1)
XLEAF(fuiword, 1)
LDGP(pv)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
ldq v0, 0(a0)
zap v0, 0xf0, v0
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
RET
END(fuword)
@@ -1273,17 +1497,20 @@ XLEAF(fuisword, 1)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
/* XXX FETCH IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
RET
END(fusword)
@@ -1294,38 +1521,43 @@ XLEAF(fuibyte, 1)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
/* XXX FETCH IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
RET
END(fubyte)
-#endif /* notdef */
LEAF(suword, 2)
LDGP(pv)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
- stq a1, 0(a0) /* do the wtore. */
+ stq a1, 0(a0) /* do the store. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
mov zero, v0
RET
@@ -1337,17 +1569,20 @@ LEAF(suiword, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
/* XXX STORE IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
call_pal PAL_OSF1_imb /* sync instruction stream */
mov zero, v0
@@ -1359,17 +1594,20 @@ LEAF(susword, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
/* XXX STORE IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
mov zero, v0
RET
@@ -1380,17 +1618,20 @@ LEAF(suisword, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
/* XXX STORE IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
call_pal PAL_OSF1_imb /* sync instruction stream */
mov zero, v0
@@ -1403,11 +1644,14 @@ LEAF(subyte, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
zap a1, 0xfe, a1 /* kill arg's high bytes */
insbl a1, a0, a1 /* move it to the right byte */
@@ -1416,9 +1660,9 @@ LEAF(subyte, 2)
or t0, a1, a1 /* put the result together */
stq_u a1, 0(a0) /* and store it. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
mov zero, v0
RET
@@ -1429,11 +1673,14 @@ LEAF(suibyte, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
.set at
zap a1, 0xfe, a1 /* kill arg's high bytes */
insbl a1, a0, a1 /* move it to the right byte */
@@ -1442,9 +1689,9 @@ LEAF(suibyte, 2)
or t0, a1, a1 /* put the result together */
stq_u a1, 0(a0) /* and store it. */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
call_pal PAL_OSF1_imb /* sync instruction stream */
mov zero, v0
@@ -1471,18 +1718,21 @@ LEAF(fuswintr, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswintrberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswintrberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
- stq a0, U_PCB+PCB_ACCESSADDR(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
+ stq a0, U_PCB_ACCESSADDR(at_reg)
.set at
/* XXX FETCH IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
RET
END(fuswintr)
@@ -1492,18 +1742,21 @@ LEAF(suswintr, 2)
ldiq t0, VM_MAX_ADDRESS /* make sure that addr */
cmpult a0, t0, t1 /* is in user space. */
beq t1, fswintrberr /* if it's not, error out. */
+ /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
+ GET_CURPROC
+ mov v0, t1
lda t0, fswintrberr
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq t0, U_PCB+PCB_ONFAULT(at_reg)
- stq a0, U_PCB+PCB_ACCESSADDR(at_reg)
+ stq t0, U_PCB_ONFAULT(at_reg)
+ stq a0, U_PCB_ACCESSADDR(at_reg)
.set at
/* XXX STORE IT */
.set noat
- ldq at_reg, curproc
+ ldq at_reg, 0(t1)
ldq at_reg, P_ADDR(at_reg)
- stq zero, U_PCB+PCB_ONFAULT(at_reg)
+ stq zero, U_PCB_ONFAULT(at_reg)
.set at
mov zero, v0
RET
@@ -1524,6 +1777,8 @@ XLEAF(suswintr, 2) /* XXX what is a 'word'? */
* Some bogus data, to keep vmstat happy, for now.
*/
+#include <machine/intrcnt.h>
+
.data
EXPORT(intrnames)
#ifndef EVCNT_COUNTERS
@@ -1541,26 +1796,6 @@ EXPORT(eintrcnt)
/**************************************************************************/
/*
- * Object:
- * swpctxt EXPORTED function
- *
- * Change HW process context
- *
- * Arguments:
- * pcb PHYSICAL struct pcb_hw *
- * old_ksp VIRTUAL long *
- *
- * If old_ksp is non-zero it saves the current KSP in it.
- * Execute the PAL call.
- */
-LEAF(swpctxt,2)
- beq a1,Lswpctxt1
- stq sp,0(a1)
-Lswpctxt1: call_pal PAL_OSF1_swpctx
- RET
- END(swpctxt)
-
-/*
* console 'restart' routine to be placed in HWRPB.
*/
LEAF(XentRestart, 1) /* XXX should be NESTED */
@@ -1569,6 +1804,9 @@ LEAF(XentRestart, 1) /* XXX should be NESTED */
stq at_reg,(FRAME_AT*8)(sp)
.set at
stq v0,(FRAME_V0*8)(sp)
+ stq a0,(FRAME_A0*8)(sp)
+ stq a1,(FRAME_A1*8)(sp)
+ stq a2,(FRAME_A2*8)(sp)
stq a3,(FRAME_A3*8)(sp)
stq a4,(FRAME_A4*8)(sp)
stq a5,(FRAME_A5*8)(sp)
@@ -1594,21 +1832,33 @@ LEAF(XentRestart, 1) /* XXX should be NESTED */
stq t12,(FRAME_T12*8)(sp)
stq ra,(FRAME_RA*8)(sp)
- br pv,LXconsole_restart1
-LXconsole_restart1: LDGP(pv)
+ br pv,1f
+1: LDGP(pv)
- ldq a0,(FRAME_RA*8)(sp) /* a0 = ra */
- ldq a1,(FRAME_T11*8)(sp) /* a1 = ai */
- ldq a2,(FRAME_T12*8)(sp) /* a2 = pv */
+ mov sp,a0
CALL(console_restart)
call_pal PAL_halt
END(XentRestart)
+/**************************************************************************/
+
+/*
+ * Kernel setjmp and longjmp. Rather minimalist.
+ *
+ * longjmp(label_t *a)
+ * will generate a "return (1)" from the last call to
+ * setjmp(label_t *a)
+ * by restoring registers from the stack,
+ */
+
+ .set noreorder
+
LEAF(setjmp, 1)
LDGP(pv)
- stq ra, (0 * 8)(a0)
- stq s0, (1 * 8)(a0)
+
+ stq ra, (0 * 8)(a0) /* return address */
+ stq s0, (1 * 8)(a0) /* callee-saved registers */
stq s1, (2 * 8)(a0)
stq s2, (3 * 8)(a0)
stq s3, (4 * 8)(a0)
@@ -1616,16 +1866,24 @@ LEAF(setjmp, 1)
stq s5, (6 * 8)(a0)
stq s6, (7 * 8)(a0)
stq sp, (8 * 8)(a0)
- /* We don't need to store the FP context in the kernel */
+
+ ldiq t0, 0xbeeffedadeadbabe /* set magic number */
+ stq t0, (9 * 8)(a0)
+
mov zero, v0 /* return zero */
RET
END(setjmp)
-LEAF(longjmp, 2)
+LEAF(longjmp, 1)
LDGP(pv)
- mov a1, v0
- ldq ra, (0 * 8)(a0)
- ldq s0, (1 * 8)(a0)
+
+ ldiq t0, 0xbeeffedadeadbabe /* check magic number */
+ ldq t1, (9 * 8)(a0)
+ cmpeq t0, t1, t0
+ beq t0, longjmp_botch /* if bad, punt */
+
+ ldq ra, (0 * 8)(a0) /* return address */
+ ldq s0, (1 * 8)(a0) /* callee-saved registers */
ldq s1, (2 * 8)(a0)
ldq s2, (3 * 8)(a0)
ldq s3, (4 * 8)(a0)
@@ -1633,5 +1891,35 @@ LEAF(longjmp, 2)
ldq s5, (6 * 8)(a0)
ldq s6, (7 * 8)(a0)
ldq sp, (8 * 8)(a0)
+
+ ldiq v0, 1
RET
+
+longjmp_botch:
+ lda a0, longjmp_botchmsg
+ mov ra, a1
+ CALL(panic)
+ call_pal PAL_bugchk
+
+ .data
+longjmp_botchmsg:
+ .asciz "longjmp botch from %p"
+ .text
END(longjmp)
+
+#if 0
+NESTED(transfer_check,0,0,ra,0,0)
+ CALL(U_need_2_run_config)
+ END(transfer_check)
+#endif
+
+/* Random data that shouldn't be necessary. */
+ .data
+EXPORT(cold)
+ .long 1 /* cold start flag (.long -> _4_ bytes) */
+ .align 3
+EXPORT(esym)
+ .quad /* store end of kernel symbol table here */
+
+
+/**************************************************************************/
diff --git a/sys/arch/alpha/alpha/machdep.c b/sys/arch/alpha/alpha/machdep.c
index 0403b5600a1..c37e1c91a93 100644
--- a/sys/arch/alpha/alpha/machdep.c
+++ b/sys/arch/alpha/alpha/machdep.c
@@ -1,5 +1,41 @@
-/* $OpenBSD: machdep.c,v 1.33 2000/04/11 02:44:12 pjanzen Exp $ */
-/* $NetBSD: machdep.c,v 1.61 1996/12/07 01:54:49 cgd Exp $ */
+/* $NetBSD: machdep.c,v 1.206 2000/05/23 05:12:54 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center and by Chris G. Demetriou.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -51,6 +87,7 @@
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/exec_ecoff.h>
+#include <vm/vm.h>
#include <sys/sysctl.h>
#include <sys/core.h>
#include <sys/kcore.h>
@@ -68,8 +105,8 @@
#include <sys/mount.h>
#include <sys/syscallargs.h>
-#include <vm/pmap.h>
#include <vm/vm_kern.h>
+#include <uvm/uvm_extern.h>
#include <dev/cons.h>
@@ -109,12 +146,12 @@
#include "le_ioasic.h" /* for le_iomem creation */
-vm_map_t buffer_map;
-
int cpu_dump __P((void));
int cpu_dumpsize __P((void));
+u_long cpu_dump_mempagecnt __P((void));
void do_sir __P((void));
void dumpsys __P((void));
+caddr_t allocsys __P((caddr_t));
void identifycpu __P((void));
void netintr __P((void));
void regdump __P((struct trapframe *framep));
@@ -123,7 +160,6 @@ void printregs __P((struct reg *));
/*
* Declare these as initialized data so we can patch them.
*/
-int nswbuf = 0;
#ifdef NBUF
int nbuf = NBUF;
#else
@@ -134,12 +170,15 @@ int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
+
+vm_map_t exec_map = NULL;
+vm_map_t mb_map = NULL;
+vm_map_t phys_map = NULL;
+
int maxmem; /* max memory per process */
int totalphysmem; /* total amount of physical memory in system */
int physmem; /* physical mem used by OpenBSD + some rsvd */
-int firstusablepage; /* first usable memory page */
-int lastusablepage; /* last usable memory page */
int resvmem; /* amount of memory reserved for PROM */
int unusedmem; /* amount of memory for OS that we don't use */
int unknownmem; /* amount of memory with an unknown use */
@@ -153,61 +192,169 @@ int cputype; /* system type, from the RPB */
u_int32_t no_optimize;
/* the following is used externally (sysctl_hw) */
-char machine[] = "alpha";
+char machine[] = MACHINE; /* from <machine/param.h> */
char cpu_model[128];
-const struct cpusw *cpu_fn_switch; /* function switch */
+char root_device[17];
struct user *proc0paddr;
/* Number of machine cycles per microsecond */
u_int64_t cycles_per_usec;
-/* some memory areas for device DMA. "ick." */
-caddr_t le_iomem; /* XXX iomem for LANCE DMA */
-
/* number of cpus in the box. really! */
int ncpus;
-char boot_flags[64];
-char booted_kernel[64];
+#if !defined(MULTIPROCESSOR)
+/* A single machine check info structure for single CPU configurations. */
+struct mchkinfo mchkinfo_store;
+#endif
+
+struct bootinfo_kernel bootinfo;
+
+/* For built-in TCDS */
+#if defined(DEC_3000_300) || defined(DEC_3000_500)
+u_int8_t dec_3000_scsiid[2], dec_3000_scsifast[2];
+#endif
+
+struct platform platform;
/* for cpu_sysctl() */
-char root_device[17];
int alpha_unaligned_print = 1; /* warn about unaligned accesses */
int alpha_unaligned_fix = 1; /* fix up unaligned accesses */
int alpha_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */
+/*
+ * XXX This should be dynamically sized, but we have the chicken-egg problem!
+ * XXX it should also be larger than it is, because not all of the mddt
+ * XXX clusters end up being used for VM.
+ */
+phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; /* low size bits overloaded */
+int mem_cluster_cnt;
+
void
-alpha_init(pfn, ptb, symend)
+alpha_init(pfn, ptb, bim, bip, biv)
u_long pfn; /* first free PFN number */
u_long ptb; /* PFN of current level 1 page table */
- char *symend; /* end of the symbol table */
+ u_long bim; /* bootinfo magic */
+ u_long bip; /* bootinfo pointer */
+ u_long biv; /* bootinfo version */
{
- extern char _end[];
- extern char *esym;
- caddr_t start, v;
+ extern char kernel_text[], _end[];
struct mddt *mddtp;
+ struct mddt_cluster *memc;
int i, mddtweird;
+ struct vm_physseg *vps;
+ vaddr_t kernstart, kernend;
+ paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
+ vsize_t size;
char *p;
+ caddr_t v;
+ char *bootinfo_msg;
+ const struct cpuinit *c;
+ extern caddr_t esym;
+ struct cpu_info *ci;
+ cpuid_t cpu_id;
- /* Save the symbol table end */
- esym = symend;
+ /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
/*
- * Turn off interrupts and floating point.
+ * Turn off interrupts (not mchecks) and floating point.
* Make sure the instruction and data streams are consistent.
*/
- (void)splhigh();
+ (void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
alpha_pal_wrfen(0);
ALPHA_TBIA();
alpha_pal_imb();
+ cpu_id = cpu_number();
+
+#if defined(MULTIPROCESSOR)
/*
- * get address of the restart block, while the bootstrap
- * mapping is still around.
+ * Set our SysValue to the address of our cpu_info structure.
+ * Secondary processors do this in their spinup trampoline.
*/
- hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(
- (vm_offset_t)(*(struct rpb **)HWRPB_ADDR));
+ alpha_pal_wrval((u_long)&cpu_info[cpu_id]);
+#endif
+
+ ci = curcpu();
+ ci->ci_cpuid = cpu_id;
+
+ /*
+ * Get critical system information (if possible, from the
+ * information provided by the boot program).
+ */
+ bootinfo_msg = NULL;
+ if (bim == BOOTINFO_MAGIC) {
+ if (biv == 0) { /* backward compat */
+ biv = *(u_long *)bip;
+ bip += 8;
+ }
+ switch (biv) {
+ case 1: {
+ struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip;
+
+ bootinfo.ssym = v1p->ssym;
+ bootinfo.esym = v1p->esym;
+ /* hwrpb may not be provided by boot block in v1 */
+ if (v1p->hwrpb != NULL) {
+ bootinfo.hwrpb_phys =
+ ((struct rpb *)v1p->hwrpb)->rpb_phys;
+ bootinfo.hwrpb_size = v1p->hwrpbsize;
+ } else {
+ bootinfo.hwrpb_phys =
+ ((struct rpb *)HWRPB_ADDR)->rpb_phys;
+ bootinfo.hwrpb_size =
+ ((struct rpb *)HWRPB_ADDR)->rpb_size;
+ }
+ bcopy(v1p->boot_flags, bootinfo.boot_flags,
+ min(sizeof v1p->boot_flags,
+ sizeof bootinfo.boot_flags));
+ bcopy(v1p->booted_kernel, bootinfo.booted_kernel,
+ min(sizeof v1p->booted_kernel,
+ sizeof bootinfo.booted_kernel));
+ /* booted dev not provided in bootinfo */
+ init_prom_interface((struct rpb *)
+ ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys));
+ prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
+ sizeof bootinfo.booted_dev);
+ break;
+ }
+ default:
+ bootinfo_msg = "unknown bootinfo version";
+ goto nobootinfo;
+ }
+ } else {
+ bootinfo_msg = "boot program did not pass bootinfo";
+nobootinfo:
+ bootinfo.ssym = (u_long)_end;
+ bootinfo.esym = (u_long)_end;
+ bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys;
+ bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size;
+ init_prom_interface((struct rpb *)HWRPB_ADDR);
+ prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags,
+ sizeof bootinfo.boot_flags);
+ prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel,
+ sizeof bootinfo.booted_kernel);
+ prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
+ sizeof bootinfo.booted_dev);
+ }
+
+ esym = (caddr_t)bootinfo.esym;
+ /*
+ * Initialize the kernel's mapping of the RPB. It's needed for
+ * lots of things.
+ */
+ hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys);
+
+#if defined(DEC_3000_300) || defined(DEC_3000_500)
+ if (hwrpb->rpb_type == ST_DEC_3000_300 ||
+ hwrpb->rpb_type == ST_DEC_3000_500) {
+ prom_getenv(PROM_E_SCSIID, dec_3000_scsiid,
+ sizeof(dec_3000_scsiid));
+ prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast,
+ sizeof(dec_3000_scsifast));
+ }
+#endif
/*
* Remember how many cycles there are per microsecond,
@@ -216,71 +363,231 @@ alpha_init(pfn, ptb, symend)
cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
/*
- * Init the PROM interface, so we can use printf
- * until PROM mappings go away in consinit.
+	 * Initialize the (temporary) bootstrap console interface, so
+ * we can use printf until the VM system starts being setup.
+ * The real console is initialized before then.
*/
- init_prom_interface();
+ init_bootstrap_console();
+
+ /* OUTPUT NOW ALLOWED */
+
+ /* delayed from above */
+ if (bootinfo_msg)
+ printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n",
+ bootinfo_msg, bim, bip, biv);
+
+ /* Initialize the trap vectors on the primary processor. */
+ trap_init();
/*
- * Point interrupt/exception vectors to our own.
+ * Find out what hardware we're on, and do basic initialization.
*/
- alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
- alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
- alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
- alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
- alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
- alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
+ cputype = hwrpb->rpb_type;
+ if (cputype < 0) {
+ /*
+ * At least some white-box systems have SRM which
+ * reports a systype that's the negative of their
+ * blue-box counterpart.
+ */
+ cputype = -cputype;
+ }
+ c = platform_lookup(cputype);
+ if (c == NULL) {
+ platform_not_supported();
+ /* NOTREACHED */
+ }
+ (*c->init)();
+ strcpy(cpu_model, platform.model);
/*
- * Disable System and Processor Correctable Error reporting.
- * Clear pending machine checks and error reports, etc.
+	 * Initialize the real console, so that the bootstrap console is
+ * no longer necessary.
*/
- alpha_pal_wrmces(alpha_pal_rdmces() | ALPHA_MCES_DSC | ALPHA_MCES_DPC);
+ (*platform.cons_init)();
+
+#ifdef DIAGNOSTIC
+ /* Paranoid sanity checking */
+
+ /* We should always be running on the primary. */
+ assert(hwrpb->rpb_primary_cpu_id == alpha_pal_whami());
+
+ /*
+ * On single-CPU systypes, the primary should always be CPU 0,
+ * except on Alpha 8200 systems where the CPU id is related
+ * to the VID, which is related to the Turbo Laser node id.
+ */
+ if (cputype != ST_DEC_21000)
+ assert(hwrpb->rpb_primary_cpu_id == 0);
+#endif
+
+ /* NO MORE FIRMWARE ACCESS ALLOWED */
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ /*
+ * XXX (unless _PMAP_MAY_USE_PROM_CONSOLE is defined and
+ * XXX pmap_uses_prom_console() evaluates to non-zero.)
+ */
+#endif
+
+ /*
+ * find out this system's page size
+ */
+ PAGE_SIZE = hwrpb->rpb_page_size;
+ if (PAGE_SIZE != 8192)
+ panic("page size %d != 8192?!", PAGE_SIZE);
+
+ /*
+ * Initialize PAGE_SIZE-dependent variables.
+ */
+ uvm_setpagesize();
+
+ /*
+ * Find the beginning and end of the kernel (and leave a
+ * bit of space before the beginning for the bootstrap
+ * stack).
+ */
+ kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE;
+#ifdef DDB
+ kernend = (vaddr_t)round_page((vaddr_t)bootinfo.esym);
+#else
+ kernend = (vaddr_t)round_page((vaddr_t)_end);
+#endif
+
+ kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart));
+ kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend));
/*
* Find out how much memory is available, by looking at
* the memory cluster descriptors. This also tries to do
* its best to detect things things that have never been seen
* before...
- *
- * XXX Assumes that the first "system" cluster is the
- * only one we can use. Is the second (etc.) system cluster
- * (if one happens to exist) guaranteed to be contiguous? or...?
*/
mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
- /*
- * BEGIN MDDT WEIRDNESS CHECKING
- */
+ /* MDDT SANITY CHECKING */
mddtweird = 0;
-
-#define cnt mddtp->mddt_cluster_cnt
-#define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
- if (cnt != 2 && cnt != 3) {
- printf("WARNING: weird number (%ld) of mem clusters\n", cnt);
- mddtweird = 1;
- } else if (usage(0) != MDDT_PALCODE ||
- usage(1) != MDDT_SYSTEM ||
- (cnt == 3 && usage(2) != MDDT_PALCODE)) {
+ if (mddtp->mddt_cluster_cnt < 2) {
mddtweird = 1;
- printf("WARNING: %ld mem clusters, but weird config\n", cnt);
+ printf("WARNING: weird number of mem clusters: %lu\n",
+ mddtp->mddt_cluster_cnt);
}
- for (i = 0; i < cnt; i++) {
- if ((usage(i) & MDDT_mbz) != 0) {
- printf("WARNING: mem cluster %d has weird usage %lx\n",
- i, usage(i));
- mddtweird = 1;
+#if 0
+ printf("Memory cluster count: %d\n", mddtp->mddt_cluster_cnt);
+#endif
+
+ for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
+ memc = &mddtp->mddt_clusters[i];
+#if 0
+ printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i,
+ memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage);
+#endif
+ totalphysmem += memc->mddt_pg_cnt;
+ if (mem_cluster_cnt < VM_PHYSSEG_MAX) { /* XXX */
+ mem_clusters[mem_cluster_cnt].start =
+ ptoa(memc->mddt_pfn);
+ mem_clusters[mem_cluster_cnt].size =
+ ptoa(memc->mddt_pg_cnt);
+ if (memc->mddt_usage & MDDT_mbz ||
+ memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
+ memc->mddt_usage & MDDT_PALCODE)
+ mem_clusters[mem_cluster_cnt].size |=
+ VM_PROT_READ;
+ else
+ mem_clusters[mem_cluster_cnt].size |=
+ VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ mem_cluster_cnt++;
}
- if (mddtp->mddt_clusters[i].mddt_pg_cnt == 0) {
- printf("WARNING: mem cluster %d has pg cnt == 0\n", i);
+
+ if (memc->mddt_usage & MDDT_mbz) {
mddtweird = 1;
+ printf("WARNING: mem cluster %d has weird "
+ "usage 0x%lx\n", i, memc->mddt_usage);
+ unknownmem += memc->mddt_pg_cnt;
+ continue;
+ }
+ if (memc->mddt_usage & MDDT_NONVOLATILE) {
+ /* XXX should handle these... */
+ printf("WARNING: skipping non-volatile mem "
+ "cluster %d\n", i);
+ unusedmem += memc->mddt_pg_cnt;
+ continue;
+ }
+ if (memc->mddt_usage & MDDT_PALCODE) {
+ resvmem += memc->mddt_pg_cnt;
+ continue;
}
- /* XXX other things to check? */
+
+ /*
+ * We have a memory cluster available for system
+ * software use. We must determine if this cluster
+ * holds the kernel.
+ */
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ /*
+ * XXX If the kernel uses the PROM console, we only use the
+ * XXX memory after the kernel in the first system segment,
+ * XXX to avoid clobbering prom mapping, data, etc.
+ */
+ if (!pmap_uses_prom_console() || physmem == 0) {
+#endif /* _PMAP_MAY_USE_PROM_CONSOLE */
+ physmem += memc->mddt_pg_cnt;
+ pfn0 = memc->mddt_pfn;
+ pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt;
+ if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) {
+ /*
+ * Must compute the location of the kernel
+ * within the segment.
+ */
+#if 0
+ printf("Cluster %d contains kernel\n", i);
+#endif
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ if (!pmap_uses_prom_console()) {
+#endif /* _PMAP_MAY_USE_PROM_CONSOLE */
+ if (pfn0 < kernstartpfn) {
+ /*
+ * There is a chunk before the kernel.
+ */
+#if 0
+ printf("Loading chunk before kernel: "
+ "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
+#endif
+ uvm_page_physload(pfn0, kernstartpfn,
+ pfn0, kernstartpfn, VM_FREELIST_DEFAULT);
+ }
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ }
+#endif /* _PMAP_MAY_USE_PROM_CONSOLE */
+ if (kernendpfn < pfn1) {
+ /*
+ * There is a chunk after the kernel.
+ */
+#if 0
+ printf("Loading chunk after kernel: "
+ "0x%lx / 0x%lx\n", kernendpfn, pfn1);
+#endif
+ uvm_page_physload(kernendpfn, pfn1,
+ kernendpfn, pfn1, VM_FREELIST_DEFAULT);
+ }
+ } else {
+ /*
+ * Just load this cluster as one chunk.
+ */
+#if 0
+ printf("Loading cluster %d: 0x%lx / 0x%lx\n", i,
+ pfn0, pfn1);
+#endif
+ uvm_page_physload(pfn0, pfn1, pfn0, pfn1,
+ VM_FREELIST_DEFAULT);
+ }
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ }
+#endif /* _PMAP_MAY_USE_PROM_CONSOLE */
}
-#undef cnt
-#undef usage
+ /*
+ * Dump out the MDDT if it looks odd...
+ */
if (mddtweird) {
printf("\n");
printf("complete memory cluster information:\n");
@@ -303,219 +610,81 @@ alpha_init(pfn, ptb, symend)
}
printf("\n");
}
- /*
- * END MDDT WEIRDNESS CHECKING
- */
- for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
- totalphysmem += mddtp->mddt_clusters[i].mddt_pg_cnt;
-#define usage(n) mddtp->mddt_clusters[(n)].mddt_usage
-#define pgcnt(n) mddtp->mddt_clusters[(n)].mddt_pg_cnt
- if ((usage(i) & MDDT_mbz) != 0)
- unknownmem += pgcnt(i);
- else if ((usage(i) & ~MDDT_mbz) == MDDT_PALCODE)
- resvmem += pgcnt(i);
- else if ((usage(i) & ~MDDT_mbz) == MDDT_SYSTEM) {
- /*
- * assumes that the system cluster listed is
- * one we're in...
- */
- if (physmem != resvmem) {
- physmem += pgcnt(i);
- firstusablepage =
- mddtp->mddt_clusters[i].mddt_pfn;
- lastusablepage =
- firstusablepage + pgcnt(i) - 1;
- } else
- unusedmem += pgcnt(i);
- }
-#undef usage
-#undef pgcnt
- }
if (totalphysmem == 0)
panic("can't happen: system seems to have no memory!");
maxmem = physmem;
-
#if 0
printf("totalphysmem = %d\n", totalphysmem);
printf("physmem = %d\n", physmem);
- printf("firstusablepage = %d\n", firstusablepage);
- printf("lastusablepage = %d\n", lastusablepage);
printf("resvmem = %d\n", resvmem);
printf("unusedmem = %d\n", unusedmem);
printf("unknownmem = %d\n", unknownmem);
#endif
/*
- * find out this CPU's page size
+ * Initialize error message buffer (at end of core).
*/
- PAGE_SIZE = hwrpb->rpb_page_size;
- if (PAGE_SIZE != 8192)
- panic("page size %d != 8192?!", PAGE_SIZE);
+ {
+ vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
+ vsize_t reqsz = sz;
- /*
- * Init PAGE_SIZE dependent variables in the MI VM system
- */
- vm_set_page_size();
+ vps = &vm_physmem[vm_nphysseg - 1];
- v = (caddr_t)alpha_round_page(symend ? symend : _end);
- /*
- * Init mapping for u page(s) for proc 0
- */
- start = v;
- curproc->p_addr = proc0paddr = (struct user *)v;
- v += UPAGES * NBPG;
+ /* shrink so that it'll fit in the last segment */
+ if ((vps->avail_end - vps->avail_start) < atop(sz))
+ sz = ptoa(vps->avail_end - vps->avail_start);
- /*
- * Find out what hardware we're on, and remember its type name.
- */
- cputype = hwrpb->rpb_type;
- if (cputype < 0 || cputype > ncpusw) {
-unknown_cputype:
- printf("\n");
- printf("Unknown system type %d.\n", cputype);
- printf("\n");
- panic("unknown system type");
- }
- cpu_fn_switch = &cpusw[cputype];
- if (cpu_fn_switch->family == NULL)
- goto unknown_cputype;
- if (cpu_fn_switch->option == NULL) {
- printf("\n");
- printf("OpenBSD does not currently support system type %d\n",
- cputype);
- printf("(%s family).\n", cpu_fn_switch->family);
- printf("\n");
- panic("unsupported system type");
- }
- if (!cpu_fn_switch->present) {
- printf("\n");
- printf("Support for system type %d (%s family) is\n", cputype,
- cpu_fn_switch->family);
- printf("not present in this kernel. Build a kernel with "
- "\"options %s\"\n", cpu_fn_switch->option);
- printf("to include support for this system type.\n");
- printf("\n");
- panic("support for system not present");
- }
+ vps->end -= atop(sz);
+ vps->avail_end -= atop(sz);
+ initmsgbuf((caddr_t) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end)), sz);
- if ((*cpu_fn_switch->model_name)() != NULL)
- strncpy(cpu_model, (*cpu_fn_switch->model_name)(),
- sizeof cpu_model - 1);
- else {
- strncpy(cpu_model, cpu_fn_switch->family,
- sizeof cpu_model - 1);
- strcat(cpu_model, " family"); /* XXX */
- }
- cpu_model[sizeof cpu_model - 1] = '\0';
+ /* Remove the last segment if it now has no pages. */
+ if (vps->start == vps->end)
+ vm_nphysseg--;
-#if NLE_IOASIC > 0
- /*
- * Grab 128K at the top of physical memory for the lance chip
- * on machines where it does dma through the I/O ASIC.
- * It must be physically contiguous and aligned on a 128K boundary.
- *
- * Note that since this is conditional on the presence of
- * IOASIC-attached 'le' units in the kernel config, the
- * message buffer may move on these systems. This shouldn't
- * be a problem, because once people have a kernel config that
- * they use, they're going to stick with it.
- */
- if (cputype == ST_DEC_3000_500 ||
- cputype == ST_DEC_3000_300) { /* XXX possibly others? */
- lastusablepage -= btoc(128 * 1024);
- le_iomem =
- (caddr_t)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1));
- }
-#endif /* NLE_IOASIC */
+ /* warn if the message buffer had to be shrunk */
+ if (sz != reqsz)
+ printf("WARNING: %ld bytes not available for msgbuf "
+ "in last cluster (%ld used)\n", reqsz, sz);
- /*
- * Initialize error message buffer (at end of core).
- */
- lastusablepage -= btoc(MSGBUFSIZE);
- printf("%lx %d\n", (caddr_t)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1)),
- MSGBUFSIZE);
- initmsgbuf((caddr_t)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1)),
- MSGBUFSIZE);
- printf("%lx %d\n", (caddr_t)ALPHA_PHYS_TO_K0SEG(ctob(lastusablepage + 1)),
- MSGBUFSIZE);
+ }
/*
- * Allocate space for system data structures.
- * The first available kernel virtual address is in "v".
- * As pages of kernel virtual memory are allocated, "v" is incremented.
- *
- * These data structures are allocated here instead of cpu_startup()
- * because physical memory is directly addressable. We don't have
- * to map these into virtual address space.
+ * Init mapping for u page(s) for proc 0
*/
-#define valloc(name, type, num) \
- (name) = (type *)v; v = (caddr_t)ALIGN((name)+(num))
-#define valloclim(name, type, num, lim) \
- (name) = (type *)v; v = (caddr_t)ALIGN((lim) = ((name)+(num)))
-#ifdef REAL_CLISTS
- valloc(cfree, struct cblock, nclist);
-#endif
- valloc(timeouts, struct timeout, ntimeout);
-#ifdef SYSVSHM
- valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
-#endif
-#ifdef SYSVSEM
- valloc(sema, struct semid_ds, seminfo.semmni);
- valloc(sem, struct sem, seminfo.semmns);
- /* This is pretty disgusting! */
- valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
-#endif
-#ifdef SYSVMSG
- valloc(msgpool, char, msginfo.msgmax);
- valloc(msgmaps, struct msgmap, msginfo.msgseg);
- valloc(msghdrs, struct msg, msginfo.msgtql);
- valloc(msqids, struct msqid_ds, msginfo.msgmni);
-#endif
+ proc0.p_addr = proc0paddr =
+ (struct user *)pmap_steal_memory(UPAGES * PAGE_SIZE, NULL, NULL);
/*
- * Determine how many buffers to allocate.
- * We allocate 10% of memory for buffer space. Insure a
- * minimum of 16 buffers. We allocate 1/2 as many swap buffer
- * headers as file i/o buffers.
+ * Allocate space for system data structures. These data structures
+ * are allocated here instead of cpu_startup() because physical
+ * memory is directly addressable. We don't have to map these into
+ * virtual address space.
*/
- if (bufpages == 0)
- bufpages = (physmem * 10) / (CLSIZE * 100);
- if (nbuf == 0) {
- nbuf = bufpages;
- if (nbuf < 16)
- nbuf = 16;
- }
- if (nswbuf == 0) {
- nswbuf = (nbuf / 2) &~ 1; /* force even */
- if (nswbuf > 256)
- nswbuf = 256; /* sanity */
- }
- valloc(swbuf, struct buf, nswbuf);
- valloc(buf, struct buf, nbuf);
+ size = (vsize_t)allocsys(NULL);
+ v = (caddr_t)pmap_steal_memory(size, NULL, NULL);
+ if ((allocsys(v) - v) != size)
+ panic("alpha_init: table size inconsistency");
/*
* Clear allocated memory.
*/
- bzero(start, v - start);
+ bzero(v, size);
/*
* Initialize the virtual memory system, and set the
* page table base register in proc 0's PCB.
*/
-#ifndef NEW_PMAP
- pmap_bootstrap((vm_offset_t)v, ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT));
-#else
- pmap_bootstrap((vm_offset_t)v, ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
- hwrpb->rpb_max_asn);
-#endif
+ pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
+ hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt);
/*
* Initialize the rest of proc 0's PCB, and cache its physical
* address.
*/
proc0.p_md.md_pcbpaddr =
- (struct pcb *)ALPHA_K0SEG_TO_PHYS((vm_offset_t)&proc0paddr->u_pcb);
+ (struct pcb *)ALPHA_K0SEG_TO_PHYS((vaddr_t)&proc0paddr->u_pcb);
/*
* Set the kernel sp, reserving space for an (empty) trapframe,
@@ -526,29 +695,23 @@ unknown_cputype:
proc0.p_md.md_tf =
(struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp;
-#ifdef NEW_PMAP
- pmap_activate(kernel_pmap, &proc0paddr->u_pcb.pcb_hw, 0);
-#endif
+ /*
+ * Initialize the primary CPU's idle PCB to proc0's. In a
+ * MULTIPROCESSOR configuration, each CPU will later get
+ * its own idle PCB when autoconfiguration runs.
+ */
+ ci->ci_idle_pcb = &proc0paddr->u_pcb;
+ ci->ci_idle_pcb_paddr = (u_long)proc0.p_md.md_pcbpaddr;
/*
* Look at arguments passed to us and compute boothowto.
- * Also, get kernel name so it can be used in user-land.
*/
- prom_getenv(PROM_E_BOOTED_OSFLAGS, boot_flags, sizeof(boot_flags));
-#if 0
- printf("boot flags = \"%s\"\n", boot_flags);
-#endif
- prom_getenv(PROM_E_BOOTED_FILE, booted_kernel,
- sizeof(booted_kernel));
-#if 0
- printf("booted kernel = \"%s\"\n", booted_kernel);
-#endif
boothowto = RB_SINGLE;
#ifdef KADB
boothowto |= RB_KDB;
#endif
- for (p = boot_flags; p && *p != '\0'; p++) {
+ for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
/*
* Note that we'd really like to differentiate case here,
* but the Alpha AXP Architecture Reference Manual
@@ -593,9 +756,26 @@ unknown_cputype:
case 'N':
boothowto |= RB_ASKNAME;
break;
+
+ case 's': /* single-user (default, supported for sanity) */
+ case 'S':
+ boothowto |= RB_SINGLE;
+ break;
+
+ case '-':
+ /*
+ * Just ignore this. It's not required, but it's
+ * common for it to be passed regardless.
+ */
+ break;
+
+ default:
+ printf("Unrecognized boot flag '%c'.\n", *p);
+ break;
}
}
+
/*
* Figure out the number of cpus in the box, from RPB fields.
* Really. We mean it.
@@ -603,23 +783,107 @@ unknown_cputype:
for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
struct pcs *pcsp;
- pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
- (i * hwrpb->rpb_pcs_size));
+ pcsp = LOCATE_PCS(hwrpb, i);
if ((pcsp->pcs_flags & PCS_PP) != 0)
ncpus++;
}
-}
-void
-consinit()
-{
- (*cpu_fn_switch->cons_init)();
- pmap_unmap_prom();
+ /*
+ * Initialize debuggers, and break into them if appropriate.
+ */
#ifdef DDB
ddb_init();
+
if (boothowto & RB_KDB)
Debugger();
#endif
+#ifdef KGDB
+ if (boothowto & RB_KDB)
+ kgdb_connect(0);
+#endif
+ /*
+ * Figure out our clock frequency, from RPB fields.
+ */
+ hz = hwrpb->rpb_intr_freq >> 12;
+ if (!(60 <= hz && hz <= 10240)) {
+ hz = 1024;
+#ifdef DIAGNOSTIC
+ printf("WARNING: unbelievable rpb_intr_freq: %ld (%d hz)\n",
+ hwrpb->rpb_intr_freq, hz);
+#endif
+ }
+}
+
+caddr_t
+allocsys(v)
+ caddr_t v;
+{
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ *
+ * These data structures are allocated here instead of cpu_startup()
+ * because physical memory is directly addressable. We don't have
+ * to map these into virtual address space.
+ */
+#define valloc(name, type, num) \
+ (name) = (type *)v; v = (caddr_t)ALIGN((name)+(num))
+
+#ifdef REAL_CLISTS
+ valloc(cfree, struct cblock, nclist);
+#endif
+ valloc(timeouts, struct timeout, ntimeout);
+#ifdef SYSVSHM
+ valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
+#endif
+#ifdef SYSVSEM
+ valloc(sema, struct semid_ds, seminfo.semmni);
+ valloc(sem, struct sem, seminfo.semmns);
+ /* This is pretty disgusting! */
+ valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
+#endif
+#ifdef SYSVMSG
+ valloc(msgpool, char, msginfo.msgmax);
+ valloc(msgmaps, struct msgmap, msginfo.msgseg);
+ valloc(msghdrs, struct msg, msginfo.msgtql);
+ valloc(msqids, struct msqid_ds, msginfo.msgmni);
+#endif
+
+#ifndef BUFCACHEPERCENT
+#define BUFCACHEPERCENT 10
+#endif
+ /*
+ * Determine how many buffers to allocate.
+ * We allocate 10% of memory for buffer space. Insure a
+ * minimum of 16 buffers.
+ */
+ if (bufpages == 0)
+ bufpages = (physmem / ((100/BUFCACHEPERCENT) / CLSIZE));
+ if (nbuf == 0) {
+ nbuf = bufpages;
+ if (nbuf < 16)
+ nbuf = 16;
+ }
+ valloc(buf, struct buf, nbuf);
+
+#undef valloc
+
+ return v;
+}
+
+void
+consinit()
+{
+
+ /*
+ * Everything related to console initialization is done
+ * in alpha_init().
+ */
+#if defined(DIAGNOSTIC) && defined(_PMAP_MAY_USE_PROM_CONSOLE)
+ printf("consinit: %susing prom console\n",
+ pmap_uses_prom_console() ? "" : "not ");
+#endif
}
void
@@ -627,8 +891,8 @@ cpu_startup()
{
register unsigned i;
int base, residual;
- vm_offset_t minaddr, maxaddr;
- vm_size_t size;
+ vaddr_t minaddr, maxaddr;
+ vsize_t size;
#if defined(DEBUG)
extern int pmapdebug;
int opmapdebug = pmapdebug;
@@ -641,13 +905,15 @@ cpu_startup()
*/
printf(version);
identifycpu();
- printf("real mem = %d (%d reserved for PROM, %d used by OpenBSD)\n",
- ctob(totalphysmem), ctob(resvmem), ctob(physmem));
- if (unusedmem)
- printf("WARNING: unused memory = %d bytes\n", ctob(unusedmem));
- if (unknownmem)
- printf("WARNING: %d bytes of memory with unknown purpose\n",
- ctob(unknownmem));
+ printf("total memory = %d\n", ptoa(totalphysmem));
+ printf("(%d reserved for PROM, ", ptoa(resvmem));
+ printf("%d used by OpenBSD)\n", ptoa(physmem));
+ if (unusedmem) {
+ printf("WARNING: unused memory = %d\n", ptoa(unusedmem));
+ }
+ if (unknownmem) {
+ printf("WARNING: %d of memory with unknown purpose\n", ptoa(unknownmem));
+ }
/*
* Allocate virtual address space for file I/O buffers.
@@ -655,52 +921,61 @@ cpu_startup()
* and usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
- buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
- &maxaddr, size, TRUE);
- minaddr = (vm_offset_t)buffers;
- if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
- &minaddr, size, FALSE) != KERN_SUCCESS)
- panic("startup: cannot allocate buffers");
+ if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
+ panic("startup: cannot allocate VM for buffers");
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
- vm_size_t curbufsize;
- vm_offset_t curbuf;
+ vsize_t curbufsize;
+ vaddr_t curbuf;
+ struct vm_page *pg;
/*
- * First <residual> buffers get (base+1) physical pages
- * allocated for them. The rest get (base) physical pages.
- *
- * The rest of each buffer occupies virtual space,
- * but has no physical memory allocated for it.
+ * Each buffer has MAXBSIZE bytes of VM space allocated. Of
+ * that MAXBSIZE space, we allocate and map (base+1) pages
+ * for the first "residual" buffers, and then we allocate
+ * "base" pages for the rest.
*/
- curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
- curbufsize = CLBYTES * (i < residual ? base+1 : base);
- vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
- vm_map_simplify(buffer_map, curbuf);
+ curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
+ curbufsize = NBPG * ((i < residual) ? (base+1) : base);
+
+ while (curbufsize) {
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ if (pg == NULL)
+ panic("cpu_startup: not enough memory for "
+ "buffer cache");
+ pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ|VM_PROT_WRITE);
+ curbuf += PAGE_SIZE;
+ curbufsize -= PAGE_SIZE;
+ }
}
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
- exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- 16 * NCARGS, TRUE);
+ exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
/*
* Allocate a submap for physio
*/
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- VM_PHYS_SIZE, TRUE);
+ phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ VM_PHYS_SIZE, 0, FALSE, NULL);
/*
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
* we use the more space efficient malloc in place of kmem_alloc.
*/
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
- M_MBUF, M_NOWAIT);
+ M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
- mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
- VM_MBUF_SIZE, FALSE);
+ mb_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&mbutl, &maxaddr,
+ VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
+
/*
* Initialize timeouts
*/
@@ -709,9 +984,15 @@ cpu_startup()
#if defined(DEBUG)
pmapdebug = opmapdebug;
#endif
- printf("avail mem = %ld\n", (long)ptoa(cnt.v_free_count));
- printf("using %ld buffers containing %ld bytes of memory\n",
- (long)nbuf, (long)(bufpages * CLBYTES));
+ printf("avail memory = %d\n", ptoa(uvmexp.free));
+#if 0
+ {
+ extern u_long pmap_pages_stolen;
+
+ printf("stolen memory for VM structures = %d\n", pmap_pages_stolen * PAGE_SIZE);
+ }
+#endif
+ printf("using %ld buffers containing %d of memory\n", (long)nbuf, bufpages * NBPG);
/*
* Set up buffers, so they can be used to read disk labels.
@@ -731,21 +1012,78 @@ cpu_startup()
configure();
/*
- * Note that bootstrapping is finished, and set the HWRPB up
- * to do restarts.
+ * Set up the HWPCB so that it's safe to configure secondary
+ * CPUs.
+ */
+ hwrpb_primary_init();
+}
+
+/*
+ * Retrieve the platform name from the DSR.
+ */
+const char *
+alpha_dsr_sysname()
+{
+ struct dsrdb *dsr;
+ const char *sysname;
+
+ /*
+ * DSR does not exist on early HWRPB versions.
*/
- hwrpb_restart_setup();
+ if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS)
+ return (NULL);
+
+ dsr = (struct dsrdb *)(((caddr_t)hwrpb) + hwrpb->rpb_dsrdb_off);
+ sysname = (const char *)((caddr_t)dsr + (dsr->dsr_sysname_off +
+ sizeof(u_int64_t)));
+ return (sysname);
+}
+
+/*
+ * Lookup the system specified system variation in the provided table,
+ * returning the model string on match.
+ */
+const char *
+alpha_variation_name(variation, avtp)
+ u_int64_t variation;
+ const struct alpha_variation_table *avtp;
+{
+ int i;
+
+ for (i = 0; avtp[i].avt_model != NULL; i++)
+ if (avtp[i].avt_variation == variation)
+ return (avtp[i].avt_model);
+ return (NULL);
+}
+
+/*
+ * Generate a default platform name based for unknown system variations.
+ */
+const char *
+alpha_unknown_sysname()
+{
+ static char s[128]; /* safe size */
+
+ sprintf(s, "%s family, unknown model variation 0x%lx",
+ platform.family, hwrpb->rpb_variation & SV_ST_MASK);
+ return ((const char *)s);
}
void
identifycpu()
{
+ char *s;
/*
* print out CPU identification information.
*/
- printf("%s, %ldMHz\n", cpu_model,
- hwrpb->rpb_cc_freq / 1000000); /* XXX true for 21164? */
+ printf("%s", cpu_model);
+ for(s = cpu_model; *s; ++s)
+ if(strncasecmp(s, "MHz", 3) == 0)
+ goto skipMHz;
+ printf(", %ldMHz", hwrpb->rpb_cc_freq / 1000000);
+skipMHz:
+ printf("\n");
printf("%ld byte page size, %d processor%s.\n",
hwrpb->rpb_page_size, ncpus, ncpus == 1 ? "" : "s");
#if 0
@@ -763,12 +1101,23 @@ int waittime = -1;
struct pcb dumppcb;
void
-boot(howto /* , bootstr */)
+boot(howto)
int howto;
- /* char *bootstr; */
{
extern int cold;
+#if defined(MULTIPROCESSOR)
+#if 0 /* XXX See below. */
+ u_long cpu_id;
+#endif
+#endif
+
+#if defined(MULTIPROCESSOR)
+ /* We must be running on the primary CPU. */
+ if (alpha_pal_whami() != hwrpb->rpb_primary_cpu_id)
+ panic("cpu_reboot: not on primary CPU!");
+#endif
+
/* If system is cold, just halt. */
if (cold) {
howto |= RB_HALT;
@@ -781,24 +1130,13 @@ boot(howto /* , bootstr */)
boothowto = howto;
if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
- extern struct proc proc0;
-
- /* protect against curproc->p_stats.foo refs in sync XXX */
- if (curproc == NULL)
- curproc = &proc0;
-
waittime = 0;
vfs_shutdown();
/*
* If we've been adjusting the clock, the todr
- * will be out of synch; adjust it now unless
- * the system was sitting in ddb.
+ * will be out of synch; adjust it now.
*/
- if ((howto & RB_TIMEBAD) == 0) {
- resettodr();
- } else {
- printf("WARNING: not updating battery clock\n");
- }
+ resettodr();
}
/* Disable interrupts. */
@@ -817,13 +1155,32 @@ haltsys:
/* run any shutdown hooks */
doshutdownhooks();
+#if defined(MULTIPROCESSOR)
+#if 0 /* XXX doesn't work when called from here?! */
+ /* Kill off any secondary CPUs. */
+ for (cpu_id = 0; cpu_id < hwrpb->rpb_pcs_cnt; cpu_id++) {
+ if (cpu_id == hwrpb->rpb_primary_cpu_id ||
+ cpu_info[cpu_id].ci_softc == NULL)
+ continue;
+ cpu_halt_secondary(cpu_id);
+ }
+#endif
+#endif
+
#ifdef BOOTKEY
printf("hit any key to %s...\n", howto & RB_HALT ? "halt" : "reboot");
+ cnpollc(1); /* for proper keyboard command handling */
cngetc();
+ cnpollc(0);
printf("\n");
#endif
- /* Finally, halt/reboot the system. */
+ /* Finally, powerdown/halt/reboot the system. */
+ if ((howto & RB_POWERDOWN) == RB_POWERDOWN &&
+ platform.powerdown != NULL) {
+ (*platform.powerdown)();
+ printf("WARNING: powerdown failed!\n");
+ }
printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
prom_halt(howto & RB_HALT);
/*NOTREACHED*/
@@ -844,7 +1201,8 @@ cpu_dumpsize()
{
int size;
- size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
+ size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
+ ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
if (roundup(size, dbtob(1)) != dbtob(1))
return -1;
@@ -852,21 +1210,39 @@ cpu_dumpsize()
}
/*
+ * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
+ */
+u_long
+cpu_dump_mempagecnt()
+{
+ u_long i, n;
+
+ n = 0;
+ for (i = 0; i < mem_cluster_cnt; i++)
+ n += atop(mem_clusters[i].size);
+ return (n);
+}
+
+/*
* cpu_dump: dump machine-dependent kernel core dump headers.
*/
int
cpu_dump()
{
int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
- long buf[dbtob(1) / sizeof (long)];
- kcore_seg_t *segp;
- cpu_kcore_hdr_t *cpuhdrp;
+ char buf[dbtob(1)];
+ kcore_seg_t *segp;
+ cpu_kcore_hdr_t *cpuhdrp;
+ phys_ram_seg_t *memsegp;
+ int i;
- dump = bdevsw[major(dumpdev)].d_dump;
+ dump = bdevsw[major(dumpdev)].d_dump;
+ bzero(buf, sizeof buf);
segp = (kcore_seg_t *)buf;
- cpuhdrp =
- (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp)) / sizeof (long)];
+ cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
+ memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
+ ALIGN(sizeof(*cpuhdrp))];
/*
* Generate a segment header.
@@ -875,19 +1251,26 @@ cpu_dump()
segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
/*
- * Add the machine-dependent header info
+ * Add the machine-dependent header info.
*/
- cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vm_offset_t)Lev1map);
+ cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map);
cpuhdrp->page_size = PAGE_SIZE;
- cpuhdrp->core_seg.start = ctob(firstusablepage);
- cpuhdrp->core_seg.size = ctob(physmem);
+ cpuhdrp->nmemsegs = mem_cluster_cnt;
+
+ /*
+ * Fill in the memory segment descriptors.
+ */
+ for (i = 0; i < mem_cluster_cnt; i++) {
+ memsegp[i].start = mem_clusters[i].start;
+ memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK;
+ }
return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}
/*
- * This is called by configure to set dumplo and dumpsize.
- * Dumps always skip the first CLBYTES of disk space
+ * This is called by main to set dumplo and dumpsize.
+ * Dumps always skip the first NBPG of disk space
* in case there might be a disk label stored there.
* If there is extra space, put dump at the end to
* reduce the chance that swapping trashes it.
@@ -912,7 +1295,7 @@ dumpconf()
dumpblks = cpu_dumpsize();
if (dumpblks < 0)
goto bad;
- dumpblks += ctod(physmem);
+ dumpblks += ctod(cpu_dump_mempagecnt());
/* If dump won't fit (incl. room for possible label), punt. */
if (dumpblks > (nblks - ctod(1)))
@@ -922,7 +1305,7 @@ dumpconf()
dumplo = nblks - dumpblks;
/* dumpsize is in page units, and doesn't include headers. */
- dumpsize = physmem;
+ dumpsize = cpu_dump_mempagecnt();
return;
bad:
@@ -938,8 +1321,9 @@ bad:
void
dumpsys()
{
- unsigned bytes, i, n;
- int maddr, psize;
+ u_long totalbytesleft, bytes, i, n, memcl;
+ u_long maddr;
+ int psize;
daddr_t blkno;
int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
int error;
@@ -959,10 +1343,12 @@ dumpsys()
if (dumpsize == 0)
dumpconf();
if (dumplo <= 0) {
- printf("\ndump to dev %x not possible\n", dumpdev);
+ printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
+ minor(dumpdev));
return;
}
- printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
+ printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
+ minor(dumpdev), dumplo);
psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
printf("dump ");
@@ -976,30 +1362,35 @@ dumpsys()
if ((error = cpu_dump()) != 0)
goto err;
- bytes = ctob(physmem);
- maddr = ctob(firstusablepage);
+ totalbytesleft = ptoa(cpu_dump_mempagecnt());
blkno = dumplo + cpu_dumpsize();
dump = bdevsw[major(dumpdev)].d_dump;
error = 0;
- for (i = 0; i < bytes; i += n) {
- /* Print out how many MBs we to go. */
- n = bytes - i;
- if (n && (n % (1024*1024)) == 0)
- printf("%d ", n / (1024 * 1024));
+ for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
+ maddr = mem_clusters[memcl].start;
+ bytes = mem_clusters[memcl].size & ~PAGE_MASK;
- /* Limit size for next transfer. */
- if (n > BYTES_PER_DUMP)
- n = BYTES_PER_DUMP;
+ for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
- error = (*dump)(dumpdev, blkno,
- (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n);
- if (error)
- break;
- maddr += n;
- blkno += btodb(n); /* XXX? */
+ /* Print out how many MBs we have left to go. */
+ if ((totalbytesleft % (1024*1024)) == 0)
+ printf("%ld ", totalbytesleft / (1024 * 1024));
- /* XXX should look for keystrokes, to cancel. */
+ /* Limit size for next transfer. */
+ n = bytes - i;
+ if (n > BYTES_PER_DUMP)
+ n = BYTES_PER_DUMP;
+
+ error = (*dump)(dumpdev, blkno,
+ (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n);
+ if (error)
+ goto err;
+ maddr += n;
+ blkno += btodb(n); /* XXX? */
+
+ /* XXX should look for keystrokes, to cancel. */
+ }
}
err:
@@ -1165,7 +1556,6 @@ sendsig(catcher, sig, mask, code, type, val)
struct sigacts *psp = p->p_sigacts;
int oonstack, fsize, rndfsize, kscsize;
extern char sigcode[], esigcode[];
- extern struct proc *fpcurproc;
siginfo_t *sip, ksi;
frame = p->p_md.md_tf;
@@ -1181,7 +1571,7 @@ sendsig(catcher, sig, mask, code, type, val)
/*
* Allocate and validate space for the signal handler
* context. Note that if the stack is in P0 space, the
- * call to grow() is a nop, and the useracc() check
+ * call to uvm_grow() is a nop, and the useracc() check
* will fail if the process has not already allocated
* the space with a `brk'.
*/
@@ -1193,16 +1583,16 @@ sendsig(catcher, sig, mask, code, type, val)
} else
scp = (struct sigcontext *)(alpha_pal_rdusp() - rndfsize);
if ((u_long)scp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
- (void)grow(p, (u_long)scp);
+ (void)uvm_grow(p, (u_long)scp);
#ifdef DEBUG
if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid,
sig, &oonstack, scp);
#endif
- if (useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
+ if (uvm_useracc((caddr_t)scp, fsize, B_WRITE) == 0) {
#ifdef DEBUG
if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
- printf("sendsig(%d): useracc failed on sig %d\n",
+ printf("sendsig(%d): uvm_useracc failed on sig %d\n",
p->p_pid, sig);
#endif
/*
@@ -1311,7 +1701,6 @@ sys_sigreturn(p, v, retval)
syscallarg(struct sigcontext *) sigcntxp;
} */ *uap = v;
struct sigcontext *scp, ksc;
- extern struct proc *fpcurproc;
scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
@@ -1326,7 +1715,7 @@ sys_sigreturn(p, v, retval)
* Test and fetch the context structure.
* We grab it all at once for speed.
*/
- if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
+ if (uvm_useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
copyin((caddr_t)scp, (caddr_t)&ksc, sizeof ksc))
return (EINVAL);
@@ -1391,7 +1780,8 @@ cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
sizeof consdev));
case CPU_ROOT_DEVICE:
- return (sysctl_rdstring(oldp, oldlenp, newp, root_device));
+ return (sysctl_rdstring(oldp, oldlenp, newp,
+ root_device));
case CPU_UNALIGNED_PRINT:
return (sysctl_int(oldp, oldlenp, newp, newlen,
@@ -1406,7 +1796,8 @@ cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
&alpha_unaligned_sigbus));
case CPU_BOOTED_KERNEL:
- return (sysctl_rdstring(oldp, oldlenp, newp, booted_kernel));
+ return (sysctl_rdstring(oldp, oldlenp, newp,
+ bootinfo.booted_kernel));
default:
return (EOPNOTSUPP);
@@ -1425,7 +1816,6 @@ setregs(p, pack, stack, retval)
register_t *retval;
{
struct trapframe *tfp = p->p_md.md_tf;
- extern struct proc *fpcurproc;
#ifdef DEBUG
int i;
#endif
@@ -1475,37 +1865,10 @@ netintr()
#define DONETISR(bit, fn) \
do { \
if (n & (1 << (bit))) \
- fn; \
+ fn(); \
} while (0)
-#ifdef INET
- DONETISR(NETISR_ARP, arpintr());
- DONETISR(NETISR_IP, ipintr());
-#endif
-#ifdef INET6
- DONETISR(NETISR_IPV6, ip6intr());
-#endif
-#ifdef NETATALK
- DONETISR(NETISR_ATALK, atintr());
-#endif
-#ifdef NS
- DONETISR(NETISR_NS, nsintr());
-#endif
-#ifdef ISO
- DONETISR(NETISR_ISO, clnlintr());
-#endif
-#ifdef CCITT
- DONETISR(NETISR_CCITT, ccittintr());
-#endif
-#ifdef NATM
- DONETISR(NETISR_NATM, natmintr());
-#endif
-#if NPPP > 0
- DONETISR(NETISR_PPP, pppintr());
-#endif
-#if NBRIDGE > 0
- DONETISR(NETISR_BRIDGE, bridgeintr());
-#endif
+#include <net/netisr_dispatch.h>
#undef DONETISR
}
@@ -1515,16 +1878,13 @@ do_sir()
{
u_int64_t n;
- do {
- (void)splhigh();
- n = ssir;
- ssir = 0;
- splsoft(); /* don't recurse through spl0() */
-
+ while ((n = atomic_loadlatch_ulong(&ssir, 0)) != 0) {
+#define COUNT_SOFT uvmexp.softs++
+
#define DO_SIR(bit, fn) \
do { \
if (n & (bit)) { \
- cnt.v_soft++; \
+ COUNT_SOFT; \
fn; \
} \
} while (0)
@@ -1532,16 +1892,19 @@ do_sir()
DO_SIR(SIR_NET, netintr());
DO_SIR(SIR_CLOCK, softclock());
+#undef COUNT_SOFT
#undef DO_SIR
- } while (ssir != 0);
+ }
}
int
spl0()
{
- if (ssir)
- do_sir(); /* it lowers the IPL itself */
+ if (ssir) {
+ (void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT);
+ do_sir();
+ }
return (alpha_pal_swpipl(ALPHA_PSL_IPL_0));
}
@@ -1642,8 +2005,16 @@ delay(n)
{
long N = cycles_per_usec * (n);
- while (N > 0) /* XXX */
- N -= 3; /* XXX */
+ /*
+ * XXX Should be written to use RPCC?
+ */
+
+ __asm __volatile(
+ "# The 2 corresponds to the insn count\n"
+ "1: subq %2, %1, %0 \n"
+ " bgt %0, 1b"
+ : "=r" (N)
+ : "i" (2), "0" (N));
}
#if defined(COMPAT_OSF1) || 1 /* XXX */
@@ -1676,10 +2047,10 @@ cpu_exec_ecoff_hook(p, epp)
struct exec_package *epp;
{
struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
- extern struct emul emul_native;
-#ifdef COMPAT_OSF1
- extern struct emul emul_osf1;
-#endif
+ extern struct emul emul_native, emul_osf1;
+ int error;
+ extern int osf1_exec_ecoff_hook(struct proc *p,
+ struct exec_package *epp);
switch (execp->f.f_magic) {
#ifdef COMPAT_OSF1
@@ -1690,23 +2061,71 @@ cpu_exec_ecoff_hook(p, epp)
case ECOFF_MAGIC_NATIVE_ALPHA:
epp->ep_emul = &emul_native;
+ error = 0;
break;
default:
- return ENOEXEC;
+ error = ENOEXEC;
}
- return 0;
+ return (error);
}
#endif
+int
+alpha_pa_access(pa)
+ u_long pa;
+{
+ int i;
+
+ for (i = 0; i < mem_cluster_cnt; i++) {
+ if (pa < mem_clusters[i].start)
+ continue;
+ if ((pa - mem_clusters[i].start) >=
+ (mem_clusters[i].size & ~PAGE_MASK))
+ continue;
+ return (mem_clusters[i].size & PAGE_MASK); /* prot */
+ }
+
+ /*
+ * Address is not a memory address. If we're secure, disallow
+ * access. Otherwise, grant read/write.
+ */
+ if (securelevel > 0)
+ return (VM_PROT_NONE);
+ else
+ return (VM_PROT_READ | VM_PROT_WRITE);
+}
+
/* XXX XXX BEGIN XXX XXX */
-vm_offset_t alpha_XXX_dmamap_or; /* XXX */
+paddr_t alpha_XXX_dmamap_or; /* XXX */
/* XXX */
-vm_offset_t /* XXX */
+paddr_t /* XXX */
alpha_XXX_dmamap(v) /* XXX */
- vm_offset_t v; /* XXX */
+ vaddr_t v; /* XXX */
{ /* XXX */
/* XXX */
return (vtophys(v) | alpha_XXX_dmamap_or); /* XXX */
} /* XXX */
/* XXX XXX END XXX XXX */
+
+char *
+dot_conv(x)
+ unsigned long x;
+{
+ int i;
+ char *xc;
+ static int next;
+ static char space[2][20];
+
+ xc = space[next ^= 1] + sizeof space[0];
+ *--xc = '\0';
+ for (i = 0;; ++i) {
+ if (i && (i & 3) == 0)
+ *--xc = '.';
+ *--xc = "0123456789abcdef"[x & 0xf];
+ x >>= 4;
+ if (x == 0)
+ break;
+ }
+ return xc;
+}
diff --git a/sys/arch/alpha/alpha/mainbus.c b/sys/arch/alpha/alpha/mainbus.c
index 811435e5348..aec61dc7785 100644
--- a/sys/arch/alpha/alpha/mainbus.c
+++ b/sys/arch/alpha/alpha/mainbus.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: mainbus.c,v 1.8 1997/01/24 19:56:38 niklas Exp $ */
-/* $NetBSD: mainbus.c,v 1.15 1996/12/05 01:39:28 cgd Exp $ */
+/* $NetBSD: mainbus.c,v 1.27 1998/06/24 01:10:35 ross Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -32,66 +31,39 @@
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
+#include <sys/conf.h>
#include <machine/autoconf.h>
#include <machine/rpb.h>
#include <machine/cpuconf.h>
-struct mainbus_softc {
- struct device sc_dv;
- struct abus sc_bus;
-};
-
/* Definition of the mainbus driver. */
-#ifdef __BROKEN_INDIRECT_CONFIG
static int mbmatch __P((struct device *, void *, void *));
-#else
-static int mbmatch __P((struct device *, struct cfdata *, void *));
-#endif
static void mbattach __P((struct device *, struct device *, void *));
static int mbprint __P((void *, const char *));
struct cfattach mainbus_ca = {
- sizeof(struct mainbus_softc), mbmatch, mbattach
+ sizeof(struct device), mbmatch, mbattach
};
struct cfdriver mainbus_cd = {
NULL, "mainbus", DV_DULL
};
-void mb_intr_establish __P((struct confargs *, int (*)(void *), void *));
-void mb_intr_disestablish __P((struct confargs *));
-caddr_t mb_cvtaddr __P((struct confargs *));
-int mb_matchname __P((struct confargs *, char *));
+/* There can be only one. */
+int mainbus_found;
static int
-#ifdef __BROKEN_INDIRECT_CONFIG
-mbmatch(parent, cfdata, aux)
-#else
mbmatch(parent, cf, aux)
-#endif
struct device *parent;
-#ifdef __BROKEN_INDIRECT_CONFIG
- void *cfdata;
-#else
- struct cfdata *cf;
-#endif
+ void *cf;
void *aux;
{
-#ifdef __BROKEN_INDIRECT_CONFIG
- struct cfdata *cf = cfdata;
-#endif
- /*
- * Only one mainbus, but some people are stupid...
- */
- if (cf->cf_unit > 0)
- return(0);
+ if (mainbus_found)
+ return (0);
- /*
- * That one mainbus is always here.
- */
- return(1);
+ return (1);
}
static void
@@ -100,52 +72,37 @@ mbattach(parent, self, aux)
struct device *self;
void *aux;
{
- struct mainbus_softc *sc = (struct mainbus_softc *)self;
- struct confargs nca;
+ struct mainbus_attach_args ma;
+ struct pcs *pcsp;
int i, cpuattachcnt;
- struct pcs* pcsp;
extern int ncpus;
- extern const struct cpusw *cpu_fn_switch;
- printf("\n");
+ mainbus_found = 1;
- sc->sc_bus.ab_dv = (struct device *)sc;
- sc->sc_bus.ab_type = BUS_MAIN;
- sc->sc_bus.ab_intr_establish = mb_intr_establish;
- sc->sc_bus.ab_intr_disestablish = mb_intr_disestablish;
- sc->sc_bus.ab_cvtaddr = mb_cvtaddr;
- sc->sc_bus.ab_matchname = mb_matchname;
+ printf("\n");
/*
* Try to find and attach all of the CPUs in the machine.
*/
cpuattachcnt = 0;
for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
- pcsp = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off +
- (i * hwrpb->rpb_pcs_size));
+ pcsp = LOCATE_PCS(hwrpb, i);
if ((pcsp->pcs_flags & PCS_PP) == 0)
continue;
- nca.ca_name = "cpu";
- nca.ca_slot = 0;
- nca.ca_offset = 0;
- nca.ca_bus = &sc->sc_bus;
- if (config_found(self, &nca, mbprint) != NULL)
+ ma.ma_name = "cpu";
+ ma.ma_slot = i;
+ if (config_found(self, &ma, mbprint) != NULL)
cpuattachcnt++;
}
if (ncpus != cpuattachcnt)
printf("WARNING: %d cpus in machine, %d attached\n",
ncpus, cpuattachcnt);
- if ((*cpu_fn_switch->iobus_name)() != NULL) {
- char iobus_name[16];
-
- strncpy(iobus_name, (*cpu_fn_switch->iobus_name)(), 16);
- nca.ca_name = iobus_name;
- nca.ca_slot = 0;
- nca.ca_offset = 0;
- nca.ca_bus = &sc->sc_bus;
- config_found(self, &nca, mbprint);
+ if (platform.iobus != NULL) {
+ ma.ma_name = platform.iobus;
+ ma.ma_slot = 0; /* meaningless */
+ config_found(self, &ma, mbprint);
}
}
@@ -154,45 +111,10 @@ mbprint(aux, pnp)
void *aux;
const char *pnp;
{
- struct confargs *ca = aux;
+ struct mainbus_attach_args *ma = aux;
if (pnp)
- printf("%s at %s", ca->ca_name, pnp);
+ printf("%s at %s", ma->ma_name, pnp);
return (UNCONF);
}
-
-void
-mb_intr_establish(ca, handler, val)
- struct confargs *ca;
- int (*handler) __P((void *));
- void *val;
-{
-
- panic("can never mb_intr_establish");
-}
-
-void
-mb_intr_disestablish(ca)
- struct confargs *ca;
-{
-
- panic("can never mb_intr_disestablish");
-}
-
-caddr_t
-mb_cvtaddr(ca)
- struct confargs *ca;
-{
-
- return (NULL);
-}
-
-int
-mb_matchname(ca, name)
- struct confargs *ca;
- char *name;
-{
-
- return (strcmp(name, ca->ca_name) == 0);
-}
diff --git a/sys/arch/alpha/alpha/mem.c b/sys/arch/alpha/alpha/mem.c
index f8ecc33c7d0..5dbe1606970 100644
--- a/sys/arch/alpha/alpha/mem.c
+++ b/sys/arch/alpha/alpha/mem.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: mem.c,v 1.10 2000/07/05 18:30:32 ericj Exp $ */
-/* $NetBSD: mem.c,v 1.10 1996/11/13 21:13:10 cgd Exp $ */
+/* $NetBSD: mem.c,v 1.26 2000/03/29 03:48:20 simonb Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -46,22 +45,26 @@
*/
#include <sys/param.h>
-#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
+#include <sys/msgbuf.h>
+#include <sys/mman.h>
+#include <sys/conf.h>
#include <machine/cpu.h>
+#include <machine/alpha.h>
#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
#define mmread mmrw
#define mmwrite mmrw
cdev_decl(mm);
caddr_t zeropage;
-extern int firstusablepage, lastusablepage;
/*ARGSUSED*/
int
@@ -100,12 +103,13 @@ mmrw(dev, uio, flags)
struct uio *uio;
int flags;
{
- register vm_offset_t o, v;
+ register vaddr_t o, v;
register int c;
register struct iovec *iov;
- int error = 0;
+ int error = 0, rw;
+ extern int msgbufmapped;
- while (uio->uio_resid > 0 && error == 0) {
+ while (uio->uio_resid > 0 && !error) {
iov = uio->uio_iov;
if (iov->iov_len == 0) {
uio->uio_iov++;
@@ -120,15 +124,26 @@ mmrw(dev, uio, flags)
case 0:
v = uio->uio_offset;
kmemphys:
- /* allow reads only in RAM (except for DEBUG) */
- if (v < ctob(firstusablepage) ||
- v > ctob(lastusablepage + 1 + btoc(MSGBUFSIZE)))
- return (EFAULT);
+ if (v >= ALPHA_K0SEG_TO_PHYS((vaddr_t)msgbufp)) {
+ if (msgbufmapped == 0) {
+ printf("Message Buf not Mapped\n");
+ error = EFAULT;
+ break;
+ }
+ }
+
+ /* Allow reads only in RAM. */
+ rw = (uio->uio_rw == UIO_READ) ? PROT_READ : PROT_WRITE;
+ if ((alpha_pa_access(v) & rw) != rw) {
+ error = EFAULT;
+ break;
+ }
+
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
error =
uiomove((caddr_t)ALPHA_PHYS_TO_K0SEG(v), c, uio);
- continue;
+ break;
/* minor device 1 is kernel memory */
case 1:
@@ -140,13 +155,13 @@ kmemphys:
}
c = min(iov->iov_len, MAXPHYS);
- if (!kernacc((caddr_t)v, c,
+ if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
error = uiomove((caddr_t)v, c, uio);
- continue;
+ break;
-/* minor device 2 is EOF/RATHOLE */
+/* minor device 2 is EOF/rathole */
case 2:
if (uio->uio_rw == UIO_WRITE)
uio->uio_resid = 0;
@@ -155,39 +170,25 @@ kmemphys:
/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
case 12:
if (uio->uio_rw == UIO_WRITE) {
- c = iov->iov_len;
- break;
+ uio->uio_resid = 0;
+ return (0);
}
/*
* On the first call, allocate and zero a page
* of memory for use with /dev/zero.
- *
- * XXX on the alpha we already know where there
- * is a global zeroed page, the null segment table.
*/
if (zeropage == NULL) {
-#if (CLBYTES == NBPG) && !defined(NEW_PMAP)
- extern caddr_t Segtabzero;
- zeropage = Segtabzero;
-#else
zeropage = (caddr_t)
- malloc(CLBYTES, M_TEMP, M_WAITOK);
- bzero(zeropage, CLBYTES);
-#endif
+ malloc(NBPG, M_TEMP, M_WAITOK);
+ bzero(zeropage, NBPG);
}
- c = min(iov->iov_len, CLBYTES);
+ c = min(iov->iov_len, NBPG);
error = uiomove(zeropage, c, uio);
- continue;
+ break;
default:
return (ENXIO);
}
- if (error)
- break;
- iov->iov_base += c;
- iov->iov_len -= c;
- uio->uio_offset += c;
- uio->uio_resid -= c;
}
return (error);
}
@@ -208,11 +209,11 @@ mmmmap(dev, off, prot)
*/
if (minor(dev) != 0)
return (-1);
+
/*
* Allow access only in RAM.
*/
- if (off < ctob(firstusablepage) ||
- off >= ctob(lastusablepage + 1))
+ if ((prot & alpha_pa_access(atop((paddr_t)off))) != prot)
return (-1);
return (alpha_btop(off));
}
diff --git a/sys/arch/alpha/alpha/multiproc.s b/sys/arch/alpha/alpha/multiproc.s
new file mode 100644
index 00000000000..b3b6cf1d69c
--- /dev/null
+++ b/sys/arch/alpha/alpha/multiproc.s
@@ -0,0 +1,84 @@
+/* $NetBSD: multiproc.s,v 1.5 1999/12/16 20:17:23 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+__KERNEL_RCSID(5, "$NetBSD: multiproc.s,v 1.5 1999/12/16 20:17:23 thorpej Exp $")
+
+/*
+ * Multiprocessor glue code.
+ */
+
+ .text
+inc5: .stabs __FILE__,132,0,0,inc5; .loc 1 __LINE__
+
+/*
+ * cpu_spinup_trampoline:
+ *
+ * We come here via the secondary processor's console. We simply
+ * make the function call look right, and call cpu_hatch() to finish
+ * starting up the processor.
+ *
+ * We are provided an argument in $27 (pv) (which will be our cpu_info).
+ */
+NESTED_NOPROFILE(cpu_spinup_trampoline,0,0,ra,0,0)
+ mov pv, s0 /* squirrel away argument */
+
+ br pv, 1f /* compute new GP */
+1: LDGP(pv)
+
+ /* Invalidate TLB and I-stream. */
+ ldiq a0, -2 /* TBIA */
+ call_pal PAL_OSF1_tbi
+ call_pal PAL_imb
+
+ /* Load KGP with current GP. */
+ mov gp, a0
+ call_pal PAL_OSF1_wrkgp /* clobbers a0, t0, t8-t11 */
+
+ /* Restore argument and write it in SysValue. */
+ mov s0, a0
+ call_pal PAL_OSF1_wrval
+
+ /* Restore argument and call cpu_hatch() */
+ mov s0, a0
+ CALL(cpu_hatch)
+
+ /* cpu_hatch() returned! Just halt (forever). */
+2: call_pal PAL_halt
+ br zero, 2b
+ END(cpu_spinup_trampoline)
diff --git a/sys/arch/alpha/alpha/pal.s b/sys/arch/alpha/alpha/pal.s
index 4be046c27af..a0c0bfffe07 100644
--- a/sys/arch/alpha/alpha/pal.s
+++ b/sys/arch/alpha/alpha/pal.s
@@ -1,5 +1,4 @@
-/* $OpenBSD: pal.s,v 1.4 1996/10/30 22:38:18 niklas Exp $ */
-/* $NetBSD: pal.s,v 1.5 1996/07/14 04:21:53 cgd Exp $ */
+/* $NetBSD: pal.s,v 1.14 1999/12/02 22:08:04 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995 Carnegie-Mellon University.
@@ -31,57 +30,61 @@
/*
* The various OSF PALcode routines.
*
- * The following code is derived from pages: (I) 6-5 - (I) 6-7 and
- * (III) 2-1 - (III) 2-25 of "Alpha Architecture Reference Manual" by
+ * The following code is originally derived from pages: (I) 6-5 - (I) 6-7
+ * and (III) 2-1 - (III) 2-25 of "Alpha Architecture Reference Manual" by
* Richard L. Sites.
+ *
+ * Updates taken from pages: (II-B) 2-1 - (II-B) 2-33 of "Alpha AXP
+ * Architecture Reference Manual, Second Edition" by Richard L. Sites
+ * and Richard T. Witek.
*/
+inc2: .stabs __FILE__,132,0,0,inc2; .loc 1 __LINE__
/*
- * alpha_rpcc: read process cycle counter (XXX INSTRUCTION, NOT PALcode OP)
- */
- .text
-LEAF(alpha_rpcc,1)
- rpcc v0
- RET
- END(alpha_rpcc)
-
-/*
- * alpha_mb: memory barrier (XXX INSTRUCTION, NOT PALcode OP)
- */
- .text
-LEAF(alpha_mb,0)
- mb
- RET
- END(alpha_mb)
-
-/*
- * alpha_wmb: write memory barrier (XXX INSTRUCTION, NOT PALcode OP)
+ * alpha_amask: read architecture features (XXX INSTRUCTION, NOT PALcode OP)
+ *
+ * Arguments:
+ * a0 bitmask of features to test
+ *
+ * Returns:
+ * v0 bitmask - bit is _cleared_ if feature is supported
*/
.text
-LEAF(alpha_wmb,0)
- /* wmb XXX */
- mb /* XXX */
+LEAF(alpha_amask,1)
+ amask a0, v0
RET
- END(alpha_wmb)
+ END(alpha_amask)
/*
- * alpha_pal_imb: I-Stream memory barrier. [UNPRIVILEGED]
- * (Makes instruction stream coherent with data stream.)
+ * alpha_implver: read implementation version (XXX INSTRUCTION, NOT PALcode OP)
+ *
+ * Returns:
+ * v0 implementation version - see <machine/alpha_cpu.h>
*/
.text
-LEAF(alpha_pal_imb,0)
- call_pal PAL_imb
+LEAF(alpha_implver,0)
+#if 0
+ implver 0x1, v0
+#else
+ .long 0x47e03d80 /* XXX gas(1) does the Wrong Thing */
+#endif
RET
- END(alpha_pal_imb)
+ END(alpha_implver)
/*
- * alpha_pal_draina: Drain aborts. [PRIVILEGED]
+ * alpha_pal_cflush: Cache flush [PRIVILEGED]
+ *
+ * Flush the entire physical page specified by the PFN specified in
+ * a0 from any data caches associated with the current processor.
+ *
+ * Arguments:
+ * a0 page frame number of page to flush
*/
.text
-LEAF(alpha_pal_draina,0)
- call_pal PAL_draina
+LEAF(alpha_pal_cflush,1)
+ call_pal PAL_cflush
RET
- END(alpha_pal_draina)
+ END(alpha_pal_cflush)
/*
* alpha_pal_halt: Halt the processor. [PRIVILEGED]
@@ -94,28 +97,16 @@ LEAF(alpha_pal_halt,0)
END(alpha_pal_halt)
/*
- * alpha_pal_rdmces: Read MCES processor register. [PRIVILEGED]
- *
- * Return:
- * v0 current MCES value
- */
- .text
-LEAF(alpha_pal_rdmces,1)
- call_pal PAL_OSF1_rdmces
- RET
- END(alpha_pal_rdmces)
-
-/*
- * alpha_pal_rdusp: Read user stack pointer. [PRIVILEGED]
+ * alpha_pal_rdps: Read processor status. [PRIVILEGED]
*
* Return:
- * v0 current user stack pointer
+ * v0 current PS value
*/
.text
-LEAF(alpha_pal_rdusp,0)
- call_pal PAL_OSF1_rdusp
+LEAF(alpha_pal_rdps,0)
+ call_pal PAL_OSF1_rdps
RET
- END(alpha_pal_rdusp)
+ END(alpha_pal_rdps)
/*
* alpha_pal_swpipl: Swap Interrupt priority level. [PRIVILEGED]
@@ -139,31 +130,6 @@ LEAF_NOPROFILE(_alpha_pal_swpipl,1)
END(_alpha_pal_swpipl)
/*
- * alpha_pal_tbi: Translation buffer invalidate. [PRIVILEGED]
- *
- * Arguments:
- * a0 operation selector
- * a1 address to operate on (if necessary)
- */
- .text
-LEAF(alpha_pal_tbi,2)
- call_pal PAL_OSF1_tbi
- RET
- END(alpha_pal_tbi)
-
-/*
- * alpha_pal_whami: Who am I? [PRIVILEGED]
- *
- * Return:
- * v0 processor number
- */
- .text
-LEAF(alpha_pal_whami,0)
- call_pal PAL_OSF1_whami
- RET
- END(alpha_pal_whami)
-
-/*
* alpha_pal_wrent: Write system entry address. [PRIVILEGED]
*
* Arguments:
@@ -177,30 +143,6 @@ LEAF(alpha_pal_wrent,2)
END(alpha_pal_wrent)
/*
- * alpha_pal_wrfen: Write floating-point enable. [PRIVILEGED]
- *
- * Arguments:
- * a0 new enable value (val & 0x1 -> enable).
- */
- .text
-LEAF(alpha_pal_wrfen,1)
- call_pal PAL_OSF1_wrfen
- RET
- END(alpha_pal_wrfen)
-
-/*
- * alpha_pal_wrusp: Write user stack pointer. [PRIVILEGED]
- *
- * Arguments:
- * a0 new user stack pointer
- */
- .text
-LEAF(alpha_pal_wrusp,1)
- call_pal PAL_OSF1_wrusp
- RET
- END(alpha_pal_wrusp)
-
-/*
* alpha_pal_wrvptptr: Write virtual page table pointer. [PRIVILEGED]
*
* Arguments:
@@ -211,15 +153,3 @@ LEAF(alpha_pal_wrvptptr,1)
call_pal PAL_OSF1_wrvptptr
RET
END(alpha_pal_wrvptptr)
-
-/*
- * alpha_pal_wrmces: Write MCES processor register. [PRIVILEGED]
- *
- * Arguments:
- * a0 value to write to MCES
- */
- .text
-LEAF(alpha_pal_wrmces,1)
- call_pal PAL_OSF1_wrmces
- RET
- END(alpha_pal_wrmces)
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index 8b918d71ebe..07037daa58f 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,57 +1,135 @@
-/* $OpenBSD: pmap.c,v 1.6 1999/09/03 18:00:11 art Exp $ */
-/* $NetBSD: pmap.c,v 1.17 1996/10/13 02:59:42 christos Exp $ */
-
-/*
- * Copyright (c) 1992, 1996 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-
-/*
- * File: pmap.c
- *
- * Author list
- * vax: Avadis Tevanian, Jr., Michael Wayne Young
- * i386: Lance Berc, Mike Kupfer, Bob Baron, David Golub, Richard Draves
- * alpha: Alessandro Forin
- * {Net,Open}BSD/Alpha: Chris Demetriou
- *
- * Physical Map management code for DEC Alpha
+/* $NetBSD: pmap.c,v 1.132 2000/05/23 05:12:54 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
*
- * Manages physical address maps.
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center and by Chris G. Demetriou.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
- * This code was derived exclusively from information available in
- * "Alpha Architecture Reference Manual", Richard L. Sites ed.
- * Digital Press, Burlington, MA 01803
- * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 8.6 (Berkeley) 5/27/94
*/
/*
- * In addition to hardware address maps, this
- * module is called upon to provide software-use-only
- * maps which may or may not be stored in the same
- * form as hardware maps. These pseudo-maps are
- * used to store intermediate results from copy
- * operations to and from address spaces.
+ * DEC Alpha physical map management code.
+ *
+ * History:
+ *
+ * This pmap started life as a Motorola 68851/68030 pmap,
+ * written by Mike Hibler at the University of Utah.
+ *
+ * It was modified for the DEC Alpha by Chris Demetriou
+ * at Carnegie Mellon University.
+ *
+ * Support for non-contiguous physical memory was added by
+ * Jason R. Thorpe of the Numerical Aerospace Simulation
+ * Facility, NASA Ames Research Center and Chris Demetriou.
+ *
+ * Page table management and a major cleanup were undertaken
+ * by Jason R. Thorpe, with lots of help from Ross Harvey of
+ * Avalon Computer Systems and from Chris Demetriou.
+ *
+ * Support for the new UVM pmap interface was written by
+ * Jason R. Thorpe.
+ *
+ * Support for ASNs was written by Jason R. Thorpe, again
+ * with help from Chris Demetriou and Ross Harvey.
+ *
+ * The locking protocol was written by Jason R. Thorpe,
+ * using Chuck Cranor's i386 pmap for UVM as a model.
+ *
+ * TLB shootdown code was written by Jason R. Thorpe.
+ *
+ * Notes:
+ *
+ * All page table access is done via K0SEG. The one exception
+ * to this is for kernel mappings. Since all kernel page
+ * tables are pre-allocated, we can use the Virtual Page Table
+ * to access PTEs that map K1SEG addresses.
+ *
+ * Kernel page table pages are statically allocated in
+ * pmap_bootstrap(), and are never freed. In the future,
+ * support for dynamically adding additional kernel page
+ * table pages may be added. User page table pages are
+ * dynamically allocated and freed.
+ *
+ * This pmap implementation only supports NBPG == PAGE_SIZE.
+ * In practice, this is not a problem since PAGE_SIZE is
+ * initialized to the hardware page size in alpha_init().
+ *
+ * Bugs/misfeatures:
+ *
+ * - Some things could be optimized.
+ */
+
+/*
+ * Manages physical address maps.
*
* Since the information managed by this module is
* also stored by the logical address mapping module,
@@ -73,6 +151,7 @@
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/pool.h>
#include <sys/user.h>
#include <sys/buf.h>
#ifdef SYSVSHM
@@ -82,3240 +161,4004 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#include <vm/vm_pageout.h>
-
-#include <machine/cpu.h>
-#include <machine/alpha_cpu.h>
-
-
-#define VM_OBJECT_NULL NULL
-#define VM_PAGE_NULL NULL
-#define BYTE_SIZE NBBY
-#define page_size PAGE_SIZE
-#define ALPHA_PTE_GLOBAL ALPHA_PTE_ASM
-#define MACRO_BEGIN do {
-#define MACRO_END } while (0)
-#define K2SEG_BASE ALPHA_K1SEG_BASE
-#define integer_t long
-#define spl_t int
-#define vm_page_fictitious_addr 0
-#define aligned_block_copy(src, dest, size) bcopy((void *)src, (void *)dest, size)
-#define db_printf printf
-#define tbia ALPHA_TBIA
-#define alphacache_Iflush alpha_pal_imb
-#define cpu_number() 0
-#define check_simple_locks()
-#define K0SEG_TO_PHYS ALPHA_K0SEG_TO_PHYS
-#define ISA_K0SEG(v) (v >= ALPHA_K0SEG_BASE && v <= ALPHA_K0SEG_END)
-#ifndef assert
-#define assert(x)
-#endif
-
-vm_offset_t avail_start; /* PA of first available physical page */
-vm_offset_t avail_end; /* PA of last available physical page */
-vm_offset_t mem_size; /* memory size in bytes */
-vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
-vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-
-/* XXX */
-struct pv_entry *pmap_alloc_pv __P((void));
-void pmap_free_pv __P((struct pv_entry *pv));
-vm_page_t vm_page_grab __P((void));
-
-vm_offset_t pmap_resident_extract __P((pmap_t, vm_offset_t));
-
-/* For external use... */
-vm_offset_t kvtophys(vm_offset_t virt)
-{
- return pmap_resident_extract(kernel_pmap, virt);
-}
-
-/* ..but for internal use... */
-#define phystokv(a) ALPHA_PHYS_TO_K0SEG(a)
-#define kvtophys(p) ALPHA_K0SEG_TO_PHYS((vm_offset_t)p)
-
-
-/*
- * Private data structures.
- */
-/*
- * Map from MI protection codes to MD codes.
- * Assume that there are three MI protection codes, all using low bits.
- */
-pt_entry_t user_protection_codes[8];
-pt_entry_t kernel_protection_codes[8];
+#include <uvm/uvm.h>
-alpha_protection_init()
-{
- register pt_entry_t *kp, *up, prot;
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+#if defined(_PMAP_MAY_USE_PROM_CONSOLE) || defined(MULTIPROCESSOR)
+#include <machine/rpb.h>
+#endif
- kp = kernel_protection_codes;
- up = user_protection_codes;
- for (prot = 0; prot < 8; prot++) {
- switch (prot) {
- case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
- *kp++ = 0;
- *up++ = 0;
- break;
- case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
- case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
- case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
- *kp++ = ALPHA_PTE_KR;
- *up++ = ALPHA_PTE_UR|ALPHA_PTE_KR;
- break;
- case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
- *kp++ = ALPHA_PTE_KW;
- *up++ = ALPHA_PTE_UW|ALPHA_PTE_KW;
- break;
- case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
- case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
- case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
- *kp++ = ALPHA_PTE_KW|ALPHA_PTE_KR;
- *up++ = ALPHA_PTE_UW|ALPHA_PTE_UR|ALPHA_PTE_KW|ALPHA_PTE_KR;
- break;
- }
- }
-}
+#ifdef DEBUG
+#define PDB_FOLLOW 0x0001
+#define PDB_INIT 0x0002
+#define PDB_ENTER 0x0004
+#define PDB_REMOVE 0x0008
+#define PDB_CREATE 0x0010
+#define PDB_PTPAGE 0x0020
+#define PDB_ASN 0x0040
+#define PDB_BITS 0x0080
+#define PDB_COLLECT 0x0100
+#define PDB_PROTECT 0x0200
+#define PDB_BOOTSTRAP 0x1000
+#define PDB_PARANOIA 0x2000
+#define PDB_WIRING 0x4000
+#define PDB_PVDUMP 0x8000
+
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA|PDB_FOLLOW|PDB_ENTER;
+#endif
/*
- * Given a map and a machine independent protection code,
- * convert to a alpha protection code.
+ * Given a map and a machine independent protection code,
+ * convert to an alpha protection code.
*/
-
-#define alpha_protection(map, prot) \
- (((map) == kernel_pmap) ? kernel_protection_codes[prot] : \
- user_protection_codes[prot])
-
-/* Build the typical kernel pte */
-#define pte_ktemplate(t,pa,pr) \
-MACRO_BEGIN \
- (t) = pa_to_pte(pa) | ALPHA_PTE_VALID | ALPHA_PTE_GLOBAL | \
- (alpha_protection(kernel_pmap,pr)); \
-MACRO_END
-
-/* build the typical pte */
-#define pte_template(m,t,pa,pr) \
-MACRO_BEGIN \
- (t) = pa_to_pte(pa) | ALPHA_PTE_VALID | \
- (alpha_protection(m,pr)); \
-MACRO_END
+#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
+int protection_codes[2][8];
/*
- * For each vm_page_t, there is a list of all currently
- * valid virtual mappings of that page. An entry is
- * a pv_entry_t; the list is the pv_table.
+ * kernel_lev1map:
+ *
+ * Kernel level 1 page table. This maps all kernel level 2
+ * page table pages, and is used as a template for all user
+ * pmap level 1 page tables. When a new user level 1 page
+ * table is allocated, all kernel_lev1map PTEs for kernel
+ * addresses are copied to the new map.
+ *
+ * The kernel also has an initial set of kernel level 2 page
+ * table pages. These map the kernel level 3 page table pages.
+ * As kernel level 3 page table pages are added, more level 2
+ * page table pages may be added to map them. These pages are
+ * never freed.
+ *
+ * Finally, the kernel also has an initial set of kernel level
+ * 3 page table pages. These map pages in K1SEG. More level
+ * 3 page table pages may be added at run-time if additional
+ * K1SEG address space is required. These pages are never freed.
+ *
+ * NOTE: When mappings are inserted into the kernel pmap, all
+ * level 2 and level 3 page table pages must already be allocated
+ * and mapped into the parent page table.
*/
-
-typedef struct pv_entry {
- struct pv_entry *next; /* next pv_entry */
- pmap_t pmap; /* pmap where mapping lies */
- vm_offset_t va; /* virtual address for mapping */
-} *pv_entry_t;
-
-#define PV_ENTRY_NULL ((pv_entry_t) 0)
-
-pv_entry_t pv_head_table; /* array of entries, one per page */
+pt_entry_t *kernel_lev1map;
/*
- * pv_list entries are kept on a list that can only be accessed
- * with the pmap system locked (at SPLVM, not in the cpus_active set).
- * The list is refilled from the pv_list_zone if it becomes empty.
+ * Virtual Page Table.
*/
-pv_entry_t pv_free_list; /* free list at SPLVM */
-decl_simple_lock_data(, pv_free_list_lock)
+pt_entry_t *VPT;
-#define PV_ALLOC(pv_e) { \
- simple_lock(&pv_free_list_lock); \
- if ((pv_e = pv_free_list) != 0) { \
- pv_free_list = pv_e->next; \
- } \
- simple_unlock(&pv_free_list_lock); \
-}
-
-#define PV_FREE(pv_e) { \
- simple_lock(&pv_free_list_lock); \
- pv_e->next = pv_free_list; \
- pv_free_list = pv_e; \
- simple_unlock(&pv_free_list_lock); \
-}
+struct pmap kernel_pmap_store;
+u_int kernel_pmap_asn_store[ALPHA_MAXPROCS];
+u_long kernel_pmap_asngen_store[ALPHA_MAXPROCS];
-#if 0
-zone_t pv_list_zone; /* zone of pv_entry structures */
-#endif
+paddr_t avail_start; /* PA of first available physical page */
+paddr_t avail_end; /* PA of last available physical page */
+vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
+vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
-/*
- * Each entry in the pv_head_table is locked by a bit in the
- * pv_lock_table. The lock bits are accessed by the physical
- * address of the page they lock.
- */
+boolean_t pmap_initialized; /* Has pmap_init completed? */
-char *pv_lock_table; /* pointer to array of bits */
-#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+u_long pmap_pages_stolen; /* instrumentation */
/*
- * First and last physical addresses that we maintain any information
- * for. Initialized to zero so that pmap operations done before
- * pmap_init won't touch any non-existent structures.
+ * This variable contains the number of CPU IDs we need to allocate
+ * space for when allocating the pmap structure. It is used to
+ * size a per-CPU array of ASN and ASN Generation number.
*/
-vm_offset_t vm_first_phys = (vm_offset_t) 0;
-vm_offset_t vm_last_phys = (vm_offset_t) 0;
-boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
+u_long pmap_ncpuids;
/*
- * Index into pv_head table, its lock bits, and the modify/reference
- * bits starting at vm_first_phys.
+ * Storage for physical->virtual entries and page attributes.
*/
+struct pv_head *pv_table;
+int pv_table_npages;
-#define pa_index(pa) (atop(pa - vm_first_phys))
-
-#define pai_to_pvh(pai) (&pv_head_table[pai])
-#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
-#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
-
-/*
- * Array of physical page attributes for managed pages.
- * One byte per physical page.
- */
-char *pmap_phys_attributes;
+#ifndef PMAP_PV_LOWAT
+#define PMAP_PV_LOWAT 16
+#endif
+int pmap_pv_lowat = PMAP_PV_LOWAT;
/*
- * Physical page attributes. Copy bits from PTE.
+ * List of all pmaps, used to update them when e.g. additional kernel
+ * page tables are allocated. This list is kept LRU-ordered by
+ * pmap_activate().
*/
-#define PHYS_MODIFIED (ALPHA_PTE_MOD>>16) /* page modified */
-#define PHYS_REFERENCED (ALPHA_PTE_REF>>16) /* page referenced */
-
-#define pte_get_attributes(p) ((*p & (ALPHA_PTE_MOD|ALPHA_PTE_REF)) >> 16)
+TAILQ_HEAD(, pmap) pmap_all_pmaps;
/*
- * Amount of virtual memory mapped by one
- * page-directory entry.
+ * The pools from which pmap structures and sub-structures are allocated.
*/
-#define PDE_MAPPED_SIZE (pdetova(1))
-#define PDE2_MAPPED_SIZE (pde2tova(1))
-#define PDE3_MAPPED_SIZE (pde3tova(1))
+struct pool pmap_pmap_pool;
+struct pool pmap_asn_pool;
+struct pool pmap_asngen_pool;
+struct pool pmap_pv_pool;
/*
- * We allocate page table pages directly from the VM system
- * through this object. It maps physical memory.
+ * Canonical names for PGU_* constants.
*/
-vm_object_t pmap_object = VM_OBJECT_NULL;
+const char *pmap_pgu_strings[] = PGU_STRINGS;
/*
- * Locking and TLB invalidation
+ * Address Space Numbers.
+ *
+ * On many implementations of the Alpha architecture, the TLB entries and
+ * I-cache blocks are tagged with a unique number within an implementation-
+ * specified range. When a process context becomes active, the ASN is used
+ * to match TLB entries; if a TLB entry for a particular VA does not match
+ * the current ASN, it is ignored (one could think of the processor as
+ * having a collection of <max ASN> separate TLBs). This allows operating
+ * system software to skip the TLB flush that would otherwise be necessary
+ * at context switch time.
+ *
+ * Alpha PTEs have a bit in them (PG_ASM - Address Space Match) that
+ * causes TLB entries to match any ASN. The PALcode also provides
+ * a TBI (Translation Buffer Invalidate) operation that flushes all
+ * TLB entries that _do not_ have PG_ASM. We use this bit for kernel
+ * mappings, so that invalidation of all user mappings does not invalidate
+ * kernel mappings (which are consistent across all processes).
+ *
+ * pmap_next_asn always indicates the next ASN to use.  When
+ * pmap_next_asn exceeds pmap_max_asn, we start a new ASN generation.
+ *
+ * When a new ASN generation is created, the per-process (i.e. non-PG_ASM)
+ * TLB entries and the I-cache are flushed, the generation number is bumped,
+ * and pmap_next_asn is changed to indicate the first non-reserved ASN.
+ *
+ * We reserve ASN #0 for pmaps that use the global kernel_lev1map. This
+ * prevents the following scenario:
+ *
+ * * New ASN generation starts, and process A is given ASN #0.
+ *
+ * * A new process B (and thus new pmap) is created. The ASN,
+ * for lack of a better value, is initialized to 0.
+ *
+ * * Process B runs. It is now using the TLB entries tagged
+ * by process A. *poof*
+ *
+ * In the scenario above, in addition to the processor using incorrect
+ * TLB entries, the PALcode might use incorrect information to service a
+ * TLB miss. (The PALcode uses the recursively mapped Virtual Page Table
+ * to locate the PTE for a faulting address, and tagged TLB entries exist
+ * for the Virtual Page Table addresses in order to speed up this procedure,
+ * as well.)
+ *
+ * By reserving an ASN for kernel_lev1map users, we are guaranteeing that
+ * new pmaps will initially run with no TLB entries for user addresses
+ * or VPT mappings that map user page tables. Since kernel_lev1map only
+ * contains mappings for kernel addresses, and since those mappings
+ * are always made with PG_ASM, sharing an ASN for kernel_lev1map users is
+ * safe (since PG_ASM mappings match any ASN).
+ *
+ * On processors that do not support ASNs, the PALcode invalidates
+ * the TLB and I-cache automatically on swpctx. We still go
+ * through the motions of assigning an ASN (really, just refreshing
+ * the ASN generation in this particular case) to keep the logic sane
+ * in other parts of the code.
*/
+u_int pmap_max_asn; /* max ASN supported by the system */
+u_int pmap_next_asn[ALPHA_MAXPROCS]; /* next free ASN to use */
+u_long pmap_asn_generation[ALPHA_MAXPROCS]; /* current ASN generation */
/*
- * Locking Protocols:
+ * Locking:
*
- * There are two structures in the pmap module that need locking:
- * the pmaps themselves, and the per-page pv_lists (which are locked
- * by locking the pv_lock_table entry that corresponds to the pv_head
- * for the list in question.) Most routines want to lock a pmap and
- * then do operations in it that require pv_list locking -- however
- * pmap_remove_all and pmap_copy_on_write operate on a physical page
- * basis and want to do the locking in the reverse order, i.e. lock
- * a pv_list and then go through all the pmaps referenced by that list.
- * To protect against deadlock between these two cases, the pmap_lock
- * is used. There are three different locking protocols as a result:
+ * This pmap module uses two types of locks: `normal' (sleep)
+ * locks and `simple' (spin) locks. They are used as follows:
*
- * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
- * the pmap.
+ * READ/WRITE SPIN LOCKS
+ * ---------------------
*
- * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
- * lock on the pmap_lock (shared read), then lock the pmap
- * and finally the pv_lists as needed [i.e. pmap lock before
- * pv_list lock.]
+ * * pmap_main_lock - This lock is used to prevent deadlock and/or
+ * provide mutex access to the pmap module. Most operations lock
+ * the pmap first, then PV lists as needed. However, some operations,
+ * such as pmap_page_protect(), lock the PV lists before locking
+ * the pmaps. To prevent deadlock, we require a mutex lock on the
+ * pmap module if locking in the PV->pmap direction. This is
+ * implemented by acquiring a (shared) read lock on pmap_main_lock
+ * if locking pmap->PV and a (exclusive) write lock if locking in
+ * the PV->pmap direction. Since only one thread can hold a write
+ * lock at a time, this provides the mutex.
*
- * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
- * Get a write lock on the pmap_lock (exclusive write); this
- * also guaranteees exclusive access to the pv_lists. Lock the
- * pmaps as needed.
+ * SIMPLE LOCKS
+ * ------------
*
- * At no time may any routine hold more than one pmap lock or more than
- * one pv_list lock. Because interrupt level routines can allocate
- * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
- * kernel_pmap can only be held at splvm.
+ * * pm_slock (per-pmap) - This lock protects all of the members
+ * of the pmap structure itself. This lock will be asserted
+ * in pmap_activate() and pmap_deactivate() from a critical
+ * section of cpu_switch(), and must never sleep. Note that
+ * in the case of the kernel pmap, interrupts which cause
+ * memory allocation *must* be blocked while this lock is
+ * asserted.
+ *
+ * * pvh_slock (per-pv_head) - This lock protects the PV list
+ * for a specified managed page.
+ *
+ * * pmap_all_pmaps_slock - This lock protects the global list of
+ * all pmaps. Note that a pm_slock must never be held while this
+ * lock is held.
+ *
+ * Address space number management (global ASN counters and per-pmap
+ * ASN state) are not locked; they use arrays of values indexed
+ * per-processor.
+ *
+ * All internal functions which operate on a pmap are called
+ * with the pmap already locked by the caller (which will be
+ * an interface function).
*/
+struct lock pmap_main_lock;
+struct simplelock pmap_all_pmaps_slock;
-#if NCPUS > 1
-/*
- * We raise the interrupt level to splvm, to block interprocessor
- * interrupts during pmap operations. We must take the CPU out of
- * the cpus_active set while interrupts are blocked.
- */
-#define SPLVM(spl) { \
- spl = splvm(); \
- i_bit_clear(cpu_number(), &cpus_active); \
-}
+#ifdef __OpenBSD__
+#define spinlockinit(lock, name, flags) lockinit(lock, 0, name, 0, flags)
+#define spinlockmgr(lock, flags, slock) lockmgr(lock, flags, slock, curproc)
+#endif
-#define SPLX(spl) { \
- i_bit_set(cpu_number(), &cpus_active); \
- splx(spl); \
-}
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+#define PMAP_MAP_TO_HEAD_LOCK() \
+ spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
+#define PMAP_MAP_TO_HEAD_UNLOCK() \
+ spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
+#define PMAP_HEAD_TO_MAP_LOCK() \
+ spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
+#define PMAP_HEAD_TO_MAP_UNLOCK() \
+ spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
+#else
+#define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
+#define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
+#define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
+#define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
+#endif /* MULTIPROCESSOR || LOCKDEBUG */
+#if defined(MULTIPROCESSOR)
/*
- * Lock on pmap system
+ * TLB Shootdown:
+ *
+ * When a mapping is changed in a pmap, the TLB entry corresponding to
+ * the virtual address must be invalidated on all processors. In order
+ * to accomplish this on systems with multiple processors, messages are
+ * sent from the processor which performs the mapping change to all
+ * processors on which the pmap is active. For other processors, the
+ * ASN generation numbers for that processor is invalidated, so that
+ * the next time the pmap is activated on that processor, a new ASN
+ * will be allocated (which implicitly invalidates all TLB entries).
+ *
+ * Note, we can use the pool allocator to allocate job entries
+ * since pool pages are mapped with K0SEG, not with the TLB.
*/
-lock_data_t pmap_system_lock;
-
-volatile boolean_t cpu_update_needed[NCPUS];
-
-#define PMAP_READ_LOCK(pmap, spl) { \
- SPLVM(spl); \
- lock_read(&pmap_system_lock); \
- simple_lock(&(pmap)->lock); \
-}
-
-#define PMAP_WRITE_LOCK(spl) { \
- SPLVM(spl); \
- lock_write(&pmap_system_lock); \
-}
-
-#define PMAP_READ_UNLOCK(pmap, spl) { \
- simple_unlock(&(pmap)->lock); \
- lock_read_done(&pmap_system_lock); \
- SPLX(spl); \
-}
-
-#define PMAP_WRITE_UNLOCK(spl) { \
- lock_write_done(&pmap_system_lock); \
- SPLX(spl); \
-}
-
-#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
- simple_lock(&(pmap)->lock); \
- lock_write_to_read(&pmap_system_lock); \
-}
+struct pmap_tlb_shootdown_job {
+ TAILQ_ENTRY(pmap_tlb_shootdown_job) pj_list;
+ vaddr_t pj_va; /* virtual address */
+ pmap_t pj_pmap; /* the pmap which maps the address */
+ pt_entry_t pj_pte; /* the PTE bits */
+};
-#define LOCK_PVH(index) (lock_pvh_pai(index))
-
-#define UNLOCK_PVH(index) (unlock_pvh_pai(index))
-
-#define PMAP_UPDATE_TLBS(pmap, s, e) \
-{ \
- cpu_set cpu_mask = 1 << cpu_number(); \
- cpu_set users; \
- \
- /* Since the pmap is locked, other updates are locked */ \
- /* out, and any pmap_activate has finished. */ \
- \
- /* find other cpus using the pmap */ \
- users = (pmap)->cpus_using & ~cpu_mask; \
- if (users) { \
- /* signal them, and wait for them to finish */ \
- /* using the pmap */ \
- signal_cpus(users, (pmap), (s), (e)); \
- while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
- continue; \
- } \
- \
- /* invalidate our own TLB if pmap is in use */ \
- if ((pmap)->cpus_using & cpu_mask) { \
- INVALIDATE_TLB((s), (e)); \
- } \
-}
+struct pmap_tlb_shootdown_q {
+ TAILQ_HEAD(, pmap_tlb_shootdown_job) pq_head;
+ int pq_pte; /* aggregate PTE bits */
+ int pq_count; /* number of pending requests */
+ struct simplelock pq_slock; /* spin lock on queue */
+} pmap_tlb_shootdown_q[ALPHA_MAXPROCS];
-#else NCPUS > 1
+/* If we have more pending jobs than this, we just nail the whole TLB. */
+#define PMAP_TLB_SHOOTDOWN_MAXJOBS 6
-#define SPLVM(spl)
-#define SPLX(spl)
+struct pool pmap_tlb_shootdown_job_pool;
-#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
-#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
-#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
-#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
-#define PMAP_WRITE_TO_READ_LOCK(pmap)
+void pmap_tlb_shootdown_q_drain __P((struct pmap_tlb_shootdown_q *));
+struct pmap_tlb_shootdown_job *pmap_tlb_shootdown_job_get
+ __P((struct pmap_tlb_shootdown_q *));
+void pmap_tlb_shootdown_job_put __P((struct pmap_tlb_shootdown_q *,
+ struct pmap_tlb_shootdown_job *));
+#endif /* MULTIPROCESSOR */
-#define LOCK_PVH(index)
-#define UNLOCK_PVH(index)
+#define PAGE_IS_MANAGED(pa) (vm_physseg_find(atop(pa), NULL) != -1)
-#if 0 /*fix bug later */
-#define PMAP_UPDATE_TLBS(pmap, s, e) { \
- /* invalidate our own TLB if pmap is in use */ \
- if ((pmap)->cpus_using) { \
- INVALIDATE_TLB((s), (e)); \
- } \
-}
-#else
-#define PMAP_UPDATE_TLBS(pmap, s, e) { \
- INVALIDATE_TLB((s), (e)); \
-}
-#endif
+static __inline struct pv_head *pa_to_pvh __P((paddr_t));
-#endif /* NCPUS > 1 */
+static __inline struct pv_head *
+pa_to_pvh(pa)
+ paddr_t pa;
+{
+ int bank, pg;
-#if 0
-#define INVALIDATE_TLB(s, e) { \
- register vm_offset_t v = s, ve = e; \
- while (v < ve) { \
- tbis(v); v += ALPHA_PGBYTES; \
- } \
-}
-#else
-#define INVALIDATE_TLB(s, e) { \
- tbia(); \
+ bank = vm_physseg_find(atop(pa), &pg);
+ return (&vm_physmem[bank].pmseg.pvhead[pg]);
}
-#endif
-
-
-#if NCPUS > 1
-
-void pmap_update_interrupt();
/*
- * Structures to keep track of pending TLB invalidations
+ * Optional argument passed to pmap_remove_mapping() for stealing mapping
+ * resources.
*/
+struct prm_thief {
+ int prmt_flags; /* flags; what to steal */
+ struct pv_entry *prmt_pv; /* the stolen PV entry */
+ pt_entry_t *prmt_ptp; /* the stolen PT page */
+};
-#define UPDATE_LIST_SIZE 4
+#define PRMT_PV 0x0001 /* steal the PV entry */
+#define PRMT_PTP 0x0002 /* steal the PT page */
-struct pmap_update_item {
- pmap_t pmap; /* pmap to invalidate */
- vm_offset_t start; /* start address to invalidate */
- vm_offset_t end; /* end address to invalidate */
-} ;
+/*
+ * Internal routines
+ */
+void alpha_protection_init __P((void));
+boolean_t pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *,
+ boolean_t, long, struct prm_thief *));
+void pmap_changebit __P((paddr_t, pt_entry_t, pt_entry_t, long));
-typedef struct pmap_update_item *pmap_update_item_t;
+/*
+ * PT page management functions.
+ */
+int pmap_lev1map_create __P((pmap_t, long));
+void pmap_lev1map_destroy __P((pmap_t, long));
+int pmap_ptpage_alloc __P((pmap_t, pt_entry_t *, int));
+boolean_t pmap_ptpage_steal __P((pmap_t, int, paddr_t *));
+void pmap_ptpage_free __P((pmap_t, pt_entry_t *, pt_entry_t **));
+void pmap_l3pt_delref __P((pmap_t, vaddr_t, pt_entry_t *, long,
+ pt_entry_t **));
+void pmap_l2pt_delref __P((pmap_t, pt_entry_t *, pt_entry_t *, long));
+void pmap_l1pt_delref __P((pmap_t, pt_entry_t *, long));
/*
- * List of pmap updates. If the list overflows,
- * the last entry is changed to invalidate all.
+ * PV table management functions.
*/
-struct pmap_update_list {
- decl_simple_lock_data(, lock)
- int count;
- struct pmap_update_item item[UPDATE_LIST_SIZE];
-} ;
-typedef struct pmap_update_list *pmap_update_list_t;
+int pmap_pv_enter __P((pmap_t, paddr_t, vaddr_t, pt_entry_t *, boolean_t));
+void pmap_pv_remove __P((pmap_t, paddr_t, vaddr_t, boolean_t,
+ struct pv_entry **));
+struct pv_entry *pmap_pv_alloc __P((void));
+void pmap_pv_free __P((struct pv_entry *));
+void *pmap_pv_page_alloc __P((u_long, int, int));
+void pmap_pv_page_free __P((void *, u_long, int));
+#ifdef DEBUG
+void pmap_pv_dump __P((paddr_t));
+#endif
-struct pmap_update_list cpu_update_list[NCPUS];
+/*
+ * ASN management functions.
+ */
+void pmap_asn_alloc __P((pmap_t, long));
-#endif /* NCPUS > 1 */
+/*
+ * Misc. functions.
+ */
+boolean_t pmap_physpage_alloc __P((int, paddr_t *));
+void pmap_physpage_free __P((paddr_t));
+int pmap_physpage_addref __P((void *));
+int pmap_physpage_delref __P((void *));
/*
- * Other useful macros.
+ * PMAP_ISACTIVE{,_TEST}:
+ *
+ * Check to see if a pmap is active on the current processor.
*/
-#define current_pmap() (vm_map_pmap(current_thread()->task->map))
-#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
+#define PMAP_ISACTIVE_TEST(pm, cpu_id) \
+ (((pm)->pm_cpus & (1UL << (cpu_id))) != 0)
+
+#if defined(DEBUG) && !defined(MULTIPROCESSOR)
+#define PMAP_ISACTIVE(pm, cpu_id) \
+({ \
+ /* \
+ * XXX This test is not MP-safe. \
+ */ \
+ int isactive_ = PMAP_ISACTIVE_TEST(pm, cpu_id); \
+ \
+ if (curproc != NULL && curproc->p_vmspace != NULL && \
+ (pm) != pmap_kernel() && \
+ (isactive_ ^ ((pm) == curproc->p_vmspace->vm_map.pmap))) \
+ panic("PMAP_ISACTIVE, isa: %d pm: %p curpm:%p\n", \
+ isactive_, (pm), curproc->p_vmspace->vm_map.pmap); \
+ (isactive_); \
+})
+#else
+#define PMAP_ISACTIVE(pm, cpu_id) PMAP_ISACTIVE_TEST(pm, cpu_id)
+#endif /* DEBUG && !MULTIPROCESSOR */
-struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
-
-struct zone *pmap_zone; /* zone of pmap structures */
-
-int pmap_debug = 0; /* flag for debugging prints */
-int ptes_per_vm_page; /* number of hardware ptes needed
- to map one VM page. */
-unsigned int inuse_ptepages_count = 0; /* debugging */
-
-extern char end;
-/*
- * Page directory for kernel.
- */
-pt_entry_t *root_kpdes;
-
-void pmap_remove_range(); /* forward */
-#if NCPUS > 1
-void signal_cpus(); /* forward */
-#endif /* NCPUS > 1 */
-
-int pmap_max_asn;
-void pmap_expand __P((pmap_t, vm_offset_t));
-
-/* XXX */
-#define PDB_BOOTSTRAP 0x00000001
-#define PDB_BOOTSTRAP_ALLOC 0x00000002
-#define PDB_UNMAP_PROM 0x00000004
-#define PDB_ACTIVATE 0x00000008
-#define PDB_DEACTIVATE 0x00000010
-#define PDB_TLBPID_INIT 0x00000020
-#define PDB_TLBPID_ASSIGN 0x00000040
-#define PDB_TLBPID_DESTROY 0x00000080
-#define PDB_ENTER 0x00000100
-#define PDB_CREATE 0x00000200
-#define PDB_PINIT 0x00000400
-#define PDB_EXPAND 0x00000800
-#define PDB_EXTRACT 0x00001000
-#define PDB_PTE 0x00002000
-#define PDB_RELEASE 0x00004000
-#define PDB_DESTROY 0x00008000
-#define PDB_COPY_PAGE 0x00010000
-#define PDB_ZERO_PAGE 0x00020000
-
-#define PDB_ANOMALOUS 0x20000000
-#define PDB_FOLLOW 0x40000000
-#define PDB_VERBOSE 0x80000000
-
-int pmapdebug = PDB_ANOMALOUS |-1 /* -1 */;
-
-#if defined(DEBUG) || 1
-#define DOPDB(x) ((pmapdebug & (x)) != 0)
+/*
+ * PMAP_ACTIVATE_ASN_SANITY:
+ *
+ * DEBUG sanity checks for ASNs within PMAP_ACTIVATE.
+ */
+#ifdef DEBUG
+#define PMAP_ACTIVATE_ASN_SANITY(pmap, cpu_id) \
+do { \
+ if ((pmap)->pm_lev1map == kernel_lev1map) { \
+ /* \
+ * This pmap implementation also ensures that pmaps \
+ * referencing kernel_lev1map use a reserved ASN \
+ * ASN to prevent the PALcode from servicing a TLB \
+ * miss with the wrong PTE. \
+ */ \
+ if ((pmap)->pm_asn[(cpu_id)] != PMAP_ASN_RESERVED) { \
+ printf("kernel_lev1map with non-reserved ASN " \
+ "(line %d)\n", __LINE__); \
+ panic("PMAP_ACTIVATE_ASN_SANITY"); \
+ } \
+ } else { \
+ if ((pmap)->pm_asngen[(cpu_id)] != \
+ pmap_asn_generation[(cpu_id)]) { \
+ /* \
+ * ASN generation number isn't valid! \
+ */ \
+ printf("pmap asngen %lu, current %lu " \
+ "(line %d)\n", \
+ (pmap)->pm_asngen[(cpu_id)], \
+ pmap_asn_generation[(cpu_id)], \
+ __LINE__); \
+ panic("PMAP_ACTIVATE_ASN_SANITY"); \
+ } \
+ if ((pmap)->pm_asn[(cpu_id)] == PMAP_ASN_RESERVED) { \
+ /* \
+ * DANGER WILL ROBINSON! We're going to \
+ * pollute the VPT TLB entries! \
+ */ \
+ printf("Using reserved ASN! (line %d)\n", \
+ __LINE__); \
+ panic("PMAP_ACTIVATE_ASN_SANITY"); \
+ } \
+ } \
+} while (0)
#else
-#define DOPDB(x) 0
+#define PMAP_ACTIVATE_ASN_SANITY(pmap, cpu_id) /* nothing */
#endif
-#define DOVPDB(x) (DOPDB(x) && DOPDB(PDB_VERBOSE))
/*
- * Given an offset and a map, compute the address of the
- * pte. If the address is invalid with respect to the map
- * then PT_ENTRY_NULL is returned (and the map may need to grow).
+ * PMAP_ACTIVATE:
*
- * This is only used internally.
+ * This is essentially the guts of pmap_activate(), without
+ * ASN allocation. This is used by pmap_activate(),
+ * pmap_lev1map_create(), and pmap_lev1map_destroy().
+ *
+ * This is called only when it is known that a pmap is "active"
+ * on the current processor; the ASN must already be valid.
*/
-#define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(addr)])
-
-pt_entry_t *pmap_pte(pmap, addr)
- register pmap_t pmap;
- register vm_offset_t addr;
-{
- register pt_entry_t *ptp, *ptep;
- register pt_entry_t pte;
-
- if (DOPDB(PDB_FOLLOW|PDB_PTE))
- printf("pmap_pte(%p, 0x%lx)\n", pmap, addr);
-
- if (pmap->dirbase == 0) {
- if (DOVPDB(PDB_FOLLOW|PDB_PTE))
- printf("pmap_pte: dirbase == 0\n");
- ptep = PT_ENTRY_NULL;
- goto out;
- }
+#define PMAP_ACTIVATE(pmap, p, cpu_id) \
+do { \
+ PMAP_ACTIVATE_ASN_SANITY(pmap, cpu_id); \
+ \
+ (p)->p_addr->u_pcb.pcb_hw.apcb_ptbr = \
+ ALPHA_K0SEG_TO_PHYS((vaddr_t)(pmap)->pm_lev1map) >> PGSHIFT; \
+ (p)->p_addr->u_pcb.pcb_hw.apcb_asn = (pmap)->pm_asn[(cpu_id)]; \
+ \
+ if ((p) == curproc) { \
+ /* \
+ * Page table base register has changed; switch to \
+ * our own context again so that it will take effect. \
+ */ \
+ (void) alpha_pal_swpctx((u_long)p->p_md.md_pcbpaddr); \
+ } \
+} while (0)
- /* seg1 */
- pte = *pmap_pde(pmap,addr);
- if ((pte & ALPHA_PTE_VALID) == 0) {
- if (DOVPDB(PDB_FOLLOW|PDB_PTE))
- printf("pmap_pte: l1 not valid\n");
- ptep = PT_ENTRY_NULL;
- goto out;
- }
-
- /* seg2 */
- ptp = (pt_entry_t *)ptetokv(pte);
- pte = ptp[pte2num(addr)];
- if ((pte & ALPHA_PTE_VALID) == 0) {
- if (DOVPDB(PDB_FOLLOW|PDB_PTE))
- printf("pmap_pte: l2 not valid\n");
- ptep = PT_ENTRY_NULL;
- goto out;
- }
+/*
+ * PMAP_SYNC_ISTREAM:
+ *
+ * Synchronize the I-stream for the specified pmap. For user
+ * pmaps, this is deferred until a process using the pmap returns
+ * to userspace.
+ *
+ * XXX Need MULTIPROCESSOR versions of these.
+ */
+#define PMAP_SYNC_ISTREAM_KERNEL() alpha_pal_imb()
- /* seg3 */
- ptp = (pt_entry_t *)ptetokv(pte);
- ptep = &ptp[pte3num(addr)];
+#define PMAP_SYNC_ISTREAM_USER(pmap) (pmap)->pm_needisync = ~0UL
-out:
- if (DOPDB(PDB_FOLLOW|PDB_PTE))
- printf("pmap_pte: returns %p\n", ptep);
- return (ptep);
-}
+#define PMAP_SYNC_ISTREAM(pmap) \
+do { \
+ if ((pmap) == pmap_kernel()) \
+ PMAP_SYNC_ISTREAM_KERNEL(); \
+ else \
+ PMAP_SYNC_ISTREAM_USER(pmap); \
+} while (0)
-#define DEBUG_PTE_PAGE 1
+/*
+ * PMAP_INVALIDATE_ASN:
+ *
+ * Invalidate the specified pmap's ASN, so as to force allocation
+ * of a new one the next time pmap_asn_alloc() is called.
+ *
+ * NOTE: THIS MUST ONLY BE CALLED IF AT LEAST ONE OF THE FOLLOWING
+ * CONDITIONS ARE TRUE:
+ *
+ * (1) The pmap references the global kernel_lev1map.
+ *
+ * (2) The pmap is not active on the current processor.
+ */
+#define PMAP_INVALIDATE_ASN(pmap, cpu_id) \
+do { \
+ (pmap)->pm_asn[(cpu_id)] = PMAP_ASN_RESERVED; \
+} while (0)
-extern vm_offset_t virtual_avail, virtual_end;
-extern vm_offset_t avail_start, avail_end;
+/*
+ * PMAP_INVALIDATE_TLB:
+ *
+ * Invalidate the TLB entry for the pmap/va pair.
+ */
+#define PMAP_INVALIDATE_TLB(pmap, va, hadasm, isactive, cpu_id) \
+do { \
+ if ((hadasm) || (isactive)) { \
+ /* \
+ * Simply invalidating the TLB entry and I-cache \
+ * works in this case. \
+ */ \
+ ALPHA_TBIS((va)); \
+ } else if ((pmap)->pm_asngen[(cpu_id)] == \
+ pmap_asn_generation[(cpu_id)]) { \
+ /* \
+ * We can't directly invalidate the TLB entry \
+ * in this case, so we have to force allocation \
+ * of a new ASN the next time this pmap becomes \
+ * active. \
+ */ \
+ PMAP_INVALIDATE_ASN((pmap), (cpu_id)); \
+ } \
+ /* \
+ * Nothing to do in this case; the next time the \
+ * pmap becomes active on this processor, a new \
+ * ASN will be allocated anyway. \
+ */ \
+} while (0)
/*
- * Bootstrap the system enough to run with virtual memory.
- * Map the kernel's code and data, and allocate the system page table.
- * Called with mapping OFF. Page_size must already be set.
+ * PMAP_KERNEL_PTE:
*
- * Parameters:
- * avail_start PA of first available physical page
- * avail_end PA of last available physical page
- * virtual_avail VA of first available page
- * virtual_end VA of last available page
+ * Get a kernel PTE.
*
+ * If debugging, do a table walk. If not debugging, just use
+ * the Virtual Page Table, since all kernel page tables are
+ * pre-allocated and mapped in.
*/
-vm_size_t pmap_kernel_vm = 5; /* each one 8 meg worth */
+#ifdef DEBUG
+#define PMAP_KERNEL_PTE(va) \
+({ \
+ pt_entry_t *l1pte_, *l2pte_; \
+ \
+ l1pte_ = pmap_l1pte(pmap_kernel(), va); \
+ if (pmap_pte_v(l1pte_) == 0) { \
+ printf("kernel level 1 PTE not valid, va 0x%lx " \
+ "(line %d)\n", (va), __LINE__); \
+ panic("PMAP_KERNEL_PTE"); \
+ } \
+ l2pte_ = pmap_l2pte(pmap_kernel(), va, l1pte_); \
+ if (pmap_pte_v(l2pte_) == 0) { \
+ printf("kernel level 2 PTE not valid, va 0x%lx " \
+ "(line %d)\n", (va), __LINE__); \
+ panic("PMAP_KERNEL_PTE"); \
+ } \
+ pmap_l3pte(pmap_kernel(), va, l2pte_); \
+})
+#else
+#define PMAP_KERNEL_PTE(va) (&VPT[VPT_INDEX((va))])
+#endif
-unsigned int
-pmap_free_pages()
-{
- return atop(avail_end - avail_start);
-}
+/*
+ * PMAP_STAT_{INCR,DECR}:
+ *
+ * Increment or decrement a pmap statistic.
+ */
+#define PMAP_STAT_INCR(s, v) atomic_add_ulong((unsigned long *)(&(s)), (v))
+#define PMAP_STAT_DECR(s, v) atomic_sub_ulong((unsigned long *)(&(s)), (v))
+/*
+ * pmap_bootstrap:
+ *
+ * Bootstrap the system to run with virtual memory.
+ *
+ * Note: no locking is necessary in this function.
+ */
void
-pmap_bootstrap(firstaddr, ptaddr, maxasn)
- vm_offset_t firstaddr, ptaddr;
- int maxasn;
+pmap_bootstrap(ptaddr, maxasn, ncpuids)
+ paddr_t ptaddr;
+ u_int maxasn;
+ u_long ncpuids;
{
- vm_offset_t pa;
- pt_entry_t template;
- pt_entry_t *pde, *pte;
- vm_offset_t start;
- extern int firstusablepage, lastusablepage;
+ vsize_t lev2mapsize, lev3mapsize;
+ pt_entry_t *lev2map, *lev3map;
+ pt_entry_t pte;
int i;
- long npages;
- if (DOPDB(PDB_FOLLOW|PDB_BOOTSTRAP))
- printf("pmap_bootstrap(0x%lx, 0x%lx, %d)\n", firstaddr, ptaddr,
- maxasn);
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_BOOTSTRAP))
+ printf("pmap_bootstrap(0x%lx, %u)\n", ptaddr, maxasn);
+#endif
- /* must be page aligned */
- start = firstaddr = alpha_round_page(firstaddr);
+ /*
+ * Figure out how many PTE's are necessary to map the kernel.
+ * The '512' comes from PAGER_MAP_SIZE in vm_pager_init().
+ * This should be kept in sync.
+ * We also reserve space for kmem_alloc_pageable() for vm_fork().
+ */
+ lev3mapsize = (VM_PHYS_SIZE +
+ nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 512 +
+ (maxproc * UPAGES) + NKMEMCLUSTERS;
-#define valloc(name, type, num) \
- (name) = (type *)firstaddr; \
- firstaddr = ALIGN((vm_offset_t)((name)+(num)))
-#define vallocsz(name, cast, size) \
- (name) = (cast)firstaddr; \
- firstaddr = ALIGN(firstaddr + size)
+#ifdef SYSVSHM
+ lev3mapsize += shminfo.shmall;
+#endif
+ lev3mapsize = roundup(lev3mapsize, NPTEPG);
/*
- * Initialize protection array.
+ * Allocate a level 1 PTE table for the kernel.
+ * This is always one page long.
+ * IF THIS IS NOT A MULTIPLE OF NBPG, ALL WILL GO TO HELL.
*/
- alpha_protection_init();
+ kernel_lev1map = (pt_entry_t *)
+ pmap_steal_memory(sizeof(pt_entry_t) * NPTEPG, NULL, NULL);
/*
- * Set ptes_per_vm_page for general use.
+ * Allocate a level 2 PTE table for the kernel.
+ * These must map all of the level3 PTEs.
+ * IF THIS IS NOT A MULTIPLE OF NBPG, ALL WILL GO TO HELL.
*/
- ptes_per_vm_page = page_size / ALPHA_PGBYTES;
+ lev2mapsize = roundup(howmany(lev3mapsize, NPTEPG), NPTEPG);
+ lev2map = (pt_entry_t *)
+ pmap_steal_memory(sizeof(pt_entry_t) * lev2mapsize, NULL, NULL);
/*
- * The kernel's pmap is statically allocated so we don't
- * have to use pmap_create, which is unlikely to work
- * correctly at this part of the boot sequence.
+ * Allocate a level 3 PTE table for the kernel.
+ * Contains lev3mapsize PTEs.
*/
+ lev3map = (pt_entry_t *)
+ pmap_steal_memory(sizeof(pt_entry_t) * lev3mapsize, NULL, NULL);
- kernel_pmap = &kernel_pmap_store;
-
-#if NCPUS > 1
- lock_init(&pmap_system_lock, FALSE); /* NOT a sleep lock */
-#endif /* NCPUS > 1 */
-
- simple_lock_init(&kernel_pmap->lock);
+ /*
+ * Allocate memory for the pv_heads. (A few more of the latter
+ * are allocated than are needed.)
+ *
+ * We could do this in pmap_init when we know the actual
+ * managed page pool size, but its better to use kseg0
+ * addresses rather than kernel virtual addresses mapped
+ * through the TLB.
+ */
+ pv_table_npages = physmem;
+ pv_table = (struct pv_head *)
+ pmap_steal_memory(sizeof(struct pv_head) * pv_table_npages,
+ NULL, NULL);
- kernel_pmap->ref_count = 1;
+ /*
+ * ...and initialize the pv_entry list headers.
+ */
+ for (i = 0; i < pv_table_npages; i++) {
+ LIST_INIT(&pv_table[i].pvh_list);
+ simple_lock_init(&pv_table[i].pvh_slock);
+ }
/*
- * Allocate the kernel page directory, and put its
- * virtual address in root_kpdes.
- *
- * No other physical memory has been allocated.
+ * Set up level 1 page table
*/
- vallocsz(root_kpdes, pt_entry_t *, PAGE_SIZE);
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: root_kpdes = %p\n", root_kpdes);
- kernel_pmap->dirbase = root_kpdes;
- kernel_pmap->dirpfn = alpha_btop(kvtophys((vm_offset_t)root_kpdes));
+ /* Map all of the level 2 pte pages */
+ for (i = 0; i < howmany(lev2mapsize, NPTEPG); i++) {
+ pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev2map) +
+ (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT;
+ pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED;
+ kernel_lev1map[l1pte_index(VM_MIN_KERNEL_ADDRESS +
+ (i*PAGE_SIZE*NPTEPG*NPTEPG))] = pte;
+ }
+
+ /* Map the virtual page table */
+ pte = (ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT)
+ << PG_SHIFT;
+ pte |= PG_V | PG_KRE | PG_KWE; /* NOTE NO ASM */
+ kernel_lev1map[l1pte_index(VPTBASE)] = pte;
+ VPT = (pt_entry_t *)VPTBASE;
+
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ {
+ extern pt_entry_t prom_pte; /* XXX */
+ extern int prom_mapped; /* XXX */
- /* First, copy mappings for things below VM_MIN_KERNEL_ADDRESS */
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: setting up root_kpdes (copy 0x%lx)\n",
- pdenum(VM_MIN_KERNEL_ADDRESS) * sizeof root_kpdes[0]);
- bzero(root_kpdes, PAGE_SIZE);
- bcopy((caddr_t)ptaddr, root_kpdes,
- pdenum(VM_MIN_KERNEL_ADDRESS) * sizeof root_kpdes[0]);
+ if (pmap_uses_prom_console()) {
+ /*
+ * XXX Save old PTE so we can remap the PROM, if
+ * XXX necessary.
+ */
+ prom_pte = *(pt_entry_t *)ptaddr & ~PG_ASM;
+ }
+ prom_mapped = 0;
/*
- * Set up the virtual page table.
+ * Actually, this code lies. The prom is still mapped, and will
+ * remain so until the context switch after alpha_init() returns.
*/
- pte_ktemplate(template, kvtophys(root_kpdes),
- VM_PROT_READ | VM_PROT_WRITE);
- template &= ~ALPHA_PTE_GLOBAL;
- root_kpdes[pdenum(VPTBASE)] = template;
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: VPT PTE 0x%lx at 0x%lx)\n",
- root_kpdes[pdenum(VPTBASE)], &root_kpdes[pdenum(VPTBASE)]);
+ }
+#endif
-#if 0
/*
- * Rid of console's default mappings
+ * Set up level 2 page table.
*/
- for (pde = pmap_pde(kernel_pmap,0);
- pde < pmap_pde(kernel_pmap,VM_MIN_KERNEL_ADDRESS);)
- *pde++ = 0;
+ /* Map all of the level 3 pte pages */
+ for (i = 0; i < howmany(lev3mapsize, NPTEPG); i++) {
+ pte = (ALPHA_K0SEG_TO_PHYS(((vaddr_t)lev3map) +
+ (i*PAGE_SIZE)) >> PGSHIFT) << PG_SHIFT;
+ pte |= PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED;
+ lev2map[l2pte_index(VM_MIN_KERNEL_ADDRESS+
+ (i*PAGE_SIZE*NPTEPG))] = pte;
+ }
-#endif
/*
- * Allocate the seg2 kernel page table entries from the front
- * of available physical memory. Take enough to cover all of
- * the K2SEG range. But of course one page is enough for 8Gb,
- * and more in future chips ...
+ * Set up level three page table (lev3map)
*/
-#define enough_kseg2() (PAGE_SIZE)
-
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: allocating kvseg segment pages\n");
- vallocsz(pte, pt_entry_t *, enough_kseg2()); /* virtual */
- pa = kvtophys(pte); /* physical */
- bzero(pte, enough_kseg2());
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: kvseg segment pages at %p\n", pte);
-
-#undef enough_kseg2
+ /* Nothing to do; it's already zero'd */
/*
- * Make a note of it in the seg1 table
+ * Initialize `FYI' variables. Note we're relying on
+ * the fact that BSEARCH sorts the vm_physmem[] array
+ * for us.
*/
+ avail_start = ptoa(vm_physmem[0].start);
+ avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
+ virtual_avail = VM_MIN_KERNEL_ADDRESS;
+ virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * NBPG;
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: inserting segment pages into root\n");
- tbia();
- pte_ktemplate(template,pa,VM_PROT_READ|VM_PROT_WRITE);
- pde = pmap_pde(kernel_pmap,K2SEG_BASE);
- i = ptes_per_vm_page;
- do {
- *pde++ = template;
- pte_increment_pa(template);
- i--;
- } while (i > 0);
+#if 0
+ printf("avail_start = 0x%lx\n", avail_start);
+ printf("avail_end = 0x%lx\n", avail_end);
+ printf("virtual_avail = 0x%lx\n", virtual_avail);
+ printf("virtual_end = 0x%lx\n", virtual_end);
+#endif
/*
- * The kernel runs unmapped and cached (k0seg),
- * only dynamic data are mapped in k1seg.
- * ==> No need to map it.
+ * Initialize the pmap pools and list.
*/
+ pmap_ncpuids = ncpuids;
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ pool_init(&pmap_asn_pool, pmap_ncpuids * sizeof(u_int), 0, 0, 0,
+ "pmasnpl",
+ 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ pool_init(&pmap_asngen_pool, pmap_ncpuids * sizeof(u_long), 0, 0, 0,
+ "pmasngenpl",
+ 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
+ 0, pmap_pv_page_alloc, pmap_pv_page_free, M_VMPMAP);
+
+ TAILQ_INIT(&pmap_all_pmaps);
/*
- * But don't we need some seg2 pagetables to start with ?
+ * Initialize the ASN logic.
*/
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: allocating kvseg page table pages\n");
- pde = &pte[pte2num(K2SEG_BASE)];
- for (i = pmap_kernel_vm; i > 0; i--) {
- register int j;
-
- vallocsz(pte, pt_entry_t *, PAGE_SIZE); /* virtual */
- pa = kvtophys(pte); /* physical */
- pte_ktemplate(template,pa,VM_PROT_READ|VM_PROT_WRITE);
- bzero(pte, PAGE_SIZE);
- j = ptes_per_vm_page;
- do {
- *pde++ = template;
- pte_increment_pa(template);
- } while (--j > 0);
+ pmap_max_asn = maxasn;
+ for (i = 0; i < ALPHA_MAXPROCS; i++) {
+ pmap_next_asn[i] = 1;
+ pmap_asn_generation[i] = 0;
}
/*
- * Fix up managed physical memory information.
+ * Initialize the locks.
*/
- avail_start = ALPHA_K0SEG_TO_PHYS(firstaddr);
- avail_end = alpha_ptob(lastusablepage + 1);
- mem_size = avail_end - avail_start;
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: avail: 0x%lx -> 0x%lx (0x%lx)\n",
- avail_start, avail_end, mem_size);
+ spinlockinit(&pmap_main_lock, "pmaplk", 0);
+ simple_lock_init(&pmap_all_pmaps_slock);
/*
- * Allocate memory for the pv_head_table and its
- * lock bits, and the reference/modify byte array.
+ * Initialize kernel pmap. Note that all kernel mappings
+ * have PG_ASM set, so the ASN doesn't really matter for
+ * the kernel pmap. Also, since the kernel pmap always
+ * references kernel_lev1map, it always has an invalid ASN
+ * generation.
*/
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: allocating page management data\n");
-
- npages = ((BYTE_SIZE * mem_size) /
- (BYTE_SIZE * (PAGE_SIZE + sizeof (struct pv_entry) + 1) + 1));
-
- valloc(pv_head_table, struct pv_entry, npages);
- bzero(pv_head_table, sizeof (struct pv_entry) * npages);
-
- valloc(pv_lock_table, char, pv_lock_table_size(npages));
- bzero(pv_lock_table, pv_lock_table_size(npages));
-
- valloc(pmap_phys_attributes, char, npages);
- bzero(pmap_phys_attributes, sizeof (char) * npages);
-
- avail_start = alpha_round_page(ALPHA_K0SEG_TO_PHYS(firstaddr));
- if (npages > pmap_free_pages())
- panic("pmap_bootstrap");
- mem_size = avail_end - avail_start;
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: avail: 0x%lx -> 0x%lx (0x%lx)\n",
- avail_start, avail_end, mem_size);
+ memset(pmap_kernel(), 0, sizeof(struct pmap));
+ pmap_kernel()->pm_lev1map = kernel_lev1map;
+ pmap_kernel()->pm_count = 1;
+ pmap_kernel()->pm_asn = kernel_pmap_asn_store;
+ pmap_kernel()->pm_asngen = kernel_pmap_asngen_store;
+ for (i = 0; i < ALPHA_MAXPROCS; i++) {
+ pmap_kernel()->pm_asn[i] = PMAP_ASN_RESERVED;
+ pmap_kernel()->pm_asngen[i] = pmap_asn_generation[i];
+ }
+ simple_lock_init(&pmap_kernel()->pm_slock);
+ TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
+#if defined(MULTIPROCESSOR)
/*
- * Assert kernel limits (because of pmap_expand).
+ * Initialize the TLB shootdown queues.
*/
-
- virtual_avail = alpha_round_page(K2SEG_BASE);
- virtual_end = trunc_page(K2SEG_BASE + pde2tova(pmap_kernel_vm));
- if (DOVPDB(PDB_BOOTSTRAP)) {
- printf("pmap_bootstrap: virtual_avail = %p\n", virtual_avail);
- printf("pmap_bootstrap: virtual_end = %p\n", virtual_end);
+ pool_init(&pmap_tlb_shootdown_job_pool,
+ sizeof(struct pmap_tlb_shootdown_job), 0, 0, 0, "pmaptlbpl",
+ 0, NULL, NULL, M_VMPMAP);
+ for (i = 0; i < ALPHA_MAXPROCS; i++) {
+ TAILQ_INIT(&pmap_tlb_shootdown_q[i].pq_head);
+ simple_lock_init(&pmap_tlb_shootdown_q[i].pq_slock);
}
+#endif
/*
- * The distinguished tlbpid value of 0 is reserved for
- * the kernel pmap. Initialize the tlbpid allocator,
- * who knows about this.
+ * Set up proc0's PCB such that the ptbr points to the right place
+ * and has the kernel pmap's (really unused) ASN.
*/
- kernel_pmap->pid = 0;
- pmap_tlbpid_init(maxasn);
+ proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr =
+ ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT;
+ proc0.p_addr->u_pcb.pcb_hw.apcb_asn =
+ pmap_kernel()->pm_asn[cpu_number()];
- if (DOVPDB(PDB_BOOTSTRAP))
- printf("pmap_bootstrap: leaving\n");
+ /*
+ * Mark the kernel pmap `active' on this processor.
+ */
+ atomic_setbits_ulong(&pmap_kernel()->pm_cpus,
+ (1UL << cpu_number()));
}
-pmap_rid_of_console()
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+int
+pmap_uses_prom_console()
{
- pt_entry_t *pde;
- /*
- * Rid of console's default mappings
- */
- for (pde = pmap_pde(kernel_pmap,0L);
- pde < pmap_pde(kernel_pmap,VM_MIN_KERNEL_ADDRESS);)
- *pde++ = 0;
+
+#if defined(NEW_SCC_DRIVER)
+ return (cputype == ST_DEC_21000);
+#else
+ return (cputype == ST_DEC_21000
+ || cputype == ST_DEC_3000_300
+ || cputype == ST_DEC_3000_500);
+#endif /* NEW_SCC_DRIVER */
}
+#endif /* _PMAP_MAY_USE_PROM_CONSOLE */
/*
- * Bootstrap memory allocator. This function allows for early dynamic
- * memory allocation until the virtual memory system has been bootstrapped.
- * After that point, either kmem_alloc or malloc should be used. This
- * function works by stealing pages from the (to be) managed page pool,
- * implicitly mapping them (by using their k0seg addresses),
- * and zeroing them.
+ * pmap_steal_memory: [ INTERFACE ]
+ *
+ * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
+ * This function allows for early dynamic memory allocation until the
+ * virtual memory system has been bootstrapped. After that point, either
+ * kmem_alloc or malloc should be used. This function works by stealing
+ * pages from the (to be) managed page pool, then implicitly mapping the
+ * pages (by using their k0seg addresses) and zeroing them.
+ *
+ * It may be used once the physical memory segments have been pre-loaded
+ * into the vm_physmem[] array. Early memory allocation MUST use this
+ * interface! This cannot be used after vm_page_startup(), and will
+ * generate a panic if tried.
*
- * It should be used from pmap_bootstrap till vm_page_startup, afterwards
- * it cannot be used, and will generate a panic if tried. Note that this
- * memory will never be freed, and in essence it is wired down.
+ * Note that this memory will never be freed, and in essence it is wired
+ * down.
+ *
+ * Note: no locking is necessary in this function.
*/
-
-void *
-pmap_bootstrap_alloc(size)
- int size;
+vaddr_t
+pmap_steal_memory(size, vstartp, vendp)
+ vsize_t size;
+ vaddr_t *vstartp, *vendp;
{
- vm_offset_t val;
- extern boolean_t vm_page_startup_initialized;
+ int bank, npgs, x;
+ vaddr_t va;
+ paddr_t pa;
- if (DOPDB(PDB_FOLLOW|PDB_BOOTSTRAP_ALLOC))
- printf("pmap_bootstrap_alloc(%lx)\n", size);
- if (vm_page_startup_initialized)
- panic("pmap_bootstrap_alloc: called after startup initialized");
+ size = round_page(size);
+ npgs = atop(size);
- val = ALPHA_PHYS_TO_K0SEG(avail_start);
- size = alpha_round_page(size);
- avail_start += size;
- if (avail_start > avail_end) /* sanity */
- panic("pmap_bootstrap_alloc");
+#if 0
+ printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs);
+#endif
- bzero((caddr_t)val, size);
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+#if 0
+ if (uvm.page_init_done == TRUE)
+ panic("pmap_steal_memory: called _after_ bootstrap");
+#else
+ /* XXXX */
+ if (vm_physmem[0].pgs)
+ panic("vm_page_physget: called _after_ bootstrap");
+#endif
- if (DOVPDB(PDB_BOOTSTRAP_ALLOC))
- printf("pmap_bootstrap_alloc: returns %p\n", val);
- return ((void *)val);
-}
-/*
- * Unmap the PROM mappings. PROM mappings are kept around
- * by pmap_bootstrap, so we can still use the prom's printf.
- * Basically, blow away all mappings in the level one PTE
- * table below VM_MIN_KERNEL_ADDRESS. The Virtual Page Table
- * Is at the end of virtual space, so it's safe.
- */
-void
-pmap_unmap_prom()
-{
- int i;
- extern int prom_mapped;
- extern pt_entry_t *rom_ptep, rom_pte;
-
- if (DOPDB(PDB_FOLLOW|PDB_UNMAP_PROM))
- printf("pmap_unmap_prom\n");
-
- /* XXX save old pte so that we can remap prom if necessary */
- rom_ptep = &root_kpdes[0]; /* XXX */
- rom_pte = *rom_ptep & ~ALPHA_PTE_ASM; /* XXX */
-
- if (DOVPDB(PDB_UNMAP_PROM))
- printf("pmap_unmap_prom: zero 0x%lx, rom_pte was 0x%lx\n",
- pdenum(VM_MIN_KERNEL_ADDRESS) * sizeof root_kpdes[0],
- rom_pte);
- /* Mark all mappings before VM_MIN_KERNEL_ADDRESS as invalid. */
- bzero(root_kpdes, pdenum(VM_MIN_KERNEL_ADDRESS) * sizeof root_kpdes[0]);
- prom_mapped = 0;
- ALPHA_TBIA();
- if (DOVPDB(PDB_UNMAP_PROM))
- printf("pmap_unmap_prom: leaving\n");
-}
+#if 0
+ printf(" bank %d: avail_start 0x%lx, start 0x%lx, "
+ "avail_end 0x%lx\n", bank, vm_physmem[bank].avail_start,
+ vm_physmem[bank].start, vm_physmem[bank].avail_end);
+#endif
-/*
- * Initialize the pmap module.
- * Called by vm_init, to initialize any structures that the pmap
- * system needs to map virtual memory.
- */
-void
-pmap_init(phys_start, phys_end)
- vm_offset_t phys_start, phys_end;
-{
- vm_size_t s;
- int i;
+ if (vm_physmem[bank].avail_start != vm_physmem[bank].start ||
+ vm_physmem[bank].avail_start >= vm_physmem[bank].avail_end)
+ continue;
- /*
- * Create the zone of physical maps,
- * and of the physical-to-virtual entries.
- */
#if 0
- s = (vm_size_t) sizeof(struct pmap);
- pmap_zone = zinit(s, 400*s, 4096, FALSE, "pmap"); /* XXX */
- s = (vm_size_t) sizeof(struct pv_entry);
- pv_list_zone = zinit(s, 10000*s, 4096, FALSE, "pv_list"); /* XXX */
+ printf(" avail_end - avail_start = 0x%lx\n",
+ vm_physmem[bank].avail_end - vm_physmem[bank].avail_start);
#endif
-#if NCPUS > 1
- /*
- * Set up the pmap request lists
- */
- for (i = 0; i < NCPUS; i++) {
- pmap_update_list_t up = &cpu_update_list[i];
+ if ((vm_physmem[bank].avail_end - vm_physmem[bank].avail_start)
+ < npgs)
+ continue;
- simple_lock_init(&up->lock);
- up->count = 0;
- }
+ /*
+ * There are enough pages here; steal them!
+ */
+ pa = ptoa(vm_physmem[bank].avail_start);
+ vm_physmem[bank].avail_start += npgs;
+ vm_physmem[bank].start += npgs;
- alpha_set_scb_entry( SCB_INTERPROC, pmap_update_interrupt);
+ /*
+ * Have we used up this segment?
+ */
+ if (vm_physmem[bank].avail_start == vm_physmem[bank].end) {
+ if (vm_nphysseg == 1)
+ panic("pmap_steal_memory: out of memory!");
+
+ /* Remove this segment from the list. */
+ vm_nphysseg--;
+ for (x = bank; x < vm_nphysseg; x++) {
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x + 1];
+ }
+ }
-#endif /* NCPUS > 1 */
+ /*
+ * Fill these in for the caller; we don't modify them,
+ * but the upper layers still want to know.
+ */
+ if (vstartp)
+ *vstartp = round_page(virtual_avail);
+ if (vendp)
+ *vendp = trunc_page(virtual_end);
+
+ va = ALPHA_PHYS_TO_K0SEG(pa);
+ bzero((caddr_t)va, size);
+ pmap_pages_stolen += npgs;
+ return (va);
+ }
/*
- * Only now, when all of the data structures are allocated,
- * can we set vm_first_phys and vm_last_phys. If we set them
- * too soon, the kmem_alloc_wired above will try to use these
- * data structures and blow up.
+ * If we got here, there was no memory left.
*/
-
- vm_first_phys = phys_start;
- vm_last_phys = phys_end;
- pmap_initialized = TRUE;
+ panic("pmap_steal_memory: no memory to steal");
}
-#define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end))
-#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
-
/*
- * Routine: pmap_page_table_page_alloc
+ * pmap_init: [ INTERFACE ]
*
- * Allocates a new physical page to be used as a page-table page.
+ * Initialize the pmap module. Called by vm_init(), to initialize any
+ * structures that the pmap system needs to map virtual memory.
*
- * Must be called with the pmap system and the pmap unlocked,
- * since these must be unlocked to use vm_page_grab.
+ * Note: no locking is necessary in this function.
*/
-vm_offset_t
-pmap_page_table_page_alloc()
+void
+pmap_init()
{
- register vm_page_t m;
- register vm_offset_t pa;
+ vsize_t s;
+ int bank;
+ struct pv_head *pvh;
- check_simple_locks();
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_init()\n");
+#endif
- /*
- * We cannot allocate the pmap_object in pmap_init,
- * because it is called before the zone package is up.
- * Allocate it now if it is missing.
- */
- if (pmap_object == VM_OBJECT_NULL)
- pmap_object = vm_object_allocate(mem_size);
+ /* initialize protection array */
+ alpha_protection_init();
/*
- * Allocate a VM page
+ * Memory for the pv heads has already been allocated.
+ * Initialize the physical memory segments.
*/
- while ((m = vm_page_grab()) == VM_PAGE_NULL)
- vm_page_wait();
+ pvh = pv_table;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ s = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvhead = pvh;
+ pvh += s;
+ }
/*
- * Map the page to its physical address so that it
- * can be found later.
+ * Set a low water mark on the pv_entry pool, so that we are
+ * more likely to have these around even in extreme memory
+ * starvation.
*/
- pa = m->phys_addr;
- vm_object_lock(pmap_object);
- vm_page_insert(m, pmap_object, pa);
- vm_page_lock_queues();
- vm_page_wire(m);
- inuse_ptepages_count++;
- vm_page_unlock_queues();
- vm_object_unlock(pmap_object);
+ pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
/*
- * Zero the page.
+ * Now it is safe to enable pv entry recording.
*/
- bzero((void *)phystokv(pa), PAGE_SIZE);
-
- return pa;
-}
+ pmap_initialized = TRUE;
-/*
- * Deallocate a page-table page.
- * The page-table page must have all mappings removed,
- * and be removed from its page directory.
- */
-void
-pmap_page_table_page_dealloc(pa)
- vm_offset_t pa;
-{
- vm_page_t m;
-
- vm_object_lock(pmap_object);
- m = vm_page_lookup(pmap_object, pa);
- if (m == VM_PAGE_NULL)
- panic("pmap_page_table_page_dealloc: page %#X not in object", pa);
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
- vm_page_unlock_queues();
- vm_object_unlock(pmap_object);
+#if 0
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ printf("bank %d\n", bank);
+ printf("\tstart = 0x%x\n", ptoa(vm_physmem[bank].start));
+ printf("\tend = 0x%x\n", ptoa(vm_physmem[bank].end));
+ printf("\tavail_start = 0x%x\n",
+ ptoa(vm_physmem[bank].avail_start));
+ printf("\tavail_end = 0x%x\n",
+ ptoa(vm_physmem[bank].avail_end));
+ }
+#endif
}
/*
- * Create and return a physical map.
+ * pmap_create: [ INTERFACE ]
*
- * If the size specified for the map
- * is zero, the map is an actual physical
- * map, and may be referenced by the
- * hardware.
+ * Create and return a physical map.
*
- * If the size specified is non-zero,
- * the map will be used in software only, and
- * is bounded by that size.
+ * Note: no locking is necessary in this function.
*/
pmap_t
-pmap_create(size)
- vm_size_t size;
+pmap_create()
{
- register pmap_t p;
+ pmap_t pmap;
+ int i;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
+ printf("pmap_create()\n");
+#endif
- if (DOPDB(PDB_FOLLOW|PDB_CREATE))
- printf("pmap_create(%d)\n", size);
+ pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
+ bzero(pmap, sizeof(*pmap));
+
+ pmap->pm_asn = pool_get(&pmap_asn_pool, PR_WAITOK);
+ pmap->pm_asngen = pool_get(&pmap_asngen_pool, PR_WAITOK);
/*
- * A software use-only map doesn't even need a map.
+ * Defer allocation of a new level 1 page table until
+ * the first new mapping is entered; just take a reference
+ * to the kernel kernel_lev1map.
*/
+ pmap->pm_lev1map = kernel_lev1map;
- if (size != 0) {
- p = PMAP_NULL;
- goto out;
+ pmap->pm_count = 1;
+ for (i = 0; i < pmap_ncpuids; i++) {
+ pmap->pm_asn[i] = PMAP_ASN_RESERVED;
+ /* XXX Locking? */
+ pmap->pm_asngen[i] = pmap_asn_generation[i];
}
+ simple_lock_init(&pmap->pm_slock);
- /* XXX: is it ok to wait here? */
- p = (pmap_t) malloc(sizeof *p, M_VMPMAP, M_WAITOK);
- if (p == NULL)
- panic("pmap_create: cannot allocate a pmap");
-
- bzero(p, sizeof (*p));
- pmap_pinit(p);
+ simple_lock(&pmap_all_pmaps_slock);
+ TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
+ simple_unlock(&pmap_all_pmaps_slock);
-out:
- if (DOVPDB(PDB_FOLLOW|PDB_CREATE))
- printf("pmap_create: returning %p\n", p);
- return (p);
+ return (pmap);
}
+/*
+ * pmap_destroy: [ INTERFACE ]
+ *
+ * Drop the reference count on the specified pmap, releasing
+ * all resources if the reference count drops to zero.
+ */
void
-pmap_pinit(p)
- struct pmap *p;
+pmap_destroy(pmap)
+ pmap_t pmap;
{
- register pmap_statistics_t stats;
- extern struct vmspace vmspace0;
-
- if (DOPDB(PDB_FOLLOW|PDB_PINIT))
- printf("pmap_init(%p)\n", p);
-
-#if 0
- /* XXX cgd WHY NOT pmap_page_table_page_alloc()? */
- p->dirbase = (void *)kmem_alloc(kernel_map, ALPHA_PGBYTES);
-#else
- p->dirbase = (void *)phystokv(pmap_page_table_page_alloc());
-#endif
- if (p->dirbase == NULL)
- panic("pmap_create");
- p->dirpfn = alpha_btop(pmap_resident_extract(kernel_pmap,
- (vm_offset_t)p->dirbase));
-
- if (DOVPDB(PDB_FOLLOW|PDB_PINIT))
- printf("pmap_init(%p): dirbase = %p, dirpfn = 0x%x\n", p,
- p->dirbase, p->dirpfn);
- aligned_block_copy(root_kpdes, p->dirbase, ALPHA_PGBYTES);
- p->ref_count = 1;
- p->pid = -1;
- if (DOVPDB(PDB_FOLLOW|PDB_PINIT))
- printf("pmap_init(%p): first pde = 0x%lx\n", p->dirbase[0]);
+ int refs;
- {
- pt_entry_t template;
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_destroy(%p)\n", pmap);
+#endif
+ if (pmap == NULL)
+ return;
- pte_ktemplate(template, kvtophys(p->dirbase),
- VM_PROT_READ | VM_PROT_WRITE);
- template &= ~ALPHA_PTE_GLOBAL;
- p->dirbase[pdenum(VPTBASE)] = template;
- }
-printf("PMAP_PINIT: FIRST ENT = 0x%lx\n", p->dirbase[0]);
+ PMAP_LOCK(pmap);
+ refs = --pmap->pm_count;
+ PMAP_UNLOCK(pmap);
- simple_lock_init(&p->lock);
- p->cpus_using = 0;
- p->hacking = 0;
+ if (refs > 0)
+ return;
/*
- * Initialize statistics.
+ * Remove it from the global list of all pmaps.
*/
+ simple_lock(&pmap_all_pmaps_slock);
+ TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
+ simple_unlock(&pmap_all_pmaps_slock);
- stats = &p->stats;
- stats->resident_count = 0;
- stats->wired_count = 0;
+#ifdef DIAGNOSTIC
+ /*
+ * Since the pmap is supposed to contain no valid
+ * mappings at this point, this should never happen.
+ */
+ if (pmap->pm_lev1map != kernel_lev1map) {
+ printf("pmap_release: pmap still contains valid mappings!\n");
+ if (pmap->pm_nlev2)
+ printf("pmap_release: %ld level 2 tables left\n",
+ pmap->pm_nlev2);
+ if (pmap->pm_nlev3)
+ printf("pmap_release: %ld level 3 tables left\n",
+ pmap->pm_nlev3);
+ pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+ if (pmap->pm_lev1map != kernel_lev1map)
+ panic("pmap_release: pmap_remove() didn't");
+ }
+#endif
-out:
- if (DOVPDB(PDB_FOLLOW|PDB_PINIT))
- printf("pmap_init: leaving\n", p);
+ pool_put(&pmap_asn_pool, pmap->pm_asn);
+ pool_put(&pmap_asngen_pool, pmap->pm_asngen);
+ pool_put(&pmap_pmap_pool, pmap);
}
/*
- * Retire the given physical map from service.
- * Should only be called if the map contains
- * no valid mappings.
+ * pmap_reference: [ INTERFACE ]
+ *
+ * Add a reference to the specified pmap.
*/
-
-void pmap_destroy(p)
- register pmap_t p;
+void
+pmap_reference(pmap)
+ pmap_t pmap;
{
- register int c;
- register spl_t s;
-
- if (DOPDB(PDB_FOLLOW|PDB_DESTROY))
- printf("pmap_destroy(%p)\n", p);
- if (p == PMAP_NULL)
- goto out;
-
- SPLVM(s);
- simple_lock(&p->lock);
- c = --p->ref_count;
- simple_unlock(&p->lock);
- SPLX(s);
-
- if (c == 0) {
- pmap_release(p);
- free(p, M_VMPMAP);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_reference(%p)\n", pmap);
+#endif
+ if (pmap != NULL) {
+ PMAP_LOCK(pmap);
+ pmap->pm_count++;
+ PMAP_UNLOCK(pmap);
}
-out:
- if (DOVPDB(PDB_FOLLOW|PDB_DESTROY))
- printf("pmap_destroy: leaving\n");
}
+/*
+ * pmap_remove: [ INTERFACE ]
+ *
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
void
-pmap_release(p)
- pmap_t p;
+pmap_remove(pmap, sva, eva)
+ pmap_t pmap;
+ vaddr_t sva, eva;
{
- register pt_entry_t *pdep, *ptep, *eptep;
- register vm_offset_t pa;
-
- if (DOPDB(PDB_FOLLOW|PDB_RELEASE))
- printf("pmap_release(%p)\n", p);
+ pt_entry_t *l1pte, *l2pte, *l3pte;
+ pt_entry_t *saved_l1pte, *saved_l2pte, *saved_l3pte;
+ vaddr_t l1eva, l2eva, vptva;
+ boolean_t needisync = FALSE;
+ long cpu_id = cpu_number();
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva);
+#endif
- if (p->dirbase == NULL) {
- if (DOPDB(PDB_FOLLOW|PDB_ANOMALOUS|PDB_RELEASE))
- printf("pmap_release: already reclaimed\n");
- /* resources already reclaimed */
- goto out;
- }
+ if (pmap == NULL)
+ return;
/*
- * Free the memory maps, then the
- * pmap structure.
+ * If this is the kernel pmap, we can use a faster method
+ * for accessing the PTEs (since the PT pages are always
+ * resident).
+ *
+ * Note that this routine should NEVER be called from an
+ * interrupt context; pmap_kremove() is used for that.
*/
- for (pdep = p->dirbase;
- pdep < pmap_pde(p,VM_MIN_KERNEL_ADDRESS);
- pdep += ptes_per_vm_page) {
- if (*pdep & ALPHA_PTE_VALID) {
- pa = pte_to_pa(*pdep);
+ if (pmap == pmap_kernel()) {
+ PMAP_MAP_TO_HEAD_LOCK();
+ PMAP_LOCK(pmap);
- ptep = (pt_entry_t *)phystokv(pa);
- eptep = ptep + NPTES;
- for (; ptep < eptep; ptep += ptes_per_vm_page ) {
- if (*ptep & ALPHA_PTE_VALID)
- pmap_page_table_page_dealloc(pte_to_pa(*ptep));
- }
- pmap_page_table_page_dealloc(pa);
- }
- }
- pmap_tlbpid_destroy(p->pid, FALSE);
-
-#if 0
- kmem_free(kernel_map, (vm_offset_t)p->dirbase, ALPHA_PGBYTES);
-#else
- pmap_page_table_page_dealloc(kvtophys(p->dirbase));
+ while (sva < eva) {
+ l3pte = PMAP_KERNEL_PTE(sva);
+ if (pmap_pte_v(l3pte)) {
+#ifdef DIAGNOSTIC
+ if (PAGE_IS_MANAGED(pmap_pte_pa(l3pte)) &&
+ pmap_pte_pv(l3pte) == 0)
+ panic("pmap_remove: managed page "
+ "without PG_PVLIST for 0x%lx",
+ sva);
#endif
- p->dirbase = NULL;
-
-out:
- if (DOVPDB(PDB_FOLLOW|PDB_RELEASE))
- printf("pmap_release: leaving\n");
-}
+ needisync |= pmap_remove_mapping(pmap, sva,
+ l3pte, TRUE, cpu_id, NULL);
+ }
+ sva += PAGE_SIZE;
+ }
-/*
- * Add a reference to the specified pmap.
- */
+ PMAP_UNLOCK(pmap);
+ PMAP_MAP_TO_HEAD_UNLOCK();
-void pmap_reference(p)
- register pmap_t p;
-{
- spl_t s;
- if (p != PMAP_NULL) {
- SPLVM(s);
- simple_lock(&p->lock);
- p->ref_count++;
- simple_unlock(&p->lock);
- SPLX(s);
+ if (needisync)
+ PMAP_SYNC_ISTREAM_KERNEL();
+ return;
}
-}
-
-/*
- * Remove a range of hardware page-table entries.
- * The entries given are the first (inclusive)
- * and last (exclusive) entries for the VM pages.
- * The virtual address is the va for the first pte.
- *
- * The pmap must be locked.
- * If the pmap is not the kernel pmap, the range must lie
- * entirely within one pte-page. This is NOT checked.
- * Assumes that the pte-page exists.
- */
-/* static */
-void pmap_remove_range(pmap, va, spte, epte)
- pmap_t pmap;
- vm_offset_t va;
- pt_entry_t *spte;
- pt_entry_t *epte;
-{
- register pt_entry_t *cpte;
- int num_removed, num_unwired;
- int pai;
- vm_offset_t pa;
+#ifdef DIAGNOSTIC
+ if (sva > VM_MAXUSER_ADDRESS || eva > VM_MAXUSER_ADDRESS)
+ panic("pmap_remove: (0x%lx - 0x%lx) user pmap, kernel "
+ "address range", sva, eva);
+#endif
- num_removed = 0;
- num_unwired = 0;
+ PMAP_MAP_TO_HEAD_LOCK();
+ PMAP_LOCK(pmap);
- for (cpte = spte; cpte < epte;
- cpte += ptes_per_vm_page, va += PAGE_SIZE) {
+ /*
+ * If we're already referencing the kernel_lev1map, there
+ * is no work for us to do.
+ */
+ if (pmap->pm_lev1map == kernel_lev1map)
+ goto out;
- if (*cpte == 0)
- continue;
- pa = pte_to_pa(*cpte);
+ saved_l1pte = l1pte = pmap_l1pte(pmap, sva);
- num_removed++;
- if (*cpte & ALPHA_PTE_WIRED)
- num_unwired++;
+ /*
+ * Add a reference to the L1 table to it won't get
+ * removed from under us.
+ */
+ pmap_physpage_addref(saved_l1pte);
- if (!valid_page(pa)) {
+ for (; sva < eva; sva = l1eva, l1pte++) {
+ l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE;
+ if (pmap_pte_v(l1pte)) {
+ saved_l2pte = l2pte = pmap_l2pte(pmap, sva, l1pte);
- /*
- * Outside range of managed physical memory.
- * Just remove the mappings.
- */
- register int i = ptes_per_vm_page;
- register pt_entry_t *lpte = cpte;
- do {
- *lpte = 0;
- lpte++;
- } while (--i > 0);
- continue;
- }
-
- pai = pa_index(pa);
- LOCK_PVH(pai);
-
- /*
- * Get the modify and reference bits.
- */
- {
- register int i;
- register pt_entry_t *lpte;
-
- i = ptes_per_vm_page;
- lpte = cpte;
- do {
- pmap_phys_attributes[pai] |= pte_get_attributes(lpte);
- *lpte = 0;
- lpte++;
- } while (--i > 0);
- }
-
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- */
- {
- register pv_entry_t pv_h, prev, cur;
-
- pv_h = pai_to_pvh(pai);
- if (pv_h->pmap == PMAP_NULL) {
- panic("pmap_remove: null pv_list!");
- }
- if (pv_h->va == va && pv_h->pmap == pmap) {
- /*
- * Header is the pv_entry. Copy the next one
- * to header and free the next one (we cannot
- * free the header)
- */
- cur = pv_h->next;
- if (cur != PV_ENTRY_NULL) {
- *pv_h = *cur;
- PV_FREE(cur);
- }
- else {
- pv_h->pmap = PMAP_NULL;
- }
- }
- else {
- cur = pv_h;
- do {
- prev = cur;
- if ((cur = prev->next) == PV_ENTRY_NULL) {
- panic("pmap-remove: mapping not in pv_list!");
+ /*
+ * Add a reference to the L2 table so it won't
+ * get removed from under us.
+ */
+ pmap_physpage_addref(saved_l2pte);
+
+ for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) {
+ l2eva =
+ alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE;
+ if (pmap_pte_v(l2pte)) {
+ saved_l3pte = l3pte =
+ pmap_l3pte(pmap, sva, l2pte);
+
+ /*
+ * Add a reference to the L3 table so
+ * it won't get removed from under us.
+ */
+ pmap_physpage_addref(saved_l3pte);
+
+ /*
+ * Remember this sva; if the L3 table
+ * gets removed, we need to invalidate
+ * the VPT TLB entry for it.
+ */
+ vptva = sva;
+
+ for (; sva < l2eva && sva < eva;
+ sva += PAGE_SIZE, l3pte++) {
+ if (pmap_pte_v(l3pte)) {
+ needisync |=
+ pmap_remove_mapping(
+ pmap, sva,
+ l3pte, TRUE,
+ cpu_id, NULL);
+ }
+ }
+
+ /*
+ * Remove the reference to the L3
+ * table that we added above. This
+ * may free the L3 table.
+ */
+ pmap_l3pt_delref(pmap, vptva,
+ saved_l3pte, cpu_id, NULL);
+ }
}
- } while (cur->va != va || cur->pmap != pmap);
- prev->next = cur->next;
- PV_FREE(cur);
+
+ /*
+ * Remove the reference to the L2 table that we
+ * added above. This may free the L2 table.
+ */
+ pmap_l2pt_delref(pmap, l1pte, saved_l2pte, cpu_id);
}
- UNLOCK_PVH(pai);
- }
}
/*
- * Update the counts
+ * Remove the reference to the L1 table that we added above.
+ * This may free the L1 table.
*/
- pmap->stats.resident_count -= num_removed;
- pmap->stats.wired_count -= num_unwired;
-}
+ pmap_l1pt_delref(pmap, saved_l1pte, cpu_id);
-/*
- * One level up, iterate an operation on the
- * virtual range va..eva, mapped by the 1st
- * level pte spte.
- */
+ if (needisync)
+ PMAP_SYNC_ISTREAM_USER(pmap);
-/* static */
-void pmap_iterate_lev2(pmap, s, e, spte, operation)
- pmap_t pmap;
- vm_offset_t s, e;
- pt_entry_t *spte;
- void (*operation)();
-{
- vm_offset_t l;
- pt_entry_t *epte;
- pt_entry_t *cpte;
-
-if (pmap_debug > 1) db_printf("iterate2(%x,%x,%x)", s, e, spte);
- while (s < e) {
- /* at most 1 << 23 virtuals per iteration */
- l = roundup(s+1,PDE2_MAPPED_SIZE);
- if (l > e)
- l = e;
- if (*spte & ALPHA_PTE_VALID) {
- register int n;
- cpte = (pt_entry_t *) ptetokv(*spte);
- n = pte3num(l);
- if (n == 0) n = SEG_MASK + 1;/* l == next segment up */
- epte = &cpte[n];
- cpte = &cpte[pte3num(s)];
- assert(epte >= cpte);
-if (pmap_debug > 1) db_printf(" [%x %x, %x %x]", s, l, cpte, epte);
- operation(pmap, s, cpte, epte);
- }
- s = l;
- spte++;
- }
-if (pmap_debug > 1) db_printf("\n");
+ out:
+ PMAP_UNLOCK(pmap);
+ PMAP_MAP_TO_HEAD_UNLOCK();
}
+/*
+ * pmap_page_protect: [ INTERFACE ]
+ *
+ * Lower the permission for all mappings to a given page to
+ * the permissions specified.
+ */
void
-pmap_make_readonly(pmap, va, spte, epte)
- pmap_t pmap;
- vm_offset_t va;
- pt_entry_t *spte;
- pt_entry_t *epte;
+pmap_page_protect(pg, prot)
+ struct vm_page *pg;
+ vm_prot_t prot;
{
- while (spte < epte) {
- if (*spte & ALPHA_PTE_VALID)
- *spte &= ~ALPHA_PTE_WRITE;
- spte++;
+ pmap_t pmap;
+ struct pv_head *pvh;
+ pv_entry_t pv, nextpv;
+ boolean_t needisync = FALSE;
+ long cpu_id = cpu_number();
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+#ifdef DEBUG
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+ (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
+ printf("pmap_page_protect(%p, %x)\n", pg, prot);
+#endif
+
+ /*
+ * Even though we don't change the mapping of the page,
+ * we still flush the I-cache if VM_PROT_EXECUTE is set
+ * because we might be "adding" execute permissions to
+ * a previously non-execute page.
+ */
+
+ switch (prot) {
+ case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
+ alpha_pal_imb(); /* XXX XXX XXX */
+ case VM_PROT_READ|VM_PROT_WRITE:
+ return;
+ /* copy_on_write */
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ alpha_pal_imb(); /* XXX XXX XXX */
+ case VM_PROT_READ:
+ pvh = pa_to_pvh(pa);
+ PMAP_HEAD_TO_MAP_LOCK();
+ simple_lock(&pvh->pvh_slock);
+/* XXX */ pmap_changebit(pa, 0, ~(PG_KWE | PG_UWE), cpu_id);
+ simple_unlock(&pvh->pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
+ return;
+ /* remove_all */
+ default:
+ break;
+ }
+
+ pvh = pa_to_pvh(pa);
+ PMAP_HEAD_TO_MAP_LOCK();
+ simple_lock(&pvh->pvh_slock);
+ for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL; pv = nextpv) {
+ nextpv = LIST_NEXT(pv, pv_list);
+ pmap = pv->pv_pmap;
+
+ PMAP_LOCK(pmap);
+#ifdef DEBUG
+ if (pmap_pte_v(pmap_l2pte(pv->pv_pmap, pv->pv_va, NULL)) == 0 ||
+ pmap_pte_pa(pv->pv_pte) != pa)
+ panic("pmap_page_protect: bad mapping");
+#endif
+ if (pmap_pte_w(pv->pv_pte) == 0)
+ needisync |= pmap_remove_mapping(pmap,
+ pv->pv_va, pv->pv_pte, FALSE, cpu_id, NULL);
+#ifdef DEBUG
+ else {
+ if (pmapdebug & PDB_PARANOIA) {
+ printf("%s wired mapping for %lx not removed\n",
+ "pmap_page_protect:", pa);
+ printf("vm wire count %d\n",
+ PHYS_TO_VM_PAGE(pa)->wire_count);
+ }
+ }
+#endif
+ PMAP_UNLOCK(pmap);
}
+
+ if (needisync)
+ alpha_pal_imb();
+
+ simple_unlock(&pvh->pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
}
/*
- * Remove the given range of addresses
- * from the specified map.
+ * pmap_protect: [ INTERFACE ]
*
- * It is assumed that the start and end are properly
- * rounded to the hardware page size.
+ * Set the physical protection on the specified range of this map
+ * as requested.
*/
-vm_offset_t pmap_suspect_vs, pmap_suspect_ve;
-
-
-void pmap_remove(map, s, e)
- pmap_t map;
- vm_offset_t s, e;
+void
+pmap_protect(pmap, sva, eva, prot)
+ pmap_t pmap;
+ vaddr_t sva, eva;
+ vm_prot_t prot;
{
- spl_t spl;
- register pt_entry_t *pde;
- register pt_entry_t *spte;
- vm_offset_t l;
+ pt_entry_t *l1pte, *l2pte, *l3pte, bits;
+ boolean_t isactive;
+ boolean_t hadasm;
+ vaddr_t l1eva, l2eva;
+ long cpu_id = cpu_number();
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
+ printf("pmap_protect(%p, %lx, %lx, %x)\n",
+ pmap, sva, eva, prot);
+#endif
- if (map == PMAP_NULL)
+ if (pmap == NULL)
return;
-if (pmap_debug || ((s > pmap_suspect_vs) && (s < pmap_suspect_ve)))
-db_printf("[%d]pmap_remove(%x,%x,%x)\n", cpu_number(), map, s, e);
- PMAP_READ_LOCK(map, spl);
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
- /*
- * Invalidate the translation buffer first
- */
- PMAP_UPDATE_TLBS(map, s, e);
+ if (prot & VM_PROT_WRITE)
+ return;
- pde = pmap_pde(map, s);
- while (s < e) {
- /* at most (1 << 33) virtuals per iteration */
- l = roundup(s+1, PDE_MAPPED_SIZE);
- if (l > e)
- l = e;
- if (*pde & ALPHA_PTE_VALID) {
- spte = (pt_entry_t *)ptetokv(*pde);
- spte = &spte[pte2num(s)];
- pmap_iterate_lev2(map, s, l, spte, pmap_remove_range);
- }
- s = l;
- pde++;
+ PMAP_LOCK(pmap);
+
+ bits = pte_prot(pmap, prot);
+ isactive = PMAP_ISACTIVE(pmap, cpu_id);
+
+ l1pte = pmap_l1pte(pmap, sva);
+ for (; sva < eva; sva = l1eva, l1pte++) {
+ l1eva = alpha_trunc_l1seg(sva) + ALPHA_L1SEG_SIZE;
+ if (pmap_pte_v(l1pte)) {
+ l2pte = pmap_l2pte(pmap, sva, l1pte);
+ for (; sva < l1eva && sva < eva; sva = l2eva, l2pte++) {
+ l2eva =
+ alpha_trunc_l2seg(sva) + ALPHA_L2SEG_SIZE;
+ if (pmap_pte_v(l2pte)) {
+ l3pte = pmap_l3pte(pmap, sva, l2pte);
+ for (; sva < l2eva && sva < eva;
+ sva += PAGE_SIZE, l3pte++) {
+ if (pmap_pte_v(l3pte) &&
+ pmap_pte_prot_chg(l3pte,
+ bits)) {
+ hadasm =
+ (pmap_pte_asm(l3pte)
+ != 0);
+ pmap_pte_set_prot(l3pte,
+ bits);
+ PMAP_INVALIDATE_TLB(
+ pmap, sva, hadasm,
+ isactive, cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(
+ pmap, sva,
+ hadasm ? PG_ASM : 0);
+#endif
+ }
+ }
+ }
+ }
+ }
}
- PMAP_READ_UNLOCK(map, spl);
+ if (prot & VM_PROT_EXECUTE)
+ PMAP_SYNC_ISTREAM(pmap);
+
+ PMAP_UNLOCK(pmap);
}
/*
- * Routine: pmap_page_protect
+ * pmap_enter: [ INTERFACE ]
+ *
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
*
- * Function:
- * Lower the permission for all mappings to a given
- * page.
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * Note: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
*/
-vm_offset_t pmap_suspect_phys;
-
-void pmap_page_protect(phys, prot)
- vm_offset_t phys;
- vm_prot_t prot;
+void
+pmap_enter(pmap, va, pa, prot, wired, access_type)
+ pmap_t pmap;
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+ int wired;
+ vm_prot_t access_type;
{
- pv_entry_t pv_h, prev;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- int pai;
- register pmap_t pmap;
- spl_t spl;
- boolean_t remove;
+ boolean_t managed;
+ pt_entry_t *pte, npte, opte;
+ paddr_t opa;
+ boolean_t tflush = TRUE;
+ boolean_t hadasm = FALSE; /* XXX gcc -Wuninitialized */
+ boolean_t needisync;
+ boolean_t isactive;
+ long cpu_id = cpu_number();
+ int error;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
+ pmap, va, pa, prot, access_type);
+#endif
+ if (pmap == NULL)
+ return;
-if (pmap_debug || (phys == pmap_suspect_phys)) db_printf("pmap_page_protect(%x,%x)\n", phys, prot);
+ managed = PAGE_IS_MANAGED(pa);
+ isactive = PMAP_ISACTIVE(pmap, cpu_id);
+ needisync = isactive && (prot & VM_PROT_EXECUTE) != 0;
- assert(phys != vm_page_fictitious_addr);
- if (!valid_page(phys)) {
- /*
- * Not a managed page.
- */
- return;
- }
+ PMAP_MAP_TO_HEAD_LOCK();
+ PMAP_LOCK(pmap);
- /*
- * Determine the new protection.
- */
- switch (prot) {
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
- remove = FALSE;
- break;
- case VM_PROT_ALL:
- return; /* nothing to do */
- default:
- remove = TRUE;
- break;
- }
+ if (pmap == pmap_kernel()) {
+#ifdef DIAGNOSTIC
+ /*
+ * Sanity check the virtual address.
+ */
+ if (va < VM_MIN_KERNEL_ADDRESS)
+ panic("pmap_enter: kernel pmap, invalid va 0x%lx", va);
+#endif
+ pte = PMAP_KERNEL_PTE(va);
+ } else {
+ pt_entry_t *l1pte, *l2pte;
- /*
- * Lock the pmap system first, since we will be changing
- * several pmaps.
- */
+#ifdef DIAGNOSTIC
+ /*
+ * Sanity check the virtual address.
+ */
+ if (va >= VM_MAXUSER_ADDRESS)
+ panic("pmap_enter: user pmap, invalid va 0x%lx", va);
+#endif
- PMAP_WRITE_LOCK(spl);
+ /*
+ * If we're still referencing the kernel kernel_lev1map,
+ * create a new level 1 page table. A reference will be
+ * added to the level 1 table when the level 2 table is
+ * created.
+ */
+ if (pmap->pm_lev1map == kernel_lev1map) {
+ error = pmap_lev1map_create(pmap, cpu_id);
+ if (error != KERN_SUCCESS) {
+#ifdef notyet
+ if (flags & PMAP_CANFAIL)
+ return (error);
+#endif
+ panic("pmap_enter: unable to create lev1map");
+ }
+ }
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
+ /*
+ * Check to see if the level 1 PTE is valid, and
+ * allocate a new level 2 page table page if it's not.
+ * A reference will be added to the level 2 table when
+ * the level 3 table is created.
+ */
+ l1pte = pmap_l1pte(pmap, va);
+ if (pmap_pte_v(l1pte) == 0) {
+ pmap_physpage_addref(l1pte);
+ error = pmap_ptpage_alloc(pmap, l1pte, PGU_L2PT);
+ if (error != KERN_SUCCESS) {
+ pmap_l1pt_delref(pmap, l1pte, cpu_id);
+#ifdef notyet
+ if (flags & PMAP_CANFAIL)
+ return (error);
+#endif
+ panic("pmap_enter: unable to create L2 PT "
+ "page");
+ }
+ pmap->pm_nlev2++;
+#ifdef DEBUG
+ if (pmapdebug & PDB_PTPAGE)
+ printf("pmap_enter: new level 2 table at "
+ "0x%lx\n", pmap_pte_pa(l1pte));
+#endif
+ }
- /*
- * Walk down PV list, changing or removing all mappings.
- * We do not have to lock the pv_list because we have
- * the entire pmap system locked.
- */
- if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * Check to see if the level 2 PTE is valid, and
+ * allocate a new level 3 page table page if it's not.
+ * A reference will be added to the level 3 table when
+ * the mapping is validated.
+ */
+ l2pte = pmap_l2pte(pmap, va, l1pte);
+ if (pmap_pte_v(l2pte) == 0) {
+ pmap_physpage_addref(l2pte);
+ error = pmap_ptpage_alloc(pmap, l2pte, PGU_L3PT);
+ if (error != KERN_SUCCESS) {
+ pmap_l2pt_delref(pmap, l1pte, l2pte, cpu_id);
+#ifdef notyet
+ if (flags & PMAP_CANFAIL)
+ return (error);
+#endif
+ panic("pmap_enter: unable to create L3 PT "
+ "page");
+ }
+ pmap->pm_nlev3++;
+#ifdef DEBUG
+ if (pmapdebug & PDB_PTPAGE)
+ printf("pmap_enter: new level 3 table at "
+ "0x%lx\n", pmap_pte_pa(l2pte));
+#endif
+ }
- prev = pv_e = pv_h;
- do {
- pmap = pv_e->pmap;
/*
- * Lock the pmap to block pmap_extract and similar routines.
+ * Get the PTE that will map the page.
*/
- simple_lock(&pmap->lock);
+ pte = pmap_l3pte(pmap, va, l2pte);
+ }
- {
- register vm_offset_t va;
+ /* Remember all of the old PTE; used for TBI check later. */
+ opte = *pte;
- va = pv_e->va;
- pte = pmap_pte(pmap, va);
+ /*
+ * Check to see if the old mapping is valid. If not, validate the
+ * new one immediately.
+ */
+ if (pmap_pte_v(pte) == 0) {
+ /*
+ * No need to invalidate the TLB in this case; an invalid
+ * mapping won't be in the TLB, and a previously valid
+ * mapping would have been flushed when it was invalidated.
+ */
+ tflush = FALSE;
- /*
- * Consistency checks.
- */
- /* assert(*pte & ALPHA_PTE_VALID); XXX */
- /* assert(pte_to_phys(*pte) == phys); */
+ /*
+ * No need to synchronize the I-stream, either, for basically
+ * the same reason.
+ */
+ needisync = FALSE;
- /*
- * Invalidate TLBs for all CPUs using this mapping.
- */
- PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ if (pmap != pmap_kernel()) {
+ /*
+ * New mappings gain a reference on the level 3
+ * table.
+ */
+ pmap_physpage_addref(pte);
}
+ goto validate_enterpv;
+ }
+
+ opa = pmap_pte_pa(pte);
+ hadasm = (pmap_pte_asm(pte) != 0);
+ if (opa == pa) {
/*
- * Remove the mapping if new protection is NONE
- * or if write-protecting a kernel mapping.
+ * Mapping has not changed; must be a protection or
+ * wiring change.
*/
- if (remove || pmap == kernel_pmap) {
- /*
- * Remove the mapping, collecting any modify bits.
- */
- if (*pte & ALPHA_PTE_WIRED)
- panic("pmap_remove_all removing a wired page");
-
- {
- register int i = ptes_per_vm_page;
-
- do {
- pmap_phys_attributes[pai] |= pte_get_attributes(pte);
- *pte++ = 0;
- } while (--i > 0);
- }
-
- pmap->stats.resident_count--;
-
- /*
- * Remove the pv_entry.
- */
- if (pv_e == pv_h) {
- /*
- * Fix up head later.
- */
- pv_h->pmap = PMAP_NULL;
- }
- else {
+ if (pmap_pte_w_chg(pte, wired ? PG_WIRED : 0)) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("pmap_enter: wiring change -> %d\n",
+ wired);
+#endif
/*
- * Delete this entry.
+ * Adjust the wiring count.
*/
- prev->next = pv_e->next;
- PV_FREE(pv_e);
- }
- }
- else {
- /*
- * Write-protect.
- */
- register int i = ptes_per_vm_page;
-
- do {
- *pte &= ~ALPHA_PTE_WRITE;
- pte++;
- } while (--i > 0);
-
- /*
- * Advance prev.
- */
- prev = pv_e;
+ if (wired)
+ PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1);
+ else
+ PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
}
- simple_unlock(&pmap->lock);
+ /*
+ * Set the PTE.
+ */
+ goto validate;
+ }
- } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+ /*
+ * The mapping has changed. We need to invalidate the
+ * old mapping before creating the new one.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("pmap_enter: removing old mapping 0x%lx\n", va);
+#endif
+ if (pmap != pmap_kernel()) {
+ /*
+ * Gain an extra reference on the level 3 table.
+ * pmap_remove_mapping() will delete a reference,
+ * and we don't want the table to be erroneously
+ * freed.
+ */
+ pmap_physpage_addref(pte);
+ }
+ needisync |= pmap_remove_mapping(pmap, va, pte, TRUE, cpu_id, NULL);
- /*
- * If pv_head mapping was removed, fix it up.
- */
- if (pv_h->pmap == PMAP_NULL) {
- pv_e = pv_h->next;
- if (pv_e != PV_ENTRY_NULL) {
- *pv_h = *pv_e;
- PV_FREE(pv_e);
+ validate_enterpv:
+ /*
+ * Enter the mapping into the pv_table if appropriate.
+ */
+ if (managed) {
+ error = pmap_pv_enter(pmap, pa, va, pte, TRUE);
+ if (error != KERN_SUCCESS) {
+ pmap_l3pt_delref(pmap, va, pte, cpu_id, NULL);
+#ifdef notyet
+ if (flags & PMAP_CANFAIL)
+ return (error);
+#endif
+ panic("pmap_enter: unable to enter mapping in PV "
+ "table");
}
- }
}
- PMAP_WRITE_UNLOCK(spl);
-}
-
-/*
- * Set the physical protection on the
- * specified range of this map as requested.
- * Will not increase permissions.
- */
-void pmap_protect(map, s, e, prot)
- pmap_t map;
- vm_offset_t s, e;
- vm_prot_t prot;
-{
- register pt_entry_t *pde;
- register pt_entry_t *spte, *epte;
- vm_offset_t l;
- spl_t spl;
-
- if (map == PMAP_NULL)
- return;
+ /*
+ * Increment counters.
+ */
+ PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1);
+ if (wired)
+ PMAP_STAT_INCR(pmap->pm_stats.wired_count, 1);
-if (pmap_debug || ((s > pmap_suspect_vs) && (s < pmap_suspect_ve)))
-db_printf("[%d]pmap_protect(%x,%x,%x,%x)\n", cpu_number(), map, s, e, prot);
+ validate:
/*
- * Determine the new protection.
+ * Build the new PTE.
*/
- switch (prot) {
- case VM_PROT_READ|VM_PROT_EXECUTE:
- alphacache_Iflush();
- case VM_PROT_READ:
- break;
- case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
- alphacache_Iflush();
- case VM_PROT_READ|VM_PROT_WRITE:
- return; /* nothing to do */
- default:
- pmap_remove(map, s, e);
- return;
+ npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V;
+ if (managed) {
+ struct pv_head *pvh = pa_to_pvh(pa);
+ int attrs;
+
+#ifdef DIAGNOSTIC
+ if ((access_type & VM_PROT_ALL) & ~prot)
+ panic("pmap_enter: access type exceeds prot");
+#endif
+ simple_lock(&pvh->pvh_slock);
+ if (access_type & VM_PROT_WRITE)
+ pvh->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
+ else if (access_type & VM_PROT_ALL)
+ pvh->pvh_attrs |= PGA_REFERENCED;
+ attrs = pvh->pvh_attrs;
+ simple_unlock(&pvh->pvh_slock);
+
+ /*
+ * Set up referenced/modified emulation for new mapping.
+ */
+ if ((attrs & PGA_REFERENCED) == 0)
+ npte |= PG_FOR | PG_FOW | PG_FOE;
+ else if ((attrs & PGA_MODIFIED) == 0)
+ npte |= PG_FOW;
+
+ /*
+ * Mapping was entered on PV list.
+ */
+ npte |= PG_PVLIST;
}
+ if (wired)
+ npte |= PG_WIRED;
+#ifdef DEBUG
+ if (pmapdebug & PDB_ENTER)
+ printf("pmap_enter: new pte = 0x%lx\n", npte);
+#endif
- SPLVM(spl);
- simple_lock(&map->lock);
+ /*
+ * If the PALcode portion of the new PTE is the same as the
+ * old PTE, no TBI is necessary.
+ */
+ if (PG_PALCODE(opte) == PG_PALCODE(npte))
+ tflush = FALSE;
/*
- * Invalidate the translation buffer first
+ * Set the new PTE.
*/
- PMAP_UPDATE_TLBS(map, s, e);
+ *pte = npte;
- pde = pmap_pde(map, s);
- while (s < e) {
- /* at most (1 << 33) virtuals per iteration */
- l = roundup(s+1, PDE_MAPPED_SIZE);
- if (l > e)
- l = e;
- if (*pde & ALPHA_PTE_VALID) {
- spte = (pt_entry_t *)ptetokv(*pde);
- spte = &spte[pte2num(s)];
- pmap_iterate_lev2(map, s, l, spte, pmap_make_readonly);
- }
- s = l;
- pde++;
+ /*
+ * Invalidate the TLB entry for this VA and any appropriate
+ * caches.
+ */
+ if (tflush) {
+ PMAP_INVALIDATE_TLB(pmap, va, hadasm, isactive, cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pmap, va, hadasm ? PG_ASM : 0);
+#endif
}
+ if (needisync)
+ PMAP_SYNC_ISTREAM(pmap);
+
+ PMAP_UNLOCK(pmap);
+ PMAP_MAP_TO_HEAD_UNLOCK();
- simple_unlock(&map->lock);
- SPLX(spl);
+#ifdef notyet
+ return (KERN_SUCCESS);
+#endif
}
/*
- * Insert the given physical page (p) at
- * the specified virtual address (v) in the
- * target physical map with the protection requested.
+ * pmap_kenter_pa: [ INTERFACE ]
*
- * If specified, the page will be wired down, meaning
- * that the related pte can not be reclaimed.
+ * Enter a va -> pa mapping into the kernel pmap without any
+ * physical->virtual tracking.
*
- * NB: This is the only routine which MAY NOT lazy-evaluate
- * or lose information. That is, this routine must actually
- * insert this page into the given map NOW.
+ * Note: no locking is necessary in this function.
*/
void
-pmap_enter(pmap, v, pa, prot, wired, access_type)
- register pmap_t pmap;
- vm_offset_t v;
- register vm_offset_t pa;
- vm_prot_t prot;
- boolean_t wired;
- vm_prot_t access_type;
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
{
- register pt_entry_t *pte;
- register pv_entry_t pv_h;
- register int i, pai;
- pv_entry_t pv_e;
- pt_entry_t template;
- spl_t spl;
- vm_offset_t old_pa;
-
- if (DOPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, %d)\n",
- pmap, v, pa, prot, wired);
-
- assert(pa != vm_page_fictitious_addr);
-if (pmap_debug || ((v > pmap_suspect_vs) && (v < pmap_suspect_ve)))
-db_printf("[%d]pmap_enter(%x(%d), %x, %x, %x, %x)\n", cpu_number(), pmap, pmap->pid, v, pa, prot, wired);
- if (pmap == PMAP_NULL)
- goto out;
- assert(!pmap_max_asn || pmap->pid >= 0);
+ pt_entry_t *pte, npte;
+ long cpu_id = cpu_number();
+ boolean_t needisync = FALSE;
+ pmap_t pmap = pmap_kernel();
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_kenter_pa(%lx, %lx, %x)\n",
+ va, pa, prot);
+#endif
+#ifdef DIAGNOSTIC
/*
- * Must allocate a new pvlist entry while we're unlocked;
- * zalloc may cause pageout (which will lock the pmap system).
- * If we determine we need a pvlist entry, we will unlock
- * and allocate one. Then we will retry, throwing away
- * the allocated entry later (if we no longer need it).
+ * Sanity check the virtual address.
*/
- pv_e = PV_ENTRY_NULL;
-Retry:
- PMAP_READ_LOCK(pmap, spl);
+ if (va < VM_MIN_KERNEL_ADDRESS)
+ panic("pmap_kenter_pa: kernel pmap, invalid va 0x%lx", va);
+#endif
- /*
- * Expand pmap to include this pte. Assume that
- * pmap is always expanded to include enough hardware
- * pages to map one VM page.
- */
+ pte = PMAP_KERNEL_PTE(va);
- while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
- /*
- * Must unlock to expand the pmap.
- */
- PMAP_READ_UNLOCK(pmap, spl);
-
- pmap_expand(pmap, v);
-
- PMAP_READ_LOCK(pmap, spl);
- }
-
- /*
- * Special case if the physical page is already mapped
- * at this address.
- */
- old_pa = pte_to_pa(*pte);
- if (*pte && old_pa == pa) {
- /*
- * May be changing its wired attribute or protection
- */
-
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: same PA already mapped there (0x%lx)\n",
- *pte);
-
- if (wired && !(*pte & ALPHA_PTE_WIRED))
- pmap->stats.wired_count++;
- else if (!wired && (*pte & ALPHA_PTE_WIRED))
- pmap->stats.wired_count--;
-
- pte_template(pmap,template,pa,prot);
- if (pmap == kernel_pmap)
- template |= ALPHA_PTE_GLOBAL;
- if (wired)
- template |= ALPHA_PTE_WIRED;
- PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
- i = ptes_per_vm_page;
- do {
- template |= (*pte & ALPHA_PTE_MOD);
- *pte = template;
- pte++;
- pte_increment_pa(template);
- } while (--i > 0);
- }
- else {
+ if (pmap_pte_v(pte) == 0)
+ PMAP_STAT_INCR(pmap->pm_stats.resident_count, 1);
+ if (pmap_pte_w(pte) == 0)
+ PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
- /*
- * Remove old mapping from the PV list if necessary.
- */
- if (*pte) {
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: removing old PTE (0x%lx)\n", *pte);
+ if ((prot & VM_PROT_EXECUTE) != 0 || pmap_pte_exec(pte))
+ needisync = TRUE;
- /*
- * Invalidate the translation buffer,
- * then remove the mapping.
- */
- PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ /*
+ * Build the new PTE.
+ */
+ npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap_kernel(), prot) |
+ PG_V | PG_WIRED;
- /*
- * Don't free the pte page if removing last
- * mapping - we will immediately replace it.
- */
- pmap_remove_range(pmap, v, pte,
- pte + ptes_per_vm_page);
- }
+ /*
+ * Set the new PTE.
+ */
+ *pte = npte;
- if (valid_page(pa)) {
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: valid page\n");
+ /*
+ * Invalidate the TLB entry for this VA and any appropriate
+ * caches.
+ */
+ PMAP_INVALIDATE_TLB(pmap, va, TRUE, TRUE, cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pmap, va, PG_ASM);
+#endif
+ if (needisync)
+ PMAP_SYNC_ISTREAM_KERNEL();
+}
- /*
- * Enter the mapping in the PV list for this
- * physical page.
- */
+/*
+ * pmap_kenter_pgs: [ INTERFACE ]
+ *
+ * Enter a va -> pa mapping for the array of vm_page's into the
+ * kernel pmap without any physical->virtual tracking, starting
+ * at address va, for npgs pages.
+ *
+ * Note: no locking is necessary in this function.
+ */
+void
+pmap_kenter_pgs(va, pgs, npgs)
+ vaddr_t va;
+ vm_page_t *pgs;
+ int npgs;
+{
+ int i;
- pai = pa_index(pa);
- LOCK_PVH(pai);
- pv_h = pai_to_pvh(pai);
-
- if (pv_h->pmap == PMAP_NULL) {
- /*
- * No mappings yet
- */
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: first mapping\n");
- pv_h->va = v;
- pv_h->pmap = pmap;
- pv_h->next = PV_ENTRY_NULL;
- if (prot & VM_PROT_EXECUTE)
- alphacache_Iflush();
- }
- else {
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: second+ mapping\n");
-
-#if DEBUG
- {
- /* check that this mapping is not already there */
- pv_entry_t e = pv_h;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap && e->va == v)
- panic("pmap_enter: already in pv_list");
- e = e->next;
- }
- }
-#endif /* DEBUG */
-
- /*
- * Add new pv_entry after header.
- */
- if (pv_e == PV_ENTRY_NULL) {
- pv_e = pmap_alloc_pv();
-#if 0
- PV_ALLOC(pv_e);
- if (pv_e == PV_ENTRY_NULL) {
- UNLOCK_PVH(pai);
- PMAP_READ_UNLOCK(pmap, spl);
-
- /*
- * Refill from zone.
- */
- pv_e = (pv_entry_t) zalloc(pv_list_zone);
- goto Retry;
- }
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_kenter_pgs(%lx, %p, %d)\n",
+ va, pgs, npgs);
#endif
- }
- pv_e->va = v;
- pv_e->pmap = pmap;
- pv_e->next = pv_h->next;
- pv_h->next = pv_e;
- /*
- * Remember that we used the pvlist entry.
- */
- pv_e = PV_ENTRY_NULL;
- }
- UNLOCK_PVH(pai);
- }
-
- /*
- * And count the mapping.
- */
-
- pmap->stats.resident_count++;
- if (wired)
- pmap->stats.wired_count++;
-
- /*
- * Build a template to speed up entering -
- * only the pfn changes.
- */
- pte_template(pmap,template,pa,prot);
- if (pmap == kernel_pmap)
- template |= ALPHA_PTE_GLOBAL;
- if (wired)
- template |= ALPHA_PTE_WIRED;
- i = ptes_per_vm_page;
- do {
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: entering PTE 0x%lx at %p\n",
- template, pte);
- *pte = template;
- pte++;
- pte_increment_pa(template);
- } while (--i > 0);
- ALPHA_TBIA();
- }
-
- if (pv_e != PV_ENTRY_NULL) {
- PV_FREE(pv_e);
- }
-
- PMAP_READ_UNLOCK(pmap, spl);
-out:
- if (DOVPDB(PDB_FOLLOW|PDB_ENTER))
- printf("pmap_enter: done\n");
+
+ for (i = 0; i < npgs; i++)
+ pmap_kenter_pa(va + (PAGE_SIZE * i),
+ VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE);
}
/*
- * Routine: pmap_change_wiring
- * Function: Change the wiring attribute for a map/virtual-address
- * pair.
- * In/out conditions:
- * The mapping must already exist in the pmap.
+ * pmap_kremove: [ INTERFACE ]
+ *
+ * Remove a mapping entered with pmap_kenter_pa() or pmap_kenter_pgs()
+ * starting at va, for size bytes (assumed to be page rounded).
*/
-void pmap_change_wiring(map, v, wired)
- register pmap_t map;
- vm_offset_t v;
- boolean_t wired;
+void
+pmap_kremove(va, size)
+ vaddr_t va;
+ vsize_t size;
{
- register pt_entry_t *pte;
- register int i;
- spl_t spl;
-
-if (pmap_debug) db_printf("pmap_change_wiring(%x,%x,%x)\n", map, v, wired);
- /*
- * We must grab the pmap system lock because we may
- * change a pte_page queue.
- */
- PMAP_READ_LOCK(map, spl);
-
- if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
- panic("pmap_change_wiring: pte missing");
-
- if (wired && !(*pte & ALPHA_PTE_WIRED)) {
- /*
- * wiring down mapping
- */
- map->stats.wired_count++;
- i = ptes_per_vm_page;
- do {
- *pte++ |= ALPHA_PTE_WIRED;
- } while (--i > 0);
- }
- else if (!wired && (*pte & ALPHA_PTE_WIRED)) {
- /*
- * unwiring mapping
- */
- map->stats.wired_count--;
- i = ptes_per_vm_page;
- do {
- *pte &= ~ALPHA_PTE_WIRED;
- } while (--i > 0);
- }
-
- PMAP_READ_UNLOCK(map, spl);
+ pt_entry_t *pte;
+ boolean_t needisync = FALSE;
+ long cpu_id = cpu_number();
+ pmap_t pmap = pmap_kernel();
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
+ printf("pmap_kremove(%lx, %lx)\n",
+ va, size);
+#endif
+
+#ifdef DIAGNOSTIC
+ if (va < VM_MIN_KERNEL_ADDRESS)
+ panic("pmap_kremove: user address");
+#endif
+
+ for (; size != 0; size -= PAGE_SIZE, va += PAGE_SIZE) {
+ pte = PMAP_KERNEL_PTE(va);
+ if (pmap_pte_v(pte)) {
+#ifdef DIAGNOSTIC
+ if (pmap_pte_pv(pte))
+ panic("pmap_kremove: PG_PVLIST mapping for "
+ "0x%lx", va);
+#endif
+ if (pmap_pte_exec(pte))
+ needisync = TRUE;
+
+ /* Zap the mapping. */
+ *pte = PG_NV;
+ PMAP_INVALIDATE_TLB(pmap, va, TRUE, TRUE, cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pmap, va, PG_ASM);
+#endif
+ /* Update stats. */
+ PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1);
+ PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
+ }
+ }
+
+ if (needisync)
+ PMAP_SYNC_ISTREAM_KERNEL();
}
/*
- * Routine: pmap_extract
- * Function:
- * Extract the physical page address associated
- * with the given map/virtual_address pair.
+ * pmap_unwire: [ INTERFACE ]
+ *
+ * Clear the wired attribute for a map/virtual-address pair.
+ *
+ * The mapping must already exist in the pmap.
*/
-
-vm_offset_t
-pmap_extract(pmap, va)
- register pmap_t pmap;
- vm_offset_t va;
+void
+pmap_change_wiring(pmap, va, wired)
+ pmap_t pmap;
+ vaddr_t va;
{
- register pt_entry_t *pte;
- register vm_offset_t pa;
- spl_t spl;
+ pt_entry_t *pte;
- if (DOPDB(PDB_FOLLOW|PDB_EXTRACT))
- printf("pmap_extract(%p, 0x%lx)\n", pmap, va);
+ if (wired)
+ panic("pmap_change_wiring");
- /*
- * Special translation for kernel addresses in
- * K0 space (directly mapped to physical addresses).
- */
- if (ISA_K0SEG(va)) {
- pa = K0SEG_TO_PHYS(va);
- if (DOPDB(PDB_FOLLOW|PDB_EXTRACT))
- printf("pmap_extract: returns 0x%lx\n", pa);
- goto out;
- }
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_unwire(%p, %lx)\n", pmap, va);
+#endif
+ if (pmap == NULL)
+ return;
- SPLVM(spl);
- simple_lock(&pmap->lock);
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t) 0;
- else if (!(*pte & ALPHA_PTE_VALID))
- pa = (vm_offset_t) 0;
- else
- pa = pte_to_pa(*pte) + (va & ALPHA_OFFMASK);
- simple_unlock(&pmap->lock);
+ PMAP_LOCK(pmap);
+
+ pte = pmap_l3pte(pmap, va, NULL);
+#ifdef DIAGNOSTIC
+ if (pte == NULL || pmap_pte_v(pte) == 0)
+ panic("pmap_unwire");
+#endif
/*
- * Beware: this puts back this thread in the cpus_active set
+ * If wiring actually changed (always?) clear the wire bit and
+ * update the wire count. Note that wiring is not a hardware
+ * characteristic so there is no need to invalidate the TLB.
*/
- SPLX(spl);
+ if (pmap_pte_w_chg(pte, 0)) {
+ pmap_pte_set_w(pte, FALSE);
+ PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
+ }
+#ifdef DIAGNOSTIC
+ else {
+ printf("pmap_unwire: wiring for pmap %p va 0x%lx "
+ "didn't change!\n", pmap, va);
+ }
+#endif
-out:
- if (DOPDB(PDB_FOLLOW|PDB_EXTRACT))
- printf("pmap_extract: returns 0x%lx\n", pa);
- return(pa);
+ PMAP_UNLOCK(pmap);
}
-vm_offset_t
-pmap_resident_extract(pmap, va)
- register pmap_t pmap;
- vm_offset_t va;
+/*
+ * pmap_extract: [ INTERFACE ]
+ *
+ * Extract the physical address associated with the given
+ * pmap/virtual address pair.
+ */
+paddr_t
+pmap_extract(pmap, va)
+ pmap_t pmap;
+ vaddr_t va;
{
- register pt_entry_t *pte;
- register vm_offset_t pa;
+ pt_entry_t *l1pte, *l2pte, *l3pte;
+ paddr_t pa = 0;
- /*
- * Special translation for kernel addresses in
- * K0 space (directly mapped to physical addresses).
- */
- if (ISA_K0SEG(va)) {
- pa = K0SEG_TO_PHYS(va);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_extract(%p, %lx) -> ", pmap, va);
+#endif
+ PMAP_LOCK(pmap);
+
+ l1pte = pmap_l1pte(pmap, va);
+ if (pmap_pte_v(l1pte) == 0)
goto out;
- }
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t) 0;
- else if (!(*pte & ALPHA_PTE_VALID))
- pa = (vm_offset_t) 0;
- else
- pa = pte_to_pa(*pte) + (va & ALPHA_OFFMASK);
+ l2pte = pmap_l2pte(pmap, va, l1pte);
+ if (pmap_pte_v(l2pte) == 0)
+ goto out;
+
+ l3pte = pmap_l3pte(pmap, va, l2pte);
+ if (pmap_pte_v(l3pte) == 0)
+ goto out;
-out:
- return(pa);
+ pa = pmap_pte_pa(l3pte) | (va & PGOFSET);
+ out:
+ PMAP_UNLOCK(pmap);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ if (pa)
+ printf("0x%lx\n", pa);
+ else
+ printf("failed\n");
+ }
+#endif
+ return (pa);
}
/*
- * Routine: pmap_expand
+ * pmap_copy: [ INTERFACE ]
*
- * Expands a pmap to be able to map the specified virtual address.
+ * Copy the mapping range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
*
- * Must be called with the pmap system and the pmap unlocked,
- * since these must be unlocked to use vm_page_grab.
- * Thus it must be called in a loop that checks whether the map
- * has been expanded enough.
+ * This routine is only advisory and need not do anything.
*/
void
-pmap_expand(map, v)
- register pmap_t map;
- register vm_offset_t v;
+pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vaddr_t dst_addr;
+ vsize_t len;
+ vaddr_t src_addr;
{
- pt_entry_t template;
- pt_entry_t *pdp;
- register vm_page_t m;
- register vm_offset_t pa;
- register int i;
- spl_t spl;
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
+ dst_pmap, src_pmap, dst_addr, len, src_addr);
+#endif
+}
- if (DOPDB(PDB_FOLLOW|PDB_EXPAND))
- printf("pmap_expand(%p, 0x%lx)\n", map, v);
+/*
+ * pmap_update: [ INTERFACE ]
+ *
+ * Require that all active physical maps contain no
+ * incorrect entries NOW, by processing any deferred
+ * pmap operations.
+ */
+void
+pmap_update()
+{
- /* Would have to go through all maps to add this page */
- if (map == kernel_pmap)
- panic("pmap_expand");
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_update()\n");
+#endif
/*
- * Allocate a VM page for the level 2 page table entries,
- * if not already there.
+ * Nothing to do; this pmap module does not defer any operations.
*/
- pdp = pmap_pde(map,v);
- if ((*pdp & ALPHA_PTE_VALID) == 0) {
- pt_entry_t *pte;
+}
- if (DOVPDB(PDB_FOLLOW|PDB_EXPAND))
- printf("pmap_expand: needs pde\n");
+/*
+ * pmap_collect: [ INTERFACE ]
+ *
+ * Garbage collects the physical map system for pages which are no
+ * longer used. Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but others may be
+ * collected.
+ *
+ * Called by the pageout daemon when pages are scarce.
+ */
+void
+pmap_collect(pmap)
+ pmap_t pmap;
+{
- pa = pmap_page_table_page_alloc();
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_collect(%p)\n", pmap);
+#endif
- /*
- * Re-lock the pmap and check that another thread has
- * not already allocated the page-table page. If it
- * has, discard the new page-table page (and try
- * again to make sure).
- */
- PMAP_READ_LOCK(map, spl);
+ /*
+ * This process is about to be swapped out; free all of
+ * the PT pages by removing the physical mappings for its
+ * entire address space. Note: pmap_remove() performs
+ * all necessary locking.
+ */
+ pmap_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+}
- if (*pdp & ALPHA_PTE_VALID) {
- /*
- * Oops...
- */
- PMAP_READ_UNLOCK(map, spl);
- pmap_page_table_page_dealloc(pa);
- return;
- }
+/*
+ * pmap_activate: [ INTERFACE ]
+ *
+ * Activate the pmap used by the specified process. This includes
+ * reloading the MMU context if the current process, and marking
+ * the pmap in use by the processor.
+ *
+ * Note: We may use only spin locks here, since we are called
+ * by a critical section in cpu_switch()!
+ */
+void
+pmap_activate(p)
+ struct proc *p;
+{
+ struct pmap *pmap = p->p_vmspace->vm_map.pmap;
+ long cpu_id = cpu_number();
- /*
- * Map the page.
- */
- i = ptes_per_vm_page;
- pte = pdp;
- pte_ktemplate(template,pa,VM_PROT_READ|VM_PROT_WRITE);
- if (map != kernel_pmap)
- template &= ~ALPHA_PTE_ASM;
- do {
- *pte = template;
- if (DOVPDB(PDB_FOLLOW|PDB_EXPAND))
- printf("pmap_expand: inserted l1 pte (0x%lx) at %p\n",
- template, pte);
- pte++;
- pte_increment_pa(template);
- } while (--i > 0);
- PMAP_READ_UNLOCK(map, spl);
- }
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_activate(%p)\n", p);
+#endif
/*
- * Allocate a level 3 page table.
+ * Mark the pmap in use by this processor.
*/
-
- pa = pmap_page_table_page_alloc();
+ atomic_setbits_ulong(&pmap->pm_cpus, (1UL << cpu_id));
/*
- * Re-lock the pmap and check that another thread has
- * not already allocated the page-table page. If it
- * has, we are done.
+ * Move the pmap to the end of the LRU list.
*/
- PMAP_READ_LOCK(map, spl);
+ simple_lock(&pmap_all_pmaps_slock);
+ TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
+ TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
+ simple_unlock(&pmap_all_pmaps_slock);
- if (pmap_pte(map, v) != PT_ENTRY_NULL) {
- PMAP_READ_UNLOCK(map, spl);
- pmap_page_table_page_dealloc(pa);
- return;
- }
+ PMAP_LOCK(pmap);
/*
- * Set the page directory entry for this page table.
- * If we have allocated more than one hardware page,
- * set several page directory entries.
+ * Allocate an ASN.
*/
- i = ptes_per_vm_page;
- pdp = (pt_entry_t *)ptetokv(*pdp);
- pdp = &pdp[pte2num(v)];
- pte_ktemplate(template,pa,VM_PROT_READ|VM_PROT_WRITE);
- if (map != kernel_pmap)
- template &= ~ALPHA_PTE_ASM;
- do {
- *pdp = template;
- if (DOVPDB(PDB_FOLLOW|PDB_EXPAND))
- printf("pmap_expand: inserted l2 pte (0x%lx) at %p\n",
- template, pdp);
- pdp++;
- pte_increment_pa(template);
- } while (--i > 0);
- PMAP_READ_UNLOCK(map, spl);
-
-out:
- if (DOVPDB(PDB_FOLLOW|PDB_EXPAND))
- printf("pmap_expand: leaving\n");
- return;
+ pmap_asn_alloc(pmap, cpu_id);
+
+ PMAP_ACTIVATE(pmap, p, cpu_id);
+
+ PMAP_UNLOCK(pmap);
}
/*
- * Copy the range specified by src_addr/len
- * from the source map to the range dst_addr/len
- * in the destination map.
+ * pmap_deactivate: [ INTERFACE ]
*
- * This routine is only advisory and need not do anything.
+ * Mark that the pmap used by the specified process is no longer
+ * in use by the processor.
+ *
+ * The comment above pmap_activate() wrt. locking applies here,
+ * as well. Note that we use only a single `atomic' operation,
+ * so no locking is necessary.
*/
-#if 0
-void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
- pmap_t dst_pmap;
- pmap_t src_pmap;
- vm_offset_t dst_addr;
- vm_size_t len;
- vm_offset_t src_addr;
+void
+pmap_deactivate(p)
+ struct proc *p;
{
-#ifdef lint
- dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
-#endif /* lint */
-}
+ struct pmap *pmap = p->p_vmspace->vm_map.pmap;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_deactivate(%p)\n", p);
#endif
+ /*
+ * Mark the pmap no longer in use by this processor.
+ */
+ atomic_clearbits_ulong(&pmap->pm_cpus, (1UL << cpu_number()));
+}
+
/*
- * Routine: pmap_collect
- * Function:
- * Garbage collects the physical map system for
- * pages which are no longer used.
- * Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but
- * others may be collected.
- * Usage:
- * Called by the pageout daemon when pages are scarce.
+ * pmap_zero_page: [ INTERFACE ]
+ *
+ * Zero the specified (machine independent) page by mapping the page
+ * into virtual memory and using bzero to clear its contents, one
+ * machine dependent page at a time.
+ *
+ * Note: no locking is necessary in this function.
*/
-void pmap_collect(p)
- pmap_t p;
+void
+pmap_zero_page(phys)
+ paddr_t phys;
{
-#if notyet
+ u_long *p0, *p1, *pend;
- register pt_entry_t *pdp, *ptp;
- pt_entry_t *eptp;
- vm_offset_t pa;
- spl_t spl;
- int wired;
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_zero_page(%lx)\n", phys);
+#endif
- if (p == PMAP_NULL)
- return;
-
- if (p == kernel_pmap)
- return;
+ p0 = (u_long *)ALPHA_PHYS_TO_K0SEG(phys);
+ pend = (u_long *)((u_long)p0 + PAGE_SIZE);
/*
- * Garbage collect map.
+ * Unroll the loop a bit, doing 16 quadwords per iteration.
+ * Do only 8 back-to-back stores, and alternate registers.
*/
- PMAP_READ_LOCK(p, spl);
- PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
- pmap_tlbpid_destroy(p->pid, FALSE);
-
- for (pdp = p->dirbase;
- pdp < pmap_pde(p,VM_MIN_KERNEL_ADDRESS);
- pdp += ptes_per_vm_page)
- {
- if (*pdp & ALPHA_PTE_VALID) {
-
- pa = pte_to_pa(*pdp);
- ptp = (pt_entry_t *)phystokv(pa);
- eptp = ptp + NPTES*ptes_per_vm_page;
-
- /*
- * If the pte page has any wired mappings, we cannot
- * free it.
- */
- wired = 0;
- {
- register pt_entry_t *ptep;
- for (ptep = ptp; ptep < eptp; ptep++) {
- if (*ptep & ALPHA_PTE_WIRED) {
- wired = 1;
- break;
- }
- }
- }
- if (!wired) {
- /*
- * Remove the virtual addresses mapped by this pte page.
- */
-..... pmap_remove_range_2(p,
- pdetova(pdp - p->dirbase),
- ptp,
- eptp);
-
- /*
- * Invalidate the page directory pointer.
- */
- {
- register int i = ptes_per_vm_page;
- register pt_entry_t *pdep = pdp;
- do {
- *pdep++ = 0;
- } while (--i > 0);
- }
-
- PMAP_READ_UNLOCK(p, spl);
-
- /*
- * And free the pte page itself.
- */
- {
- register vm_page_t m;
-
- vm_object_lock(pmap_object);
- m = vm_page_lookup(pmap_object, pa);
- if (m == VM_PAGE_NULL)
- panic("pmap_collect: pte page not in object");
- vm_page_lock_queues();
- vm_page_free(m);
- inuse_ptepages_count--;
- vm_page_unlock_queues();
- vm_object_unlock(pmap_object);
- }
-
- PMAP_READ_LOCK(p, spl);
- }
- }
- }
- PMAP_READ_UNLOCK(p, spl);
- return;
-#endif
+ do {
+ __asm __volatile(
+ "# BEGIN loop body\n"
+ " addq %2, (8 * 8), %1 \n"
+ " stq $31, (0 * 8)(%0) \n"
+ " stq $31, (1 * 8)(%0) \n"
+ " stq $31, (2 * 8)(%0) \n"
+ " stq $31, (3 * 8)(%0) \n"
+ " stq $31, (4 * 8)(%0) \n"
+ " stq $31, (5 * 8)(%0) \n"
+ " stq $31, (6 * 8)(%0) \n"
+ " stq $31, (7 * 8)(%0) \n"
+ " \n"
+ " addq %3, (8 * 8), %0 \n"
+ " stq $31, (0 * 8)(%1) \n"
+ " stq $31, (1 * 8)(%1) \n"
+ " stq $31, (2 * 8)(%1) \n"
+ " stq $31, (3 * 8)(%1) \n"
+ " stq $31, (4 * 8)(%1) \n"
+ " stq $31, (5 * 8)(%1) \n"
+ " stq $31, (6 * 8)(%1) \n"
+ " stq $31, (7 * 8)(%1) \n"
+ " # END loop body"
+ : "=r" (p0), "=r" (p1)
+ : "0" (p0), "1" (p1)
+ : "memory");
+ } while (p0 < pend);
}
/*
- * Routine: pmap_activate
- * Function:
- * Binds the given physical map to the given
- * processor, and returns a hardware map description.
+ * pmap_copy_page: [ INTERFACE ]
+ *
+ * Copy the specified (machine independent) page by mapping the page
+ * into virtual memory and using bcopy to copy the page, one machine
+ * dependent page at a time.
+ *
+ * Note: no locking is necessary in this function.
*/
void
-pmap_activate(pmap, hwpcb, cpu)
- register pmap_t pmap;
- struct alpha_pcb *hwpcb;
- int cpu;
+pmap_copy_page(src, dst)
+ paddr_t src, dst;
{
+ caddr_t s, d;
- if (DOPDB(PDB_FOLLOW|PDB_ACTIVATE))
- printf("pmap_activate(%p, %p, %d)\n", pmap, hwpcb, cpu);
-
-#if 0
- PMAP_ACTIVATE(my_pmap, th, my_cpu);
-#else
- if (DOVPDB(PDB_ACTIVATE))
- printf("pmap_activate: old pid = %d\n", pmap->pid);
- if (pmap->pid < 0) pmap_tlbpid_assign(pmap);
- hwpcb->apcb_asn = pmap->pid;
- hwpcb->apcb_ptbr = pmap->dirpfn;
- if (pmap != kernel_pmap)
- pmap->cpus_using = TRUE;
- if (DOVPDB(PDB_ACTIVATE))
- printf("pmap_activate: new pid = %d, new ptbr = 0x%lx\n",
- pmap->pid, pmap->dirpfn);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_copy_page(%lx, %lx)\n", src, dst);
#endif
+ s = (caddr_t)ALPHA_PHYS_TO_K0SEG(src);
+ d = (caddr_t)ALPHA_PHYS_TO_K0SEG(dst);
+ bcopy(s, d, PAGE_SIZE);
}
-/*
- * Routine: pmap_deactivate
- * Function:
- * Indicates that the given physical map is no longer
- * in use on the specified processor. (This is a macro
- * in pmap.h)
- */
void
-pmap_deactivate(pmap, hwpcb, cpu)
- register pmap_t pmap;
- struct alpha_pcb *hwpcb;
- int cpu;
+pmap_pageable(pmap, start, end, pageable)
+ pmap_t pmap;
+ vaddr_t start;
+ vaddr_t end;
+ boolean_t pageable;
{
- if (DOPDB(PDB_FOLLOW|PDB_DEACTIVATE))
- printf("pmap_deactivate(%p, %p, %d)\n", pmap, hwpcb, cpu);
-
-#if 0
- PMAP_DEACTIVATE(pmap, th, which_cpu);
-#else
- if (DOVPDB(PDB_DEACTIVATE))
- printf("pmap_deactivate: pid = %d, ptbr = 0x%lx\n",
- pmap->pid, pmap->dirpfn);
- pmap->cpus_using = FALSE;
-#endif
}
/*
- * Routine: pmap_kernel
- * Function:
- * Returns the physical map handle for the kernel.
+ * pmap_clear_modify: [ INTERFACE ]
+ *
+ * Clear the modify bits on the specified physical page.
*/
-#if 0
-pmap_t pmap_kernel()
+boolean_t
+pmap_clear_modify(pg)
+ struct vm_page *pg;
{
- return (kernel_pmap);
-}
+ struct pv_head *pvh;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t rv = FALSE;
+ long cpu_id = cpu_number();
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_modify(%p)\n", pg);
#endif
+ pvh = pa_to_pvh(pa);
+
+ PMAP_HEAD_TO_MAP_LOCK();
+ simple_lock(&pvh->pvh_slock);
+
+ if (pvh->pvh_attrs & PGA_MODIFIED) {
+ rv = TRUE;
+ pmap_changebit(pa, PG_FOW, ~0, cpu_id);
+ pvh->pvh_attrs &= ~PGA_MODIFIED;
+ }
+
+ simple_unlock(&pvh->pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
+
+ return (rv);
+}
+
/*
- * pmap_zero_page zeros the specified (machine independent) page.
- * See machine/phys.c or machine/phys.s for implementation.
+ * pmap_clear_reference: [ INTERFACE ]
+ *
+ * Clear the reference bit on the specified physical page.
*/
-#if 1
-void
-pmap_zero_page(phys)
- register vm_offset_t phys;
+boolean_t
+pmap_clear_reference(pg)
+ struct vm_page *pg;
{
+ struct pv_head *pvh;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t rv = FALSE;
+ long cpu_id = cpu_number();
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_clear_reference(%p)\n", pg);
+#endif
- if (DOPDB(PDB_FOLLOW|PDB_ZERO_PAGE))
- printf("pmap_zero_page(0x%lx)\n", phys);
+ pvh = pa_to_pvh(pa);
- assert(phys != vm_page_fictitious_addr);
+ PMAP_HEAD_TO_MAP_LOCK();
+ simple_lock(&pvh->pvh_slock);
+
+ if (pvh->pvh_attrs & PGA_REFERENCED) {
+ rv = TRUE;
+ pmap_changebit(pa, PG_FOR | PG_FOW | PG_FOE, ~0, cpu_id);
+ pvh->pvh_attrs &= ~PGA_REFERENCED;
+ }
- bzero((void *)phystokv(phys), PAGE_SIZE);
+ simple_unlock(&pvh->pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
- if (DOVPDB(PDB_FOLLOW|PDB_ZERO_PAGE))
- printf("pmap_zero_page: leaving\n");
+ return (rv);
}
-#endif
/*
- * pmap_copy_page copies the specified (machine independent) page.
- * See machine/phys.c or machine/phys.s for implementation.
+ * pmap_is_referenced: [ INTERFACE ]
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
*/
-#if 1 /* fornow */
-void
-pmap_copy_page(src, dst)
- vm_offset_t src, dst;
+boolean_t
+pmap_is_referenced(pg)
+ struct vm_page *pg;
{
+ struct pv_head *pvh;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t rv;
+
+ pvh = pa_to_pvh(pa);
+ rv = ((pvh->pvh_attrs & PGA_REFERENCED) != 0);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_is_referenced(%p) -> %c\n", pg, "FT"[rv]);
+ }
+#endif
+ return (rv);
+}
- if (DOPDB(PDB_FOLLOW|PDB_COPY_PAGE))
- printf("pmap_copy_page(0x%lx, 0x%lx)\n", src, dst);
-
- assert(src != vm_page_fictitious_addr);
- assert(dst != vm_page_fictitious_addr);
+/*
+ * pmap_is_modified: [ INTERFACE ]
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+boolean_t
+pmap_is_modified(pg)
+ struct vm_page *pg;
+{
+ struct pv_head *pvh;
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ boolean_t rv;
+
+ pvh = pa_to_pvh(pa);
+ rv = ((pvh->pvh_attrs & PGA_MODIFIED) != 0);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("pmap_is_modified(%p) -> %c\n", pg, "FT"[rv]);
+ }
+#endif
+ return (rv);
+}
- aligned_block_copy(phystokv(src), phystokv(dst), PAGE_SIZE);
+/*
+ * pmap_phys_address: [ INTERFACE ]
+ *
+ * Return the physical address corresponding to the specified
+ * cookie. Used by the device pager to decode a device driver's
+ * mmap entry point return value.
+ *
+ * Note: no locking is necessary in this function.
+ */
+paddr_t
+pmap_phys_address(ppn)
+ int ppn;
+{
- if (DOVPDB(PDB_FOLLOW|PDB_COPY_PAGE))
- printf("pmap_copy_page: leaving\n");
+ return (alpha_ptob(ppn));
}
-#endif
/*
- * Routine: pmap_pageable
- * Function:
- * Make the specified pages (by pmap, offset)
- * pageable (or not) as requested.
+ * Miscellaneous support routines follow
+ */
+
+/*
+ * alpha_protection_init:
*
- * A page which is not pageable may not take
- * a fault; therefore, its page table entry
- * must remain valid for the duration.
+ * Initialize Alpha protection code array.
*
- * This routine is merely advisory; pmap_enter
- * will specify that these pages are to be wired
- * down (or not) as appropriate.
+ * Note: no locking is necessary in this function.
*/
void
-pmap_pageable(pmap, start, end, pageable)
- pmap_t pmap;
- vm_offset_t start;
- vm_offset_t end;
- boolean_t pageable;
+alpha_protection_init()
{
-#ifdef lint
- pmap++; start++; end++; pageable++;
-#endif
+ int prot, *kp, *up;
+
+ kp = protection_codes[0];
+ up = protection_codes[1];
+
+ for (prot = 0; prot < 8; prot++) {
+ kp[prot] = 0; up[prot] = 0;
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ kp[prot] |= PG_ASM;
+ up[prot] |= 0;
+ break;
+
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ kp[prot] |= PG_EXEC; /* software */
+ up[prot] |= PG_EXEC; /* software */
+ /* FALLTHROUGH */
+
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ kp[prot] |= PG_ASM | PG_KRE;
+ up[prot] |= PG_URE | PG_KRE;
+ break;
+
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ kp[prot] |= PG_ASM | PG_KWE;
+ up[prot] |= PG_UWE | PG_KWE;
+ break;
+
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ kp[prot] |= PG_EXEC; /* software */
+ up[prot] |= PG_EXEC; /* software */
+ /* FALLTHROUGH */
+
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ kp[prot] |= PG_ASM | PG_KWE | PG_KRE;
+ up[prot] |= PG_UWE | PG_URE | PG_KWE | PG_KRE;
+ break;
+ }
+ }
}
/*
- * Clear specified attribute bits.
+ * pmap_remove_mapping:
+ *
+ * Invalidate a single page denoted by pmap/va.
+ *
+ * If (pte != NULL), it is the already computed PTE for the page.
+ *
+ * Note: locking in this function is complicated by the fact
+ * that we can be called when the PV list is already locked.
+ * (pmap_page_protect()). In this case, the caller must be
+ * careful to get the next PV entry while we remove this entry
+ * from beneath it. We assume that the pmap itself is already
+ * locked; dolock applies only to the PV list.
+ *
+ * Returns TRUE or FALSE, indicating if the I-stream needs to
+ * be synchronized.
*/
-void
-phys_attribute_clear(phys, bits)
- vm_offset_t phys;
- int bits;
+boolean_t
+pmap_remove_mapping(pmap, va, pte, dolock, cpu_id, prmt)
+ pmap_t pmap;
+ vaddr_t va;
+ pt_entry_t *pte;
+ boolean_t dolock;
+ long cpu_id;
+ struct prm_thief *prmt;
{
- pv_entry_t pv_h;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- int pai;
- register pmap_t pmap;
- spl_t spl;
+ paddr_t pa;
+ boolean_t onpv;
+ boolean_t hadasm;
+ boolean_t isactive;
+ boolean_t needisync;
+ struct pv_entry **pvp;
+ pt_entry_t **ptp;
+
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
+ printf("pmap_remove_mapping(%p, %lx, %p, %d, %ld, %p)\n",
+ pmap, va, pte, dolock, cpu_id, pvp);
+#endif
- assert(phys != vm_page_fictitious_addr);
- if (!valid_page(phys)) {
- /*
- * Not a managed page.
- */
- return;
+ if (prmt != NULL) {
+ if (prmt->prmt_flags & PRMT_PV)
+ pvp = &prmt->prmt_pv;
+ else
+ pvp = NULL;
+ if (prmt->prmt_flags & PRMT_PTP)
+ ptp = &prmt->prmt_ptp;
+ else
+ ptp = NULL;
+ } else {
+ pvp = NULL;
+ ptp = NULL;
}
/*
- * Lock the pmap system first, since we will be changing
- * several pmaps.
+ * PTE not provided, compute it from pmap and va.
*/
+ if (pte == PT_ENTRY_NULL) {
+ pte = pmap_l3pte(pmap, va, NULL);
+ if (pmap_pte_v(pte) == 0)
+ return (FALSE);
+ }
- PMAP_WRITE_LOCK(spl);
+ pa = pmap_pte_pa(pte);
+ onpv = (pmap_pte_pv(pte) != 0);
+ hadasm = (pmap_pte_asm(pte) != 0);
+ isactive = PMAP_ISACTIVE(pmap, cpu_id);
+ needisync = isactive && (pmap_pte_exec(pte) != 0);
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
+ /*
+ * Update statistics
+ */
+ if (pmap_pte_w(pte))
+ PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
+ PMAP_STAT_DECR(pmap->pm_stats.resident_count, 1);
/*
- * Walk down PV list, clearing all modify or reference bits.
- * We do not have to lock the pv_list because we have
- * the entire pmap system locked.
+ * Invalidate the PTE after saving the reference modify info.
*/
- if (pv_h->pmap != PMAP_NULL) {
- /*
- * There are some mappings.
- */
- for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_REMOVE)
+ printf("remove: invalidating pte at %p\n", pte);
+#endif
+ *pte = PG_NV;
- pmap = pv_e->pmap;
+ PMAP_INVALIDATE_TLB(pmap, va, hadasm, isactive, cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pmap, va, hadasm ? PG_ASM : 0);
+#endif
+
+ /*
+ * If we're removing a user mapping, check to see if we
+ * can free page table pages.
+ */
+ if (pmap != pmap_kernel()) {
/*
- * Lock the pmap to block pmap_extract and similar routines.
+ * Delete the reference on the level 3 table. It will
+ * delete references on the level 2 and 1 tables as
+ * appropriate.
*/
- simple_lock(&pmap->lock);
+ pmap_l3pt_delref(pmap, va, pte, cpu_id, ptp);
+ }
+
+ /*
+ * If the mapping wasn't enterd on the PV list, we're all done.
+ */
+ if (onpv == FALSE) {
+#ifdef DIAGNOSTIC
+ if (pvp != NULL)
+ panic("pmap_removing_mapping: onpv / pvp inconsistent");
+#endif
+ return (needisync);
+ }
- {
- register vm_offset_t va;
+ /*
+ * Remove it from the PV table.
+ */
+ pmap_pv_remove(pmap, pa, va, dolock, pvp);
- va = pv_e->va;
- pte = pmap_pte(pmap, va);
+ return (needisync);
+}
-#if 0
- /*
- * Consistency checks.
- */
- assert(*pte & ALPHA_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
+/*
+ * pmap_changebit:
+ *
+ * Set or clear the specified PTE bits for all mappings on the
+ * specified page.
+ *
+ * Note: we assume that the pv_head is already locked, and that
+ * the caller has acquired a PV->pmap mutex so that we can lock
+ * the pmaps as we encounter them.
+ */
+void
+pmap_changebit(pa, set, mask, cpu_id)
+ paddr_t pa;
+ u_long set, mask;
+ long cpu_id;
+{
+ struct pv_head *pvh;
+ pv_entry_t pv;
+ pt_entry_t *pte, npte;
+ vaddr_t va;
+ boolean_t hadasm, isactive;
+ boolean_t needisync = FALSE;
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_BITS)
+ printf("pmap_changebit(0x%lx, 0x%lx, 0x%lx)\n",
+ pa, set, mask);
#endif
+ if (!PAGE_IS_MANAGED(pa))
+ return;
- /*
- * Invalidate TLBs for all CPUs using this mapping.
- */
- PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
- }
+ pvh = pa_to_pvh(pa);
+ /*
+ * Loop over all current mappings setting/clearing as appropos.
+ */
+ for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL;
+ pv = LIST_NEXT(pv, pv_list)) {
+ va = pv->pv_va;
/*
- * Clear modify or reference bits.
+ * XXX don't write protect pager mappings
*/
- {
- register int i = ptes_per_vm_page;
- do {
- *pte &= ~bits;
- } while (--i > 0);
+ if (pv->pv_pmap == pmap_kernel() &&
+/* XXX */ mask == ~(PG_KWE | PG_UWE)) {
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
}
- simple_unlock(&pmap->lock);
- }
- }
- pmap_phys_attributes[pai] &= ~ (bits >> 16);
+ PMAP_LOCK(pv->pv_pmap);
+
+ pte = pv->pv_pte;
+ npte = (*pte | set) & mask;
+ if (*pte != npte) {
+ hadasm = (pmap_pte_asm(pte) != 0);
+ isactive = PMAP_ISACTIVE(pv->pv_pmap, cpu_id);
+ needisync |= (isactive && (pmap_pte_exec(pte) != 0));
+ *pte = npte;
+ PMAP_INVALIDATE_TLB(pv->pv_pmap, va, hadasm, isactive,
+ cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pv->pv_pmap, va,
+ hadasm ? PG_ASM : 0);
+#endif
+ }
+ PMAP_UNLOCK(pv->pv_pmap);
+ }
- PMAP_WRITE_UNLOCK(spl);
+ if (needisync) {
+ alpha_pal_imb();
+#if defined(MULTIPROCESSOR) && 0
+ alpha_broadcast_ipi(ALPHA_IPI_IMB);
+#endif
+ }
}
/*
- * Check specified attribute bits.
+ * pmap_emulate_reference:
+ *
+ * Emulate reference and/or modified bit hits.
*/
-boolean_t
-phys_attribute_test(phys, bits)
- vm_offset_t phys;
- int bits;
+void
+pmap_emulate_reference(p, v, user, write)
+ struct proc *p;
+ vaddr_t v;
+ int user;
+ int write;
{
- pv_entry_t pv_h;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- int pai;
- register pmap_t pmap;
- spl_t spl;
+ pt_entry_t faultoff, *pte;
+ paddr_t pa;
+ struct pv_head *pvh;
+ boolean_t didlock = FALSE;
+ long cpu_id = cpu_number();
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n",
+ p, v, user, write);
+#endif
- assert(phys != vm_page_fictitious_addr);
- if (!valid_page(phys)) {
- /*
- * Not a managed page.
- */
- return (FALSE);
+ /*
+ * Convert process and virtual address to physical address.
+ */
+ if (v >= VM_MIN_KERNEL_ADDRESS) {
+ if (user)
+ panic("pmap_emulate_reference: user ref to kernel");
+ /*
+ * No need to lock here; kernel PT pages never go away.
+ */
+ pte = PMAP_KERNEL_PTE(v);
+ } else {
+#ifdef DIAGNOSTIC
+ if (p == NULL)
+ panic("pmap_emulate_reference: bad proc");
+ if (p->p_vmspace == NULL)
+ panic("pmap_emulate_reference: bad p_vmspace");
+#endif
+ PMAP_LOCK(p->p_vmspace->vm_map.pmap);
+ didlock = TRUE;
+ pte = pmap_l3pte(p->p_vmspace->vm_map.pmap, v, NULL);
+ /*
+ * We'll unlock below where we're done with the PTE.
+ */
+ }
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW) {
+ printf("\tpte = %p, ", pte);
+ printf("*pte = 0x%lx\n", *pte);
+ }
+#endif
+#ifdef DEBUG /* These checks are more expensive */
+ if (!pmap_pte_v(pte))
+ panic("pmap_emulate_reference: invalid pte");
+#if 0
+ /*
+ * Can't do these, because cpu_fork and cpu_swapin call
+ * pmap_emulate_reference(), and the bits aren't guaranteed,
+ * for them...
+ */
+ if (write) {
+ if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE)))
+ panic("pmap_emulate_reference: write but unwritable");
+ if (!(*pte & PG_FOW))
+ panic("pmap_emulate_reference: write but not FOW");
+ } else {
+ if (!(*pte & (user ? PG_URE : PG_URE | PG_KRE)))
+ panic("pmap_emulate_reference: !write but unreadable");
+ if (!(*pte & (PG_FOR | PG_FOE)))
+ panic("pmap_emulate_reference: !write but not FOR|FOE");
}
+#endif
+ /* Other diagnostics? */
+#endif
+ pa = pmap_pte_pa(pte);
/*
- * Lock the pmap system first, since we will be checking
- * several pmaps.
+ * We're now done with the PTE. If it was a user pmap, unlock
+ * it now.
*/
+ if (didlock)
+ PMAP_UNLOCK(p->p_vmspace->vm_map.pmap);
- PMAP_WRITE_LOCK(spl);
+#ifdef DEBUG
+ if (pmapdebug & PDB_FOLLOW)
+ printf("\tpa = 0x%lx\n", pa);
+#endif
+#ifdef DIAGNOSTIC
+ if (!PAGE_IS_MANAGED(pa))
+ panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): pa 0x%lx not managed", p, v, user, write, pa);
+#endif
+
+ /*
+ * Twiddle the appropriate bits to reflect the reference
+ * and/or modification..
+ *
+ * The rules:
+ * (1) always mark page as used, and
+ * (2) if it was a write fault, mark page as modified.
+ */
+ pvh = pa_to_pvh(pa);
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
+ PMAP_HEAD_TO_MAP_LOCK();
+ simple_lock(&pvh->pvh_slock);
- if (pmap_phys_attributes[pai] & (bits >> 16)) {
- PMAP_WRITE_UNLOCK(spl);
- return (TRUE);
+ if (write) {
+ pvh->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
+ faultoff = PG_FOR | PG_FOW | PG_FOE;
+ } else {
+ pvh->pvh_attrs |= PGA_REFERENCED;
+ faultoff = PG_FOR | PG_FOE;
}
+ pmap_changebit(pa, 0, ~faultoff, cpu_id);
- /*
- * Walk down PV list, checking all mappings.
- * We do not have to lock the pv_list because we have
- * the entire pmap system locked.
- */
- if (pv_h->pmap != PMAP_NULL) {
- /*
- * There are some mappings.
- */
- for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+ simple_unlock(&pvh->pvh_slock);
+ PMAP_HEAD_TO_MAP_UNLOCK();
+}
- pmap = pv_e->pmap;
- /*
- * Lock the pmap to block pmap_extract and similar routines.
- */
- simple_lock(&pmap->lock);
+#ifdef DEBUG
+/*
+ * pmap_pv_dump:
+ *
+ * Dump the physical->virtual data for the specified page.
+ */
+void
+pmap_pv_dump(pa)
+ paddr_t pa;
+{
+ struct pv_head *pvh;
+ pv_entry_t pv;
+ static const char *usage[] = {
+ "normal", "pvent", "l1pt", "l2pt", "l3pt",
+ };
+
+ pvh = pa_to_pvh(pa);
+
+ simple_lock(&pvh->pvh_slock);
- {
- register vm_offset_t va;
+ printf("pa 0x%lx (attrs = 0x%x, usage = " /* ) */, pa, pvh->pvh_attrs);
+ if (pvh->pvh_usage < PGU_NORMAL || pvh->pvh_usage > PGU_L3PT)
+/* ( */ printf("??? %d):\n", pvh->pvh_usage);
+ else
+/* ( */ printf("%s):\n", usage[pvh->pvh_usage]);
- va = pv_e->va;
- pte = pmap_pte(pmap, va);
+ for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL;
+ pv = LIST_NEXT(pv, pv_list))
+ printf(" pmap %p, va 0x%lx\n",
+ pv->pv_pmap, pv->pv_va);
+ printf("\n");
-#if 0
- /*
- * Consistency checks.
- */
- assert(*pte & ALPHA_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
+ simple_unlock(&pvh->pvh_slock);
+}
#endif
- }
+
+/*
+ * vtophys:
+ *
+ * Return the physical address corresponding to the K0SEG or
+ * K1SEG address provided.
+ *
+ * Note: no locking is necessary in this function.
+ */
+paddr_t
+vtophys(vaddr)
+ vaddr_t vaddr;
+{
+ pt_entry_t *pte;
+ paddr_t paddr = 0;
- /*
- * Check modify or reference bits.
- */
- {
- register int i = ptes_per_vm_page;
-
- do {
- if (*pte & bits) {
- simple_unlock(&pmap->lock);
- PMAP_WRITE_UNLOCK(spl);
- return (TRUE);
- }
- } while (--i > 0);
+ if (vaddr < ALPHA_K0SEG_BASE)
+ printf("vtophys: invalid vaddr 0x%lx", vaddr);
+ else if (vaddr <= ALPHA_K0SEG_END)
+ paddr = ALPHA_K0SEG_TO_PHYS(vaddr);
+ else {
+ pte = PMAP_KERNEL_PTE(vaddr);
+ if (pmap_pte_v(pte))
+ paddr = pmap_pte_pa(pte) | (vaddr & PGOFSET);
+ }
+
+#if 0
+ printf("vtophys(0x%lx) -> 0x%lx\n", vaddr, paddr);
+#endif
+
+ return (paddr);
+}
+
+/******************** pv_entry management ********************/
+
+/*
+ * pmap_pv_enter:
+ *
+ * Add a physical->virtual entry to the pv_table.
+ */
+int
+pmap_pv_enter(pmap, pa, va, pte, dolock)
+ pmap_t pmap;
+ paddr_t pa;
+ vaddr_t va;
+ pt_entry_t *pte;
+ boolean_t dolock;
+{
+ struct pv_head *pvh;
+ pv_entry_t newpv;
+
+ /*
+ * Allocate and fill in the new pv_entry.
+ */
+ newpv = pmap_pv_alloc();
+ if (newpv == NULL)
+ return (KERN_RESOURCE_SHORTAGE);
+ newpv->pv_va = va;
+ newpv->pv_pmap = pmap;
+ newpv->pv_pte = pte;
+
+ pvh = pa_to_pvh(pa);
+
+ if (dolock)
+ simple_lock(&pvh->pvh_slock);
+
+#ifdef DEBUG
+ {
+ pv_entry_t pv;
+ /*
+ * Make sure the entry doesn't already exist.
+ */
+ for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL;
+ pv = LIST_NEXT(pv, pv_list))
+ if (pmap == pv->pv_pmap && va == pv->pv_va) {
+ printf("pmap = %p, va = 0x%lx\n", pmap, va);
+ panic("pmap_pv_enter: already in pv table");
}
- simple_unlock(&pmap->lock);
- }
}
- PMAP_WRITE_UNLOCK(spl);
- return (FALSE);
+#endif
+
+ /*
+ * ...and put it in the list.
+ */
+ LIST_INSERT_HEAD(&pvh->pvh_list, newpv, pv_list);
+
+ if (dolock)
+ simple_unlock(&pvh->pvh_slock);
+
+ return (KERN_SUCCESS);
}
/*
- * Set specified attribute bits. <ugly>
+ * pmap_pv_remove:
+ *
+ * Remove a physical->virtual entry from the pv_table.
*/
void
-phys_attribute_set(phys, bits)
- vm_offset_t phys;
- int bits;
+pmap_pv_remove(pmap, pa, va, dolock, pvp)
+ pmap_t pmap;
+ paddr_t pa;
+ vaddr_t va;
+ boolean_t dolock;
+ struct pv_entry **pvp;
{
- int pai;
- spl_t spl;
+ struct pv_head *pvh;
+ pv_entry_t pv;
- assert(phys != vm_page_fictitious_addr);
- if (!valid_page(phys)) {
- /*
- * Not a managed page.
- */
- return;
- }
+ pvh = pa_to_pvh(pa);
+
+ if (dolock)
+ simple_lock(&pvh->pvh_slock);
/*
- * Lock the pmap system.
+ * Find the entry to remove.
*/
+ for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL;
+ pv = LIST_NEXT(pv, pv_list))
+ if (pmap == pv->pv_pmap && va == pv->pv_va)
+ break;
- PMAP_WRITE_LOCK(spl);
+#ifdef DEBUG
+ if (pv == NULL)
+ panic("pmap_pv_remove: not in pv table");
+#endif
+
+ LIST_REMOVE(pv, pv_list);
- pai = pa_index(phys);
- pmap_phys_attributes[pai] |= (bits >> 16);
+ if (dolock)
+ simple_unlock(&pvh->pvh_slock);
- PMAP_WRITE_UNLOCK(spl);
+ /*
+ * If pvp is not NULL, this is pmap_pv_alloc() stealing an
+ * entry from another mapping, and we return the now unused
+ * entry in it. Otherwise, free the pv_entry.
+ */
+ if (pvp != NULL)
+ *pvp = pv;
+ else
+ pmap_pv_free(pv);
}
/*
- * Clear the modify bits on the specified physical page.
+ * pmap_pv_alloc:
+ *
+ * Allocate a pv_entry.
*/
-
-void pmap_clear_modify(phys)
- register vm_offset_t phys;
+struct pv_entry *
+pmap_pv_alloc()
{
-if (pmap_debug) db_printf("pmap_clear_mod(%x)\n", phys);
- phys_attribute_clear(phys, ALPHA_PTE_MOD);
+ struct pv_head *pvh;
+ struct pv_entry *pv;
+ int bank, npg, pg;
+ pt_entry_t *pte;
+ pmap_t pvpmap;
+ u_long cpu_id;
+ struct prm_thief prmt;
+
+ pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
+ if (pv != NULL)
+ return (pv);
+
+ prmt.prmt_flags = PRMT_PV;
+
+ /*
+ * We were unable to allocate one from the pool. Try to
+ * steal one from another mapping. At this point we know that:
+ *
+ * (1) We have not locked the pv table, and we already have
+ * the map-to-head lock, so it is safe for us to do so here.
+ *
+ * (2) The pmap that wants this entry *is* locked. We must
+ * use simple_lock_try() to prevent deadlock from occurring.
+ *
+ * XXX Note that in case #2, there is an exception; it *is* safe to
+ * steal a mapping from the pmap that wants this entry! We may want
+ * to consider passing the pmap to this function so that we can take
+ * advantage of this.
+ */
+
+ /* XXX This search could probably be improved. */
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npg = vm_physmem[bank].end - vm_physmem[bank].start;
+ for (pg = 0; pg < npg; pg++) {
+ pvh = &vm_physmem[bank].pmseg.pvhead[pg];
+ simple_lock(&pvh->pvh_slock);
+ for (pv = LIST_FIRST(&pvh->pvh_list);
+ pv != NULL; pv = LIST_NEXT(pv, pv_list)) {
+ pvpmap = pv->pv_pmap;
+
+ /* Don't steal from kernel pmap. */
+ if (pvpmap == pmap_kernel())
+ continue;
+
+ /*
+ * XXX We know we're not going to try and
+ * XXX lock the kernel pmap, so we don't
+ * XXX have to block interrupts here.
+ */
+ if (simple_lock_try(&pvpmap->pm_slock) == 0)
+ continue;
+
+ pte = pv->pv_pte;
+
+ /* Don't steal wired mappings. */
+ if (pmap_pte_w(pte)) {
+ simple_unlock(&pvpmap->pm_slock);
+ continue;
+ }
+
+ cpu_id = cpu_number();
+
+ /*
+ * Okay! We have a mapping we can steal;
+ * remove it and grab the pv_entry.
+ */
+ if (pmap_remove_mapping(pvpmap, pv->pv_va,
+ pte, FALSE, cpu_id, &prmt))
+ alpha_pal_imb();
+
+ /* Unlock everything and return. */
+ simple_unlock(&pvpmap->pm_slock);
+ simple_unlock(&pvh->pvh_slock);
+ return (prmt.prmt_pv);
+ }
+ simple_unlock(&pvh->pvh_slock);
+ }
+ }
+
+ return (NULL);
}
/*
- * Set the modify bits on the specified physical page.
+ * pmap_pv_free:
+ *
+ * Free a pv_entry.
*/
-
-void pmap_set_modify(phys)
- register vm_offset_t phys;
+void
+pmap_pv_free(pv)
+ struct pv_entry *pv;
{
-if (pmap_debug) db_printf("pmap_set_mod(%x)\n", phys);
- phys_attribute_set(phys, ALPHA_PTE_MOD);
+
+ pool_put(&pmap_pv_pool, pv);
}
/*
- * pmap_is_modified:
+ * pmap_pv_page_alloc:
*
- * Return whether or not the specified physical page is modified
- * by any physical maps.
+ * Allocate a page for the pv_entry pool.
*/
-
-boolean_t pmap_is_modified(phys)
- register vm_offset_t phys;
+void *
+pmap_pv_page_alloc(size, flags, mtype)
+ u_long size;
+ int flags, mtype;
{
-if (pmap_debug) db_printf("pmap_is_mod(%x)\n", phys);
- return (phys_attribute_test(phys, ALPHA_PTE_MOD));
+ paddr_t pg;
+
+ if (pmap_physpage_alloc(PGU_PVENT, &pg))
+ return ((void *)ALPHA_PHYS_TO_K0SEG(pg));
+ return (NULL);
}
/*
- * pmap_clear_reference:
+ * pmap_pv_page_free:
*
- * Clear the reference bit on the specified physical page.
+ * Free a pv_entry pool page.
*/
-
-void pmap_clear_reference(phys)
- vm_offset_t phys;
+void
+pmap_pv_page_free(v, size, mtype)
+ void *v;
+ u_long size;
+ int mtype;
{
-if (pmap_debug) db_printf("pmap_clear_ref(%x)\n", phys);
- phys_attribute_clear(phys, ALPHA_PTE_REF);
+
+ pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v));
}
+/******************** misc. functions ********************/
+
/*
- * pmap_is_referenced:
+ * pmap_physpage_alloc:
*
- * Return whether or not the specified physical page is referenced
- * by any physical maps.
+ * Allocate a single page from the VM system and return the
+ * physical address for that page.
*/
-
-boolean_t pmap_is_referenced(phys)
- vm_offset_t phys;
+boolean_t
+pmap_physpage_alloc(usage, pap)
+ int usage;
+ paddr_t *pap;
{
-if (pmap_debug) db_printf("pmap_is_ref(%x)\n", phys);
- return (phys_attribute_test(phys, ALPHA_PTE_REF));
+ struct vm_page *pg;
+ struct pv_head *pvh;
+ paddr_t pa;
+
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+ if (pg != NULL) {
+ uvm_pagezero(pg);
+ pa = VM_PAGE_TO_PHYS(pg);
+
+ pvh = pa_to_pvh(pa);
+ simple_lock(&pvh->pvh_slock);
+#ifdef DIAGNOSTIC
+ if (pvh->pvh_usage != PGU_NORMAL) {
+ printf("pmap_physpage_alloc: page 0x%lx is "
+ "in use (%s)\n", pa,
+ pmap_pgu_strings[pvh->pvh_usage]);
+ panic("pmap_physpage_alloc");
+ }
+ if (pvh->pvh_refcnt != 0) {
+ printf("pmap_physpage_alloc: page 0x%lx has "
+ "%d references\n", pa, pvh->pvh_refcnt);
+ panic("pmap_physpage_alloc");
+ }
+#endif
+ pvh->pvh_usage = usage;
+ simple_unlock(&pvh->pvh_slock);
+ *pap = pa;
+ return (TRUE);
+ }
+ return (FALSE);
}
-#if NCPUS > 1
-/*
-* TLB Coherence Code (TLB "shootdown" code)
-*
-* Threads that belong to the same task share the same address space and
-* hence share a pmap. However, they may run on distinct cpus and thus
-* have distinct TLBs that cache page table entries. In order to guarantee
-* the TLBs are consistent, whenever a pmap is changed, all threads that
-* are active in that pmap must have their TLB updated. To keep track of
-* this information, the set of cpus that are currently using a pmap is
-* maintained within each pmap structure (cpus_using). Pmap_activate() and
-* pmap_deactivate add and remove, respectively, a cpu from this set.
-* Since the TLBs are not addressable over the bus, each processor must
-* flush its own TLB; a processor that needs to invalidate another TLB
-* needs to interrupt the processor that owns that TLB to signal the
-* update.
-*
-* Whenever a pmap is updated, the lock on that pmap is locked, and all
-* cpus using the pmap are signaled to invalidate. All threads that need
-* to activate a pmap must wait for the lock to clear to await any updates
-* in progress before using the pmap. They must ACQUIRE the lock to add
-* their cpu to the cpus_using set. An implicit assumption made
-* throughout the TLB code is that all kernel code that runs at or higher
-* than splvm blocks out update interrupts, and that such code does not
-* touch pageable pages.
-*
-* A shootdown interrupt serves another function besides signaling a
-* processor to invalidate. The interrupt routine (pmap_update_interrupt)
-* waits for the both the pmap lock (and the kernel pmap lock) to clear,
-* preventing user code from making implicit pmap updates while the
-* sending processor is performing its update. (This could happen via a
-* user data write reference that turns on the modify bit in the page
-* table). It must wait for any kernel updates that may have started
-* concurrently with a user pmap update because the IPC code
-* changes mappings.
-* Spinning on the VALUES of the locks is sufficient (rather than
-* having to acquire the locks) because any updates that occur subsequent
-* to finding the lock unlocked will be signaled via another interrupt.
-* (This assumes the interrupt is cleared before the low level interrupt code
-* calls pmap_update_interrupt()).
-*
-* The signaling processor must wait for any implicit updates in progress
-* to terminate before continuing with its update. Thus it must wait for an
-* acknowledgement of the interrupt from each processor for which such
-* references could be made. For maintaining this information, a set
-* cpus_active is used. A cpu is in this set if and only if it can
-* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
-* this set; when all such cpus are removed, it is safe to update.
-*
-* Before attempting to acquire the update lock on a pmap, a cpu (A) must
-* be at least at the priority of the interprocessor interrupt
-* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
-* kernel update; it would spin forever in pmap_update_interrupt() trying
-* to acquire the user pmap lock it had already acquired. Furthermore A
-* must remove itself from cpus_active. Otherwise, another cpu holding
-* the lock (B) could be in the process of sending an update signal to A,
-* and thus be waiting for A to remove itself from cpus_active. If A is
-* spinning on the lock at priority this will never happen and a deadlock
-* will result.
-*/
-
-/*
- * Signal another CPU that it must flush its TLB
- */
-void signal_cpus(use_list, pmap, start, end)
- cpu_set use_list;
- pmap_t pmap;
- vm_offset_t start, end;
+/*
+ * pmap_physpage_free:
+ *
+ * Free the single page table page at the specified physical address.
+ */
+void
+pmap_physpage_free(pa)
+ paddr_t pa;
{
- register int which_cpu, j;
- register pmap_update_list_t update_list_p;
+ struct pv_head *pvh;
+ struct vm_page *pg;
- while ((which_cpu = ffs(use_list)) != 0) {
- which_cpu -= 1; /* convert to 0 origin */
+ if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+ panic("pmap_physpage_free: bogus physical page address");
- update_list_p = &cpu_update_list[which_cpu];
- simple_lock(&update_list_p->lock);
+ pvh = pa_to_pvh(pa);
- j = update_list_p->count;
- if (j >= UPDATE_LIST_SIZE) {
- /*
- * list overflowed. Change last item to
- * indicate overflow.
- */
- update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
- update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
- update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS;
- }
- else {
- update_list_p->item[j].pmap = pmap;
- update_list_p->item[j].start = start;
- update_list_p->item[j].end = end;
- update_list_p->count = j+1;
- }
- cpu_update_needed[which_cpu] = TRUE;
- simple_unlock(&update_list_p->lock);
-
- if ((cpus_idle & (1 << which_cpu)) == 0)
- interrupt_processor(which_cpu);
- use_list &= ~(1 << which_cpu);
- }
+ simple_lock(&pvh->pvh_slock);
+#ifdef DIAGNOSTIC
+ if (pvh->pvh_usage == PGU_NORMAL)
+ panic("pmap_physpage_free: not in use?!");
+ if (pvh->pvh_refcnt != 0)
+ panic("pmap_physpage_free: page still has references");
+#endif
+ pvh->pvh_usage = PGU_NORMAL;
+ simple_unlock(&pvh->pvh_slock);
+
+ uvm_pagefree(pg);
}
-void process_pmap_updates(my_pmap)
- register pmap_t my_pmap;
+/*
+ * pmap_physpage_addref:
+ *
+ * Add a reference to the specified special use page.
+ */
+int
+pmap_physpage_addref(kva)
+ void *kva;
{
- register int my_cpu = cpu_number();
- register pmap_update_list_t update_list_p;
- register int j;
- register pmap_t pmap;
-
- update_list_p = &cpu_update_list[my_cpu];
- simple_lock(&update_list_p->lock);
-
- for (j = 0; j < update_list_p->count; j++) {
- pmap = update_list_p->item[j].pmap;
- if (pmap == my_pmap ||
- pmap == kernel_pmap) {
-
- INVALIDATE_TLB(update_list_p->item[j].start,
- update_list_p->item[j].end);
- }
- }
- update_list_p->count = 0;
- cpu_update_needed[my_cpu] = FALSE;
- simple_unlock(&update_list_p->lock);
-}
+ struct pv_head *pvh;
+ paddr_t pa;
+ int rval;
-#if MACH_KDB
+ pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
+ pvh = pa_to_pvh(pa);
-static boolean_t db_interp_int[NCPUS];
-int db_inside_pmap_update[NCPUS];
-int suicide_cpu;
+ simple_lock(&pvh->pvh_slock);
+#ifdef DIAGNOSTIC
+ if (pvh->pvh_usage == PGU_NORMAL)
+ panic("pmap_physpage_addref: not a special use page");
+#endif
-cpu_interrupt_to_db(i)
- int i;
-{
- db_interp_int[i] = TRUE;
- interrupt_processor(i);
+ rval = ++pvh->pvh_refcnt;
+ simple_unlock(&pvh->pvh_slock);
+
+ return (rval);
}
-#endif
/*
- * Interrupt routine for TBIA requested from other processor.
+ * pmap_physpage_delref:
+ *
+ * Delete a reference to the specified special use page.
*/
-void pmap_update_interrupt()
+int
+pmap_physpage_delref(kva)
+ void *kva;
{
- register int my_cpu;
- register pmap_t my_pmap;
- spl_t s;
+ struct pv_head *pvh;
+ paddr_t pa;
+ int rval;
- my_cpu = cpu_number();
+ pa = ALPHA_K0SEG_TO_PHYS(trunc_page((vaddr_t)kva));
+ pvh = pa_to_pvh(pa);
- db_inside_pmap_update[my_cpu]++;
-#if MACH_KDB
- if (db_interp_int[my_cpu]) {
- db_interp_int[my_cpu] = FALSE;
- remote_db_enter();
- /* In case another processor modified text */
- alphacache_Iflush();
-if (cpu_number() == suicide_cpu) halt();
- goto out; /* uhmmm, maybe should do updates just in case */
- }
+ simple_lock(&pvh->pvh_slock);
+#ifdef DIAGNOSTIC
+ if (pvh->pvh_usage == PGU_NORMAL)
+ panic("pmap_physpage_delref: not a special use page");
#endif
- /*
- * Exit now if we're idle. We'll pick up the update request
- * when we go active, and we must not put ourselves back in
- * the active set because we'll never process the interrupt
- * while we're idle (thus hanging the system).
- */
- if (cpus_idle & (1 << my_cpu))
- goto out;
- if (current_thread() == THREAD_NULL)
- my_pmap = kernel_pmap;
- else {
- my_pmap = current_pmap();
- if (!pmap_in_use(my_pmap, my_cpu))
- my_pmap = kernel_pmap;
- }
+ rval = --pvh->pvh_refcnt;
+#ifdef DIAGNOSTIC
/*
- * Raise spl to splvm (above splip) to block out pmap_extract
- * from IO code (which would put this cpu back in the active
- * set).
+ * Make sure we never have a negative reference count.
*/
- s = splvm();
-
- do {
+ if (pvh->pvh_refcnt < 0)
+ panic("pmap_physpage_delref: negative reference count");
+#endif
+ simple_unlock(&pvh->pvh_slock);
- /*
- * Indicate that we're not using either user or kernel
- * pmap.
- */
- i_bit_clear(my_cpu, &cpus_active);
-
- /*
- * Wait for any pmap updates in progress, on either user
- * or kernel pmap.
- */
- while (*(volatile int *)&my_pmap->lock.lock_data ||
- *(volatile int *)&kernel_pmap->lock.lock_data)
- continue;
+ return (rval);
+}
- process_pmap_updates(my_pmap);
+/******************** page table page management ********************/
- i_bit_set(my_cpu, &cpus_active);
+/*
+ * pmap_growkernel: [ INTERFACE ]
+ *
+ * Grow the kernel address space. This is a hint from the
+ * upper layer to pre-allocate more kernel PT pages.
+ *
+ * XXX Implement XXX
+ */
- } while (cpu_update_needed[my_cpu]);
-
- splx(s);
-out:
- db_inside_pmap_update[my_cpu]--;
-}
-#else NCPUS > 1
/*
- * Dummy routine to satisfy external reference.
+ * pmap_lev1map_create:
+ *
+ * Create a new level 1 page table for the specified pmap.
+ *
+ * Note: the pmap must already be locked.
*/
-void pmap_update_interrupt()
+int
+pmap_lev1map_create(pmap, cpu_id)
+ pmap_t pmap;
+ long cpu_id;
{
- /* should never be called. */
+ paddr_t ptpa;
+ pt_entry_t pte;
+ int i;
+
+#ifdef DIAGNOSTIC
+ if (pmap == pmap_kernel())
+ panic("pmap_lev1map_create: got kernel pmap");
+
+ if (pmap->pm_asn[cpu_id] != PMAP_ASN_RESERVED)
+ panic("pmap_lev1map_create: pmap uses non-reserved ASN");
+#endif
+
+ /*
+ * Allocate a page for the level 1 table.
+ */
+ if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE) {
+ /*
+ * Yow! No free pages! Try to steal a PT page from
+ * another pmap!
+ */
+ if (pmap_ptpage_steal(pmap, PGU_L1PT, &ptpa) == FALSE)
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ pmap->pm_lev1map = (pt_entry_t *) ALPHA_PHYS_TO_K0SEG(ptpa);
+
+ /*
+ * Initialize the new level 1 table by copying the
+ * kernel mappings into it.
+ */
+ for (i = l1pte_index(VM_MIN_KERNEL_ADDRESS);
+ i <= l1pte_index(VM_MAX_KERNEL_ADDRESS); i++)
+ pmap->pm_lev1map[i] = kernel_lev1map[i];
+
+ /*
+ * Now, map the new virtual page table. NOTE: NO ASM!
+ */
+ pte = ((ptpa >> PGSHIFT) << PG_SHIFT) | PG_V | PG_KRE | PG_KWE;
+ pmap->pm_lev1map[l1pte_index(VPTBASE)] = pte;
+
+ /*
+ * The page table base has changed; if the pmap was active,
+ * reactivate it.
+ */
+ if (PMAP_ISACTIVE(pmap, cpu_id)) {
+ pmap_asn_alloc(pmap, cpu_id);
+ PMAP_ACTIVATE(pmap, curproc, cpu_id);
+ }
+ return (KERN_SUCCESS);
}
-#endif /* NCPUS > 1 */
+/*
+ * pmap_lev1map_destroy:
+ *
+ * Destroy the level 1 page table for the specified pmap.
+ *
+ * Note: the pmap must already be locked.
+ */
void
-set_ptbr(pmap_t map, pcb_t pcb, boolean_t switchit)
+pmap_lev1map_destroy(pmap, cpu_id)
+ pmap_t pmap;
+ long cpu_id;
{
- /* optimize later */
- vm_offset_t pa;
+ paddr_t ptpa;
- pa = pmap_resident_extract(kernel_pmap, (vm_offset_t)map->dirbase);
-printf("set_ptbr (switch = %d): dirbase = 0x%lx, pa = 0x%lx\n", switchit, map->dirbase, pa);
- if (pa == 0)
- panic("set_ptbr");
-#if 0
- pcb->mss.hw_pcb.ptbr = alpha_btop(pa);
- if (switchit) {
- pcb->mss.hw_pcb.asn = map->pid;
- swpctxt(kvtophys((vm_offset_t) pcb), &(pcb)->mss.hw_pcb.ksp);
- }
-#else
- pcb->pcb_hw.apcb_ptbr = alpha_btop(pa);
- if (switchit) {
- pcb->pcb_hw.apcb_asn = map->pid;
- swpctxt(kvtophys((vm_offset_t) pcb), &(pcb)->pcb_hw.apcb_ksp);
- }
+#ifdef DIAGNOSTIC
+ if (pmap == pmap_kernel())
+ panic("pmap_lev1map_destroy: got kernel pmap");
#endif
+
+ ptpa = ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap->pm_lev1map);
+
+ /*
+ * Go back to referencing the global kernel_lev1map.
+ */
+ pmap->pm_lev1map = kernel_lev1map;
+
+ /*
+ * The page table base has changed; if the pmap was active,
+ * reactivate it. Note that allocation of a new ASN is
+ * not necessary here:
+ *
+ * (1) We've gotten here because we've deleted all
+ * user mappings in the pmap, invalidating the
+ * TLB entries for them as we go.
+ *
+ * (2) kernel_lev1map contains only kernel mappings, which
+ * were identical in the user pmap, and all of
+ * those mappings have PG_ASM, so the ASN doesn't
+ * matter.
+ *
+	 * We do, however, ensure that the pmap is using the
+	 * reserved ASN, to ensure that no two pmaps ever have
+	 * clashing TLB entries.
+ */
+ PMAP_INVALIDATE_ASN(pmap, cpu_id);
+ if (PMAP_ISACTIVE(pmap, cpu_id))
+ PMAP_ACTIVATE(pmap, curproc, cpu_id);
+
+ /*
+ * Free the old level 1 page table page.
+ */
+ pmap_physpage_free(ptpa);
}
-/***************************************************************************
+/*
+ * pmap_ptpage_alloc:
*
- * TLBPID Management
+ * Allocate a level 2 or level 3 page table page, and
+ * initialize the PTE that references it.
*
- * This is basically a unique number generator, with the twist
- * that numbers are in a given range (dynamically defined).
- * All things considered, I did it right in the MIPS case.
+ * Note: the pmap must already be locked.
*/
+int
+pmap_ptpage_alloc(pmap, pte, usage)
+ pmap_t pmap;
+ pt_entry_t *pte;
+ int usage;
+{
+ paddr_t ptpa;
-#if 0
-/* above */
-int pmap_max_asn;
-#endif
+ /*
+ * Allocate the page table page.
+ */
+ if (pmap_physpage_alloc(usage, &ptpa) == FALSE) {
+ /*
+ * Yow! No free pages! Try to steal a PT page from
+ * another pmap!
+ */
+ if (pmap_ptpage_steal(pmap, usage, &ptpa) == FALSE)
+ return (KERN_RESOURCE_SHORTAGE);
+ }
-decl_simple_lock_data(static, tlbpid_lock)
-static struct pmap **pids_in_use;
-static int pmap_next_pid;
+ /*
+ * Initialize the referencing PTE.
+ */
+ *pte = ((ptpa >> PGSHIFT) << PG_SHIFT) | \
+ PG_V | PG_KRE | PG_KWE | PG_WIRED |
+ (pmap == pmap_kernel() ? PG_ASM : 0);
+
+ return (KERN_SUCCESS);
+}
-pmap_tlbpid_init(maxasn)
- int maxasn;
+/*
+ * pmap_ptpage_free:
+ *
+ *	Free the level 2 or level 3 page table page referenced
+ *	by the provided PTE.
+ *
+ * Note: the pmap must already be locked.
+ */
+void
+pmap_ptpage_free(pmap, pte, ptp)
+ pmap_t pmap;
+ pt_entry_t *pte;
+ pt_entry_t **ptp;
{
- simple_lock_init(&tlbpid_lock);
+ paddr_t ptpa;
- if (DOVPDB(PDB_FOLLOW|PDB_TLBPID_INIT))
- printf("pmap_tlbpid_init: maxasn = %d\n", maxasn);
+ /*
+ * Extract the physical address of the page from the PTE
+ * and clear the entry.
+ */
+ ptpa = pmap_pte_pa(pte);
+ *pte = PG_NV;
- pmap_max_asn = maxasn;
- if (maxasn == 0) {
- /* ASNs not implemented... Is this the right way to check? */
- return;
+ /*
+ * Check to see if we're stealing the PT page. If we are,
+ * zero it, and return the KSEG address of the page.
+ */
+ if (ptp != NULL) {
+ pmap_zero_page(ptpa);
+ *ptp = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(ptpa);
+ } else {
+#ifdef DEBUG
+ pmap_zero_page(ptpa);
+#endif
+ pmap_physpage_free(ptpa);
}
-
- pids_in_use = (struct pmap **)
- pmap_bootstrap_alloc((maxasn + 1) * sizeof(struct pmap *));
- bzero(pids_in_use, (maxasn + 1) * sizeof(struct pmap *));
-
- pmap_next_pid = 1;
}
/*
- * Axioms:
- * - pmap_next_pid always points to a free one, unless the table is full;
- * in that case it points to a likely candidate for recycling.
- * - pmap.pid prevents from making duplicates: if -1 there is no
- * pid for it, otherwise there is one and only one entry at that index.
+ * pmap_ptpage_steal:
*
- * pmap_tlbpid_assign provides a tlbpid for the given pmap, creating
- * a new one if necessary
- * pmap_tlbpid_destroy returns a tlbpid to the pool of available ones
+ * Steal a PT page from a pmap.
*/
-
-pmap_tlbpid_assign(map)
- struct pmap *map;
+boolean_t
+pmap_ptpage_steal(pmap, usage, pap)
+ pmap_t pmap;
+ int usage;
+ paddr_t *pap;
{
- register int pid, next_pid;
-
- if (DOVPDB(PDB_FOLLOW|PDB_TLBPID_ASSIGN))
- printf("pmap_tlbpid_assign: pmap %p had %d\n", map, map->pid);
+ struct pv_head *pvh;
+ pmap_t spmap;
+ int l1idx, l2idx, l3idx;
+ pt_entry_t *lev2map, *lev3map;
+ vaddr_t va;
+ paddr_t pa;
+ struct prm_thief prmt;
+ u_long cpu_id = cpu_number();
+ boolean_t needisync = FALSE;
+
+ prmt.prmt_flags = PRMT_PTP;
+ prmt.prmt_ptp = NULL;
- if (pmap_max_asn && map->pid < 0) {
+ /*
+ * We look for pmaps which do not reference kernel_lev1map (which
+ * would indicate that they are either the kernel pmap, or a user
+ * pmap with no valid mappings). Since the list of all pmaps is
+ * maintained in an LRU fashion, we should get a pmap that is
+ * `more inactive' than our current pmap (although this may not
+ * always be the case).
+ *
+ * We start looking for valid L1 PTEs at the lowest address,
+ * go to that L2, look for the first valid L2 PTE, and steal
+ * that L3 PT page.
+ */
+ simple_lock(&pmap_all_pmaps_slock);
+ for (spmap = TAILQ_FIRST(&pmap_all_pmaps);
+ spmap != NULL; spmap = TAILQ_NEXT(spmap, pm_list)) {
+ /*
+ * Skip the kernel pmap and ourselves.
+ */
+ if (spmap == pmap_kernel() || spmap == pmap)
+ continue;
- simple_lock(&tlbpid_lock);
+ PMAP_LOCK(spmap);
+ if (spmap->pm_lev1map == kernel_lev1map) {
+ PMAP_UNLOCK(spmap);
+ continue;
+ }
- next_pid = pmap_next_pid;
- if (pids_in_use[next_pid]) {
- /* are we _really_ sure it's full ? */
- for (pid = 1; pid < pmap_max_asn; pid++)
- if (pids_in_use[pid] == PMAP_NULL) {
- /* aha! */
- next_pid = pid;
- goto got_a_free_one;
+ /*
+ * Have a candidate pmap. Loop through the PT pages looking
+ * for one we can steal.
+ */
+ for (l1idx = 0; l1idx < NPTEPG; l1idx++) {
+ if (pmap_pte_v(&spmap->pm_lev1map[l1idx]) == 0)
+ continue;
+
+ lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(
+ pmap_pte_pa(&spmap->pm_lev1map[l1idx]));
+ for (l2idx = 0; l2idx < NPTEPG; l2idx++) {
+ if (pmap_pte_v(&lev2map[l2idx]) == 0)
+ continue;
+ lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(
+ pmap_pte_pa(&lev2map[l2idx]));
+ for (l3idx = 0; l3idx < NPTEPG; l3idx++) {
+ /*
+ * If the entry is valid and wired,
+ * we cannot steal this page.
+ */
+ if (pmap_pte_v(&lev3map[l3idx]) &&
+ pmap_pte_w(&lev3map[l3idx]))
+ break;
}
- /* Table full */
- while (pids_in_use[next_pid]->cpus_using) {
- if (++next_pid == pmap_max_asn)
- next_pid = 1;
+
+ /*
+ * If we scanned all of the current L3 table
+ * without finding a wired entry, we can
+ * steal this page!
+ */
+ if (l3idx == NPTEPG)
+ goto found_one;
}
- pmap_tlbpid_destroy(next_pid, TRUE);
}
-got_a_free_one:
- pids_in_use[next_pid] = map;
- map->pid = next_pid;
- if (++next_pid == pmap_max_asn)
- next_pid = 1;
- pmap_next_pid = next_pid;
- simple_unlock(&tlbpid_lock);
- }
- if (DOVPDB(PDB_FOLLOW|PDB_TLBPID_ASSIGN))
- printf("pmap_tlbpid_assign: pmap %p got %d\n", map, map->pid);
-}
+ /*
+ * Didn't find something we could steal in this
+ * pmap, try the next one.
+ */
+ PMAP_UNLOCK(spmap);
+ continue;
-pmap_tlbpid_destroy(pid, locked)
- int pid;
- boolean_t locked;
-{
- struct pmap *map;
+ found_one:
+ /* ...don't need this anymore. */
+ simple_unlock(&pmap_all_pmaps_slock);
- if (DOVPDB(PDB_FOLLOW|PDB_TLBPID_DESTROY))
- printf("pmap_tlbpid_destroy(%d, %d)\n", pid, locked);
+ /*
+ * Okay! We have a PT page we can steal. l1idx and
+ * l2idx indicate which L1 PTP and L2 PTP we should
+ * use to compute the virtual addresses the L3 PTP
+ * maps. Loop through all the L3 PTEs in this range
+ * and nuke the mappings for them. When we're through,
+ * we'll have a PT page pointed to by prmt.prmt_ptp!
+ */
+ for (l3idx = 0,
+ va = (l1idx * ALPHA_L1SEG_SIZE) +
+ (l2idx * ALPHA_L2SEG_SIZE);
+ l3idx < NPTEPG && prmt.prmt_ptp == NULL;
+ l3idx++, va += PAGE_SIZE) {
+ if (pmap_pte_v(&lev3map[l3idx])) {
+ needisync |= pmap_remove_mapping(spmap, va,
+ &lev3map[l3idx], TRUE, cpu_id, &prmt);
+ }
+ }
- if (pid < 0) /* no longer in use */
- return;
+ PMAP_UNLOCK(spmap);
- assert(pmap_max_asn);
+ if (needisync) {
+ alpha_pal_imb();
+#if defined(MULTIPROCESSOR) && 0
+ alpha_broadcast_ipi(ALPHA_IPI_IMB);
+#endif
+ }
- if (!locked) simple_lock(&tlbpid_lock);
+#ifdef DIAGNOSTIC
+ if (prmt.prmt_ptp == NULL)
+ panic("pmap_ptptage_steal: failed");
+ if (prmt.prmt_ptp != lev3map)
+ panic("pmap_ptpage_steal: inconsistent");
+#endif
+ pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)prmt.prmt_ptp);
- /*
- * Make the pid available, and the map unassigned.
- */
- map = pids_in_use[pid];
- assert(map != NULL);
- pids_in_use[pid] = PMAP_NULL;
- map->pid = -1;
+ /*
+ * Don't bother locking here; the assignment is atomic.
+ */
+ pvh = pa_to_pvh(pa);
+ pvh->pvh_usage = usage;
- if (!locked) simple_unlock(&tlbpid_lock);
+ *pap = pa;
+ return (TRUE);
+ }
+ simple_unlock(&pmap_all_pmaps_slock);
+ return (FALSE);
}
-#if 1 /* DEBUG */
-
-print_pv_list()
+/*
+ * pmap_l3pt_delref:
+ *
+ * Delete a reference on a level 3 PT page. If the reference drops
+ * to zero, free it.
+ *
+ * Note: the pmap must already be locked.
+ */
+void
+pmap_l3pt_delref(pmap, va, l3pte, cpu_id, ptp)
+ pmap_t pmap;
+ vaddr_t va;
+ pt_entry_t *l3pte;
+ long cpu_id;
+ pt_entry_t **ptp;
{
- pv_entry_t p;
- vm_offset_t phys;
-
- db_printf("phys pages %x < p < %x\n", vm_first_phys, vm_last_phys);
- for (phys = vm_first_phys; phys < vm_last_phys; phys += PAGE_SIZE) {
- p = pai_to_pvh(pa_index(phys));
- if (p->pmap != PMAP_NULL) {
- db_printf("%x: %x %x\n", phys, p->pmap, p->va);
- while (p = p->next)
- db_printf("\t\t%x %x\n", p->pmap, p->va);
- }
- }
-}
+ pt_entry_t *l1pte, *l2pte;
+ l1pte = pmap_l1pte(pmap, va);
+ l2pte = pmap_l2pte(pmap, va, l1pte);
+
+#ifdef DIAGNOSTIC
+ if (pmap == pmap_kernel())
+ panic("pmap_l3pt_delref: kernel pmap");
#endif
-vm_offset_t
-pmap_phys_address(ppn)
- int ppn;
-{
- return(alpha_ptob(ppn));
-}
+ if (pmap_physpage_delref(l3pte) == 0) {
+ /*
+ * No more mappings; we can free the level 3 table.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_PTPAGE)
+ printf("pmap_l3pt_delref: freeing level 3 table at "
+ "0x%lx\n", pmap_pte_pa(l2pte));
+#endif
+ pmap_ptpage_free(pmap, l2pte, ptp);
+ pmap->pm_nlev3--;
-void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
- pmap_t dst_pmap;
- pmap_t src_pmap;
- vm_offset_t dst_addr;
- vm_size_t len;
- vm_offset_t src_addr;
-{
-}
+ /*
+ * We've freed a level 3 table, so we must
+ * invalidate the TLB entry for that PT page
+ * in the Virtual Page Table VA range, because
+ * otherwise the PALcode will service a TLB
+ * miss using the stale VPT TLB entry it entered
+ * behind our back to shortcut to the VA's PTE.
+ */
+ PMAP_INVALIDATE_TLB(pmap,
+ (vaddr_t)(&VPT[VPT_INDEX(va)]), FALSE,
+ PMAP_ISACTIVE(pmap, cpu_id), cpu_id);
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pmap,
+ (vaddr_t)(&VPT[VPT_INDEX(va)]), 0);
+#endif
-void pmap_update()
-{
+ /*
+ * We've freed a level 3 table, so delete the reference
+ * on the level 2 table.
+ */
+ pmap_l2pt_delref(pmap, l1pte, l2pte, cpu_id);
+ }
}
-vm_page_t
-vm_page_grab()
+/*
+ * pmap_l2pt_delref:
+ *
+ * Delete a reference on a level 2 PT page. If the reference drops
+ * to zero, free it.
+ *
+ * Note: the pmap must already be locked.
+ */
+void
+pmap_l2pt_delref(pmap, l1pte, l2pte, cpu_id)
+ pmap_t pmap;
+ pt_entry_t *l1pte, *l2pte;
+ long cpu_id;
{
- register vm_page_t mem;
- int spl;
-
- spl = splimp(); /* XXX */
- simple_lock(&vm_page_queue_free_lock);
- if (vm_page_queue_free.tqh_first == NULL) {
- simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
- return (NULL);
- }
-
- mem = vm_page_queue_free.tqh_first;
- TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
-
- cnt.v_free_count--;
- simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
-
- mem->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
- mem->wire_count = 0;
-
- /*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
- *
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
- */
-
- if (cnt.v_free_count < cnt.v_free_min ||
- (cnt.v_free_count < cnt.v_free_target &&
- cnt.v_inactive_count < cnt.v_inactive_target))
- thread_wakeup((void *)&vm_pages_needed);
- return (mem);
-}
-int
-vm_page_wait()
-{
+#ifdef DIAGNOSTIC
+ if (pmap == pmap_kernel())
+ panic("pmap_l2pt_delref: kernel pmap");
+#endif
+
+ if (pmap_physpage_delref(l2pte) == 0) {
+ /*
+ * No more mappings in this segment; we can free the
+ * level 2 table.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_PTPAGE)
+ printf("pmap_l2pt_delref: freeing level 2 table at "
+ "0x%lx\n", pmap_pte_pa(l1pte));
+#endif
+ pmap_ptpage_free(pmap, l1pte, NULL);
+ pmap->pm_nlev2--;
- assert_wait(&cnt.v_free_count, 0);
- thread_block();
+ /*
+ * We've freed a level 2 table, so delete the reference
+ * on the level 1 table.
+ */
+ pmap_l1pt_delref(pmap, l1pte, cpu_id);
+ }
}
/*
- * Emulate reference and/or modified bit hits.
+ * pmap_l1pt_delref:
+ *
+ * Delete a reference on a level 1 PT page. If the reference drops
+ * to zero, free it.
+ *
+ * Note: the pmap must already be locked.
*/
void
-pmap_emulate_reference(p, v, user, write)
- struct proc *p;
- vm_offset_t v;
- int user;
- int write;
+pmap_l1pt_delref(pmap, l1pte, cpu_id)
+ pmap_t pmap;
+ pt_entry_t *l1pte;
+ long cpu_id;
{
- /* XXX */
+
+#ifdef DIAGNOSTIC
+ if (pmap == pmap_kernel())
+ panic("pmap_l1pt_delref: kernel pmap");
+#endif
+
+ if (pmap_physpage_delref(l1pte) == 0) {
+ /*
+ * No more level 2 tables left, go back to the global
+ * kernel_lev1map.
+ */
+ pmap_lev1map_destroy(pmap, cpu_id);
+ }
}
-struct pv_page;
+/******************** Address Space Number management ********************/
-struct pv_page_info {
- TAILQ_ENTRY(pv_page) pgi_list;
- struct pv_entry *pgi_freelist;
- int pgi_nfree;
-};
+/*
+ * pmap_asn_alloc:
+ *
+ * Allocate and assign an ASN to the specified pmap.
+ *
+ * Note: the pmap must already be locked.
+ */
+void
+pmap_asn_alloc(pmap, cpu_id)
+ pmap_t pmap;
+ long cpu_id;
+{
-#define NPVPPG ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
+#ifdef DEBUG
+ if (pmapdebug & (PDB_FOLLOW|PDB_ASN))
+ printf("pmap_asn_alloc(%p)\n", pmap);
+#endif
-struct pv_page {
- struct pv_page_info pvp_pgi;
- struct pv_entry pvp_pv[NPVPPG];
-};
+ /*
+ * If the pmap is still using the global kernel_lev1map, there
+ * is no need to assign an ASN at this time, because only
+ * kernel mappings exist in that map, and all kernel mappings
+ * have PG_ASM set. If the pmap eventually gets its own
+ * lev1map, an ASN will be allocated at that time.
+ */
+ if (pmap->pm_lev1map == kernel_lev1map) {
+#ifdef DEBUG
+ if (pmapdebug & PDB_ASN)
+ printf("pmap_asn_alloc: still references "
+ "kernel_lev1map\n");
+#endif
+#ifdef DIAGNOSTIC
+ if (pmap->pm_asn[cpu_id] != PMAP_ASN_RESERVED)
+ panic("pmap_asn_alloc: kernel_lev1map without "
+ "PMAP_ASN_RESERVED");
+#endif
+ return;
+ }
+
+ /*
+ * On processors which do not implement ASNs, the swpctx PALcode
+ * operation will automatically invalidate the TLB and I-cache,
+ * so we don't need to do that here.
+ */
+ if (pmap_max_asn == 0) {
+ /*
+ * Refresh the pmap's generation number, to
+ * simplify logic elsewhere.
+ */
+ pmap->pm_asngen[cpu_id] = pmap_asn_generation[cpu_id];
+#ifdef DEBUG
+ if (pmapdebug & PDB_ASN)
+ printf("pmap_asn_alloc: no ASNs, using asngen %lu\n",
+ pmap->pm_asngen[cpu_id]);
+#endif
+ return;
+ }
-TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
-int pv_nfree;
+ /*
+ * Hopefully, we can continue using the one we have...
+ */
+ if (pmap->pm_asn[cpu_id] != PMAP_ASN_RESERVED &&
+ pmap->pm_asngen[cpu_id] == pmap_asn_generation[cpu_id]) {
+ /*
+ * ASN is still in the current generation; keep on using it.
+ */
+#ifdef DEBUG
+ if (pmapdebug & PDB_ASN)
+ printf("pmap_asn_alloc: same generation, keeping %u\n",
+ pmap->pm_asn[cpu_id]);
+#endif
+ return;
+ }
-#define pv_next next
+ /*
+ * Need to assign a new ASN. Grab the next one, incrementing
+ * the generation number if we have to.
+ */
+ if (pmap_next_asn[cpu_id] > pmap_max_asn) {
+ /*
+ * Invalidate all non-PG_ASM TLB entries and the
+ * I-cache, and bump the generation number.
+ */
+ ALPHA_TBIAP();
+ alpha_pal_imb();
-struct pv_entry *
-pmap_alloc_pv()
-{
- struct pv_page *pvp;
- struct pv_entry *pv;
- int i;
+ pmap_next_asn[cpu_id] = 1;
- if (pv_nfree == 0) {
- pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
- if (pvp == 0)
- panic("pmap_alloc_pv: kmem_alloc() failed");
- pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
- for (i = NPVPPG - 2; i; i--, pv++)
- pv->pv_next = pv + 1;
- pv->pv_next = 0;
- pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
- TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- pv = &pvp->pvp_pv[0];
- } else {
- --pv_nfree;
- pvp = pv_page_freelist.tqh_first;
- if (--pvp->pvp_pgi.pgi_nfree == 0) {
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- }
- pv = pvp->pvp_pgi.pgi_freelist;
+ pmap_asn_generation[cpu_id]++;
#ifdef DIAGNOSTIC
- if (pv == 0)
- panic("pmap_alloc_pv: pgi_nfree inconsistent");
+ if (pmap_asn_generation[cpu_id] == 0) {
+ /*
+ * The generation number has wrapped. We could
+ * handle this scenario by traversing all of
+ * the pmaps, and invalidating the generation
+ * number on those which are not currently
+ * in use by this processor.
+ *
+ * However... considering that we're using
+ * an unsigned 64-bit integer for generation
+ * numbers, on non-ASN CPUs, we won't wrap
+ * for approx. 585 million years, or 75 billion
+ * years on a 128-ASN CPU (assuming 1000 switch
+ * operations per second).
+ *
+ * So, we don't bother.
+ */
+ panic("pmap_asn_alloc: too much uptime");
+ }
+#endif
+#ifdef DEBUG
+ if (pmapdebug & PDB_ASN)
+ printf("pmap_asn_alloc: generation bumped to %lu\n",
+ pmap_asn_generation[cpu_id]);
#endif
- pvp->pvp_pgi.pgi_freelist = pv->pv_next;
}
- return pv;
+
+ /*
+ * Assign the new ASN and validate the generation number.
+ */
+ pmap->pm_asn[cpu_id] = pmap_next_asn[cpu_id]++;
+ pmap->pm_asngen[cpu_id] = pmap_asn_generation[cpu_id];
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_ASN)
+ printf("pmap_asn_alloc: assigning %u to pmap %p\n",
+ pmap->pm_asn[cpu_id], pmap);
+#endif
+
+#if 0 /* XXX Not sure if this is safe yet. --thorpej */
+ /*
+ * Have a new ASN, so there's no need to sync the I-stream
+ * on the way back out to userspace.
+ */
+ atomic_clearbits_ulong(&pmap->pm_needisync, (1UL << cpu_id));
+#endif
}
+#if defined(MULTIPROCESSOR)
+/******************** TLB shootdown code ********************/
+
+/*
+ * pmap_tlb_shootdown:
+ *
+ * Cause the TLB entry for pmap/va to be shot down.
+ */
void
-pmap_free_pv(pv)
- struct pv_entry *pv;
+pmap_tlb_shootdown(pmap, va, pte)
+ pmap_t pmap;
+ vaddr_t va;
+ pt_entry_t pte;
{
- register struct pv_page *pvp;
- register int i;
+ u_long i, ipinum, cpu_id = cpu_number();
+ struct pmap_tlb_shootdown_q *pq;
+ struct pmap_tlb_shootdown_job *pj;
+ int s;
+
+ s = splimp();
+
+ for (i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
+ if (i == cpu_id || cpu_info[i].ci_dev == NULL)
+ continue;
+ pq = &pmap_tlb_shootdown_q[i];
+ simple_lock(&pq->pq_slock);
+ pj = pmap_tlb_shootdown_job_get(pq);
+ pq->pq_pte |= pte;
+ if (pj == NULL) {
+ /*
+ * Couldn't allocate a job entry. Just do a
+ * TBIA[P].
+ */
+ if (pq->pq_pte & PG_ASM)
+ ipinum = ALPHA_IPI_TBIA;
+ else
+ ipinum = ALPHA_IPI_TBIAP;
+ if (pq->pq_pte & PG_EXEC)
+ ipinum |= ALPHA_IPI_IMB;
+ alpha_send_ipi(i, ipinum);
- pvp = (struct pv_page *) trunc_page(pv);
- switch (++pvp->pvp_pgi.pgi_nfree) {
- case 1:
- TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- default:
- pv->pv_next = pvp->pvp_pgi.pgi_freelist;
- pvp->pvp_pgi.pgi_freelist = pv;
- ++pv_nfree;
- break;
- case NPVPPG:
- pv_nfree -= NPVPPG - 1;
- TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
- break;
+ /*
+ * Since we've nailed the whole thing, drain the
+ * job entries pending for that processor.
+ */
+ pmap_tlb_shootdown_q_drain(pq);
+ } else {
+ pj->pj_pmap = pmap;
+ pj->pj_va = va;
+ pj->pj_pte = pte;
+ TAILQ_INSERT_TAIL(&pq->pq_head, pj, pj_list);
+ alpha_send_ipi(i, ALPHA_IPI_SHOOTDOWN);
+ }
+ simple_unlock(&pq->pq_slock);
}
+
+ splx(s);
}
-#if 0
-sanity(pmap, addr)
- register pmap_t pmap;
- register vm_offset_t addr;
+/*
+ * pmap_do_tlb_shootdown:
+ *
+ * Process pending TLB shootdown operations for this processor.
+ */
+void
+pmap_do_tlb_shootdown()
+{
+ u_long cpu_id = cpu_number();
+ u_long cpu_mask = (1UL << cpu_id);
+ struct pmap_tlb_shootdown_q *pq = &pmap_tlb_shootdown_q[cpu_id];
+ struct pmap_tlb_shootdown_job *pj;
+ int s;
+
+ s = splimp();
+
+ simple_lock(&pq->pq_slock);
+
+ while ((pj = TAILQ_FIRST(&pq->pq_head)) != NULL) {
+ TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
+ PMAP_INVALIDATE_TLB(pj->pj_pmap, pj->pj_va,
+ pj->pj_pte & PG_ASM, pj->pj_pmap->pm_cpus & cpu_mask,
+ cpu_id);
+ pmap_tlb_shootdown_job_put(pq, pj);
+ }
+
+ if (pq->pq_pte & PG_EXEC)
+ alpha_pal_imb();
+ pq->pq_pte = 0;
+
+ simple_unlock(&pq->pq_slock);
+
+ splx(s);
+}
+
+/*
+ * pmap_tlb_shootdown_q_drain:
+ *
+ * Drain a processor's TLB shootdown queue. We do not perform
+ * the shootdown operations. This is merely a convenience
+ * function.
+ *
+ * Note: We expect the queue to be locked.
+ */
+void
+pmap_tlb_shootdown_q_drain(pq)
+ struct pmap_tlb_shootdown_q *pq;
{
- register pt_entry_t *ptp;
- register pt_entry_t pte;
-
- printf("checking dirbase...\n");
- assert(pmap->dirbase != 0);
- printf("checking dirpfn...\n");
- assert(pmap->dirpfn == curproc->p_addr->u_pcb.pcb_hw.apcb_ptbr);
- printf("checking pid...\n");
- assert(pmap->pid == curproc->p_addr->u_pcb.pcb_hw.apcb_asn);
-
-
- /* seg1 */
- pte = *pmap_pde(pmap,addr);
- if ((pte & ALPHA_PTE_VALID) == 0)
- return(PT_ENTRY_NULL);
- /* seg2 */
- ptp = (pt_entry_t *)ptetokv(pte);
- pte = ptp[pte2num(addr)];
- if ((pte & ALPHA_PTE_VALID) == 0)
- return(PT_ENTRY_NULL);
- /* seg3 */
- ptp = (pt_entry_t *)ptetokv(pte);
- return(&ptp[pte3num(addr)]);
+ struct pmap_tlb_shootdown_job *pj;
+
+ while ((pj = TAILQ_FIRST(&pq->pq_head)) != NULL) {
+ TAILQ_REMOVE(&pq->pq_head, pj, pj_list);
+ pmap_tlb_shootdown_job_put(pq, pj);
+ }
+ pq->pq_pte = 0;
+}
+/*
+ * pmap_tlb_shootdown_job_get:
+ *
+ * Get a TLB shootdown job queue entry. This places a limit on
+ * the number of outstanding jobs a processor may have.
+ *
+ * Note: We expect the queue to be locked.
+ */
+struct pmap_tlb_shootdown_job *
+pmap_tlb_shootdown_job_get(pq)
+ struct pmap_tlb_shootdown_q *pq;
+{
+ struct pmap_tlb_shootdown_job *pj;
+
+ if (pq->pq_count == PMAP_TLB_SHOOTDOWN_MAXJOBS)
+ return (NULL);
+ pj = pool_get(&pmap_tlb_shootdown_job_pool, PR_NOWAIT);
+ if (pj != NULL)
+ pq->pq_count++;
+ return (pj);
}
+
+/*
+ * pmap_tlb_shootdown_job_put:
+ *
+ * Put a TLB shootdown job queue entry onto the free list.
+ *
+ * Note: We expect the queue to be locked.
+ */
+void
+pmap_tlb_shootdown_job_put(pq, pj)
+ struct pmap_tlb_shootdown_q *pq;
+ struct pmap_tlb_shootdown_job *pj;
+{
+
+#ifdef DIAGNOSTIC
+ if (pq->pq_count == 0)
+ panic("pmap_tlb_shootdown_job_put: queue length inconsistency");
#endif
+ pool_put(&pmap_tlb_shootdown_job_pool, pj);
+ pq->pq_count--;
+}
+#endif /* MULTIPROCESSOR */
diff --git a/sys/arch/alpha/alpha/process_machdep.c b/sys/arch/alpha/alpha/process_machdep.c
index be249eec38c..2427271486e 100644
--- a/sys/arch/alpha/alpha/process_machdep.c
+++ b/sys/arch/alpha/alpha/process_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: process_machdep.c,v 1.4 1996/10/30 22:38:23 niklas Exp $ */
+/* $OpenBSD: process_machdep.c,v 1.5 2000/11/08 16:01:03 art Exp $ */
/* $NetBSD: process_machdep.c,v 1.7 1996/07/11 20:14:21 cgd Exp $ */
/*
@@ -119,7 +119,6 @@ process_read_fpregs(p, regs)
struct proc *p;
struct fpreg *regs;
{
- extern struct proc *fpcurproc;
if (p == fpcurproc) {
alpha_pal_wrfen(1);
@@ -136,7 +135,6 @@ process_write_fpregs(p, regs)
struct proc *p;
struct fpreg *regs;
{
- extern struct proc *fpcurproc;
if (p == fpcurproc)
fpcurproc = NULL;
diff --git a/sys/arch/alpha/alpha/prom.c b/sys/arch/alpha/alpha/prom.c
index e8500c7aff3..2d480bb0adc 100644
--- a/sys/arch/alpha/alpha/prom.c
+++ b/sys/arch/alpha/alpha/prom.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: prom.c,v 1.6 2000/06/08 11:57:21 art Exp $ */
-/* $NetBSD: prom.c,v 1.12 1996/11/13 21:13:11 cgd Exp $ */
+/* $NetBSD: prom.c,v 1.39 2000/03/06 21:36:05 thorpej Exp $ */
/*
* Copyright (c) 1992, 1994, 1995, 1996 Carnegie Mellon University
@@ -28,47 +27,71 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <vm/vm.h>
+#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/user.h>
+#include <machine/cpu.h>
#include <machine/rpb.h>
+#include <machine/alpha.h>
+#define ENABLEPROM
#include <machine/prom.h>
-#ifdef NEW_PMAP
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#endif
#include <dev/cons.h>
-u_int64_t hwrpb_checksum __P((void));
-
/* XXX this is to fake out the console routines, while booting. */
struct consdev promcons = { NULL, NULL, promcngetc, promcnputc,
nullcnpollc, makedev(23,0), 1 };
struct rpb *hwrpb;
int alpha_console;
-int prom_mapped = 1; /* Is PROM still mapped? */
extern struct prom_vec prom_dispatch_v;
-pt_entry_t *rom_ptep, rom_pte, saved_pte; /* XXX */
+struct simplelock prom_slock;
-#ifdef NEW_PMAP
-#define rom_ptep (curproc ? &curproc->p_vmspace->vm_map.pmap->dir[0] : rom_ptep)
-#endif
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+int prom_mapped = 1; /* Is PROM still mapped? */
+
+pt_entry_t prom_pte, saved_pte[1]; /* XXX */
+static pt_entry_t *prom_lev1map __P((void));
+
+static pt_entry_t *
+prom_lev1map()
+{
+ struct alpha_pcb *apcb;
+
+ /*
+ * Find the level 1 map that we're currently running on.
+ */
+ apcb = (struct alpha_pcb *)ALPHA_PHYS_TO_K0SEG(curpcb);
+
+ return ((pt_entry_t *)ALPHA_PHYS_TO_K0SEG(apcb->apcb_ptbr << PGSHIFT));
+}
+#endif /* _PMAP_MAY_USE_PROM_CONSOLE */
void
-init_prom_interface()
+init_prom_interface(rpb)
+ struct rpb *rpb;
{
struct crb *c;
- char buf[4];
- c = (struct crb*)((char*)hwrpb + hwrpb->rpb_crb_off);
+ c = (struct crb *)((char *)rpb + rpb->rpb_crb_off);
prom_dispatch_v.routine_arg = c->crb_v_dispatch;
prom_dispatch_v.routine = c->crb_v_dispatch->entry_va;
+ simple_lock_init(&prom_slock);
+}
+
+void
+init_bootstrap_console()
+{
+ char buf[4];
+
+ init_prom_interface(hwrpb);
+
prom_getenv(PROM_E_TTY_DEV, buf, 4);
alpha_console = buf[0] - '0';
@@ -76,6 +99,74 @@ init_prom_interface()
cn_tab = &promcons;
}
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+static void prom_cache_sync __P((void));
+#endif
+
+int
+prom_enter()
+{
+ int s;
+
+ s = splhigh();
+ simple_lock(&prom_slock);
+
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ /*
+ * If we have not yet switched out of the PROM's context
+ * (i.e. the first one after alpha_init()), then the PROM
+ * is still mapped, regardless of the `prom_mapped' setting.
+ */
+ if (prom_mapped == 0 && curpcb != 0) {
+ if (!pmap_uses_prom_console())
+ panic("prom_enter");
+ {
+ pt_entry_t *lev1map;
+
+ lev1map = prom_lev1map(); /* XXX */
+ saved_pte[0] = lev1map[0]; /* XXX */
+ lev1map[0] = prom_pte; /* XXX */
+ }
+ prom_cache_sync(); /* XXX */
+ }
+#endif
+ return s;
+}
+
+void
+prom_leave(s)
+ int s;
+{
+
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+ /*
+ * See comment above.
+ */
+ if (prom_mapped == 0 && curpcb != 0) {
+ if (!pmap_uses_prom_console())
+ panic("prom_leave");
+ {
+ pt_entry_t *lev1map;
+
+ lev1map = prom_lev1map(); /* XXX */
+ lev1map[0] = saved_pte[0]; /* XXX */
+ }
+ prom_cache_sync(); /* XXX */
+ }
+#endif
+ simple_unlock(&prom_slock);
+ splx(s);
+}
+
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+static void
+prom_cache_sync __P((void))
+{
+ ALPHA_TBIA();
+ alpha_pal_imb();
+}
+#endif
+
/*
* promcnputc:
*
@@ -92,31 +183,17 @@ promcnputc(dev, c)
int c;
{
prom_return_t ret;
- u_char *to = (u_char *)0x20000000;
+ unsigned char *to = (unsigned char *)0x20000000;
int s;
-#ifdef notdef /* XXX */
- if (!prom_mapped)
- return;
-#endif
-
- s = splhigh();
- if (!prom_mapped) { /* XXX */
- saved_pte = *rom_ptep; /* XXX */
- *rom_ptep = rom_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
+ s = prom_enter(); /* splhigh() and map prom */
*to = c;
do {
ret.bits = prom_putstr(alpha_console, to, 1);
} while ((ret.u.retval & 1) == 0);
- if (!prom_mapped) { /* XXX */
- *rom_ptep = saved_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
- splx(s);
+ prom_leave(s); /* unmap prom and splx(s) */
}
/*
@@ -131,24 +208,10 @@ promcngetc(dev)
prom_return_t ret;
int s;
-#ifdef notdef /* XXX */
- if (!prom_mapped)
- return (-1);
-#endif
-
for (;;) {
- s = splhigh();
- if (!prom_mapped) { /* XXX */
- saved_pte = *rom_ptep; /* XXX */
- *rom_ptep = rom_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
+ s = prom_enter();
ret.bits = prom_getc(alpha_console);
- if (!prom_mapped) { /* XXX */
- *rom_ptep = saved_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
- splx(s);
+ prom_leave(s);
if (ret.u.status == 0 || ret.u.status == 1)
return (ret.u.retval);
}
@@ -167,23 +230,9 @@ promcnlookc(dev, cp)
prom_return_t ret;
int s;
-#ifdef notdef /* XXX */
- if (!prom_mapped)
- return (-1);
-#endif
-
- s = splhigh();
- if (!prom_mapped) { /* XXX */
- saved_pte = *rom_ptep; /* XXX */
- *rom_ptep = rom_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
+ s = prom_enter();
ret.bits = prom_getc(alpha_console);
- if (!prom_mapped) { /* XXX */
- *rom_ptep = saved_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- }
- splx(s);
+ prom_leave(s);
if (ret.u.status == 0 || ret.u.status == 1) {
*cp = ret.u.retval;
return 1;
@@ -200,24 +249,10 @@ prom_getenv(id, buf, len)
prom_return_t ret;
int s;
-#ifdef notdef /* XXX */
- if (!prom_mapped)
- return (-1);
-#endif
-
- s = splhigh();
- if (!prom_mapped) { /* XXX */
- saved_pte = *rom_ptep; /* XXX */
- *rom_ptep = rom_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
+ s = prom_enter();
ret.bits = prom_getenv_disp(id, to, len);
bcopy(to, buf, len);
- if (!prom_mapped) { /* XXX */
- *rom_ptep = saved_pte; /* XXX */
- ALPHA_TBIA(); /* XXX */
- } /* XXX */
- splx(s);
+ prom_leave(s);
if (ret.u.status & 0x4)
ret.u.retval = 0;
@@ -241,7 +276,7 @@ prom_halt(halt)
* Set "boot request" part of the CPU state depending on what
* we want to happen when we halt.
*/
- p = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off);
+ p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
p->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ);
if (halt)
p->pcs_flags |= PCS_HALT_STAY_HALTED;
@@ -260,8 +295,9 @@ hwrpb_checksum()
u_int64_t *p, sum;
int i;
-#define offsetof(type, member) ((size_t)(&((type *)0)->member)) /* XXX */
-
+#ifndef offsetof
+#define offsetof(type, member) ((size_t)(&((type *)0)->member)) /* XXX */
+#endif
for (i = 0, p = (u_int64_t *)hwrpb, sum = 0;
i < (offsetof(struct rpb, rpb_checksum) / sizeof (u_int64_t));
i++, p++)
@@ -271,43 +307,56 @@ hwrpb_checksum()
}
void
-hwrpb_restart_setup()
+hwrpb_primary_init()
{
struct pcs *p;
- /* Clear bootstrap-in-progress flag since we're done bootstrapping */
- p = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off);
- p->pcs_flags &= ~PCS_BIP;
+ p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
+ /* Initialize the primary's HWPCB and the Virtual Page Table Base. */
bcopy(&proc0.p_addr->u_pcb.pcb_hw, p->pcs_hwpcb,
sizeof proc0.p_addr->u_pcb.pcb_hw);
hwrpb->rpb_vptb = VPTBASE;
+ hwrpb->rpb_checksum = hwrpb_checksum();
+}
+
+void
+hwrpb_restart_setup()
+{
+ struct pcs *p;
+
+ /* Clear bootstrap-in-progress flag since we're done bootstrapping */
+ p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
+ p->pcs_flags &= ~PCS_BIP;
+
/* when 'c'ontinuing from console halt, do a dump */
hwrpb->rpb_rest_term = (u_int64_t)&XentRestart;
hwrpb->rpb_rest_term_val = 0x1;
-#if 0
- /* don't know what this is really used by, so don't mess with it. */
- hwrpb->rpb_restart = (u_int64_t)&XentRestart;
- hwrpb->rpb_restart_val = 0x2;
-#endif
-
hwrpb->rpb_checksum = hwrpb_checksum();
p->pcs_flags |= (PCS_RC | PCS_CV);
}
u_int64_t
-console_restart(ra, ai, pv)
- u_int64_t ra, ai, pv;
+console_restart(framep)
+ struct trapframe *framep;
{
struct pcs *p;
/* Clear restart-capable flag, since we can no longer restart. */
- p = (struct pcs *)((char *)hwrpb + hwrpb->rpb_pcs_off);
+ p = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
p->pcs_flags &= ~PCS_RC;
+ /* Fill in the missing frame slots */
+
+ framep->tf_regs[FRAME_PS] = p->pcs_halt_ps;
+ framep->tf_regs[FRAME_PC] = p->pcs_halt_pc;
+ framep->tf_regs[FRAME_T11] = p->pcs_halt_r25;
+ framep->tf_regs[FRAME_RA] = p->pcs_halt_r26;
+ framep->tf_regs[FRAME_T12] = p->pcs_halt_r27;
+
panic("user requested console halt");
return (1);
diff --git a/sys/arch/alpha/alpha/prom_disp.s b/sys/arch/alpha/alpha/prom_disp.s
index 73ac11d8348..3cad046a0c3 100644
--- a/sys/arch/alpha/alpha/prom_disp.s
+++ b/sys/arch/alpha/alpha/prom_disp.s
@@ -1,5 +1,4 @@
-/* $OpenBSD: prom_disp.s,v 1.4 1996/10/30 22:38:24 niklas Exp $ */
-/* $NetBSD: prom_disp.s,v 1.5 1996/09/17 21:17:14 cgd Exp $ */
+/* $NetBSD: prom_disp.s,v 1.8 1997/11/03 04:22:03 ross Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -38,6 +37,7 @@
.text
.align 4
+inc3: .stabs __FILE__,132,0,0,inc3; .loc 1 __LINE__
/*
* Dispatcher routine. Implements prom's calling machinery, saves our
* callee-saved registers as required by C.
diff --git a/sys/arch/alpha/alpha/trap.c b/sys/arch/alpha/alpha/trap.c
index 5939d6a9fb1..34bcea1aa42 100644
--- a/sys/arch/alpha/alpha/trap.c
+++ b/sys/arch/alpha/alpha/trap.c
@@ -1,5 +1,71 @@
-/* $OpenBSD: trap.c,v 1.19 2000/06/08 22:25:16 niklas Exp $ */
-/* $NetBSD: trap.c,v 1.19 1996/11/27 01:28:30 cgd Exp $ */
+/* $NetBSD: trap.c,v 1.52 2000/05/24 16:48:33 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1999 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -39,30 +105,21 @@
#include <sys/ktrace.h>
#endif
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
#include <machine/cpu.h>
#include <machine/reg.h>
-
+#include <machine/alpha.h>
#ifdef DDB
#include <machine/db_machdep.h>
#endif
+#include <alpha/alpha/db_instruction.h> /* for handle_opdec() */
#ifdef COMPAT_OSF1
#include <compat/osf1/osf1_syscall.h>
#endif
-static __inline void userret __P((struct proc *, u_int64_t, u_quad_t));
-void trap __P((const u_long, const u_long, const u_long, const u_long,
- struct trapframe *));
-int unaligned_fixup __P((u_long, u_long, u_long, struct proc *));
-void syscall __P((u_int64_t, struct trapframe *));
-void child_return __P((struct proc *));
-void ast __P((struct trapframe *));
-u_long Sfloat_to_reg __P((u_int));
-u_int reg_to_Sfloat __P((u_long));
-u_long Tfloat_reg_cvt __P((u_long));
-
-struct proc *fpcurproc; /* current user of the FPU */
-
void userret __P((struct proc *, u_int64_t, u_quad_t));
unsigned long Sfloat_to_reg __P((unsigned int));
@@ -76,6 +133,35 @@ unsigned long Gfloat_reg_cvt __P((unsigned long));
int unaligned_fixup __P((unsigned long, unsigned long,
unsigned long, struct proc *));
+int handle_opdec(struct proc *p, u_int64_t *ucodep);
+
+static void printtrap __P((const unsigned long, const unsigned long,
+ const unsigned long, const unsigned long, struct trapframe *, int, int));
+
+/*
+ * Initialize the trap vectors for the current processor.
+ */
+void
+trap_init()
+{
+
+ /*
+ * Point interrupt/exception vectors to our own.
+ */
+ alpha_pal_wrent(XentInt, ALPHA_KENTRY_INT);
+ alpha_pal_wrent(XentArith, ALPHA_KENTRY_ARITH);
+ alpha_pal_wrent(XentMM, ALPHA_KENTRY_MM);
+ alpha_pal_wrent(XentIF, ALPHA_KENTRY_IF);
+ alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA);
+ alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS);
+
+ /*
+ * Clear pending machine checks and error reports, and enable
+ * system- and processor-correctable error reporting.
+ */
+ alpha_pal_wrmces(alpha_pal_rdmces() &
+ ~(ALPHA_MCES_DSC|ALPHA_MCES_DPC));
+}
/*
* Define the code needed before returning to user mode, for
@@ -87,26 +173,25 @@ userret(p, pc, oticks)
u_int64_t pc;
u_quad_t oticks;
{
- int sig, s;
+ int sig;
+ struct cpu_info *ci = curcpu();
+
+ /* Do any deferred user pmap operations. */
+ PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));
/* take pending signals */
while ((sig = CURSIG(p)) != 0)
postsig(sig);
p->p_priority = p->p_usrpri;
- if (want_resched) {
+ if (ci->ci_want_resched) {
/*
- * Since we are curproc, a clock interrupt could
- * change our priority without changing run queues
- * (the running process is not kept on a run queue).
- * If this happened after we setrunqueue ourselves but
- * before we switch()'ed, we might not be on the queue
- * indicated by our priority.
+ * We are being preempted.
*/
- s = splstatclock();
- setrunqueue(p);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
- splx(s);
+ preempt(NULL);
+
+ ci = curcpu();
+
+ PMAP_USERRET(vm_map_pmap(&p->p_vmspace->vm_map));
while ((sig = CURSIG(p)) != 0)
postsig(sig);
}
@@ -123,15 +208,56 @@ userret(p, pc, oticks)
curpriority = p->p_priority;
}
-char *trap_type[] = {
- "interrupt", /* 0 ALPHA_KENTRY_INT */
- "arithmetic trap", /* 1 ALPHA_KENTRY_ARITH */
- "memory management fault", /* 2 ALPHA_KENTRY_MM */
- "instruction fault", /* 3 ALPHA_KENTRY_IF */
- "unaligned access fault", /* 4 ALPHA_KENTRY_UNA */
- "system call", /* 5 ALPHA_KENTRY_SYS */
-};
-int trap_types = sizeof trap_type / sizeof trap_type[0];
+static void
+printtrap(a0, a1, a2, entry, framep, isfatal, user)
+ const unsigned long a0, a1, a2, entry;
+ struct trapframe *framep;
+ int isfatal, user;
+{
+ char ubuf[64];
+ const char *entryname;
+
+ switch (entry) {
+ case ALPHA_KENTRY_INT:
+ entryname = "interrupt";
+ break;
+ case ALPHA_KENTRY_ARITH:
+ entryname = "arithmetic trap";
+ break;
+ case ALPHA_KENTRY_MM:
+ entryname = "memory management fault";
+ break;
+ case ALPHA_KENTRY_IF:
+ entryname = "instruction fault";
+ break;
+ case ALPHA_KENTRY_UNA:
+ entryname = "unaligned access fault";
+ break;
+ case ALPHA_KENTRY_SYS:
+ entryname = "system call";
+ break;
+ default:
+ sprintf(ubuf, "type %lx", entry);
+ entryname = (const char *) ubuf;
+ break;
+ }
+
+ printf("\n");
+ printf("%s %s trap:\n", isfatal? "fatal" : "handled",
+ user ? "user" : "kernel");
+ printf("\n");
+ printf(" trap entry = 0x%lx (%s)\n", entry, entryname);
+ printf(" a0 = 0x%lx\n", a0);
+ printf(" a1 = 0x%lx\n", a1);
+ printf(" a2 = 0x%lx\n", a2);
+ printf(" pc = 0x%lx\n", framep->tf_regs[FRAME_PC]);
+ printf(" ra = 0x%lx\n", framep->tf_regs[FRAME_RA]);
+ printf(" curproc = %p\n", curproc);
+ if (curproc != NULL)
+ printf(" pid = %d, comm = %s\n", curproc->p_pid,
+ curproc->p_comm);
+ printf("\n");
+}
/*
* Trap is called from locore to handle most types of processor traps.
@@ -147,28 +273,33 @@ trap(a0, a1, a2, entry, framep)
{
register struct proc *p;
register int i;
- u_long ucode;
+ u_int64_t ucode;
u_quad_t sticks;
- caddr_t v;
int user;
+#if defined(DDB)
+ int call_debugger = 1;
+#endif
+ caddr_t v;
int typ;
union sigval sv;
- cnt.v_trap++;
+ uvmexp.traps++;
p = curproc;
- v = 0;
ucode = 0;
user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0;
-#ifdef DDB
- framep->tf_regs[FRAME_SP] = (long)framep + FRAME_SIZE*8;
-#endif
if (user) {
sticks = p->p_sticks;
p->p_md.md_tf = framep;
- } else {
-#ifdef DIAGNOSTIC
- sticks = 0xdeadbeef; /* XXX for -Wuninitialized */
+#if 0
+/* This is to catch some weird stuff on the UDB (mj) */
+ if (framep->tf_regs[FRAME_PC] > 0 &&
+ framep->tf_regs[FRAME_PC] < 0x120000000) {
+ printf("PC Out of Whack\n");
+ printtrap(a0, a1, a2, entry, framep, 1, user);
+ }
#endif
+ } else {
+ sticks = 0; /* XXX bogus -Wuninitialized warning */
}
switch (entry) {
@@ -182,12 +313,7 @@ trap(a0, a1, a2, entry, framep)
if ((i = unaligned_fixup(a0, a1, a2, p)) == 0)
goto out;
- ucode = VM_PROT_NONE; /* XXX determine */
- v = (caddr_t)a0;
- if (i == SIGBUS)
- typ = BUS_ADRALN;
- else
- typ = SEGV_MAPERR;
+ ucode = a0; /* VA */
break;
}
@@ -202,7 +328,7 @@ trap(a0, a1, a2, entry, framep)
* user are properly aligned, and so if the kernel
* does cause an unaligned access it's a kernel bug.
*/
- goto we_re_toast;
+ goto dopanic;
case ALPHA_KENTRY_ARITH:
/*
@@ -211,52 +337,61 @@ trap(a0, a1, a2, entry, framep)
* user has requested that.
*/
if (user) {
-sigfpe: i = SIGFPE;
- v = NULL; /* XXX determine */
- ucode = a0; /* exception summary */
- typ = FPE_FLTINV; /* XXX? */
+#ifdef COMPAT_OSF1
+ extern struct emul emul_osf1;
+
+ /* just punt on OSF/1. XXX THIS IS EVIL */
+ if (p->p_emul == &emul_osf1)
+ goto out;
+#endif
+ i = SIGFPE;
+ ucode = a0; /* exception summary */
break;
}
/* Always fatal in kernel. Should never happen. */
- goto we_re_toast;
+ goto dopanic;
case ALPHA_KENTRY_IF:
/*
* These are always fatal in kernel, and should never
- * happen, unless they're breakpoints of course.
+ * happen. (Debugger entry is handled in XentIF.)
*/
- if (!user)
- goto we_re_toast;
+ if (!user) {
+#if defined(DDB)
+ /*
+ * ...unless a debugger is configured. It will
+ * inform us if the trap was handled.
+ */
+ if (alpha_debug(a0, a1, a2, entry, framep))
+ goto out;
+ /*
+ * Debugger did NOT handle the trap, don't
+ * call the debugger again!
+ */
+ call_debugger = 0;
+#endif
+ goto dopanic;
+ }
+ i = 0;
switch (a0) {
case ALPHA_IF_CODE_GENTRAP:
- if (framep->tf_regs[FRAME_A0] == -2) /* weird! */
- goto sigfpe;
+ if (framep->tf_regs[FRAME_A0] == -2) { /* weird! */
+ i = SIGFPE;
+ ucode = a0; /* exception summary */
+ break;
+ }
+ /* FALLTHROUGH */
case ALPHA_IF_CODE_BPT:
case ALPHA_IF_CODE_BUGCHK:
- /* XXX what is the address? Guess on a1 for now */
- v = (caddr_t)a1;
- ucode = 0; /* XXX determine */
+ ucode = a0; /* trap type */
i = SIGTRAP;
- typ = TRAP_BRKPT;
break;
case ALPHA_IF_CODE_OPDEC:
- /* XXX what is the address? Guess on a1 for now */
- v = (caddr_t)a1;
- ucode = 0; /* XXX determine */
-#ifdef NEW_PMAP
-{
-int instr;
-printf("REAL SIGILL: PC = 0x%lx, RA = 0x%lx\n", framep->tf_regs[FRAME_PC], framep->tf_regs[FRAME_RA]);
-printf("INSTRUCTION (%d) = 0x%lx\n", copyin((void*)framep->tf_regs[FRAME_PC] - 4, &instr, 4), instr);
-regdump(framep);
-panic("foo");
-}
-#endif
- i = SIGILL;
- typ = ILL_ILLOPC;
+ if ((i = handle_opdec(p, &ucode)) == 0)
+ goto out;
break;
case ALPHA_IF_CODE_FEN:
@@ -267,7 +402,7 @@ panic("foo");
if (fpcurproc == p) {
printf("trap: fp disabled for fpcurproc == %p",
p);
- goto we_re_toast;
+ goto dopanic;
}
alpha_pal_wrfen(1);
@@ -282,49 +417,31 @@ panic("foo");
default:
printf("trap: unknown IF type 0x%lx\n", a0);
- goto we_re_toast;
+ goto dopanic;
}
break;
case ALPHA_KENTRY_MM:
-#ifdef NEW_PMAP
- printf("mmfault: 0x%lx, 0x%lx, %d\n", a0, a1, a2);
-#endif
switch (a1) {
case ALPHA_MMCSR_FOR:
case ALPHA_MMCSR_FOE:
-#ifdef NEW_PMAP
- printf("mmfault for/foe in\n");
-#endif
pmap_emulate_reference(p, a0, user, 0);
-#ifdef NEW_PMAP
- printf("mmfault for/foe out\n");
-#endif
goto out;
case ALPHA_MMCSR_FOW:
-#ifdef NEW_PMAP
- printf("mmfault fow in\n");
-#endif
pmap_emulate_reference(p, a0, user, 1);
-#ifdef NEW_PMAP
- printf("mmfault fow out\n");
-#endif
goto out;
case ALPHA_MMCSR_INVALTRANS:
case ALPHA_MMCSR_ACCESS:
{
- register vm_offset_t va;
- register struct vmspace *vm;
+ register vaddr_t va;
+ register struct vmspace *vm = NULL;
register vm_map_t map;
vm_prot_t ftype;
int rv;
extern vm_map_t kernel_map;
-#ifdef NEW_PMAP
- printf("mmfault invaltrans/access in\n");
-#endif
/*
* If it was caused by fuswintr or suswintr,
* just punt. Note that we check the faulting
@@ -337,15 +454,9 @@ panic("foo");
p->p_addr->u_pcb.pcb_onfault ==
(unsigned long)fswintrberr &&
p->p_addr->u_pcb.pcb_accessaddr == a0) {
-#ifdef NEW_PMAP
- printf("mmfault nfintr in\n");
-#endif
framep->tf_regs[FRAME_PC] =
p->p_addr->u_pcb.pcb_onfault;
p->p_addr->u_pcb.pcb_onfault = 0;
-#ifdef NEW_PMAP
- printf("mmfault nfintr out\n");
-#endif
goto out;
}
@@ -375,18 +486,12 @@ panic("foo");
break;
#ifdef DIAGNOSTIC
default: /* XXX gcc -Wuninitialized */
- goto we_re_toast;
+ goto dopanic;
#endif
}
- va = trunc_page((vm_offset_t)a0);
-#ifdef NEW_PMAP
- printf("mmfault going to vm_fault\n");
-#endif
- rv = vm_fault(map, va, ftype, FALSE);
-#ifdef NEW_PMAP
- printf("mmfault back from vm_fault\n");
-#endif
+ va = trunc_page((vaddr_t)a0);
+ rv = uvm_fault(map, va, 0, ftype);
/*
* If this was a stack access we keep track of the
* maximum accessed stack size. Also, if vm_fault
@@ -395,28 +500,23 @@ panic("foo");
* we need to reflect that as an access error.
*/
if (map != kernel_map &&
- (caddr_t)va >= vm->vm_maxsaddr) {
+ (caddr_t)va >= vm->vm_maxsaddr &&
+ va < USRSTACK) {
if (rv == KERN_SUCCESS) {
unsigned nss;
- nss = clrnd(btoc(USRSTACK -
- (unsigned long)va));
+ nss = btoc(USRSTACK -
+ (unsigned long)va);
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
} else if (rv == KERN_PROTECTION_FAILURE)
rv = KERN_INVALID_ADDRESS;
}
if (rv == KERN_SUCCESS) {
-#ifdef NEW_PMAP
- printf("mmfault vm_fault success\n");
-#endif
goto out;
}
if (!user) {
-#ifdef NEW_PMAP
- printf("mmfault check copyfault\n");
-#endif
/* Check for copyin/copyout fault */
if (p != NULL &&
p->p_addr->u_pcb.pcb_onfault != 0) {
@@ -425,30 +525,36 @@ panic("foo");
p->p_addr->u_pcb.pcb_onfault = 0;
goto out;
}
- goto we_re_toast;
+ goto dopanic;
}
- v = (caddr_t)a0;
ucode = ftype;
- i = SIGSEGV;
+ v = (caddr_t)a0;
typ = SEGV_MAPERR;
+ if (rv == KERN_RESOURCE_SHORTAGE) {
+ printf("UVM: pid %d (%s), uid %d killed: "
+ "out of swap\n", p->p_pid, p->p_comm,
+ p->p_cred && p->p_ucred ?
+ p->p_ucred->cr_uid : -1);
+ i = SIGKILL;
+ } else {
+ i = SIGSEGV;
+ }
break;
}
default:
printf("trap: unknown MMCSR value 0x%lx\n", a1);
- goto we_re_toast;
+ goto dopanic;
}
break;
default:
- we_re_toast:
-#ifdef DDB
- if (kdb_trap(entry, a0, framep))
- return;
-#endif
goto dopanic;
}
+#ifdef DEBUG
+ printtrap(a0, a1, a2, entry, framep, 1, user);
+#endif
sv.sival_ptr = v;
trapsignal(p, i, ucode, typ, sv);
out:
@@ -457,30 +563,18 @@ out:
return;
dopanic:
- {
- const char *entryname = "???";
-
- if (entry > 0 && entry < trap_types)
- entryname = trap_type[entry];
-
- printf("\n");
- printf("fatal %s trap:\n", user ? "user" : "kernel");
- printf("\n");
- printf(" trap entry = 0x%lx (%s)\n", entry, entryname);
- printf(" a0 = 0x%lx\n", a0);
- printf(" a1 = 0x%lx\n", a1);
- printf(" a2 = 0x%lx\n", a2);
- printf(" pc = 0x%lx\n", framep->tf_regs[FRAME_PC]);
- printf(" ra = 0x%lx\n", framep->tf_regs[FRAME_RA]);
- printf(" curproc = %p\n", curproc);
- if (curproc != NULL)
- printf(" pid = %d, comm = %s\n", curproc->p_pid,
- curproc->p_comm);
- printf("\n");
- }
+ printtrap(a0, a1, a2, entry, framep, 1, user);
/* XXX dump registers */
- /* XXX kernel debugger */
+
+#if defined(DDB)
+ if (call_debugger && alpha_debug(a0, a1, a2, entry, framep)) {
+ /*
+ * The debugger has handled the trap; just return.
+ */
+ goto out;
+ }
+#endif
panic("trap");
}
@@ -516,10 +610,10 @@ syscall(code, framep)
#endif
#if notdef /* can't happen, ever. */
- if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0) {
+ if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0)
panic("syscall");
#endif
- cnt.v_syscall++;
+ uvmexp.syscalls++;
p = curproc;
p->p_md.md_tf = framep;
opc = framep->tf_regs[FRAME_PC] - 4;
@@ -589,9 +683,6 @@ syscall(code, framep)
#endif
#ifdef SYSCALL_DEBUG
scdebug_call(p, code, args + hidden);
-#ifdef NEW_PMAP
- printf("called from 0x%lx, ra 0x%lx\n", framep->tf_regs[FRAME_PC], framep->tf_regs[FRAME_RA]);
-#endif
#endif
if (error == 0) {
rval[0] = 0;
@@ -611,6 +702,8 @@ syscall(code, framep)
case EJUSTRETURN:
break;
default:
+ if (p->p_emul->e_errno)
+ error = p->p_emul->e_errno[error];
framep->tf_regs[FRAME_V0] = error;
framep->tf_regs[FRAME_A3] = 1;
break;
@@ -623,9 +716,6 @@ syscall(code, framep)
p = curproc;
#ifdef SYSCALL_DEBUG
scdebug_ret(p, code, error, rval);
-#ifdef NEW_PMAP
- printf("outgoing pc 0x%lx, ra 0x%lx\n", framep->tf_regs[FRAME_PC], framep->tf_regs[FRAME_RA]);
-#endif
#endif
userret(p, framep->tf_regs[FRAME_PC], sticks);
@@ -639,9 +729,10 @@ syscall(code, framep)
* Process the tail end of a fork() for the child.
*/
void
-child_return(p)
- struct proc *p;
+child_return(arg)
+ void *arg;
{
+ struct proc *p = arg;
/*
* Return values in the frame set by cpu_fork().
@@ -665,6 +756,8 @@ ast(framep)
register struct proc *p;
u_quad_t sticks;
+ curcpu()->ci_astpending = 0;
+
p = curproc;
sticks = p->p_sticks;
p->p_md.md_tf = framep;
@@ -672,9 +765,8 @@ ast(framep)
if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0)
panic("ast and not user");
- cnt.v_soft++;
+ uvmexp.softs++;
- astpending = 0;
if (p->p_flag & P_OWEUPC) {
p->p_flag &= ~P_OWEUPC;
ADDUPROF(p);
@@ -714,21 +806,20 @@ const static int reg_to_framereg[32] = {
}
#define unaligned_load(storage, ptrf, mod) \
- if (copyin((caddr_t)va, &(storage), sizeof (storage)) == 0 && \
- (regptr = ptrf(p, reg)) != NULL) \
- signal = 0; \
- else \
+ if (copyin((caddr_t)va, &(storage), sizeof (storage)) != 0) \
break; \
- *regptr = mod (storage);
+ signal = 0; \
+ if ((regptr = ptrf(p, reg)) != NULL) \
+ *regptr = mod (storage);
#define unaligned_store(storage, ptrf, mod) \
- if ((regptr = ptrf(p, reg)) == NULL) \
- break; \
- (storage) = mod (*regptr); \
- if (copyout(&(storage), (caddr_t)va, sizeof (storage)) == 0) \
- signal = 0; \
+ if ((regptr = ptrf(p, reg)) != NULL) \
+ (storage) = mod (*regptr); \
else \
- break;
+ (storage) = 0; \
+ if (copyout(&(storage), (caddr_t)va, sizeof (storage)) != 0) \
+ break; \
+ signal = 0;
#define unaligned_load_integer(storage) \
unaligned_load(storage, irp, )
@@ -863,38 +954,60 @@ Gfloat_reg_cvt(input)
extern int alpha_unaligned_print, alpha_unaligned_fix;
extern int alpha_unaligned_sigbus;
+struct unaligned_fixup_data {
+ const char *type; /* opcode name */
+ int fixable; /* fixable, 0 if fixup not supported */
+ int size; /* size, 0 if unknown */
+ int acc; /* useracc type; B_READ or B_WRITE */
+};
+
+#define UNKNOWN() { "0x%lx", 0, 0, 0 }
+#define FIX_LD(n,s) { n, 1, s, B_READ }
+#define FIX_ST(n,s) { n, 1, s, B_WRITE }
+#define NOFIX_LD(n,s) { n, 0, s, B_READ }
+#define NOFIX_ST(n,s) { n, 0, s, B_WRITE }
+
int
unaligned_fixup(va, opcode, reg, p)
unsigned long va, opcode, reg;
struct proc *p;
{
- int doprint, dofix, dosigbus;
- int signal, size;
- const char *type;
- unsigned long *regptr, longdata;
- int intdata; /* signed to get extension when storing */
- struct {
- const char *type; /* opcode name */
- int size; /* size, 0 if fixup not supported */
- } tab[0x10] = {
+ const struct unaligned_fixup_data tab_unknown[1] = {
+ UNKNOWN(),
+ };
+ const struct unaligned_fixup_data tab_0c[0x02] = {
+ FIX_LD("ldwu", 2), FIX_ST("stw", 2),
+ };
+ const struct unaligned_fixup_data tab_20[0x10] = {
#ifdef FIX_UNALIGNED_VAX_FP
- { "ldf", 4 }, { "ldg", 8 },
+ FIX_LD("ldf", 4), FIX_LD("ldg", 8),
#else
- { "ldf", 0 }, { "ldg", 0 },
+ NOFIX_LD("ldf", 4), NOFIX_LD("ldg", 8),
#endif
- { "lds", 4 }, { "ldt", 8 },
+ FIX_LD("lds", 4), FIX_LD("ldt", 8),
#ifdef FIX_UNALIGNED_VAX_FP
- { "stf", 4 }, { "stg", 8 },
+ FIX_ST("stf", 4), FIX_ST("stg", 8),
#else
- { "stf", 0 }, { "stg", 0 },
+ NOFIX_ST("stf", 4), NOFIX_ST("stg", 8),
#endif
- { "sts", 4 }, { "stt", 8 },
- { "ldl", 4 }, { "ldq", 8 },
- { "ldl_l", 0 }, { "ldq_l", 0 }, /* can't fix */
- { "stl", 4 }, { "stq", 8 },
- { "stl_c", 0 }, { "stq_c", 0 }, /* can't fix */
+ FIX_ST("sts", 4), FIX_ST("stt", 8),
+ FIX_LD("ldl", 4), FIX_LD("ldq", 8),
+ NOFIX_LD("ldl_c", 4), NOFIX_LD("ldq_c", 8),
+ FIX_ST("stl", 4), FIX_ST("stq", 8),
+ NOFIX_ST("stl_c", 4), NOFIX_ST("stq_c", 8),
};
- int typ;
+ const struct unaligned_fixup_data *selected_tab;
+ int doprint, dofix, dosigbus, signal;
+ unsigned long *regptr, longdata;
+ int intdata; /* signed to get extension when storing */
+ u_int16_t worddata; /* unsigned to _avoid_ extension */
+
+ /*
+ * Read USP into frame in case it's the register to be modified.
+ * This keeps us from having to check for it in lots of places
+ * later.
+ */
+ p->p_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();
/*
* Figure out what actions to take.
@@ -910,20 +1023,24 @@ unaligned_fixup(va, opcode, reg, p)
* Find out which opcode it is. Arrange to have the opcode
* printed if it's an unknown opcode.
*/
- if (opcode >= 0x20 && opcode <= 0x2f) {
- type = tab[opcode - 0x20].type;
- size = tab[opcode - 0x20].size;
- } else {
- type = "0x%lx";
- size = 0;
- }
+ if (opcode >= 0x0c && opcode <= 0x0d)
+ selected_tab = &tab_0c[opcode - 0x0c];
+ else if (opcode >= 0x20 && opcode <= 0x2f)
+ selected_tab = &tab_20[opcode - 0x20];
+ else
+ selected_tab = tab_unknown;
/*
* See if the user can access the memory in question.
- * Even if it's an unknown opcode, SEGV if the access
- * should have failed.
+ * If it's an unknown opcode, we don't know whether to
+ * read or write, so we don't check.
+ *
+ * We adjust the PC backwards so that the instruction will
+ * be re-run.
*/
- if (!useracc((caddr_t)va, size ? size : 1, B_WRITE)) {
+ if (selected_tab->size != 0 &&
+ !uvm_useracc((caddr_t)va, selected_tab->size, selected_tab->acc)) {
+ p->p_md.md_tf->tf_regs[FRAME_PC] -= 4;
signal = SIGSEGV;
goto out;
}
@@ -932,10 +1049,12 @@ unaligned_fixup(va, opcode, reg, p)
* If we're supposed to be noisy, squawk now.
*/
if (doprint) {
- uprintf("pid %d (%s): unaligned access: va=0x%lx pc=0x%lx ra=0x%lx op=",
- p->p_pid, p->p_comm, va, p->p_md.md_tf->tf_regs[FRAME_PC],
- p->p_md.md_tf->tf_regs[FRAME_PC]);
- uprintf(type, opcode);
+ uprintf(
+ "pid %d (%s): unaligned access: va=0x%lx pc=0x%lx ra=0x%lx op=",
+ p->p_pid, p->p_comm, va,
+ p->p_md.md_tf->tf_regs[FRAME_PC] - 4,
+ p->p_md.md_tf->tf_regs[FRAME_RA]);
+ uprintf(selected_tab->type,opcode);
uprintf("\n");
}
@@ -952,9 +1071,18 @@ unaligned_fixup(va, opcode, reg, p)
* unaligned_{load,store}_* clears the signal flag.
*/
signal = SIGBUS;
- typ = BUS_ADRALN;
- if (dofix && size != 0) {
+ if (dofix && selected_tab->fixable) {
switch (opcode) {
+ case 0x0c: /* ldwu */
+ /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
+ unaligned_load_integer(worddata);
+ break;
+
+ case 0x0d: /* stw */
+ /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
+ unaligned_store_integer(worddata);
+ break;
+
#ifdef FIX_UNALIGNED_VAX_FP
case 0x20: /* ldf */
unaligned_load_floating(intdata, Ffloat_to_reg);
@@ -1021,5 +1149,172 @@ unaligned_fixup(va, opcode, reg, p)
signal = SIGBUS;
out:
+ /*
+ * Write back USP.
+ */
+ alpha_pal_wrusp(p->p_md.md_tf->tf_regs[FRAME_SP]);
+
return (signal);
}
+
+/*
+ * Reserved/unimplemented instruction (opDec fault) handler
+ *
+ * Argument is the process that caused it. No useful information
+ * is passed to the trap handler other than the fault type. The
+ * address of the instruction that caused the fault is 4 less than
+ * the PC stored in the trap frame.
+ *
+ * If the instruction is emulated successfully, this function returns 0.
+ * Otherwise, this function returns the signal to deliver to the process,
+ * and fills in *ucodep with the code to be delivered.
+ */
+int
+handle_opdec(p, ucodep)
+ struct proc *p;
+ u_int64_t *ucodep;
+{
+ alpha_instruction inst;
+ register_t *regptr, memaddr;
+ u_int64_t inst_pc;
+ int sig;
+
+ /*
+ * Read USP into frame in case it's going to be used or modified.
+ * This keeps us from having to check for it in lots of places
+ * later.
+ */
+ p->p_md.md_tf->tf_regs[FRAME_SP] = alpha_pal_rdusp();
+
+ inst_pc = memaddr = p->p_md.md_tf->tf_regs[FRAME_PC] - 4;
+ if (copyin((caddr_t)inst_pc, &inst, sizeof (inst)) != 0) {
+ /*
+ * really, this should never happen, but in case it
+ * does we handle it.
+ */
+ printf("WARNING: handle_opdec() couldn't fetch instruction\n");
+ goto sigsegv;
+ }
+
+ switch (inst.generic_format.opcode) {
+ case op_ldbu:
+ case op_ldwu:
+ case op_stw:
+ case op_stb:
+ regptr = irp(p, inst.mem_format.rb);
+ if (regptr != NULL)
+ memaddr = *regptr;
+ else
+ memaddr = 0;
+ memaddr += inst.mem_format.displacement;
+
+ regptr = irp(p, inst.mem_format.ra);
+
+ if (inst.mem_format.opcode == op_ldwu ||
+ inst.mem_format.opcode == op_stw) {
+ if (memaddr & 0x01) {
+ sig = unaligned_fixup(memaddr,
+ inst.mem_format.opcode,
+ inst.mem_format.ra, p);
+ if (sig)
+ goto unaligned_fixup_sig;
+ break;
+ }
+ }
+
+ if (inst.mem_format.opcode == op_ldbu) {
+ u_int8_t b;
+
+ /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
+ if (copyin((caddr_t)memaddr, &b, sizeof (b)) != 0)
+ goto sigsegv;
+ if (regptr != NULL)
+ *regptr = b;
+ } else if (inst.mem_format.opcode == op_ldwu) {
+ u_int16_t w;
+
+ /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
+ if (copyin((caddr_t)memaddr, &w, sizeof (w)) != 0)
+ goto sigsegv;
+ if (regptr != NULL)
+ *regptr = w;
+ } else if (inst.mem_format.opcode == op_stw) {
+ u_int16_t w;
+
+ /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
+ w = (regptr != NULL) ? *regptr : 0;
+ if (copyout(&w, (caddr_t)memaddr, sizeof (w)) != 0)
+ goto sigsegv;
+ } else if (inst.mem_format.opcode == op_stb) {
+ u_int8_t b;
+
+ /* XXX ONLY WORKS ON LITTLE-ENDIAN ALPHA */
+ b = (regptr != NULL) ? *regptr : 0;
+ if (copyout(&b, (caddr_t)memaddr, sizeof (b)) != 0)
+ goto sigsegv;
+ }
+ break;
+
+ case op_intmisc:
+ if (inst.operate_generic_format.function == op_sextb &&
+ inst.operate_generic_format.ra == 31) {
+ int8_t b;
+
+ if (inst.operate_generic_format.is_lit) {
+ b = inst.operate_lit_format.literal;
+ } else {
+ if (inst.operate_reg_format.sbz != 0)
+ goto sigill;
+ regptr = irp(p, inst.operate_reg_format.rb);
+ b = (regptr != NULL) ? *regptr : 0;
+ }
+
+ regptr = irp(p, inst.operate_generic_format.rc);
+ if (regptr != NULL)
+ *regptr = b;
+ break;
+ }
+ if (inst.operate_generic_format.function == op_sextw &&
+ inst.operate_generic_format.ra == 31) {
+ int16_t w;
+
+ if (inst.operate_generic_format.is_lit) {
+ w = inst.operate_lit_format.literal;
+ } else {
+ if (inst.operate_reg_format.sbz != 0)
+ goto sigill;
+ regptr = irp(p, inst.operate_reg_format.rb);
+ w = (regptr != NULL) ? *regptr : 0;
+ }
+
+ regptr = irp(p, inst.operate_generic_format.rc);
+ if (regptr != NULL)
+ *regptr = w;
+ break;
+ }
+ goto sigill;
+
+ default:
+ goto sigill;
+ }
+
+ /*
+ * Write back USP. Note that in the error cases below,
+ * nothing will have been successfully modified so we don't
+ * have to write it out.
+ */
+ alpha_pal_wrusp(p->p_md.md_tf->tf_regs[FRAME_SP]);
+
+ return (0);
+
+sigill:
+ *ucodep = ALPHA_IF_CODE_OPDEC; /* trap type */
+ return (SIGILL);
+
+sigsegv:
+ sig = SIGSEGV;
+ p->p_md.md_tf->tf_regs[FRAME_PC] = inst_pc; /* re-run instr. */
+unaligned_fixup_sig:
+ *ucodep = memaddr; /* faulting address */
+ return (sig);
+}
diff --git a/sys/arch/alpha/alpha/vm_machdep.c b/sys/arch/alpha/alpha/vm_machdep.c
index 1c9ff9dbdc5..eed04e566b6 100644
--- a/sys/arch/alpha/alpha/vm_machdep.c
+++ b/sys/arch/alpha/alpha/vm_machdep.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.15 2000/06/08 22:25:17 niklas Exp $ */
-/* $NetBSD: vm_machdep.c,v 1.21 1996/11/13 21:13:15 cgd Exp $ */
+/* $NetBSD: vm_machdep.c,v 1.55 2000/03/29 03:49:48 simonb Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -39,15 +38,16 @@
#include <sys/core.h>
#include <sys/exec.h>
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+
+#include <uvm/uvm_extern.h>
+
#include <machine/cpu.h>
+#include <machine/alpha.h>
#include <machine/pmap.h>
#include <machine/reg.h>
-#include <vm/vm.h>
-#include <vm/vm_kern.h>
-
-extern void exception_return __P((void));
-extern void child_return __P((struct proc *));
/*
* Dump the machine specific header information at the start of a core dump.
@@ -62,7 +62,6 @@ cpu_coredump(p, vp, cred, chdr)
int error;
struct md_coredump cpustate;
struct coreseg cseg;
- extern struct proc *fpcurproc;
CORE_SETMAGIC(*chdr, COREMAGIC, MID_ALPHA, 0);
chdr->c_hdrsize = ALIGN(sizeof(*chdr));
@@ -103,20 +102,25 @@ cpu_coredump(p, vp, cred, chdr)
/*
* cpu_exit is called as the last action during exit.
- * We release the address space of the process, block interrupts,
- * and call switch_exit. switch_exit switches to proc0's PCB and stack,
- * then jumps into the middle of cpu_switch, as if it were switching
- * from proc0.
+ * We block interrupts and call switch_exit. switch_exit switches
+ * to proc0's PCB and stack, then jumps into the middle of cpu_switch,
+ * as if it were switching from proc0.
*/
void
cpu_exit(p)
struct proc *p;
{
- extern struct proc *fpcurproc;
if (p == fpcurproc)
fpcurproc = NULL;
+ /*
+ * Deactivate the exiting address space before the vmspace
+ * is freed. Note that we will continue to run on this
+ * vmspace's context until the switch to proc0 in switch_exit().
+ */
+ pmap_deactivate(p);
+
(void) splhigh();
switch_exit(p);
/* NOTREACHED */
@@ -124,12 +128,20 @@ cpu_exit(p)
/*
* Finish a fork operation, with process p2 nearly set up.
- * Copy and update the kernel stack and pcb, making the child
- * ready to run, and marking it so that it can return differently
- * than the parent. Returns 1 in the child process, 0 in the parent.
- * We currently double-map the user area so that the stack is at the same
- * address in each process; in the future we will probably relocate
- * the frame pointers on the stack after copying.
+ * Copy and update the pcb and trap frame, making the child ready to run.
+ *
+ * Rig the child's kernel stack so that it will start out in
+ * switch_trampoline() and call child_return() with p2 as an
+ * argument. This causes the newly-created child process to go
+ * directly to user level with an apparent return value of 0 from
+ * fork(), while the parent process returns normally.
+ *
+ * p1 is the process being forked; if p1 == &proc0, we are creating
+ * a kernel thread, and the return path will later be changed in cpu_set_kpc.
+ *
+ * If an alternate user-level stack is requested (with non-zero values
+ * in both the stack and stacksize args), set up the user stack pointer
+ * accordingly.
*/
void
cpu_fork(p1, p2, stack, stacksize)
@@ -138,9 +150,6 @@ cpu_fork(p1, p2, stack, stacksize)
size_t stacksize;
{
struct user *up = p2->p_addr;
- pt_entry_t *ptep;
- int i;
- extern struct proc *fpcurproc;
p2->p_md.md_tf = p1->p_md.md_tf;
p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;
@@ -149,25 +158,7 @@ cpu_fork(p1, p2, stack, stacksize)
* Cache the physical address of the pcb, so we can
* swap to it easily.
*/
-#ifndef NEW_PMAP
- ptep = kvtopte(up);
- p2->p_md.md_pcbpaddr =
- &((struct user *)(PG_PFNUM(*ptep) << PGSHIFT))->u_pcb;
-#else
- p2->p_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)&up->u_pcb);
- printf("process %d pcbpaddr = 0x%lx, pmap = %p\n",
- p2->p_pid, p2->p_md.md_pcbpaddr,&p2->p_vmspace->vm_map.pmap);
-#endif
-
- /*
- * Simulate a write to the process's U-area pages,
- * so that the system doesn't lose badly.
- * (If this isn't done, the kernel can't read or
- * write the kernel stack. "Ouch!")
- */
- for (i = 0; i < UPAGES; i++)
- pmap_emulate_reference(p2, (vm_offset_t)up + i * PAGE_SIZE,
- 0, 1);
+ p2->p_md.md_pcbpaddr = (void *)vtophys((vaddr_t)&up->u_pcb);
/*
* Copy floating point state from the FP chip to the PCB
@@ -186,11 +177,6 @@ cpu_fork(p1, p2, stack, stacksize)
*/
p2->p_addr->u_pcb = p1->p_addr->u_pcb;
p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();
-#ifndef NEW_PMAP
- PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, 0);
-#else
-printf("NEW PROCESS %d USP = %p\n", p2->p_pid, p2->p_addr->u_pcb.pcb_hw.apcb_usp);
-#endif
/*
* Arrange for a non-local goto when the new process
@@ -223,9 +209,6 @@ printf("NEW PROCESS %d USP = %p\n", p2->p_pid, p2->p_addr->u_pcb.pcb_hw.apcb_usp
bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
sizeof(struct trapframe));
-#ifdef NEW_PMAP
-printf("FORK CHILD: pc = %p, ra = %p\n", p2tf->tf_regs[FRAME_PC], p2tf->tf_regs[FRAME_RA]);
-#endif
/*
* Set up return-value registers as fork() libc stub expects.
*/
@@ -263,7 +246,7 @@ printf("FORK CHILD: pc = %p, ra = %p\n", p2tf->tf_regs[FRAME_PC], p2tf->tf_regs[
*
* Arrange for in-kernel execution of a process to continue at the
* named pc, as if the code at that address were called as a function
- * with argument, the current process' process pointer.
+ * with argument, the current process's process pointer.
*
* Note that it's assumed that when the named process returns,
* exception_return() should be invoked, to return to user mode.
@@ -280,8 +263,8 @@ cpu_set_kpc(p, pc, arg)
pcbp = &p->p_addr->u_pcb;
pcbp->pcb_context[0] = (u_int64_t)pc; /* s0 - pc to invoke */
- pcbp->pcb_context[1] = (u_int64_t)exception_return;
- /* s1 - return address */
+ pcbp->pcb_context[1] =
+ (u_int64_t)exception_return; /* s1 - return address */
pcbp->pcb_context[2] = (u_int64_t)arg; /* s2 - arg */
pcbp->pcb_context[7] =
(u_int64_t)switch_trampoline; /* ra - assembly magic */
@@ -297,30 +280,12 @@ cpu_swapin(p)
register struct proc *p;
{
struct user *up = p->p_addr;
- pt_entry_t *ptep;
- int i;
/*
* Cache the physical address of the pcb, so we can swap to
* it easily.
*/
-#ifndef NEW_PMAP
- ptep = kvtopte(up);
- p->p_md.md_pcbpaddr =
- &((struct user *)(PG_PFNUM(*ptep) << PGSHIFT))->u_pcb;
-#else
- p->p_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)&up->u_pcb);
-#endif
-
- /*
- * Simulate a write to the process's U-area pages,
- * so that the system doesn't lose badly.
- * (If this isn't done, the kernel can't read or
- * write the kernel stack. "Ouch!")
- */
- for (i = 0; i < UPAGES; i++)
- pmap_emulate_reference(p, (vm_offset_t)up + i * PAGE_SIZE,
- 0, 1);
+ p->p_md.md_pcbpaddr = (void *)vtophys((vaddr_t)&up->u_pcb);
}
/*
@@ -334,7 +299,6 @@ void
cpu_swapout(p)
struct proc *p;
{
- extern struct proc *fpcurproc;
if (p != fpcurproc)
return;
@@ -347,78 +311,71 @@ cpu_swapout(p)
/*
* Move pages from one kernel virtual address to another.
- * Both addresses are assumed to reside in the Sysmap,
+ * Both addresses are assumed to have valid page table pages
* and size must be a multiple of CLSIZE.
+ *
+ * Note that since all kernel page table pages are pre-allocated
+ * and mapped in, we can use the Virtual Page Table.
*/
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
- register pt_entry_t *fpte, *tpte;
+ long fidx, tidx;
ssize_t todo;
- if (size % CLBYTES)
+ if (size % NBPG)
panic("pagemove");
-#ifndef NEW_PMAP
- fpte = kvtopte(from);
- tpte = kvtopte(to);
-#else
- fpte = pmap_pte(kernel_pmap, (vm_offset_t)from);
- tpte = pmap_pte(kernel_pmap, (vm_offset_t)to);
-#endif
+
todo = size; /* if testing > 0, need sign... */
while (todo > 0) {
- ALPHA_TBIS((vm_offset_t)from);
- *tpte++ = *fpte;
- *fpte = 0;
- fpte++;
+ fidx = VPT_INDEX(from);
+ tidx = VPT_INDEX(to);
+
+ VPT[tidx] = VPT[fidx];
+ VPT[fidx] = 0;
+
+ ALPHA_TBIS((vaddr_t)from);
+ ALPHA_TBIS((vaddr_t)to);
+
+#if defined(MULTIPROCESSOR) && 0
+ pmap_tlb_shootdown(pmap_kernel(), (vaddr_t)from, PG_ASM);
+ pmap_tlb_shootdown(pmap_kernel(), (vaddr_t)to, PG_ASM);
+#endif
+
todo -= NBPG;
from += NBPG;
to += NBPG;
}
}
-extern vm_map_t phys_map;
-
/*
- * Map an IO request into kernel virtual address space. Requests fall into
- * one of five catagories:
- *
- * B_PHYS|B_UAREA: User u-area swap.
- * Address is relative to start of u-area (p_addr).
- * B_PHYS|B_PAGET: User page table swap.
- * Address is a kernel VA in usrpt (Usrptmap).
- * B_PHYS|B_DIRTY: Dirty page push.
- * Address is a VA in proc2's address space.
- * B_PHYS|B_PGIN: Kernel pagein of user pages.
- * Address is VA in user's address space.
- * B_PHYS: User "raw" IO request.
- * Address is VA in user's address space.
- *
- * All requests are (re)mapped into kernel VA space via the useriomap
- * (a name with only slightly more meaning than "kernelmap")
+ * Map a user I/O request into kernel virtual address space.
+ * Note: the pages are already locked by uvm_vslock(), so we
+ * do not need to pass an access_type to pmap_enter().
*/
void
vmapbuf(bp, len)
struct buf *bp;
- vm_size_t len;
+ vsize_t len;
{
- vm_offset_t faddr, taddr, off, pa;
+ vaddr_t faddr, taddr, off;
+ paddr_t pa;
struct proc *p;
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
p = bp->b_proc;
- faddr = trunc_page(bp->b_saveaddr = bp->b_data);
- off = (vm_offset_t)bp->b_data - faddr;
+ faddr = trunc_page((vaddr_t)bp->b_saveaddr = bp->b_data);
+ off = (vaddr_t)bp->b_data - faddr;
len = round_page(off + len);
- taddr = kmem_alloc_wait(phys_map, len);
+ taddr = uvm_km_valloc_wait(phys_map, len);
bp->b_data = (caddr_t)(taddr + off);
len = atop(len);
while (len--) {
- pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr);
- if (pa == 0)
+ if ((pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
+ faddr)) == 0)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
VM_PROT_READ|VM_PROT_WRITE, TRUE, 0);
@@ -428,22 +385,21 @@ vmapbuf(bp, len)
}
/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
+ * Unmap a previously-mapped user I/O request.
*/
void
vunmapbuf(bp, len)
struct buf *bp;
- vm_size_t len;
+ vsize_t len;
{
- vm_offset_t addr, off;
+ vaddr_t addr, off;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
- addr = trunc_page(bp->b_data);
- off = (vm_offset_t)bp->b_data - addr;
+ addr = trunc_page((vaddr_t)bp->b_data);
+ off = (vaddr_t)bp->b_data - addr;
len = round_page(off + len);
- kmem_free_wakeup(phys_map, addr, len);
+ uvm_km_free_wakeup(phys_map, addr, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
}
diff --git a/sys/arch/alpha/common/bus_dma.c b/sys/arch/alpha/common/bus_dma.c
new file mode 100644
index 00000000000..e46619191f2
--- /dev/null
+++ b/sys/arch/alpha/common/bus_dma.c
@@ -0,0 +1,673 @@
+/* $NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _ALPHA_BUS_DMA_PRIVATE
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/mbuf.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+int _bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
+ bus_dmamap_t, void *, bus_size_t, struct proc *, int,
+ paddr_t *, int *, int));
+
+extern paddr_t avail_start, avail_end; /* from pmap.c */
+
+/*
+ * Common function for DMA map creation. May be called by bus-specific
+ * DMA map creation functions.
+ */
+int
+_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
+ bus_dma_tag_t t;
+ bus_size_t size;
+ int nsegments;
+ bus_size_t maxsegsz;
+ bus_size_t boundary;
+ int flags;
+ bus_dmamap_t *dmamp;
+{
+ struct alpha_bus_dmamap *map;
+ void *mapstore;
+ size_t mapsize;
+
+ /*
+ * Allocate and initialize the DMA map. The end of the map
+ * is a variable-sized array of segments, so we allocate enough
+ * room for them in one shot.
+ *
+ * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
+ * of ALLOCNOW notifies others that we've reserved these resources,
+ * and they are not to be freed.
+ *
+ * The bus_dmamap_t includes one bus_dma_segment_t, hence
+ * the (nsegments - 1).
+ */
+ mapsize = sizeof(struct alpha_bus_dmamap) +
+ (sizeof(bus_dma_segment_t) * (nsegments - 1));
+ if ((mapstore = malloc(mapsize, M_DEVBUF,
+ (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
+ return (ENOMEM);
+
+ bzero(mapstore, mapsize);
+ map = (struct alpha_bus_dmamap *)mapstore;
+ map->_dm_size = size;
+ map->_dm_segcnt = nsegments;
+ map->_dm_maxsegsz = maxsegsz;
+ if (t->_boundary != 0 && t->_boundary < boundary)
+ map->_dm_boundary = t->_boundary;
+ else
+ map->_dm_boundary = boundary;
+ map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
+ map->dm_mapsize = 0; /* no valid mappings */
+ map->dm_nsegs = 0;
+
+ *dmamp = map;
+ return (0);
+}
+
+/*
+ * Common function for DMA map destruction. May be called by bus-specific
+ * DMA map destruction functions.
+ */
+void
+_bus_dmamap_destroy(t, map)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+{
+
+ free(map, M_DEVBUF);
+}
+
+/*
+ * Utility function to load a linear buffer. lastaddrp holds state
+ * between invocations (for multiple-buffer loads). segp contains
+ * the starting segment on entrance, and the ending segment on exit.
+ * first indicates if this is the first invocation of this function.
+ */
+int
+_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
+ lastaddrp, segp, first)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ void *buf;
+ bus_size_t buflen;
+ struct proc *p;
+ int flags;
+ paddr_t *lastaddrp;
+ int *segp;
+ int first;
+{
+ bus_size_t sgsize;
+ bus_addr_t curaddr, lastaddr, baddr, bmask;
+ vaddr_t vaddr = (vaddr_t)buf;
+ int seg;
+
+ lastaddr = *lastaddrp;
+ bmask = ~(map->_dm_boundary - 1);
+
+ for (seg = *segp; buflen > 0 ; ) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (p != NULL)
+ curaddr = pmap_extract(p->p_vmspace->vm_map.pmap,
+ vaddr);
+ else
+ curaddr = vtophys(vaddr);
+
+ /*
+ * If we're beyond the current DMA window, indicate
+ * that and try to fall back into SGMAPs.
+ */
+ if (t->_wsize != 0 && curaddr >= t->_wsize)
+ return (EINVAL);
+
+ curaddr |= t->_wbase;
+
+ /*
+ * Compute the segment size, and adjust counts.
+ */
+ sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+ if (buflen < sgsize)
+ sgsize = buflen;
+ if (map->_dm_maxsegsz < sgsize)
+ sgsize = map->_dm_maxsegsz;
+
+ /*
+ * Make sure we don't cross any boundaries.
+ */
+ if (map->_dm_boundary > 0) {
+ baddr = (curaddr + map->_dm_boundary) & bmask;
+ if (sgsize > (baddr - curaddr))
+ sgsize = (baddr - curaddr);
+ }
+
+ /*
+ * Insert chunk into a segment, coalescing with
+ * the previous segment if possible.
+ */
+ if (first) {
+ map->dm_segs[seg].ds_addr = curaddr;
+ map->dm_segs[seg].ds_len = sgsize;
+ first = 0;
+ } else {
+ if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
+ curaddr == lastaddr &&
+ (map->dm_segs[seg].ds_len + sgsize) <=
+ map->_dm_maxsegsz &&
+ (map->_dm_boundary == 0 ||
+ (map->dm_segs[seg].ds_addr & bmask) ==
+ (curaddr & bmask)))
+ map->dm_segs[seg].ds_len += sgsize;
+ else {
+ if (++seg >= map->_dm_segcnt)
+ break;
+ map->dm_segs[seg].ds_addr = curaddr;
+ map->dm_segs[seg].ds_len = sgsize;
+ }
+ }
+
+ lastaddr = curaddr + sgsize;
+ vaddr += sgsize;
+ buflen -= sgsize;
+ }
+
+ *segp = seg;
+ *lastaddrp = lastaddr;
+
+ /*
+ * Did we fit?
+ */
+ if (buflen != 0) {
+ /*
+ * If there is a chained window, we will automatically
+ * fall back to it.
+ */
+ return (EFBIG); /* XXX better return value here? */
+ }
+
+ return (0);
+}
+
+/*
+ * Common function for loading a direct-mapped DMA map with a linear
+ * buffer. Called by bus-specific DMA map load functions with the
+ * OR value appropriate for indicating "direct-mapped" for that
+ * chipset.
+ */
+int
+_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ void *buf;
+ bus_size_t buflen;
+ struct proc *p;
+ int flags;
+{
+ paddr_t lastaddr;
+ int seg, error;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ if (buflen > map->_dm_size)
+ return (EINVAL);
+
+ seg = 0;
+ error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
+ p, flags, &lastaddr, &seg, 1);
+ if (error == 0) {
+ map->dm_mapsize = buflen;
+ map->dm_nsegs = seg + 1;
+ } else if (t->_next_window != NULL) {
+ /*
+ * Give the next window a chance.
+ */
+ error = bus_dmamap_load(t->_next_window, map, buf, buflen,
+ p, flags);
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load_direct_common(), but for mbufs.
+ */
+int
+_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct mbuf *m0;
+ int flags;
+{
+ paddr_t lastaddr;
+ int seg, error, first;
+ struct mbuf *m;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings."
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+#ifdef DIAGNOSTIC
+ if ((m0->m_flags & M_PKTHDR) == 0)
+ panic("_bus_dmamap_load_mbuf_direct_common: no packet header");
+#endif
+
+ if (m0->m_pkthdr.len > map->_dm_size)
+ return (EINVAL);
+
+ first = 1;
+ seg = 0;
+ error = 0;
+ for (m = m0; m != NULL && error == 0; m = m->m_next) {
+ error = _bus_dmamap_load_buffer_direct_common(t, map,
+ m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
+ first = 0;
+ }
+ if (error == 0) {
+ map->dm_mapsize = m0->m_pkthdr.len;
+ map->dm_nsegs = seg + 1;
+ } else if (t->_next_window != NULL) {
+ /*
+ * Give the next window a chance.
+ */
+ error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load_direct_common(), but for uios.
+ */
+int
+_bus_dmamap_load_uio_direct(t, map, uio, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct uio *uio;
+ int flags;
+{
+ paddr_t lastaddr;
+ int seg, i, error, first;
+ bus_size_t minlen, resid;
+ struct proc *p = NULL;
+ struct iovec *iov;
+ caddr_t addr;
+
+ /*
+ * Make sure that on error condition we return "no valid mappings."
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ resid = uio->uio_resid;
+ iov = uio->uio_iov;
+
+ if (uio->uio_segflg == UIO_USERSPACE) {
+ p = uio->uio_procp;
+#ifdef DIAGNOSTIC
+ if (p == NULL)
+ panic("_bus_dmamap_load_direct_common: USERSPACE but no proc");
+#endif
+ }
+
+ first = 1;
+ seg = 0;
+ error = 0;
+ for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
+ /*
+ * Now at the first iovec to load. Load each iovec
+ * until we have exhausted the residual count.
+ */
+ minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
+ addr = (caddr_t)iov[i].iov_base;
+
+ error = _bus_dmamap_load_buffer_direct_common(t, map,
+ addr, minlen, p, flags, &lastaddr, &seg, first);
+ first = 0;
+
+ resid -= minlen;
+ }
+ if (error == 0) {
+ map->dm_mapsize = uio->uio_resid;
+ map->dm_nsegs = seg + 1;
+ } else if (t->_next_window != NULL) {
+ /*
+ * Give the next window a chance.
+ */
+ error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
+ }
+ return (error);
+}
+
+/*
+ * Like _bus_dmamap_load_direct_common(), but for raw memory.
+ */
+int
+_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ bus_size_t size;
+ int flags;
+{
+
+ panic("_bus_dmamap_load_raw_direct: not implemented");
+}
+
+/*
+ * Common function for unloading a DMA map. May be called by
+ * chipset-specific DMA map unload functions.
+ */
+void
+_bus_dmamap_unload(t, map)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+{
+
+ /*
+ * No resources to free; just mark the mappings as
+ * invalid.
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+}
+
+/*
+ * Common function for DMA map synchronization. May be called
+ * by chipset-specific DMA map synchronization functions.
+ */
+void
+_bus_dmamap_sync(t, map, op)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ bus_dmasync_op_t op;
+{
+
+ /*
+ * Flush the store buffer.
+ */
+ alpha_mb();
+}
+
+/*
+ * Common function for DMA-safe memory allocation. May be called
+ * by bus-specific DMA memory allocation functions.
+ */
+int
+_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
+ bus_dma_tag_t t;
+ bus_size_t size, alignment, boundary;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ int *rsegs;
+ int flags;
+{
+
+ return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
+ segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
+}
+
+/*
+ * Allocate physical memory from the given physical address range.
+ * Called by DMA-safe memory allocation methods.
+ */
+int
+_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
+ flags, low, high)
+ bus_dma_tag_t t;
+ bus_size_t size, alignment, boundary;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ int *rsegs;
+ int flags;
+ paddr_t low;
+ paddr_t high;
+{
+ paddr_t curaddr, lastaddr;
+ vm_page_t m;
+ struct pglist mlist;
+ int curseg, error;
+
+ /* Always round the size. */
+ size = round_page(size);
+
+ high = avail_end - PAGE_SIZE;
+
+ /*
+ * Allocate pages from the VM system.
+ */
+ TAILQ_INIT(&mlist);
+ error = uvm_pglistalloc(size, low, high, alignment, boundary,
+ &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
+ if (error)
+ return (error);
+
+ /*
+ * Compute the location, size, and number of segments actually
+ * returned by the VM code.
+ */
+ m = mlist.tqh_first;
+ curseg = 0;
+ lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
+ segs[curseg].ds_len = PAGE_SIZE;
+ m = m->pageq.tqe_next;
+
+ for (; m != NULL; m = m->pageq.tqe_next) {
+ curaddr = VM_PAGE_TO_PHYS(m);
+#ifdef DIAGNOSTIC
+ if (curaddr < avail_start || curaddr >= high) {
+ printf("vm_page_alloc_memory returned non-sensical"
+ " address 0x%lx\n", curaddr);
+ panic("_bus_dmamem_alloc");
+ }
+#endif
+ if (curaddr == (lastaddr + PAGE_SIZE))
+ segs[curseg].ds_len += PAGE_SIZE;
+ else {
+ curseg++;
+ segs[curseg].ds_addr = curaddr;
+ segs[curseg].ds_len = PAGE_SIZE;
+ }
+ lastaddr = curaddr;
+ }
+
+ *rsegs = curseg + 1;
+
+ return (0);
+}
+
+/*
+ * Common function for freeing DMA-safe memory. May be called by
+ * bus-specific DMA memory free functions.
+ */
+void
+_bus_dmamem_free(t, segs, nsegs)
+ bus_dma_tag_t t;
+ bus_dma_segment_t *segs;
+ int nsegs;
+{
+ vm_page_t m;
+ bus_addr_t addr;
+ struct pglist mlist;
+ int curseg;
+
+ /*
+ * Build a list of pages to free back to the VM system.
+ */
+ TAILQ_INIT(&mlist);
+ for (curseg = 0; curseg < nsegs; curseg++) {
+ for (addr = segs[curseg].ds_addr;
+ addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+ addr += PAGE_SIZE) {
+ m = PHYS_TO_VM_PAGE(addr);
+ TAILQ_INSERT_TAIL(&mlist, m, pageq);
+ }
+ }
+
+ uvm_pglistfree(&mlist);
+}
+
+/*
+ * Common function for mapping DMA-safe memory. May be called by
+ * bus-specific DMA memory map functions.
+ */
+int
+_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
+ bus_dma_tag_t t;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ size_t size;
+ caddr_t *kvap;
+ int flags;
+{
+ vaddr_t va;
+ bus_addr_t addr;
+ int curseg;
+
+ /*
+ * If we're only mapping 1 segment, use K0SEG, to avoid
+ * TLB thrashing.
+ */
+ if (nsegs == 1) {
+ *kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
+ return (0);
+ }
+
+ size = round_page(size);
+
+ va = uvm_km_valloc(kernel_map, size);
+
+ if (va == 0)
+ return (ENOMEM);
+
+ *kvap = (caddr_t)va;
+
+ for (curseg = 0; curseg < nsegs; curseg++) {
+ for (addr = segs[curseg].ds_addr;
+ addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+ addr += NBPG, va += NBPG, size -= NBPG) {
+ if (size == 0)
+ panic("_bus_dmamem_map: size botch");
+ pmap_enter(pmap_kernel(), va, addr,
+ VM_PROT_READ | VM_PROT_WRITE, 1,
+ VM_PROT_READ | VM_PROT_WRITE);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Common function for unmapping DMA-safe memory. May be called by
+ * bus-specific DMA memory unmapping functions.
+ */
+void
+_bus_dmamem_unmap(t, kva, size)
+ bus_dma_tag_t t;
+ caddr_t kva;
+ size_t size;
+{
+
+#ifdef DIAGNOSTIC
+ if ((u_long)kva & PGOFSET)
+ panic("_bus_dmamem_unmap");
+#endif
+
+ /*
+ * Nothing to do if we mapped it with K0SEG.
+ */
+ if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
+ kva <= (caddr_t)ALPHA_K0SEG_END)
+ return;
+
+ size = round_page(size);
+ uvm_km_free(kernel_map, (vaddr_t)kva, size);
+}
+
+/*
+ * Common function for mmap(2)'ing DMA-safe memory. May be called by
+ * bus-specific DMA mmap(2)'ing functions.
+ */
+paddr_t
+_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
+ bus_dma_tag_t t;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ off_t off;
+ int prot, flags;
+{
+ int i;
+
+ for (i = 0; i < nsegs; i++) {
+#ifdef DIAGNOSTIC
+ if (off & PGOFSET)
+ panic("_bus_dmamem_mmap: offset unaligned");
+ if (segs[i].ds_addr & PGOFSET)
+ panic("_bus_dmamem_mmap: segment unaligned");
+ if (segs[i].ds_len & PGOFSET)
+ panic("_bus_dmamem_mmap: segment size not multiple"
+ " of page size");
+#endif
+ if (off >= segs[i].ds_len) {
+ off -= segs[i].ds_len;
+ continue;
+ }
+
+ return (alpha_btop((caddr_t)segs[i].ds_addr + off));
+ }
+
+ /* Page not found. */
+ return (-1);
+}
diff --git a/sys/arch/alpha/common/sgmap_common.c b/sys/arch/alpha/common/sgmap_common.c
new file mode 100644
index 00000000000..b3de09bb579
--- /dev/null
+++ b/sys/arch/alpha/common/sgmap_common.c
@@ -0,0 +1,224 @@
+/* $NetBSD: sgmap_common.c,v 1.13 2000/06/29 09:02:57 mrg Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+
+#include <alpha/common/sgmapvar.h>
+
+/*
+ * Some systems will prefetch the next page during a memory -> device DMA.
+ * This can cause machine checks if there is not a spill page after the
+ * last page of the DMA (thus avoiding hitting an invalid SGMAP PTE).
+ */
+vaddr_t alpha_sgmap_prefetch_spill_page_va;
+bus_addr_t alpha_sgmap_prefetch_spill_page_pa;
+
+void
+alpha_sgmap_init(t, sgmap, name, wbase, sgvabase, sgvasize, ptesize, ptva,
+ minptalign)
+ bus_dma_tag_t t;
+ struct alpha_sgmap *sgmap;
+ const char *name;
+ bus_addr_t wbase;
+ bus_addr_t sgvabase;
+ bus_size_t sgvasize;
+ size_t ptesize;
+ void *ptva;
+ bus_size_t minptalign;
+{
+ bus_dma_segment_t seg;
+ size_t ptsize;
+ int rseg;
+
+ if (sgvasize & PGOFSET) {
+ printf("size botch for sgmap `%s'\n", name);
+ goto die;
+ }
+
+ sgmap->aps_wbase = wbase;
+ sgmap->aps_sgvabase = sgvabase;
+ sgmap->aps_sgvasize = sgvasize;
+
+ if (ptva != NULL) {
+ /*
+ * We already have a page table; this may be a system
+ * where the page table resides in bridge-resident SRAM.
+ */
+ sgmap->aps_pt = ptva;
+ sgmap->aps_ptpa = 0;
+ } else {
+ /*
+ * Compute the page table size and allocate it. At minimum,
+ * this must be aligned to the page table size. However,
+ * some platforms have more strict alignment requirements.
+ */
+ ptsize = (sgvasize / NBPG) * ptesize;
+ if (minptalign != 0) {
+ if (minptalign < ptsize)
+ minptalign = ptsize;
+ } else
+ minptalign = ptsize;
+ if (bus_dmamem_alloc(t, ptsize, minptalign, 0, &seg, 1, &rseg,
+ BUS_DMA_NOWAIT)) {
+ panic("unable to allocate page table for sgmap `%s'\n",
+ name);
+ goto die;
+ }
+ sgmap->aps_ptpa = seg.ds_addr;
+ sgmap->aps_pt = (caddr_t)ALPHA_PHYS_TO_K0SEG(sgmap->aps_ptpa);
+ }
+
+ /*
+ * Create the extent map used to manage the virtual address
+ * space.
+ */
+ sgmap->aps_ex = extent_create((char *)name, sgvabase, sgvasize - 1,
+ M_DEVBUF, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
+ if (sgmap->aps_ex == NULL) {
+ printf("unable to create extent map for sgmap `%s'\n",
+ name);
+ goto die;
+ }
+
+ /*
+ * Allocate a spill page if that hasn't already been done.
+ */
+ if (alpha_sgmap_prefetch_spill_page_va == 0) {
+ if (bus_dmamem_alloc(t, NBPG, 0, 0, &seg, 1, &rseg,
+ BUS_DMA_NOWAIT)) {
+ printf("unable to allocate spill page for sgmap `%s'\n",
+ name);
+ goto die;
+ }
+ alpha_sgmap_prefetch_spill_page_pa = seg.ds_addr;
+ alpha_sgmap_prefetch_spill_page_va =
+ ALPHA_PHYS_TO_K0SEG(alpha_sgmap_prefetch_spill_page_pa);
+ bzero((caddr_t)alpha_sgmap_prefetch_spill_page_va, NBPG);
+ }
+
+ return;
+ die:
+ panic("alpha_sgmap_init");
+}
+
+int
+alpha_sgmap_alloc(map, origlen, sgmap, flags)
+ bus_dmamap_t map;
+ bus_size_t origlen;
+ struct alpha_sgmap *sgmap;
+ int flags;
+{
+ int error;
+ bus_size_t len = origlen, boundary, alignment;
+
+#ifdef DIAGNOSTIC
+ if (map->_dm_flags & DMAMAP_HAS_SGMAP)
+ panic("alpha_sgmap_alloc: already have sgva space");
+#endif
+ /*
+ * Add a range for spill page.
+ */
+ len += NBPG;
+
+ /*
+ * And add an additional amount in case of ALLOCNOW.
+ */
+ if (flags & BUS_DMA_ALLOCNOW)
+ len += NBPG;
+
+ map->_dm_sgvalen = round_page(len);
+
+ /*
+ * ARGH! If the addition of spill pages bumped us over our
+ * boundary, we have to 2x the boundary limit.
+ */
+ boundary = map->_dm_boundary;
+ if (boundary && boundary < map->_dm_sgvalen) {
+ alignment = boundary;
+ do {
+ boundary <<= 1;
+ } while (boundary < map->_dm_sgvalen);
+ } else
+ alignment = NBPG;
+#if 0
+ printf("len %x -> %x, _dm_sgvalen %x _dm_boundary %x boundary %x -> ",
+ origlen, len, map->_dm_sgvalen, map->_dm_boundary, boundary);
+#endif
+
+ error = extent_alloc(sgmap->aps_ex, map->_dm_sgvalen, alignment,
+ boundary, (flags & BUS_DMA_NOWAIT) ? EX_NOWAIT : EX_WAITOK,
+ &map->_dm_sgva);
+#if 0
+ printf("error %d _dm_sgva %x\n", error, map->_dm_sgva);
+#endif
+
+ if (error == 0)
+ map->_dm_flags |= DMAMAP_HAS_SGMAP;
+ else
+ map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
+
+ return (error);
+}
+
+void
+alpha_sgmap_free(map, sgmap)
+ bus_dmamap_t map;
+ struct alpha_sgmap *sgmap;
+{
+
+#ifdef DIAGNOSTIC
+ if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0)
+ panic("alpha_sgmap_free: no sgva space to free");
+#endif
+
+ if (extent_free(sgmap->aps_ex, map->_dm_sgva, map->_dm_sgvalen,
+ EX_NOWAIT))
+ panic("alpha_sgmap_free");
+
+ map->_dm_flags &= ~DMAMAP_HAS_SGMAP;
+}
diff --git a/sys/arch/alpha/common/sgmap_typedep.c b/sys/arch/alpha/common/sgmap_typedep.c
new file mode 100644
index 00000000000..e5d26b4a103
--- /dev/null
+++ b/sys/arch/alpha/common/sgmap_typedep.c
@@ -0,0 +1,330 @@
+/* $NetBSD: sgmap_typedep.c,v 1.13 1999/07/08 18:05:23 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef SGMAP_LOG
+
+#ifndef SGMAP_LOGSIZE
+#define SGMAP_LOGSIZE 4096
+#endif
+
+struct sgmap_log_entry __C(SGMAP_TYPE,_log)[SGMAP_LOGSIZE];
+int __C(SGMAP_TYPE,_log_next);
+int __C(SGMAP_TYPE,_log_last);
+u_long __C(SGMAP_TYPE,_log_loads);
+u_long __C(SGMAP_TYPE,_log_unloads);
+
+#endif /* SGMAP_LOG */
+
+#ifdef SGMAP_DEBUG
+int __C(SGMAP_TYPE,_debug) = 0;
+#endif
+
+SGMAP_PTE_TYPE __C(SGMAP_TYPE,_prefetch_spill_page_pte);
+
+void
+__C(SGMAP_TYPE,_init_spill_page_pte)()
+{
+
+ __C(SGMAP_TYPE,_prefetch_spill_page_pte) =
+ (alpha_sgmap_prefetch_spill_page_pa >>
+ SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
+}
+
+int
+__C(SGMAP_TYPE,_load)(t, map, buf, buflen, p, flags, sgmap)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ void *buf;
+ bus_size_t buflen;
+ struct proc *p;
+ int flags;
+ struct alpha_sgmap *sgmap;
+{
+ vaddr_t endva, va = (vaddr_t)buf;
+ paddr_t pa;
+ bus_addr_t dmaoffset;
+ bus_size_t dmalen;
+ SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
+ int pteidx, error;
+#ifdef SGMAP_LOG
+ struct sgmap_log_entry sl;
+#endif
+
+ /*
+ * Initialize the spill page PTE if that hasn't already been done.
+ */
+ if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0)
+ __C(SGMAP_TYPE,_init_spill_page_pte)();
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+
+ if (buflen > map->_dm_size)
+ return (EINVAL);
+
+ /*
+ * Remember the offset into the first page and the total
+ * transfer length.
+ */
+ dmaoffset = ((u_long)buf) & PGOFSET;
+ dmalen = buflen;
+
+#ifdef SGMAP_DEBUG
+ if (__C(SGMAP_TYPE,_debug)) {
+ printf("sgmap_load: ----- buf = %p -----\n", buf);
+ printf("sgmap_load: dmaoffset = 0x%lx, dmalen = 0x%lx\n",
+ dmaoffset, dmalen);
+ }
+#endif
+
+#ifdef SGMAP_LOG
+ if (panicstr == NULL) {
+ sl.sl_op = 1;
+ sl.sl_sgmap = sgmap;
+ sl.sl_origbuf = buf;
+ sl.sl_pgoffset = dmaoffset;
+ sl.sl_origlen = dmalen;
+ }
+#endif
+
+ /*
+ * Allocate the necessary virtual address space for the
+ * mapping. Round the size, since we deal with whole pages.
+ *
+ * alpha_sgmap_alloc will deal with the appropriate spill page
+ * allocations.
+ *
+ */
+ endva = round_page(va + buflen);
+ va = trunc_page(va);
+ if ((map->_dm_flags & DMAMAP_HAS_SGMAP) == 0) {
+ error = alpha_sgmap_alloc(map, (endva - va), sgmap, flags);
+ if (error)
+ return (error);
+ }
+
+ pteidx = map->_dm_sgva >> PGSHIFT;
+ pte = &page_table[pteidx * SGMAP_PTE_SPACING];
+
+#ifdef SGMAP_DEBUG
+ if (__C(SGMAP_TYPE,_debug))
+ printf("sgmap_load: sgva = 0x%lx, pteidx = %d, "
+ "pte = %p (pt = %p)\n", map->_dm_sgva, pteidx, pte,
+ page_table);
+#endif
+
+ /*
+ * Generate the DMA address.
+ */
+ map->dm_segs[0].ds_addr = sgmap->aps_wbase |
+ (pteidx << SGMAP_ADDR_PTEIDX_SHIFT) | dmaoffset;
+ map->dm_segs[0].ds_len = dmalen;
+
+#ifdef SGMAP_LOG
+ if (panicstr == NULL) {
+ sl.sl_sgva = map->_dm_sgva;
+ sl.sl_dmaaddr = map->dm_segs[0].ds_addr;
+ }
+#endif
+
+#ifdef SGMAP_DEBUG
+ if (__C(SGMAP_TYPE,_debug))
+ printf("sgmap_load: wbase = 0x%lx, vpage = 0x%x, "
+ "dma addr = 0x%lx\n", sgmap->aps_wbase,
+ (pteidx << SGMAP_ADDR_PTEIDX_SHIFT),
+ map->dm_segs[0].ds_addr);
+#endif
+
+ map->_dm_pteidx = pteidx;
+ map->_dm_ptecnt = 0;
+
+ for (; va < endva; va += NBPG, pteidx++,
+ pte = &page_table[pteidx * SGMAP_PTE_SPACING],
+ map->_dm_ptecnt++) {
+ /*
+ * Get the physical address for this segment.
+ */
+ if (p != NULL)
+ pa = pmap_extract(p->p_vmspace->vm_map.pmap, va);
+ else
+ pa = vtophys(va);
+
+ /*
+ * Load the current PTE with this page.
+ */
+ *pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
+#ifdef SGMAP_DEBUG
+ if (__C(SGMAP_TYPE,_debug))
+ printf("sgmap_load: pa = 0x%lx, pte = %p, "
+ "*pte = 0x%lx\n", pa, pte, (u_long)(*pte));
+#endif
+ }
+
+ /*
+ * ...and the prefetch-spill page.
+ */
+ *pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
+ map->_dm_ptecnt++;
+#ifdef SGMAP_DEBUG
+ if (__C(SGMAP_TYPE,_debug)) {
+ printf("sgmap_load: spill page, pte = %p, *pte = 0x%lx\n",
+ pte, *pte);
+ printf("sgmap_load: pte count = %d\n", map->_dm_ptecnt);
+ }
+#endif
+
+ alpha_mb();
+
+#ifdef SGMAP_LOG
+ if (panicstr == NULL) {
+ sl.sl_ptecnt = map->_dm_ptecnt;
+ bcopy(&sl, &__C(SGMAP_TYPE,_log)[__C(SGMAP_TYPE,_log_next)],
+ sizeof(sl));
+ __C(SGMAP_TYPE,_log_last) = __C(SGMAP_TYPE,_log_next);
+ if (++__C(SGMAP_TYPE,_log_next) == SGMAP_LOGSIZE)
+ __C(SGMAP_TYPE,_log_next) = 0;
+ __C(SGMAP_TYPE,_log_loads)++;
+ }
+#endif
+
+#if defined(SGMAP_DEBUG) && defined(DDB)
+ if (__C(SGMAP_TYPE,_debug) > 1)
+ Debugger();
+#endif
+ map->dm_mapsize = buflen;
+ map->dm_nsegs = 1;
+ return (0);
+}
+
+int
+__C(SGMAP_TYPE,_load_mbuf)(t, map, m, flags, sgmap)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct mbuf *m;
+ int flags;
+ struct alpha_sgmap *sgmap;
+{
+
+ panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": not implemented");
+}
+
+int
+__C(SGMAP_TYPE,_load_uio)(t, map, uio, flags, sgmap)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct uio *uio;
+ int flags;
+ struct alpha_sgmap *sgmap;
+{
+
+ panic(__S(__C(SGMAP_TYPE,_load_uio)) ": not implemented");
+}
+
+int
+__C(SGMAP_TYPE,_load_raw)(t, map, segs, nsegs, size, flags, sgmap)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ bus_size_t size;
+ int flags;
+ struct alpha_sgmap *sgmap;
+{
+
+ panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented");
+}
+
+void
+__C(SGMAP_TYPE,_unload)(t, map, sgmap)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct alpha_sgmap *sgmap;
+{
+ SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
+ int ptecnt, pteidx;
+#ifdef SGMAP_LOG
+ struct sgmap_log_entry *sl;
+
+ if (panicstr == NULL) {
+ sl = &__C(SGMAP_TYPE,_log)[__C(SGMAP_TYPE,_log_next)];
+
+ bzero(sl, sizeof(*sl));
+ sl->sl_op = 0;
+ sl->sl_sgmap = sgmap;
+ sl->sl_sgva = map->_dm_sgva;
+ sl->sl_dmaaddr = map->dm_segs[0].ds_addr;
+
+ __C(SGMAP_TYPE,_log_last) = __C(SGMAP_TYPE,_log_next);
+ if (++__C(SGMAP_TYPE,_log_next) == SGMAP_LOGSIZE)
+ __C(SGMAP_TYPE,_log_next) = 0;
+ __C(SGMAP_TYPE,_log_unloads)++;
+ }
+#endif
+
+ /*
+ * Invalidate the PTEs for the mapping.
+ */
+ for (ptecnt = map->_dm_ptecnt, pteidx = map->_dm_pteidx,
+ pte = &page_table[pteidx * SGMAP_PTE_SPACING];
+ ptecnt != 0;
+ ptecnt--, pteidx++,
+ pte = &page_table[pteidx * SGMAP_PTE_SPACING]) {
+#ifdef SGMAP_DEBUG
+ if (__C(SGMAP_TYPE,_debug))
+ printf("sgmap_unload: pte = %p, *pte = 0x%lx\n",
+ pte, (u_long)(*pte));
+#endif
+ *pte = 0;
+ }
+
+ /*
+ * Free the virtual address space used by the mapping
+ * if necessary.
+ */
+ if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
+ alpha_sgmap_free(map, sgmap);
+ /*
+ * Mark the mapping invalid.
+ */
+ map->dm_mapsize = 0;
+ map->dm_nsegs = 0;
+}
diff --git a/sys/arch/alpha/common/sgmap_typedep.h b/sys/arch/alpha/common/sgmap_typedep.h
new file mode 100644
index 00000000000..32c05a08c9a
--- /dev/null
+++ b/sys/arch/alpha/common/sgmap_typedep.h
@@ -0,0 +1,58 @@
+/* $NetBSD: sgmap_typedep.h,v 1.4 1998/06/04 01:22:52 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef __C
+#undef __S
+
+#define __C(A,B) __CONCAT(A,B)
+#define __S(S) __STRING(S)
+
+extern SGMAP_PTE_TYPE __C(SGMAP_TYPE,_prefetch_spill_page_pte);
+
+void __C(SGMAP_TYPE,_init_spill_page_pte) __P((void));
+int __C(SGMAP_TYPE,_load) __P((bus_dma_tag_t, bus_dmamap_t,
+ void *, bus_size_t, struct proc *, int, struct alpha_sgmap *));
+int __C(SGMAP_TYPE,_load_mbuf) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int, struct alpha_sgmap *));
+int __C(SGMAP_TYPE,_load_uio) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int, struct alpha_sgmap *));
+int __C(SGMAP_TYPE,_load_raw) __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int, struct alpha_sgmap *));
+void __C(SGMAP_TYPE,_unload) __P((bus_dma_tag_t, bus_dmamap_t,
+ struct alpha_sgmap *));
diff --git a/sys/arch/alpha/common/sgmapvar.h b/sys/arch/alpha/common/sgmapvar.h
new file mode 100644
index 00000000000..e4abefd5a75
--- /dev/null
+++ b/sys/arch/alpha/common/sgmapvar.h
@@ -0,0 +1,95 @@
+/* $NetBSD: sgmapvar.h,v 1.10 1998/08/14 16:50:02 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ALPHA_COMMON_SGMAPVAR_H
+#define _ALPHA_COMMON_SGMAPVAR_H
+
+#include <sys/extent.h>
+#include <machine/bus.h>
+
+/*
+ * Bits n:13 of the DMA address are the index of the PTE into
+ * the SGMAP page table.
+ */
+#define SGMAP_ADDR_PTEIDX_SHIFT 13
+
+/*
+ * An Alpha SGMAP's state information. Nothing in the sgmap requires
+ * locking[*], with the exception of the extent map. Locking of the
+ * extent map is handled within the extent manager itself.
+ *
+ * [*] While the page table is a `global' resource, access to it is
+ * controlled by the extent map; once a region has been allocated from
+ * the map, that region is effectively `locked'.
+ */
+struct alpha_sgmap {
+ struct extent *aps_ex; /* extent map to manage sgva space */
+ void *aps_pt; /* page table */
+ bus_addr_t aps_ptpa; /* page table physical address */
+ bus_addr_t aps_sgvabase; /* base of the sgva space */
+ bus_size_t aps_sgvasize; /* size of the sgva space */
+ bus_addr_t aps_wbase; /* base of the dma window */
+};
+
+/*
+ * Log entry, used for debugging SGMAPs.
+ */
+struct sgmap_log_entry {
+ int sl_op; /* op; 1 = load, 0 = unload */
+ struct alpha_sgmap *sl_sgmap; /* sgmap for entry */
+ void *sl_origbuf; /* original buffer */
+ u_long sl_pgoffset; /* page offset of buffer start */
+ u_long sl_origlen; /* length of transfer */
+ u_long sl_sgva; /* sgva of transfer */
+ u_long sl_dmaaddr; /* dma address */
+ int sl_ptecnt; /* pte count */
+};
+
+extern vaddr_t alpha_sgmap_prefetch_spill_page_va;
+extern bus_addr_t alpha_sgmap_prefetch_spill_page_pa;
+
+void alpha_sgmap_init __P((bus_dma_tag_t, struct alpha_sgmap *,
+ const char *, bus_addr_t, bus_addr_t, bus_size_t, size_t, void *,
+ bus_size_t));
+
+int alpha_sgmap_alloc __P((bus_dmamap_t, bus_size_t,
+ struct alpha_sgmap *, int));
+void alpha_sgmap_free __P((bus_dmamap_t, struct alpha_sgmap *));
+
+#endif /* _ALPHA_COMMON_SGMAPVAR_H */
diff --git a/sys/arch/alpha/conf/GENERIC b/sys/arch/alpha/conf/GENERIC
index 284da0d7f99..f46cde04b09 100644
--- a/sys/arch/alpha/conf/GENERIC
+++ b/sys/arch/alpha/conf/GENERIC
@@ -1,4 +1,4 @@
-# $OpenBSD: GENERIC,v 1.53 2000/09/09 01:46:15 ericj Exp $
+# $OpenBSD: GENERIC,v 1.54 2000/11/08 16:01:07 art Exp $
# $NetBSD: GENERIC,v 1.31 1996/12/03 17:25:29 cgd Exp $
#
# Generic Alpha kernel. Enough to get booted, etc., but not much more.
@@ -8,6 +8,8 @@ machine alpha
include "../../../conf/GENERIC"
maxusers 8
+option PMAP_NEW
+option UVM
# CPU Support
option DEC_3000_500 # Flamingo etc: 3000/[4-9]00*
option DEC_3000_300 # Pelican etc: 3000/300*
diff --git a/sys/arch/alpha/conf/files.alpha b/sys/arch/alpha/conf/files.alpha
index 06379c28723..0184c6a13c4 100644
--- a/sys/arch/alpha/conf/files.alpha
+++ b/sys/arch/alpha/conf/files.alpha
@@ -1,4 +1,4 @@
-# $OpenBSD: files.alpha,v 1.34 2000/08/12 20:11:32 ericj Exp $
+# $OpenBSD: files.alpha,v 1.35 2000/11/08 16:01:07 art Exp $
# $NetBSD: files.alpha,v 1.32 1996/11/25 04:03:21 cgd Exp $
#
# alpha-specific configuration info
@@ -13,6 +13,9 @@ define alpha_shared_intr
file arch/alpha/dev/shared_intr.c alpha_shared_intr | dec_eb164 |
dec_kn20aa
+define alpha_sgmap
+file arch/alpha/common/sgmap_common.c alpha_sgmap | dec_3000_500
+
#
# Bus-independent devices
#
@@ -132,7 +135,6 @@ major { wd = 0 }
#device jeisa at ibus: eisabus
file arch/alpha/isa/isa_machdep.c isa
-file arch/alpha/pci/pciide_machdep.c pci
device pckbc { }
attach pckbc at isa
@@ -168,6 +170,11 @@ include "dev/eisa/files.eisa"
# include "dev/pci/files.pci" XXX SEE ABOVE
+file arch/alpha/pci/pciide_machdep.c pci
+
+define alpha_pci_sgmap_pte64
+file arch/alpha/pci/pci_sgmap_pte64.c alpha_pci_sgmap_pte64
+
device apecs: pcibus
attach apecs at mainbus
file arch/alpha/pci/apecs.c apecs
@@ -182,19 +189,23 @@ file arch/alpha/pci/lca_bus_io.c lca
file arch/alpha/pci/lca_bus_mem.c lca
file arch/alpha/pci/lca_pci.c lca
-device cia: pcibus
+device cia: pcibus, alpha_sgmap, alpha_pci_sgmap_pte64
attach cia at mainbus
file arch/alpha/pci/cia.c cia
+file arch/alpha/pci/cia_dma.c cia
+file arch/alpha/pci/cia_pci.c cia
file arch/alpha/pci/cia_bus_io.c cia
file arch/alpha/pci/cia_bus_mem.c cia
-file arch/alpha/pci/cia_pci.c cia
+file arch/alpha/pci/cia_bwx_bus_io.c cia
+file arch/alpha/pci/cia_bwx_bus_mem.c cia
+
file arch/alpha/pci/pci_2100_a50.c dec_2100_a50
file arch/alpha/pci/pci_axppci_33.c dec_axppci_33
file arch/alpha/pci/pci_eb164.c dec_eb164
file arch/alpha/pci/pci_eb164_intr.s dec_eb164
file arch/alpha/pci/pci_kn20aa.c dec_kn20aa
-
+file arch/alpha/pci/pci_550.c dec_550
#
# PCI Bus devices
@@ -243,8 +254,7 @@ file arch/alpha/alpha/interrupt.c
file arch/alpha/alpha/machdep.c
file arch/alpha/alpha/mainbus.c
file arch/alpha/alpha/mem.c
-file arch/alpha/alpha/pmap.c new_pmap
-file arch/alpha/alpha/pmap.old.c !new_pmap
+file arch/alpha/alpha/pmap.c
file arch/alpha/alpha/process_machdep.c
file arch/alpha/alpha/prom.c
file arch/alpha/alpha/support.c
@@ -252,6 +262,7 @@ file arch/alpha/alpha/sys_machdep.c
file arch/alpha/alpha/trap.c
file arch/alpha/alpha/vm_machdep.c
file arch/alpha/alpha/disksubr.c
+file arch/alpha/common/bus_dma.c
file dev/cons.c
@@ -275,6 +286,7 @@ file arch/alpha/alpha/dec_3000_500.c dec_3000_500 needs-flag
file arch/alpha/alpha/dec_axppci_33.c dec_axppci_33 needs-flag
file arch/alpha/alpha/dec_eb164.c dec_eb164 needs-flag
file arch/alpha/alpha/dec_kn20aa.c dec_kn20aa needs-flag
+file arch/alpha/alpha/dec_550.c dec_550 needs-flag
# OSF/1 Binary Compatibility (COMPAT_OSF1)
include "compat/osf1/files.osf1"
diff --git a/sys/arch/alpha/dev/shared_intr.c b/sys/arch/alpha/dev/shared_intr.c
index 6533387f66f..624e1b68a3a 100644
--- a/sys/arch/alpha/dev/shared_intr.c
+++ b/sys/arch/alpha/dev/shared_intr.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: shared_intr.c,v 1.7 1999/02/08 18:14:11 millert Exp $ */
-/* $NetBSD: shared_intr.c,v 1.1 1996/11/17 02:03:08 cgd Exp $ */
+/* $NetBSD: shared_intr.c,v 1.13 2000/03/19 01:46:18 thorpej Exp $ */
/*
* Copyright (c) 1996 Carnegie-Mellon University.
@@ -33,6 +32,7 @@
*/
#include <sys/param.h>
+#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
@@ -40,10 +40,10 @@
#include <machine/intr.h>
-extern int cold;
-
static const char *intr_typename __P((int));
+extern int cold;
+
static const char *
intr_typename(type)
int type;
@@ -82,6 +82,7 @@ alpha_shared_intr_alloc(n)
intr[i].intr_dfltsharetype = IST_NONE;
intr[i].intr_nstrays = 0;
intr[i].intr_maxstrays = 5;
+ intr[i].intr_private = NULL;
}
return (intr);
@@ -115,38 +116,6 @@ alpha_shared_intr_dispatch(intr, num)
return (handled);
}
-/*
- * Just check to see if an IRQ is available/can be shared.
- * 0 = interrupt not available
- * 1 = interrupt shareable
- * 2 = interrupt all to ourself
- */
-int
-alpha_shared_intr_check(intr, num, type)
- struct alpha_shared_intr *intr;
- unsigned int num;
- int type;
-{
-
- switch (intr[num].intr_sharetype) {
- case IST_UNUSABLE:
- return (0);
- break;
- case IST_NONE:
- return (2);
- break;
- case IST_LEVEL:
- if (type == intr[num].intr_sharetype)
- break;
- case IST_EDGE:
- case IST_PULSE:
- if ((type != IST_NONE) && (intr[num].intr_q.tqh_first != NULL))
- return (0);
- }
-
- return (1);
-}
-
void *
alpha_shared_intr_establish(intr, num, type, level, fn, arg, basename)
struct alpha_shared_intr *intr;
@@ -199,9 +168,11 @@ alpha_shared_intr_establish(intr, num, type, level, fn, arg, basename)
break;
}
+ ih->ih_intrhead = intr;
ih->ih_fn = fn;
ih->ih_arg = arg;
ih->ih_level = level;
+ ih->ih_num = num;
intr[num].intr_sharetype = type;
TAILQ_INSERT_TAIL(&intr[num].intr_q, ih, ih_q);
@@ -209,6 +180,22 @@ alpha_shared_intr_establish(intr, num, type, level, fn, arg, basename)
return (ih);
}
+void
+alpha_shared_intr_disestablish(intr, cookie, basename)
+ struct alpha_shared_intr *intr;
+ void *cookie;
+ const char *basename;
+{
+ struct alpha_shared_intrhand *ih = cookie;
+ unsigned int num = ih->ih_num;
+
+ /*
+ * Just remove it from the list and free the entry. We let
+ * the caller deal with resetting the share type, if appropriate.
+ */
+ TAILQ_REMOVE(&intr[num].intr_q, ih, ih_q);
+}
+
int
alpha_shared_intr_get_sharetype(intr, num)
struct alpha_shared_intr *intr;
@@ -249,14 +236,10 @@ alpha_shared_intr_set_maxstrays(intr, num, newmaxstrays)
unsigned int num;
int newmaxstrays;
{
-
-#ifdef DIAGNOSTIC
- if (alpha_shared_intr_isactive(intr, num))
- panic("alpha_shared_intr_set_maxstrays on active intr");
-#endif
-
+ int s = splhigh();
intr[num].intr_maxstrays = newmaxstrays;
intr[num].intr_nstrays = 0;
+ splx(s);
}
void
@@ -276,3 +259,22 @@ alpha_shared_intr_stray(intr, num, basename)
intr[num].intr_nstrays >= intr[num].intr_maxstrays ?
"; stopped logging" : "");
}
+
+void
+alpha_shared_intr_set_private(intr, num, v)
+ struct alpha_shared_intr *intr;
+ unsigned int num;
+ void *v;
+{
+
+ intr[num].intr_private = v;
+}
+
+void *
+alpha_shared_intr_get_private(intr, num)
+ struct alpha_shared_intr *intr;
+ unsigned int num;
+{
+
+ return (intr[num].intr_private);
+}
diff --git a/sys/arch/alpha/include/alpha.h b/sys/arch/alpha/include/alpha.h
new file mode 100644
index 00000000000..69b79bb3561
--- /dev/null
+++ b/sys/arch/alpha/include/alpha.h
@@ -0,0 +1,112 @@
+/* $NetBSD: alpha.h,v 1.11 2000/08/15 22:16:18 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1988 University of Utah.
+ * Copyright (c) 1982, 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: cpu.h 1.16 91/03/25$
+ *
+ * @(#)cpu.h 8.4 (Berkeley) 1/5/94
+ */
+
+#ifndef _ALPHA_H_
+#define _ALPHA_H_
+#ifdef _KERNEL
+
+#include <machine/bus.h>
+
+struct pcb;
+struct proc;
+struct reg;
+struct rpb;
+struct trapframe;
+
+extern int bootdev_debug;
+
+void XentArith(u_int64_t, u_int64_t, u_int64_t); /* MAGIC */
+void XentIF(u_int64_t, u_int64_t, u_int64_t); /* MAGIC */
+void XentInt(u_int64_t, u_int64_t, u_int64_t); /* MAGIC */
+void XentMM(u_int64_t, u_int64_t, u_int64_t); /* MAGIC */
+void XentRestart(void); /* MAGIC */
+void XentSys(u_int64_t, u_int64_t, u_int64_t); /* MAGIC */
+void XentUna(u_int64_t, u_int64_t, u_int64_t); /* MAGIC */
+void alpha_init(u_long, u_long, u_long, u_long, u_long);
+int alpha_pa_access(u_long);
+void ast(struct trapframe *);
+int badaddr(void *, size_t);
+int badaddr_read(void *, size_t, void *);
+void child_return(void *);
+u_int64_t console_restart(struct trapframe *);
+void do_sir(void);
+void dumpconf(void);
+void exception_return(void); /* MAGIC */
+void frametoreg(struct trapframe *, struct reg *);
+long fswintrberr(void); /* MAGIC */
+void init_bootstrap_console(void);
+void init_prom_interface(struct rpb *);
+void interrupt(unsigned long, unsigned long, unsigned long,
+ struct trapframe *);
+void machine_check(unsigned long, struct trapframe *, unsigned long,
+ unsigned long);
+u_int64_t hwrpb_checksum(void);
+void hwrpb_restart_setup(void);
+void regdump(struct trapframe *);
+void regtoframe(struct reg *, struct trapframe *);
+void savectx(struct pcb *);
+void switch_exit(struct proc *); /* MAGIC */
+void switch_trampoline(void); /* MAGIC */
+void syscall(u_int64_t, struct trapframe *);
+void trap(unsigned long, unsigned long, unsigned long, unsigned long,
+ struct trapframe *);
+void trap_init(void);
+void enable_nsio_ide(bus_space_tag_t);
+char * dot_conv(unsigned long);
+
+void release_fpu(int);
+void synchronize_fpstate(struct proc *, int);
+
+/* Multiprocessor glue; cpu.c */
+struct cpu_info;
+int cpu_iccb_send(long, const char *);
+void cpu_iccb_receive(void);
+void cpu_hatch(struct cpu_info *);
+void cpu_halt_secondary(unsigned long);
+void cpu_spinup_trampoline(void); /* MAGIC */
+void cpu_pause(unsigned long);
+void cpu_resume(unsigned long);
+
+#endif /* _KERNEL */
+#endif /* _ALPHA_H_ */
diff --git a/sys/arch/alpha/include/alpha_cpu.h b/sys/arch/alpha/include/alpha_cpu.h
index da421819842..a3447ed9549 100644
--- a/sys/arch/alpha/include/alpha_cpu.h
+++ b/sys/arch/alpha/include/alpha_cpu.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: alpha_cpu.h,v 1.4 1998/06/05 13:28:32 janjaap Exp $ */
-/* $NetBSD: alpha_cpu.h,v 1.7 1996/11/23 06:25:31 cgd Exp $ */
+/* $NetBSD: alpha_cpu.h,v 1.41 2000/06/08 03:10:06 thorpej Exp $ */
/*
* Copyright (c) 1996 Carnegie-Mellon University.
@@ -41,6 +40,7 @@
* Processor Status Register
* Machine Check Error Summary Register
* Machine Check Logout Area
+ * Per CPU state Management of Machine Check Handling
* Virtual Memory Management
* Kernel Entry Vectors
* MMCSR Fault Type Codes
@@ -61,6 +61,7 @@ struct alpha_pcb {
unsigned int apcb_cpc; /* charged process cycles */
unsigned int apcb_asn; /* address space number */
unsigned long apcb_unique; /* process unique value */
+#define apcb_backup_ksp apcb_unique /* backup kernel stack ptr */
unsigned long apcb_flags; /* flags; see below */
unsigned long apcb_decrsv0; /* DEC reserved */
unsigned long apcb_decrsv1; /* DEC reserved */
@@ -111,6 +112,17 @@ struct alpha_pcb {
#define ALPHA_PSL_USERCLR (ALPHA_PSL_MUST_BE_ZERO | ALPHA_PSL_IPL_MASK)
/*
+ * Interrupt Type Code Definitions [OSF/1 PALcode Specific]
+ */
+
+#define ALPHA_INTR_XPROC 0 /* interprocessor interrupt */
+#define ALPHA_INTR_CLOCK 1 /* clock interrupt */
+#define ALPHA_INTR_ERROR 2 /* correctable error or mcheck */
+#define ALPHA_INTR_DEVICE 3 /* device interrupt */
+#define ALPHA_INTR_PERF 4 /* performance counter */
+#define ALPHA_INTR_PASSIVE 5 /* passive release */
+
+/*
* Machine Check Error Summary Register definitions [OSF/1 PALcode Specific]
*
* The following bits are values as read. On write, _PCE, _SCE, and
@@ -134,6 +146,9 @@ struct alpha_pcb {
/*
* Machine Check Error Summary Register definitions [OSF/1 PALcode Specific]
+ *
+ * Note that these are *generic* OSF/1 PALcode specific defines. There are
+ * platform variations to these entities.
*/
struct alpha_logout_area {
@@ -162,7 +177,13 @@ struct alpha_logout_area {
(unsigned long *)((unsigned char *)(lap) + (lap)->la_system_offset)
#define ALPHA_LOGOUT_SYSTEM_SIZE(lap) \
((lap)->la_frame_size - (lap)->la_system_offset)
-
+
+/* types of machine checks */
+#define ALPHA_SYS_ERROR 0x620 /* System correctable error */
+#define ALPHA_PROC_ERROR 0x630 /* Processor correctable error */
+#define ALPHA_SYS_MCHECK 0x660 /* System machine check */
+#define ALPHA_PROC_MCHECK 0x670 /* Processor machine check */
+
/*
* Virtual Memory Management definitions [OSF/1 PALcode Specific]
*
@@ -203,7 +224,8 @@ struct alpha_logout_area {
#define ALPHA_PTE_WRITE (ALPHA_PTE_KW | ALPHA_PTE_UW)
-#define ALPHA_PTE_SOFTWARE 0xffff0000
+#define ALPHA_PTE_SOFTWARE 0x00000000ffff0000
+#define ALPHA_PTE_PALCODE (~ALPHA_PTE_SOFTWARE) /* shorthand */
#define ALPHA_PTE_PFN 0xffffffff00000000
@@ -254,28 +276,253 @@ typedef unsigned long alpha_pt_entry_t;
#define ALPHA_TBIS(va) alpha_pal_tbi(3, (va)) /* all for va */
/*
+ * Bits used in the amask instruction [EV56 and later]
+ */
+
+#define ALPHA_AMASK_BWX 0x0001 /* byte/word extension */
+#define ALPHA_AMASK_FIX 0x0002 /* floating point conv. ext. */
+#define ALPHA_AMASK_CIX 0x0004 /* count extension */
+#define ALPHA_AMASK_MVI 0x0100 /* multimedia extension */
+#define ALPHA_AMASK_PAT 0x0200 /* precise arith. traps */
+
+#define ALPHA_AMASK_ALL (ALPHA_AMASK_BWX|ALPHA_AMASK_FIX| \
+ ALPHA_AMASK_CIX|ALPHA_AMASK_MVI| \
+ ALPHA_AMASK_PAT)
+
+#define ALPHA_AMASK_BITS \
+ "\20\12PAT\11MVI\3CIX\2FIX\1BWX"
+
+/*
+ * Chip family IDs returned by implver instruction
+ */
+
+#define ALPHA_IMPLVER_EV4 0 /* LCA/EV4/EV45 */
+#define ALPHA_IMPLVER_EV5 1 /* EV5/EV56/PCA56 */
+#define ALPHA_IMPLVER_EV6 2 /* EV6 */
+
+/*
+ * Maximum processor ID we allow from `whami', and related constants.
+ *
+ * XXX This is not really processor or PALcode specific, but this is
+ * a convenient place to put these definitions.
+ *
+ * XXX This is clipped at 63 so that we can use `long's for proc bitmasks.
+ */
+
+#define ALPHA_WHAMI_MAXID 63
+#define ALPHA_MAXPROCS (ALPHA_WHAMI_MAXID + 1)
+
+/*
+ * Misc. support routines.
+ */
+const char *alpha_dsr_sysname(void);
+
+/*
* Stubs for Alpha instructions normally inaccessible from C.
*/
-unsigned long alpha_rpcc __P((void));
-void alpha_mb __P((void));
-void alpha_wmb __P((void));
+unsigned long alpha_amask(unsigned long);
+unsigned long alpha_implver(void);
+
+static __inline unsigned long
+alpha_rpcc(void)
+{
+ unsigned long v0;
+
+ __asm __volatile("rpcc %0" : "=r" (v0));
+ return (v0);
+}
+
+#define alpha_mb() __asm __volatile("mb" : : : "memory")
+#define alpha_wmb() __asm __volatile("mb" : : : "memory") /* XXX */
/*
* Stubs for OSF/1 PALcode operations.
*/
-void alpha_pal_imb __P((void));
-void alpha_pal_draina __P((void));
-void alpha_pal_halt __P((void)) __attribute__((__noreturn__));
-unsigned long alpha_pal_rdmces __P((void));
-unsigned long alpha_pal_rdusp __P((void));
-unsigned long alpha_pal_swpipl __P((unsigned long));
-unsigned long _alpha_pal_swpipl __P((unsigned long)); /* for profiling */
-void alpha_pal_tbi __P((unsigned long, vm_offset_t));
-unsigned long alpha_pal_whami __P((void));
-void alpha_pal_wrent __P((void *, unsigned long));
-void alpha_pal_wrfen __P((unsigned long));
-void alpha_pal_wrusp __P((unsigned long));
-void alpha_pal_wrvptptr __P((unsigned long));
-void alpha_pal_wrmces __P((unsigned long));
+#include <machine/pal.h>
+
+void alpha_pal_cflush(unsigned long);
+void alpha_pal_halt(void) __attribute__((__noreturn__));
+unsigned long _alpha_pal_swpipl(unsigned long); /* for profiling */
+void alpha_pal_wrent(void *, unsigned long);
+void alpha_pal_wrvptptr(unsigned long);
+
+#define alpha_pal_draina() __asm __volatile("call_pal %0 # PAL_draina" \
+ : : "i" (PAL_draina) : "memory")
+
+#define alpha_pal_imb() __asm __volatile("call_pal %0 # PAL_imb" \
+ : : "i" (PAL_imb) : "memory")
+
+static __inline unsigned long
+alpha_pal_rdmces(void)
+{
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_rdmces"
+ : "=r" (v0)
+ : "i" (PAL_OSF1_rdmces)
+ /* clobbers t0, t8..t11 */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline unsigned long
+alpha_pal_rdps(void)
+{
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_rdps"
+ : "=r" (v0)
+ : "i" (PAL_OSF1_rdps)
+ /* clobbers t0, t8..t11 */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline unsigned long
+alpha_pal_rdusp(void)
+{
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_rdusp"
+ : "=r" (v0)
+ : "i" (PAL_OSF1_rdusp)
+ /* clobbers t0, t8..t11 */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline unsigned long
+alpha_pal_rdval(void)
+{
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_rdval"
+ : "=r" (v0)
+ : "i" (PAL_OSF1_rdval)
+ /* clobbers t0, t8..t11 */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline unsigned long
+alpha_pal_swpctx(unsigned long ctx)
+{
+ register unsigned long a0 __asm("$16") = ctx;
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %2 # PAL_OSF1_swpctx"
+ : "=r" (a0), "=r" (v0)
+ : "i" (PAL_OSF1_swpctx), "0" (a0)
+ /* clobbers t0, t8..t11, a0 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline unsigned long
+alpha_pal_swpipl(unsigned long ipl)
+{
+ register unsigned long a0 __asm("$16") = ipl;
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %2 # PAL_OSF1_swpipl"
+ : "=r" (a0), "=r" (v0)
+ : "i" (PAL_OSF1_swpipl), "0" (a0)
+ /* clobbers t0, t8..t11, a0 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline void
+alpha_pal_tbi(unsigned long op, vaddr_t va)
+{
+ register unsigned long a0 __asm("$16") = op;
+ register unsigned long a1 __asm("$17") = va;
+
+ __asm __volatile("call_pal %2 # PAL_OSF1_tbi"
+ : "=r" (a0), "=r" (a1)
+ : "i" (PAL_OSF1_tbi), "0" (a0), "1" (a1)
+ /* clobbers t0, t8..t11, a0 (above), a1 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+}
+
+static __inline unsigned long
+alpha_pal_whami(void)
+{
+ register unsigned long v0 __asm("$0");
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_whami"
+ : "=r" (v0)
+ : "i" (PAL_OSF1_whami)
+ /* clobbers t0, t8..t11 */
+ : "$1", "$22", "$23", "$24", "$25");
+
+ return (v0);
+}
+
+static __inline void
+alpha_pal_wrfen(unsigned long onoff)
+{
+ register unsigned long a0 __asm("$16") = onoff;
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_wrfen"
+ : "=r" (a0)
+ : "i" (PAL_OSF1_wrfen), "0" (a0)
+ /* clobbers t0, t8..t11, a0 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+}
+
+static __inline void
+alpha_pal_wripir(unsigned long cpu_id)
+{
+ register unsigned long a0 __asm("$16") = cpu_id;
+
+ __asm __volatile("call_pal %1 # PAL_ipir"
+ : "=r" (a0)
+ : "i" (PAL_ipir), "0" (a0)
+ /* clobbers t0, t8..t11, a0 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+}
+
+static __inline void
+alpha_pal_wrusp(unsigned long usp)
+{
+ register unsigned long a0 __asm("$16") = usp;
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_wrusp"
+ : "=r" (a0)
+ : "i" (PAL_OSF1_wrusp), "0" (a0)
+ /* clobbers t0, t8..t11, a0 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+}
+
+static __inline void
+alpha_pal_wrmces(unsigned long mces)
+{
+ register unsigned long a0 __asm("$16") = mces;
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_wrmces"
+ : "=r" (a0)
+ : "i" (PAL_OSF1_wrmces), "0" (a0)
+ /* clobbers t0, t8..t11 */
+ : "$1", "$22", "$23", "$24", "$25");
+}
+
+static __inline void
+alpha_pal_wrval(unsigned long val)
+{
+ register unsigned long a0 __asm("$16") = val;
+
+ __asm __volatile("call_pal %1 # PAL_OSF1_wrval"
+ : "=r" (a0)
+ : "i" (PAL_OSF1_wrval), "0" (a0)
+ /* clobbers t0, t8..t11, a0 (above) */
+ : "$1", "$22", "$23", "$24", "$25");
+}
#endif /* __ALPHA_ALPHA_CPU_H__ */
diff --git a/sys/arch/alpha/include/asm.h b/sys/arch/alpha/include/asm.h
index 5ec88decbca..1cd4a507db1 100644
--- a/sys/arch/alpha/include/asm.h
+++ b/sys/arch/alpha/include/asm.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: asm.h,v 1.7 1999/09/26 11:07:32 kstailey Exp $ */
-/* $NetBSD: asm.h,v 1.11 1996/11/30 02:48:57 jtc Exp $ */
+/* $NetBSD: asm.h,v 1.23 2000/06/23 12:18:45 kleink Exp $ */
/*
* Copyright (c) 1991,1990,1989,1994,1995,1996 Carnegie Mellon University
@@ -179,6 +178,17 @@
* end for the encoding of this 32bit value.
* "f_mask" is the same, for floating point registers.
*
+ * Note, 10/31/97: This is interesting but it isn't the way gcc outputs
+ * frame directives and it isn't the way the macros below output them
+ * either. Frame directives look like this:
+ *
+ * .frame $15,framesize,$26,0
+ *
+ * If no fp is set up then $30 should be used instead of $15.
+ * Also, gdb expects to find a <lda sp,-framesize(sp)> at the beginning
+ * of a procedure. Don't use things like sub sp,framesize,sp for this
+ * reason. End Note 10/31/97. ross@netbsd.org
+ *
* Note that registers should be saved starting at "old_sp-8", where the
* return address should be stored. Other registers follow at -16-24-32..
* starting from register 0 (if saved) and up. Then float registers (ifany)
@@ -223,6 +233,38 @@
jsr at_reg,_mcount; \
.set at
#endif
+/*
+ * PALVECT, ESETUP, and ERSAVE
+ * Declare a palcode transfer point, and carefully construct
+ * gdb symbols with an unusual _negative_ register-save offset
+ * so that gdb can find the otherwise lost PC and then
+ * invert the vector for traceback. Also, fix up framesize,
+ * allowing for the palframe for the same reason.
+ */
+
+#define PALVECT(_name_) \
+ ESETUP(_name_); \
+ ERSAVE()
+
+#define ESETUP(_name_) \
+ .loc 1 __LINE__; \
+ .globl _name_; \
+ .ent _name_ 0; \
+_name_:; \
+ .set noat; \
+ lda sp,-(FRAME_SW_SIZE*8)(sp); \
+ .frame $30,(FRAME_SW_SIZE+6)*8,$26,0; /* give gdb the real size */\
+ .mask 0x4000000,-0x28; \
+ .set at
+
+#define ERSAVE() \
+ .set noat; \
+ stq at_reg,(FRAME_AT*8)(sp); \
+ .set at; \
+ stq ra,(FRAME_RA*8)(sp); \
+ .loc 1 __LINE__; \
+ bsr ra,exception_save_regs /* jmp/CALL trashes pv/t12 */
+
/*
* LEAF
@@ -370,6 +412,7 @@ _name_:
* Function invocation
*/
#define CALL(_name_) \
+ .loc 1 __LINE__; \
jsr ra,_name_; \
ldgp gp,0(ra)
/* but this would cover longer jumps
@@ -430,7 +473,7 @@ _name_ = _value_
_name_:; \
.mask _i_mask_|IM_EXC,0; \
.frame sp,MSS_SIZE,ra;
-/* .livereg _i_mask_|IM_EXC,0
+/* .livereg _i_mask_|IM_EXC,0 */
/* should have been
.proc _name_,1; \
.frame MSS_SIZE,$31,_i_mask_,0; \
@@ -541,69 +584,8 @@ label: ASCIZ msg; \
#define FM_V1 FM_T0
#define FM_V0 0x00000001
-/*
- * PAL "function" codes (used as arguments to call_pal instructions).
- *
- * Those marked with "P" are privileged, and those marked with "U"
- * are unprivileged.
- */
-
-/* Common PAL function codes. */
-#define PAL_halt 0x0000 /* P */
-#define PAL_draina 0x0002 /* P */
-#define PAL_cserve 0x0009 /* P */
-#define PAL_swppal 0x000a /* P */
-#define PAL_bpt 0x0080 /* U */
-#define PAL_bugchk 0x0081 /* U */
-#define PAL_imb 0x0086 /* U */
-#define PAL_rdunique 0x009e /* U */
-#define PAL_wrunique 0x009f /* U */
-#define PAL_gentrap 0x00aa /* U */
-
-/* VMS PAL function codes. */
-#define PAL_VMS_ldqp 0x0003 /* P */
-#define PAL_VMS_stqp 0x0004 /* P */
-#define PAL_VMS_mtpr_fen 0x000c /* P */
-#define PAL_VMS_mtpr_ipir 0x000d /* P */
-#define PAL_VMS_mfpr_ipl 0x000e /* P */
-#define PAL_VMS_mtpr_ipl 0x000f /* P */
-#define PAL_VMS_mfpr_mces 0x0010 /* P */
-#define PAL_VMS_mtpr_mces 0x0011 /* P */
-#define PAL_VMS_mfpr_prbr 0x0013 /* P */
-#define PAL_VMS_mtpr_prbr 0x0014 /* P */
-#define PAL_VMS_mfpr_ptbr 0x0015 /* P */
-#define PAL_VMS_mtpr_scbb 0x0017 /* P */
-#define PAL_VMS_mtpr_sirr 0x0018 /* P */
-#define PAL_VMS_mtpr_tbia 0x001b /* P */
-#define PAL_VMS_mtpr_tbiap 0x001c /* P */
-#define PAL_VMS_mtpr_tbis 0x001d /* P */
-#define PAL_VMS_mfpr_usp 0x0022 /* P */
-#define PAL_VMS_mtpr_usp 0x0023 /* P */
-#define PAL_VMS_mfpr_vptb 0x0029 /* P */
-#define PAL_VMS_mfpr_whami 0x003f /* P */
-#define PAL_VMS_rei 0x0092 /* U */
-
-/* OSF/1 PAL function codes. */
-#define PAL_OSF1_rdmces 0x0010 /* P */
-#define PAL_OSF1_wrmces 0x0011 /* P */
-#define PAL_OSF1_wrfen 0x002b /* P */
-#define PAL_OSF1_wrvptptr 0x002d /* P */
-#define PAL_OSF1_swpctx 0x0030 /* P */
-#define PAL_OSF1_wrval 0x0031 /* P */
-#define PAL_OSF1_rdval 0x0032 /* P */
-#define PAL_OSF1_tbi 0x0033 /* P */
-#define PAL_OSF1_wrent 0x0034 /* P */
-#define PAL_OSF1_swpipl 0x0035 /* P */
-#define PAL_OSF1_rdps 0x0036 /* P */
-#define PAL_OSF1_wrkgp 0x0037 /* P */
-#define PAL_OSF1_wrusp 0x0038 /* P */
-#define PAL_OSF1_rdusp 0x003a /* P */
-#define PAL_OSF1_whami 0x003c /* P */
-#define PAL_OSF1_retsys 0x003d /* P */
-#define PAL_OSF1_rti 0x003f /* P */
-#define PAL_OSF1_callsys 0x0083 /* U */
-#define PAL_OSF1_imb 0x0086 /* U */
-
+/* Pull in PAL "function" codes. */
+#include <machine/pal.h>
/*
* System call glue.
@@ -636,3 +618,41 @@ label: ASCIZ msg; \
.weak alias; \
alias = sym
#endif
+
+/*
+ * WARN_REFERENCES: create a warning if the specified symbol is referenced
+ * (ELF only).
+ */
+#ifdef __ELF__
+#ifdef __STDC__
+#define WARN_REFERENCES(_sym,_msg) \
+ .section .gnu.warning. ## _sym ; .ascii _msg ; .text
+#else
+#define WARN_REFERENCES(_sym,_msg) \
+ .section .gnu.warning./**/_sym ; .ascii _msg ; .text
+#endif /* __STDC__ */
+#endif /* __ELF__ */
+
+/*
+ * Kernel RCS ID tag and copyright macros
+ */
+
+#ifdef _KERNEL
+
+#ifdef __ELF__
+#define __KERNEL_SECTIONSTRING(_sec, _str) \
+ .section _sec ; .asciz _str ; .text
+#else /* __ELF__ */
+#define __KERNEL_SECTIONSTRING(_sec, _str) \
+ .data ; .asciz _str ; .align 3 ; .text
+#endif /* __ELF__ */
+
+#define __KERNEL_RCSID(_n, _s) __KERNEL_SECTIONSTRING(.ident, _s)
+#define __KERNEL_COPYRIGHT(_n, _s) __KERNEL_SECTIONSTRING(.copyright, _s)
+
+#ifdef NO_KERNEL_RCSIDS
+#undef __KERNEL_RCSID
+#define __KERNEL_RCSID(_n, _s) /* nothing */
+#endif
+
+#endif /* _KERNEL */
diff --git a/sys/arch/alpha/include/atomic.h b/sys/arch/alpha/include/atomic.h
new file mode 100644
index 00000000000..d3980e96d2d
--- /dev/null
+++ b/sys/arch/alpha/include/atomic.h
@@ -0,0 +1,182 @@
+/* $NetBSD: atomic.h,v 1.5 2000/06/08 02:54:55 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Misc. `atomic' operations.
+ */
+
+#ifndef _ALPHA_ATOMIC_H_
+#define _ALPHA_ATOMIC_H_
+
+/*
+ * atomic_setbits_ulong:
+ *
+ * Atomically set bits in a `unsigned long'.
+ */
+static __inline void
+atomic_setbits_ulong(__volatile unsigned long *ulp, unsigned long v)
+{
+#if 1
+ *ulp |= v;
+#else
+ unsigned long t0;
+
+ __asm __volatile(
+ "# BEGIN atomic_setbits_ulong\n"
+ "1: ldq_l %0, %3 \n"
+ " or %0, %2, %0 \n"
+ " stq_c %0, %1 \n"
+ " beq %0, 2f \n"
+ " mb \n"
+ " br 3f \n"
+ "2: br 1b \n"
+ "3: \n"
+ " # END atomic_setbits_ulong"
+ : "=r" (t0), "=m" (*ulp)
+ : "r" (v), "1" (*ulp));
+#endif
+}
+
+/*
+ * atomic_clearbits_ulong:
+ *
+ * Atomically clear bits in a `unsigned long'.
+ */
+static __inline void
+atomic_clearbits_ulong(__volatile unsigned long *ulp, unsigned long v)
+{
+#if 1
+ *ulp &= ~v;
+#else
+ unsigned long t0;
+
+ __asm __volatile(
+ "# BEGIN atomic_clearbits_ulong\n"
+ "1: ldq_l %0, %3 \n"
+ " and %0, %2, %0 \n"
+ " stq_c %0, %1 \n"
+ " beq %0, 2f \n"
+ " mb \n"
+ " br 3f \n"
+ "2: br 1b \n"
+ "3: \n"
+ " # END atomic_clearbits_ulong"
+ : "=r" (t0), "=m" (*ulp)
+ : "r" (~v), "1" (*ulp));
+#endif
+}
+
+/*
+ * atomic_add_ulong:
+ *
+ * Atomically add a value to a `unsigned long'.
+ */
+static __inline void
+atomic_add_ulong(__volatile unsigned long *ulp, unsigned long v)
+{
+ unsigned long t0;
+
+ __asm __volatile(
+ "# BEGIN atomic_add_ulong\n"
+ "1: ldq_l %0, %3 \n"
+ " addq %0, %2, %0 \n"
+ " stq_c %0, %1 \n"
+ " beq %0, 2f \n"
+ " mb \n"
+ " br 3f \n"
+ "2: br 1b \n"
+ "3: \n"
+ " # END atomic_add_ulong"
+ : "=r" (t0), "=m" (*ulp)
+ : "r" (v), "1" (*ulp));
+}
+
+/*
+ * atomic_sub_ulong:
+ *
+ * Atomically subtract a value from a `unsigned long'.
+ */
+static __inline void
+atomic_sub_ulong(__volatile unsigned long *ulp, unsigned long v)
+{
+ unsigned long t0;
+
+ __asm __volatile(
+ "# BEGIN atomic_sub_ulong\n"
+ "1: ldq_l %0, %3 \n"
+ " subq %0, %2, %0 \n"
+ " stq_c %0, %1 \n"
+ " beq %0, 2f \n"
+ " mb \n"
+ " br 3f \n"
+ "2: br 1b \n"
+ "3: \n"
+ " # END atomic_sub_ulong"
+ : "=r" (t0), "=m" (*ulp)
+ : "r" (v), "1" (*ulp));
+}
+
+/*
+ * atomic_loadlatch_ulong:
+ *
+ * Atomically load and latch a `unsigned long' value.
+ */
+static __inline unsigned long
+atomic_loadlatch_ulong(__volatile unsigned long *ulp, unsigned long v)
+{
+ unsigned long t0, v0;
+
+ __asm __volatile(
+ "# BEGIN atomic_loadlatch_ulong\n"
+ "1: mov %3, %0 \n"
+ " ldq_l %1, %4 \n"
+ " stq_c %0, %2 \n"
+ " beq %0, 2f \n"
+ " mb \n"
+ " br 3f \n"
+ "2: br 1b \n"
+ "3: \n"
+ " # END atomic_loadlatch_ulong"
+ : "=r" (t0), "=r" (v0), "=m" (*ulp)
+ : "r" (v), "2" (*ulp));
+
+ return (v0);
+}
+
+#endif /* _ALPHA_ATOMIC_H_ */
diff --git a/sys/arch/alpha/include/autoconf.h b/sys/arch/alpha/include/autoconf.h
index 47566f2798f..9919fe117c2 100644
--- a/sys/arch/alpha/include/autoconf.h
+++ b/sys/arch/alpha/include/autoconf.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: autoconf.h,v 1.7 1997/01/24 19:57:08 niklas Exp $ */
-/* $NetBSD: autoconf.h,v 1.6 1996/11/13 21:13:17 cgd Exp $ */
+/* $NetBSD: autoconf.h,v 1.19 2000/06/08 03:10:06 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -32,42 +31,10 @@
* Machine-dependent structures of autoconfiguration
*/
-struct confargs;
-
-typedef int (*intr_handler_t) __P((void *));
-
-struct abus {
- struct device *ab_dv; /* back-pointer to device */
- int ab_type; /* bus type (see below) */
- void (*ab_intr_establish) /* bus's set-handler function */
- __P((struct confargs *, intr_handler_t, void *));
- void (*ab_intr_disestablish) /* bus's unset-handler function */
- __P((struct confargs *));
- caddr_t (*ab_cvtaddr) /* convert slot/offset to address */
- __P((struct confargs *));
- int (*ab_matchname) /* see if name matches driver */
- __P((struct confargs *, char *));
-};
-
-#define BUS_MAIN 1 /* mainbus */
-#define BUS_TC 2 /* TurboChannel */
-#define BUS_ASIC 3 /* IOCTL ASIC; under TurboChannel */
-#define BUS_TCDS 4 /* TCDS ASIC; under TurboChannel */
-
-#define BUS_INTR_ESTABLISH(ca, handler, val) \
- (*(ca)->ca_bus->ab_intr_establish)((ca), (handler), (val))
-#define BUS_INTR_DISESTABLISH(ca) \
- (*(ca)->ca_bus->ab_intr_establish)(ca)
-#define BUS_CVTADDR(ca) \
- (*(ca)->ca_bus->ab_cvtaddr)(ca)
-#define BUS_MATCHNAME(ca, name) \
- (*(ca)->ca_bus->ab_matchname)((ca), (name))
-
-struct confargs {
- char *ca_name; /* Device name. */
- int ca_slot; /* Device slot. */
- int ca_offset; /* Offset into slot. */
- struct abus *ca_bus; /* bus device resides on. */
+struct mainbus_attach_args {
+ const char *ma_name; /* device name */
+ int ma_slot; /* CPU "slot" number; only meaningful
+ when attaching CPUs */
};
struct bootdev_data {
@@ -81,14 +48,74 @@ struct bootdev_data {
char *ctrl_dev_type;
};
-void configure __P((void));
-void device_register __P((struct device *, void *));
-void dumpconf __P((void));
+/*
+ * The boot program passes a pointer (in the boot environment virtual
+ * address space; "BEVA") to a bootinfo to the kernel using
+ * the following convention:
+ *
+ * a0 contains first free page frame number
+ * a1 contains page number of current level 1 page table
+ * if a2 contains BOOTINFO_MAGIC and a4 is nonzero:
+ * a3 contains pointer (BEVA) to bootinfo
+ * a4 contains bootinfo version number
+ * if a2 contains BOOTINFO_MAGIC and a4 contains 0 (backward compat):
+ * a3 contains pointer (BEVA) to bootinfo version
+ * (u_long), then the bootinfo
+ */
+
+#define BOOTINFO_MAGIC 0xdeadbeeffeedface
+
+struct bootinfo_v1 {
+ u_long ssym; /* 0: start of kernel sym table */
+ u_long esym; /* 8: end of kernel sym table */
+ char boot_flags[64]; /* 16: boot flags */
+ char booted_kernel[64]; /* 80: name of booted kernel */
+ void *hwrpb; /* 144: hwrpb pointer (BEVA) */
+ u_long hwrpbsize; /* 152: size of hwrpb data */
+ int (*cngetc)(void); /* 160: console getc pointer */
+ void (*cnputc)(int); /* 168: console putc pointer */
+ void (*cnpollc)(int); /* 176: console pollc pointer */
+ u_long pad[9]; /* 184: rsvd for future use */
+ /* 256: total size */
+};
+
+/*
+ * Kernel-internal structure used to hold important bits of boot
+ * information. NOT to be used by boot blocks.
+ *
+ * Note that not all of the fields from the bootinfo struct(s)
+ * passed by the boot blocks aren't here (because they're not currently
+ * used by the kernel!). Fields here which aren't supplied by the
+ * bootinfo structure passed by the boot blocks are supposed to be
+ * filled in at startup with sane contents.
+ */
+struct bootinfo_kernel {
+ u_long ssym; /* start of syms */
+ u_long esym; /* end of syms */
+ u_long hwrpb_phys; /* hwrpb physical address */
+ u_long hwrpb_size; /* size of hwrpb data */
+ char boot_flags[64]; /* boot flags */
+ char booted_kernel[64]; /* name of booted kernel */
+ char booted_dev[64]; /* name of booted device */
+};
-#ifdef EVCNT_COUNTERS
-extern struct evcnt clock_intr_evcnt;
-#endif
+/*
+ * Lookup table entry for Alpha system variations.
+ */
+struct alpha_variation_table {
+ u_int64_t avt_variation; /* variation, from HWRPB */
+ const char *avt_model; /* model string */
+};
-extern struct device *booted_device;
+#ifdef _KERNEL
extern int booted_partition;
extern struct bootdev_data *bootdev_data;
+extern struct bootinfo_kernel bootinfo;
+
+const char *alpha_variation_name(u_int64_t,
+ const struct alpha_variation_table *);
+const char *alpha_unknown_sysname(void);
+
+void configure __P((void));
+void device_register __P((struct device *, void *));
+#endif /* _KERNEL */
diff --git a/sys/arch/alpha/include/bus.h b/sys/arch/alpha/include/bus.h
index 27f67558de3..b95bacf6dbe 100644
--- a/sys/arch/alpha/include/bus.h
+++ b/sys/arch/alpha/include/bus.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus.h,v 1.10 1998/01/20 18:40:09 niklas Exp $ */
+/* $OpenBSD: bus.h,v 1.11 2000/11/08 16:01:10 art Exp $ */
/* $NetBSD: bus.h,v 1.10 1996/12/02 22:19:32 cgd Exp $ */
/*
@@ -48,131 +48,131 @@ struct alpha_bus_space {
void *abs_cookie;
/* mapping/unmapping */
- int (*abs_map) __P((void *, bus_addr_t, bus_size_t,
- int, bus_space_handle_t *));
- void (*abs_unmap) __P((void *, bus_space_handle_t,
- bus_size_t));
- int (*abs_subregion) __P((void *, bus_space_handle_t,
- bus_size_t, bus_size_t, bus_space_handle_t *));
+ int (*abs_map)(void *, bus_addr_t, bus_size_t,
+ int, bus_space_handle_t *);
+ void (*abs_unmap)(void *, bus_space_handle_t,
+ bus_size_t);
+ int (*abs_subregion)(void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *);
/* allocation/deallocation */
- int (*abs_alloc) __P((void *, bus_addr_t, bus_addr_t,
+ int (*abs_alloc)(void *, bus_addr_t, bus_addr_t,
bus_size_t, bus_size_t, bus_size_t, int,
- bus_addr_t *, bus_space_handle_t *));
- void (*abs_free) __P((void *, bus_space_handle_t,
- bus_size_t));
+ bus_addr_t *, bus_space_handle_t *);
+ void (*abs_free)(void *, bus_space_handle_t,
+ bus_size_t);
/* barrier */
- void (*abs_barrier) __P((void *, bus_space_handle_t,
- bus_size_t, bus_size_t, int));
+ void (*abs_barrier)(void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int);
/* read (single) */
- u_int8_t (*abs_r_1) __P((void *, bus_space_handle_t,
- bus_size_t));
- u_int16_t (*abs_r_2) __P((void *, bus_space_handle_t,
- bus_size_t));
- u_int32_t (*abs_r_4) __P((void *, bus_space_handle_t,
- bus_size_t));
- u_int64_t (*abs_r_8) __P((void *, bus_space_handle_t,
- bus_size_t));
+ u_int8_t (*abs_r_1)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int16_t (*abs_r_2)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int32_t (*abs_r_4)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int64_t (*abs_r_8)(void *, bus_space_handle_t,
+ bus_size_t);
/* read multiple */
- void (*abs_rm_1) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t *, bus_size_t));
- void (*abs_rm_2) __P((void *, bus_space_handle_t,
- bus_size_t, u_int16_t *, bus_size_t));
- void (*abs_rm_4) __P((void *, bus_space_handle_t,
- bus_size_t, u_int32_t *, bus_size_t));
- void (*abs_rm_8) __P((void *, bus_space_handle_t,
- bus_size_t, u_int64_t *, bus_size_t));
+ void (*abs_rm_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*abs_rm_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*abs_rm_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*abs_rm_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
/* read region */
- void (*abs_rr_1) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t *, bus_size_t));
- void (*abs_rr_2) __P((void *, bus_space_handle_t,
- bus_size_t, u_int16_t *, bus_size_t));
- void (*abs_rr_4) __P((void *, bus_space_handle_t,
- bus_size_t, u_int32_t *, bus_size_t));
- void (*abs_rr_8) __P((void *, bus_space_handle_t,
- bus_size_t, u_int64_t *, bus_size_t));
+ void (*abs_rr_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*abs_rr_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*abs_rr_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*abs_rr_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
/* write (single) */
- void (*abs_w_1) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t));
- void (*abs_w_2) __P((void *, bus_space_handle_t,
- bus_size_t, u_int16_t));
- void (*abs_w_4) __P((void *, bus_space_handle_t,
- bus_size_t, u_int32_t));
- void (*abs_w_8) __P((void *, bus_space_handle_t,
- bus_size_t, u_int64_t));
+ void (*abs_w_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*abs_w_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*abs_w_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*abs_w_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
/* write multiple */
- void (*abs_wm_1) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int8_t *, bus_size_t));
- void (*abs_wm_2) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int16_t *, bus_size_t));
- void (*abs_wm_4) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int32_t *, bus_size_t));
- void (*abs_wm_8) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int64_t *, bus_size_t));
+ void (*abs_wm_1)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*abs_wm_2)(void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*abs_wm_4)(void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*abs_wm_8)(void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
/* write region */
- void (*abs_wr_1) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int8_t *, bus_size_t));
- void (*abs_wr_2) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int16_t *, bus_size_t));
- void (*abs_wr_4) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int32_t *, bus_size_t));
- void (*abs_wr_8) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int64_t *, bus_size_t));
+ void (*abs_wr_1)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*abs_wr_2)(void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*abs_wr_4)(void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*abs_wr_8)(void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
/* set multiple */
- void (*abs_sm_1) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t, bus_size_t));
- void (*abs_sm_2) __P((void *, bus_space_handle_t,
- bus_size_t, u_int16_t, bus_size_t));
- void (*abs_sm_4) __P((void *, bus_space_handle_t,
- bus_size_t, u_int32_t, bus_size_t));
- void (*abs_sm_8) __P((void *, bus_space_handle_t,
- bus_size_t, u_int64_t, bus_size_t));
+ void (*abs_sm_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*abs_sm_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*abs_sm_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*abs_sm_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
/* set region */
- void (*abs_sr_1) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t, bus_size_t));
- void (*abs_sr_2) __P((void *, bus_space_handle_t,
- bus_size_t, u_int16_t, bus_size_t));
- void (*abs_sr_4) __P((void *, bus_space_handle_t,
- bus_size_t, u_int32_t, bus_size_t));
- void (*abs_sr_8) __P((void *, bus_space_handle_t,
- bus_size_t, u_int64_t, bus_size_t));
+ void (*abs_sr_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*abs_sr_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*abs_sr_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*abs_sr_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
/* copy */
- void (*abs_c_1) __P((void *, bus_space_handle_t, bus_size_t,
- bus_space_handle_t, bus_size_t, bus_size_t));
- void (*abs_c_2) __P((void *, bus_space_handle_t, bus_size_t,
- bus_space_handle_t, bus_size_t, bus_size_t));
- void (*abs_c_4) __P((void *, bus_space_handle_t, bus_size_t,
- bus_space_handle_t, bus_size_t, bus_size_t));
- void (*abs_c_8) __P((void *, bus_space_handle_t, bus_size_t,
- bus_space_handle_t, bus_size_t, bus_size_t));
+ void (*abs_c_1)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*abs_c_2)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*abs_c_4)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*abs_c_8)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
/* OpenBSD extensions follows */
/* read multiple raw */
- void (*abs_rrm_2) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t *, bus_size_t));
- void (*abs_rrm_4) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t *, bus_size_t));
- void (*abs_rrm_8) __P((void *, bus_space_handle_t,
- bus_size_t, u_int8_t *, bus_size_t));
+ void (*abs_rrm_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*abs_rrm_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*abs_rrm_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
/* write multiple raw */
- void (*abs_wrm_2) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int8_t *, bus_size_t));
- void (*abs_wrm_4) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int8_t *, bus_size_t));
- void (*abs_wrm_8) __P((void *, bus_space_handle_t,
- bus_size_t, const u_int8_t *, bus_size_t));
+ void (*abs_wrm_2)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*abs_wrm_4)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*abs_wrm_8)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
};
@@ -260,9 +260,9 @@ struct alpha_bus_space {
/*
- * void bus_space_read_raw_multi_N __P((bus_space_tag_t tag,
+ * void bus_space_read_raw_multi_N(bus_space_tag_t tag,
* bus_space_handle_t bsh, bus_size_t offset,
- * u_int8_t *addr, size_t count));
+ * u_int8_t *addr, size_t count);
*
* Read `count' bytes in 2, 4 or 8 byte wide quantities from bus space
* described by tag/handle/offset and copy into buffer provided. The buffer
@@ -312,9 +312,9 @@ struct alpha_bus_space {
__abs_aligned_nonsingle(wm,8,(t),(h),(o),(a),(c))
/*
- * void bus_space_write_raw_multi_N __P((bus_space_tag_t tag,
+ * void bus_space_write_raw_multi_N(bus_space_tag_t tag,
* bus_space_handle_t bsh, bus_size_t offset,
- * u_int8_t *addr, size_t count));
+ * u_int8_t *addr, size_t count);
*
* Write `count' bytes in 2, 4 or 8 byte wide quantities from the buffer
* provided to bus space described by tag/handle/offset. The buffer
@@ -380,8 +380,257 @@ struct alpha_bus_space {
#define bus_space_copy_8(t, h1, o1, h2, o2, c) \
__abs_copy(8, t, h1, o1, h2, o2, c)
-/* XXX placeholders */
-typedef void *bus_dma_tag_t;
-typedef void *bus_dmamap_t;
+/*
+ * Bus DMA methods.
+ */
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x00 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x01 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x02 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x04 /* hint: map memory DMA coherent */
+#define BUS_DMA_BUS1 0x10 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x20
+#define BUS_DMA_BUS3 0x40
+#define BUS_DMA_BUS4 0x80
+
+/*
+ * Private flags stored in the DMA map.
+ */
+#define DMAMAP_NO_COALESCE 0x40000000 /* don't coalesce adjacent
+ segments */
+#define DMAMAP_HAS_SGMAP 0x80000000 /* sgva/len are valid */
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+struct alpha_sgmap;
+
+/*
+ * bus_dmasync_op_t
+ *
+ * Operations performed by bus_dmamap_sync().
+ */
+typedef enum {
+ BUS_DMASYNC_PREREAD,
+ BUS_DMASYNC_POSTREAD,
+ BUS_DMASYNC_PREWRITE,
+ BUS_DMASYNC_POSTWRITE,
+} bus_dmasync_op_t;
+
+/*
+ * alpha_bus_t
+ *
+ * Busses supported by NetBSD/alpha, used by internal
+ * utility functions. NOT TO BE USED BY MACHINE-INDEPENDENT
+ * CODE!
+ */
+typedef enum {
+ ALPHA_BUS_TURBOCHANNEL,
+ ALPHA_BUS_PCI,
+ ALPHA_BUS_EISA,
+ ALPHA_BUS_ISA,
+ ALPHA_BUS_TLSB,
+} alpha_bus_t;
+
+typedef struct alpha_bus_dma_tag *bus_dma_tag_t;
+typedef struct alpha_bus_dmamap *bus_dmamap_t;
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+struct alpha_bus_dma_segment {
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+};
+typedef struct alpha_bus_dma_segment bus_dma_segment_t;
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the implementation of
+ * DMA for a given bus.
+ */
+struct alpha_bus_dma_tag {
+ void *_cookie; /* cookie used in the guts */
+ bus_addr_t _wbase; /* DMA window base */
+
+ /*
+ * The following two members are used to chain DMA windows
+ * together. If, during the course of a map load, the
+ * resulting physical memory address is too large to
+ * be addressed by the window, the next window will be
+ * attempted. These would be chained together like so:
+ *
+ * direct -> sgmap -> NULL
+ * or
+ * sgmap -> NULL
+ * or
+ * direct -> NULL
+ *
+ * If the window size is 0, it will not be checked (e.g.
+ * TurboChannel DMA).
+ */
+ bus_size_t _wsize;
+ struct alpha_bus_dma_tag *_next_window;
+
+ /*
+ * Some chipsets have a built-in boundary constraint, independent
+ * of what the device requests. This allows that boundary to
+ * be specified. If the device has a more restrictive constraint,
+ * the map will use that, otherwise this boundary will be used.
+ * This value is ignored if 0.
+ */
+ bus_size_t _boundary;
+
+ /*
+ * A chipset may have more than one SGMAP window, so SGMAP
+ * windows also get a pointer to their SGMAP state.
+ */
+ struct alpha_sgmap *_sgmap;
+
+ /*
+ * Internal-use only utility methods. NOT TO BE USED BY
+ * MACHINE-INDEPENDENT CODE!
+ */
+ bus_dma_tag_t (*_get_tag)(bus_dma_tag_t, alpha_bus_t);
+
+ /*
+ * DMA mapping methods.
+ */
+ int (*_dmamap_create)(bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *);
+ void (*_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
+ int (*_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+ int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+ int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+ int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+ void (*_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
+ void (*_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
+ bus_dmasync_op_t);
+
+ /*
+ * DMA memory utility functions.
+ */
+ int (*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t,
+ bus_size_t, bus_dma_segment_t *, int, int *, int);
+ void (*_dmamem_free)(bus_dma_tag_t,
+ bus_dma_segment_t *, int);
+ int (*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *,
+ int, size_t, caddr_t *, int);
+ void (*_dmamem_unmap)(bus_dma_tag_t, caddr_t, size_t);
+ paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *,
+ int, off_t, int, int);
+};
+
+#define alphabus_dma_get_tag(t, b) \
+ (*(t)->_get_tag)(t, b)
+
+#define bus_dmamap_create(t, s, n, m, b, f, p) \
+ (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
+#define bus_dmamap_destroy(t, p) \
+ (*(t)->_dmamap_destroy)((t), (p))
+#define bus_dmamap_load(t, m, b, s, p, f) \
+ (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
+#define bus_dmamap_load_mbuf(t, m, b, f) \
+ (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
+#define bus_dmamap_load_uio(t, m, u, f) \
+ (*(t)->_dmamap_load_uio)((t), (m), (u), (f))
+#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
+ (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
+#define bus_dmamap_unload(t, p) \
+ (*(t)->_dmamap_unload)((t), (p))
+#define bus_dmamap_sync(t, p, op) \
+ (*(t)->_dmamap_sync)((t), (p), (op))
+#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
+ (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
+#define bus_dmamem_free(t, sg, n) \
+ (*(t)->_dmamem_free)((t), (sg), (n))
+#define bus_dmamem_map(t, sg, n, s, k, f) \
+ (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
+#define bus_dmamem_unmap(t, k, s) \
+ (*(t)->_dmamem_unmap)((t), (k), (s))
+#define bus_dmamem_mmap(t, sg, n, o, p, f) \
+ (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+
+/*
+ * bus_dmamap_t
+ *
+ * Describes a DMA mapping.
+ */
+struct alpha_bus_dmamap {
+ /*
+ * PRIVATE MEMBERS: not for use by machine-independent code.
+ */
+ bus_size_t _dm_size; /* largest DMA transfer mappable */
+ int _dm_segcnt; /* number of segs this map can map */
+ bus_size_t _dm_maxsegsz; /* largest possible segment */
+ bus_size_t _dm_boundary; /* don't cross this */
+ int _dm_flags; /* misc. flags */
+
+ /*
+ * This is used only for SGMAP-mapped DMA, but we keep it
+ * here to avoid pointless indirection.
+ */
+ int _dm_pteidx; /* PTE index */
+ int _dm_ptecnt; /* PTE count */
+ u_long _dm_sgva; /* allocated sgva */
+	bus_size_t	_dm_sgvalen;	/* sgva length */
+
+ /*
+ * Private cookie to be used by the DMA back-end.
+ */
+ void *_dm_cookie;
+
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_size_t dm_mapsize; /* size of the mapping */
+ int dm_nsegs; /* # valid segments in mapping */
+ bus_dma_segment_t dm_segs[1]; /* segments; variable length */
+};
+
+#ifdef _ALPHA_BUS_DMA_PRIVATE
+int _bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *);
+void _bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
+
+int _bus_dmamap_load_direct(bus_dma_tag_t, bus_dmamap_t,
+ void *, bus_size_t, struct proc *, int);
+int _bus_dmamap_load_mbuf_direct(bus_dma_tag_t,
+ bus_dmamap_t, struct mbuf *, int);
+int _bus_dmamap_load_uio_direct(bus_dma_tag_t,
+ bus_dmamap_t, struct uio *, int);
+int _bus_dmamap_load_raw_direct(bus_dma_tag_t,
+ bus_dmamap_t, bus_dma_segment_t *, int, bus_size_t, int);
+
+void _bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
+void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t);
+
+int _bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
+int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
+ paddr_t low, paddr_t high);
+void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs);
+int _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, size_t size, caddr_t *kvap, int flags);
+void _bus_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva,
+ size_t size);
+paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, off_t off, int prot, int flags);
+#endif /* _ALPHA_BUS_DMA_PRIVATE */
#endif /* _ALPHA_BUS_H_ */
diff --git a/sys/arch/alpha/include/bwx.h b/sys/arch/alpha/include/bwx.h
new file mode 100644
index 00000000000..c24cb1f03fd
--- /dev/null
+++ b/sys/arch/alpha/include/bwx.h
@@ -0,0 +1,117 @@
+/* $NetBSD: bwx.h,v 1.3 2000/06/08 02:55:37 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ALPHA_BWX_H_
+#define _ALPHA_BWX_H_
+
+/*
+ * Alpha Byte/Word Extension instructions.
+ *
+ * These instructions are available on EV56 (21164A) and later processors.
+ *
+ * See "Alpha Architecture Handbook, Version 3", DEC order number EC-QD2KB-TE.
+ */
+
+static __inline u_int8_t
+alpha_ldbu(__volatile u_int8_t *a0)
+{
+ u_int8_t v0;
+
+ __asm __volatile("ldbu %0, %1"
+ : "=r" (v0)
+ : "m" (*a0));
+
+ return (v0);
+}
+
+static __inline u_int16_t
+alpha_ldwu(__volatile u_int16_t *a0)
+{
+ u_int16_t v0;
+
+ __asm __volatile("ldwu %0, %1"
+ : "=r" (v0)
+ : "m" (*a0));
+
+ return (v0);
+}
+
+static __inline void
+alpha_stb(__volatile u_int8_t *a0, u_int8_t a1)
+{
+
+ __asm __volatile("stb %1, %0"
+ : "=m" (*a0)
+ : "r" (a1));
+}
+
+static __inline void
+alpha_stw(__volatile u_int16_t *a0, u_int16_t a1)
+{
+
+ __asm __volatile("stw %1, %0"
+ : "=m" (*a0)
+ : "r" (a1));
+}
+
+static __inline u_int8_t
+alpha_sextb(u_int8_t a0)
+{
+ u_int8_t v0;
+
+ __asm __volatile("sextb %1, %0"
+ : "=r" (v0)
+ : "r" (a0));
+
+ return (v0);
+}
+
+static __inline u_int16_t
+alpha_sextw(u_int16_t a0)
+{
+ u_int16_t v0;
+
+ __asm __volatile("sextw %1, %0"
+ : "=r" (v0)
+ : "r" (a0));
+
+ return (v0);
+}
+
+#endif /* _ALPHA_BWX_H_ */
diff --git a/sys/arch/alpha/include/cpu.h b/sys/arch/alpha/include/cpu.h
index 1b5d53cc805..ace99eeae42 100644
--- a/sys/arch/alpha/include/cpu.h
+++ b/sys/arch/alpha/include/cpu.h
@@ -1,5 +1,41 @@
-/* $OpenBSD: cpu.h,v 1.7 1997/07/08 10:55:52 niklas Exp $ */
-/* $NetBSD: cpu.h,v 1.14 1996/12/07 01:54:50 cgd Exp $ */
+/* $NetBSD: cpu.h,v 1.45 2000/08/21 02:03:12 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1988 University of Utah.
@@ -50,13 +86,86 @@
* Exported definitions unique to Alpha cpu support.
*/
+#include <machine/alpha_cpu.h>
+#include <machine/alpha.h>
+
+#ifdef _KERNEL
#include <machine/frame.h>
/*
+ * Machine check information.
+ */
+struct mchkinfo {
+ __volatile int mc_expected; /* machine check is expected */
+ __volatile int mc_received; /* machine check was received */
+};
+
+typedef long cpuid_t;
+
+struct cpu_info {
+ /*
+ * Public members.
+ */
+#if defined(DIAGNOSTIC) || defined(LOCKDEBUG)
+ u_long ci_spin_locks; /* # of spin locks held */
+ u_long ci_simple_locks; /* # of simple locks held */
+#endif
+ struct proc *ci_curproc; /* current owner of the processor */
+
+ /*
+ * Private members.
+ */
+ struct mchkinfo ci_mcinfo; /* machine check info */
+ cpuid_t ci_cpuid; /* our CPU ID */
+ struct proc *ci_fpcurproc; /* current owner of the FPU */
+ paddr_t ci_curpcb; /* PA of current HW PCB */
+ struct pcb *ci_idle_pcb; /* our idle PCB */
+ paddr_t ci_idle_pcb_paddr; /* PA of idle PCB */
+ struct cpu_softc *ci_softc; /* pointer to our device */
+ u_long ci_want_resched; /* preempt current process */
+ u_long ci_astpending; /* AST is pending */
+ u_long ci_intrdepth; /* interrupt trap depth */
+#if defined(MULTIPROCESSOR)
+ u_long ci_flags; /* flags; see below */
+ u_long ci_ipis; /* interprocessor interrupts pending */
+#endif
+};
+
+#define CPUF_PRIMARY 0x01 /* CPU is primary CPU */
+#define CPUF_PRESENT 0x02 /* CPU is present */
+#define CPUF_RUNNING 0x04 /* CPU is running */
+
+#if defined(MULTIPROCESSOR)
+extern __volatile u_long cpus_running;
+extern __volatile u_long cpus_paused;
+extern struct cpu_info cpu_info[];
+
+#define curcpu() ((struct cpu_info *)alpha_pal_rdval())
+#define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY)
+
+void cpu_boot_secondary_processors(void);
+
+void cpu_pause_resume(unsigned long, int);
+void cpu_pause_resume_all(int);
+#else /* ! MULTIPROCESSOR */
+extern struct cpu_info cpu_info_store;
+
+#define curcpu() (&cpu_info_store)
+#endif /* MULTIPROCESSOR */
+
+#define curproc curcpu()->ci_curproc
+#define fpcurproc curcpu()->ci_fpcurproc
+#define curpcb curcpu()->ci_curpcb
+
+extern u_long cpu_implver; /* from IMPLVER instruction */
+extern u_long cpu_amask; /* from AMASK instruction */
+
+/*
* definitions of cpu-dependent requirements
* referenced in generic code
*/
-#define cpu_wait(p) /* nothing */
+#define cpu_wait(p) /* nothing */
+#define cpu_number() alpha_pal_whami()
/*
* Arguments to hardclock and gatherstats encapsulate the previous
@@ -71,36 +180,64 @@ struct clockframe {
#define CLKF_BASEPRI(framep) \
(((framep)->cf_tf.tf_regs[FRAME_PS] & ALPHA_PSL_IPL_MASK) == 0)
#define CLKF_PC(framep) ((framep)->cf_tf.tf_regs[FRAME_PC])
+
/*
- * XXX No way to accurately tell if we were in interrupt mode before taking
- * clock interrupt.
+ * This isn't perfect; if the clock interrupt comes in before the
+ * r/m/w cycle is complete, we won't be counted... but it's not
+ * like this statistic has to be extremely accurate.
*/
-#define CLKF_INTR(framep) (0)
+#define CLKF_INTR(framep) (curcpu()->ci_intrdepth)
/*
* Preempt the current process if in interrupt from user mode,
* or after the current trap/syscall if in system mode.
+ *
+ * XXXSMP
+ * need_resched() needs to take a cpu_info *.
*/
-#define need_resched() { want_resched = 1; aston(); }
+#define need_resched() \
+do { \
+ curcpu()->ci_want_resched = 1; \
+ aston(curcpu()); \
+} while (/*CONSTCOND*/0)
/*
* Give a profiling tick to the current process when the user profiling
- * buffer pages are invalid. On the hp300, request an ast to send us
+ * buffer pages are invalid. On the Alpha, request an AST to send us
* through trap, marking the proc as needing a profiling tick.
*/
-#define need_proftick(p) { (p)->p_flag |= P_OWEUPC; aston(); }
+#ifdef notyet
+#define need_proftick(p) \
+do { \
+ (p)->p_flag |= P_OWEUPC; \
+ aston((p)->p_cpu); \
+} while (/*CONSTCOND*/0)
+#else
+#define need_proftick(p) \
+do { \
+ (p)->p_flag |= P_OWEUPC; \
+ aston(curcpu()); \
+} while (/*CONSTCOND*/0)
+#endif
/*
* Notify the current process (p) that it has a signal pending,
* process as soon as possible.
*/
-#define signotify(p) aston()
-
-#define aston() (astpending = 1)
-
-u_int64_t astpending; /* need to trap before returning to user mode */
-u_int64_t want_resched; /* resched() was called */
+#ifdef notyet
+#define signotify(p) aston((p)->p_cpu)
+#else
+#define signotify(p) aston(curcpu())
+#endif
+/*
+ * XXXSMP
+ * Should we send an AST IPI? Or just let it handle it next time
+ * it sees a normal kernel entry? I guess letting it happen later
+ * follows the `asynchronous' part of the name...
+ */
+#define aston(ci) ((ci)->ci_astpending = 1)
+#endif /* _KERNEL */
/*
* CTL_MACHDEP definitions.
@@ -131,44 +268,7 @@ struct reg;
struct rpb;
struct trapframe;
-extern int cold;
-extern struct proc *fpcurproc;
-extern struct rpb *hwrpb;
-
-void XentArith __P((u_int64_t, u_int64_t, u_int64_t)); /* MAGIC */
-void XentIF __P((u_int64_t, u_int64_t, u_int64_t)); /* MAGIC */
-void XentInt __P((u_int64_t, u_int64_t, u_int64_t)); /* MAGIC */
-void XentMM __P((u_int64_t, u_int64_t, u_int64_t)); /* MAGIC */
-void XentRestart __P((void)); /* MAGIC */
-void XentSys __P((u_int64_t, u_int64_t, u_int64_t)); /* MAGIC */
-void XentUna __P((u_int64_t, u_int64_t, u_int64_t)); /* MAGIC */
-void alpha_init __P((u_long, u_long, char *));
-void ast __P((struct trapframe *));
-int badaddr __P((void *, size_t));
-void child_return __P((struct proc *p));
-void configure __P((void));
-u_int64_t console_restart __P((u_int64_t, u_int64_t, u_int64_t));
-void do_sir __P((void));
-void dumpconf __P((void));
-void exception_return __P((void)); /* MAGIC */
-void frametoreg __P((struct trapframe *, struct reg *));
-long fswintrberr __P((void)); /* MAGIC */
-void init_prom_interface __P((void));
-void interrupt __P((unsigned long, unsigned long, unsigned long,
- struct trapframe *));
-u_int64_t hwrpb_checksum __P((void));
-void hwrpb_restart_setup __P((void));
-void regdump __P((struct trapframe *));
-void regtoframe __P((struct reg *, struct trapframe *));
-void savectx __P((struct pcb *));
-void set_clockintr __P((void));
-void set_iointr __P((void (*)(void *, unsigned long)));
-void switch_exit __P((struct proc *)); /* MAGIC */
-void switch_trampoline __P((void)); /* MAGIC */
-void syscall __P((u_int64_t, struct trapframe *));
-void trap __P((unsigned long, unsigned long, unsigned long, unsigned long,
- struct trapframe *));
+int badaddr(void *, size_t);
#endif /* _KERNEL */
-
#endif /* _ALPHA_CPU_H_ */
diff --git a/sys/arch/alpha/include/cpuconf.h b/sys/arch/alpha/include/cpuconf.h
index f60c3efb5c3..0a437f0831a 100644
--- a/sys/arch/alpha/include/cpuconf.h
+++ b/sys/arch/alpha/include/cpuconf.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: cpuconf.h,v 1.2 1997/11/10 15:53:09 niklas Exp $ */
-/* $NetBSD: cpuconf.h,v 1.1 1996/11/12 05:14:40 cgd Exp $ */
+/* $NetBSD: cpuconf.h,v 1.12 2000/06/08 03:10:06 thorpej Exp $ */
/*
* Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
@@ -30,45 +29,79 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * Additional reworking by Matthew Jacob for NASA/Ames Research Center.
+ * Copyright (c) 1997
+ */
-struct cpusw {
- const char *family, *option;
- int present;
- const char *(*model_name) __P((void));
- void (*cons_init) __P((void));
- const char *(*iobus_name) __P((void));
- void (*device_register) __P((struct device *dev,
- void *aux));
-};
+#ifndef _ALPHA_CPUCONF_H_
+#define _ALPHA_CPUCONF_H_
+
+/*
+ * Platform Specific Information and Function Hooks.
+ *
+ * The family and model tags are strings describing the platform.
+ *
+ * The tag iobus describes the primary iobus for the platform- primarily
+ * to give a hint as to where to start configuring. The likely choices
+ * are one of tcasic, lca, apecs, cia, or tlsb.
+ */
-#define CONCAT(a,b) __CONCAT(a,b)
+struct clockframe;
-#define cpu_fn_name(p,f) CONCAT(CONCAT(p,_),f)
+struct platform {
+ /*
+ * Platform Information.
+ */
+ const char *family; /* Family Name */
+ const char *model; /* Model (variant) Name */
+ const char *iobus; /* Primary iobus name */
-#define cpu_decl(p) \
- extern const char *cpu_fn_name(p,model_name) __P((void)); \
- extern void cpu_fn_name(p,cons_init) __P((void)); \
- extern const char *cpu_fn_name(p,iobus_name) __P((void)); \
- extern void cpu_fn_name(p,device_register) \
- __P((struct device *, void*));
+ /*
+ * Platform Specific Function Hooks
+ * cons_init - console initialization
+ * device_register - boot configuration aid
+ * iointr - I/O interrupt handler
+ * clockintr - Clock Interrupt Handler
+ * mcheck_handler - Platform Specific Machine Check Handler
+ */
+ void (*cons_init)(void);
+ void (*device_register)(struct device *, void *);
+ void (*iointr)(void *, unsigned long);
+ void (*clockintr)(struct clockframe *);
+ void (*mcheck_handler)(unsigned long, struct trapframe *,
+ unsigned long, unsigned long);
+ void (*powerdown)(void);
+};
-#define cpu_unknown() { NULL, NULL, 0, }
-#define cpu_notdef(f) { f, NULL, 0 }
+/*
+ * There is an array of functions to initialize the platform structure.
+ *
+ * It's responsible for filling in the family, model and iobus
+ * tags. It may optionally fill in the cons_init, device_register and
+ * mcheck_handler tags.
+ *
+ * The iointr tag is filled in by set_iointr (in interrupt.c).
+ * The clockintr tag is filled in by cpu_initclocks (in clock.c).
+ *
+ * nocpu is the function to call when you can't figure out what platform you're on.
+ * There's no return from this function.
+ */
-#define cpu_option_string(o) __STRING(o)
-#define cpu_option_present(o) (CONCAT(N,o) > NULL)
-#define cpu_function_init(o,p,f) \
- (cpu_option_present(o) ? cpu_fn_name(p,f) : 0)
-#define cpu_init(f,o,p) \
- { \
- f, cpu_option_string(o) , cpu_option_present(o), \
- cpu_function_init(o,p,model_name), \
- cpu_function_init(o,p,cons_init), \
- cpu_function_init(o,p,iobus_name), \
- cpu_function_init(o,p,device_register), \
- }
+struct cpuinit {
+ void (*init)(void);
+ u_int64_t systype;
+ const char *option;
+};
#ifdef _KERNEL
-extern const struct cpusw cpusw[];
-extern const int ncpusw;
-#endif
+#define cpu_notsupp(st, str) { platform_not_supported, st, str }
+
+#define cpu_init(st, fn, opt) { fn, st, opt }
+
+extern struct platform platform;
+extern const struct cpuinit *platform_lookup(int);
+extern void platform_not_configured(void);
+extern void platform_not_supported(void);
+#endif /* _KERNEL */
+#endif /* ! _ALPHA_CPUCONF_H_ */
diff --git a/sys/arch/alpha/include/db_machdep.h b/sys/arch/alpha/include/db_machdep.h
index 5e1f70abc05..b6a83772034 100644
--- a/sys/arch/alpha/include/db_machdep.h
+++ b/sys/arch/alpha/include/db_machdep.h
@@ -1,87 +1,183 @@
-/* $OpenBSD: db_machdep.h,v 1.8 2000/04/06 13:30:47 art Exp $ */
+/* $NetBSD: db_machdep.h,v 1.11 2000/06/29 09:02:57 mrg Exp $ */
/*
- * Copyright (c) 1997 Niklas Hallqvist. All rights reserverd.
+ * Copyright (c) 1995 Carnegie-Mellon University.
+ * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Niklas Hallqvist.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * Author: Chris G. Demetriou
*
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
*/
#ifndef _ALPHA_DB_MACHDEP_H_
#define _ALPHA_DB_MACHDEP_H_
-/* XXX - Need to include vm.h for boolean_t */
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <sys/param.h>
#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+#include <machine/frame.h>
-struct opcode {
- enum opc_fmt { OPC_PAL, OPC_RES, OPC_MEM, OPC_OP, OPC_BR } opc_fmt;
- char *opc_name;
- int opc_print;
-};
-extern struct opcode opcode[];
+typedef vaddr_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
-/* types the generic ddb module needs */
-typedef vm_offset_t db_addr_t;
-typedef long db_expr_t;
typedef struct trapframe db_regs_t;
-
-db_regs_t ddb_regs;
+db_regs_t ddb_regs; /* register state */
#define DDB_REGS (&ddb_regs)
#define PC_REGS(regs) ((db_addr_t)(regs)->tf_regs[FRAME_PC])
-/* Breakpoint related definitions */
-#define BKPT_INST 0x00000080 /* call_pal bpt */
-#define BKPT_SIZE sizeof(int)
-#define BKPT_SET(inst) BKPT_INST
+#define BKPT_INST 0x00000080 /* breakpoint instruction */
+#define BKPT_SIZE (4) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK(regs) \
+ ((regs)->tf_regs[FRAME_PC] -= BKPT_SIZE)
-#define IS_BREAKPOINT_TRAP(type, code) \
- ((type) == ALPHA_KENTRY_IF && (code) == ALPHA_IF_CODE_BPT)
-#ifdef notyet
-#define IS_WATCHPOINT_TRAP(type, code) ((type) == ALPHA_KENTRY_MM)
-#else
+#define SOFTWARE_SSTEP 1 /* no hardware support */
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == ALPHA_KENTRY_IF && \
+ (code) == ALPHA_IF_CODE_BPT)
#define IS_WATCHPOINT_TRAP(type, code) 0
-#endif
-
-#define FIXUP_PC_AFTER_BREAK(regs) ((regs)->tf_regs[FRAME_PC] -= sizeof(int))
-
-#define SOFTWARE_SSTEP
-#define DB_VALID_BREAKPOINT(addr) db_valid_breakpoint(addr)
-
-/* Hack to skip GCC "unused" warnings. */
-#define inst_trap_return(ins) ((ins) & 0) /* XXX */
-#define inst_return(ins) (((ins) & 0xfc000000) == 0x68000000)
-
-int inst_call __P((u_int));
-int inst_branch __P((u_int));
-int inst_load __P((u_int));
-int inst_store __P((u_int));
-db_addr_t branch_taken __P((u_int, db_addr_t,
- register_t (*) __P((db_regs_t *, int)), db_regs_t *));
-db_addr_t next_instr_address __P((db_addr_t, int));
-int kdb_trap __P((int, int, db_regs_t *));
-int db_valid_breakpoint __P((db_addr_t));
+
+/*
+ * Functions needed for software single-stepping.
+ */
+
+boolean_t db_inst_trap_return(int inst);
+boolean_t db_inst_return(int inst);
+boolean_t db_inst_call(int inst);
+boolean_t db_inst_branch(int inst);
+boolean_t db_inst_load(int inst);
+boolean_t db_inst_store(int inst);
+boolean_t db_inst_unconditional_flow_transfer(int inst);
+db_addr_t db_branch_taken(int inst, db_addr_t pc, db_regs_t *regs);
+
+#define inst_trap_return(ins) db_inst_trap_return(ins)
+#define inst_return(ins) db_inst_return(ins)
+#define inst_call(ins) db_inst_call(ins)
+#define inst_branch(ins) db_inst_branch(ins)
+#define inst_load(ins) db_inst_load(ins)
+#define inst_store(ins) db_inst_store(ins)
+#define inst_unconditional_flow_transfer(ins) \
+ db_inst_unconditional_flow_transfer(ins)
+#define branch_taken(ins, pc, getreg, regs) \
+ db_branch_taken((ins), (pc), (regs))
+
+/* No delay slots on Alpha. */
+#define next_instr_address(v, b) ((db_addr_t) ((b) ? (v) : ((v) + 4)))
+
+u_long db_register_value(db_regs_t *, int);
+int ddb_trap(unsigned long, unsigned long, unsigned long,
+ unsigned long, struct trapframe *);
+
+int alpha_debug(unsigned long, unsigned long, unsigned long,
+ unsigned long, struct trapframe *);
+
+/*
+ * We define some of our own commands.
+ */
+#define DB_MACHINE_COMMANDS
+
+/*
+ * We use Elf64 symbols in DDB.
+ */
+#define DB_ELF_SYMBOLS
+#define DB_ELFSIZE 64
+
+/*
+ * Stuff for KGDB.
+ */
+typedef long kgdb_reg_t;
+#define KGDB_NUMREGS 66 /* from tm-alpha.h, NUM_REGS */
+#define KGDB_REG_V0 0
+#define KGDB_REG_T0 1
+#define KGDB_REG_T1 2
+#define KGDB_REG_T2 3
+#define KGDB_REG_T3 4
+#define KGDB_REG_T4 5
+#define KGDB_REG_T5 6
+#define KGDB_REG_T6 7
+#define KGDB_REG_T7 8
+#define KGDB_REG_S0 9
+#define KGDB_REG_S1 10
+#define KGDB_REG_S2 11
+#define KGDB_REG_S3 12
+#define KGDB_REG_S4 13
+#define KGDB_REG_S5 14
+#define KGDB_REG_S6 15 /* FP */
+#define KGDB_REG_A0 16
+#define KGDB_REG_A1 17
+#define KGDB_REG_A2 18
+#define KGDB_REG_A3 19
+#define KGDB_REG_A4 20
+#define KGDB_REG_A5 21
+#define KGDB_REG_T8 22
+#define KGDB_REG_T9 23
+#define KGDB_REG_T10 24
+#define KGDB_REG_T11 25
+#define KGDB_REG_RA 26
+#define KGDB_REG_T12 27
+#define KGDB_REG_AT 28
+#define KGDB_REG_GP 29
+#define KGDB_REG_SP 30
+#define KGDB_REG_ZERO 31
+#define KGDB_REG_F0 32
+#define KGDB_REG_F1 33
+#define KGDB_REG_F2 34
+#define KGDB_REG_F3 35
+#define KGDB_REG_F4 36
+#define KGDB_REG_F5 37
+#define KGDB_REG_F6 38
+#define KGDB_REG_F7 39
+#define KGDB_REG_F8 40
+#define KGDB_REG_F9 41
+#define KGDB_REG_F10 42
+#define KGDB_REG_F11 43
+#define KGDB_REG_F12 44
+#define KGDB_REG_F13 45
+#define KGDB_REG_F14 46
+#define KGDB_REG_F15 47
+#define KGDB_REG_F16 48
+#define KGDB_REG_F17 49
+#define KGDB_REG_F18 50
+#define KGDB_REG_F19 51
+#define KGDB_REG_F20 52
+#define KGDB_REG_F21 53
+#define KGDB_REG_F22 54
+#define KGDB_REG_F23 55
+#define KGDB_REG_F24 56
+#define KGDB_REG_F25 57
+#define KGDB_REG_F26 58
+#define KGDB_REG_F27 59
+#define KGDB_REG_F28 60
+#define KGDB_REG_F29 61
+#define KGDB_REG_F30 62
+#define KGDB_REG_F31 63
+#define KGDB_REG_PC 64
+#define KGDB_REG_VFP 65
+
+/* Too much? Must be large enough for register transfer. */
+#define KGDB_BUFLEN 1024
#endif /* _ALPHA_DB_MACHDEP_H_ */
diff --git a/sys/arch/alpha/include/intr.h b/sys/arch/alpha/include/intr.h
index 6c30e75771a..2c9bebf21da 100644
--- a/sys/arch/alpha/include/intr.h
+++ b/sys/arch/alpha/include/intr.h
@@ -1,7 +1,7 @@
-/* $OpenBSD: intr.h,v 1.6 2000/07/06 15:25:02 ho Exp $ */
-/* $NetBSD: intr.h,v 1.4 1996/12/03 17:34:47 cgd Exp $ */
+/* $NetBSD: intr.h,v 1.25 2000/05/23 05:12:56 thorpej Exp $ */
/*
+ * Copyright (c) 1997 Christopher G. Demetriou. All rights reserved.
* Copyright (c) 1996 Carnegie-Mellon University.
* All rights reserved.
*
@@ -32,6 +32,7 @@
#define _ALPHA_INTR_H_
#include <sys/queue.h>
+#include <machine/atomic.h>
#define IPL_NONE 0 /* disable only this interrupt */
#define IPL_BIO 1 /* disable block I/O interrupts */
@@ -39,6 +40,7 @@
#define IPL_TTY 3 /* disable terminal interrupts */
#define IPL_CLOCK 4 /* disable clock interrupts */
#define IPL_HIGH 5 /* disable all interrupts */
+#define IPL_SERIAL 6 /* disable serial interrupts */
#define IST_UNUSABLE -1 /* interrupt cannot be used */
#define IST_NONE 0 /* none (dummy) */
@@ -46,19 +48,36 @@
#define IST_EDGE 2 /* edge-triggered */
#define IST_LEVEL 3 /* level-triggered */
+#ifdef _KERNEL
+
+/* IPL-lowering/restoring macros */
#define splx(s) \
- (s == ALPHA_PSL_IPL_0 ? spl0() : alpha_pal_swpipl(s))
-#define splsoft() alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT)
-#define spllowersoftclock() splsoft()
-#define splsoftclock() splsoft()
-#define splsoftnet() splsoft()
-#define splnet() alpha_pal_swpipl(ALPHA_PSL_IPL_IO)
-#define splbio() alpha_pal_swpipl(ALPHA_PSL_IPL_IO)
-#define splimp() alpha_pal_swpipl(ALPHA_PSL_IPL_IO)
-#define spltty() alpha_pal_swpipl(ALPHA_PSL_IPL_IO)
-#define splclock() alpha_pal_swpipl(ALPHA_PSL_IPL_CLOCK)
-#define splstatclock() alpha_pal_swpipl(ALPHA_PSL_IPL_CLOCK)
-#define splhigh() alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH)
+ ((s) == ALPHA_PSL_IPL_0 ? spl0() : alpha_pal_swpipl(s))
+#define spllowersoftclock() alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT)
+
+/* IPL-raising functions/macros */
+static __inline int _splraise __P((int)) __attribute__ ((unused));
+static __inline int
+_splraise(s)
+ int s;
+{
+ int cur = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;
+ return (s > cur ? alpha_pal_swpipl(s) : cur);
+}
+#define splsoft() _splraise(ALPHA_PSL_IPL_SOFT)
+#define splsoftserial() splsoft()
+#define splsoftclock() splsoft()
+#define splsoftnet() splsoft()
+#define splnet() _splraise(ALPHA_PSL_IPL_IO)
+#define splbio() _splraise(ALPHA_PSL_IPL_IO)
+#define splimp() _splraise(ALPHA_PSL_IPL_IO)
+#define spltty() _splraise(ALPHA_PSL_IPL_IO)
+#define splserial() _splraise(ALPHA_PSL_IPL_IO)
+#define splclock() _splraise(ALPHA_PSL_IPL_CLOCK)
+#define splstatclock() _splraise(ALPHA_PSL_IPL_CLOCK)
+#define splhigh() _splraise(ALPHA_PSL_IPL_HIGH)
+
+#define spllpt() spltty()
/*
* simulated software interrupt register
@@ -67,9 +86,31 @@ extern u_int64_t ssir;
#define SIR_NET 0x1
#define SIR_CLOCK 0x2
+#define SIR_SERIAL 0x4
+
+#define setsoft(x) atomic_setbits_ulong(&ssir, (x))
+
+#define setsoftnet() setsoft(SIR_NET)
+#define setsoftclock() setsoft(SIR_CLOCK)
+#define setsoftserial() setsoft(SIR_SERIAL)
+
+/*
+ * Interprocessor interrupts. In order how we want them processed.
+ */
+#define ALPHA_IPI_HALT 0x0000000000000001UL
+#define ALPHA_IPI_TBIA 0x0000000000000002UL
+#define ALPHA_IPI_TBIAP 0x0000000000000004UL
+#define ALPHA_IPI_SHOOTDOWN 0x0000000000000008UL
+#define ALPHA_IPI_IMB 0x0000000000000010UL
+#define ALPHA_IPI_AST 0x0000000000000020UL
+
+#define ALPHA_NIPIS 6 /* must not exceed 64 */
-#define setsoftnet() ssir |= SIR_NET
-#define setsoftclock() ssir |= SIR_CLOCK
+typedef void (*ipifunc_t) __P((void));
+extern ipifunc_t ipifuncs[ALPHA_NIPIS];
+
+void alpha_send_ipi __P((unsigned long, unsigned long));
+void alpha_broadcast_ipi __P((unsigned long));
/*
* Alpha shared-interrupt-line common code.
@@ -78,27 +119,34 @@ extern u_int64_t ssir;
struct alpha_shared_intrhand {
TAILQ_ENTRY(alpha_shared_intrhand)
ih_q;
+ struct alpha_shared_intr *ih_intrhead;
int (*ih_fn) __P((void *));
void *ih_arg;
int ih_level;
+ unsigned int ih_num;
};
struct alpha_shared_intr {
TAILQ_HEAD(,alpha_shared_intrhand)
intr_q;
+ void *intr_private;
int intr_sharetype;
int intr_dfltsharetype;
int intr_nstrays;
int intr_maxstrays;
};
+#define ALPHA_SHARED_INTR_DISABLE(asi, num) \
+ ((asi)[num].intr_maxstrays != 0 && \
+ (asi)[num].intr_nstrays == (asi)[num].intr_maxstrays)
+
struct alpha_shared_intr *alpha_shared_intr_alloc __P((unsigned int));
int alpha_shared_intr_dispatch __P((struct alpha_shared_intr *,
unsigned int));
-int alpha_shared_intr_check __P((struct alpha_shared_intr *,
- unsigned int, int));
void *alpha_shared_intr_establish __P((struct alpha_shared_intr *,
unsigned int, int, int, int (*)(void *), void *, const char *));
+void alpha_shared_intr_disestablish __P((struct alpha_shared_intr *,
+ void *, const char *));
int alpha_shared_intr_get_sharetype __P((struct alpha_shared_intr *,
unsigned int));
int alpha_shared_intr_isactive __P((struct alpha_shared_intr *,
@@ -109,5 +157,12 @@ void alpha_shared_intr_set_maxstrays __P((struct alpha_shared_intr *,
unsigned int, int));
void alpha_shared_intr_stray __P((struct alpha_shared_intr *, unsigned int,
const char *));
+void alpha_shared_intr_set_private __P((struct alpha_shared_intr *,
+ unsigned int, void *));
+void *alpha_shared_intr_get_private __P((struct alpha_shared_intr *,
+ unsigned int));
+
+void set_iointr(void (*)(void *, unsigned long));
-#endif
+#endif /* _KERNEL */
+#endif /* ! _ALPHA_INTR_H_ */
diff --git a/sys/arch/alpha/include/kcore.h b/sys/arch/alpha/include/kcore.h
index 87da4d36d0c..f0d0dbaac0f 100644
--- a/sys/arch/alpha/include/kcore.h
+++ b/sys/arch/alpha/include/kcore.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: kcore.h,v 1.1 1996/10/30 22:39:11 niklas Exp $ */
-/* $NetBSD: kcore.h,v 1.1 1996/10/01 18:38:05 cgd Exp $ */
+/* $NetBSD: kcore.h,v 1.3 1998/02/14 00:17:57 cgd Exp $ */
/*
* Copyright (c) 1996 Carnegie-Mellon University.
@@ -34,7 +33,10 @@
typedef struct cpu_kcore_hdr {
u_int64_t lev1map_pa; /* PA of Lev1map */
u_int64_t page_size; /* Page size */
- phys_ram_seg_t core_seg; /* Core addrs; only one seg */
+ u_int64_t nmemsegs; /* Number of RAM segments */
+#if 0
+ phys_ram_seg_t memsegs[]; /* RAM segments */
+#endif
} cpu_kcore_hdr_t;
#endif /* _ALPHA_KCORE_H_ */
diff --git a/sys/arch/alpha/include/pal.h b/sys/arch/alpha/include/pal.h
new file mode 100644
index 00000000000..9fdeb8319f8
--- /dev/null
+++ b/sys/arch/alpha/include/pal.h
@@ -0,0 +1,92 @@
+/* $NetBSD: pal.h,v 1.1 1997/09/06 01:23:53 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1991,1990,1989,1994,1995,1996 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * PAL "function" codes (used as arguments to call_pal instructions).
+ *
+ * Those marked with "P" are privileged, and those marked with "U"
+ * are unprivileged.
+ */
+
+/* Common PAL function codes. */
+#define PAL_halt 0x0000 /* P */
+#define PAL_cflush 0x0001 /* P */
+#define PAL_draina 0x0002 /* P */
+#define PAL_cserve 0x0009 /* P */
+#define PAL_swppal 0x000a /* P */
+#define PAL_ipir 0x000d /* P */
+#define PAL_bpt 0x0080 /* U */
+#define PAL_bugchk 0x0081 /* U */
+#define PAL_imb 0x0086 /* U */
+#define PAL_rdunique 0x009e /* U */
+#define PAL_wrunique 0x009f /* U */
+#define PAL_gentrap 0x00aa /* U */
+
+/* VMS PAL function codes. */
+#define PAL_VMS_ldqp 0x0003 /* P */
+#define PAL_VMS_stqp 0x0004 /* P */
+#define PAL_VMS_mtpr_fen 0x000c /* P */
+#define PAL_VMS_mtpr_ipir 0x000d /* P */
+#define PAL_VMS_mfpr_ipl 0x000e /* P */
+#define PAL_VMS_mtpr_ipl 0x000f /* P */
+#define PAL_VMS_mfpr_mces 0x0010 /* P */
+#define PAL_VMS_mtpr_mces 0x0011 /* P */
+#define PAL_VMS_mfpr_prbr 0x0013 /* P */
+#define PAL_VMS_mtpr_prbr 0x0014 /* P */
+#define PAL_VMS_mfpr_ptbr 0x0015 /* P */
+#define PAL_VMS_mtpr_scbb 0x0017 /* P */
+#define PAL_VMS_mtpr_sirr 0x0018 /* P */
+#define PAL_VMS_mtpr_tbia 0x001b /* P */
+#define PAL_VMS_mtpr_tbiap 0x001c /* P */
+#define PAL_VMS_mtpr_tbis 0x001d /* P */
+#define PAL_VMS_mfpr_usp 0x0022 /* P */
+#define PAL_VMS_mtpr_usp 0x0023 /* P */
+#define PAL_VMS_mfpr_vptb 0x0029 /* P */
+#define PAL_VMS_mfpr_whami 0x003f /* P */
+#define PAL_VMS_rei 0x0092 /* U */
+
+/* OSF/1 PAL function codes. */
+#define PAL_OSF1_rdmces 0x0010 /* P */
+#define PAL_OSF1_wrmces 0x0011 /* P */
+#define PAL_OSF1_wrfen 0x002b /* P */
+#define PAL_OSF1_wrvptptr 0x002d /* P */
+#define PAL_OSF1_swpctx 0x0030 /* P */
+#define PAL_OSF1_wrval 0x0031 /* P */
+#define PAL_OSF1_rdval 0x0032 /* P */
+#define PAL_OSF1_tbi 0x0033 /* P */
+#define PAL_OSF1_wrent 0x0034 /* P */
+#define PAL_OSF1_swpipl 0x0035 /* P */
+#define PAL_OSF1_rdps 0x0036 /* P */
+#define PAL_OSF1_wrkgp 0x0037 /* P */
+#define PAL_OSF1_wrusp 0x0038 /* P */
+#define PAL_OSF1_wrperfmon 0x0039 /* P */
+#define PAL_OSF1_rdusp 0x003a /* P */
+#define PAL_OSF1_whami 0x003c /* P */
+#define PAL_OSF1_retsys 0x003d /* P */
+#define PAL_OSF1_rti 0x003f /* P */
+#define PAL_OSF1_callsys 0x0083 /* U */
+#define PAL_OSF1_imb 0x0086 /* U */
diff --git a/sys/arch/alpha/include/param.h b/sys/arch/alpha/include/param.h
index e48b30ee92d..ad7a203c948 100644
--- a/sys/arch/alpha/include/param.h
+++ b/sys/arch/alpha/include/param.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: param.h,v 1.12 2000/07/06 13:38:30 ericj Exp $ */
-/* $NetBSD: param.h,v 1.15 1996/11/13 21:13:19 cgd Exp $ */
+/* $NetBSD: param.h,v 1.30 2000/06/09 16:03:04 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -59,18 +58,20 @@
* Round p (pointer or byte index) up to a correctly-aligned value for all
* data types (int, long, ...). The result is u_long and must be cast to
* any desired pointer type.
+ *
+ * ALIGNED_POINTER is a boolean macro that checks whether an address
+ * is valid to fetch data elements of type t from on this architecture.
+ * This does not reflect the optimal alignment, just the possibility
+ * (within reasonable limits).
+ *
*/
-#define ALIGNBYTES 7
-#define ALIGN(p) (((u_long)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#define ALIGNBYTES 7
+#define ALIGN(p) (((u_long)(p) + ALIGNBYTES) &~ ALIGNBYTES)
+#define ALIGNED_POINTER(p,t) ((((u_long)(p)) & (sizeof(t)-1)) == 0)
#define NBPG (1 << ALPHA_PGSHIFT) /* bytes/page */
#define PGOFSET (NBPG-1) /* byte off. into pg */
#define PGSHIFT ALPHA_PGSHIFT /* LOG2(NBPG) */
-#define NPTEPG (1 << (PGSHIFT-PTESHIFT)) /* pte's/page */
-
-#define SEGSHIFT (PGSHIFT + (PGSHIFT-PTESHIFT)) /* LOG2(NBSEG) */
-#define NBSEG (1 << SEGSHIFT) /* bytes/segment (8M) */
-#define SEGOFSET (NBSEG-1) /* byte off. into seg */
#define KERNBASE 0xfffffc0000230000 /* start of kernel virtual */
#define BTOPKERNBASE ((u_long)KERNBASE >> PGSHIFT)
@@ -78,30 +79,39 @@
#define DEV_BSIZE 512
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
#define BLKDEV_IOSIZE 2048
+#ifndef MAXPHYS
#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */
+#endif
-#define CLSIZE 1
-#define CLSIZELOG2 0
+#define CLSIZE 1
+#define CLSIZELOG2 0
-/* NOTE: SSIZE, SINCR and UPAGES must be multiples of CLSIZE */
#define SSIZE 1 /* initial stack size/NBPG */
#define SINCR 1 /* increment of stack/NBPG */
#define UPAGES 2 /* pages of u-area */
#define USPACE (UPAGES * NBPG) /* total size of u-area */
+#ifndef MSGBUFSIZE
+#define MSGBUFSIZE NBPG /* default message buffer size */
+#endif
+
/*
* Constants related to network buffer management.
- * MCLBYTES must be no larger than CLBYTES (the software page size), and,
+ * MCLBYTES must be no larger than NBPG (the software page size), and,
* on machines that exchange pages of input or output buffers with mbuf
* clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple
* of the hardware page size.
*/
#define MSIZE 256 /* size of an mbuf */
-#define MCLSHIFT 11
-#define MCLBYTES (1 << MCLSHIFT) /* large enough for ether MTU */
+#ifndef MCLSHIFT
+# define MCLSHIFT 11 /* convert bytes to m_buf clusters */
+ /* 2K cluster can hold Ether frame */
+#endif /* MCLSHIFT */
+#define MCLBYTES (1 << MCLSHIFT) /* size of a m_buf cluster */
#define MCLOFSET (MCLBYTES - 1)
#ifndef NMBCLUSTERS
+
#ifdef GATEWAY
#define NMBCLUSTERS 1024 /* map size, max cluster allocation */
#else
@@ -109,17 +119,14 @@
#endif
#endif
-#ifndef MSGBUFSIZE
-#define MSGBUFSIZE 1*NBPG
-#endif
-
/*
* Size of kernel malloc arena in CLBYTES-sized logical pages
*/
#ifndef NKMEMCLUSTERS
-#define NKMEMCLUSTERS (4096*1024/CLBYTES) /* XXX? */
+#define NKMEMCLUSTERS (4096*1024/NBPG) /* XXX? */
#endif
+
/* pages ("clicks") to disk blocks */
#define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT))
#define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT))
@@ -148,20 +155,20 @@
#define alpha_btop(x) ((unsigned long)(x) >> PGSHIFT)
#define alpha_ptob(x) ((unsigned long)(x) << PGSHIFT)
-#include <machine/intr.h>
-
#ifdef _KERNEL
#ifndef _LOCORE
-void delay __P((unsigned long));
+#include <machine/intr.h>
+
+void delay(unsigned long);
#define DELAY(n) delay(n)
/* XXX THE FOLLOWING PROTOTYPE BELONGS IN INTR.H */
-int spl0 __P((void)); /* drop ipl to zero */
+int spl0 __P((void)); /* drop ipl to zero */
/* XXX END INTR.H */
/* XXX THE FOLLOWING PROTOTYPE SHOULD BE A BUS.H INTERFACE */
-vm_offset_t alpha_XXX_dmamap __P((vm_offset_t));
+paddr_t alpha_XXX_dmamap(vaddr_t);
/* XXX END BUS.H */
#endif
diff --git a/sys/arch/alpha/include/pmap.h b/sys/arch/alpha/include/pmap.h
index 9b2120cd6ce..8a8923dedba 100644
--- a/sys/arch/alpha/include/pmap.h
+++ b/sys/arch/alpha/include/pmap.h
@@ -1,10 +1,318 @@
-/* $OpenBSD: pmap.h,v 1.4 1996/10/30 22:39:16 niklas Exp $ */
-/* $NetBSD: pmap.h,v 1.9 1996/08/20 23:02:30 cgd Exp $ */
+/* $NetBSD: pmap.h,v 1.35 2000/06/08 03:10:06 thorpej Exp $ */
-#ifndef NEW_PMAP
-#include <machine/pmap.old.h>
-#else
-#include <machine/pmap.new.h>
+/*-
+ * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center and by Chris G. Demetriou.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1987 Carnegie-Mellon University
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_
+
+#include <sys/lock.h>
+#include <sys/queue.h>
+
+#include <machine/pte.h>
+
+/*
+ * Machine-dependent virtual memory state.
+ *
+ * If we ever support processor numbers higher than 63, we'll have to
+ * rethink the CPU mask.
+ *
+ * Note pm_asn and pm_asngen are arrays allocated in pmap_create().
+ * Their size is based on the PCS count from the HWRPB, and indexed
+ * by processor ID (from `whami').
+ *
+ * The kernel pmap is a special case; it gets statically-allocated
+ * arrays which hold enough for ALPHA_MAXPROCS.
+ */
+struct pmap {
+ TAILQ_ENTRY(pmap) pm_list; /* list of all pmaps */
+ pt_entry_t *pm_lev1map; /* level 1 map */
+ int pm_count; /* pmap reference count */
+ struct simplelock pm_slock; /* lock on pmap */
+ struct pmap_statistics pm_stats; /* pmap statistics */
+ long pm_nlev2; /* level 2 pt page count */
+ long pm_nlev3; /* level 3 pt page count */
+ unsigned int *pm_asn; /* address space number */
+ unsigned long *pm_asngen; /* ASN generation number */
+ unsigned long pm_cpus; /* mask of CPUs using pmap */
+ unsigned long pm_needisync; /* mask of CPUs needing isync */
+};
+
+typedef struct pmap *pmap_t;
+
+#define PMAP_ASN_RESERVED 0 /* reserved for Lev1map users */
+
+extern struct pmap kernel_pmap_store;
+
+#define pmap_kernel() (&kernel_pmap_store)
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
+ */
+typedef struct pv_entry {
+ LIST_ENTRY(pv_entry) pv_list; /* pv_entry list */
+ struct pmap *pv_pmap; /* pmap where mapping lies */
+ vaddr_t pv_va; /* virtual address for mapping */
+ pt_entry_t *pv_pte; /* PTE that maps the VA */
+} *pv_entry_t;
+
+/*
+ * The head of the list of pv_entry_t's, also contains page attributes.
+ */
+struct pv_head {
+ LIST_HEAD(, pv_entry) pvh_list; /* pv_entry list */
+ struct simplelock pvh_slock; /* lock on this head */
+ int pvh_attrs; /* page attributes */
+ int pvh_usage; /* page usage */
+ int pvh_refcnt; /* special use ref count */
+};
+
+/* pvh_attrs */
+#define PGA_MODIFIED 0x01 /* modified */
+#define PGA_REFERENCED 0x02 /* referenced */
+
+/* pvh_usage */
+#define PGU_NORMAL 0 /* free or normal use */
+#define PGU_PVENT 1 /* PV entries */
+#define PGU_L1PT 2 /* level 1 page table */
+#define PGU_L2PT 3 /* level 2 page table */
+#define PGU_L3PT 4 /* level 3 page table */
+
+#define PGU_ISPTPAGE(pgu) ((pgu) >= PGU_L1PT)
+
+#define PGU_STRINGS \
+{ \
+ "normal", \
+ "pvent", \
+ "l1pt", \
+ "l2pt", \
+ "l3pt", \
+}
+
+#ifdef _KERNEL
+
+#ifndef _LKM
+#if defined(NEW_SCC_DRIVER)
+#if defined(DEC_KN8AE)
+#define _PMAP_MAY_USE_PROM_CONSOLE
#endif
+#else /* ! NEW_SCC_DRIVER */
+#if defined(DEC_3000_300) \
+ || defined(DEC_3000_500) \
+ || defined(DEC_KN8AE) /* XXX */
+#define _PMAP_MAY_USE_PROM_CONSOLE /* XXX */
+#endif /* XXX */
+#endif /* NEW_SCC_DRIVER */
+
+#if defined(MULTIPROCESSOR)
+void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t);
+void pmap_do_tlb_shootdown(void);
+#endif /* MULTIPROCESSOR */
+#endif /* _LKM */
+
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+
+extern pt_entry_t *VPT; /* Virtual Page Table */
+
+#define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */
+
+/*
+ * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
+ */
+#define PMAP_MAP_POOLPAGE(pa) ALPHA_PHYS_TO_K0SEG((pa))
+#define PMAP_UNMAP_POOLPAGE(va) ALPHA_K0SEG_TO_PHYS((va))
+
+paddr_t vtophys(vaddr_t);
+
+/* Machine-specific functions. */
+void pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
+void pmap_emulate_reference(struct proc *p, vaddr_t v,
+ int user, int write);
+#ifdef _PMAP_MAY_USE_PROM_CONSOLE
+int pmap_uses_prom_console(void);
+#endif
+void pmap_activate(struct proc *);
+void pmap_deactivate(struct proc *);
+
+#define pmap_pte_pa(pte) (PG_PFNUM(*(pte)) << PGSHIFT)
+#define pmap_pte_prot(pte) (*(pte) & PG_PROT)
+#define pmap_pte_w(pte) (*(pte) & PG_WIRED)
+#define pmap_pte_v(pte) (*(pte) & PG_V)
+#define pmap_pte_pv(pte) (*(pte) & PG_PVLIST)
+#define pmap_pte_asm(pte) (*(pte) & PG_ASM)
+#define pmap_pte_exec(pte) (*(pte) & PG_EXEC)
+
+#define pmap_pte_set_w(pte, v) \
+do { \
+ if (v) \
+ *(pte) |= PG_WIRED; \
+ else \
+ *(pte) &= ~PG_WIRED; \
+} while (0)
+
+#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
+
+#define pmap_pte_set_prot(pte, np) \
+do { \
+ *(pte) &= ~PG_PROT; \
+ *(pte) |= (np); \
+} while (0)
+
+#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
+
+static __inline pt_entry_t *pmap_l2pte(pmap_t, vaddr_t, pt_entry_t *);
+static __inline pt_entry_t *pmap_l3pte(pmap_t, vaddr_t, pt_entry_t *);
+
+#define pmap_l1pte(pmap, v) \
+ (&(pmap)->pm_lev1map[l1pte_index((vaddr_t)(v))])
+
+static __inline pt_entry_t *
+pmap_l2pte(pmap, v, l1pte)
+ pmap_t pmap;
+ vaddr_t v;
+ pt_entry_t *l1pte;
+{
+ pt_entry_t *lev2map;
+
+ if (l1pte == NULL) {
+ l1pte = pmap_l1pte(pmap, v);
+ if (pmap_pte_v(l1pte) == 0)
+ return (NULL);
+ }
+
+ lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
+ return (&lev2map[l2pte_index(v)]);
+}
+
+static __inline pt_entry_t *
+pmap_l3pte(pmap, v, l2pte)
+ pmap_t pmap;
+ vaddr_t v;
+ pt_entry_t *l2pte;
+{
+ pt_entry_t *l1pte, *lev2map, *lev3map;
+
+ if (l2pte == NULL) {
+ l1pte = pmap_l1pte(pmap, v);
+ if (pmap_pte_v(l1pte) == 0)
+ return (NULL);
+
+ lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
+ l2pte = &lev2map[l2pte_index(v)];
+ if (pmap_pte_v(l2pte) == 0)
+ return (NULL);
+ }
+
+ lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
+ return (&lev3map[l3pte_index(v)]);
+}
+
+/*
+ * Macros for locking pmap structures.
+ *
+ * Note that if we access the kernel pmap in interrupt context, it
+ * is only to update statistics. Since stats are updated using atomic
+ * operations, locking the kernel pmap is not necessary. Therefore,
+ * it is not necessary to block interrupts when locking pmap structures.
+ */
+#define PMAP_LOCK(pmap) simple_lock(&(pmap)->pm_slock)
+#define PMAP_UNLOCK(pmap) simple_unlock(&(pmap)->pm_slock)
+
+/*
+ * Macro for processing deferred I-stream synchronization.
+ *
+ * The pmap module may defer syncing the user I-stream until the
+ * return to userspace, since the IMB PALcode op can be quite
+ * expensive. Since user instructions won't be executed until
+ * the return to userspace, this can be deferred until userret().
+ */
+#define PMAP_USERRET(pmap) \
+do { \
+ u_long cpu_mask = (1UL << cpu_number()); \
+ \
+ if ((pmap)->pm_needisync & cpu_mask) { \
+ atomic_clearbits_ulong(&(pmap)->pm_needisync, \
+ cpu_mask); \
+ alpha_pal_imb(); \
+ } \
+} while (0)
+
+#endif /* _KERNEL */
-void pmap_unmap_prom __P((void));
+#endif /* _PMAP_MACHINE_ */
diff --git a/sys/arch/alpha/include/proc.h b/sys/arch/alpha/include/proc.h
index 1738d1bb5c9..22ceefe5803 100644
--- a/sys/arch/alpha/include/proc.h
+++ b/sys/arch/alpha/include/proc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: proc.h,v 1.4 1997/01/24 19:57:16 niklas Exp $ */
+/* $OpenBSD: proc.h,v 1.5 2000/11/08 16:01:13 art Exp $ */
/* $NetBSD: proc.h,v 1.2 1995/03/24 15:01:36 cgd Exp $ */
/*
@@ -28,6 +28,7 @@
* rights to redistribute these changes.
*/
+#include <machine/cpu.h>
/*
* Machine-dependent part of the proc struct for the Alpha.
*/
diff --git a/sys/arch/alpha/include/prom.h b/sys/arch/alpha/include/prom.h
index f8d26ee4093..16306908854 100644
--- a/sys/arch/alpha/include/prom.h
+++ b/sys/arch/alpha/include/prom.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: prom.h,v 1.6 1997/01/24 19:57:18 niklas Exp $ */
-/* $NetBSD: prom.h,v 1.6 1996/11/13 22:21:03 cgd Exp $ */
+/* $NetBSD: prom.h,v 1.12 2000/06/08 03:10:06 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -30,8 +29,8 @@
#ifndef ASSEMBLER
struct prom_vec {
- int (*routine) __P((struct crd *));
- struct crd *routine_arg;
+ u_int64_t routine;
+ void *routine_arg;
};
/* The return value from a prom call. */
@@ -47,17 +46,17 @@ typedef union {
u_int64_t bits;
} prom_return_t;
-#ifdef STANDALONE
-int getchar __P((void));
-int prom_open __P((char *, int));
-void putchar __P((int));
+#ifdef _STANDALONE
+int getchar(void);
+int prom_open(char *, int);
+void putchar(int);
#endif
-void prom_halt __P((int)) __attribute__((__noreturn__));
-int prom_getenv __P((int, char *, int));
+void prom_halt(int) __attribute__((__noreturn__));
+int prom_getenv(int, char *, int);
-void init_prom_interface __P((void));
-void hwrbp_restart_setup __P((void));
+void hwrpb_primary_init(void);
+void hwrpb_restart_setup(void);
#endif
/* Prom operation values. */
@@ -68,14 +67,26 @@ void hwrbp_restart_setup __P((void));
#define PROM_R_PUTS 0x02
#define PROM_R_READ 0x13
#define PROM_R_WRITE 0x14
+#define PROM_R_IOCTL 0x12
+
+/* Prom IOCTL operation subcodes */
+#define PROM_I_SKIP2IRG 1
+#define PROM_I_SKIP2MARK 2
+#define PROM_I_REWIND 3
+#define PROM_I_WRITEMARK 4
/* Environment variable values. */
#define PROM_E_BOOTED_DEV 0x4
#define PROM_E_BOOTED_FILE 0x6
#define PROM_E_BOOTED_OSFLAGS 0x8
#define PROM_E_TTY_DEV 0xf
+#define PROM_E_SCSIID 0x42
+#define PROM_E_SCSIFAST 0x43
+#if defined(_STANDALONE) || defined(ENABLEPROM)
/*
+ * These can't be called from the kernel without great care.
+ *
* There have to be stub routines to do the copying that ensures that the
* PROM doesn't get called with an address larger than 32 bits. Calls that
* either don't need to copy anything, or don't need the copy because it's
@@ -87,20 +98,26 @@ void hwrbp_restart_setup __P((void));
prom_dispatch(PROM_R_READ, chan, len, (u_int64_t)buf, blkno)
#define prom_write(chan, len, buf, blkno) \
prom_dispatch(PROM_R_WRITE, chan, len, (u_int64_t)buf, blkno)
+#define prom_ioctl(chan, op, count) \
+ prom_dispatch(PROM_R_IOCTL, chan, op, (int64_t)count, 0, 0)
#define prom_putstr(chan, str, len) \
prom_dispatch(PROM_R_PUTS, chan, (u_int64_t)str, len, 0)
#define prom_getc(chan) \
prom_dispatch(PROM_R_GETC, chan, 0, 0, 0)
#define prom_getenv_disp(id, buf, len) \
prom_dispatch(PROM_R_GETENV, id, (u_int64_t)buf, len, 0)
+#endif
#ifndef ASSEMBLER
#ifdef _KERNEL
-void promcnputc __P((dev_t, int));
-int promcngetc __P((dev_t));
-int promcnlookc __P((dev_t, char *));
+int prom_enter(void);
+void prom_leave(int);
+
+void promcnputc(dev_t, int);
+int promcngetc(dev_t);
+int promcnlookc(dev_t, char *);
-u_int64_t prom_dispatch __P((u_int64_t, u_int64_t, u_int64_t, u_int64_t,
- u_int64_t));
+u_int64_t prom_dispatch(u_int64_t, u_int64_t, u_int64_t, u_int64_t,
+ u_int64_t);
#endif /* _KERNEL */
#endif /* ASSEMBLER */
diff --git a/sys/arch/alpha/include/pte.h b/sys/arch/alpha/include/pte.h
index 57438dca065..63c300f6a8c 100644
--- a/sys/arch/alpha/include/pte.h
+++ b/sys/arch/alpha/include/pte.h
@@ -1,5 +1,41 @@
-/* $OpenBSD: pte.h,v 1.6 1997/01/24 19:57:19 niklas Exp $ */
-/* $NetBSD: pte.h,v 1.8 1996/11/13 22:21:04 cgd Exp $ */
+/* $NetBSD: pte.h,v 1.26 1999/04/09 00:38:11 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -28,6 +64,9 @@
* rights to redistribute these changes.
*/
+#ifndef _ALPHA_PTE_H_
+#define _ALPHA_PTE_H_
+
/*
* Alpha page table entry.
* Things which are in the VMS PALcode but not in the OSF PALcode
@@ -61,55 +100,50 @@ typedef alpha_pt_entry_t pt_entry_t;
#define PG_KWE ALPHA_PTE_KW
#define PG_UWE ALPHA_PTE_UW
#define PG_PROT ALPHA_PTE_PROT
-#define PG_RSVD 0x000000000000cc80 /* Reserved fpr hardware */
+#define PG_RSVD 0x000000000000cc80 /* Reserved for hardware */
#define PG_WIRED 0x0000000000010000 /* Wired. [SOFTWARE] */
+#define PG_PVLIST 0x0000000000020000 /* on pv list [SOFTWARE] */
+#define PG_EXEC 0x0000000000040000 /* execute perms [SOFTWARE] */
#define PG_FRAME ALPHA_PTE_RAME
#define PG_SHIFT 32
#define PG_PFNUM(x) ALPHA_PTE_TO_PFN(x)
-#if 0 /* XXX NOT HERE */
-#define K0SEG_BEGIN 0xfffffc0000000000 /* unmapped, cached */
-#define K0SEG_END 0xfffffe0000000000
-#define PHYS_UNCACHED 0x0000000040000000
-#endif
-
-#ifndef _LOCORE
-#if 0 /* XXX NOT HERE */
-#define k0segtophys(x) ((vm_offset_t)(x) & 0x00000003ffffffff)
-#define phystok0seg(x) ((vm_offset_t)(x) | K0SEG_BEGIN)
+/*
+ * These are the PALcode PTE bits that we care about when checking to see
+ * if a PTE has changed in such a way as to require a TBI.
+ */
+#define PG_PALCODE(x) ((x) & ALPHA_PTE_PALCODE)
-#define phystouncached(x) ((vm_offset_t)(x) | PHYS_UNCACHED)
-#define uncachedtophys(x) ((vm_offset_t)(x) & ~PHYS_UNCACHED)
-#endif
+#if defined(_KERNEL) || defined(__KVM_ALPHA_PRIVATE)
+#define NPTEPG_SHIFT (PAGE_SHIFT - PTESHIFT)
+#define NPTEPG (1L << NPTEPG_SHIFT)
#define PTEMASK (NPTEPG - 1)
-#define vatopte(va) (((va) >> PGSHIFT) & PTEMASK)
-#define vatoste(va) (((va) >> SEGSHIFT) & PTEMASK)
-#define kvtol1pte(va) \
- (((vm_offset_t)(va) >> (PGSHIFT + 2*(PGSHIFT-PTESHIFT))) & PTEMASK)
-#define vatopa(va) \
- ((PG_PFNUM(*kvtopte(va)) << PGSHIFT) | ((vm_offset_t)(va) & PGOFSET))
+#define l3pte_index(va) \
+ (((vaddr_t)(va) >> PAGE_SHIFT) & PTEMASK)
+
+#define l2pte_index(va) \
+ (((vaddr_t)(va) >> (PAGE_SHIFT + NPTEPG_SHIFT)) & PTEMASK)
+
+#define l1pte_index(va) \
+ (((vaddr_t)(va) >> (PAGE_SHIFT + 2 * NPTEPG_SHIFT)) & PTEMASK)
+
+#define VPT_INDEX(va) \
+ (((vaddr_t)(va) >> PAGE_SHIFT) & ((1 << 3 * NPTEPG_SHIFT) - 1))
+
+/* Space mapped by one level 1 PTE */
+#define ALPHA_L1SEG_SIZE (1L << ((2 * NPTEPG_SHIFT) + PAGE_SHIFT))
-#define ALPHA_STSIZE ((u_long)NBPG) /* 8k */
-#define ALPHA_MAX_PTSIZE ((u_long)(NPTEPG * NBPG)) /* 8M */
+/* Space mapped by one level 2 PTE */
+#define ALPHA_L2SEG_SIZE (1L << (NPTEPG_SHIFT + PAGE_SHIFT))
+
+#define alpha_trunc_l1seg(x) (((u_long)(x)) & ~(ALPHA_L1SEG_SIZE-1))
+#define alpha_trunc_l2seg(x) (((u_long)(x)) & ~(ALPHA_L2SEG_SIZE-1))
+#endif /* _KERNEL || __KVM_ALPHA_PRIVATE */
#ifdef _KERNEL
-/*
- * Kernel virtual address to Sysmap entry and visa versa.
- */
-#define kvtopte(va) \
- (Sysmap + (((vm_offset_t)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT))
-#define ptetokv(pte) \
- ((((pt_entry_t *)(pte) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
-
-#define loadustp(stpte) { \
- Lev1map[kvtol1pte(VM_MIN_ADDRESS)] = stpte; \
- ALPHA_TBIAP(); \
-}
-
-extern pt_entry_t *Lev1map; /* Alpha Level One page table */
-extern pt_entry_t *Sysmap; /* kernel pte table */
-extern vm_size_t Sysmapsize; /* number of pte's in Sysmap */
-#endif
-#endif
+extern pt_entry_t *kernel_lev1map; /* kernel level 1 page table */
+#endif /* _KERNEL */
+
+#endif /* ! _ALPHA_PTE_H_ */
diff --git a/sys/arch/alpha/include/rpb.h b/sys/arch/alpha/include/rpb.h
index bbd03b2e6f3..ea48a4a561e 100644
--- a/sys/arch/alpha/include/rpb.h
+++ b/sys/arch/alpha/include/rpb.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: rpb.h,v 1.7 1998/06/28 01:09:58 angelos Exp $ */
-/* $NetBSD: rpb.h,v 1.11 1996/11/13 22:26:41 cgd Exp $ */
+/* $NetBSD: rpb.h,v 1.38 2000/07/06 23:29:13 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@@ -40,41 +39,51 @@
#ifndef ASSEMBLER
struct rpb {
- struct restart_blk *rpb; /* 0: HWRPB phys. address. */
+ u_int64_t rpb_phys; /* 0: HWRPB phys. address. */
char rpb_magic[8]; /* 8: "HWRPB" (in ASCII) */
u_int64_t rpb_version; /* 10 */
u_int64_t rpb_size; /* 18: HWRPB size in bytes */
u_int64_t rpb_primary_cpu_id; /* 20 */
u_int64_t rpb_page_size; /* 28: (8192) */
- u_int64_t rpb_phys_addr_size; /* 30: (34) */
+ u_int32_t rpb_phys_addr_size; /* 30: physical address size */
+ u_int32_t rpb_extended_va_size; /* 34: extended VA size (4L) */
u_int64_t rpb_max_asn; /* 38: (16) */
char rpb_ssn[16]; /* 40: only first 10 valid */
#define ST_ADU 1 /* Alpha Demo. Unit (?) */
-#define ST_DEC_4000 2 /* "Cobra" (?) */
-#define ST_DEC_7000 3 /* "Ruby" (?) */
+#define ST_DEC_4000 2 /* "Cobra" */
+#define ST_DEC_7000 3 /* "Ruby" */
#define ST_DEC_3000_500 4 /* "Flamingo" family (TC) */
#define ST_DEC_2000_300 6 /* "Jensen" (EISA/ISA) */
#define ST_DEC_3000_300 7 /* "Pelican" (TC) */
-#define ST_DEC_2100_A500 9 /* "Sable" (?) */
-#define ST_DEC_APXVME_64 10 /* "AXPvme" (VME?) */
+#define ST_AVALON_A12 8 /* XXX Avalon Multicomputer */
+#define ST_DEC_2100_A500 9 /* "Sable" */
+#define ST_DEC_APXVME_64 10 /* "AXPvme" (VME) */
#define ST_DEC_AXPPCI_33 11 /* "NoName" (PCI/ISA) */
-#define ST_DEC_21000 12 /* "TurboLaser" (?) */
+#define ST_DEC_21000 12 /* "TurboLaser" (PCI/EISA) */
#define ST_DEC_2100_A50 13 /* "Avanti" (PCI/ISA) */
-#define ST_DEC_MUSTANG 14 /* "Mustang" (?) */
+#define ST_DEC_MUSTANG 14 /* "Mustang" */
#define ST_DEC_KN20AA 15 /* kn20aa (PCI/EISA) */
-#define ST_DEC_1000 17 /* "Mikasa" (PCI/ISA?) */
+#define ST_DEC_1000 17 /* "Mikasa" (PCI/EISA) */
#define ST_EB66 19 /* EB66 (PCI/ISA?) */
#define ST_EB64P 20 /* EB64+ (PCI/ISA?) */
-#define ST_DEC_4100 22 /* "Rawhide" (?) */
-#define ST_DEC_EV45_PBP 23 /* "Lego" (?) */
-#define ST_DEC_2100A_A500 24 /* "Lynx" (?) */
+#define ST_ALPHABOOK1 21 /* Alphabook1 */
+#define ST_DEC_4100 22 /* "Rawhide" (PCI/EISA) */
+#define ST_DEC_EV45_PBP 23 /* "Lego" K2 Passive SBC */
+#define ST_DEC_2100A_A500 24 /* "Lynx" */
#define ST_EB164 26 /* EB164 (PCI/ISA) */
-#define ST_DEC_1000A 27 /* "Noritake" (?) */
-#define ST_DEC_ALPHAVME_224 28 /* "Cortex" (?) */
-#define ST_DEC_550 30 /* "Miata" (PCI/ISA) */
-#define ST_DEC_EV56_PBP 32 /* "Takara" (?) */
-#define ST_DEC_ALPHAVME_320 33 /* "Yukon" (VME?) */
+#define ST_DEC_1000A 27 /* "Noritake" (PCI/EISA)*/
+#define ST_DEC_ALPHAVME_224 28 /* "Cortex" */
+#define ST_DEC_550 30 /* "Miata" (PCI/ISA) */
+#define ST_DEC_EV56_PBP 32 /* "Takara" */
+#define ST_DEC_ALPHAVME_320 33 /* "Yukon" (VME) */
+#define ST_DEC_6600 34 /* EV6-Tsunami based systems */
+#define ST_DEC_WILDFIRE 35 /* "Wildfire" */
+#define ST_DEC_CUSCO 36 /* "CUSCO" */
+#define ST_DEC_EIGER 37 /* "Eiger" */
+
+ /* Alpha Processor, Inc. systypes */
+#define ST_API_NAUTILUS 201 /* EV6-AMD 751 UP1000 */
u_int64_t rpb_type; /* 50: */
@@ -127,45 +136,66 @@ struct rpb {
#define SV_ST_MUSTANG2_4_166 0x00000800 /* Mustang II; 200 4/166 */
#define SV_ST_MUSTANG2_4_233 0x00001000 /* Mustang II; 200 4/233 */
#define SV_ST_AVANTI_XXX 0x00001400 /* also Avanti; 400 4/233 */
+#define SV_ST_AVANTI_4_266 0x00002000
#define SV_ST_MUSTANG2_4_100 0x00002400 /* Mustang II; 200 4/100 */
+#define SV_ST_AVANTI_4_233 0x0000a800 /* AlphaStation 255/233 */
+
+#define SV_ST_KN20AA 0x00000400 /* AlphaStation 500/600 */
+
+/*
+ * System types for the AXPvme Family
+ */
+#define SV_ST_AXPVME_64 0x00000000 /* 21068, 64MHz */
+#define SV_ST_AXPVME_160 0x00000400 /* 21066, 160MHz */
+#define SV_ST_AXPVME_100 0x00000c00 /* 21066A, 99MHz */
+#define SV_ST_AXPVME_230 0x00001000 /* 21066A, 231MHz */
+#define SV_ST_AXPVME_66 0x00001400 /* 21066A, 66MHz */
+#define SV_ST_AXPVME_166 0x00001800 /* 21066A, 165MHz */
+#define SV_ST_AXPVME_264 0x00001c00 /* 21066A, 264MHz */
/*
* System types for the EB164 Family
*/
-#define SV_ST_EB164_266 0x00000400 /* EB164, 266MHz */
-#define SV_ST_EB164_300 0x00000800 /* EB164, 300MHz */
-#define SV_ST_ALPHAPC164_366 0x00000c00 /* AlphaPC164, 366MHz */
-#define SV_ST_ALPHAPC164_400 0x00001000 /* AlphaPC164, 400MHz */
-#define SV_ST_ALPHAPC164_433 0x00001400 /* AlphaPC164, 433MHz */
-#define SV_ST_ALPHAPC164_466 0x00001800 /* AlphaPC164, 466MHz */
-#define SV_ST_ALPHAPC164_500 0x00001c00 /* AlphaPC164, 500MHz */
-#define SV_ST_ALPHAPC164LX_400 0x00002000 /* AlphaPC164LX, 400MHz */
-#define SV_ST_ALPHAPC164LX_466 0x00002400 /* AlphaPC164LX, 466MHz */
-#define SV_ST_ALPHAPC164LX_533 0x00002800 /* AlphaPC164LX, 533MHz */
-#define SV_ST_ALPHAPC164LX_600 0x00002c00 /* AlphaPC164LX, 600MHz */
-#define SV_ST_ALPHAPC164SX_400 0x00003000 /* AlphaPC164SX, 400MHz */
-#define SV_ST_ALPHAPC164SX_466 0x00003400 /* AlphaPC164SX, 433MHz */
-#define SV_ST_ALPHAPC164SX_533 0x00003800 /* AlphaPC164SX, 533MHz */
-#define SV_ST_ALPHAPC164SX_600 0x00003c00 /* AlphaPC164SX, 600MHz */
+#define SV_ST_EB164_266 0x00000400 /* EB164, 266MHz */
+#define SV_ST_EB164_300 0x00000800 /* EB164, 300MHz */
+#define SV_ST_ALPHAPC164_366 0x00000c00 /* AlphaPC164, 366MHz */
+#define SV_ST_ALPHAPC164_400 0x00001000 /* AlphaPC164, 400MHz */
+#define SV_ST_ALPHAPC164_433 0x00001400 /* AlphaPC164, 433MHz */
+#define SV_ST_ALPHAPC164_466 0x00001800 /* AlphaPC164, 466MHz */
+#define SV_ST_ALPHAPC164_500 0x00001c00 /* AlphaPC164, 500MHz */
+#define SV_ST_ALPHAPC164LX_400 0x00002000 /* AlphaPC164LX, 400MHz */
+#define SV_ST_ALPHAPC164LX_466 0x00002400 /* AlphaPC164LX, 466MHz */
+#define SV_ST_ALPHAPC164LX_533 0x00002800 /* AlphaPC164LX, 533MHz */
+#define SV_ST_ALPHAPC164LX_600 0x00002c00 /* AlphaPC164LX, 600MHz */
+#define SV_ST_ALPHAPC164SX_400 0x00003000 /* AlphaPC164SX, 400MHz */
+#define SV_ST_ALPHAPC164SX_466 0x00003400 /* AlphaPC164SX, 466MHz */
+#define SV_ST_ALPHAPC164SX_533 0x00003800 /* AlphaPC164SX, 533MHz */
+#define SV_ST_ALPHAPC164SX_600 0x00003c00 /* AlphaPC164SX, 600MHz */
+
+/*
+ * System types for the Digital Personal Workstation (Miata) Family
+ * XXX These are not very complete!
+ */
+#define SV_ST_MIATA_1_5 0x00004c00 /* Miata 1.5 */
u_int64_t rpb_variation; /* 58 */
char rpb_revision[8]; /* 60; only first 4 valid */
u_int64_t rpb_intr_freq; /* 68; scaled by 4096 */
u_int64_t rpb_cc_freq; /* 70: cycle cntr frequency */
- vm_offset_t rpb_vptb; /* 78: */
+ u_long rpb_vptb; /* 78: */
u_int64_t rpb_reserved_arch; /* 80: */
- vm_offset_t rpb_tbhint_off; /* 88: */
+ u_long rpb_tbhint_off; /* 88: */
u_int64_t rpb_pcs_cnt; /* 90: */
u_int64_t rpb_pcs_size; /* 98; pcs size in bytes */
- vm_offset_t rpb_pcs_off; /* A0: offset to pcs info */
+ u_long rpb_pcs_off; /* A0: offset to pcs info */
u_int64_t rpb_ctb_cnt; /* A8: console terminal */
u_int64_t rpb_ctb_size; /* B0: ctb size in bytes */
- vm_offset_t rpb_ctb_off; /* B8: offset to ctb */
- vm_offset_t rpb_crb_off; /* C0: offset to crb */
- vm_offset_t rpb_memdat_off; /* C8: memory data offset */
- vm_offset_t rpb_condat_off; /* D0: config data offset */
- vm_offset_t rpb_fru_off; /* D8: FRU table offset */
+ u_long rpb_ctb_off; /* B8: offset to ctb */
+ u_long rpb_crb_off; /* C0: offset to crb */
+ u_long rpb_memdat_off; /* C8: memory data offset */
+ u_long rpb_condat_off; /* D0: config data offset */
+ u_long rpb_fru_off; /* D8: FRU table offset */
u_int64_t rpb_save_term; /* E0: terminal save */
u_int64_t rpb_save_term_val; /* E8: */
u_int64_t rpb_rest_term; /* F0: terminal restore */
@@ -177,10 +207,13 @@ struct rpb {
u_int64_t rpb_checksum; /* 120: HWRPB checksum */
u_int64_t rpb_rxrdy; /* 128: receive ready */
u_int64_t rpb_txrdy; /* 130: transmit ready */
- vm_offset_t rpb_dsrdb_off; /* 138: HWRPB + DSRDB offset */
+ u_long rpb_dsrdb_off; /* 138: HWRPB + DSRDB offset */
u_int64_t rpb_tbhint[8]; /* 149: TB hint block */
};
+#define LOCATE_PCS(h,cpunumber) ((struct pcs *) \
+ ((char *)(h) + (h)->rpb_pcs_off + ((cpunumber) * (h)->rpb_pcs_size)))
+
/*
* PCS: Per-CPU information.
*/
@@ -208,34 +241,34 @@ struct pcs {
u_int64_t pcs_pal_memsize; /* 88: PAL memory size */
u_int64_t pcs_pal_scrsize; /* 90: PAL scratch size */
- vm_offset_t pcs_pal_memaddr; /* 98: PAL memory addr */
- vm_offset_t pcs_pal_scraddr; /* A0: PAL scratch addr */
+ u_long pcs_pal_memaddr; /* 98: PAL memory addr */
+ u_long pcs_pal_scraddr; /* A0: PAL scratch addr */
struct {
u_int64_t
- pcs_alpha : 8, /* alphabetic char 'a' - 'z' */
+ minorrev : 8, /* alphabetic char 'a' - 'z' */
+ majorrev : 8, /* alphabetic char 'a' - 'z' */
#define PAL_TYPE_STANDARD 0
#define PAL_TYPE_VMS 1
#define PAL_TYPE_OSF1 2
- pcs_pal_type : 8, /* PALcode type:
+ pal_type : 8, /* PALcode type:
* 0 == standard
* 1 == OpenVMS
* 2 == OSF/1
* 3-127 DIGITAL reserv.
* 128-255 non-DIGITAL reserv.
*/
- sbz1 : 16,
- pcs_proc_cnt : 7, /* Processor count */
- sbz2 : 25;
+ sbz1 : 8,
+ compatibility : 16, /* Compatibility revision */
+ proc_cnt : 16; /* Processor count */
} pcs_pal_rev; /* A8: */
-#define pcs_alpha pcs_pal_rev.alpha
-#define pcs_pal_type pcs_pal_rev.pal_type
-#define pcs_proc_cnt pcs_pal_rev.proc_cnt
+#define pcs_minorrev pcs_pal_rev.minorrev
+#define pcs_majorrev pcs_pal_rev.majorrev
+#define pcs_pal_type pcs_pal_rev.pal_type
+#define pcs_compatibility pcs_pal_rev.compatibility
+#define pcs_proc_cnt pcs_pal_rev.proc_cnt
u_int64_t pcs_proc_type; /* B0: processor type */
-#define PCS_PROC_MAJOR 0x00000000ffffffff
-#define PCS_PROC_MAJORSHIFT 0
-
#define PCS_PROC_EV3 1 /* EV3 */
#define PCS_PROC_EV4 2 /* EV4: 21064 */
#define PCS_PROC_SIMULATION 3 /* Simulation */
@@ -244,10 +277,12 @@ struct pcs {
#define PCS_PROC_EV45 6 /* EV45: 21064A */
#define PCS_PROC_EV56 7 /* EV56: 21164A */
#define PCS_PROC_EV6 8 /* EV6: 21264 */
-#define PCS_PROC_PCA56 9 /* PCA256: 21164PC */
+#define PCS_PROC_PCA56 9 /* PCA56: 21164PC */
+#define PCS_PROC_PCA57 10 /* PCA57: 21164?? */
+#define PCS_PROC_EV67 11 /* EV67: 21264A */
-#define PCS_PROC_MINOR 0xffffffff00000000
-#define PCS_PROC_MINORSHIFT 32
+#define PCS_CPU_MAJORTYPE(p) ((p)->pcs_proc_type & 0xffffffff)
+#define PCS_CPU_MINORTYPE(p) ((p)->pcs_proc_type >> 32)
/* Minor number interpretation is processor specific. See cpu.c. */
@@ -260,10 +295,10 @@ struct pcs {
char pcs_proc_revision[8]; /* C0: only first 4 valid */
char pcs_proc_sn[16]; /* C8: only first 10 valid */
- vm_offset_t pcs_machcheck; /* D8: mach chk phys addr. */
+ u_long pcs_machcheck; /* D8: mach chk phys addr. */
u_int64_t pcs_machcheck_len; /* E0: length in bytes */
- vm_offset_t pcs_halt_pcbb; /* E8: phys addr of halt PCB */
- vm_offset_t pcs_halt_pc; /* F0: halt PC */
+ u_long pcs_halt_pcbb; /* E8: phys addr of halt PCB */
+ u_long pcs_halt_pc; /* F0: halt PC */
u_int64_t pcs_halt_ps; /* F8: halt PS */
u_int64_t pcs_halt_r25; /* 100: halt argument list */
u_int64_t pcs_halt_r26; /* 108: halt return addr list */
@@ -281,7 +316,13 @@ struct pcs {
u_int64_t pcs_halt_reason; /* 118: */
u_int64_t pcs_reserved_soft; /* 120: preserved software */
- u_int64_t pcs_buffer[21]; /* 128: console buffers */
+
+ struct { /* 128: inter-console buffers */
+ u_int iccb_rxlen;
+ u_int iccb_txlen;
+ char iccb_rxbuf[80];
+ char iccb_txbuf[80];
+ } pcs_iccb;
#define PALvar_reserved 0
#define PALvar_OpenVMS 1
@@ -295,26 +336,29 @@ struct pcs {
* CTB: Console Terminal Block
*/
struct ctb {
- u_int64_t ctb_type; /* 0: always 4 */
+ u_int64_t ctb_type; /* 0: CTB type */
u_int64_t ctb_unit; /* 8: */
u_int64_t ctb_reserved; /* 16: */
u_int64_t ctb_len; /* 24: bytes of info */
u_int64_t ctb_ipl; /* 32: console ipl level */
- vm_offset_t ctb_tintr_vec; /* 40: transmit vec (0x800) */
- vm_offset_t ctb_rintr_vec; /* 48: receive vec (0x800) */
-
-#define CTB_GRAPHICS 3 /* graphics device */
-#define CTB_NETWORK 0xC0 /* network device */
-#define CTB_PRINTERPORT 2 /* printer port on the SCC */
+ u_long ctb_tintr_vec; /* 40: transmit vec (0x800) */
+ u_long ctb_rintr_vec; /* 48: receive vec (0x800) */
+
+#define CTB_NONE 0x00 /* no console present */
+#define CTB_SERVICE 0x01 /* service processor */
+#define CTB_PRINTERPORT 0x02 /* printer port on the SCC */
+#define CTB_GRAPHICS 0x03 /* graphics device */
+#define CTB_TYPE4 0x04 /* type 4 CTB */
+#define CTB_NETWORK 0xC0 /* network device */
u_int64_t ctb_term_type; /* 56: terminal type */
u_int64_t ctb_keybd_type; /* 64: keyboard nationality */
- vm_offset_t ctb_keybd_trans; /* 72: trans. table addr */
- vm_offset_t ctb_keybd_map; /* 80: map table addr */
+ u_long ctb_keybd_trans; /* 72: trans. table addr */
+ u_long ctb_keybd_map; /* 80: map table addr */
u_int64_t ctb_keybd_state; /* 88: keyboard flags */
u_int64_t ctb_keybd_last; /* 96: last key entered */
- vm_offset_t ctb_font_us; /* 104: US font table addr */
- vm_offset_t ctb_font_mcs; /* 112: MCS font table addr */
+ u_long ctb_font_us; /* 104: US font table addr */
+ u_long ctb_font_mcs; /* 112: MCS font table addr */
u_int64_t ctb_font_width; /* 120: font width, height */
u_int64_t ctb_font_height; /* 128: in pixels */
u_int64_t ctb_mon_width; /* 136: monitor width, height */
@@ -325,23 +369,54 @@ struct ctb {
u_int64_t ctb_cur_height; /* 176: in pixels */
u_int64_t ctb_head_cnt; /* 184: # of heads */
u_int64_t ctb_opwindow; /* 192: opwindow on screen */
- vm_offset_t ctb_head_offset; /* 200: offset to head info */
- vm_offset_t ctb_putchar; /* 208: output char to TURBO */
+ u_long ctb_head_offset; /* 200: offset to head info */
+ u_long ctb_putchar; /* 208: output char to TURBO */
u_int64_t ctb_io_state; /* 216: I/O flags */
u_int64_t ctb_listen_state; /* 224: listener flags */
- vm_offset_t ctb_xaddr; /* 232: extended info addr */
+ u_long ctb_xaddr; /* 232: extended info addr */
u_int64_t ctb_turboslot; /* 248: TURBOchannel slot # */
u_int64_t ctb_server_off; /* 256: offset to server info */
u_int64_t ctb_line_off; /* 264: line parameter offset */
u_int8_t ctb_csd; /* 272: console specific data */
};
+struct ctb_tt {
+ u_int64_t ctb_type; /* 0: CTB type */
+ u_int64_t ctb_unit; /* 8: console unit */
+ u_int64_t ctb_reserved; /* 16: reserved */
+ u_int64_t ctb_length; /* 24: length */
+ u_int64_t ctb_csr; /* 32: address */
+ u_int64_t ctb_tivec; /* 40: Tx intr vector */
+ u_int64_t ctb_rivec; /* 48: Rx intr vector */
+ u_int64_t ctb_baud; /* 56: baud rate */
+ u_int64_t ctb_put_sts; /* 64: PUTS status */
+ u_int64_t ctb_get_sts; /* 72: GETS status */
+ u_int64_t ctb_reserved0; /* 80: reserved */
+};
+
+/*
+ * Format of the Console Terminal Block Type 4 `turboslot' field:
+ *
+ * 63 40 39 32 31 24 23 16 15 8 7 0
+ * | reserved | channel | hose | bus type | bus | slot|
+ */
+#define CTB_TURBOSLOT_CHANNEL(x) (((x) >> 32) & 0xff)
+#define CTB_TURBOSLOT_HOSE(x) (((x) >> 24) & 0xff)
+#define CTB_TURBOSLOT_TYPE(x) (((x) >> 16) & 0xff)
+#define CTB_TURBOSLOT_BUS(x) (((x) >> 8) & 0xff)
+#define CTB_TURBOSLOT_SLOT(x) ((x) & 0xff)
+
+#define CTB_TURBOSLOT_TYPE_TC 0 /* TURBOchannel */
+#define CTB_TURBOSLOT_TYPE_ISA 1 /* ISA */
+#define CTB_TURBOSLOT_TYPE_EISA 2 /* EISA */
+#define CTB_TURBOSLOT_TYPE_PCI 3 /* PCI */
+
/*
* CRD: Console Routine Descriptor
*/
struct crd {
int64_t descriptor;
- int (*entry_va) __P((struct crd *));
+ u_int64_t entry_va;
};
/*
@@ -349,9 +424,9 @@ struct crd {
*/
struct crb {
struct crd *crb_v_dispatch; /* 0: virtual dispatch addr */
- vm_offset_t crb_p_dispatch; /* 8: phys dispatch addr */
+ u_long crb_p_dispatch; /* 8: phys dispatch addr */
struct crd *crb_v_fixup; /* 10: virtual fixup addr */
- vm_offset_t crb_p_fixup; /* 18: phys fixup addr */
+ u_long crb_p_fixup; /* 18: phys fixup addr */
u_int64_t crb_map_cnt; /* 20: phys/virt map entries */
u_int64_t crb_page_cnt; /* 28: pages to be mapped */
};
@@ -361,22 +436,48 @@ struct crb {
*/
struct mddt {
int64_t mddt_cksum; /* 0: 7-N checksum */
- vm_offset_t mddt_physaddr; /* 8: bank config addr
+ u_long mddt_physaddr; /* 8: bank config addr
* IMPLEMENTATION SPECIFIC
*/
u_int64_t mddt_cluster_cnt; /* 10: memory cluster count */
- struct {
- vm_offset_t mddt_pfn; /* 0: starting PFN */
+ struct mddt_cluster {
+ u_long mddt_pfn; /* 0: starting PFN */
u_int64_t mddt_pg_cnt; /* 8: 8KB page count */
u_int64_t mddt_pg_test; /* 10: tested page count */
- vm_offset_t mddt_v_bitaddr; /* 18: bitmap virt addr */
- vm_offset_t mddt_p_bitaddr; /* 20: bitmap phys addr */
+ u_long mddt_v_bitaddr; /* 18: bitmap virt addr */
+ u_long mddt_p_bitaddr; /* 20: bitmap phys addr */
int64_t mddt_bit_cksum; /* 28: bitmap checksum */
+#define MDDT_NONVOLATILE 0x10 /* cluster is non-volatile */
#define MDDT_PALCODE 0x01 /* console and PAL only */
#define MDDT_SYSTEM 0x00 /* system software only */
-#define MDDT_mbz 0xfffffffffffffffe /* 1:63 -- must be zero */
+#define MDDT_mbz 0xfffffffffffffffc /* 2:63 -- must be zero */
int64_t mddt_usage; /* 30: bitmap permissions */
} mddt_clusters[1]; /* variable length array */
};
+
+/*
+ * DSR: Dynamic System Recognition. We're interested in the sysname
+ * offset. The data pointed to by sysname is:
+ *
+ * [8 bytes: length of system name][N bytes: system name string]
+ *
+ * The system name string is NUL-terminated.
+ */
+struct dsrdb {
+ int64_t dsr_smm; /* 0: SMM number */
+ u_int64_t dsr_lurt_off; /* 8: LURT table offset */
+ u_int64_t dsr_sysname_off; /* 16: offset to sysname */
+};
+
+/*
+ * The DSR appeared in version 5 of the HWRPB.
+ */
+#define HWRPB_DSRDB_MINVERS 5
+
+#ifdef _KERNEL
+extern int cputype;
+extern struct rpb *hwrpb;
+#endif
+
#endif /* ASSEMBLER */
diff --git a/sys/arch/alpha/include/vmparam.h b/sys/arch/alpha/include/vmparam.h
index 62eb118299c..829e4f2365a 100644
--- a/sys/arch/alpha/include/vmparam.h
+++ b/sys/arch/alpha/include/vmparam.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.4 1996/10/30 22:39:34 niklas Exp $ */
-/* $NetBSD: vmparam.h,v 1.3 1996/07/09 00:28:25 cgd Exp $ */
+/* $NetBSD: vmparam.h,v 1.18 2000/05/22 17:13:54 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -43,17 +42,26 @@
* @(#)vmparam.h 8.2 (Berkeley) 4/22/94
*/
+#ifndef _ALPHA_VMPARAM_H_
+#define _ALPHA_VMPARAM_H_
+
/*
* Machine dependent constants for Alpha.
*/
+
/*
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack. Immediately above the user stack
* resides the user structure, which is UPAGES long and contains the
* kernel stack.
+ *
+ * Digital UNIX (formerly DEC OSF/1) places the stack below the
+ * text segment (i.e. growing downward from 4G). We may want to
+ * consider doing that at some point, but it might require changes
+ * to the exec code.
*/
-#define USRTEXT CLBYTES
-#define USRSTACK VM_MAXUSER_ADDRESS
+#define USRTEXT NBPG
+#define USRSTACK ((vaddr_t)0x0000000200000000) /* 8G */
/*
* Virtual memory related constants, all in bytes
@@ -75,16 +83,6 @@
#endif
/*
- * Default sizes of swap allocation chunks (see dmap.h).
- * The actual values may be changed in vminit() based on MAXDSIZ.
- * With MAXDSIZ of 16Mb and NDMAP of 38, dmmax will be 1024.
- * DMMIN should be at least ctod(1) so that vtod() works.
- * vminit() insures this.
- */
-#define DMMIN 32 /* smallest swap allocation */
-#define DMMAX 4096 /* largest potential swap allocation */
-
-/*
* PTEs for mapping user space into the kernel for phyio operations.
* 64 pte's are enough to cover 8 disks * MAXBSIZE.
*/
@@ -101,18 +99,6 @@
#endif
/*
- * Boundary at which to place first MAPMEM segment if not explicitly
- * specified. Should be a power of two. This allows some slop for
- * the data segment to grow underneath the first mapped segment.
- */
-#define MMSEG 0x200000
-
-/*
- * The size of the clock loop.
- */
-#define LOOPPAGES (maxfree - firstfree)
-
-/*
* The time for a process to be blocked before being very swappable.
* This is a number of seconds which the system takes as being a non-trivial
* amount of real time. You probably shouldn't change this;
@@ -124,31 +110,38 @@
#define MAXSLP 20
/*
- * A swapped in process is given a small amount of core without being bothered
- * by the page replacement algorithm. Basically this says that if you are
- * swapped in you deserve some resources. We protect the last SAFERSS
- * pages against paging and will just swap you out rather than paging you.
- * Note that each process has at least UPAGES+CLSIZE pages which are not
- * paged anyways, in addition to SAFERSS.
- */
-#define SAFERSS 10 /* nominal ``small'' resident set size
- protected against replacement */
-
-/*
* Mach derived constants
*/
/* user/kernel map constants */
-#define VM_MIN_ADDRESS ((vm_offset_t)ALPHA_USEG_BASE) /* 0 */
-#define VM_MAXUSER_ADDRESS ((vm_offset_t)0x0000000200000000) /* 8G XXX */
+#define VM_MIN_ADDRESS ((vaddr_t)ALPHA_USEG_BASE) /* 0 */
+#define VM_MAXUSER_ADDRESS ((vaddr_t)(ALPHA_USEG_END + 1L)) /* 4T */
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
-#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)ALPHA_K1SEG_BASE)
-#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)ALPHA_K1SEG_END)
+#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)ALPHA_K1SEG_BASE)
+#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)ALPHA_K1SEG_END)
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
-#define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES)
-#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
+#define VM_KMEM_SIZE (NKMEMCLUSTERS*NBPG)
+#define VM_PHYS_SIZE (USRIOSIZE*NBPG)
/* some Alpha-specific constants */
-#define VPTBASE ((vm_offset_t)0xfffffffc00000000) /* Virt. pg table */
+#define VPTBASE ((vaddr_t)0xfffffffc00000000) /* Virt. pg table */
+
+#define MACHINE_NEW_NONCONTIG
+
+#define VM_PHYSSEG_MAX 16 /* XXX */
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+#define VM_PHYSSEG_NOADD /* no more after vm_mem_init */
+
+#define VM_NFREELIST 1
+#define VM_FREELIST_DEFAULT 0
+
+/*
+ * pmap-specific data stored in the vm_physmem[] array.
+ */
+struct pmap_physseg {
+ struct pv_head *pvhead; /* pv list of this seg */
+};
+
+#endif /* ! _ALPHA_VMPARAM_H_ */
diff --git a/sys/arch/alpha/isa/isa_machdep.h b/sys/arch/alpha/isa/isa_machdep.h
index 6f3745d0bc6..f2ab47a455e 100644
--- a/sys/arch/alpha/isa/isa_machdep.h
+++ b/sys/arch/alpha/isa/isa_machdep.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: isa_machdep.h,v 1.5 1998/06/29 05:32:53 downsj Exp $ */
+/* $OpenBSD: isa_machdep.h,v 1.6 2000/11/08 16:01:15 art Exp $ */
/* $NetBSD: isa_machdep.h,v 1.3 1996/11/19 04:53:07 cgd Exp $ */
/*
@@ -41,7 +41,6 @@ struct alpha_isa_chipset {
void *(*ic_intr_establish) __P((void *, int, int, int,
int (*)(void *), void *, char *));
void (*ic_intr_disestablish) __P((void *, void *));
- int (*ic_intr_check) __P((void *, int, int));
};
/*
@@ -53,8 +52,8 @@ struct alpha_isa_chipset {
(*(c)->ic_intr_establish)((c)->ic_v, (i), (t), (l), (f), (a), (nm))
#define isa_intr_disestablish(c, h) \
(*(c)->ic_intr_disestablish)((c)->ic_v, (h))
-#define isa_intr_check(c, i, t) \
- (*(c)->ic_intr_check)((c)->ic_v, (i), (t))
+
+#define __NO_ISA_INTR_CHECK
/*
* alpha-specific ISA functions.
diff --git a/sys/arch/alpha/pci/apecs.c b/sys/arch/alpha/pci/apecs.c
index c4ee4f99b73..fb3fdaa0459 100644
--- a/sys/arch/alpha/pci/apecs.c
+++ b/sys/arch/alpha/pci/apecs.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: apecs.c,v 1.8 1997/01/24 19:57:32 niklas Exp $ */
+/* $OpenBSD: apecs.c,v 1.9 2000/11/08 16:01:16 art Exp $ */
/* $NetBSD: apecs.c,v 1.16 1996/12/05 01:39:34 cgd Exp $ */
/*
@@ -80,10 +80,10 @@ apecsmatch(parent, match, aux)
#endif
void *aux;
{
- struct confargs *ca = aux;
+ struct mainbus_attach_args *ma = aux;
/* Make sure that we're looking for an APECS. */
- if (strcmp(ca->ca_name, apecs_cd.cd_name) != 0)
+ if (strcmp(ma->ma_name, apecs_cd.cd_name) != 0)
return (0);
if (apecsfound)
diff --git a/sys/arch/alpha/pci/cia.c b/sys/arch/alpha/pci/cia.c
index 4630e5c72ed..eada12b4fab 100644
--- a/sys/arch/alpha/pci/cia.c
+++ b/sys/arch/alpha/pci/cia.c
@@ -1,5 +1,41 @@
-/* $OpenBSD: cia.c,v 1.8 1998/07/01 05:32:35 angelos Exp $ */
-/* $NetBSD: cia.c,v 1.15 1996/12/05 01:39:35 cgd Exp $ */
+/* $NetBSD: cia.c,v 1.56 2000/06/29 08:58:45 mrg Exp $ */
+
+/*-
+ * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
/*
* Copyright (c) 1995, 1996 Carnegie-Mellon University.
@@ -45,18 +81,24 @@
#include <dev/pci/pcivar.h>
#include <alpha/pci/ciareg.h>
#include <alpha/pci/ciavar.h>
-#if defined(DEC_KN20AA)
+
+#ifdef DEC_KN20AA
#include <alpha/pci/pci_kn20aa.h>
#endif
-#if defined(DEC_EB164)
+#ifdef DEC_EB164
#include <alpha/pci/pci_eb164.h>
#endif
+#ifdef DEC_550
+#include <alpha/pci/pci_550.h>
+#endif
+#ifdef DEC_1000A
+#include <alpha/pci/pci_1000a.h>
+#endif
+#ifdef DEC_1000
+#include <alpha/pci/pci_1000.h>
+#endif
-#ifdef __BROKEN_INDIRECT_CONFIG
int ciamatch __P((struct device *, void *, void *));
-#else
-int ciamatch __P((struct device *, struct cfdata *, void *));
-#endif
void ciaattach __P((struct device *, struct device *, void *));
struct cfattach cia_ca = {
@@ -67,26 +109,53 @@ struct cfdriver cia_cd = {
NULL, "cia", DV_DULL,
};
-int ciaprint __P((void *, const char *pnp));
+static int ciaprint __P((void *, const char *pnp));
/* There can be only one. */
int ciafound;
struct cia_config cia_configuration;
+/*
+ * This determines if we attempt to use BWX for PCI bus and config space
+ * access. Some systems, notably with Pyxis, don't fare so well unless
+ * BWX is used.
+ *
+ * EXCEPT! Some devices have a really hard time if BWX is used (WHY?!).
+ * So, we decouple the uses for PCI config space and PCI bus space.
+ *
+ * FURTHERMORE! The Pyxis, most notably earlier revs, really don't
+ * do so well if you don't use BWX for bus access. So we default to
+ * forcing BWX on those chips.
+ *
+ * Geez.
+ */
+
+#ifndef CIA_PCI_USE_BWX
+#define CIA_PCI_USE_BWX 1
+#endif
+
+#ifndef CIA_BUS_USE_BWX
+#define CIA_BUS_USE_BWX 0
+#endif
+
+#ifndef CIA_PYXIS_FORCE_BWX
+#define CIA_PYXIS_FORCE_BWX 0
+#endif
+
+int cia_pci_use_bwx = CIA_PCI_USE_BWX;
+int cia_bus_use_bwx = CIA_BUS_USE_BWX;
+int cia_pyxis_force_bwx = CIA_PYXIS_FORCE_BWX;
+
int
ciamatch(parent, match, aux)
struct device *parent;
-#ifdef __BROKEN_INDIRECT_CONFIG
void *match;
-#else
- struct cfdata *match;
-#endif
void *aux;
{
- struct confargs *ca = aux;
+ struct mainbus_attach_args *ma = aux;
/* Make sure that we're looking for a CIA. */
- if (strcmp(ca->ca_name, cia_cd.cd_name) != 0)
+ if (strcmp(ma->ma_name, cia_cd.cd_name) != 0)
return (0);
if (ciafound)
@@ -103,49 +172,80 @@ cia_init(ccp, mallocsafe)
struct cia_config *ccp;
int mallocsafe;
{
+ int pci_use_bwx = cia_pci_use_bwx;
+ int bus_use_bwx = cia_bus_use_bwx;
+
+ ccp->cc_hae_mem = REGVAL(CIA_CSR_HAE_MEM);
+ ccp->cc_hae_io = REGVAL(CIA_CSR_HAE_IO);
+ ccp->cc_rev = REGVAL(CIA_CSR_REV) & REV_MASK;
/*
- * Can't set up SGMAP data here; can be called before malloc().
- * XXX THIS COMMENT NO LONGER MAKES SENSE.
+ * Determine if we have a Pyxis. Only two systypes can
+ * have this: the EB164 systype (AlphaPC164LX and AlphaPC164SX)
+ * and the DEC_550 systype (Miata).
*/
+ if ((cputype == ST_EB164 &&
+ (hwrpb->rpb_variation & SV_ST_MASK) >= SV_ST_ALPHAPC164LX_400) ||
+ cputype == ST_DEC_550) {
+ ccp->cc_flags |= CCF_ISPYXIS;
+ if (cia_pyxis_force_bwx)
+ pci_use_bwx = bus_use_bwx = 1;
+ }
- ccp->cc_hae_mem = REGVAL(CIA_CSR_HAE_MEM);
- ccp->cc_hae_io = REGVAL(CIA_CSR_HAE_IO);
+ /*
+ * ALCOR/ALCOR2 Revisions >= 2 and Pyxis have the CNFG register.
+ */
+ if (ccp->cc_rev >= 2 || (ccp->cc_flags & CCF_ISPYXIS) != 0)
+ ccp->cc_cnfg = REGVAL(CIA_CSR_CNFG);
+ else
+ ccp->cc_cnfg = 0;
- /*
- * Determine if we have a Pyxis. Only two systypes can
- * have this: the EB164 systype (AlphaPC164LX and AlphaPC164SX)
- * and the DEC_550 systype (Miata).
- */
- if ((hwrpb->rpb_type == ST_EB164 &&
- (hwrpb->rpb_variation & SV_ST_MASK) >= SV_ST_ALPHAPC164LX_400) ||
- hwrpb->rpb_type == ST_DEC_550)
- ccp->cc_flags |= CCF_ISPYXIS;
-
- /*
- * ALCOR/ALCOR2 Revisions >= 2 and Pyxis have the CNFG register.
- */
- if (ccp->cc_rev >= 2 || (ccp->cc_flags & CCF_ISPYXIS) != 0)
- ccp->cc_cnfg = REGVAL(CIA_CSR_CNFG);
- else
- ccp->cc_cnfg = 0;
+ /*
+ * Use BWX iff:
+ *
+ * - It hasn't been disabled by the user,
+ * - it's enabled in CNFG,
+ * - we're implementation version ev5,
+ * - BWX is enabled in the CPU's capabilities mask (yes,
+ * the bit is really cleared if the capability exists...)
+ */
+ if ((pci_use_bwx || bus_use_bwx) &&
+ (ccp->cc_cnfg & CNFG_BWEN) != 0 &&
+ (cpu_amask & ALPHA_AMASK_BWX) != 0) {
+ u_int32_t ctrl;
+
+ if (pci_use_bwx)
+ ccp->cc_flags |= CCF_PCI_USE_BWX;
+ if (bus_use_bwx)
+ ccp->cc_flags |= CCF_BUS_USE_BWX;
+
+ /*
+ * For whatever reason, the firmware seems to enable PCI
+ * loopback mode if it also enables BWX. Make sure it's
+ * enabled if we have an old, buggy firmware rev.
+ */
+ alpha_mb();
+ ctrl = REGVAL(CIA_CSR_CTRL);
+ if ((ctrl & CTRL_PCI_LOOP_EN) == 0) {
+ REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
+ alpha_mb();
+ }
+ }
if (!ccp->cc_initted) {
/* don't do these twice since they set up extents */
- ccp->cc_iot = cia_bus_io_init(ccp);
- ccp->cc_memt = cia_bus_mem_init(ccp);
+ if (ccp->cc_flags & CCF_BUS_USE_BWX) {
+ ccp->cc_iot = cia_bwx_bus_io_init(ccp);
+ ccp->cc_memt = cia_bwx_bus_mem_init(ccp);
+ } else {
+ ccp->cc_iot = cia_bus_io_init(ccp);
+ ccp->cc_memt = cia_bus_mem_init(ccp);
+ }
}
ccp->cc_mallocsafe = mallocsafe;
cia_pci_init(&ccp->cc_pc, ccp);
- /* XXX XXX BEGIN XXX XXX */
- { /* XXX */
- extern vm_offset_t alpha_XXX_dmamap_or; /* XXX */
- alpha_XXX_dmamap_or = 0x40000000; /* XXX */
- } /* XXX */
- /* XXX XXX END XXX XXX */
-
ccp->cc_initted = 1;
}
@@ -157,36 +257,123 @@ ciaattach(parent, self, aux)
struct cia_softc *sc = (struct cia_softc *)self;
struct cia_config *ccp;
struct pcibus_attach_args pba;
+ const char *name;
+ int pass;
/* note that we've attached the chipset; can't have 2 CIAs. */
ciafound = 1;
/*
* set up the chipset's info; done once at console init time
- * (maybe), but doesn't hurt to do twice.
+ * (maybe), but we must do it here as well to take care of things
+ * that need to use memory allocation.
*/
ccp = sc->sc_ccp = &cia_configuration;
cia_init(ccp, 1);
- /* XXX print chipset information */
- printf("\n");
+ if (ccp->cc_flags & CCF_ISPYXIS) {
+ name = "Pyxis";
+ pass = ccp->cc_rev;
+ } else {
+ name = "ALCOR/ALCOR2";
+ pass = ccp->cc_rev + 1;
+ }
+
+ printf(": DECchip 2117x Core Logic Chipset (%s), pass %d\n",
+ name, pass);
- switch (hwrpb->rpb_type) {
-#if defined(DEC_KN20AA)
+ if (ccp->cc_cnfg)
+ printf("%s: extended capabilities: %b\n", self->dv_xname,
+ ccp->cc_cnfg, CIA_CSR_CNFG_BITS);
+
+ switch (ccp->cc_flags & (CCF_PCI_USE_BWX|CCF_BUS_USE_BWX)) {
+ case CCF_PCI_USE_BWX|CCF_BUS_USE_BWX:
+ name = "PCI config and bus";
+ break;
+ case CCF_PCI_USE_BWX:
+ name = "PCI config";
+ break;
+ case CCF_BUS_USE_BWX:
+ name = "bus";
+ break;
+ default:
+ name = NULL;
+ break;
+ }
+ if (name != NULL)
+ printf("%s: using BWX for %s access\n", self->dv_xname, name);
+
+#ifdef DEC_550
+ if (cputype == ST_DEC_550 &&
+ (hwrpb->rpb_variation & SV_ST_MASK) < SV_ST_MIATA_1_5) {
+ /*
+ * Miata 1 systems have a bug: DMA cannot cross
+ * an 8k boundary! Make sure PCI read prefetching
+ * is disabled on these chips. Note that secondary
+ * PCI busses don't have this problem, because of
+ * the way PPBs handle PCI read requests.
+ *
+ * In the 21174 Technical Reference Manual, this is
+ * actually documented as "Pyxis Pass 1", but apparently
+ * there are chips that report themselves as "Pass 1"
+ * which do not have the bug! Miatas with the Cypress
+ * PCI-ISA bridge (i.e. Miata 1.5 and Miata 2) do not
+ * have the bug, so we use this check.
+ *
+ * NOTE: This bug is actually worked around in cia_dma.c,
+ * when direct-mapped DMA maps are created.
+ *
+ * XXX WE NEED TO THINK ABOUT HOW TO HANDLE THIS FOR
+ * XXX SGMAP DMA MAPPINGS!
+ */
+ u_int32_t ctrl;
+
+ /* XXX no bets... */
+ printf("%s: WARNING: Pyxis pass 1 DMA bug; no bets...\n",
+ self->dv_xname);
+
+ ccp->cc_flags |= CCF_PYXISBUG;
+
+ alpha_mb();
+ ctrl = REGVAL(CIA_CSR_CTRL);
+ ctrl &= ~(CTRL_RD_TYPE|CTRL_RL_TYPE|CTRL_RM_TYPE);
+ REGVAL(CIA_CSR_CTRL) = ctrl;
+ alpha_mb();
+ }
+#endif /* DEC_550 */
+
+ cia_dma_init(ccp);
+
+ switch (cputype) {
+#ifdef DEC_KN20AA
case ST_DEC_KN20AA:
pci_kn20aa_pickintr(ccp);
-#ifdef EVCNT_COUNTERS
- evcnt_attach(self, "intr", &kn20aa_intr_evcnt);
-#endif
break;
#endif
-#if defined(DEC_EB164)
+#ifdef DEC_EB164
case ST_EB164:
pci_eb164_pickintr(ccp);
-#ifdef EVCNT_COUNTERS
- evcnt_attach(self, "intr", &eb164_intr_evcnt);
+ break;
#endif
+
+#ifdef DEC_550
+ case ST_DEC_550:
+ pci_550_pickintr(ccp);
+ break;
+#endif
+
+#ifdef DEC_1000A
+ case ST_DEC_1000A:
+ pci_1000a_pickintr(ccp, &ccp->cc_iot, &ccp->cc_memt,
+ &ccp->cc_pc);
+ break;
+#endif
+
+#ifdef DEC_1000
+ case ST_DEC_1000:
+ pci_1000_pickintr(ccp, &ccp->cc_iot, &ccp->cc_memt,
+ &ccp->cc_pc);
break;
#endif
@@ -197,12 +384,21 @@ ciaattach(parent, self, aux)
pba.pba_busname = "pci";
pba.pba_iot = ccp->cc_iot;
pba.pba_memt = ccp->cc_memt;
+ pba.pba_dmat =
+ alphabus_dma_get_tag(&ccp->cc_dmat_direct, ALPHA_BUS_PCI);
pba.pba_pc = &ccp->cc_pc;
pba.pba_bus = 0;
+
+#ifdef notyet
+ pba.pba_flags = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;
+ if ((ccp->cc_flags & CCF_PYXISBUG) == 0)
+ pba.pba_flags |= PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY |
+ PCI_FLAGS_MWI_OKAY;
+#endif
config_found(self, &pba, ciaprint);
}
-int
+static int
ciaprint(aux, pnp)
void *aux;
const char *pnp;
@@ -215,3 +411,27 @@ ciaprint(aux, pnp)
printf(" bus %d", pba->pba_bus);
return (UNCONF);
}
+
+void
+cia_pyxis_intr_enable(irq, onoff)
+ int irq, onoff;
+{
+ u_int64_t imask;
+ int s;
+
+#if 0
+ printf("cia_pyxis_intr_enable: %s %d\n",
+ onoff ? "enabling" : "disabling", irq);
+#endif
+
+ s = splhigh();
+ alpha_mb();
+ imask = REGVAL64(PYXIS_INT_MASK);
+ if (onoff)
+ imask |= (1UL << irq);
+ else
+ imask &= ~(1UL << irq);
+ REGVAL64(PYXIS_INT_MASK) = imask;
+ alpha_mb();
+ splx(s);
+}
diff --git a/sys/arch/alpha/pci/cia_bwx_bus_io.c b/sys/arch/alpha/pci/cia_bwx_bus_io.c
new file mode 100644
index 00000000000..b159c8f5982
--- /dev/null
+++ b/sys/arch/alpha/pci/cia_bwx_bus_io.c
@@ -0,0 +1,57 @@
+/* $NetBSD: cia_bwx_bus_io.c,v 1.3 2000/06/29 08:58:46 mrg Exp $ */
+
+/*
+ * Copyright (c) 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/syslog.h>
+#include <sys/device.h>
+
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+
+#include <alpha/pci/ciareg.h>
+#include <alpha/pci/ciavar.h>
+
+#define CHIP cia_bwx
+
+#define CHIP_EX_MALLOC_SAFE(v) (((struct cia_config *)(v))->cc_mallocsafe)
+#define CHIP_IO_EXTENT(v) (((struct cia_config *)(v))->cc_io_ex)
+
+#define CHIP_IO_SYS_START(v) CIA_EV56_BWIO
+
+/*
+ * CIA core logic with BWX enabled appears on EV56 and PCA56. We
+ * require at least EV56 support for the assembler to emit BWX opcodes.
+ */
+__asm(".arch ev56");
+
+#include <alpha/pci/pci_bwx_bus_io_chipdep.c>
diff --git a/sys/arch/alpha/pci/cia_bwx_bus_mem.c b/sys/arch/alpha/pci/cia_bwx_bus_mem.c
new file mode 100644
index 00000000000..2bc7b6e8f92
--- /dev/null
+++ b/sys/arch/alpha/pci/cia_bwx_bus_mem.c
@@ -0,0 +1,57 @@
+/* $NetBSD: cia_bwx_bus_mem.c,v 1.3 2000/06/29 08:58:46 mrg Exp $ */
+
+/*
+ * Copyright (c) 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/syslog.h>
+#include <sys/device.h>
+
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+
+#include <alpha/pci/ciareg.h>
+#include <alpha/pci/ciavar.h>
+
+#define CHIP cia_bwx
+
+#define CHIP_EX_MALLOC_SAFE(v) (((struct cia_config *)(v))->cc_mallocsafe)
+#define CHIP_MEM_EXTENT(v) (((struct cia_config *)(v))->cc_d_mem_ex)
+
+#define CHIP_MEM_SYS_START(v) CIA_EV56_BWMEM
+
+/*
+ * CIA core logic with BWX enabled appears on EV56 and PCA56. We
+ * require at least EV56 support for the assembler to emit BWX opcodes.
+ */
+__asm(".arch ev56");
+
+#include <alpha/pci/pci_bwx_bus_mem_chipdep.c>
diff --git a/sys/arch/alpha/pci/cia_dma.c b/sys/arch/alpha/pci/cia_dma.c
new file mode 100644
index 00000000000..e39c50fa70b
--- /dev/null
+++ b/sys/arch/alpha/pci/cia_dma.c
@@ -0,0 +1,544 @@
+/* $NetBSD: cia_dma.c,v 1.16 2000/06/29 08:58:46 mrg Exp $ */
+
+/*-
+ * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * XXX - We should define this before including bus.h, but since other stuff
+ * pulls in bus.h we must do this here.
+ */
+#define _ALPHA_BUS_DMA_PRIVATE
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <alpha/pci/ciareg.h>
+#include <alpha/pci/ciavar.h>
+
+bus_dma_tag_t cia_dma_get_tag __P((bus_dma_tag_t, alpha_bus_t));
+
+int cia_bus_dmamap_create_direct __P((bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *));
+
+int cia_bus_dmamap_create_sgmap __P((bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *));
+
+void cia_bus_dmamap_destroy_sgmap __P((bus_dma_tag_t, bus_dmamap_t));
+
+int cia_bus_dmamap_load_sgmap __P((bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int));
+
+int cia_bus_dmamap_load_mbuf_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int));
+
+int cia_bus_dmamap_load_uio_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int));
+
+int cia_bus_dmamap_load_raw_sgmap __P((bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int));
+
+void cia_bus_dmamap_unload_sgmap __P((bus_dma_tag_t, bus_dmamap_t));
+
+/*
+ * Direct-mapped window: 1G at 1G
+ */
+#define CIA_DIRECT_MAPPED_BASE (1*1024*1024*1024)
+#define CIA_DIRECT_MAPPED_SIZE (1*1024*1024*1024)
+
+/*
+ * SGMAP window: 8M at 8M
+ */
+#define CIA_SGMAP_MAPPED_BASE (8*1024*1024)
+#define CIA_SGMAP_MAPPED_SIZE (8*1024*1024)
+
+void cia_tlb_invalidate __P((void));
+void cia_broken_pyxis_tlb_invalidate __P((void));
+
+void (*cia_tlb_invalidate_fn) __P((void));
+
+#define CIA_TLB_INVALIDATE() (*cia_tlb_invalidate_fn)()
+
+struct alpha_sgmap cia_pyxis_bug_sgmap;
+#define CIA_PYXIS_BUG_BASE (128*1024*1024)
+#define CIA_PYXIS_BUG_SIZE (2*1024*1024)
+
+void
+cia_dma_init(ccp)
+ struct cia_config *ccp;
+{
+ bus_addr_t tbase;
+ bus_dma_tag_t t;
+
+ /*
+ * Initialize the DMA tag used for direct-mapped DMA.
+ */
+ t = &ccp->cc_dmat_direct;
+ t->_cookie = ccp;
+ t->_wbase = CIA_DIRECT_MAPPED_BASE;
+ t->_wsize = CIA_DIRECT_MAPPED_SIZE;
+ t->_next_window = NULL;
+ t->_boundary = 0;
+ t->_sgmap = NULL;
+ t->_get_tag = cia_dma_get_tag;
+ t->_dmamap_create = cia_bus_dmamap_create_direct;
+ t->_dmamap_destroy = _bus_dmamap_destroy;
+ t->_dmamap_load = _bus_dmamap_load_direct;
+ t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
+ t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
+ t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
+ t->_dmamap_unload = _bus_dmamap_unload;
+ t->_dmamap_sync = _bus_dmamap_sync;
+
+ t->_dmamem_alloc = _bus_dmamem_alloc;
+ t->_dmamem_free = _bus_dmamem_free;
+ t->_dmamem_map = _bus_dmamem_map;
+ t->_dmamem_unmap = _bus_dmamem_unmap;
+ t->_dmamem_mmap = _bus_dmamem_mmap;
+
+ /*
+ * Initialize the DMA tag used for sgmap-mapped DMA.
+ */
+ t = &ccp->cc_dmat_sgmap;
+ t->_cookie = ccp;
+ t->_wbase = CIA_SGMAP_MAPPED_BASE;
+ t->_wsize = CIA_SGMAP_MAPPED_SIZE;
+ t->_next_window = NULL;
+ t->_boundary = 0;
+ t->_sgmap = &ccp->cc_sgmap;
+ t->_get_tag = cia_dma_get_tag;
+ t->_dmamap_create = cia_bus_dmamap_create_sgmap;
+ t->_dmamap_destroy = cia_bus_dmamap_destroy_sgmap;
+ t->_dmamap_load = cia_bus_dmamap_load_sgmap;
+ t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
+ t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
+ t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
+ t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
+ t->_dmamap_sync = _bus_dmamap_sync;
+
+ t->_dmamem_alloc = _bus_dmamem_alloc;
+ t->_dmamem_free = _bus_dmamem_free;
+ t->_dmamem_map = _bus_dmamem_map;
+ t->_dmamem_unmap = _bus_dmamem_unmap;
+ t->_dmamem_mmap = _bus_dmamem_mmap;
+
+ /*
+ * The firmware has set up window 1 as a 1G direct-mapped DMA
+ * window beginning at 1G. We leave it alone. Leave window
+ * 0 alone until we reconfigure it for SGMAP-mapped DMA.
+ * Windows 2 and 3 are already disabled.
+ */
+
+ /*
+ * Initialize the SGMAP. Must align page table to 32k
+ * (hardware bug?).
+ */
+ alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
+ CIA_SGMAP_MAPPED_BASE, 0, CIA_SGMAP_MAPPED_SIZE,
+ sizeof(u_int64_t), NULL, (32*1024));
+
+ /*
+ * Set up window 0 as an 8MB SGMAP-mapped window
+ * starting at 8MB.
+ */
+ REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
+ CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
+ alpha_mb();
+
+ REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
+ alpha_mb();
+
+ tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
+ if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
+ panic("cia_dma_init: bad page table address");
+ REGVAL(CIA_PCI_T0BASE) = tbase;
+ alpha_mb();
+
+ /*
+ * Pass 1 and 2 (i.e. revision <= 1) of the Pyxis have a
+ * broken scatter/gather TLB; it cannot be invalidated. To
+ * work around this problem, we configure window 2 as an SG
+ * 2M window at 128M, which we use in DMA loopback mode to
+ * read a spill page. This works by causing TLB misses,
+ * causing the old entries to be purged to make room for
+ * the new entries coming in for the spill page.
+ */
+ if ((ccp->cc_flags & CCF_ISPYXIS) != 0 && ccp->cc_rev <= 1) {
+ u_int64_t *page_table;
+ int i;
+
+ cia_tlb_invalidate_fn =
+ cia_broken_pyxis_tlb_invalidate;
+
+ alpha_sgmap_init(t, &cia_pyxis_bug_sgmap,
+ "pyxis_bug_sgmap", CIA_PYXIS_BUG_BASE, 0,
+ CIA_PYXIS_BUG_SIZE, sizeof(u_int64_t), NULL,
+ (32*1024));
+
+ REGVAL(CIA_PCI_W2BASE) = CIA_PYXIS_BUG_BASE |
+ CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
+ alpha_mb();
+
+ REGVAL(CIA_PCI_W2MASK) = CIA_PCI_WnMASK_2M;
+ alpha_mb();
+
+ tbase = cia_pyxis_bug_sgmap.aps_ptpa >>
+ CIA_PCI_TnBASE_SHIFT;
+ if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
+ panic("cia_dma_init: bad page table address");
+ REGVAL(CIA_PCI_T2BASE) = tbase;
+ alpha_mb();
+
+ /*
+ * Initialize the page table to point at the spill
+ * page. Leave the last entry invalid.
+ */
+ pci_sgmap_pte64_init_spill_page_pte();
+ for (i = 0, page_table = cia_pyxis_bug_sgmap.aps_pt;
+ i < (CIA_PYXIS_BUG_SIZE / PAGE_SIZE) - 1; i++) {
+ page_table[i] =
+ pci_sgmap_pte64_prefetch_spill_page_pte;
+ }
+ alpha_mb();
+ } else
+ cia_tlb_invalidate_fn = cia_tlb_invalidate;
+
+ CIA_TLB_INVALIDATE();
+
+ /* XXX XXX BEGIN XXX XXX */
+ { /* XXX */
+ extern paddr_t alpha_XXX_dmamap_or; /* XXX */
+ alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE; /* XXX */
+ } /* XXX */
+ /* XXX XXX END XXX XXX */
+}
+
+/*
+ * Return the bus dma tag to be used for the specified bus type.
+ * INTERNAL USE ONLY!
+ */
+bus_dma_tag_t
+cia_dma_get_tag(t, bustype)
+ bus_dma_tag_t t;
+ alpha_bus_t bustype;
+{
+ struct cia_config *ccp = t->_cookie;
+
+ switch (bustype) {
+ case ALPHA_BUS_PCI:
+ case ALPHA_BUS_EISA:
+ /*
+ * Systems with a CIA can only support 1G
+ * of memory, so we use the direct-mapped window
+ * on busses that have 32-bit DMA.
+ */
+ return (&ccp->cc_dmat_direct);
+
+ case ALPHA_BUS_ISA:
+ /*
+ * ISA doesn't have enough address bits to use
+ * the direct-mapped DMA window, so we must use
+ * SGMAPs.
+ */
+ return (&ccp->cc_dmat_sgmap);
+
+ default:
+ panic("cia_dma_get_tag: shouldn't be here, really...");
+ }
+}
+
+/*
+ * Create a CIA direct-mapped DMA map.
+ */
+int
+cia_bus_dmamap_create_direct(t, size, nsegments, maxsegsz, boundary,
+ flags, dmamp)
+ bus_dma_tag_t t;
+ bus_size_t size;
+ int nsegments;
+ bus_size_t maxsegsz;
+ bus_size_t boundary;
+ int flags;
+ bus_dmamap_t *dmamp;
+{
+ struct cia_config *ccp = t->_cookie;
+ bus_dmamap_t map;
+ int error;
+
+ error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
+ boundary, flags, dmamp);
+ if (error)
+ return (error);
+
+ map = *dmamp;
+
+ if ((ccp->cc_flags & CCF_PYXISBUG) != 0 &&
+ map->_dm_segcnt > 1) {
+ /*
+ * We have a Pyxis with the DMA page crossing bug, make
+ * sure we don't coalesce adjacent DMA segments.
+ *
+ * NOTE: We can only do this if the max segment count
+ * is greater than 1. This is because many network
+ * drivers allocate large contiguous blocks of memory
+ * for control data structures, even though they won't
+		 * do any single DMA that crosses a page boundary.
+ * -- thorpej@netbsd.org, 2/5/2000
+ */
+ map->_dm_flags |= DMAMAP_NO_COALESCE;
+ }
+
+ return (0);
+}
+
+/*
+ * Create a CIA SGMAP-mapped DMA map.
+ */
+int
+cia_bus_dmamap_create_sgmap(t, size, nsegments, maxsegsz, boundary,
+ flags, dmamp)
+ bus_dma_tag_t t;
+ bus_size_t size;
+ int nsegments;
+ bus_size_t maxsegsz;
+ bus_size_t boundary;
+ int flags;
+ bus_dmamap_t *dmamp;
+{
+ bus_dmamap_t map;
+ int error;
+
+ error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
+ boundary, flags, dmamp);
+ if (error)
+ return (error);
+
+ map = *dmamp;
+
+ if (flags & BUS_DMA_ALLOCNOW) {
+ error = alpha_sgmap_alloc(map, round_page(size),
+ t->_sgmap, flags);
+ if (error)
+ cia_bus_dmamap_destroy_sgmap(t, map);
+ }
+
+ return (error);
+}
+
+/*
+ * Destroy a CIA SGMAP-mapped DMA map.
+ */
+void
+cia_bus_dmamap_destroy_sgmap(t, map)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+{
+
+ if (map->_dm_flags & DMAMAP_HAS_SGMAP)
+ alpha_sgmap_free(map, t->_sgmap);
+
+ _bus_dmamap_destroy(t, map);
+}
+
+/*
+ * Load a CIA SGMAP-mapped DMA map with a linear buffer.
+ */
+int
+cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ void *buf;
+ bus_size_t buflen;
+ struct proc *p;
+ int flags;
+{
+ int error;
+
+ error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
+ t->_sgmap);
+ if (error == 0)
+ CIA_TLB_INVALIDATE();
+
+ return (error);
+}
+
+/*
+ * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
+ */
+int
+cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct mbuf *m;
+ int flags;
+{
+ int error;
+
+ error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, t->_sgmap);
+ if (error == 0)
+ CIA_TLB_INVALIDATE();
+
+ return (error);
+}
+
+/*
+ * Load a CIA SGMAP-mapped DMA map with a uio.
+ */
+int
+cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ struct uio *uio;
+ int flags;
+{
+ int error;
+
+ error = pci_sgmap_pte64_load_uio(t, map, uio, flags, t->_sgmap);
+ if (error == 0)
+ CIA_TLB_INVALIDATE();
+
+ return (error);
+}
+
+/*
+ * Load a CIA SGMAP-mapped DMA map with raw memory.
+ */
+int
+cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ bus_size_t size;
+ int flags;
+{
+ int error;
+
+ error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
+ t->_sgmap);
+ if (error == 0)
+ CIA_TLB_INVALIDATE();
+
+ return (error);
+}
+
+/*
+ * Unload a CIA DMA map.
+ */
+void
+cia_bus_dmamap_unload_sgmap(t, map)
+ bus_dma_tag_t t;
+ bus_dmamap_t map;
+{
+
+ /*
+ * Invalidate any SGMAP page table entries used by this
+ * mapping.
+ */
+ pci_sgmap_pte64_unload(t, map, t->_sgmap);
+ CIA_TLB_INVALIDATE();
+
+ /*
+ * Do the generic bits of the unload.
+ */
+ _bus_dmamap_unload(t, map);
+}
+
+/*
+ * Flush the CIA scatter/gather TLB.
+ */
+void
+cia_tlb_invalidate()
+{
+
+ alpha_mb();
+ REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL;
+ alpha_mb();
+}
+
+/*
+ * Flush the scatter/gather TLB on broken Pyxis chips.
+ */
+void
+cia_broken_pyxis_tlb_invalidate()
+{
+ volatile u_int64_t dummy;
+ u_int32_t ctrl;
+ int i, s;
+
+ s = splhigh();
+
+ /*
+ * Put the Pyxis into PCI loopback mode.
+ */
+ alpha_mb();
+ ctrl = REGVAL(CIA_CSR_CTRL);
+ REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
+ alpha_mb();
+
+ /*
+ * Now, read from PCI dense memory space at offset 128M (our
+ * target window base), skipping 64k on each read. This forces
+ * S/G TLB misses.
+ *
+ * XXX Looks like the TLB entries are `not quite LRU'. We need
+ * XXX to read more times than there are actual tags!
+ */
+ for (i = 0; i < CIA_TLB_NTAGS + 4; i++) {
+ dummy = *((volatile u_int64_t *)
+ ALPHA_PHYS_TO_K0SEG(CIA_PCI_DENSE + CIA_PYXIS_BUG_BASE +
+ (i * 65536)));
+ }
+
+ /*
+ * Restore normal PCI operation.
+ */
+ alpha_mb();
+ REGVAL(CIA_CSR_CTRL) = ctrl;
+ alpha_mb();
+
+ splx(s);
+}
diff --git a/sys/arch/alpha/pci/cia_pci.c b/sys/arch/alpha/pci/cia_pci.c
index 353c5615a21..1dcd6ee0593 100644
--- a/sys/arch/alpha/pci/cia_pci.c
+++ b/sys/arch/alpha/pci/cia_pci.c
@@ -1,5 +1,4 @@
-/* $OpenBSD: cia_pci.c,v 1.5 1997/01/24 19:57:39 niklas Exp $ */
-/* $NetBSD: cia_pci.c,v 1.7 1996/11/23 06:46:50 cgd Exp $ */
+/* $NetBSD: cia_pci.c,v 1.25 2000/06/29 08:58:46 mrg Exp $ */
/*
* Copyright (c) 1995, 1996 Carnegie-Mellon University.
@@ -32,18 +31,15 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
-#include <vm/vm.h>
-#include <machine/autoconf.h> /* badaddr proto */
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <alpha/pci/ciareg.h>
#include <alpha/pci/ciavar.h>
-#include <machine/rpb.h> /* XXX for eb164 CIA firmware workarounds. */
-#include "dec_eb164.h" /* XXX for eb164 CIA firmware workarounds. */
-
void cia_attach_hook __P((struct device *, struct device *,
struct pcibus_attach_args *));
int cia_bus_maxdevs __P((void *, int));
@@ -117,72 +113,86 @@ cia_conf_read(cpv, tag, offset)
struct cia_config *ccp = cpv;
pcireg_t *datap, data;
int s, secondary, ba;
- int32_t old_haxr2; /* XXX */
-#if NDEC_EB164
- extern int cputype; /* XXX */
-#endif
+ u_int32_t old_cfg, errbits;
-#ifdef DIAGNOSTIC
+#ifdef __GNUC__
s = 0; /* XXX gcc -Wuninitialized */
- old_haxr2 = 0; /* XXX gcc -Wuninitialized */
+ old_cfg = 0; /* XXX gcc -Wuninitialized */
#endif
-#if NDEC_EB164
/*
- * Some (apparently-common) revisions of EB164 firmware do the
- * Wrong thing with PCI master aborts, which are caused by
- * accesing the configuration space of devices that don't
- * exist (for example).
+ * Some (apparently-common) revisions of EB164 and AlphaStation
+ * firmware do the Wrong thing with PCI master and target aborts,
+ * which are caused by accessing the configuration space of devices
+ * that don't exist (for example).
*
- * On EB164's we clear the CIA error register's PCI master
- * abort bit before touching PCI configuration space and
- * check it afterwards. If it indicates a master abort,
- * the device wasn't there so we return 0xffffffff.
+ * To work around this, we clear the CIA error register's PCI
+ * master and target abort bits before touching PCI configuration
+ * space and check it afterwards. If it indicates a master or target
+ * abort, the device wasn't there so we return 0xffffffff.
*/
- if (cputype == ST_EB164) {
- /* clear the PCI master abort bit in CIA error register */
- REGVAL(CIA_CSR_CIA_ERR) = 0x00000080; /* XXX */
- alpha_mb();
- alpha_pal_draina();
- }
-#endif
+ REGVAL(CIA_CSR_CIA_ERR) = CIA_ERR_RCVD_MAS_ABT|CIA_ERR_RCVD_TAR_ABT;
+ alpha_mb();
+ alpha_pal_draina();
/* secondary if bus # != 0 */
- pci_decompose_tag(&ccp->cc_pc, tag, &secondary, 0, 0);
+ alpha_pci_decompose_tag(&ccp->cc_pc, tag, &secondary, 0, 0);
if (secondary) {
s = splhigh();
- old_haxr2 = REGVAL(CIA_CSRS + 0x480); /* XXX */
+ old_cfg = REGVAL(CIA_CSR_CFG);
alpha_mb();
- REGVAL(CIA_CSRS + 0x480) = old_haxr2 | 0x1; /* XXX */
+ REGVAL(CIA_CSR_CFG) = old_cfg | 0x1;
alpha_mb();
}
- datap = (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_PCI_CONF |
- tag << 5UL | /* XXX */
- (offset & ~0x03) << 5 | /* XXX */
- 0 << 5 | /* XXX */
- 0x3 << 3); /* XXX */
+ /*
+ * We just inline the BWX support, since this is the only
+ * difference between BWX and swiz for config space.
+ */
+ if (ccp->cc_flags & CCF_PCI_USE_BWX) {
+ if (secondary) {
+ datap =
+ (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_EV56_BWCONF1 |
+ tag | (offset & ~0x03));
+ } else {
+ datap =
+ (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_EV56_BWCONF0 |
+ tag | (offset & ~0x03));
+ }
+ } else {
+ datap = (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_PCI_CONF |
+ tag << 5UL | /* XXX */
+ (offset & ~0x03) << 5 | /* XXX */
+ 0 << 5 | /* XXX */
+ 0x3 << 3); /* XXX */
+ }
data = (pcireg_t)-1;
+ alpha_mb();
if (!(ba = badaddr(datap, sizeof *datap)))
data = *datap;
+ alpha_mb();
+ alpha_mb();
if (secondary) {
alpha_mb();
- REGVAL(CIA_CSRS + 0x480) = old_haxr2; /* XXX */
+ REGVAL(CIA_CSR_CFG) = old_cfg;
alpha_mb();
splx(s);
}
-#if NDEC_EB164
- if (cputype == ST_EB164) {
- alpha_pal_draina();
- /* check CIA error register for PCI master abort */
- if (REGVAL(CIA_CSR_CIA_ERR) & 0x00000080) { /* XXX */
- ba = 1;
- data = 0xffffffff;
- }
+ alpha_pal_draina();
+ alpha_mb();
+ errbits = REGVAL(CIA_CSR_CIA_ERR);
+ if (errbits & (CIA_ERR_RCVD_MAS_ABT|CIA_ERR_RCVD_TAR_ABT)) {
+ ba = 1;
+ data = 0xffffffff;
+ }
+
+ if (errbits) {
+ REGVAL(CIA_CSR_CIA_ERR) = errbits;
+ alpha_mb();
+ alpha_pal_draina();
}
-#endif
#if 0
printf("cia_conf_read: tag 0x%lx, reg 0x%lx -> %x @ %p%s\n", tag, reg,
@@ -202,33 +212,52 @@ cia_conf_write(cpv, tag, offset, data)
struct cia_config *ccp = cpv;
pcireg_t *datap;
int s, secondary;
- int32_t old_haxr2; /* XXX */
+ u_int32_t old_cfg;
-#ifdef DIAGNOSTIC
+#ifdef __GNUC__
s = 0; /* XXX gcc -Wuninitialized */
- old_haxr2 = 0; /* XXX gcc -Wuninitialized */
+ old_cfg = 0; /* XXX gcc -Wuninitialized */
#endif
/* secondary if bus # != 0 */
- pci_decompose_tag(&ccp->cc_pc, tag, &secondary, 0, 0);
+ alpha_pci_decompose_tag(&ccp->cc_pc, tag, &secondary, 0, 0);
if (secondary) {
s = splhigh();
- old_haxr2 = REGVAL(CIA_CSRS + 0x480); /* XXX */
+ old_cfg = REGVAL(CIA_CSR_CFG);
alpha_mb();
- REGVAL(CIA_CSRS + 0x480) = old_haxr2 | 0x1; /* XXX */
+ REGVAL(CIA_CSR_CFG) = old_cfg | 0x1;
alpha_mb();
}
- datap = (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_PCI_CONF |
- tag << 5UL | /* XXX */
- (offset & ~0x03) << 5 | /* XXX */
- 0 << 5 | /* XXX */
- 0x3 << 3); /* XXX */
+ /*
+ * We just inline the BWX support, since this is the only
+ * difference between BWX and swiz for config space.
+ */
+ if (ccp->cc_flags & CCF_PCI_USE_BWX) {
+ if (secondary) {
+ datap =
+ (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_EV56_BWCONF1 |
+ tag | (offset & ~0x03));
+ } else {
+ datap =
+ (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_EV56_BWCONF0 |
+ tag | (offset & ~0x03));
+ }
+ } else {
+ datap = (pcireg_t *)ALPHA_PHYS_TO_K0SEG(CIA_PCI_CONF |
+ tag << 5UL | /* XXX */
+ (offset & ~0x03) << 5 | /* XXX */
+ 0 << 5 | /* XXX */
+ 0x3 << 3); /* XXX */
+ }
+ alpha_mb();
*datap = data;
+ alpha_mb();
+ alpha_mb();
if (secondary) {
alpha_mb();
- REGVAL(CIA_CSRS + 0x480) = old_haxr2; /* XXX */
+ REGVAL(CIA_CSR_CFG) = old_cfg;
alpha_mb();
splx(s);
}
diff --git a/sys/arch/alpha/pci/ciareg.h b/sys/arch/alpha/pci/ciareg.h
index f66efc9466b..a40bae99a46 100644
--- a/sys/arch/alpha/pci/ciareg.h
+++ b/sys/arch/alpha/pci/ciareg.h
@@ -1,11 +1,10 @@
-/* $OpenBSD: ciareg.h,v 1.7 1998/06/28 03:00:25 angelos Exp $ */
-/* $NetBSD: ciareg.h,v 1.7 1996/11/23 06:42:55 cgd Exp $ */
+/* $NetBSD: ciareg.h,v 1.22 1998/06/06 20:40:14 thorpej Exp $ */
/*
* Copyright (c) 1995, 1996 Carnegie-Mellon University.
* All rights reserved.
*
- * Author: Chris G. Demetriou
+ * Authors: Chris G. Demetriou, Jason R. Thorpe
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
@@ -31,10 +30,11 @@
/*
* 21171 Chipset registers and constants.
*
- * Taken from XXX
+ * Taken from EC-QE18B-TE.
*/
-#define REGVAL(r) (*(int32_t *)ALPHA_PHYS_TO_K0SEG(r))
+#define REGVAL(r) (*(volatile int32_t *)ALPHA_PHYS_TO_K0SEG(r))
+#define REGVAL64(r) (*(volatile u_int64_t *)ALPHA_PHYS_TO_K0SEG(r))
/*
* Base addresses
@@ -50,121 +50,121 @@
#define CIA_CSRS 0x8740000000UL
#define CIA_PCI_MC_CSRS 0x8750000000UL
#define CIA_PCI_ATRANS 0x8760000000UL
-#define CIA_PCI_TBIA 0x8760000100UL
-#define CIA_EV56_BWMEM 0x8800000000UL
-#define CIA_EV56_BWIO 0x8900000000UL
-#define CIA_EV56_BWCONF0 0x8a00000000UL
-#define CIA_EV56_BWCONF1 0x8b00000000UL
+#define CIA_PCI_TBIA 0x8760000100UL
+#define CIA_EV56_BWMEM 0x8800000000UL
+#define CIA_EV56_BWIO 0x8900000000UL
+#define CIA_EV56_BWCONF0 0x8a00000000UL
+#define CIA_EV56_BWCONF1 0x8b00000000UL
-#define CIA_PCI_W0BASE 0x8760000400UL
-#define CIA_PCI_W0MASK 0x8760000440UL
-#define CIA_PCI_T0BASE 0x8760000480UL
+#define CIA_PCI_W0BASE 0x8760000400UL
+#define CIA_PCI_W0MASK 0x8760000440UL
+#define CIA_PCI_T0BASE 0x8760000480UL
-#define CIA_PCI_W1BASE 0x8760000500UL
-#define CIA_PCI_W1MASK 0x8760000540UL
-#define CIA_PCI_T1BASE 0x8760000580UL
+#define CIA_PCI_W1BASE 0x8760000500UL
+#define CIA_PCI_W1MASK 0x8760000540UL
+#define CIA_PCI_T1BASE 0x8760000580UL
-#define CIA_PCI_W2BASE 0x8760000600UL
-#define CIA_PCI_W2MASK 0x8760000640UL
-#define CIA_PCI_T2BASE 0x8760000680UL
+#define CIA_PCI_W2BASE 0x8760000600UL
+#define CIA_PCI_W2MASK 0x8760000640UL
+#define CIA_PCI_T2BASE 0x8760000680UL
-#define CIA_PCI_W3BASE 0x8760000700UL
-#define CIA_PCI_W3MASK 0x8760000740UL
-#define CIA_PCI_T3BASE 0x8760000780UL
+#define CIA_PCI_W3BASE 0x8760000700UL
+#define CIA_PCI_W3MASK 0x8760000740UL
+#define CIA_PCI_T3BASE 0x8760000780UL
-#define PYXIS_INT_REQ 0x87a0000000UL
-#define PYXIS_INT_MASK 0x87a0000040UL
-#define PYXIS_GPO 0x87a0000180UL
+#define PYXIS_INT_REQ 0x87a0000000UL
+#define PYXIS_INT_MASK 0x87a0000040UL
+#define PYXIS_GPO 0x87a0000180UL
/*
* Values for CIA_PCI_TBIA
*/
-#define CIA_PCI_TBIA_NOOP 0 /* no operation */
-#define CIA_PCI_TBIA_LOCKED 1 /* invalidate and unlock locked tags */
-#define CIA_PCI_TBIA_UNLOCKED 2 /* invalidate unlocked tags */
-#define CIA_PCI_TBIA_ALL 3 /* invalidate and unlock all tags */
+#define CIA_PCI_TBIA_NOOP 0 /* no operation */
+#define CIA_PCI_TBIA_LOCKED 1 /* invalidate and unlock locked tags */
+#define CIA_PCI_TBIA_UNLOCKED 2 /* invalidate unlocked tags */
+#define CIA_PCI_TBIA_ALL 3 /* invalidate and unlock all tags */
-#define CIA_TLB_NTAGS 8 /* number of TLB entries */
+#define CIA_TLB_NTAGS 8 /* number of TLB entries */
/*
* Values for CIA_PCI_WnBASE
*/
-#define CIA_PCI_WnBASE_W_BASE 0xfff00000
-#define CIA_PCI_WnBASE_DAC_EN 0x00000008 /* W3BASE only */
-#define CIA_PCI_WnBASE_MEMCS_EN 0x00000004 /* W0BASE only */
-#define CIA_PCI_WnBASE_SG_EN 0x00000002
-#define CIA_PCI_WnBASE_W_EN 0x00000001
+#define CIA_PCI_WnBASE_W_BASE 0xfff00000
+#define CIA_PCI_WnBASE_DAC_EN 0x00000008 /* W3BASE only */
+#define CIA_PCI_WnBASE_MEMCS_EN 0x00000004 /* W0BASE only */
+#define CIA_PCI_WnBASE_SG_EN 0x00000002
+#define CIA_PCI_WnBASE_W_EN 0x00000001
/*
* Values for CIA_PCI_WnMASK
*/
-#define CIA_PCI_WnMASK_W_MASK 0xfff00000
-#define CIA_PCI_WnMASK_1M 0x00000000
-#define CIA_PCI_WnMASK_2M 0x00100000
-#define CIA_PCI_WnMASK_4M 0x00300000
-#define CIA_PCI_WnMASK_8M 0x00700000
-#define CIA_PCI_WnMASK_16M 0x00f00000
-#define CIA_PCI_WnMASK_32M 0x01f00000
-#define CIA_PCI_WnMASK_64M 0x03f00000
-#define CIA_PCI_WnMASK_128M 0x07f00000
-#define CIA_PCI_WnMASK_256M 0x0ff00000
-#define CIA_PCI_WnMASK_512M 0x1ff00000
-#define CIA_PCI_WnMASK_1G 0x3ff00000
-#define CIA_PCI_WnMASK_2G 0x7ff00000
-#define CIA_PCI_WnMASK_4G 0xfff00000
+#define CIA_PCI_WnMASK_W_MASK 0xfff00000
+#define CIA_PCI_WnMASK_1M 0x00000000
+#define CIA_PCI_WnMASK_2M 0x00100000
+#define CIA_PCI_WnMASK_4M 0x00300000
+#define CIA_PCI_WnMASK_8M 0x00700000
+#define CIA_PCI_WnMASK_16M 0x00f00000
+#define CIA_PCI_WnMASK_32M 0x01f00000
+#define CIA_PCI_WnMASK_64M 0x03f00000
+#define CIA_PCI_WnMASK_128M 0x07f00000
+#define CIA_PCI_WnMASK_256M 0x0ff00000
+#define CIA_PCI_WnMASK_512M 0x1ff00000
+#define CIA_PCI_WnMASK_1G 0x3ff00000
+#define CIA_PCI_WnMASK_2G 0x7ff00000
+#define CIA_PCI_WnMASK_4G 0xfff00000
/*
* Values for CIA_PCI_TnBASE
*/
-#define CIA_PCI_TnBASE_MASK 0xfffffff0
-#define CIA_PCI_TnBASE_SHIFT 2
+#define CIA_PCI_TnBASE_MASK 0xfffffff0
+#define CIA_PCI_TnBASE_SHIFT 2
/*
* General CSRs
*/
-#define CIA_CSR_REV (CIA_CSRS + 0x80)
-
-#define REV_MASK 0x000000ff
-#define REV_ALT_MEM 0x00000100 /* not on Pyxis */
-
-#define REV_PYXIS_ID_MASK 0x0000ff00
-#define REV_PYXIS_ID_21174 0x00000100
-
-#define CIA_CSR_CTRL (CIA_CSRS + 0x100)
-
-#define CTRL_RCI_EN 0x00000001
-#define CTRL_PCI_LOCK_EN 0x00000002
-#define CTRL_PCI_LOOP_EN 0x00000004
-#define CTRL_FST_BB_EN 0x00000008
-#define CTRL_PCI_MST_EN 0x00000010
-#define CTRL_PCI_MEM_EN 0x00000020
-#define CTRL_PCI_REQ64_EN 0x00000040
-#define CTRL_PCI_ACK64_EN 0x00000080
-#define CTRL_ADDR_PE_EN 0x00000100
-#define CTRL_PERR_EN 0x00000200
-#define CTRL_FILL_ERR_EN 0x00000400
-#define CTRL_ECC_CHK_EN 0x00001000
-#define CTRL_CACK_EN_PE 0x00002000
-#define CTRL_CON_IDLE_BC 0x00004000
-#define CTRL_CSR_IOA_BYPASS 0x00008000
-#define CTRL_IO_FLUSHREQ_EN 0x00010000
-#define CTRL_CPU_CLUSHREQ_EN 0x00020000
-#define CTRL_ARB_EV5_EN 0x00040000
-#define CTRL_EN_ARB_LINK 0x00080000
-#define CTRL_RD_TYPE 0x00300000
-#define CTRL_RL_TYPE 0x03000000
-#define CTRL_RM_TYPE 0x30000000
+#define CIA_CSR_REV (CIA_CSRS + 0x80)
+
+#define REV_MASK 0x000000ff
+#define REV_ALT_MEM 0x00000100 /* not on Pyxis */
+
+#define REV_PYXIS_ID_MASK 0x0000ff00
+#define REV_PYXIS_ID_21174 0x00000100
+
+#define CIA_CSR_CTRL (CIA_CSRS + 0x100)
+
+#define CTRL_RCI_EN 0x00000001
+#define CTRL_PCI_LOCK_EN 0x00000002
+#define CTRL_PCI_LOOP_EN 0x00000004
+#define CTRL_FST_BB_EN 0x00000008
+#define CTRL_PCI_MST_EN 0x00000010
+#define CTRL_PCI_MEM_EN 0x00000020
+#define CTRL_PCI_REQ64_EN 0x00000040
+#define CTRL_PCI_ACK64_EN 0x00000080
+#define CTRL_ADDR_PE_EN 0x00000100
+#define CTRL_PERR_EN 0x00000200
+#define CTRL_FILL_ERR_EN 0x00000400
+#define CTRL_ECC_CHK_EN 0x00001000
+#define CTRL_CACK_EN_PE 0x00002000
+#define CTRL_CON_IDLE_BC 0x00004000
+#define CTRL_CSR_IOA_BYPASS 0x00008000
+#define CTRL_IO_FLUSHREQ_EN 0x00010000
+#define CTRL_CPU_CLUSHREQ_EN 0x00020000
+#define CTRL_ARB_EV5_EN 0x00040000
+#define CTRL_EN_ARB_LINK 0x00080000
+#define CTRL_RD_TYPE 0x00300000
+#define CTRL_RL_TYPE 0x03000000
+#define CTRL_RM_TYPE 0x30000000
/* a.k.a. CIA_CSR_PYXIS_CTRL1 */
-#define CIA_CSR_CNFG (CIA_CSRS + 0x140)
+#define CIA_CSR_CNFG (CIA_CSRS + 0x140)
-#define CNFG_BWEN 0x00000001
-#define CNFG_MWEN 0x00000010
-#define CNFG_DWEN 0x00000020
-#define CNFG_WLEN 0x00000100
+#define CNFG_BWEN 0x00000001
+#define CNFG_MWEN 0x00000010
+#define CNFG_DWEN 0x00000020
+#define CNFG_WLEN 0x00000100
-#define CIA_CSR_CNFG_BITS "\20\11WLEN\6DWEN\5MWEN\1BWEN"
+#define CIA_CSR_CNFG_BITS "\20\11WLEN\6DWEN\5MWEN\1BWEN"
#define CIA_CSR_HAE_MEM (CIA_CSRS + 0x400)
@@ -182,33 +182,33 @@
#define HAE_IO_REG2_START(x) (((u_int32_t)(x) & 0xfe000000UL) << 0)
#define HAE_IO_REG2_MASK 0x01ffffffUL
-#define CIA_CSR_CFG (CIA_CSRS + 0x480)
+#define CIA_CSR_CFG (CIA_CSRS + 0x480)
-#define CFG_CFG_MASK 0x00000003UL
+#define CFG_CFG_MASK 0x00000003UL
#define CIA_CSR_CIA_ERR (CIA_CSRS + 0x8200)
-#define CIA_ERR_COR_ERR 0x00000001
-#define CIA_ERR_UN_COR_ERR 0x00000002
-#define CIA_ERR_CPU_PE 0x00000004
-#define CIA_ERR_MEM_NEM 0x00000008
-#define CIA_ERR_PCI_SERR 0x00000010
-#define CIA_ERR_PERR 0x00000020
-#define CIA_ERR_PCI_ADDR_PE 0x00000040
-#define CIA_ERR_RCVD_MAS_ABT 0x00000080
-#define CIA_ERR_RCVD_TAR_ABT 0x00000100
-#define CIA_ERR_PA_PTE_INV 0x00000200
-#define CIA_ERR_FROM_WRT_ERR 0x00000400
-#define CIA_ERR_IOA_TIMEOUT 0x00000800
-#define CIA_ERR_LOST_COR_ERR 0x00010000
-#define CIA_ERR_LOST_UN_COR_ERR 0x00020000
-#define CIA_ERR_LOST_CPU_PE 0x00040000
-#define CIA_ERR_LOST_MEM_NEM 0x00080000
-#define CIA_ERR_LOST_PERR 0x00200000
-#define CIA_ERR_LOST_PCI_ADDR_PE 0x00400000
-#define CIA_ERR_LOST_RCVD_MAS_ABT 0x00800000
-#define CIA_ERR_LOST_RCVD_TAR_ABT 0x01000000
-#define CIA_ERR_LOST_PA_PTE_INV 0x02000000
-#define CIA_ERR_LOST_FROM_WRT_ERR 0x04000000
-#define CIA_ERR_LOST_IOA_TIMEOUT 0x08000000
-#define CIA_ERR_VALID 0x80000000
+#define CIA_ERR_COR_ERR 0x00000001
+#define CIA_ERR_UN_COR_ERR 0x00000002
+#define CIA_ERR_CPU_PE 0x00000004
+#define CIA_ERR_MEM_NEM 0x00000008
+#define CIA_ERR_PCI_SERR 0x00000010
+#define CIA_ERR_PERR 0x00000020
+#define CIA_ERR_PCI_ADDR_PE 0x00000040
+#define CIA_ERR_RCVD_MAS_ABT 0x00000080
+#define CIA_ERR_RCVD_TAR_ABT 0x00000100
+#define CIA_ERR_PA_PTE_INV 0x00000200
+#define CIA_ERR_FROM_WRT_ERR 0x00000400
+#define CIA_ERR_IOA_TIMEOUT 0x00000800
+#define CIA_ERR_LOST_COR_ERR 0x00010000
+#define CIA_ERR_LOST_UN_COR_ERR 0x00020000
+#define CIA_ERR_LOST_CPU_PE 0x00040000
+#define CIA_ERR_LOST_MEM_NEM 0x00080000
+#define CIA_ERR_LOST_PERR 0x00200000
+#define CIA_ERR_LOST_PCI_ADDR_PE 0x00400000
+#define CIA_ERR_LOST_RCVD_MAS_ABT 0x00800000
+#define CIA_ERR_LOST_RCVD_TAR_ABT 0x01000000
+#define CIA_ERR_LOST_PA_PTE_INV 0x02000000
+#define CIA_ERR_LOST_FROM_WRT_ERR 0x04000000
+#define CIA_ERR_LOST_IOA_TIMEOUT 0x08000000
+#define CIA_ERR_VALID 0x80000000
diff --git a/sys/arch/alpha/pci/ciavar.h b/sys/arch/alpha/pci/ciavar.h
index a9b30a86a16..24ec3b2aa2c 100644
--- a/sys/arch/alpha/pci/ciavar.h
+++ b/sys/arch/alpha/pci/ciavar.h
@@ -1,5 +1,4 @@
-/* $OpenBSD: ciavar.h,v 1.8 1998/06/28 02:43:23 angelos Exp $ */
-/* $NetBSD: ciavar.h,v 1.6 1996/11/25 03:49:11 cgd Exp $ */
+/* $NetBSD: ciavar.h,v 1.17 2000/03/19 01:43:25 thorpej Exp $ */
/*
* Copyright (c) 1995, 1996 Carnegie-Mellon University.
@@ -30,6 +29,7 @@
#include <dev/isa/isavar.h>
#include <dev/pci/pcivar.h>
+#include <alpha/pci/pci_sgmap_pte64.h>
/*
* A 21171 chipset's configuration.
@@ -43,16 +43,23 @@ struct cia_config {
bus_space_tag_t cc_iot, cc_memt;
struct alpha_pci_chipset cc_pc;
+ struct alpha_bus_dma_tag cc_dmat_direct;
+ struct alpha_bus_dma_tag cc_dmat_sgmap;
+
+ struct alpha_sgmap cc_sgmap;
+
u_int32_t cc_hae_mem;
u_int32_t cc_hae_io;
- u_int32_t cc_rev;
- u_int32_t cc_cnfg;
-
- int cc_flags;
-
-#define CCF_ISPYXIS 0x01 /* chip is a 21174 Pyxis */
-#define CCF_USEBWX 0x02 /* use BWX when possible */
+ u_int32_t cc_rev;
+ u_int32_t cc_cnfg;
+
+ int cc_flags;
+
+#define CCF_ISPYXIS 0x01 /* chip is a 21174 Pyxis */
+#define CCF_PYXISBUG 0x02
+#define CCF_PCI_USE_BWX 0x04 /* use BWX for PCI config space */
+#define CCF_BUS_USE_BWX 0x08 /* use BWX for bus space */
struct extent *cc_io_ex, *cc_d_mem_ex, *cc_s_mem_ex;
int cc_mallocsafe;
@@ -62,11 +69,16 @@ struct cia_softc {
struct device sc_dev;
struct cia_config *sc_ccp;
- /* XXX SGMAP info */
};
void cia_init __P((struct cia_config *, int));
void cia_pci_init __P((pci_chipset_tag_t, void *));
+void cia_dma_init __P((struct cia_config *));
+
+bus_space_tag_t cia_bwx_bus_io_init __P((void *));
+bus_space_tag_t cia_bwx_bus_mem_init __P((void *));
bus_space_tag_t cia_bus_io_init __P((void *));
bus_space_tag_t cia_bus_mem_init __P((void *));
+
+void cia_pyxis_intr_enable __P((int, int));
diff --git a/sys/arch/alpha/pci/lca.c b/sys/arch/alpha/pci/lca.c
index 469bbb115e1..a4ccb4473b8 100644
--- a/sys/arch/alpha/pci/lca.c
+++ b/sys/arch/alpha/pci/lca.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: lca.c,v 1.7 1997/01/24 19:57:41 niklas Exp $ */
+/* $OpenBSD: lca.c,v 1.8 2000/11/08 16:01:19 art Exp $ */
/* $NetBSD: lca.c,v 1.14 1996/12/05 01:39:35 cgd Exp $ */
/*
@@ -80,10 +80,10 @@ lcamatch(parent, match, aux)
#endif
void *aux;
{
- struct confargs *ca = aux;
+ struct mainbus_attach_args *ma = aux;
/* Make sure that we're looking for a LCA. */
- if (strcmp(ca->ca_name, lca_cd.cd_name) != 0)
+ if (strcmp(ma->ma_name, lca_cd.cd_name) != 0)
return (0);
if (lcafound)
diff --git a/sys/arch/alpha/pci/pci_550.c b/sys/arch/alpha/pci/pci_550.c
new file mode 100644
index 00000000000..8dc4730c4e4
--- /dev/null
+++ b/sys/arch/alpha/pci/pci_550.c
@@ -0,0 +1,450 @@
+/* $NetBSD: pci_550.c,v 1.18 2000/06/29 08:58:48 mrg Exp $ */
+
+/*-
+ * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center, and by Andrew Gallatin.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/syslog.h>
+
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/autoconf.h>
+#include <machine/rpb.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pciidereg.h>
+#include <dev/pci/pciidevar.h>
+
+#include <alpha/pci/ciareg.h>
+#include <alpha/pci/ciavar.h>
+
+#include <alpha/pci/pci_550.h>
+
+#include "sio.h"
+#if NSIO
+#include <alpha/pci/siovar.h>
+#endif
+
+int dec_550_intr_map __P((void *, pcitag_t, int, int,
+ pci_intr_handle_t *));
+const char *dec_550_intr_string __P((void *, pci_intr_handle_t));
+const struct evcnt *dec_550_intr_evcnt __P((void *, pci_intr_handle_t));
+void *dec_550_intr_establish __P((void *, pci_intr_handle_t,
+ int, int (*func)(void *), void *, char *));
+void dec_550_intr_disestablish __P((void *, void *));
+
+void *dec_550_pciide_compat_intr_establish __P((void *, struct device *,
+ struct pci_attach_args *, int, int (*)(void *), void *));
+
+#define DEC_550_PCI_IRQ_BEGIN 8
+#define DEC_550_MAX_IRQ (64 - DEC_550_PCI_IRQ_BEGIN)
+
+/*
+ * The Miata has a Pyxis, which seems to have problems with stray
+ * interrupts. Work around this by just ignoring strays.
+ */
+#define PCI_STRAY_MAX 0
+
+/*
+ * Some Miata models, notably models with a Cypress PCI-ISA bridge, have
+ * a PCI device (the OHCI USB controller) with interrupts tied to ISA IRQ
+ * lines. This IRQ is encoded as:
+ *
+ * line = 0xe0 | isa_irq;
+ */
+#define DEC_550_LINE_IS_ISA(line) ((line) >= 0xe0 && (line) <= 0xef)
+#define DEC_550_LINE_ISA_IRQ(line) ((line) & 0x0f)
+
+struct alpha_shared_intr *dec_550_pci_intr;
+
+void dec_550_iointr __P((void *framep, unsigned long vec));
+void dec_550_intr_enable __P((int irq));
+void dec_550_intr_disable __P((int irq));
+
+void
+pci_550_pickintr(ccp)
+ struct cia_config *ccp;
+{
+ bus_space_tag_t iot = ccp->cc_iot;
+ pci_chipset_tag_t pc = &ccp->cc_pc;
+#if 0
+ char *cp;
+#endif
+ int i;
+
+ pc->pc_intr_v = ccp;
+ pc->pc_intr_map = dec_550_intr_map;
+ pc->pc_intr_string = dec_550_intr_string;
+#if 0
+ pc->pc_intr_evcnt = dec_550_intr_evcnt;
+#endif
+ pc->pc_intr_establish = dec_550_intr_establish;
+ pc->pc_intr_disestablish = dec_550_intr_disestablish;
+
+ pc->pc_pciide_compat_intr_establish =
+ dec_550_pciide_compat_intr_establish;
+
+ /*
+ * DEC 550's interrupts are enabled via the Pyxis interrupt
+ * mask register. Nothing to map.
+ */
+
+ for (i = 0; i < DEC_550_MAX_IRQ; i++)
+ dec_550_intr_disable(i);
+
+ dec_550_pci_intr = alpha_shared_intr_alloc(DEC_550_MAX_IRQ);
+ for (i = 0; i < DEC_550_MAX_IRQ; i++) {
+ alpha_shared_intr_set_maxstrays(dec_550_pci_intr, i,
+ PCI_STRAY_MAX);
+ alpha_shared_intr_set_private(dec_550_pci_intr, i, ccp);
+#if 0
+ cp = alpha_shared_intr_string(dec_550_pci_intr, i);
+ sprintf(cp, "irq %d", i);
+ evcnt_attach_dynamic(alpha_shared_intr_evcnt(
+ dec_550_pci_intr, i), EVCNT_TYPE_INTR, NULL,
+ "dec_550", cp);
+#endif
+ }
+
+#if NSIO
+ sio_intr_setup(pc, iot);
+#endif
+
+ set_iointr(dec_550_iointr);
+}
+
+int
+dec_550_intr_map(ccv, bustag, buspin, line, ihp)
+ void *ccv;
+ pcitag_t bustag;
+ int buspin, line;
+ pci_intr_handle_t *ihp;
+{
+ struct cia_config *ccp = ccv;
+ pci_chipset_tag_t pc = &ccp->cc_pc;
+ int bus, device, function;
+
+ if (buspin == 0) {
+ /* No IRQ used. */
+ return 1;
+ }
+ if (buspin > 4) {
+ printf("dec_550_intr_map: bad interrupt pin %d\n", buspin);
+ return 1;
+ }
+
+ alpha_pci_decompose_tag(pc, bustag, &bus, &device, &function);
+
+ /*
+ * There are two main variants of Miata: Miata 1 (Intel SIO)
+ * and Miata {1.5,2} (Cypress).
+ *
+ * The Miata 1 has a CMD PCI IDE wired to compatibility mode at
+ * device 4 of bus 0. This variant apparently also has the
+ * Pyxis DMA bug.
+ *
+ * On the Miata 1.5 and Miata 2, the Cypress PCI-ISA bridge lives
+ * on device 7 of bus 0. This device has PCI IDE wired to
+ * compatibility mode on functions 1 and 2.
+ *
+ * There will be no interrupt mapping for these devices, so just
+ * bail out now.
+ */
+ if (bus == 0) {
+ if ((hwrpb->rpb_variation & SV_ST_MASK) < SV_ST_MIATA_1_5) {
+ /* Miata 1 */
+ if (device == 7)
+ panic("dec_550_intr_map: SIO device");
+ else if (device == 4)
+ return (1);
+ } else {
+ /* Miata 1.5 or Miata 2 */
+ if (device == 7) {
+ if (function == 0)
+ panic("dec_550_intr_map: SIO device");
+ if (function == 1 || function == 2)
+ return (1);
+ }
+ }
+ }
+
+ /*
+ * The console places the interrupt mapping in the "line" value.
+ * A value of (char)-1 indicates there is no mapping.
+ */
+ if (line == 0xff) {
+ printf("dec_550_intr_map: no mapping for %d/%d/%d\n",
+ bus, device, function);
+ return (1);
+ }
+
+#if NSIO == 0
+ if (DEC_550_LINE_IS_ISA(line)) {
+ printf("dec_550_intr_map: ISA IRQ %d for %d/%d/%d\n",
+ DEC_550_LINE_ISA_IRQ(line), bus, device, function);
+ return (1);
+ }
+#endif
+
+ if (DEC_550_LINE_IS_ISA(line) == 0 && line >= DEC_550_MAX_IRQ)
+ panic("dec_550_intr_map: dec 550 irq too large (%d)\n",
+ line);
+
+ *ihp = line;
+ return (0);
+}
+
+const char *
+dec_550_intr_string(ccv, ih)
+ void *ccv;
+ pci_intr_handle_t ih;
+{
+#if 0
+ struct cia_config *ccp = ccv;
+#endif
+ static char irqstr[16]; /* 12 + 2 + NULL + sanity */
+
+#if NSIO
+ if (DEC_550_LINE_IS_ISA(ih))
+ return (sio_intr_string(NULL /*XXX*/,
+ DEC_550_LINE_ISA_IRQ(ih)));
+#endif
+
+ if (ih >= DEC_550_MAX_IRQ)
+ panic("dec_550_intr_string: bogus 550 IRQ 0x%lx\n", ih);
+ sprintf(irqstr, "dec 550 irq %ld", ih);
+ return (irqstr);
+}
+
+#if 0
+const struct evcnt *
+dec_550_intr_evcnt(ccv, ih)
+ void *ccv;
+ pci_intr_handle_t ih;
+{
+#if 0
+ struct cia_config *ccp = ccv;
+#endif
+
+#if NSIO
+ if (DEC_550_LINE_IS_ISA(ih))
+ return (sio_intr_evcnt(NULL /*XXX*/,
+ DEC_550_LINE_ISA_IRQ(ih)));
+#endif
+
+ if (ih >= DEC_550_MAX_IRQ)
+ panic("dec_550_intr_evcnt: bogus 550 IRQ 0x%lx\n", ih);
+
+ return (alpha_shared_intr_evcnt(dec_550_pci_intr, ih));
+}
+#endif
+
+void *
+dec_550_intr_establish(ccv, ih, level, func, arg, name)
+ void *ccv, *arg;
+ pci_intr_handle_t ih;
+ int level;
+ int (*func) __P((void *));
+ char *name;
+{
+#if 0
+ struct cia_config *ccp = ccv;
+#endif
+ void *cookie;
+
+#if NSIO
+ if (DEC_550_LINE_IS_ISA(ih))
+ return (sio_intr_establish(NULL /*XXX*/,
+ DEC_550_LINE_ISA_IRQ(ih), IST_LEVEL, level, func, arg,
+ name));
+#endif
+
+ if (ih >= DEC_550_MAX_IRQ)
+ panic("dec_550_intr_establish: bogus dec 550 IRQ 0x%lx\n", ih);
+
+ cookie = alpha_shared_intr_establish(dec_550_pci_intr, ih, IST_LEVEL,
+ level, func, arg, name);
+
+ if (cookie != NULL && alpha_shared_intr_isactive(dec_550_pci_intr, ih))
+ dec_550_intr_enable(ih);
+ return (cookie);
+}
+
+void
+dec_550_intr_disestablish(ccv, cookie)
+ void *ccv, *cookie;
+{
+ struct cia_config *ccp = ccv;
+ struct alpha_shared_intrhand *ih = cookie;
+ unsigned int irq = ih->ih_num;
+ int s;
+
+#if NSIO
+ /*
+ * We have to determine if this is an ISA IRQ or not! We do this
+ * by checking to see if the intrhand points back to an intrhead
+ * that points to our cia_config. If not, it's an ISA IRQ. Pretty
+ * disgusting, eh?
+ */
+ if (ih->ih_intrhead->intr_private != ccp) {
+ sio_intr_disestablish(NULL /*XXX*/, cookie);
+ return;
+ }
+#endif
+
+ s = splhigh();
+
+ alpha_shared_intr_disestablish(dec_550_pci_intr, cookie,
+ "dec 550 irq");
+ if (alpha_shared_intr_isactive(dec_550_pci_intr, irq) == 0) {
+ dec_550_intr_disable(irq);
+ alpha_shared_intr_set_dfltsharetype(dec_550_pci_intr, irq,
+ IST_NONE);
+ }
+
+ splx(s);
+}
+
+void *
+dec_550_pciide_compat_intr_establish(v, dev, pa, chan, func, arg)
+ void *v;
+ struct device *dev;
+ struct pci_attach_args *pa;
+ int chan;
+ int (*func) __P((void *));
+ void *arg;
+{
+ pci_chipset_tag_t pc = pa->pa_pc;
+ void *cookie = NULL;
+ int bus, irq;
+
+ alpha_pci_decompose_tag(pc, pa->pa_tag, &bus, NULL, NULL);
+
+ /*
+ * If this isn't PCI bus #0, all bets are off.
+ */
+ if (bus != 0)
+ return (NULL);
+
+ irq = PCIIDE_COMPAT_IRQ(chan);
+#if NSIO
+ cookie = sio_intr_establish(NULL /*XXX*/, irq, IST_EDGE, IPL_BIO,
+ func, arg, "dec 550 irq");
+#endif
+ return (cookie);
+}
+
+void
+dec_550_iointr(framep, vec)
+ void *framep;
+ unsigned long vec;
+{
+ int irq;
+
+ if (vec >= 0x900) {
+ irq = ((vec - 0x900) >> 4);
+
+ if (irq >= DEC_550_MAX_IRQ)
+ panic("550_iointr: vec 0x%lx out of range\n", vec);
+
+ if (!alpha_shared_intr_dispatch(dec_550_pci_intr, irq)) {
+ alpha_shared_intr_stray(dec_550_pci_intr, irq,
+ "dec 550 irq");
+ if (ALPHA_SHARED_INTR_DISABLE(dec_550_pci_intr, irq))
+ dec_550_intr_disable(irq);
+ }
+ return;
+ }
+#if NSIO
+ if (vec >= 0x800) {
+ sio_iointr(framep, vec);
+ return;
+ }
+#endif
+ panic("dec_550_iointr: weird vec 0x%lx\n", vec);
+}
+
+void
+dec_550_intr_enable(irq)
+ int irq;
+{
+
+ cia_pyxis_intr_enable(irq + DEC_550_PCI_IRQ_BEGIN, 1);
+}
+
+void
+dec_550_intr_disable(irq)
+ int irq;
+{
+
+ cia_pyxis_intr_enable(irq + DEC_550_PCI_IRQ_BEGIN, 0);
+}
diff --git a/sys/arch/alpha/pci/pci_550.h b/sys/arch/alpha/pci/pci_550.h
new file mode 100644
index 00000000000..e9a3a1d8e5a
--- /dev/null
+++ b/sys/arch/alpha/pci/pci_550.h
@@ -0,0 +1,30 @@
+/* $NetBSD: pci_550.h,v 1.4 2000/06/05 21:47:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+void pci_550_pickintr __P((struct cia_config *));
diff --git a/sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c b/sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c
new file mode 100644
index 00000000000..2d88ae52685
--- /dev/null
+++ b/sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c
@@ -0,0 +1,698 @@
+/* $OpenBSD: pci_bwx_bus_io_chipdep.c,v 1.1 2000/11/08 16:01:21 art Exp $ */
+/* $NetBSD: pcs_bus_io_common.c,v 1.14 1996/12/02 22:19:35 cgd Exp $ */
+
+/*
+ * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Common PCI Chipset "bus I/O" functions, for chipsets which have to
+ * deal with only a single PCI interface chip in a machine.
+ *
+ * uses:
+ * CHIP name of the 'chip' it's being compiled for.
+ * CHIP_IO_BASE Sparse I/O space base to use.
+ */
+
+#include <sys/extent.h>
+
+#include <machine/bwx.h>
+
+#define __C(A,B) __CONCAT(A,B)
+#define __S(S) __STRING(S)
+
+/* mapping/unmapping */
+int __C(CHIP,_io_map) __P((void *, bus_addr_t, bus_size_t, int,
+ bus_space_handle_t *));
+void __C(CHIP,_io_unmap) __P((void *, bus_space_handle_t,
+ bus_size_t));
+int __C(CHIP,_io_subregion) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *));
+
+/* allocation/deallocation */
+int __C(CHIP,_io_alloc) __P((void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_addr_t, int, bus_addr_t *,
+ bus_space_handle_t *));
+void __C(CHIP,_io_free) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+/* barrier */
+inline void __C(CHIP,_io_barrier) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int));
+
+/* read (single) */
+inline u_int8_t __C(CHIP,_io_read_1) __P((void *, bus_space_handle_t,
+ bus_size_t));
+inline u_int16_t __C(CHIP,_io_read_2) __P((void *, bus_space_handle_t,
+ bus_size_t));
+inline u_int32_t __C(CHIP,_io_read_4) __P((void *, bus_space_handle_t,
+ bus_size_t));
+inline u_int64_t __C(CHIP,_io_read_8) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+/* read multiple */
+void __C(CHIP,_io_read_multi_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_io_read_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+void __C(CHIP,_io_read_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+void __C(CHIP,_io_read_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+/* read region */
+void __C(CHIP,_io_read_region_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_io_read_region_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+void __C(CHIP,_io_read_region_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+void __C(CHIP,_io_read_region_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+/* write (single) */
+inline void __C(CHIP,_io_write_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t));
+inline void __C(CHIP,_io_write_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t));
+inline void __C(CHIP,_io_write_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t));
+inline void __C(CHIP,_io_write_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t));
+
+/* write multiple */
+void __C(CHIP,_io_write_multi_1) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+void __C(CHIP,_io_write_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+void __C(CHIP,_io_write_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+void __C(CHIP,_io_write_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+/* write region */
+void __C(CHIP,_io_write_region_1) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+void __C(CHIP,_io_write_region_2) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+void __C(CHIP,_io_write_region_4) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+void __C(CHIP,_io_write_region_8) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+/* set multiple */
+void __C(CHIP,_io_set_multi_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t));
+void __C(CHIP,_io_set_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t));
+void __C(CHIP,_io_set_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t));
+void __C(CHIP,_io_set_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t));
+
+/* set region */
+void __C(CHIP,_io_set_region_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t));
+void __C(CHIP,_io_set_region_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t));
+void __C(CHIP,_io_set_region_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t));
+void __C(CHIP,_io_set_region_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t));
+
+/* copy */
+void __C(CHIP,_io_copy_1) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+void __C(CHIP,_io_copy_2) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+void __C(CHIP,_io_copy_4) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+void __C(CHIP,_io_copy_8) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+
+/* read multiple raw */
+void __C(CHIP,_io_read_raw_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_io_read_raw_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_io_read_raw_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+
+/* write multiple raw */
+void __C(CHIP,_io_write_raw_multi_2) __P((void *,
+ bus_space_handle_t, bus_size_t, const u_int8_t *,
+ bus_size_t));
+void __C(CHIP,_io_write_raw_multi_4) __P((void *,
+ bus_space_handle_t, bus_size_t, const u_int8_t *,
+ bus_size_t));
+void __C(CHIP,_io_write_raw_multi_8) __P((void *,
+ bus_space_handle_t, bus_size_t, const u_int8_t *,
+ bus_size_t));
+
+static long
+ __C(CHIP,_io_ex_storage)[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
+
+static struct alpha_bus_space __C(CHIP,_io_space) = {
+ /* cookie */
+ NULL,
+
+ /* mapping/unmapping */
+ __C(CHIP,_io_map),
+ __C(CHIP,_io_unmap),
+ __C(CHIP,_io_subregion),
+
+ /* allocation/deallocation */
+ __C(CHIP,_io_alloc),
+ __C(CHIP,_io_free),
+
+ /* barrier */
+ __C(CHIP,_io_barrier),
+
+ /* read (single) */
+ __C(CHIP,_io_read_1),
+ __C(CHIP,_io_read_2),
+ __C(CHIP,_io_read_4),
+ __C(CHIP,_io_read_8),
+
+ /* read multiple */
+ __C(CHIP,_io_read_multi_1),
+ __C(CHIP,_io_read_multi_2),
+ __C(CHIP,_io_read_multi_4),
+ __C(CHIP,_io_read_multi_8),
+
+ /* read region */
+ __C(CHIP,_io_read_region_1),
+ __C(CHIP,_io_read_region_2),
+ __C(CHIP,_io_read_region_4),
+ __C(CHIP,_io_read_region_8),
+
+ /* write (single) */
+ __C(CHIP,_io_write_1),
+ __C(CHIP,_io_write_2),
+ __C(CHIP,_io_write_4),
+ __C(CHIP,_io_write_8),
+
+ /* write multiple */
+ __C(CHIP,_io_write_multi_1),
+ __C(CHIP,_io_write_multi_2),
+ __C(CHIP,_io_write_multi_4),
+ __C(CHIP,_io_write_multi_8),
+
+ /* write region */
+ __C(CHIP,_io_write_region_1),
+ __C(CHIP,_io_write_region_2),
+ __C(CHIP,_io_write_region_4),
+ __C(CHIP,_io_write_region_8),
+
+ /* set multiple */
+ __C(CHIP,_io_set_multi_1),
+ __C(CHIP,_io_set_multi_2),
+ __C(CHIP,_io_set_multi_4),
+ __C(CHIP,_io_set_multi_8),
+
+ /* set region */
+ __C(CHIP,_io_set_region_1),
+ __C(CHIP,_io_set_region_2),
+ __C(CHIP,_io_set_region_4),
+ __C(CHIP,_io_set_region_8),
+
+ /* copy */
+ __C(CHIP,_io_copy_1),
+ __C(CHIP,_io_copy_2),
+ __C(CHIP,_io_copy_4),
+ __C(CHIP,_io_copy_8),
+
+ /* read multiple raw */
+ __C(CHIP,_io_read_raw_multi_2),
+ __C(CHIP,_io_read_raw_multi_4),
+ __C(CHIP,_io_read_raw_multi_8),
+
+ /* write multiple raw*/
+ __C(CHIP,_io_write_raw_multi_2),
+ __C(CHIP,_io_write_raw_multi_4),
+ __C(CHIP,_io_write_raw_multi_8),
+};
+
+bus_space_tag_t
+__C(CHIP,_bus_io_init)(v)
+ void *v;
+{
+ bus_space_tag_t t = &__C(CHIP,_io_space);
+ struct extent *ex;
+
+ t->abs_cookie = v;
+
+ ex = extent_create(__S(__C(CHIP,_bus_io)), 0x0UL, 0xffffffffUL,
+ M_DEVBUF, (caddr_t)__C(CHIP,_io_ex_storage),
+ sizeof(__C(CHIP,_io_ex_storage)), EX_NOWAIT|EX_NOCOALESCE);
+
+ CHIP_IO_EXTENT(v) = ex;
+
+ return (t);
+}
+
+int
+__C(CHIP,_io_map)(v, ioaddr, iosize, cacheable, iohp)
+ void *v;
+ bus_addr_t ioaddr;
+ bus_size_t iosize;
+ int cacheable;
+ bus_space_handle_t *iohp;
+{
+ int error;
+
+#ifdef EXTENT_DEBUG
+ printf("io: allocating 0x%lx to 0x%lx\n", ioaddr, ioaddr + iosize - 1);
+#endif
+ error = extent_alloc_region(CHIP_IO_EXTENT(v), ioaddr, iosize,
+ EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
+ if (error) {
+#ifdef EXTENT_DEBUG
+ printf("io: allocation failed (%d)\n", error);
+ extent_print(CHIP_IO_EXTENT(v));
+#endif
+ return (error);
+ }
+
+ *iohp = ALPHA_PHYS_TO_K0SEG(CHIP_IO_SYS_START(v)) + ioaddr;
+
+ return (0);
+}
+
+void
+__C(CHIP,_io_unmap)(v, ioh, iosize)
+ void *v;
+ bus_space_handle_t ioh;
+ bus_size_t iosize;
+{
+ bus_addr_t ioaddr;
+ int error;
+
+#ifdef EXTENT_DEBUG
+ printf("io: freeing handle 0x%lx for 0x%lx\n", ioh, iosize);
+#endif
+
+ ioaddr = ioh - ALPHA_PHYS_TO_K0SEG(CHIP_IO_SYS_START(v));
+
+#ifdef EXTENT_DEBUG
+ printf("io: freeing 0x%lx to 0x%lx\n", ioaddr, ioaddr + iosize - 1);
+#endif
+ error = extent_free(CHIP_IO_EXTENT(v), ioaddr, iosize,
+ EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
+ if (error) {
+ printf("%s: WARNING: could not unmap 0x%lx-0x%lx (error %d)\n",
+ __S(__C(CHIP,_io_unmap)), ioaddr, ioaddr + iosize - 1,
+ error);
+#ifdef EXTENT_DEBUG
+ extent_print(CHIP_IO_EXTENT(v));
+#endif
+ }
+}
+
+int
+__C(CHIP,_io_subregion)(v, ioh, offset, size, nioh)
+ void *v;
+ bus_space_handle_t ioh, *nioh;
+ bus_size_t offset, size;
+{
+
+ *nioh = ioh + offset;
+ return (0);
+}
+
+int
+__C(CHIP,_io_alloc)(v, rstart, rend, size, align, boundary, cacheable,
+ addrp, bshp)
+ void *v;
+ bus_addr_t rstart, rend, *addrp;
+ bus_size_t size, align, boundary;
+ int cacheable;
+ bus_space_handle_t *bshp;
+{
+
+ /* XXX XXX XXX XXX XXX XXX */
+ panic("%s not implemented", __S(__C(CHIP,_io_alloc)));
+}
+
+void
+__C(CHIP,_io_free)(v, bsh, size)
+ void *v;
+ bus_space_handle_t bsh;
+ bus_size_t size;
+{
+
+ /* XXX XXX XXX XXX XXX XXX */
+ panic("%s not implemented", __S(__C(CHIP,_io_free)));
+}
+
+inline void
+__C(CHIP,_io_barrier)(v, h, o, l, f)
+ void *v;
+ bus_space_handle_t h;
+ bus_size_t o, l;
+ int f;
+{
+
+ if ((f & BUS_BARRIER_READ) != 0)
+ alpha_mb();
+ else if ((f & BUS_BARRIER_WRITE) != 0)
+ alpha_wmb();
+}
+
+inline u_int8_t
+__C(CHIP,_io_read_1)(v, ioh, off)
+ void *v;
+ bus_space_handle_t ioh;
+ bus_size_t off;
+{
+ bus_addr_t addr;
+
+ addr = ioh + off;
+ alpha_mb();
+ return (alpha_ldbu((u_int8_t *)addr));
+}
+
+inline u_int16_t
+__C(CHIP,_io_read_2)(v, ioh, off)
+ void *v;
+ bus_space_handle_t ioh;
+ bus_size_t off;
+{
+ bus_addr_t addr;
+
+ addr = ioh + off;
+#ifdef DIAGNOSTIC
+ if (addr & 1)
+ panic(__S(__C(CHIP,_io_read_2)) ": addr 0x%lx not aligned",
+ addr);
+#endif
+ alpha_mb();
+ return (alpha_ldwu((u_int16_t *)addr));
+}
+
+inline u_int32_t
+__C(CHIP,_io_read_4)(v, ioh, off)
+ void *v;
+ bus_space_handle_t ioh;
+ bus_size_t off;
+{
+ bus_addr_t addr;
+
+ addr = ioh + off;
+#ifdef DIAGNOSTIC
+ if (addr & 3)
+ panic(__S(__C(CHIP,_io_read_4)) ": addr 0x%lx not aligned",
+ addr);
+#endif
+ alpha_mb();
+ return (*(u_int32_t *)addr);
+}
+
+inline u_int64_t
+__C(CHIP,_io_read_8)(v, ioh, off)
+ void *v;
+ bus_space_handle_t ioh;
+ bus_size_t off;
+{
+
+ /* XXX XXX XXX */
+ panic("%s not implemented", __S(__C(CHIP,_io_read_8)));
+}
+
+#define CHIP_io_read_multi_N(BYTES,TYPE) \
+void \
+__C(__C(CHIP,_io_read_multi_),BYTES)(v, h, o, a, c) \
+ void *v; \
+ bus_space_handle_t h; \
+ bus_size_t o, c; \
+ TYPE *a; \
+{ \
+ \
+ while (c-- > 0) { \
+ __C(CHIP,_io_barrier)(v, h, o, sizeof *a, \
+ BUS_BARRIER_READ); \
+ *a++ = __C(__C(CHIP,_io_read_),BYTES)(v, h, o); \
+ } \
+}
+CHIP_io_read_multi_N(1,u_int8_t)
+CHIP_io_read_multi_N(2,u_int16_t)
+CHIP_io_read_multi_N(4,u_int32_t)
+CHIP_io_read_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_io_read_region_<BYTES>(): read `c' consecutive
+ * values starting at offset `o' (advancing by the element size) into
+ * buffer `a'.  No per-access barrier, unlike the _multi_ variant.
+ */
+#define CHIP_io_read_region_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_io_read_region_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		*a++ = __C(__C(CHIP,_io_read_),BYTES)(v, h, o);		\
+		o += sizeof *a;						\
+	}								\
+}
+CHIP_io_read_region_N(1,u_int8_t)
+CHIP_io_read_region_N(2,u_int16_t)
+CHIP_io_read_region_N(4,u_int32_t)
+CHIP_io_read_region_N(8,u_int64_t)
+
+/*
+ * Write one byte to I/O space (BWX byte store, alpha_stb), then issue
+ * a full barrier so the store is pushed out before later accesses.
+ */
+inline void
+__C(CHIP,_io_write_1)(v, ioh, off, val)
+	void *v;
+	bus_space_handle_t ioh;
+	bus_size_t off;
+	u_int8_t val;
+{
+	bus_addr_t addr;
+
+	addr = ioh + off;
+	alpha_stb((u_int8_t *)addr, val);
+	alpha_mb();
+}
+
+/*
+ * Write a 16-bit value to I/O space (BWX word store); panics under
+ * DIAGNOSTIC on misaligned addresses, and barriers after the store.
+ */
+inline void
+__C(CHIP,_io_write_2)(v, ioh, off, val)
+	void *v;
+	bus_space_handle_t ioh;
+	bus_size_t off;
+	u_int16_t val;
+{
+	bus_addr_t addr;
+
+	addr = ioh + off;
+#ifdef DIAGNOSTIC
+	if (addr & 1)
+		panic(__S(__C(CHIP,_io_write_2)) ": addr 0x%lx not aligned",
+		    addr);
+#endif
+	alpha_stw((u_int16_t *)addr, val);
+	alpha_mb();
+}
+
+/*
+ * Write a 32-bit value to I/O space with a plain longword store;
+ * panics under DIAGNOSTIC on misalignment, barriers after the store.
+ */
+inline void
+__C(CHIP,_io_write_4)(v, ioh, off, val)
+	void *v;
+	bus_space_handle_t ioh;
+	bus_size_t off;
+	u_int32_t val;
+{
+	bus_addr_t addr;
+
+	addr = ioh + off;
+#ifdef DIAGNOSTIC
+	if (addr & 3)
+		panic(__S(__C(CHIP,_io_write_4)) ": addr 0x%lx not aligned",
+		    addr);
+#endif
+	*(u_int32_t *)addr = val;
+	alpha_mb();
+}
+
+/*
+ * 64-bit I/O-space write: unimplemented stub, always panics.
+ * The alpha_mb() after the panic is never reached.
+ */
+inline void
+__C(CHIP,_io_write_8)(v, ioh, off, val)
+	void *v;
+	bus_space_handle_t ioh;
+	bus_size_t off;
+	u_int64_t val;
+{
+
+	/* XXX XXX XXX */
+	panic("%s not implemented", __S(__C(CHIP,_io_write_8)));
+	alpha_mb();
+}
+
+/*
+ * Generate <CHIP>_io_write_multi_<BYTES>(): write `c' values from
+ * buffer `a' to the SAME I/O location, with a write barrier after
+ * each store.
+ */
+#define CHIP_io_write_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_io_write_multi_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	const TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_io_write_),BYTES)(v, h, o, *a++);		\
+		__C(CHIP,_io_barrier)(v, h, o, sizeof *a,		\
+		    BUS_BARRIER_WRITE);					\
+	}								\
+}
+CHIP_io_write_multi_N(1,u_int8_t)
+CHIP_io_write_multi_N(2,u_int16_t)
+CHIP_io_write_multi_N(4,u_int32_t)
+CHIP_io_write_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_io_write_region_<BYTES>(): write `c' consecutive
+ * values from buffer `a' starting at offset `o', advancing by the
+ * element size.  No per-access barrier.
+ */
+#define CHIP_io_write_region_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_io_write_region_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	const TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_io_write_),BYTES)(v, h, o, *a++);		\
+		o += sizeof *a;						\
+	}								\
+}
+CHIP_io_write_region_N(1,u_int8_t)
+CHIP_io_write_region_N(2,u_int16_t)
+CHIP_io_write_region_N(4,u_int32_t)
+CHIP_io_write_region_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_io_set_multi_<BYTES>(): write the single value
+ * `val' to the SAME I/O location `c' times, with a write barrier
+ * after each store.
+ */
+#define CHIP_io_set_multi_N(BYTES,TYPE)					\
+void									\
+__C(__C(CHIP,_io_set_multi_),BYTES)(v, h, o, val, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE val;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_io_write_),BYTES)(v, h, o, val);		\
+		__C(CHIP,_io_barrier)(v, h, o, sizeof val,		\
+		    BUS_BARRIER_WRITE);					\
+	}								\
+}
+CHIP_io_set_multi_N(1,u_int8_t)
+CHIP_io_set_multi_N(2,u_int16_t)
+CHIP_io_set_multi_N(4,u_int32_t)
+CHIP_io_set_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_io_set_region_<BYTES>(): store the single value
+ * `val' into `c' consecutive I/O locations starting at offset `o'.
+ */
+#define CHIP_io_set_region_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_io_set_region_),BYTES)(v, h, o, val, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE val;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_io_write_),BYTES)(v, h, o, val);		\
+		o += sizeof val;					\
+	}								\
+}
+CHIP_io_set_region_N(1,u_int8_t)
+CHIP_io_set_region_N(2,u_int16_t)
+CHIP_io_set_region_N(4,u_int32_t)
+CHIP_io_set_region_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_io_copy_<BYTES>(): copy `c' elements from
+ * (h1, o1) to (h2, o2) in ascending order.  Unlike the mem-space
+ * version, this does not special-case overlapping regions.
+ */
+#define CHIP_io_copy_N(BYTES)						\
+void									\
+__C(__C(CHIP,_io_copy_),BYTES)(v, h1, o1, h2, o2, c)			\
+	void *v;							\
+	bus_space_handle_t h1, h2;					\
+	bus_size_t o1, o2, c;						\
+{									\
+	bus_size_t i, o;						\
+									\
+	for (i = 0, o = 0; i < c; i++, o += BYTES)			\
+		__C(__C(CHIP,_io_write_),BYTES)(v, h2, o2 + o,		\
+		    __C(__C(CHIP,_io_read_),BYTES)(v, h1, o1 + o));	\
+}
+CHIP_io_copy_N(1)
+CHIP_io_copy_N(2)
+CHIP_io_copy_N(4)
+CHIP_io_copy_N(8)
+
+/*
+ * Generate <CHIP>_io_read_raw_multi_<BYTES>(): read BYTES-wide values
+ * from a single I/O location and scatter them into a byte buffer
+ * least-significant byte first.  `c' is a byte count and need not be
+ * a multiple of BYTES; the final partial value is truncated.
+ */
+#define CHIP_io_read_raw_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_io_read_raw_multi_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	u_int8_t *a;							\
+{									\
+	TYPE temp;							\
+	int i;								\
+									\
+	while (c > 0) {							\
+		__C(CHIP,_io_barrier)(v, h, o, BYTES, BUS_BARRIER_READ); \
+		temp = __C(__C(CHIP,_io_read_),BYTES)(v, h, o);		\
+		i = MIN(c, BYTES);					\
+		c -= i;							\
+		while (i--) {						\
+			*a++ = temp & 0xff;				\
+			temp >>= 8;					\
+		}							\
+	}								\
+}
+CHIP_io_read_raw_multi_N(2,u_int16_t)
+CHIP_io_read_raw_multi_N(4,u_int32_t)
+CHIP_io_read_raw_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_io_write_raw_multi_<BYTES>(): gather bytes from the
+ * buffer into a BYTES-wide value (least-significant byte first) and
+ * write it to a single I/O location.  For a final partial chunk
+ * (i >= c) the high-order bytes are left zero.
+ */
+#define CHIP_io_write_raw_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_io_write_raw_multi_),BYTES)(v, h, o, a, c)		\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	const u_int8_t *a;						\
+{									\
+	TYPE temp;							\
+	int i;								\
+									\
+	while (c > 0) {							\
+		temp = 0;						\
+		for (i = BYTES - 1; i >= 0; i--) {			\
+			temp <<= 8;					\
+			if (i < c)					\
+				temp |= *(a + i);			\
+		}							\
+		__C(__C(CHIP,_io_write_),BYTES)(v, h, o, temp);		\
+		__C(CHIP,_io_barrier)(v, h, o, BYTES, BUS_BARRIER_WRITE); \
+		i = MIN(c, BYTES);					\
+		c -= i;							\
+		a += i;							\
+	}								\
+}
+CHIP_io_write_raw_multi_N(2,u_int16_t)
+CHIP_io_write_raw_multi_N(4,u_int32_t)
+CHIP_io_write_raw_multi_N(8,u_int64_t)
diff --git a/sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c b/sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c
new file mode 100644
index 00000000000..7a1ee194f6f
--- /dev/null
+++ b/sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c
@@ -0,0 +1,720 @@
+/* $OpenBSD: pci_bwx_bus_mem_chipdep.c,v 1.1 2000/11/08 16:01:21 art Exp $ */
+/* $NetBSD: pcs_bus_mem_common.c,v 1.15 1996/12/02 22:19:36 cgd Exp $ */
+
+/*
+ * Copyright (c) 1995, 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Common PCI Chipset "bus I/O" functions, for chipsets which have to
+ * deal with only a single PCI interface chip in a machine.
+ *
+ * uses:
+ * CHIP name of the 'chip' it's being compiled for.
+ * CHIP_MEM_BASE Mem space base to use.
+ * CHIP_MEM_EX_STORE
+ * If defined, device-provided static storage area
+ * for the memory space extent. If this is
+ * defined, CHIP_MEM_EX_STORE_SIZE must also be
+ * defined. If this is not defined, a static area
+ * will be declared.
+ * CHIP_MEM_EX_STORE_SIZE
+ * Size of the device-provided static storage area
+ * for the memory space extent.
+ */
+
+#include <sys/extent.h>
+#include <machine/bwx.h>
+
+#define __C(A,B) __CONCAT(A,B)
+#define __S(S) __STRING(S)
+
+/* mapping/unmapping */
+int __C(CHIP,_mem_map) __P((void *, bus_addr_t, bus_size_t, int,
+ bus_space_handle_t *));
+void __C(CHIP,_mem_unmap) __P((void *, bus_space_handle_t,
+ bus_size_t));
+int __C(CHIP,_mem_subregion) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *));
+
+/* allocation/deallocation */
+int __C(CHIP,_mem_alloc) __P((void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_addr_t, int, bus_addr_t *,
+ bus_space_handle_t *));
+void __C(CHIP,_mem_free) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+/* barrier */
+inline void __C(CHIP,_mem_barrier) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int));
+
+/* read (single) */
+inline u_int8_t __C(CHIP,_mem_read_1) __P((void *, bus_space_handle_t,
+ bus_size_t));
+inline u_int16_t __C(CHIP,_mem_read_2) __P((void *, bus_space_handle_t,
+ bus_size_t));
+inline u_int32_t __C(CHIP,_mem_read_4) __P((void *, bus_space_handle_t,
+ bus_size_t));
+inline u_int64_t __C(CHIP,_mem_read_8) __P((void *, bus_space_handle_t,
+ bus_size_t));
+
+/* read multiple */
+void __C(CHIP,_mem_read_multi_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_mem_read_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+void __C(CHIP,_mem_read_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+void __C(CHIP,_mem_read_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+/* read region */
+void __C(CHIP,_mem_read_region_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_mem_read_region_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t));
+void __C(CHIP,_mem_read_region_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t));
+void __C(CHIP,_mem_read_region_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t));
+
+/* write (single) */
+inline void __C(CHIP,_mem_write_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t));
+inline void __C(CHIP,_mem_write_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t));
+inline void __C(CHIP,_mem_write_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t));
+inline void __C(CHIP,_mem_write_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t));
+
+/* write multiple */
+void __C(CHIP,_mem_write_multi_1) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+void __C(CHIP,_mem_write_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+void __C(CHIP,_mem_write_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+void __C(CHIP,_mem_write_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+/* write region */
+void __C(CHIP,_mem_write_region_1) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t));
+void __C(CHIP,_mem_write_region_2) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t));
+void __C(CHIP,_mem_write_region_4) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t));
+void __C(CHIP,_mem_write_region_8) __P((void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t));
+
+/* set multiple */
+void __C(CHIP,_mem_set_multi_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t));
+void __C(CHIP,_mem_set_multi_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t));
+void __C(CHIP,_mem_set_multi_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t));
+void __C(CHIP,_mem_set_multi_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t));
+
+/* set region */
+void __C(CHIP,_mem_set_region_1) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t));
+void __C(CHIP,_mem_set_region_2) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t));
+void __C(CHIP,_mem_set_region_4) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t));
+void __C(CHIP,_mem_set_region_8) __P((void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t));
+
+/* copy */
+void __C(CHIP,_mem_copy_1) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+void __C(CHIP,_mem_copy_2) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+void __C(CHIP,_mem_copy_4) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+void __C(CHIP,_mem_copy_8) __P((void *, bus_space_handle_t,
+ bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t));
+
+/* read multiple raw */
+void __C(CHIP,_mem_read_raw_multi_2) __P((void *,
+ bus_space_handle_t, bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_mem_read_raw_multi_4) __P((void *,
+ bus_space_handle_t, bus_size_t, u_int8_t *, bus_size_t));
+void __C(CHIP,_mem_read_raw_multi_8) __P((void *,
+ bus_space_handle_t, bus_size_t, u_int8_t *, bus_size_t));
+
+/* write multiple raw */
+void __C(CHIP,_mem_write_raw_multi_2) __P((void *,
+ bus_space_handle_t, bus_size_t, const u_int8_t *,
+ bus_size_t));
+void __C(CHIP,_mem_write_raw_multi_4) __P((void *,
+ bus_space_handle_t, bus_size_t, const u_int8_t *,
+ bus_size_t));
+void __C(CHIP,_mem_write_raw_multi_8) __P((void *,
+ bus_space_handle_t, bus_size_t, const u_int8_t *,
+ bus_size_t));
+
+/* Static backing store for the memory-space extent map (up to 8 regions). */
+static long
+    __C(CHIP,_mem_ex_storage)[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
+
+/*
+ * bus_space method table for this chip's PCI memory space; function
+ * pointers are filled in with the template-generated routines below.
+ */
+static struct alpha_bus_space __C(CHIP,_mem_space) = {
+	/* cookie */
+	NULL,
+
+	/* mapping/unmapping */
+	__C(CHIP,_mem_map),
+	__C(CHIP,_mem_unmap),
+	__C(CHIP,_mem_subregion),
+
+	/* allocation/deallocation */
+	__C(CHIP,_mem_alloc),
+	__C(CHIP,_mem_free),
+
+	/* barrier */
+	__C(CHIP,_mem_barrier),
+
+	/* read (single) */
+	__C(CHIP,_mem_read_1),
+	__C(CHIP,_mem_read_2),
+	__C(CHIP,_mem_read_4),
+	__C(CHIP,_mem_read_8),
+
+	/* read multiple */
+	__C(CHIP,_mem_read_multi_1),
+	__C(CHIP,_mem_read_multi_2),
+	__C(CHIP,_mem_read_multi_4),
+	__C(CHIP,_mem_read_multi_8),
+
+	/* read region */
+	__C(CHIP,_mem_read_region_1),
+	__C(CHIP,_mem_read_region_2),
+	__C(CHIP,_mem_read_region_4),
+	__C(CHIP,_mem_read_region_8),
+
+	/* write (single) */
+	__C(CHIP,_mem_write_1),
+	__C(CHIP,_mem_write_2),
+	__C(CHIP,_mem_write_4),
+	__C(CHIP,_mem_write_8),
+
+	/* write multiple */
+	__C(CHIP,_mem_write_multi_1),
+	__C(CHIP,_mem_write_multi_2),
+	__C(CHIP,_mem_write_multi_4),
+	__C(CHIP,_mem_write_multi_8),
+
+	/* write region */
+	__C(CHIP,_mem_write_region_1),
+	__C(CHIP,_mem_write_region_2),
+	__C(CHIP,_mem_write_region_4),
+	__C(CHIP,_mem_write_region_8),
+
+	/* set multiple */
+	__C(CHIP,_mem_set_multi_1),
+	__C(CHIP,_mem_set_multi_2),
+	__C(CHIP,_mem_set_multi_4),
+	__C(CHIP,_mem_set_multi_8),
+
+	/* set region */
+	__C(CHIP,_mem_set_region_1),
+	__C(CHIP,_mem_set_region_2),
+	__C(CHIP,_mem_set_region_4),
+	__C(CHIP,_mem_set_region_8),
+
+	/* copy */
+	__C(CHIP,_mem_copy_1),
+	__C(CHIP,_mem_copy_2),
+	__C(CHIP,_mem_copy_4),
+	__C(CHIP,_mem_copy_8),
+
+	/* read multiple raw */
+	__C(CHIP,_mem_read_raw_multi_2),
+	__C(CHIP,_mem_read_raw_multi_4),
+	__C(CHIP,_mem_read_raw_multi_8),
+
+	/* write multiple raw*/
+	__C(CHIP,_mem_write_raw_multi_2),
+	__C(CHIP,_mem_write_raw_multi_4),
+	__C(CHIP,_mem_write_raw_multi_8),
+};
+
+/*
+ * Initialize this chip's memory-space bus_space tag: record the
+ * chipset cookie and create the extent map that tracks allocated
+ * bus-memory address ranges.
+ */
+bus_space_tag_t
+__C(CHIP,_bus_mem_init)(v)
+	void *v;
+{
+	bus_space_tag_t t = &__C(CHIP,_mem_space);
+	struct extent *ex;
+
+	t->abs_cookie = v;
+
+	/*
+	 * NOTE(review): the extent is named "<CHIP>_bus_dmem" -- confirm
+	 * "dmem" is intended here rather than "mem".  The extent_create()
+	 * result is also not checked for NULL; with static storage and
+	 * EX_NOWAIT this presumably cannot fail -- verify.
+	 */
+	ex = extent_create(__S(__C(CHIP,_bus_dmem)), 0x0UL,
+	    0xffffffffffffffffUL, M_DEVBUF,
+	    (caddr_t)__C(CHIP,_mem_ex_storage),
+	    sizeof(__C(CHIP,_mem_ex_storage)), EX_NOWAIT|EX_NOCOALESCE);
+
+	CHIP_MEM_EXTENT(v) = ex;
+
+	return (t);
+}
+
+/*
+ * Map a range of PCI memory space.  Reserves [memaddr, memaddr+memsize)
+ * in the extent map, then returns a handle that is simply the K0SEG
+ * (direct-mapped) virtual address of the region within the chip's
+ * memory window.  Returns 0 on success or an extent(9) error.
+ * The `cacheable' argument is unused on this (BWX) chipset.
+ */
+int
+__C(CHIP,_mem_map)(v, memaddr, memsize, cacheable, memhp)
+	void *v;
+	bus_addr_t memaddr;
+	bus_size_t memsize;
+	int cacheable;
+	bus_space_handle_t *memhp;
+{
+	int error;
+
+#ifdef EXTENT_DEBUG
+	printf("mem: allocating 0x%lx to 0x%lx\n", memaddr,
+	    memaddr + memsize - 1);
+#endif
+	error = extent_alloc_region(CHIP_MEM_EXTENT(v), memaddr, memsize,
+	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
+	if (error) {
+#ifdef EXTENT_DEBUG
+		printf("mem: allocation failed (%d)\n", error);
+		extent_print(CHIP_MEM_EXTENT(v));
+#endif
+		return (error);
+	}
+
+	*memhp = ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v)) + memaddr;
+
+	return (0);
+}
+
+/*
+ * Unmap a range of PCI memory space: recover the bus address from the
+ * K0SEG handle and return the range to the extent map.  A failed
+ * extent_free() is reported but not fatal.
+ */
+void
+__C(CHIP,_mem_unmap)(v, memh, memsize)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t memsize;
+{
+	bus_addr_t memaddr;
+	int error;
+
+#ifdef EXTENT_DEBUG
+	printf("mem: freeing handle 0x%lx for 0x%lx\n", memh, memsize);
+#endif
+	memaddr = memh - ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v));
+
+#ifdef EXTENT_DEBUG
+	/* Fixed: this was a bare argument list with no printf() call. */
+	printf("mem: freeing 0x%lx to 0x%lx\n", memaddr,
+	    memaddr + memsize - 1);
+#endif
+	error = extent_free(CHIP_MEM_EXTENT(v), memaddr, memsize,
+	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
+	if (error) {
+		printf("%s: WARNING: could not unmap 0x%lx-0x%lx (error %d)\n",
+		    __S(__C(CHIP,_mem_unmap)), memaddr, memaddr + memsize - 1,
+		    error);
+#ifdef EXTENT_DEBUG
+		extent_print(CHIP_MEM_EXTENT(v));
+#endif
+	}
+}
+
+/*
+ * Create a subregion handle: since handles are plain addresses, this
+ * is just base handle + offset.  Always succeeds.
+ */
+int
+__C(CHIP,_mem_subregion)(v, memh, offset, size, nmemh)
+	void *v;
+	bus_space_handle_t memh, *nmemh;
+	bus_size_t offset, size;
+{
+
+	*nmemh = memh + offset;
+	return (0);
+}
+
+/*
+ * Allocate-anywhere memory-space mapping: unimplemented stub,
+ * always panics.
+ */
+int
+__C(CHIP,_mem_alloc)(v, rstart, rend, size, align, boundary, cacheable,
+    addrp, bshp)
+	void *v;
+	bus_addr_t rstart, rend, *addrp;
+	bus_size_t size, align, boundary;
+	int cacheable;
+	bus_space_handle_t *bshp;
+{
+
+	/* XXX XXX XXX XXX XXX XXX */
+	panic("%s not implemented", __S(__C(CHIP,_mem_alloc)));
+}
+
+/*
+ * Free a mapping obtained from _mem_alloc: unimplemented stub,
+ * always panics.
+ */
+void
+__C(CHIP,_mem_free)(v, bsh, size)
+	void *v;
+	bus_space_handle_t bsh;
+	bus_size_t size;
+{
+
+	/* XXX XXX XXX XXX XXX XXX */
+	panic("%s not implemented", __S(__C(CHIP,_mem_free)));
+}
+
+/*
+ * Enforce ordering of memory-space accesses: full barrier for reads,
+ * write barrier for writes; READ wins when both flags are set.
+ */
+inline void
+__C(CHIP,_mem_barrier)(v, h, o, l, f)
+	void *v;
+	bus_space_handle_t h;
+	bus_size_t o, l;
+	int f;
+{
+
+	if ((f & BUS_BARRIER_READ) != 0)
+		alpha_mb();
+	else if ((f & BUS_BARRIER_WRITE) != 0)
+		alpha_wmb();
+}
+
+/*
+ * Read one byte from memory space (BWX byte load) after a full barrier.
+ */
+inline u_int8_t
+__C(CHIP,_mem_read_1)(v, memh, off)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+{
+	bus_addr_t addr;
+
+	addr = memh + off;
+	alpha_mb();
+	return (alpha_ldbu((u_int8_t *)addr));
+}
+
+/*
+ * Read a 16-bit value from memory space (BWX word load); panics under
+ * DIAGNOSTIC if the address is not 2-byte aligned.
+ */
+inline u_int16_t
+__C(CHIP,_mem_read_2)(v, memh, off)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+{
+	bus_addr_t addr;
+
+	addr = memh + off;
+#ifdef DIAGNOSTIC
+	if (addr & 1)
+		panic(__S(__C(CHIP,_mem_read_2)) ": addr 0x%lx not aligned",
+		    addr);
+#endif
+	alpha_mb();
+	return (alpha_ldwu((u_int16_t *)addr));
+}
+
+/*
+ * Read a 32-bit value from memory space with a plain longword load;
+ * panics under DIAGNOSTIC if the address is not 4-byte aligned.
+ */
+inline u_int32_t
+__C(CHIP,_mem_read_4)(v, memh, off)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+{
+	bus_addr_t addr;
+
+	addr = memh + off;
+#ifdef DIAGNOSTIC
+	if (addr & 3)
+		panic(__S(__C(CHIP,_mem_read_4)) ": addr 0x%lx not aligned",
+		    addr);
+#endif
+	alpha_mb();
+	return (*(u_int32_t *)addr);
+}
+
+/*
+ * 64-bit memory-space read: unimplemented stub, always panics
+ * (after an otherwise pointless barrier).
+ */
+inline u_int64_t
+__C(CHIP,_mem_read_8)(v, memh, off)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+{
+
+	alpha_mb();
+
+	/* XXX XXX XXX */
+	panic("%s not implemented", __S(__C(CHIP,_mem_read_8)));
+}
+
+/*
+ * Generate <CHIP>_mem_read_multi_<BYTES>(): read `c' values from the
+ * SAME memory-space location into buffer `a', with a read barrier
+ * before each access.
+ */
+#define CHIP_mem_read_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_read_multi_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(CHIP,_mem_barrier)(v, h, o, sizeof *a,		\
+		    BUS_BARRIER_READ);					\
+		*a++ = __C(__C(CHIP,_mem_read_),BYTES)(v, h, o);	\
+	}								\
+}
+CHIP_mem_read_multi_N(1,u_int8_t)
+CHIP_mem_read_multi_N(2,u_int16_t)
+CHIP_mem_read_multi_N(4,u_int32_t)
+CHIP_mem_read_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_mem_read_region_<BYTES>(): read `c' consecutive
+ * values starting at offset `o' (advancing by the element size).
+ */
+#define CHIP_mem_read_region_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_read_region_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		*a++ = __C(__C(CHIP,_mem_read_),BYTES)(v, h, o);	\
+		o += sizeof *a;						\
+	}								\
+}
+CHIP_mem_read_region_N(1,u_int8_t)
+CHIP_mem_read_region_N(2,u_int16_t)
+CHIP_mem_read_region_N(4,u_int32_t)
+CHIP_mem_read_region_N(8,u_int64_t)
+
+/*
+ * Write one byte to memory space (BWX byte store), then barrier.
+ */
+inline void
+__C(CHIP,_mem_write_1)(v, memh, off, val)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+	u_int8_t val;
+{
+	bus_addr_t addr;
+
+	addr = memh + off;
+	alpha_stb((u_int8_t *)addr, val);
+	alpha_mb();
+}
+
+/*
+ * Write a 16-bit value to memory space (BWX word store); panics under
+ * DIAGNOSTIC on misalignment, barriers after the store.
+ */
+inline void
+__C(CHIP,_mem_write_2)(v, memh, off, val)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+	u_int16_t val;
+{
+	bus_addr_t addr;
+
+	addr = memh + off;
+#ifdef DIAGNOSTIC
+	if (addr & 1)
+		panic(__S(__C(CHIP,_mem_write_2)) ": addr 0x%lx not aligned",
+		    addr);
+#endif
+	alpha_stw((u_int16_t *)addr, val);
+	alpha_mb();
+}
+
+/*
+ * Write a 32-bit value to memory space with a plain longword store;
+ * panics under DIAGNOSTIC on misalignment, barriers after the store.
+ */
+inline void
+__C(CHIP,_mem_write_4)(v, memh, off, val)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+	u_int32_t val;
+{
+	bus_addr_t addr;
+
+	addr = memh + off;
+#ifdef DIAGNOSTIC
+	if (addr & 3)
+		panic(__S(__C(CHIP,_mem_write_4)) ": addr 0x%lx not aligned",
+		    addr);
+#endif
+	*(u_int32_t *)addr = val;
+	alpha_mb();
+}
+
+/*
+ * 64-bit memory-space write: unimplemented stub, always panics.
+ * The alpha_mb() after the panic is never reached.
+ */
+inline void
+__C(CHIP,_mem_write_8)(v, memh, off, val)
+	void *v;
+	bus_space_handle_t memh;
+	bus_size_t off;
+	u_int64_t val;
+{
+
+	/* XXX XXX XXX */
+	panic("%s not implemented", __S(__C(CHIP,_mem_write_8)));
+	alpha_mb();
+}
+
+/*
+ * Generate <CHIP>_mem_write_multi_<BYTES>(): write `c' values from
+ * buffer `a' to the SAME memory-space location, with a write barrier
+ * after each store.
+ */
+#define CHIP_mem_write_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_write_multi_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	const TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_mem_write_),BYTES)(v, h, o, *a++);	\
+		__C(CHIP,_mem_barrier)(v, h, o, sizeof *a,		\
+		    BUS_BARRIER_WRITE);					\
+	}								\
+}
+CHIP_mem_write_multi_N(1,u_int8_t)
+CHIP_mem_write_multi_N(2,u_int16_t)
+CHIP_mem_write_multi_N(4,u_int32_t)
+CHIP_mem_write_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_mem_write_region_<BYTES>(): write `c' consecutive
+ * values from buffer `a' starting at offset `o'.
+ */
+#define CHIP_mem_write_region_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_write_region_),BYTES)(v, h, o, a, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	const TYPE *a;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_mem_write_),BYTES)(v, h, o, *a++);	\
+		o += sizeof *a;						\
+	}								\
+}
+CHIP_mem_write_region_N(1,u_int8_t)
+CHIP_mem_write_region_N(2,u_int16_t)
+CHIP_mem_write_region_N(4,u_int32_t)
+CHIP_mem_write_region_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_mem_set_multi_<BYTES>(): write the single value
+ * `val' to the SAME memory-space location `c' times, with a write
+ * barrier after each store.
+ */
+#define CHIP_mem_set_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_set_multi_),BYTES)(v, h, o, val, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE val;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_mem_write_),BYTES)(v, h, o, val);		\
+		__C(CHIP,_mem_barrier)(v, h, o, sizeof val,		\
+		    BUS_BARRIER_WRITE);					\
+	}								\
+}
+CHIP_mem_set_multi_N(1,u_int8_t)
+CHIP_mem_set_multi_N(2,u_int16_t)
+CHIP_mem_set_multi_N(4,u_int32_t)
+CHIP_mem_set_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_mem_set_region_<BYTES>(): store the single value
+ * `val' into `c' consecutive memory-space locations.
+ */
+#define CHIP_mem_set_region_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_set_region_),BYTES)(v, h, o, val, c)			\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	TYPE val;							\
+{									\
+									\
+	while (c-- > 0) {						\
+		__C(__C(CHIP,_mem_write_),BYTES)(v, h, o, val);		\
+		o += sizeof val;					\
+	}								\
+}
+CHIP_mem_set_region_N(1,u_int8_t)
+CHIP_mem_set_region_N(2,u_int16_t)
+CHIP_mem_set_region_N(4,u_int32_t)
+CHIP_mem_set_region_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_mem_copy_<BYTES>(): copy `c' elements from
+ * (h1, o1) to (h2, o2).  Fast path: when both handles have bit 63
+ * set (presumably K0SEG direct-mapped addresses -- verify) the copy
+ * is a plain bcopy().  Otherwise copy element-wise, going backwards
+ * when the regions share a handle and the destination is above the
+ * source, so overlapping copies are not corrupted.
+ */
+#define CHIP_mem_copy_N(BYTES)						\
+void									\
+__C(__C(CHIP,_mem_copy_),BYTES)(v, h1, o1, h2, o2, c)			\
+	void *v;							\
+	bus_space_handle_t h1, h2;					\
+	bus_size_t o1, o2, c;						\
+{									\
+	bus_size_t i, o;						\
+									\
+	if ((h1 >> 63) != 0 && (h2 >> 63) != 0) {			\
+		bcopy((void *)(h1 + o1), (void *)(h2 + o2), c * BYTES);	\
+		return;							\
+	}								\
+									\
+	/* Circumvent a common case of overlapping problems */		\
+	if (h1 == h2 && o2 > o1)					\
+		for (i = 0, o = (c - 1) * BYTES; i < c; i++, o -= BYTES)\
+			__C(__C(CHIP,_mem_write_),BYTES)(v, h2, o2 + o,	\
+			    __C(__C(CHIP,_mem_read_),BYTES)(v, h1, o1 + o));\
+	else								\
+		for (i = 0, o = 0; i < c; i++, o += BYTES)		\
+			__C(__C(CHIP,_mem_write_),BYTES)(v, h2, o2 + o,	\
+			    __C(__C(CHIP,_mem_read_),BYTES)(v, h1, o1 + o));\
+}
+CHIP_mem_copy_N(1)
+CHIP_mem_copy_N(2)
+CHIP_mem_copy_N(4)
+CHIP_mem_copy_N(8)
+
+/*
+ * Generate <CHIP>_mem_read_raw_multi_<BYTES>(): read BYTES-wide
+ * values from a single memory-space location and scatter them into a
+ * byte buffer least-significant byte first.  `c' is a byte count and
+ * need not be a multiple of BYTES; the final value is truncated.
+ */
+#define CHIP_mem_read_raw_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_read_raw_multi_),BYTES)(v, h, o, a, c)		\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	u_int8_t *a;							\
+{									\
+	TYPE temp;							\
+	int i;								\
+									\
+	while (c > 0) {							\
+		__C(CHIP,_mem_barrier)(v, h, o, BYTES, BUS_BARRIER_READ); \
+		temp = __C(__C(CHIP,_mem_read_),BYTES)(v, h, o);	\
+		i = MIN(c, BYTES);					\
+		c -= i;							\
+		while (i--) {						\
+			*a++ = temp & 0xff;				\
+			temp >>= 8;					\
+		}							\
+	}								\
+}
+CHIP_mem_read_raw_multi_N(2,u_int16_t)
+CHIP_mem_read_raw_multi_N(4,u_int32_t)
+CHIP_mem_read_raw_multi_N(8,u_int64_t)
+
+/*
+ * Generate <CHIP>_mem_write_raw_multi_<BYTES>(): gather bytes from
+ * the buffer into a BYTES-wide value (least-significant byte first)
+ * and write it to a single memory-space location.  For a final
+ * partial chunk (i >= c) the high-order bytes are left zero.
+ */
+#define CHIP_mem_write_raw_multi_N(BYTES,TYPE)				\
+void									\
+__C(__C(CHIP,_mem_write_raw_multi_),BYTES)(v, h, o, a, c)		\
+	void *v;							\
+	bus_space_handle_t h;						\
+	bus_size_t o, c;						\
+	const u_int8_t *a;						\
+{									\
+	TYPE temp;							\
+	int i;								\
+									\
+	while (c > 0) {							\
+		temp = 0;						\
+		for (i = BYTES - 1; i >= 0; i--) {			\
+			temp <<= 8;					\
+			if (i < c)					\
+				temp |= *(a + i);			\
+		}							\
+		__C(__C(CHIP,_mem_write_),BYTES)(v, h, o, temp);	\
+		__C(CHIP,_mem_barrier)(v, h, o, BYTES, BUS_BARRIER_WRITE); \
+		i = MIN(c, BYTES);					\
+		c -= i;							\
+		a += i;							\
+	}								\
+}
+CHIP_mem_write_raw_multi_N(2,u_int16_t)
+CHIP_mem_write_raw_multi_N(4,u_int32_t)
+CHIP_mem_write_raw_multi_N(8,u_int64_t)
diff --git a/sys/arch/alpha/pci/pci_sgmap_pte64.c b/sys/arch/alpha/pci/pci_sgmap_pte64.c
new file mode 100644
index 00000000000..88768b619c8
--- /dev/null
+++ b/sys/arch/alpha/pci/pci_sgmap_pte64.c
@@ -0,0 +1,54 @@
+/* $NetBSD: pci_sgmap_pte64.c,v 1.4 2000/06/29 08:58:49 mrg Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+
+#include <vm/vm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/bus.h>
+
+#include <alpha/pci/pci_sgmap_pte64.h>
+
+#include <alpha/common/sgmap_typedep.c>
diff --git a/sys/arch/alpha/pci/pci_sgmap_pte64.h b/sys/arch/alpha/pci/pci_sgmap_pte64.h
new file mode 100644
index 00000000000..ac031078df4
--- /dev/null
+++ b/sys/arch/alpha/pci/pci_sgmap_pte64.h
@@ -0,0 +1,57 @@
+/* $NetBSD: pci_sgmap_pte64.h,v 1.2 1997/06/06 23:59:29 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the NetBSD
+ * Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define SGMAP_TYPE pci_sgmap_pte64
+#define SGMAP_PTE_TYPE u_int64_t
+#define SGMAP_PTE_SPACING 1
+
+/*
+ * A 64-bit PCI SGMAP page table entry looks like this:
+ *
+ * 63 n+1 n 1 0
+ * | | Page address | V |
+ *
+ * The page address is bits <n:13> of the physical address of the
+ * page. The V bit is set if the PTE holds a valid mapping.
+ */
+#define SGPTE_PGADDR_SHIFT 12
+#define SGPTE_VALID 0x0000000000000001UL
+
+#include <alpha/common/sgmapvar.h>
+#include <alpha/common/sgmap_typedep.h>
diff --git a/sys/arch/alpha/pci/sio.c b/sys/arch/alpha/pci/sio.c
index 701bb0aee1c..4c09fed9399 100644
--- a/sys/arch/alpha/pci/sio.c
+++ b/sys/arch/alpha/pci/sio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sio.c,v 1.15 1999/02/08 18:17:21 millert Exp $ */
+/* $OpenBSD: sio.c,v 1.16 2000/11/08 16:01:21 art Exp $ */
/* $NetBSD: sio.c,v 1.15 1996/12/05 01:39:36 cgd Exp $ */
/*
@@ -191,7 +191,6 @@ sio_bridge_callback(v)
ic.ic_attach_hook = sio_isa_attach_hook;
ic.ic_intr_establish = sio_intr_establish;
ic.ic_intr_disestablish = sio_intr_disestablish;
- ic.ic_intr_check = sio_intr_check;
sa.sa_iba.iba_busname = "isa";
sa.sa_iba.iba_iot = sc->sc_iot;
diff --git a/sys/arch/alpha/pci/sio_pic.c b/sys/arch/alpha/pci/sio_pic.c
index 56ad7958e5a..8a14f5ff969 100644
--- a/sys/arch/alpha/pci/sio_pic.c
+++ b/sys/arch/alpha/pci/sio_pic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sio_pic.c,v 1.14 1999/02/08 18:17:21 millert Exp $ */
+/* $OpenBSD: sio_pic.c,v 1.15 2000/11/08 16:01:22 art Exp $ */
/* $NetBSD: sio_pic.c,v 1.16 1996/11/17 02:05:26 cgd Exp $ */
/*
@@ -432,17 +432,6 @@ sio_intr_string(v, irq)
return (irqstr);
}
-int
-sio_intr_check(v, irq, type)
- void *v;
- int irq, type;
-{
- if (irq > ICU_LEN || type == IST_NONE)
- return (0);
-
- return (alpha_shared_intr_check(sio_intr, irq, type));
-}
-
void *
sio_intr_establish(v, irq, type, level, fn, arg, name)
void *v, *arg;
diff --git a/sys/arch/alpha/pci/siovar.h b/sys/arch/alpha/pci/siovar.h
index 2e75cf02f81..bf7a8bd50c0 100644
--- a/sys/arch/alpha/pci/siovar.h
+++ b/sys/arch/alpha/pci/siovar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: siovar.h,v 1.7 1998/07/01 05:32:43 angelos Exp $ */
+/* $OpenBSD: siovar.h,v 1.8 2000/11/08 16:01:22 art Exp $ */
/* $NetBSD: siovar.h,v 1.5 1996/10/23 04:12:34 cgd Exp $ */
/*
@@ -32,7 +32,6 @@ void sio_intr_setup __P((pci_chipset_tag_t, bus_space_tag_t));
void sio_iointr __P((void *framep, unsigned long vec));
const char *sio_intr_string __P((void *, int));
-int sio_intr_check __P((void *, int, int));
void *sio_intr_establish __P((void *, int, int, int, int (*)(void *),
void *, char *));
void sio_intr_disestablish __P((void *, void *));
diff --git a/sys/arch/alpha/stand/Makefile b/sys/arch/alpha/stand/Makefile
index 4abf0902d32..3c09a2a57c8 100644
--- a/sys/arch/alpha/stand/Makefile
+++ b/sys/arch/alpha/stand/Makefile
@@ -1,10 +1,10 @@
-# $OpenBSD: Makefile,v 1.7 2000/05/25 21:54:39 deraadt Exp $
+# $OpenBSD: Makefile,v 1.8 2000/11/08 16:01:24 art Exp $
# $NetBSD: Makefile,v 1.7 1997/04/10 23:03:38 cgd Exp $
.if ${MACHINE} == "alpha"
SUBDIR= boot bootxx netboot libkern libsa libz
.endif
-SUBDIR= setnetbootinfo installboot
+SUBDIR+= setnetbootinfo installboot
.include <bsd.subdir.mk>
diff --git a/sys/arch/alpha/stand/Makefile.inc b/sys/arch/alpha/stand/Makefile.inc
index 3c958d54528..975ded922cf 100644
--- a/sys/arch/alpha/stand/Makefile.inc
+++ b/sys/arch/alpha/stand/Makefile.inc
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile.inc,v 1.4 1997/05/05 06:01:45 millert Exp $
+# $OpenBSD: Makefile.inc,v 1.5 2000/11/08 16:01:24 art Exp $
# $NetBSD: Makefile.inc,v 1.8 1997/04/06 08:39:38 cgd Exp $
.include <bsd.own.mk> # for ELF_TOOLCHAIN definition
@@ -16,6 +16,8 @@ HEAP_LIMIT= 20040000 # "Region 1 start" + 256k
CPPFLAGS+= -DPRIMARY_LOAD_ADDRESS="0x${PRIMARY_LOAD_ADDRESS}"
CPPFLAGS+= -DSECONDARY_LOAD_ADDRESS="0x${SECONDARY_LOAD_ADDRESS}"
+CPPFLAGS+= -D_STANDALONE
+
XCPPFLAGS+= -DHEAP_LIMIT="0x${HEAP_LIMIT}"
.if !defined(ELF_TOOLCHAIN)
diff --git a/sys/arch/alpha/stand/boot/boot.c b/sys/arch/alpha/stand/boot/boot.c
index 47154c401ec..315d862d10e 100644
--- a/sys/arch/alpha/stand/boot/boot.c
+++ b/sys/arch/alpha/stand/boot/boot.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: boot.c,v 1.11 1998/03/05 23:08:17 deraadt Exp $ */
+/* $OpenBSD: boot.c,v 1.12 2000/11/08 16:01:25 art Exp $ */
/* $NetBSD: boot.c,v 1.10 1997/01/18 01:58:33 cgd Exp $ */
/*
@@ -48,6 +48,7 @@
#include <machine/rpb.h>
#include <machine/prom.h>
+#include <machine/autoconf.h>
#define _KERNEL
#include "include/pte.h"
@@ -59,7 +60,11 @@ char boot_flags[128];
extern char bootprog_name[], bootprog_rev[], bootprog_date[], bootprog_maker[];
-vm_offset_t ffp_save, ptbr_save, esym;
+struct bootinfo_v1 bootinfo_v1;
+
+extern paddr_t ffp_save, ptbr_save;
+
+extern vaddr_t ssym, esym;
int debug;
@@ -107,11 +112,31 @@ main()
win = (loadfile(name = *namep, &entry) == 0);
printf("\n");
- if (win) {
- (void)printf("Entering %s at 0x%lx...\n", name, entry);
- (*(void (*)())entry)(ffp_save, ptbr_save, esym);
- }
-
+ if (!win)
+ goto fail;
+
+ /*
+ * Fill in the bootinfo for the kernel.
+ */
+ bzero(&bootinfo_v1, sizeof(bootinfo_v1));
+ bootinfo_v1.ssym = ssym;
+ bootinfo_v1.esym = esym;
+ bcopy(name, bootinfo_v1.booted_kernel,
+ sizeof(bootinfo_v1.booted_kernel));
+ bcopy(boot_flags, bootinfo_v1.boot_flags,
+ sizeof(bootinfo_v1.boot_flags));
+ bootinfo_v1.hwrpb = (void *)HWRPB_ADDR;
+ bootinfo_v1.hwrpbsize = ((struct rpb *)HWRPB_ADDR)->rpb_size;
+ bootinfo_v1.cngetc = NULL;
+ bootinfo_v1.cnputc = NULL;
+ bootinfo_v1.cnpollc = NULL;
+
+ (void)printf("Entering %s at 0x%lx...\n", name, entry);
+ (*(void (*)(u_int64_t, u_int64_t, u_int64_t, void *, u_int64_t,
+ u_int64_t))entry)(ffp_save, ptbr_save, BOOTINFO_MAGIC,
+ &bootinfo_v1, 1, 0);
+
+fail:
(void)printf("Boot failed! Halting...\n");
halt();
}
diff --git a/sys/arch/alpha/stand/loadfile.c b/sys/arch/alpha/stand/loadfile.c
index 261148f9f45..4d8bb2656d3 100644
--- a/sys/arch/alpha/stand/loadfile.c
+++ b/sys/arch/alpha/stand/loadfile.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: loadfile.c,v 1.7 1998/09/04 17:03:24 millert Exp $ */
+/* $OpenBSD: loadfile.c,v 1.8 2000/11/08 16:01:24 art Exp $ */
/* $NetBSD: loadfile.c,v 1.3 1997/04/06 08:40:59 cgd Exp $ */
/*
@@ -65,7 +65,8 @@ static int elf_exec __P((int, Elf_Ehdr *, u_int64_t *));
#endif
int loadfile __P((char *, u_int64_t *));
-vm_offset_t ffp_save, ptbr_save, esym;
+paddr_t ffp_save, ptbr_save;
+vaddr_t ssym, esym;
/*
* Open 'filename', read in program and return the entry point or -1 if error.
@@ -218,6 +219,7 @@ coff_exec(fd, coff, entryp)
ffp_save += symhdr.estrMax;
printf("+%d]", symhdr.estrMax);
esym = ((ffp_save + sizeof(int) - 1) & ~(sizeof(int) - 1));
+ ssym = (vaddr_t)symtab;
}
ffp_save = ALPHA_K0SEG_TO_PHYS((ffp_save + PGOFSET & ~PGOFSET)) >>
diff --git a/sys/arch/alpha/tc/tcasic.c b/sys/arch/alpha/tc/tcasic.c
index e917edbcca3..90106977613 100644
--- a/sys/arch/alpha/tc/tcasic.c
+++ b/sys/arch/alpha/tc/tcasic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tcasic.c,v 1.7 1997/01/24 19:58:21 niklas Exp $ */
+/* $OpenBSD: tcasic.c,v 1.8 2000/11/08 16:01:26 art Exp $ */
/* $NetBSD: tcasic.c,v 1.14 1996/12/05 01:39:45 cgd Exp $ */
/*
@@ -71,10 +71,10 @@ tcasicmatch(parent, cfdata, aux)
#endif
void *aux;
{
- struct confargs *ca = aux;
+ struct mainbus_attach_args *ma = aux;
/* Make sure that we're looking for a TurboChannel ASIC. */
- if (strcmp(ca->ca_name, tcasic_cd.cd_name))
+ if (strcmp(ma->ma_name, tcasic_cd.cd_name))
return (0);
/* Make sure that the system supports a TurboChannel ASIC. */
diff --git a/sys/arch/alpha/tc/tcds_dma.c b/sys/arch/alpha/tc/tcds_dma.c
index 8e26331ba34..9453aeab1d3 100644
--- a/sys/arch/alpha/tc/tcds_dma.c
+++ b/sys/arch/alpha/tc/tcds_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tcds_dma.c,v 1.6 2000/07/05 21:50:39 ericj Exp $ */
+/* $OpenBSD: tcds_dma.c,v 1.7 2000/11/08 16:01:26 art Exp $ */
/* $NetBSD: tcds_dma.c,v 1.15 1996/12/04 22:35:08 mycroft Exp $ */
/*
@@ -236,7 +236,7 @@ tcds_dma_setup(sc, addr, len, datain, dmasize)
/* Load address, set/clear unaligned transfer and read/write bits. */
/* XXX PICK AN ADDRESS TYPE, AND STICK TO IT! */
if ((u_long)*addr > VM_MIN_KERNEL_ADDRESS) {
- *sc->sc_sda = vatopa((u_long)*addr) >> 2;
+ *sc->sc_sda = vtophys((u_long)*addr) >> 2;
} else {
*sc->sc_sda = ALPHA_K0SEG_TO_PHYS((u_long)*addr) >> 2;
}