author		smurph <smurph@openbsd.org>	1999-09-27 19:13:20 +0000
committer	smurph <smurph@openbsd.org>	1999-09-27 19:13:20 +0000
commit		22c71c78cb103b48b4495df706c774f7b1cdc7b0
tree		3fd8c424ce9656d362b96e88d322cd44ad06b1d7
parent		adding support for MVME188 and MVME197. Plus 32bit if_ie.
Added support for MVME188 and MVME197
-rw-r--r--	sys/arch/mvme88k/mvme88k/autoconf.c	702
-rw-r--r--	sys/arch/mvme88k/mvme88k/cmmu.c	1645
-rw-r--r--	sys/arch/mvme88k/mvme88k/conf.c	142
-rw-r--r--	sys/arch/mvme88k/mvme88k/disksubr.c	37
-rw-r--r--	sys/arch/mvme88k/mvme88k/eh.S	3295
-rw-r--r--	sys/arch/mvme88k/mvme88k/genassym.c	17
-rw-r--r--	sys/arch/mvme88k/mvme88k/locore.S	307
-rw-r--r--	sys/arch/mvme88k/mvme88k/locore_asm_routines.S	1156
-rw-r--r--	sys/arch/mvme88k/mvme88k/locore_c_routines.c	766
-rw-r--r--	sys/arch/mvme88k/mvme88k/m18x_cmmu.c	2283
-rw-r--r--	sys/arch/mvme88k/mvme88k/m197_cmmu.c	809
-rw-r--r--	sys/arch/mvme88k/mvme88k/m88100_fp.S	8
-rw-r--r--	sys/arch/mvme88k/mvme88k/m88110_fp.S	266
-rw-r--r--	sys/arch/mvme88k/mvme88k/m88110_mmu.S	159
-rw-r--r--	sys/arch/mvme88k/mvme88k/machdep.c	2918
-rw-r--r--	sys/arch/mvme88k/mvme88k/pmap.c	5709
-rw-r--r--	sys/arch/mvme88k/mvme88k/pmap_table.c	118
-rw-r--r--	sys/arch/mvme88k/mvme88k/process_machdep.c	17
-rw-r--r--	sys/arch/mvme88k/mvme88k/trap.c	2413
-rw-r--r--	sys/arch/mvme88k/mvme88k/vm_machdep.c	32
20 files changed, 14588 insertions, 8211 deletions
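
The dominant change is in cmmu.c, where every machine-independent CMMU entry point becomes a thin dispatcher: it takes the new cmmu_cpu_lock, switches on cputyp, and forwards to the board-specific m18x_* (MVME187/MVME188, external MC88200/MC88204 CMMUs) or m197_* (MVME197, MC88110) routine. A minimal sketch of that pattern — cmmu_example_op, m18x_example_op and m197_example_op are hypothetical stand-ins for the real entry points in the diff below, such as cmmu_flush_tlb()/m18x_cmmu_flush_tlb()/m197_cmmu_flush_tlb():

/*
 * Illustrative only: *_example_op are hypothetical names, not part
 * of the commit.  Shows the lock-switch-forward shape used below.
 */
void
cmmu_example_op(void)
{
	CMMU_LOCK;			/* simple_lock(&cmmu_cpu_lock) */
	switch (cputyp) {
#if defined(MVME187) || defined(MVME188)
	case CPU_187:
	case CPU_188:
		m18x_example_op();	/* external MC88200/MC88204 CMMUs */
		break;
#endif
#ifdef MVME197
	case CPU_197:
		m197_example_op();	/* MC88110 on-chip MMU */
		break;
#endif
	}
	CMMU_UNLOCK;			/* simple_unlock(&cmmu_cpu_lock) */
}
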
diff --git a/sys/arch/mvme88k/mvme88k/autoconf.c b/sys/arch/mvme88k/mvme88k/autoconf.c
index df5a0ba3e52..41d99f42166 100644
--- a/sys/arch/mvme88k/mvme88k/autoconf.c
+++ b/sys/arch/mvme88k/mvme88k/autoconf.c
@@ -29,7 +29,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Id: autoconf.c,v 1.5 1999/05/29 04:41:45 smurph Exp $
+ * $Id: autoconf.c,v 1.6 1999/09/27 19:13:20 smurph Exp $
*/
#include <sys/param.h>
#include <sys/systm.h>
@@ -42,6 +42,7 @@
#include <sys/device.h>
#include <sys/disklabel.h>
#include <machine/vmparam.h>
+#include <machine/asm_macro.h> /* enable/disable interrupts */
#include <machine/cpu.h>
#include <machine/autoconf.h>
#include <machine/disklabel.h>
@@ -53,19 +54,19 @@
* the machine.
*/
-struct device *parsedisk __P((char *, int, int, dev_t *));
-void setroot __P((void));
-void swapconf __P((void));
-void configure __P((void));
+struct device *parsedisk __P((char *, int, int, dev_t *));
+void setroot __P((void));
+void swapconf __P((void));
+void configure __P((void));
char buginchr __P((void));
-int getsb __P((char *, int));
+int getsb __P((char *, int));
-int cold; /* 1 if still booting */
+int cold; /* 1 if still booting */
#include <sys/kernel.h>
/* XXX must be allocated statically because of early console init */
-struct map extiomap[EIOMAPSIZE/16];
-extern void *extiobase;
+struct map extiomap[EIOMAPSIZE/16];
+extern void *extiobase;
/*
* called at boot time, configure all devices on the system.
@@ -73,47 +74,47 @@ extern void *extiobase;
void
configure()
{
- bootdv = NULL; /* set by device drivers (if found) */
- /*rminit(extiomap, (long)EIOMAPSIZE, (long)1, "extio", EIOMAPSIZE/16);*/
-
- if (config_rootfound("mainbus", "mainbus") == 0)
- panic("no mainbus found");
-
- /*
- * Turn external interrupts on. We have all the drivers in
- * place now!
- */
- enable_interrupt();
- spl0();
- setroot();
- swapconf();
- /*
- * Done with autoconfig!
- */
- cold = 0;
+ bootdv = NULL; /* set by device drivers (if found) */
+/* rminit(extiomap, (long)EIOMAPSIZE, (long)1, "extio", EIOMAPSIZE/16);*/
+
+ if (config_rootfound("mainbus", "mainbus") == 0)
+ panic("no mainbus found");
+
+ /*
+ * Turn external interrupts on. We have all the drivers in
+ * place now!
+ */
+ enable_interrupt();
+ spl0();
+ setroot();
+ swapconf();
+ /*
+ * Done with autoconfig!
+ */
+ cold = 0;
}
/*ARGSUSED*/
int
simple_devprint(auxp, pnp)
- void *auxp;
- char *pnp;
+void *auxp;
+char *pnp;
{
- return(QUIET);
+ return (QUIET);
}
int
matchname(fp, sp)
- char *fp, *sp;
+char *fp, *sp;
{
- int len;
-
- len = strlen(fp);
- if (strlen(sp) != len)
- return(0);
- if (bcmp(fp, sp, len) == 0)
- return(1);
- return(0);
+ int len;
+
+ len = strlen(fp);
+ if (strlen(sp) != len)
+ return (0);
+ if (bcmp(fp, sp, len) == 0)
+ return (1);
+ return (0);
}
/*
@@ -122,18 +123,18 @@ matchname(fp, sp)
void
swapconf()
{
- register struct swdevt *swp;
- register int nblks;
- for (swp = swdevt; swp->sw_dev != NODEV; swp++){
- if (bdevsw[major(swp->sw_dev)].d_psize) {
- nblks =
- (*bdevsw[major(swp->sw_dev)].d_psize)(swp->sw_dev);
- if (nblks != -1 &&
- (swp->sw_nblks == 0 || swp->sw_nblks > nblks))
- swp->sw_nblks = nblks;
- }
- }
- dumpconf();
+ register struct swdevt *swp;
+ register int nblks;
+
+ for (swp = swdevt; swp->sw_dev != NODEV; swp++)
+ if (bdevsw[major(swp->sw_dev)].d_psize) {
+ nblks =
+ (*bdevsw[major(swp->sw_dev)].d_psize)(swp->sw_dev);
+ if (nblks != -1 &&
+ (swp->sw_nblks == 0 || swp->sw_nblks > nblks))
+ swp->sw_nblks = nblks;
+ }
+ dumpconf();
}
/*
@@ -142,93 +143,93 @@ swapconf()
*/
struct nam2blk {
- char *name;
- int maj;
+ char *name;
+ int maj;
} nam2blk[] = {
- { "sd", 4 },
- { "st", 5 },
- { "rd", 7 },
+ { "sd", 4},
+ { "st", 5},
+ { "rd", 7},
};
static int
findblkmajor(dv)
- struct device *dv;
+struct device *dv;
{
- char *name = dv->dv_xname;
- register int i;
+ char *name = dv->dv_xname;
+ register int i;
- for (i = 0; i < sizeof(nam2blk)/sizeof(nam2blk[0]); ++i)
- if (strncmp(name, nam2blk[i].name, strlen(nam2blk[0].name)) == 0)
- return (nam2blk[i].maj);
- return (-1);
+ for (i = 0; i < sizeof(nam2blk)/sizeof(nam2blk[0]); ++i)
+ if (strncmp(name, nam2blk[i].name, strlen(nam2blk[0].name)) == 0)
+ return (nam2blk[i].maj);
+ return (-1);
}
static struct device *
getdisk(str, len, defpart, devp)
- char *str;
- int len, defpart;
- dev_t *devp;
+char *str;
+int len, defpart;
+dev_t *devp;
{
- register struct device *dv;
-
- if ((dv = parsedisk(str, len, defpart, devp)) == NULL) {
- printf("use one of:");
- for (dv = alldevs.tqh_first; dv != NULL;
- dv = dv->dv_list.tqe_next) {
- if (dv->dv_class == DV_DISK)
- printf(" %s[a-h]", dv->dv_xname);
+ register struct device *dv;
+
+ if ((dv = parsedisk(str, len, defpart, devp)) == NULL) {
+ printf("use one of:");
+ for (dv = alldevs.tqh_first; dv != NULL;
+ dv = dv->dv_list.tqe_next) {
+ if (dv->dv_class == DV_DISK)
+ printf(" %s[a-h]", dv->dv_xname);
#ifdef NFSCLIENT
- if (dv->dv_class == DV_IFNET)
- printf(" %s", dv->dv_xname);
+ if (dv->dv_class == DV_IFNET)
+ printf(" %s", dv->dv_xname);
#endif
- }
- printf("\n");
- }
- return (dv);
+ }
+ printf("\n");
+ }
+ return (dv);
}
struct device *
parsedisk(str, len, defpart, devp)
- char *str;
- int len, defpart;
- dev_t *devp;
+char *str;
+int len, defpart;
+dev_t *devp;
{
- register struct device *dv;
- register char *cp, c;
- int majdev, mindev, part;
-
- if (len == 0)
- return (NULL);
- cp = str + len - 1;
- c = *cp;
- if (c >= 'a' && c <= 'h') {
- part = c - 'a';
- *cp = '\0';
- } else
- part = defpart;
-
- for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) {
- if (dv->dv_class == DV_DISK &&
- strcmp(str, dv->dv_xname) == 0) {
- majdev = findblkmajor(dv);
- if (majdev < 0)
- panic("parsedisk");
- mindev = (dv->dv_unit << PARTITIONSHIFT) + part;
- *devp = makedev(majdev, mindev);
- break;
- }
+ register struct device *dv;
+ register char *cp, c;
+ int majdev, mindev, part;
+
+ if (len == 0)
+ return (NULL);
+ cp = str + len - 1;
+ c = *cp;
+ if (c >= 'a' && c <= 'h') {
+ part = c - 'a';
+ *cp = '\0';
+ } else
+ part = defpart;
+
+ for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) {
+ if (dv->dv_class == DV_DISK &&
+ strcmp(str, dv->dv_xname) == 0) {
+ majdev = findblkmajor(dv);
+ if (majdev < 0)
+ panic("parsedisk");
+ mindev = (dv->dv_unit << PARTITIONSHIFT) + part;
+ *devp = makedev(majdev, mindev);
+ break;
+ }
#ifdef NFSCLIENT
- if (dv->dv_class == DV_IFNET &&
- strcmp(str, dv->dv_xname) == 0) {
- *devp = NODEV;
- break;
- }
+ if (dv->dv_class == DV_IFNET &&
+ strcmp(str, dv->dv_xname) == 0) {
+ *devp = NODEV;
+ break;
+ }
#endif
- }
+ }
- *cp = c;
- return (dv);
+ *cp = c;
+ return (dv);
}
/*
@@ -243,176 +244,176 @@ parsedisk(str, len, defpart, devp)
void
setroot()
{
- register struct swdevt *swp;
- register struct device *dv;
- register int len, majdev, mindev;
- dev_t nrootdev, nswapdev = NODEV;
- char buf[128];
- dev_t temp;
+ register struct swdevt *swp;
+ register struct device *dv;
+ register int len, majdev, mindev;
+ dev_t nrootdev, nswapdev = NODEV;
+ char buf[128];
+ dev_t temp;
#if defined(NFSCLIENT)
- extern char *nfsbootdevname;
+ extern char *nfsbootdevname;
#endif
- printf("boot device: %s\n",
- (bootdv) ? bootdv->dv_xname : "<unknown>");
-
- if (boothowto & RB_ASKNAME) {
- for (;;) {
- printf("root device");
- if (bootdv != NULL)
- printf("(default %s%c)",
- bootdv->dv_xname,
- bootdv->dv_class == DV_DISK
- ? 'a' : ' ');
- printf(": ");
- len = getsb(buf, sizeof(buf));
- if (len == 0 && bootdv != NULL) {
- strcpy(buf, bootdv->dv_xname);
- len = strlen(buf);
- }
- if (len > 0 && buf[len - 1] == '*') {
- buf[--len] = '\0';
- dv = getdisk(buf, len, 1, &nrootdev);
- if (dv != NULL) {
- bootdv = dv;
- nswapdev = nrootdev;
- goto gotswap;
- }
- }
- dv = getdisk(buf, len, 0, &nrootdev);
- if (dv != NULL) {
- bootdv = dv;
- break;
- }
- }
-
- /*
- * because swap must be on same device as root, for
- * network devices this is easy.
- */
- if (bootdv->dv_class == DV_IFNET) {
- goto gotswap;
- }
- for (;;) {
- printf("swap device ");
- if (bootdv != NULL)
- printf("(default %s%c)",
- bootdv->dv_xname,
- bootdv->dv_class == DV_DISK?'b':' ');
- printf(": ");
- len = getsb(buf, sizeof(buf));
- if (len == 0 && bootdv != NULL) {
- switch (bootdv->dv_class) {
- case DV_IFNET:
- nswapdev = NODEV;
- break;
- case DV_DISK:
- nswapdev = makedev(major(nrootdev),
- (minor(nrootdev) & ~ PARTITIONMASK) | 1);
- break;
- case DV_TAPE:
- case DV_TTY:
- case DV_DULL:
- case DV_CPU:
- break;
- }
- break;
- }
- dv = getdisk(buf, len, 1, &nswapdev);
- if (dv) {
- if (dv->dv_class == DV_IFNET)
- nswapdev = NODEV;
- break;
- }
- }
-gotswap:
- rootdev = nrootdev;
- dumpdev = nswapdev;
- swdevt[0].sw_dev = nswapdev;
- swdevt[1].sw_dev = NODEV;
-
- } else if (mountroot == NULL) {
-
- /*
- * `swap generic': Use the device the ROM told us to use.
- */
- if (bootdv == NULL)
- panic("boot device not known");
-
- majdev = findblkmajor(bootdv);
- if (majdev >= 0) {
- /*
- * Root and swap are on a disk.
- * val[2] of the boot device is the partition number.
- * Assume swap is on partition b.
- */
- int part = bootpart;
- mindev = (bootdv->dv_unit << PARTITIONSHIFT) + part;
- rootdev = makedev(majdev, mindev);
- nswapdev = dumpdev = makedev(major(rootdev),
- (minor(rootdev) & ~ PARTITIONMASK) | 1);
- } else {
- /*
- * Root and swap are on a net.
- */
- nswapdev = dumpdev = NODEV;
- }
- swdevt[0].sw_dev = nswapdev;
- swdevt[1].sw_dev = NODEV;
-
- } else {
-
- /*
- * `root DEV swap DEV': honour rootdev/swdevt.
- * rootdev/swdevt/mountroot already properly set.
- */
- return;
- }
-
- switch (bootdv->dv_class) {
+ printf("boot device: %s\n",
+ (bootdv) ? bootdv->dv_xname : "<unknown>");
+
+ if (boothowto & RB_ASKNAME) {
+ for (;;) {
+ printf("root device");
+ if (bootdv != NULL)
+ printf("(default %s%c)",
+ bootdv->dv_xname,
+ bootdv->dv_class == DV_DISK
+ ? 'a' : ' ');
+ printf(": ");
+ len = getsb(buf, sizeof(buf));
+ if (len == 0 && bootdv != NULL) {
+ strcpy(buf, bootdv->dv_xname);
+ len = strlen(buf);
+ }
+ if (len > 0 && buf[len - 1] == '*') {
+ buf[--len] = '\0';
+ dv = getdisk(buf, len, 1, &nrootdev);
+ if (dv != NULL) {
+ bootdv = dv;
+ nswapdev = nrootdev;
+ goto gotswap;
+ }
+ }
+ dv = getdisk(buf, len, 0, &nrootdev);
+ if (dv != NULL) {
+ bootdv = dv;
+ break;
+ }
+ }
+
+ /*
+ * because swap must be on same device as root, for
+ * network devices this is easy.
+ */
+ if (bootdv->dv_class == DV_IFNET) {
+ goto gotswap;
+ }
+ for (;;) {
+ printf("swap device ");
+ if (bootdv != NULL)
+ printf("(default %s%c)",
+ bootdv->dv_xname,
+ bootdv->dv_class == DV_DISK?'b':' ');
+ printf(": ");
+ len = getsb(buf, sizeof(buf));
+ if (len == 0 && bootdv != NULL) {
+ switch (bootdv->dv_class) {
+ case DV_IFNET:
+ nswapdev = NODEV;
+ break;
+ case DV_DISK:
+ nswapdev = makedev(major(nrootdev),
+ (minor(nrootdev) & ~ PARTITIONMASK) | 1);
+ break;
+ case DV_TAPE:
+ case DV_TTY:
+ case DV_DULL:
+ case DV_CPU:
+ break;
+ }
+ break;
+ }
+ dv = getdisk(buf, len, 1, &nswapdev);
+ if (dv) {
+ if (dv->dv_class == DV_IFNET)
+ nswapdev = NODEV;
+ break;
+ }
+ }
+ gotswap:
+ rootdev = nrootdev;
+ dumpdev = nswapdev;
+ swdevt[0].sw_dev = nswapdev;
+ swdevt[1].sw_dev = NODEV;
+
+ } else if (mountroot == NULL) {
+
+ /*
+ * `swap generic': Use the device the ROM told us to use.
+ */
+ if (bootdv == NULL)
+ panic("boot device not known");
+
+ majdev = findblkmajor(bootdv);
+ if (majdev >= 0) {
+ /*
+ * Root and swap are on a disk.
+ * val[2] of the boot device is the partition number.
+ * Assume swap is on partition b.
+ */
+ int part = bootpart;
+ mindev = (bootdv->dv_unit << PARTITIONSHIFT) + part;
+ rootdev = makedev(majdev, mindev);
+ nswapdev = dumpdev = makedev(major(rootdev),
+ (minor(rootdev) & ~ PARTITIONMASK) | 1);
+ } else {
+ /*
+ * Root and swap are on a net.
+ */
+ nswapdev = dumpdev = NODEV;
+ }
+ swdevt[0].sw_dev = nswapdev;
+ swdevt[1].sw_dev = NODEV;
+
+ } else {
+
+ /*
+ * `root DEV swap DEV': honour rootdev/swdevt.
+ * rootdev/swdevt/mountroot already properly set.
+ */
+ return;
+ }
+
+ switch (bootdv->dv_class) {
#if defined(NFSCLIENT)
- case DV_IFNET:
- mountroot = nfs_mountroot;
- nfsbootdevname = bootdv->dv_xname;
- return;
+ case DV_IFNET:
+ mountroot = nfs_mountroot;
+ nfsbootdevname = bootdv->dv_xname;
+ return;
#endif
#if defined(FFS)
- case DV_DISK:
- mountroot = dk_mountroot;
- majdev = major(rootdev);
- mindev = minor(rootdev);
- printf("root on %s%c\n", bootdv->dv_xname,
- (mindev & PARTITIONMASK) + 'a');
- break;
+ case DV_DISK:
+ mountroot = dk_mountroot;
+ majdev = major(rootdev);
+ mindev = minor(rootdev);
+ printf("root on %s%c\n", bootdv->dv_xname,
+ (mindev & PARTITIONMASK) + 'a');
+ break;
#endif
- default:
- printf("can't figure root, hope your kernel is right\n");
- return;
- }
-
- /*
- * XXX: What is this doing?
- */
- mindev &= ~PARTITIONMASK;
- temp = NODEV;
- for (swp = swdevt; swp->sw_dev != NODEV; swp++) {
- if (majdev == major(swp->sw_dev) &&
- mindev == (minor(swp->sw_dev) & ~PARTITIONMASK)) {
- temp = swdevt[0].sw_dev;
- swdevt[0].sw_dev = swp->sw_dev;
- swp->sw_dev = temp;
- break;
- }
- }
- if (swp->sw_dev == NODEV)
- return;
-
- /*
- * If dumpdev was the same as the old primary swap device, move
- * it to the new primary swap device.
- */
- if (temp == dumpdev)
- dumpdev = swdevt[0].sw_dev;
+ default:
+ printf("can't figure root, hope your kernel is right\n");
+ return;
+ }
+
+ /*
+ * XXX: What is this doing?
+ */
+ mindev &= ~PARTITIONMASK;
+ temp = NODEV;
+ for (swp = swdevt; swp->sw_dev != NODEV; swp++) {
+ if (majdev == major(swp->sw_dev) &&
+ mindev == (minor(swp->sw_dev) & ~PARTITIONMASK)) {
+ temp = swdevt[0].sw_dev;
+ swdevt[0].sw_dev = swp->sw_dev;
+ swp->sw_dev = temp;
+ break;
+ }
+ }
+ if (swp->sw_dev == NODEV)
+ return;
+
+ /*
+ * If dumpdev was the same as the old primary swap device, move
+ * it to the new primary swap device.
+ */
+ if (temp == dumpdev)
+ dumpdev = swdevt[0].sw_dev;
}
/*
@@ -420,74 +421,91 @@ gotswap:
*/
struct device *
getdevunit(name, unit)
- char *name;
- int unit;
+char *name;
+int unit;
{
- struct device *dev = alldevs.tqh_first;
- char num[10], fullname[16];
- int lunit;
-
- /* compute length of name and decimal expansion of unit number */
- sprintf(num, "%d", unit);
- lunit = strlen(num);
- if (strlen(name) + lunit >= sizeof(fullname) - 1)
- panic("config_attach: device name too long");
-
- strcpy(fullname, name);
- strcat(fullname, num);
-
- while (strcmp(dev->dv_xname, fullname) != 0) {
- if ((dev = dev->dv_list.tqe_next) == NULL)
- return NULL;
- }
- return dev;
+ struct device *dev = alldevs.tqh_first;
+ char num[10], fullname[16];
+ int lunit;
+
+ /* compute length of name and decimal expansion of unit number */
+ sprintf(num, "%d", unit);
+ lunit = strlen(num);
+ if (strlen(name) + lunit >= sizeof(fullname) - 1)
+ panic("config_attach: device name too long");
+
+ strcpy(fullname, name);
+ strcat(fullname, num);
+
+ while (strcmp(dev->dv_xname, fullname) != 0) {
+ if ((dev = dev->dv_list.tqe_next) == NULL)
+ return NULL;
+ }
+ return dev;
}
int
getsb(cp, size)
- char *cp;
- int size;
+char *cp;
+int size;
{
- register char *lp;
- register int len;
- register int c;
-
- lp = cp;
- len = 0;
- for (;;) {
- c = buginchr();
-
- switch (c) {
- case '\n':
- case '\r':
- printf("\n");
- *lp++ = '\0';
- return (len);
- case '\b':
- case '\177':
- if (len) {
- printf("\b \b");
- --lp;
- --len;
- }
- break;
- case 'u' & 037:
- while (len) {
- printf("\b \b");
- --lp;
- --len;
- }
- break;
- case '\t':
- c = ' ';
- default:
- if (len + 1 >= size || c < ' ') {
- printf("\007");
- break;
- }
- printf("%c", c);
- ++len;
- *lp++ = c;
- }
- }
+ register char *lp;
+ register int len;
+ register int c;
+
+ lp = cp;
+ len = 0;
+ for (;;) {
+ c = buginchr();
+
+ switch (c) {
+ case '\n':
+ case '\r':
+ printf("\n");
+ *lp++ = '\0';
+ return (len);
+ case '\b':
+ case '\177':
+ if (len) {
+ printf("\b \b");
+ --lp;
+ --len;
+ }
+ break;
+ case 'u' & 037:
+ while (len) {
+ printf("\b \b");
+ --lp;
+ --len;
+ }
+ break;
+ case '\t':
+ c = ' ';
+ default:
+ if (len + 1 >= size || c < ' ') {
+ printf("\007");
+ break;
+ }
+ printf("%c", c);
+ ++len;
+ *lp++ = c;
+ }
+ }
}
+
+/*
+ * Slave CPU pre-main routine.
+ * Determine CPU number and set it.
+ *
+ * Running on an interrupt stack here; do nothing fancy.
+ *
+ * Called from "mvme88k/locore.S"
+ */
+void slave_pre_main(void)
+{
+ set_cpu_number(cmmu_cpu_number()); /* Determine cpu number by CMMU */
+ splhigh();
+ enable_interrupt();
+}
+
+
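autoconf.c also gains slave_pre_main() above, the first C code a secondary processor runs: called from locore.S, it asks the CPU's own CMMU for its number via cmmu_cpu_number(), records it with set_cpu_number(), and raises the interrupt priority level. A hedged sketch of the calling sequence — slave_start() and slave_main() are hypothetical, only slave_pre_main() is part of this commit, and the real caller is assembly code in mvme88k/locore.S:

/*
 * Hypothetical slave bootstrap flow, written in C for clarity.
 */
void
slave_start(void)
{
	slave_pre_main();	/* cpu number from CMMU, splhigh, enable ints */
	slave_main();		/* hypothetical continuation into MI start-up */
}
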
diff --git a/sys/arch/mvme88k/mvme88k/cmmu.c b/sys/arch/mvme88k/mvme88k/cmmu.c
index 33061b2d668..63f2eb8e78a 100644
--- a/sys/arch/mvme88k/mvme88k/cmmu.c
+++ b/sys/arch/mvme88k/mvme88k/cmmu.c
@@ -28,7 +28,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Id: cmmu.c,v 1.4 1998/12/15 05:11:01 smurph Exp $
+ * $Id: cmmu.c,v 1.5 1999/09/27 19:13:21 smurph Exp $
*/
/*
* Mach Operating System
@@ -59,299 +59,205 @@
#include <sys/param.h>
#include <sys/types.h>
+#include <sys/simplelock.h>
#include <machine/board.h>
#include <machine/cpus.h>
+#include <machine/cpu_number.h>
+#include <machine/cmmu.h>
+#if defined(MVME187) || defined(MVME188)
#include <machine/m882xx.h>
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+#include <machine/m88110.h>
+#endif /* MVME197 */
+
+/*
+ * This lock protects the CMMU SAR and SCR registers; other ports
+ * can be accessed without locking it.
+ *
+ * May be used from "db_interface.c".
+ */
+struct simplelock cmmu_cpu_lock;
-/* On some versions of 88200, page size flushes don't work. I am using
- * sledge hammer approach till I find for sure which ones are bad XXX nivas */
-#define BROKEN_MMU_MASK
-#define CMMU_DEBUG 1
+#define CMMU_LOCK simple_lock(&cmmu_cpu_lock)
+#define CMMU_UNLOCK simple_unlock(&cmmu_cpu_lock)
-#if defined(MVME187)
-#undef SNOOP_ENABLE
-#else
-#define SNOOP_ENABLE
-#endif /* defined(MVME187)
+unsigned cache_policy = /*CACHE_INH*/ 0;
+unsigned cpu_sets[MAX_CPUS];
+unsigned number_cpus = 0;
+unsigned master_cpu = 0;
+int vme188_config;
+int max_cpus, max_cmmus;
+int cpu_cmmu_ratio;
-#undef SHADOW_BATC /* don't use BATCs for now XXX nivas */
+void
+show_apr(unsigned value)
+{
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_show_apr(value);
+ break;
+#endif
+#ifdef MVME197
+ case CPU_197:
+ m197_show_apr(value);
+ break;
+#endif
+ }
+}
-struct cmmu_regs
+void
+show_sctr(unsigned value)
{
- /* base + $000 */ volatile unsigned idr;
- /* base + $004 */ volatile unsigned scr;
- /* base + $008 */ volatile unsigned ssr;
- /* base + $00C */ volatile unsigned sar;
- /* */ unsigned padding1[0x3D];
- /* base + $104 */ volatile unsigned sctr;
- /* base + $108 */ volatile unsigned pfSTATUSr;
- /* base + $10C */ volatile unsigned pfADDRr;
- /* */ unsigned padding2[0x3C];
- /* base + $200 */ volatile unsigned sapr;
- /* base + $204 */ volatile unsigned uapr;
- /* */ unsigned padding3[0x7E];
- /* base + $400 */ volatile unsigned bwp[8];
- /* */ unsigned padding4[0xF8];
- /* base + $800 */ volatile unsigned cdp[4];
- /* */ unsigned padding5[0x0C];
- /* base + $840 */ volatile unsigned ctp[4];
- /* */ unsigned padding6[0x0C];
- /* base + $880 */ volatile unsigned cssp;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_show_sctr(value);
+ break;
+#endif
+#ifdef MVME197
+ case CPU_197:
+ m197_show_sctr(value);
+ break;
+#endif
+ }
+}
- /* The rest for the 88204 */
- #define cssp0 cssp
- /* */ unsigned padding7[0x03];
- /* base + $890 */ volatile unsigned cssp1;
- /* */ unsigned padding8[0x03];
- /* base + $8A0 */ volatile unsigned cssp2;
- /* */ unsigned padding9[0x03];
- /* base + $8B0 */ volatile unsigned cssp3;
-};
+void
+setup_board_config(void)
+{
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_setup_board_config();
+ break;
+#endif
+#ifdef MVME197
+ case CPU_197:
+ m197_setup_board_config();
+ break;
+#endif
+ }
+}
-static struct cmmu {
- struct cmmu_regs *cmmu_regs; /* CMMU "base" area */
- unsigned char cmmu_cpu; /* cpu number it is attached to */
- unsigned char which; /* either INST_CMMU || DATA_CMMU */
- unsigned char cmmu_alive;
-#define CMMU_DEAD 0 /* This cmmu not there */
-#define CMMU_AVAILABLE 1 /* It's there, but which cpu's? */
-#define CMMU_MARRIED 2 /* Know which cpu it belongs to. */
-#if SHADOW_BATC
- unsigned batc[8];
+void
+setup_cmmu_config(void)
+{
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_setup_cmmu_config();
+ break;
#endif
- unsigned char pad;
-} cmmu[MAX_CMMUS] = {
- {(void *)CMMU_I, 0, 0, 0, 0},
- {(void *)CMMU_D, 0, 1, 0, 0},
-};
+#ifdef MVME197
+ case CPU_197:
+ m197_setup_cmmu_config();
+ break;
+#endif
+ }
+ return;
+}
-/*
- * We rely upon and use INST_CMMU == 0 and DATA_CMMU == 1
- */
-#if INST_CMMU != 0 || DATA_CMMU != 1
- error("ack gag barf!");
+void
+cmmu_dump_config(void)
+{
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_dump_config();
+ break;
#endif
-struct cpu_cmmu {
- struct cmmu *pair[2];
-} cpu_cmmu[1];
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_dump_config();
+ break;
+#endif
+ }
+ return;
+}
+#ifdef DDB
/*
- * CMMU(cpu,data) Is the cmmu struct for the named cpu's indicated cmmu.
- * REGS(cpu,data) is the actual register structure.
+ * Used by DDB for cache probe functions
*/
-#define CMMU(cpu, data) cpu_cmmu[(cpu)].pair[(data)?DATA_CMMU:INST_CMMU]
-#define REGS(cpu, data) (*CMMU(cpu, data)->cmmu_regs)
-
-unsigned cache_policy = /*CACHE_INH*/ 0;
-
-#ifdef CMMU_DEBUG
-void
-show_apr(unsigned value)
+unsigned
+cmmu_get_by_mode(int cpu, int mode)
{
- union apr_template apr_template;
- apr_template.bits = value;
-
- printf("table @ 0x%x000", apr_template.field.st_base);
- if (apr_template.field.wt) printf(", writethrough");
- if (apr_template.field.g) printf(", global");
- if (apr_template.field.ci) printf(", cache inhibit");
- if (apr_template.field.te) printf(", valid");
- else printf(", not valid");
- printf("\n");
+ unsigned retval;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ retval = m18x_cmmu_get_by_mode(cpu, mode);
+ break;
+#endif
+#ifdef MVME197
+ case CPU_197:
+ retval = m197_cmmu_get_by_mode(cpu, mode);
+ break;
+#endif
+ }
+ CMMU_UNLOCK;
+ return retval;
}
+#endif
-void
-show_sctr(unsigned value)
+/*
+ * Should only be called after the calling cpu knows its cpu
+ * number and master/slave status.  Should be called first
+ * by the master, before the slaves are started.
+ */
+void
+cpu_configuration_print(int master)
{
- union {
- unsigned bits;
- struct {
- unsigned :16,
- pe: 1,
- se: 1,
- pr: 1,
- :13;
- } fields;
- } sctr;
- sctr.bits = value;
- printf("%spe, %sse %spr]\n",
- sctr.fields.pe ? "" : "!",
- sctr.fields.se ? "" : "!",
- sctr.fields.pr ? "" : "!");
-}
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cpu_configuration_print(master);
+ break;
#endif
+#ifdef MVME197
+ case CPU_197:
+ m197_cpu_configuration_print(master);
+ break;
+#endif
+ }
+ CMMU_UNLOCK;
+ return;
+}
/*
* CMMU initialization routine
*/
-void
+void
cmmu_init(void)
{
- unsigned tmp, cmmu_num;
- union cpupid id;
- int cpu;
-
- cpu_cmmu[0].pair[INST_CMMU] = cpu_cmmu[0].pair[DATA_CMMU] = 0;
-
- for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++) {
- if (!badwordaddr((vm_offset_t)cmmu[cmmu_num].cmmu_regs)) {
- id.cpupid = cmmu[cmmu_num].cmmu_regs->idr;
- if (id.m88200.type != M88200 && id.m88200.type !=M88204)
- continue;
- cmmu[cmmu_num].cmmu_alive = CMMU_AVAILABLE;
-
- cpu_cmmu[cmmu[cmmu_num].cmmu_cpu].pair[cmmu[cmmu_num].which] =
- &cmmu[cmmu_num];
-
- /*
- * Reset cache data....
- * as per M88200 Manual (2nd Ed.) section 3.11.
- */
- for (tmp = 0; tmp < 255; tmp++) {
- cmmu[cmmu_num].cmmu_regs->sar = tmp << 4;
- cmmu[cmmu_num].cmmu_regs->cssp = 0x3f0ff000;
- }
-
- /* 88204 has additional cache to clear */
- if(id.m88200.type == M88204)
- {
- for (tmp = 0; tmp < 255; tmp++) {
- cmmu[cmmu_num].cmmu_regs->sar =
- tmp<<4;
- cmmu[cmmu_num].cmmu_regs->cssp1 =
- 0x3f0ff000;
- }
- for (tmp = 0; tmp < 255; tmp++) {
- cmmu[cmmu_num].cmmu_regs->sar =
- tmp<<4;
- cmmu[cmmu_num].cmmu_regs->cssp2 =
- 0x3f0ff000;
- }
- for (tmp = 0; tmp < 255; tmp++) {
- cmmu[cmmu_num].cmmu_regs->sar =
- tmp<<4;
- cmmu[cmmu_num].cmmu_regs->cssp3 = 0x3f0ff000;
- }
- }
-
- /*
- * Set the SCTR, SAPR, and UAPR to some known state
- * (I don't trust the reset to do it).
- */
- tmp =
- ! CMMU_SCTR_PE | /* not parity enable */
- ! CMMU_SCTR_SE | /* not snoop enable */
- ! CMMU_SCTR_PR ; /*not priority arbitration */
- cmmu[cmmu_num].cmmu_regs->sctr = tmp;
-
- tmp =
- (0x00000 << 12) |/*segment table base address */
- AREA_D_WT | /* write through */
- AREA_D_G | /* global */
- AREA_D_CI | /* cache inhibit */
- ! AREA_D_TE ; /* not translation enable */
-
- cmmu[cmmu_num].cmmu_regs->sapr =
- cmmu[cmmu_num].cmmu_regs->uapr = tmp;
-
-#if SHADOW_BATC
- cmmu[cmmu_num].batc[0] =
- cmmu[cmmu_num].batc[1] =
- cmmu[cmmu_num].batc[2] =
- cmmu[cmmu_num].batc[3] =
- cmmu[cmmu_num].batc[4] =
- cmmu[cmmu_num].batc[5] =
- cmmu[cmmu_num].batc[6] =
- cmmu[cmmu_num].batc[7] = 0;
-#endif
- cmmu[cmmu_num].cmmu_regs->bwp[0] =
- cmmu[cmmu_num].cmmu_regs->bwp[1] =
- cmmu[cmmu_num].cmmu_regs->bwp[2] =
- cmmu[cmmu_num].cmmu_regs->bwp[3] =
- cmmu[cmmu_num].cmmu_regs->bwp[4] =
- cmmu[cmmu_num].cmmu_regs->bwp[5] =
- cmmu[cmmu_num].cmmu_regs->bwp[6] =
- cmmu[cmmu_num].cmmu_regs->bwp[7] = 0;
-
- cmmu[cmmu_num].cmmu_regs->scr =CMMU_FLUSH_CACHE_INV_ALL;
- cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_SUPER_ALL;
- cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_USER_ALL;
- }
- }
-
- /*
- * Now that we know which CMMUs are there, let's report on which
- * CPU/CMMU sets seem complete (hopefully all)
- */
- for (cpu = 0; cpu < MAX_CPUS; cpu++)
- {
- if (cpu_cmmu[cpu].pair[INST_CMMU] && cpu_cmmu[cpu].pair[DATA_CMMU])
- {
- if(id.m88200.type == M88204)
- printf("CPU%d is attached with MC88204 CMMU\n",
- cpu);
- else
- printf("CPU%d is attached with MC88200 CMMU\n",
- cpu);
- }
- else if (cpu_cmmu[cpu].pair[INST_CMMU])
- {
- printf("CPU%d data CMMU is not working.\n", cpu);
- panic("cmmu-data");
- }
- else if (cpu_cmmu[cpu].pair[DATA_CMMU])
- {
- printf("CPU%d instruction CMMU is not working.\n", cpu);
- panic("cmmu");
- }
- }
-
-#if SNOOP_ENABLE
- /*
- * Enable snooping... MVME187 doesn't support snooping. The
- * processor will, but the processor is not going to see the cache
- * accesses going on the 040 local bus. XXX nivas
- */
- for (cpu = 0; cpu < MAX_CPUS; cpu++)
- {
- /*
- * Enable snooping.
- * We enable it for instruction cmmus as well so that we can
- * have breakpoints, etc, and modify code.
- */
- tmp =
- ! CMMU_SCTR_PE | /* not parity enable */
- CMMU_SCTR_SE | /* snoop enable */
- ! CMMU_SCTR_PR ; /* not priority arbitration */
-
- REGS(cpu, DATA_CMMU).sctr = tmp;
- REGS(cpu, INST_CMMU).sctr = tmp;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
- }
-
-#endif /* SNOOP_ENABLE */
-
- /*
- * Turn on some cache.
- */
- for (cpu = 0; cpu < MAX_CPUS; cpu++)
- {
- /*
- * Enable some caching for the instruction stream.
- * Can't cache data yet 'cause device addresses can never
- * be cached, and we don't have those no-caching zones
- * set up yet....
- */
- tmp =
- (0x00000 << 12) | /* segment table base address */
- AREA_D_WT | /* write through */
- AREA_D_G | /* global */
- AREA_D_CI | /* cache inhibit */
- ! AREA_D_TE ; /* not translation enable */
- REGS(cpu, INST_CMMU).sapr = tmp;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
- }
+ /* init the lock */
+ simple_lock_init(&cmmu_cpu_lock);
+
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_init();
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_init();
+ break;
+#endif /* MVME197 */
+ }
+ return;
}
/*
@@ -360,33 +266,77 @@ cmmu_init(void)
void
cmmu_shutdown_now(void)
{
- unsigned tmp;
- unsigned cmmu_num;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_shutdown_now();
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_shutdown_now();
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
+}
- /*
- * Now set some state as we like...
- */
- for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
- {
- tmp =
- ! CMMU_SCTR_PE | /* parity enable */
-#if SNOOP_ENABLE
- ! CMMU_SCTR_SE | /* snoop enable */
-#endif /* SNOOP_ENABLE */
- ! CMMU_SCTR_PR ; /* priority arbitration */
+#define PARITY_ENABLE
- cmmu[cmmu_num].cmmu_regs->sctr = tmp;
+/*
+ * enable parity
+ */
+void
+cmmu_parity_enable(void)
+{
+#ifdef PARITY_ENABLE
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_parity_enable();
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_parity_enable();
+ break;
+#endif /* MVME197 */
+ }
+#endif /* PARITY_ENABLE */
+ CMMU_UNLOCK;
+ return;
+}
- tmp =
- (0x00000 << 12) | /* segment table base address */
- ! AREA_D_WT | /* write through */
- ! AREA_D_G | /* global */
- AREA_D_CI | /* cache inhibit */
- ! AREA_D_TE ; /* translation disable */
-
- cmmu[cmmu_num].cmmu_regs->sapr = tmp;
- cmmu[cmmu_num].cmmu_regs->uapr = tmp;
- }
+/*
+ * Find out the CPU number by accessing the CMMU.
+ * Better be at splhigh, or even better, with interrupts
+ * disabled.
+ */
+unsigned
+cmmu_cpu_number(void)
+{
+ unsigned retval;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ retval = m18x_cmmu_cpu_number();
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ retval = m197_cmmu_cpu_number();
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return retval;
}
/**
@@ -399,59 +349,137 @@ static
void
cmmu_remote_set(unsigned cpu, unsigned r, unsigned data, unsigned x)
{
- *(volatile unsigned *)(r + (char*)&REGS(cpu,data)) = x;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_remote_set(cpu, r, data, x);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_remote_set(cpu, r, data, x);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
* cmmu_cpu_lock should be held when called if read
* the CMMU_SCR or CMMU_SAR.
-**/
+ */
#if !DDB
static
#endif
unsigned
cmmu_remote_get(unsigned cpu, unsigned r, unsigned data)
{
- return (*(volatile unsigned *)(r + (char*)&REGS(cpu,data)));
+ unsigned retval;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ retval = m18x_cmmu_remote_get(cpu, r, data);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ retval = m197_cmmu_remote_get(cpu, r, data);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return retval;
}
/* Needs no locking - read only registers */
unsigned
cmmu_get_idr(unsigned data)
{
- return REGS(0,data).idr;
+ unsigned retval;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ retval = m18x_cmmu_get_idr(data);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ retval = m197_cmmu_get_idr(data);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return retval;
}
void
cmmu_set_sapr(unsigned ap)
{
- int cpu = 0;
-
- if (cache_policy & CACHE_INH)
- ap |= AREA_D_CI;
-
- REGS(cpu, INST_CMMU).sapr = ap;
- REGS(cpu, DATA_CMMU).sapr = ap;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_set_sapr(ap);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_set_sapr(ap);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
void
cmmu_remote_set_sapr(unsigned cpu, unsigned ap)
{
- if (cache_policy & CACHE_INH)
- ap |= AREA_D_CI;
-
- REGS(cpu, INST_CMMU).sapr = ap;
- REGS(cpu, DATA_CMMU).sapr = ap;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_remote_set_sapr(cpu, ap);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_remote_set_sapr(cpu, ap);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
void
cmmu_set_uapr(unsigned ap)
{
- int cpu = 0;
-
- /* this functionality also mimiced in cmmu_pmap_activate() */
- REGS(cpu, INST_CMMU).uapr = ap;
- REGS(cpu, DATA_CMMU).uapr = ap;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_set_uapr(ap);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_set_uapr(ap);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
@@ -464,20 +492,27 @@ cmmu_set_uapr(unsigned ap)
*/
void
cmmu_set_batc_entry(
- unsigned cpu,
- unsigned entry_no,
- unsigned data, /* 1 = data, 0 = instruction */
- unsigned value) /* the value to stuff into the batc */
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned data, /* 1 = data, 0 = instruction */
+ unsigned value) /* the value to stuff into the batc */
{
-
- REGS(cpu,data).bwp[entry_no] = value;
-#if SHADOW_BATC
- CMMU(cpu,data)->batc[entry_no] = value;
-#endif
-#if 0 /* was for debugging piece (peace?) of mind */
- REGS(cpu,data).scr = CMMU_FLUSH_SUPER_ALL;
- REGS(cpu,data).scr = CMMU_FLUSH_USER_ALL;
-#endif
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_set_batc_entry(cpu, entry_no, data, value);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_set_batc_entry(cpu, entry_no, data, value);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
@@ -486,26 +521,26 @@ cmmu_set_batc_entry(
*/
void
cmmu_set_pair_batc_entry(
- unsigned cpu,
- unsigned entry_no,
- unsigned value) /* the value to stuff into the batc */
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned value) /* the value to stuff into the batc */
{
-
- REGS(cpu,DATA_CMMU).bwp[entry_no] = value;
-#if SHADOW_BATC
- CMMU(cpu,DATA_CMMU)->batc[entry_no] = value;
-#endif
- REGS(cpu,INST_CMMU).bwp[entry_no] = value;
-#if SHADOW_BATC
- CMMU(cpu,INST_CMMU)->batc[entry_no] = value;
-#endif
-
-#if 0 /* was for debugging piece (peace?) of mind */
- REGS(cpu,INST_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
- REGS(cpu,INST_CMMU).scr = CMMU_FLUSH_USER_ALL;
- REGS(cpu,DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
- REGS(cpu,DATA_CMMU).scr = CMMU_FLUSH_USER_ALL;
-#endif
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_set_pair_batc_entry(cpu, entry_no, value);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_set_pair_batc_entry(cpu, entry_no, value);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/**
@@ -519,23 +554,22 @@ cmmu_set_pair_batc_entry(
void
cmmu_flush_remote_tlb(unsigned cpu, unsigned kernel, vm_offset_t vaddr, int size)
{
- register s = splhigh();
-
- if ((unsigned)size > M88K_PGBYTES)
- {
- REGS(cpu, INST_CMMU).scr =
- REGS(cpu, DATA_CMMU).scr =
- kernel ? CMMU_FLUSH_SUPER_ALL : CMMU_FLUSH_USER_ALL;
- }
- else /* a page or smaller */
- {
- REGS(cpu, INST_CMMU).sar = (unsigned)vaddr;
- REGS(cpu, DATA_CMMU).sar = (unsigned)vaddr;
- REGS(cpu, INST_CMMU).scr =
- REGS(cpu, DATA_CMMU).scr =
- kernel ? CMMU_FLUSH_SUPER_PAGE : CMMU_FLUSH_USER_PAGE;
- }
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_flush_remote_tlb(cpu, kernel, vaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_flush_remote_tlb(cpu, kernel, vaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
@@ -544,7 +578,9 @@ cmmu_flush_remote_tlb(unsigned cpu, unsigned kernel, vm_offset_t vaddr, int size
void
cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size)
{
- cmmu_flush_remote_tlb(0, kernel, vaddr, size);
+ int cpu;
+ cpu = cpu_number();
+ cmmu_flush_remote_tlb(cpu, kernel, vaddr, size);
}
/*
@@ -554,33 +590,27 @@ cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size)
*/
void
cmmu_pmap_activate(
- unsigned cpu,
- unsigned uapr,
- batc_template_t i_batc[BATC_MAX],
- batc_template_t d_batc[BATC_MAX])
+ unsigned cpu,
+ unsigned uapr,
+ batc_template_t i_batc[BATC_MAX],
+ batc_template_t d_batc[BATC_MAX])
{
- int entry_no;
-
- /* the following is from cmmu_set_uapr */
- REGS(cpu, INST_CMMU).uapr = uapr;
- REGS(cpu, DATA_CMMU).uapr = uapr;
-
- for (entry_no = 0; entry_no < BATC_MAX; entry_no++) {
- REGS(cpu,INST_CMMU).bwp[entry_no] = i_batc[entry_no].bits;
- REGS(cpu,DATA_CMMU).bwp[entry_no] = d_batc[entry_no].bits;
-#if SHADOW_BATC
- CMMU(cpu,INST_CMMU)->batc[entry_no] = i_batc[entry_no].bits;
- CMMU(cpu,DATA_CMMU)->batc[entry_no] = d_batc[entry_no].bits;
-#endif
- }
-
- /*
- * Flush the user TLB.
- * IF THE KERNEL WILL EVER CARE ABOUT THE BATC ENTRIES,
- * THE SUPERVISOR TLBs SHOULB EE FLUSHED AS WELL.
- */
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_USER_ALL;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_USER_ALL;
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_pmap_activate(cpu, uapr, i_batc, d_batc);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_pmap_activate(cpu, uapr, i_batc, d_batc);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/**
@@ -602,38 +632,22 @@ cmmu_pmap_activate(
void
cmmu_flush_remote_cache(int cpu, vm_offset_t physaddr, int size)
{
- register s = splhigh();
-
-#if !defined(BROKEN_MMU_MASK)
-
- if (size < 0 || size > NBSG ) {
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- }
- else if (size <= 16) {
- REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
- }
- else if (size <= NBPG) {
- REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
- }
- else {
- REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
- }
-
-#else
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
-#endif /* !BROKEN_MMU_MASK */
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_flush_remote_cache(cpu, physaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_flush_remote_cache(cpu, physaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
@@ -642,7 +656,8 @@ cmmu_flush_remote_cache(int cpu, vm_offset_t physaddr, int size)
void
cmmu_flush_cache(vm_offset_t physaddr, int size)
{
- cmmu_flush_remote_cache(0, physaddr, size);
+ int cpu = cpu_number();
+ cmmu_flush_remote_cache(cpu, physaddr, size);
}
/*
@@ -651,29 +666,22 @@ cmmu_flush_cache(vm_offset_t physaddr, int size)
void
cmmu_flush_remote_inst_cache(int cpu, vm_offset_t physaddr, int size)
{
- register s = splhigh();
-
-#if !defined(BROKEN_MMU_MASK)
- if (size < 0 || size > NBSG ) {
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- }
- else if (size <= 16) {
- REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
- }
- else if (size <= NBPG) {
- REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
- }
- else {
- REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
- }
-#else
- REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
-#endif /* !BROKEN_MMU_MASK */
-
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_flush_remote_inst_cache(cpu, physaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_flush_remote_inst_cache(cpu, physaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
@@ -682,35 +690,30 @@ cmmu_flush_remote_inst_cache(int cpu, vm_offset_t physaddr, int size)
void
cmmu_flush_inst_cache(vm_offset_t physaddr, int size)
{
- cmmu_flush_remote_inst_cache(0, physaddr, size);
+ int cpu;
+ cpu = cpu_number();
+ cmmu_flush_remote_inst_cache(cpu, physaddr, size);
}
void
cmmu_flush_remote_data_cache(int cpu, vm_offset_t physaddr, int size)
{
- register s = splhigh();
-
-#if !defined(BROKEN_MMU_MASK)
- if (size < 0 || size > NBSG ) {
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- }
- else if (size <= 16) {
- REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
- }
- else if (size <= NBPG) {
- REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
- }
- else {
- REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
- }
-#else
- REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
-#endif /* !BROKEN_MMU_MASK */
-
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_flush_remote_data_cache(cpu, physaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_flush_remote_data_cache(cpu, physaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
/*
@@ -719,7 +722,9 @@ cmmu_flush_remote_data_cache(int cpu, vm_offset_t physaddr, int size)
void
cmmu_flush_data_cache(vm_offset_t physaddr, int size)
{
- cmmu_flush_remote_data_cache(0, physaddr, size);
+ int cpu;
+ cpu = cpu_number();
+ cmmu_flush_remote_data_cache(cpu, physaddr, size);
}
/*
@@ -728,216 +733,88 @@ cmmu_flush_data_cache(vm_offset_t physaddr, int size)
static void
cmmu_sync_cache(vm_offset_t physaddr, int size)
{
- register s = splhigh();
-
-#if !defined(BROKEN_MMU_MASK)
- if (size < 0 || size > NBSG ) {
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
- }
- else if (size <= 16) {
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_LINE;
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_LINE;
- }
- else if (size <= NBPG) {
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_PAGE;
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_PAGE;
- }
- else {
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_SEGMENT;
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_SEGMENT;
- }
-#else
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
-#endif /* !BROKEN_MMU_MASK */
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_sync_cache(physaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_sync_cache(physaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
static void
cmmu_sync_inval_cache(vm_offset_t physaddr, int size)
{
- register s = splhigh();
-
-#if !defined(BROKEN_MMU_MASK)
- if (size < 0 || size > NBSG ) {
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- }
- else if (size <= 16) {
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
- }
- else if (size <= NBPG) {
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
- }
- else {
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
- }
-
-#else
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
-#endif /* !BROKEN_MMU_MASK */
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_sync_inval_cache(physaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_sync_inval_cache(physaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
static void
cmmu_inval_cache(vm_offset_t physaddr, int size)
{
- register s = splhigh();
-
-#if !defined(BROKEN_MMU_MASK)
- if (size < 0 || size > NBSG ) {
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
- }
- else if (size <= 16) {
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_LINE;
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_LINE;
- }
- else if (size <= NBPG) {
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_PAGE;
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_PAGE;
- }
- else {
- REGS(0, DATA_CMMU).sar = (unsigned)physaddr;
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_SEGMENT;
- REGS(0, INST_CMMU).sar = (unsigned)physaddr;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_SEGMENT;
- }
-#else
- REGS(0, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
- REGS(0, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
-#endif /* !BROKEN_MMU_MASK */
-
- splx(s);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_inval_cache(physaddr, size);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_inval_cache(physaddr, size);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
void
dma_cachectl(vm_offset_t va, int size, int op)
{
- int count;
-
-#if !defined(BROKEN_MMU_MASK)
- while (size) {
-
- count = NBPG - ((int)va & PGOFSET);
-
- if (size < count)
- count = size;
-
- if (op == DMA_CACHE_SYNC)
- cmmu_sync_cache(kvtop(va), count);
- else if (op == DMA_CACHE_SYNC_INVAL)
- cmmu_sync_inval_cache(kvtop(va), count);
- else
- cmmu_inval_cache(kvtop(va), count);
-
- va = (vm_offset_t)((int)va + count);
- size -= count;
- }
-#else
-
- if (op == DMA_CACHE_SYNC)
- cmmu_sync_cache(kvtop(va), size);
- else if (op == DMA_CACHE_SYNC_INVAL)
- cmmu_sync_inval_cache(kvtop(va), size);
- else
- cmmu_inval_cache(kvtop(va), size);
-#endif /* !BROKEN_MMU_MASK */
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_dma_cachectl(va, size, op);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_dma_cachectl(va, size, op);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
#if DDB
-union ssr {
- unsigned bits;
- struct {
- unsigned :16,
- ce:1,
- be:1,
- :4,
- wt:1,
- sp:1,
- g:1,
- ci:1,
- :1,
- m:1,
- u:1,
- wp:1,
- bh:1,
- v:1;
- } field;
-};
-
-union cssp {
- unsigned bits;
- struct {
- unsigned : 2,
- l: 6,
- d3: 1,
- d2: 1,
- d1: 1,
- d0: 1,
- vv3: 2,
- vv2: 2,
- vv1: 2,
- vv0: 2,
- :12;
- } field;
-};
-
-union batcu {
- unsigned bits;
- struct { /* block address translation register */
- unsigned int
- lba:13, /* logical block address */
- pba:13, /* physical block address */
- s:1, /* supervisor */
- wt:4, /* write through */
- g:1, /* global */
- ci:1, /* cache inhibit */
- wp:1, /* write protect */
- v:1; /* valid */
- } field;
-};
-
-#define VV_EX_UNMOD 0
-#define VV_EX_MOD 1
-#define VV_SHARED_UNMOD 2
-#define VV_INVALID 3
-
-#define D(UNION, LINE) \
- ((LINE) == 3 ? (UNION).field.d3 : \
- ((LINE) == 2 ? (UNION).field.d2 : \
- ((LINE) == 1 ? (UNION).field.d1 : \
- ((LINE) == 0 ? (UNION).field.d0 : ~0))))
-#define VV(UNION, LINE) \
- ((LINE) == 3 ? (UNION).field.vv3 : \
- ((LINE) == 2 ? (UNION).field.vv2 : \
- ((LINE) == 1 ? (UNION).field.vv1 : \
- ((LINE) == 0 ? (UNION).field.vv0 : ~0))))
-
-
-#undef VEQR_ADDR
-#define VEQR_ADDR 0
/*
* Show (for debugging) how the given CMMU translates the given ADDRESS.
@@ -945,413 +822,67 @@ union batcu {
*/
void
cmmu_show_translation(
- unsigned address,
- unsigned supervisor_flag,
- unsigned verbose_flag,
- int cmmu_num)
+ unsigned address,
+ unsigned supervisor_flag,
+ unsigned verbose_flag,
+ int cmmu_num)
{
- /*
- * A virtual address is split into three fields. Two are used as
- * indicies into tables (segment and page), and one is an offset into
- * a page of memory.
- */
- union {
- unsigned bits;
- struct {
- unsigned segment_table_index:10,
- page_table_index:10,
- page_offset:12;
- } field;
- } virtual_address;
- unsigned value;
-
- if (verbose_flag)
- db_printf("-------------------------------------------\n");
-
-
- /****** ACCESS PROPER CMMU or THREAD ***********/
-#if 0 /* no thread */
- if (thread != 0)
- {
- /* the following tidbit from _pmap_activate in m88k/pmap.c */
- register apr_template_t apr_data;
- supervisor_flag = 0; /* thread implies user */
-
- if (thread->task == 0) {
- db_printf("[thread %x has empty task pointer]\n", thread);
- return;
- } else if (thread->task->map == 0) {
- db_printf("[thread/task %x/%x has empty map pointer]\n",
- thread, thread->task);
- return;
- } else if (thread->task->map->pmap == 0) {
- db_printf("[thread/task/map %x/%x/%x has empty pmap pointer]\n",
- thread, thread->task, thread->task->map);
- return;
- }
- if (thread->task->map->pmap->lock.lock_data) {
- db_printf("[Warning: thread %x's task %x's map %x's "
- "pmap %x is locked]\n", thread, thread->task,
- thread->task->map, thread->task->map->pmap);
- }
- apr_data.bits = 0;
- apr_data.field.st_base = M88K_BTOP(thread->task->map->pmap->sdt_paddr);
- apr_data.field.wt = 0;
- apr_data.field.g = 1;
- apr_data.field.ci = 0;
- apr_data.field.te = 1;
- value = apr_data.bits;
- if (verbose_flag) {
- db_printf("[thread %x task %x map %x pmap %x UAPR is %x]\n",
- thread, thread->task, thread->task->map,
- thread->task->map->pmap, value);
- }
- }
- else
-#endif /* 0 */
- {
- if (cmmu_num == -1)
- {
- if (cpu_cmmu[0].pair[DATA_CMMU] == 0)
- {
- db_printf("ack! can't figure my own data cmmu number.\n");
- return;
- }
- cmmu_num = cpu_cmmu[0].pair[DATA_CMMU] - cmmu;
- if (verbose_flag)
- db_printf("The data cmmu for cpu#%d is cmmu#%d.\n",
- 0, cmmu_num);
- }
- else if (cmmu_num < 0 || cmmu_num >= MAX_CMMUS)
- {
- db_printf("invalid cpu number [%d]... must be in range [0..%d]\n",
- cmmu_num, MAX_CMMUS - 1);
- return;
- }
-
- if (cmmu[cmmu_num].cmmu_alive == 0)
- {
- db_printf("warning: cmmu %d is not alive.\n", cmmu_num);
- #if 0
- return;
- #endif
- }
-
- if (!verbose_flag)
- {
- if (!(cmmu[cmmu_num].cmmu_regs->sctr & CMMU_SCTR_SE))
- db_printf("WARNING: snooping not enabled for CMMU#%d.\n",
- cmmu_num);
- }
- else
- {
- int i;
- for (i=0; i<MAX_CMMUS; i++)
- if ((i == cmmu_num || cmmu[i].cmmu_alive) &&
- (verbose_flag>1 || !(cmmu[i].cmmu_regs->sctr&CMMU_SCTR_SE)))
- {
- db_printf("CMMU#%d (cpu %d %s) snooping %s\n", i,
- cmmu[i].cmmu_cpu, cmmu[i].which ? "data" : "inst",
- (cmmu[i].cmmu_regs->sctr & CMMU_SCTR_SE) ? "on":"OFF");
- }
- }
-
- if (supervisor_flag)
- value = cmmu[cmmu_num].cmmu_regs->sapr;
- else
- value = cmmu[cmmu_num].cmmu_regs->uapr;
-
- }
-
- /******* LOOK AT THE BATC ** (if not a thread) **************/
-#if 0
-#if SHADOW_BATC
- if (thread == 0)
- {
- int i;
- union batcu batc;
- for (i = 0; i < 8; i++) {
- batc.bits = cmmu[cmmu_num].batc[i];
- if (batc.field.v == 0) {
- if (verbose_flag>1)
- db_printf("cmmu #%d batc[%d] invalid.\n", cmmu_num, i);
- } else {
- db_printf("cmmu#%d batc[%d] v%08x p%08x", cmmu_num, i,
- batc.field.lba << 18, batc.field.pba);
- if (batc.field.s) db_printf(", supervisor");
- if (batc.field.wt) db_printf(", wt.th");
- if (batc.field.g) db_printf(", global");
- if (batc.field.ci) db_printf(", cache inhibit");
- if (batc.field.wp) db_printf(", write protect");
- }
- }
- }
-#endif
-#endif /* 0 */
-
- /******* SEE WHAT A PROBE SAYS (if not a thread) ***********/
-#if 0
- if (thread == 0)
-#endif /* 0 */
- {
- union ssr ssr;
- struct cmmu_regs *cmmu_regs = cmmu[cmmu_num].cmmu_regs;
- cmmu_regs->sar = address;
- cmmu_regs->scr = supervisor_flag ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
- ssr.bits = cmmu_regs->ssr;
- if (verbose_flag > 1)
- db_printf("probe of 0x%08x returns ssr=0x%08x\n",
- address, ssr.bits);
- if (ssr.field.v)
- db_printf("PROBE of 0x%08x returns phys=0x%x",
- address, cmmu_regs->sar);
- else
- db_printf("PROBE fault at 0x%x", cmmu_regs->pfADDRr);
- if (ssr.field.ce) db_printf(", copyback err");
- if (ssr.field.be) db_printf(", bus err");
- if (ssr.field.wt) db_printf(", writethrough");
- if (ssr.field.sp) db_printf(", sup prot");
- if (ssr.field.g) db_printf(", global");
- if (ssr.field.ci) db_printf(", cache inhibit");
- if (ssr.field.m) db_printf(", modified");
- if (ssr.field.u) db_printf(", used");
- if (ssr.field.wp) db_printf(", write prot");
- if (ssr.field.bh) db_printf(", BATC");
- db_printf(".\n");
- }
-
- /******* INTERPRET AREA DESCRIPTOR *********/
- {
- union apr_template apr_template;
- apr_template.bits = value;
- if (verbose_flag > 1) {
- db_printf("CMMU#%d", cmmu_num);
-#if 0
- if (thread == 0)
- db_printf("CMMU#%d", cmmu_num);
- else
- db_printf("THREAD %x", thread);
-#endif /* 0 */
- db_printf(" %cAPR is 0x%08x\n",
- supervisor_flag ? 'S' : 'U', apr_template.bits);
- }
- db_printf("CMMU#%d", cmmu_num);
-#if 0
- if (thread == 0)
- db_printf("CMMU#%d", cmmu_num);
- else
- db_printf("THREAD %x", thread);
-#endif /* 0 /
- db_printf(" %cAPR: SegTbl: 0x%x000p",
- supervisor_flag ? 'S' : 'U', apr_template.field.st_base);
- if (apr_template.field.wt) db_printf(", WTHRU");
- else db_printf(", !wthru");
- if (apr_template.field.g) db_printf(", GLOBAL");
- else db_printf(", !global");
- if (apr_template.field.ci) db_printf(", $INHIBIT");
- else db_printf(", $ok");
- if (apr_template.field.te) db_printf(", VALID");
- else db_printf(", !valid");
- db_printf(".\n");
-
- /* if not valid, done now */
- if (apr_template.field.te == 0) {
- db_printf("<would report an error, valid bit not set>\n");
- return;
- }
-
- value = apr_template.field.st_base << 12; /* now point to seg page */
- }
-
- /* translate value from physical to virtual */
- if (verbose_flag)
- db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
- value += VEQR_ADDR;
-
- virtual_address.bits = address;
-
- /****** ACCESS SEGMENT TABLE AND INTERPRET SEGMENT DESCRIPTOR *******/
- {
- union sdt_entry_template std_template;
- if (verbose_flag)
- db_printf("will follow to entry %d of page at 0x%x...\n",
- virtual_address.field.segment_table_index, value);
- value |= virtual_address.field.segment_table_index *
- sizeof(struct sdt_entry);
-
- if (badwordaddr(value)) {
- db_printf("ERROR: unable to access page at 0x%08x.\n", value);
- return;
- }
-
- std_template.bits = *(unsigned *)value;
- if (verbose_flag > 1)
- db_printf("SEG DESC @0x%x is 0x%08x\n", value, std_template.bits);
- db_printf("SEG DESC @0x%x: PgTbl: 0x%x000",
- value, std_template.sdt_desc.table_addr);
- if (std_template.sdt_desc.wt) db_printf(", WTHRU");
- else db_printf(", !wthru");
- if (std_template.sdt_desc.sup) db_printf(", S-PROT");
- else db_printf(", UserOk");
- if (std_template.sdt_desc.g) db_printf(", GLOBAL");
- else db_printf(", !global");
- if (std_template.sdt_desc.no_cache) db_printf(", $INHIBIT");
- else db_printf(", $ok");
- if (std_template.sdt_desc.prot) db_printf(", W-PROT");
- else db_printf(", WriteOk");
- if (std_template.sdt_desc.dtype) db_printf(", VALID");
- else db_printf(", !valid");
- db_printf(".\n");
-
- /* if not valid, done now */
- if (std_template.sdt_desc.dtype == 0) {
- db_printf("<would report an error, STD entry not valid>\n");
- return;
- }
-
- value = std_template.sdt_desc.table_addr << 12;
- }
-
- /* translate value from physical to virtual */
- if (verbose_flag)
- db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
- value += VEQR_ADDR;
-
- /******* PAGE TABLE *********/
- {
- union pte_template pte_template;
- if (verbose_flag)
- db_printf("will follow to entry %d of page at 0x%x...\n",
- virtual_address.field.page_table_index, value);
- value |= virtual_address.field.page_table_index *
- sizeof(struct pt_entry);
-
- if (badwordaddr(value)) {
- db_printf("error: unable to access page at 0x%08x.\n", value);
- return;
- }
-
- pte_template.bits = *(unsigned *)value;
- if (verbose_flag > 1)
- db_printf("PAGE DESC @0x%x is 0x%08x.\n", value, pte_template.bits);
- db_printf("PAGE DESC @0x%x: page @%x000",
- value, pte_template.pte.pfn);
- if (pte_template.pte.wired) db_printf(", WIRE");
- else db_printf(", !wire");
- if (pte_template.pte.wt) db_printf(", WTHRU");
- else db_printf(", !wthru");
- if (pte_template.pte.sup) db_printf(", S-PROT");
- else db_printf(", UserOk");
- if (pte_template.pte.g) db_printf(", GLOBAL");
- else db_printf(", !global");
- if (pte_template.pte.ci) db_printf(", $INHIBIT");
- else db_printf(", $ok");
- if (pte_template.pte.modified) db_printf(", MOD");
- else db_printf(", !mod");
- if (pte_template.pte.pg_used) db_printf(", USED");
- else db_printf(", !used");
- if (pte_template.pte.prot) db_printf(", W-PROT");
- else db_printf(", WriteOk");
- if (pte_template.pte.dtype) db_printf(", VALID");
- else db_printf(", !valid");
- db_printf(".\n");
-
- /* if not valid, done now */
- if (pte_template.pte.dtype == 0) {
- db_printf("<would report an error, PTE entry not valid>\n");
- return;
- }
-
- value = pte_template.pte.pfn << 12;
- if (verbose_flag)
- db_printf("will follow to byte %d of page at 0x%x...\n",
- virtual_address.field.page_offset, value);
- value |= virtual_address.field.page_offset;
-
- if (badwordaddr(value)) {
- db_printf("error: unable to access page at 0x%08x.\n", value);
- return;
- }
- }
-
- /* translate value from physical to virtual */
- if (verbose_flag)
- db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
- value += VEQR_ADDR;
-
- db_printf("WORD at 0x%x is 0x%08x.\n", value, *(unsigned *)value);
+ CMMU_LOCK;
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_show_translation(address, supervisor_flag,
+ verbose_flag, cmmu_num);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_show_translation(address, supervisor_flag,
+ verbose_flag, cmmu_num);
+ break;
+#endif /* MVME197 */
+ }
+ CMMU_UNLOCK;
+ return;
}
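
For reference, the board-specific helpers now perform the same two-level lookup the removed code above did by hand: APR to segment table, segment descriptor to page table, page descriptor to physical page. A minimal C sketch of that walk, using the 10/10/12 virtual address split from the removed code and omitting its VEQR_ADDR physical-to-virtual fixups (illustrative only, not kernel code):

	/* Illustrative two-level m88k table walk; returns 0 on an invalid
	 * descriptor.  Assumes descriptors can be dereferenced directly. */
	unsigned
	walk_translation(unsigned apr, unsigned va)
	{
		unsigned sd, pte;

		/* segment table base lives in APR bits 31..12 */
		sd = *(unsigned *)((apr & ~0xfff) + ((va >> 22) & 0x3ff) * 4);
		if ((sd & 1) == 0)		/* dtype clear: invalid */
			return 0;
		pte = *(unsigned *)((sd & ~0xfff) + ((va >> 12) & 0x3ff) * 4);
		if ((pte & 1) == 0)		/* dtype clear: invalid */
			return 0;
		return (pte & ~0xfff) | (va & 0xfff);	/* pfn<<12 | offset */
	}
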
void
cmmu_cache_state(unsigned addr, unsigned supervisor_flag)
{
- static char *vv_name[4] =
- {"exclu-unmod", "exclu-mod", "shared-unmod", "invalid"};
- int cmmu_num;
- for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
- {
- union ssr ssr;
- union cssp cssp;
- struct cmmu_regs *R;
- unsigned tag, line;
- if (!cmmu[cmmu_num].cmmu_alive)
- continue;
- R = cmmu[cmmu_num].cmmu_regs;
- db_printf("cmmu #%d %s cmmu for cpu %d.\n", cmmu_num,
- cmmu[cmmu_num].which ? "data" : "inst",
- cmmu[cmmu_num].cmmu_cpu);
- R->sar = addr;
- R->scr = supervisor_flag ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
-
- ssr.bits = R->ssr;
- if (!ssr.field.v) {
- db_printf("PROBE of 0x%08x faults.\n",addr);
- continue;
- }
- db_printf("PROBE of 0x%08x returns phys=0x%x", addr, R->sar);
-
- tag = R->sar & ~0xfff;
- cssp.bits = R->cssp;
-
- /* check to see if any of the tags for the set match the address */
- for (line = 0; line < 4; line++)
- {
- if (VV(cssp, line) == VV_INVALID)
- {
- db_printf("line %d invalid.\n", line);
- continue; /* line is invalid */
- }
- if (D(cssp, line))
- {
- db_printf("line %d disabled.\n", line);
- continue; /* line is disabled */
- }
-
- if ((R->ctp[line] & ~0xfff) != tag)
- {
- db_printf("line %d address tag is %x.\n", line,
- (R->ctp[line] & ~0xfff));
- continue;
- }
- db_printf("found in line %d as %08x (%s).\n",
- line, R->cdp[line], vv_name[VV(cssp, line)]);
- }
- }
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_cmmu_cache_state(addr, supervisor_flag);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_cmmu_cache_state(addr, supervisor_flag);
+ break;
+#endif /* MVME197 */
+ }
+ return;
}
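
The probe sequence the removed loop performed per CMMU (and which the m18x version keeps) is worth keeping in mind when reading the board-specific files; in outline, as a hedged C sketch assembled from the register accesses above:

	/* Sketch of a single CMMU probe, after the removed code above
	 * (types and constants are the ones this file already used). */
	static unsigned
	cmmu_probe(struct cmmu_regs *r, unsigned addr, int super)
	{
		union ssr ssr;

		r->sar = addr;				/* address to translate */
		r->scr = super ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
		ssr.bits = r->ssr;			/* probe status */
		return ssr.field.v ? r->sar : 0;	/* sar now holds phys addr */
	}
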
void
show_cmmu_info(unsigned addr)
{
- int cmmu_num;
- cmmu_cache_state(addr, 1);
-
- for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
- if (cmmu[cmmu_num].cmmu_alive) {
- db_printf("cmmu #%d %s cmmu for cpu %d: ", cmmu_num,
- cmmu[cmmu_num].which ? "data" : "inst",
- cmmu[cmmu_num].cmmu_cpu);
- cmmu_show_translation(addr, 1, 0, cmmu_num);
- }
+ switch (cputyp) {
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ m18x_show_cmmu_info(addr);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ case CPU_197:
+ m197_show_cmmu_info(addr);
+ break;
+#endif /* MVME197 */
+ }
+ return;
}
#endif /* end if DDB */
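
The same cputyp dispatch wraps every CMMU entry point touched by this commit; schematically (cmmu_foo is a placeholder name, not a real routine):

	/* Schematic of the dispatch pattern used above (illustrative). */
	void
	cmmu_foo(unsigned addr)
	{
		switch (cputyp) {
	#if defined(MVME187) || defined(MVME188)
		case CPU_187:
		case CPU_188:
			m18x_cmmu_foo(addr);	/* 88100/88200 boards */
			break;
	#endif
	#ifdef MVME197
		case CPU_197:
			m197_cmmu_foo(addr);	/* 88110 board */
			break;
	#endif
		}
	}
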
diff --git a/sys/arch/mvme88k/mvme88k/conf.c b/sys/arch/mvme88k/mvme88k/conf.c
index f91962bbd7c..d39f986279d 100644
--- a/sys/arch/mvme88k/mvme88k/conf.c
+++ b/sys/arch/mvme88k/mvme88k/conf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: conf.c,v 1.11 1999/05/29 04:41:46 smurph Exp $ */
+/* $OpenBSD: conf.c,v 1.12 1999/09/27 19:13:21 smurph Exp $ */
/*-
* Copyright (c) 1991 The Regents of the University of California.
@@ -134,10 +134,9 @@ cdev_decl(ptc);
cdev_decl(log);
cdev_decl(fd);
-#if notyet
-#include "zs.h"
-cdev_decl(zs);
-#endif /* notyet */
+#include "dart.h"
+cdev_decl(dart);
+
#include "cl.h"
cdev_decl(cl);
@@ -203,76 +202,75 @@ cdev_decl(lkm);
struct cdevsw cdevsw[] =
{
- cdev_cn_init(1,cn), /* 0: virtual console */
- cdev_ctty_init(1,ctty), /* 1: controlling terminal */
- cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
- cdev_swap_init(1,sw), /* 3: /dev/drum (swap pseudo-device) */
- cdev_tty_init(NPTY,pts), /* 4: pseudo-tty slave */
- cdev_ptc_init(NPTY,ptc), /* 5: pseudo-tty master */
- cdev_log_init(1,log), /* 6: /dev/klog */
- cdev_mdev_init(NSRAM,sram), /* 7: /dev/sramX */
- cdev_disk_init(NSD,sd), /* 8: SCSI disk */
- cdev_disk_init(NCD,cd), /* 9: SCSI CD-ROM */
- cdev_mdev_init(NNVRAM,nvram), /* 10: /dev/nvramX */
+ cdev_cn_init(1,cn), /* 0: virtual console */
+ cdev_ctty_init(1,ctty), /* 1: controlling terminal */
+ cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
+ cdev_swap_init(1,sw), /* 3: /dev/drum (swap pseudo-device) */
+ cdev_tty_init(NPTY,pts), /* 4: pseudo-tty slave */
+ cdev_ptc_init(NPTY,ptc), /* 5: pseudo-tty master */
+ cdev_log_init(1,log), /* 6: /dev/klog */
+ cdev_mdev_init(NSRAM,sram), /* 7: /dev/sramX */
+ cdev_disk_init(NSD,sd), /* 8: SCSI disk */
+ cdev_disk_init(NCD,cd), /* 9: SCSI CD-ROM */
+ cdev_mdev_init(NNVRAM,nvram), /* 10: /dev/nvramX */
#if notyet
- cdev_mdev_init(NFLASH,flash), /* 11: /dev/flashX */
- cdev_tty_init(NZS,zs), /* 12: SCC serial (tty[a-d]) */
+ cdev_mdev_init(NFLASH,flash), /* 11: /dev/flashX */
#else
- cdev_notdef(), /* 11: */
- cdev_notdef(), /* 12: SCC serial (tty[a-d]) */
+ cdev_notdef(), /* 11: */
#endif /* notyet */
- cdev_tty_init(NCL,cl), /* 13: CL-CD1400 serial (tty0[0-3]) */
- cdev_tty_init(NBUGTTY,bugtty), /* 14: BUGtty (ttyB) */
- cdev_tty_init(NVX,vx), /* 15: MVME332XT serial/lpt ttyv[0-7][a-i] */
- cdev_notdef(), /* 16 */
- cdev_notdef(), /* 17: concatenated disk */
- cdev_disk_init(NRD,rd), /* 18: ramdisk disk */
- cdev_disk_init(NVND,vnd), /* 19: vnode disk */
- cdev_tape_init(NST,st), /* 20: SCSI tape */
- cdev_fd_init(1,filedesc), /* 21: file descriptor pseudo-dev */
- cdev_bpftun_init(NBPFILTER,bpf),/* 22: berkeley packet filter */
- cdev_bpftun_init(NTUN,tun), /* 23: network tunnel */
- cdev_lkm_init(NLKM,lkm), /* 24: loadable module driver */
- cdev_notdef(), /* 25 */
+ cdev_tty_init(NDART,dart), /* 12: MVME188 serial (tty[a-b]) */
+ cdev_tty_init(NCL,cl), /* 13: CL-CD1400 serial (tty0[0-3]) */
+ cdev_tty_init(NBUGTTY,bugtty), /* 14: BUGtty (ttyB) */
+ cdev_tty_init(NVX,vx), /* 15: MVME332XT serial/lpt ttyv[0-7][a-i] */
+ cdev_notdef(), /* 16 */
+ cdev_notdef(), /* 17: concatenated disk */
+ cdev_disk_init(NRD,rd), /* 18: ramdisk disk */
+ cdev_disk_init(NVND,vnd), /* 19: vnode disk */
+ cdev_tape_init(NST,st), /* 20: SCSI tape */
+ cdev_fd_init(1,filedesc), /* 21: file descriptor pseudo-dev */
+ cdev_bpftun_init(NBPFILTER,bpf), /* 22: berkeley packet filter */
+ cdev_bpftun_init(NTUN,tun), /* 23: network tunnel */
+ cdev_lkm_init(NLKM,lkm), /* 24: loadable module driver */
+ cdev_notdef(), /* 25 */
#if notyet
- cdev_disk_init(NXD,xd), /* 26: XD disk */
+ cdev_disk_init(NXD,xd), /* 26: XD disk */
#else
- cdev_notdef(), /* 26: XD disk */
+ cdev_notdef(), /* 26: XD disk */
#endif /* notyet */
- cdev_notdef(), /* 27 */
+ cdev_notdef(), /* 27 */
#if notyet
- cdev_lp_init(NLP,lp), /* 28: lp */
- cdev_lp_init(NLPTWO,lptwo), /* 29: lptwo */
+ cdev_lp_init(NLP,lp), /* 28: lp */
+ cdev_lp_init(NLPTWO,lptwo), /* 29: lptwo */
#else
- cdev_notdef(), /* 28: lp */
- cdev_notdef(), /* 29: lptwo */
+ cdev_notdef(), /* 28: lp */
+ cdev_notdef(), /* 29: lptwo */
#endif /* notyet */
- cdev_notdef(), /* 30 */
- cdev_mdev_init(NVMEL,vmel), /* 31: /dev/vmelX */
- cdev_mdev_init(NVMES,vmes), /* 32: /dev/vmesX */
- cdev_lkm_dummy(), /* 33 */
- cdev_lkm_dummy(), /* 34 */
- cdev_lkm_dummy(), /* 35 */
- cdev_lkm_dummy(), /* 36 */
- cdev_lkm_dummy(), /* 37 */
- cdev_lkm_dummy(), /* 38 */
- cdev_gen_ipf(NIPF,ipl), /* 39: IP filter */
- cdev_notdef(), /* 40 */
- cdev_notdef(), /* 41 */
- cdev_notdef(), /* 42 */
- cdev_notdef(), /* 43 */
- cdev_notdef(), /* 44 */
- cdev_notdef(), /* 45 */
- cdev_notdef(), /* 46 */
- cdev_notdef(), /* 47 */
- cdev_notdef(), /* 48 */
- cdev_notdef(), /* 49 */
- cdev_notdef(), /* 50 */
+ cdev_notdef(), /* 30 */
+ cdev_mdev_init(NVMEL,vmel), /* 31: /dev/vmelX */
+ cdev_mdev_init(NVMES,vmes), /* 32: /dev/vmesX */
+ cdev_lkm_dummy(), /* 33 */
+ cdev_lkm_dummy(), /* 34 */
+ cdev_lkm_dummy(), /* 35 */
+ cdev_lkm_dummy(), /* 36 */
+ cdev_lkm_dummy(), /* 37 */
+ cdev_lkm_dummy(), /* 38 */
+ cdev_gen_ipf(NIPF,ipl), /* 39: IP filter */
+ cdev_notdef(), /* 40 */
+ cdev_notdef(), /* 41 */
+ cdev_notdef(), /* 42 */
+ cdev_notdef(), /* 43 */
+ cdev_notdef(), /* 44 */
+ cdev_notdef(), /* 45 */
+ cdev_notdef(), /* 46 */
+ cdev_notdef(), /* 47 */
+ cdev_notdef(), /* 48 */
+ cdev_notdef(), /* 49 */
+ cdev_notdef(), /* 50 */
#ifdef XFS
- cde_xfs_init(NXFS,xfs_dev), /* 51: xfs communication device */
+ cde_xfs_init(NXFS,xfs_dev), /* 51: xfs communication device */
#else
- cdev_notdef(), /* 51 */
+ cdev_notdef(), /* 51 */
#endif
};
int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]);
@@ -321,8 +319,8 @@ static int chrtoblktbl[] = {
/* 5 */ NODEV,
/* 6 */ NODEV,
/* 7 */ NODEV,
- /* 8 */ 4, /* SCSI disk */
- /* 9 */ 6, /* SCSI CD-ROM */
+ /* 8 */ 4, /* SCSI disk */
+ /* 9 */ 6, /* SCSI CD-ROM */
/* 10 */ NODEV,
/* 11 */ NODEV,
/* 12 */ NODEV,
@@ -331,15 +329,15 @@ static int chrtoblktbl[] = {
/* 15 */ NODEV,
/* 16 */ NODEV,
/* 17 */ NODEV,
- /* 18 */ 7, /* ram disk */
- /* 19 */ 8, /* vnode disk */
+ /* 18 */ 7, /* ram disk */
+ /* 19 */ 8, /* vnode disk */
/* 20 */ NODEV,
/* 21 */ NODEV,
/* 22 */ NODEV,
/* 23 */ NODEV,
/* 24 */ NODEV,
/* 25 */ NODEV,
- /* 26 */ 10, /* XD disk */
+ /* 26 */ 10, /* XD disk */
};
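
For context, a sketch of the consumer of chrtoblktbl, in the conventional BSD shape (an assumption for illustration, not code quoted from this file): map a character device to its block twin, or NODEV when none exists.

	/* Conventional chrtoblk() shape (assumption, for illustration). */
	dev_t
	chrtoblk(dev)
		dev_t dev;
	{
		int blkmaj;

		if (major(dev) >= sizeof(chrtoblktbl)/sizeof(chrtoblktbl[0]))
			return (NODEV);
		blkmaj = chrtoblktbl[major(dev)];
		if (blkmaj == NODEV)
			return (NODEV);
		return (makedev(blkmaj, minor(dev)));
	}
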
/*
@@ -385,16 +383,16 @@ blktochr(dev)
*/
#include <dev/cons.h>
-#define zscnpollc nullcnpollc
-cons_decl(zs);
+#define dartcnpollc nullcnpollc
+cons_decl(dart);
#define clcnpollc nullcnpollc
cons_decl(cl);
#define bugttycnpollc nullcnpollc
cons_decl(bugtty);
struct consdev constab[] = {
-#if NZS > 0
- cons_init(zs),
+#if NDART > 0
+ cons_init(dart),
#endif
#if NCL > 0
cons_init(cl),
diff --git a/sys/arch/mvme88k/mvme88k/disksubr.c b/sys/arch/mvme88k/mvme88k/disksubr.c
index 5d09855a35f..f8042fcdffe 100644
--- a/sys/arch/mvme88k/mvme88k/disksubr.c
+++ b/sys/arch/mvme88k/mvme88k/disksubr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: disksubr.c,v 1.9 1999/02/09 06:36:28 smurph Exp $ */
+/* $OpenBSD: disksubr.c,v 1.10 1999/09/27 19:13:21 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1995 Dale Rahn.
@@ -58,6 +58,37 @@ static void printlp __P((struct disklabel *lp, char *str));
static void printclp __P((struct cpu_disklabel *clp, char *str));
#endif
+/*
+ * Returns the ID of the SCSI disk based on Motorola's CLUN/DLUN stuff:
+ * bootdev == CLUN << 8 | DLUN.
+ * This handles SBC SCSI and MVME328. It will need to be modified for
+ * MVME327. We do not handle MVME328 daughter cards. smurph
+ */
+int
+get_target(void)
+{
+ extern int bootdev;
+ switch (bootdev)
+ {
+ case 0x0000: case 0x0600: case 0x0700: case 0x1600: case 0x1700: case 0x1800: case 0x1900:
+ return 0;
+ case 0x0010: case 0x0608: case 0x0708: case 0x1608: case 0x1708: case 0x1808: case 0x1908:
+ return 1;
+ case 0x0020: case 0x0610: case 0x0710: case 0x1610: case 0x1710: case 0x1810: case 0x1910:
+ return 2;
+ case 0x0030: case 0x0618: case 0x0718: case 0x1618: case 0x1718: case 0x1818: case 0x1918:
+ return 3;
+ case 0x0040: case 0x0620: case 0x0720: case 0x1620: case 0x1720: case 0x1820: case 0x1920:
+ return 4;
+ case 0x0050: case 0x0628: case 0x0728: case 0x1628: case 0x1728: case 0x1828: case 0x1928:
+ return 5;
+ case 0x0060: case 0x0630: case 0x0730: case 0x1630: case 0x1730: case 0x1830: case 0x1930:
+ return 6;
+ default:
+ return 0;
+ }
+}
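
The switch above encodes a simple rule: for CLUN 0 (the on-board SBC SCSI) the DLUN is target << 4, and for the MVME328 CLUNs (0x06, 0x07, 0x16-0x19) it is target << 3. An equivalent arithmetic decode, for illustration only (the explicit table stays in the source):

	/* Hypothetical arithmetic form of get_target(); does not reproduce
	 * the default-to-0 behaviour for unknown CLUN/DLUN pairs. */
	int
	get_target_calc(void)
	{
		extern int bootdev;
		int clun = (bootdev >> 8) & 0xff;
		int dlun = bootdev & 0xff;

		return (clun == 0x00) ? (dlun >> 4) : (dlun >> 3);
	}
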
+
void
dk_establish(dk, dev)
struct disk *dk;
@@ -77,8 +108,8 @@ dk_establish(dk, dev)
strncmp("cd", dev->dv_xname, 2) == 0) {
sbsc = (struct scsibus_softc *)dev->dv_parent;
- target = bootdevlun / 10;
- lun = bootdevlun % 10;
+ target = get_target(); /* Work the Motorola Magic */
+ lun = 0;
if (sbsc->sc_link[target][lun] != NULL &&
sbsc->sc_link[target][lun]->device_softc == (void *)dev) {
diff --git a/sys/arch/mvme88k/mvme88k/eh.S b/sys/arch/mvme88k/mvme88k/eh.S
index 3ba6fc8046e..8315a17a27e 100644
--- a/sys/arch/mvme88k/mvme88k/eh.S
+++ b/sys/arch/mvme88k/mvme88k/eh.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: eh.S,v 1.7 1999/05/29 04:41:46 smurph Exp $ */
+/* $OpenBSD: eh.S,v 1.8 1999/09/27 19:13:21 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1991 Carnegie Mellon University
@@ -32,11 +32,11 @@
* HISTORY
* 1. Should get rid of SR0 reference for thread stuff.
* 2. Make up my mind what is _kstack. I think it
- * should be p->p_addr+UPAGES. (p_addr
- * is pointing to user struct and swapin is
- * making sure it is updated)
- * Whatever is _kstack, its usage in this file should be
- * revisited.
+ * should be p->p_addr+UPAGES. (p_addr
+ * is pointing to user struct and swapin is
+ * making sure it is updated)
+ * Whatever is _kstack, its usage in this file should be
+ * revisited.
*/
/*
@@ -55,7 +55,7 @@
* ; frequent misspellings. ;
* ; ;
* ; Jeffrey Friedl ;
- * ; jfriedl@rna.ncl.omron.co.jp ;
+ * ; jfriedl@rna.ncl.omron.co.jp ;
* ; December, 1989 ;
* -------------------------------------------------------------------
*
@@ -114,7 +114,7 @@
* saved). The same stack is also used when C routines are called (to
* service the exception).
*
- * Each process has a stack in kernel space (called the "kernel stack",
+ * Each process has a stack in kernel space (called the "kernel stack",
 * short for "process's kernel stack") as well as the user space stack. When
* entering the kernel from user space, the kernel stack is unused. On this
* stack we save the exception state and (most likely call a C routine to)
@@ -157,7 +157,7 @@
*
* More on Restarting the FPU
* --------------------------
- * The manual [section 6.4.3.4] gives only minor mention to this
+ * The manual [section 6.4.3.4] gives only minor mention to this
* rather complex task. Before the FPU is restarted all SSBR bits are
* cleared for actions that the exception handler completes (as mentioned
* above) so that the SSBR is clear unless there are FPU operations that
@@ -218,7 +218,7 @@
#endif
#include "assym.s"
-#include <machine/trap.h> /* for T_ defines */
+#include <machine/trap.h> /* for T_ defines */
#include <machine/asm.h>
/*
@@ -227,22 +227,20 @@
* pseudo-fields there for our needs.
*
* EF_SR3 A place to save the exception-time SR3 from just after the
- * time when an exception is raised until just after the FPU
- * has been restarted. This does not necessarly conflict with
- * the general registers (though it can if you're not careful)
- * and so we can use a spot later used to save a general register.
+ * time when an exception is raised until just after the FPU
+ * has been restarted. This does not necessarily conflict with
+ * the general registers (though it can if you're not careful)
+ * and so we can use a spot later used to save a general register.
*
* EF_FLAGS This is just the old EF_MODE. "EF_MODE" isn't a very good name.
*/
-#define EF_SR3 (EF_R0 + 5)
-#define EF_FLAGS EF_MODE
+#define EF_SR3 (EF_R0 + 5)
+#define EF_FLAGS EF_MODE
-#define FLAG_FROM_KERNEL 8 /* this should be in asm.h */
+#define INTSTACK 0 /* To make interrupts use their own stack */
-#define INTSTACK 0 /* To make interupts use their own stack */
-
- text
- align 8
+ text
+ align 8
/***************************************************************************
***************************************************************************
@@ -252,22 +250,22 @@
 ** This is the "exception processing preparation" common to all exception
 ** processing. It is used in the following manner:
**
- ** LABEL(foo_handler)
+ ** LABEL(foo_handler)
** PREP("foo", 11, DEBUG_FOO_BIT, No_SSBR_Stuff, No_Precheck)
** CALL(_trap, T_FOO_FAULT, r31)
** DONE(DEBUG_FOO_BIT)
**
** This defines the exception handler for the "foo" exception.
 ** The arguments to PREP():
- ** NAME - String for debugging (more info later)
- ** NUM - The exception number [see the manual, Table 6-1]
- ** BIT - Bit to check in eh_debug for debugging (more info later)
- ** SSBR_STUFF -
- ** If the exception might leave some bits in the SSBR set,
- ** this should indicate how they are cleared.
- ** FLAG_PRECHECK -
- ** This is for the data access exception only. See it for
- ** more info.
+ ** NAME - String for debugging (more info later)
+ ** NUM - The exception number [see the manual, Table 6-1]
+ ** BIT - Bit to check in eh_debug for debugging (more info later)
+ ** SSBR_STUFF -
+ ** If the exception might leave some bits in the SSBR set,
+ ** this should indicate how they are cleared.
+ ** FLAG_PRECHECK -
+ ** This is for the data access exception only. See it for
+ ** more info.
**
**
** What's in between PREP() and DONE() (usually a CALL) is the actual
@@ -276,118 +274,135 @@
** (which is pointed-to by r31).
**/
-/* This define can replace the xcr instruction XXX smurph */
-#define XCR(DR, SR, CR) ; \
- stcr r13, SR0 ; \
- or r13, r0, SR ; \
- ldcr DR, CR ; \
- stcr r13, CR ; \
- ldcr r13, SR0
-
-/* This define can be used to debug sub routine returns XXX smurph*/
-#define STORE_R1(varname) ; \
- stcr r13, SR0 /* r13 now free */ ; \
- /* save r1 to memory location varname */ ; \
- or.u r13, r0, hi16(varname) ; \
- st r1, r13, lo16(varname) ; \
- ldcr r13, SR0 /* retore r13 */
-
-#define PREP(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK) ; \
- xcr FLAGS, FLAGS, SR1 ; \
- FLAG_PRECHECK ; \
- ; \
- /* the bsr later clobbers r1, so save now */ ; \
- stcr r1, SR2 /* r1 now free */ ; \
- /* set or clear the FLAG_FROM_KERNEL bit */ ; \
- ldcr r1, EPSR ; \
- bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f ; \
- clr FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \
- set FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \
- ; \
- /* get a stack (exception frame) */ ; \
- 1: bsr setup_phase_one ; \
- ; \
- /* TMP2 now free -- use to set EF_VECTOR */ ; \
- or TMP2, r0, NUM ; \
- st TMP2, r31, REG_OFF(EF_VECTOR) ; \
- ; \
- /* Clear any bits in the SSBR (held in TMP) */ ; \
- /* SSBR_STUFF may be empty, though. */ ; \
- SSBR_STUFF ; \
- ; \
- /* call setup_phase_two to restart the FPU */ ; \
- /* and to save all general registers. */ ; \
- bsr setup_phase_two ; \
- ; \
- /* All general regs free -- do any debugging */ ; \
- PREP_DEBUG(BIT, NAME)
+
+#define PREP(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK) ; \
+ xcr FLAGS, FLAGS, SR1 ; \
+ FLAG_PRECHECK ; \
+ ; \
+ /* the bsr later clobbers r1, so save now */ ; \
+ stcr r1, SR2 /* r1 now free */ ; \
+ /* set or clear the FLAG_FROM_KERNEL bit */ ; \
+ ldcr r1, EPSR ; \
+ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f ; \
+ clr FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \
+ set FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \
+ ; \
+ /* get a stack (exception frame) */ ; \
+ 1: bsr setup_phase_one ; \
+ ; \
+ /* TMP2 now free -- use to set EF_VECTOR */ ; \
+ or TMP2, r0, NUM ; \
+ st TMP2, r31, REG_OFF(EF_VECTOR) ; \
+ ; \
+ /* Clear any bits in the SSBR (held in TMP) */ ; \
+ /* SSBR_STUFF may be empty, though. */ ; \
+ SSBR_STUFF ; \
+ ; \
+ /* call setup_phase_two to restart the FPU */ ; \
+ /* and to save all general registers. */ ; \
+ bsr setup_phase_two ; \
+ ; \
+ /* All general regs free -- do any debugging */ ; \
+ PREP_DEBUG(BIT, NAME)
+
+#define PREP2(NAME, NUM, BIT, SSBR_STUFF, FLAG_PRECHECK); \
+ xcr FLAGS, FLAGS, SR1 ; \
+ FLAG_PRECHECK ; \
+ ; \
+ /* the bsr later clobbers r1, so save now */ ; \
+ stcr r1, SR2 /* r1 now free */ ; \
+ /* set or clear the FLAG_FROM_KERNEL bit */ ; \
+ ldcr r1, EPSR ; \
+ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f ; \
+ clr FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \
+ set FLAGS, FLAGS, 1<FLAG_FROM_KERNEL> ; \
+ ; \
+ /* get a stack (exception frame) */ ; \
+ 1: bsr m197_setup_phase_one ; \
+ ; \
+ /* TMP2 now free -- use to set EF_VECTOR */ ; \
+ or TMP2, r0, NUM ; \
+ st TMP2, r31, REG_OFF(EF_VECTOR) ; \
+ ; \
+ /* Clear any bits in the SSBR (held in TMP) */ ; \
+ /* SSBR_STUFF may be empty, though. */ ; \
+ SSBR_STUFF ; \
+ ; \
+ /* call setup_phase_two to restart the FPU */ ; \
+ /* and to save all general registers. */ ; \
+ bsr m197_setup_phase_two ; \
+ ; \
+ /* All general regs free -- do any debugging */ ; \
+ PREP_DEBUG(BIT, NAME)
/* Some defines for use with PREP() */
-#define No_SSBR_Stuff /* empty */
-#define Clear_SSBR_Dest bsr clear_dest_ssbr_bit
-#define No_Precheck /* empty */
+#define No_SSBR_Stuff /* empty */
+#define Clear_SSBR_Dest bsr clear_dest_ssbr_bit
+#define No_Precheck /* empty */
#define Data_Precheck \
- bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, ignore_data_exception
+ bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, ignore_data_exception
+#define M197_Data_Precheck \
+ bb1.n FLAG_IGNORE_DATA_EXCEPTION, FLAGS, m197_ignore_data_exception
#if EH_DEBUG
- /*
- * If we allow debugging, there is a variable "eh_debug"
- * in which there is a bit for each exception. If the bit
- * is set for an exception, debugging information is printed
- * about that exception whenever it occurs.
- *
- * The bits are defined in "asm.h"
- */
- LABEL(_eh_debug) word 0x00000000
-
- /*
- * additional pre-servicing preparation to be done when
- * debugging... check eh_debug and make the call if
- * need be.
- */
- #define PREP_DEBUG(DebugNumber, Name) \
- or.u r2, r0, hi16(_eh_debug) ; \
- ld r3, r2, lo16(_eh_debug) ; \
- bb0 DebugNumber, r3, 4f ; \
- /* call MY_info(ef,thread,flags,kind)*/ ; \
- or r2, r30, r0 ; \
- ldcr r3, SR0 ; \
- ldcr r4, SR1 ; \
- or.u r5, r0, hi16(2f) ; \
- or r5, r5, lo16(2f) ; \
- bsr.n _MY_info ; \
- subu r31, r31, 40 ; \
- br.n 4f ; \
- addu r31, r31, 40 ; \
- data ; \
- 2: string Name ; \
- byte 0 ; \
- align 4 ; \
- text ; \
- 4:
-
-
- /*
- * Post-servicing work to be done.
- * When debugging, check "eh_debug" and call the
- * debug routined if neeed be.
- *
- * Then, return from the interrupt handler.
- */
- #define DONE(DebugNumber) \
- or.u r2, r0, hi16(_eh_debug) ; \
- ld r3, r2, lo16(_eh_debug) ; \
- bb0 DebugNumber, r3, 2f ; \
- ldcr r4, SR1 ; \
- CALL(_MY_info_done, r31, r4) ; \
- 2: br return_from_exception_handler
+ /*
+ * If we allow debugging, there is a variable "eh_debug"
+ * in which there is a bit for each exception. If the bit
+ * is set for an exception, debugging information is printed
+ * about that exception whenever it occurs.
+ *
+ * The bits are defined in "asm.h"
+ */
+LABEL(_eh_debug) word 0x00000000
+
+ /*
+ * additional pre-servicing preparation to be done when
+ * debugging... check eh_debug and make the call if
+ * need be.
+ */
+#define PREP_DEBUG(DebugNumber, Name) \
+ or.u r2, r0, hi16(_eh_debug) ; \
+ ld r3, r2, lo16(_eh_debug) ; \
+ bb0 DebugNumber, r3, 4f ; \
+ /* call MY_info(ef,thread,flags,kind)*/ ; \
+ or r2, r30, r0 ; \
+ ldcr r3, SR0 ; \
+ ldcr r4, SR1 ; \
+ or.u r5, r0, hi16(2f) ; \
+ or r5, r5, lo16(2f) ; \
+ bsr.n _MY_info ; \
+ subu r31, r31, 40 ; \
+ br.n 4f ; \
+ addu r31, r31, 40 ; \
+ data ; \
+ 2: string Name ; \
+ byte 0 ; \
+ align 4 ; \
+ text ; \
+ 4:
+
+
+ /*
+ * Post-servicing work to be done.
+ * When debugging, check "eh_debug" and call the
+ * debug routine if need be.
+ *
+ * Then, return from the interrupt handler.
+ */
+#define DONE(DebugNumber) \
+ or.u r2, r0, hi16(_eh_debug) ; \
+ ld r3, r2, lo16(_eh_debug) ; \
+ bb0 DebugNumber, r3, 2f ; \
+ ldcr r4, SR1 ; \
+ CALL(_MY_info_done, r31, r4) ; \
+2: br return_from_exception_handler
#else
- /*
- * If not debugging, then no debug-prep to do.
- * Also, when you're done, you're done! (no debug check).
- */
- #define PREP_DEBUG(bit, name)
- #define DONE(num) br return_from_exception_handler
+ /*
+ * If not debugging, then no debug-prep to do.
+ * Also, when you're done, you're done! (no debug check).
+ */
+#define PREP_DEBUG(bit, name)
+#define DONE(num) br return_from_exception_handler
#endif
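
So, on an EH_DEBUG kernel, tracing a particular exception is a matter of setting its bit in eh_debug (bit names come from asm.h); a hedged C fragment:

	/* Illustrative only: enable tracing of misaligned-access exceptions. */
	extern unsigned eh_debug;

	void
	trace_misaligned(void)
	{
		eh_debug |= 1 << DEBUG_MISALIGN_BIT;	/* bit defined in asm.h */
	}
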
@@ -397,55 +412,43 @@
/* unknown exception handler */
LABEL(_unknown_handler)
- PREP("unknown", 0, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_UNKNOWNFLT, r30)
- DONE(DEBUG_UNKNOWN_BIT)
+ PREP("unknown", 0, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_UNKNOWN_BIT)
/* interrupt exception handler */
LABEL(_interrupt_handler)
- PREP("interrupt", 1, DEBUG_INTERRUPT_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_ext_int, 1, r30)
- DONE(DEBUG_INTERRUPT_BIT)
+ PREP("interrupt", 1, DEBUG_INTERRUPT_BIT, No_SSBR_Stuff, No_Precheck)
+ /* interrupt_func is set in mvme_bootstrap() */
+ CALL(_trap, T_INT, r30)
+ /*CALLP(_interrupt_func, 1, r30) */
+ DONE(DEBUG_INTERRUPT_BIT)
/* instruction access exception handler */
LABEL(_instruction_access_handler)
- PREP("inst", 2, DEBUG_INSTRUCTION_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_INSTFLT, r30)
-#if 0
- /* done in trap now */
- /*
- * Now, to retry the instruction.
- * Copy the SNIP to the SFIP, clearing the E bit.
- * Copy the SXIP to the SNIP, clearing the E bit.
- */
- ld r1, r30, REG_OFF(EF_SNIP)
- ld r2, r30, REG_OFF(EF_SXIP)
- clr r1, r1, 1<RTE_ERROR_BIT>
- clr r2, r2, 1<RTE_ERROR_BIT>
- st r1, r30, REG_OFF(EF_SFIP)
- st r2, r30, REG_OFF(EF_SNIP)
-#endif /* 0 */
- DONE(DEBUG_INSTRUCTION_BIT)
+ PREP("inst", 2, DEBUG_INSTRUCTION_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_INSTFLT, r30)
+ DONE(DEBUG_INSTRUCTION_BIT)
/*
* data access exception handler --
* See badaddr() below for info about Data_Precheck.
*/
LABEL(_data_exception_handler)
- PREP("data", 3, DEBUG_DATA_BIT, No_SSBR_Stuff, Data_Precheck)
- DONE(DEBUG_DATA_BIT)
+ PREP("data", 3, DEBUG_DATA_BIT, No_SSBR_Stuff, Data_Precheck)
+ DONE(DEBUG_DATA_BIT)
/* misaligned access exception handler */
LABEL(_misaligned_handler)
- PREP("misalign", 4, DEBUG_MISALIGN_BIT, Clear_SSBR_Dest, No_Precheck)
- CALL(_trap, T_MISALGNFLT, r30)
- DONE(DEBUG_MISALIGN_BIT)
+ PREP("misalign", 4, DEBUG_MISALIGN_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_MISALGNFLT, r30)
+ DONE(DEBUG_MISALIGN_BIT)
/* unimplemented opcode exception handler */
LABEL(_unimplemented_handler)
- PREP("unimp", 5, DEBUG_UNIMPLEMENTED_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_ILLFLT, r30)
- DONE(DEBUG_UNIMPLEMENTED_BIT)
+ PREP("unimp", 5, DEBUG_UNIMPLEMENTED_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_ILLFLT, r30)
+ DONE(DEBUG_UNIMPLEMENTED_BIT)
/*
 * Some versions of the chip have a bug whereby false privilege
@@ -453,114 +456,113 @@ LABEL(_unimplemented_handler)
* it is false. If so, just return. The code before PREP handles this....
*/
LABEL(_privilege_handler)
- stcr r1, SR2 /* hold r1 for a moment */
- ldcr r1, SXIP /* look at the sxip... valid bit set? */
- bb1.n RTE_VALID_BIT, r1, 1f /*skip over return if a valid exception*/
- ldcr r1, SR2 /* restore r1 */
- RTE
- 1: PREP("privilege", 6, DEBUG_PRIVILEGE_BIT, Clear_SSBR_Dest, No_Precheck)
- CALL(_trap, T_PRIVINFLT, r30)
- DONE(DEBUG_PRIVILEGE_BIT)
+ stcr r1, SR2 /* hold r1 for a moment */
+ ldcr r1, SXIP /* look at the sxip... valid bit set? */
+ bb1.n RTE_VALID_BIT, r1, 1f /*skip over return if a valid exception*/
+ ldcr r1, SR2 /* restore r1 */
+ RTE
+1: PREP("privilege", 6, DEBUG_PRIVILEGE_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_PRIVINFLT, r30)
+ DONE(DEBUG_PRIVILEGE_BIT)
/*
* I'm not sure what the trap(T_BNDFLT,...) does, but it doesn't send
* a signal to the process...
*/
LABEL(_bounds_handler)
- PREP("bounds", 7, DEBUG_BOUNDS_BIT, Clear_SSBR_Dest, No_Precheck)
- CALL(_trap, T_BNDFLT, r30)
- DONE(DEBUG_BOUNDS_BIT)
+ PREP("bounds", 7, DEBUG_BOUNDS_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_BNDFLT, r30)
+ DONE(DEBUG_BOUNDS_BIT)
/* integer divide-by-zero exception handler */
LABEL(_divide_handler)
- PREP("divide", 8, DEBUG_DIVIDE_BIT, Clear_SSBR_Dest, No_Precheck)
- CALL(_trap, T_ZERODIV, r30)
- DONE(DEBUG_DIVIDE_BIT)
+ PREP("divide", 8, DEBUG_DIVIDE_BIT, Clear_SSBR_Dest, No_Precheck)
+ CALL(_trap, T_ZERODIV, r30)
+ DONE(DEBUG_DIVIDE_BIT)
 /* integer overflow exception handler */
LABEL(_overflow_handler)
- PREP("overflow", 9, DEBUG_OVERFLOW_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_OVFFLT, r30)
- DONE(DEBUG_OVERFLOW_BIT)
+ PREP("overflow", 9, DEBUG_OVERFLOW_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_OVFFLT, r30)
+ DONE(DEBUG_OVERFLOW_BIT)
/* Floating-point precise handler */
#define FPp_SSBR_STUFF bsr clear_FPp_ssbr_bit
LABEL(fp_precise_handler)
- PREP("FPU precise", 114, DEBUG_FPp_BIT, FPp_SSBR_STUFF, No_Precheck)
- CALL(_Xfp_precise, r0, r30) /* call fp_precise(??, exception_frame)*/
- DONE(DEBUG_FPp_BIT)
+ PREP("FPU precise", 114, DEBUG_FPp_BIT, FPp_SSBR_STUFF, No_Precheck)
+ CALL(_m88100_Xfp_precise, r0, r30) /* call fp_precise(??, exception_frame)*/
+ DONE(DEBUG_FPp_BIT)
/* Floating-point imprecise handler */
#define FPi_SSBR_STUFF bsr clear_FPi_ssbr_bit
LABEL(fp_imprecise_handler)
- PREP("FPU imprecise", 115, DEBUG_FPi_BIT, FPi_SSBR_STUFF, No_Precheck)
- CALL(_Xfp_imprecise, r0, r30) /*call fp_imprecise(??,exception_frame)*/
- DONE(DEBUG_FPi_BIT)
+ PREP("FPU imprecise", 115, DEBUG_FPi_BIT, FPi_SSBR_STUFF, No_Precheck)
+ CALL(_Xfp_imprecise, r0, r30) /*call fp_imprecise(??,exception_frame)*/
+ DONE(DEBUG_FPi_BIT)
/* All standard system calls. */
LABEL(_syscall_handler)
- PREP("syscall", 128, DEBUG_SYSCALL_BIT, No_SSBR_Stuff, No_Precheck)
- ld r13, r30, GENREG_OFF(13)
- CALL(_syscall, r13, r30) /* system call no. is in r13 */
- DONE(DEBUG_SYSCALL_BIT)
+ PREP("syscall", 128, DEBUG_SYSCALL_BIT, No_SSBR_Stuff, No_Precheck)
+ ld r13, r30, GENREG_OFF(13)
+ CALL(_syscall, r13, r30) /* system call no. is in r13 */
+ DONE(DEBUG_SYSCALL_BIT)
/* trap 496 comes here */
LABEL(_bugtrap)
- PREP("bugsyscall", 496, DEBUG_BUGCALL_BIT, No_SSBR_Stuff, No_Precheck)
- ld r9, r30, GENREG_OFF(9)
- CALL(_bugsyscall, r9, r30) /* system call no. is in r9 */
- DONE(DEBUG_SYSCALL_BIT)
+ PREP("bugsyscall", 496, DEBUG_BUGCALL_BIT, No_SSBR_Stuff, No_Precheck)
+ ld r9, r30, GENREG_OFF(9)
+ CALL(_bugsyscall, r9, r30) /* system call no. is in r9 */
+ DONE(DEBUG_SYSCALL_BIT)
LABEL(_sigsys)
- PREP("sigsys", 0, DEBUG_SIGSYS_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_SIGSYS, r30)
- DONE(DEBUG_SIGSYS_BIT)
+ PREP("sigsys", 0, DEBUG_SIGSYS_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_SIGSYS, r30)
+ DONE(DEBUG_SIGSYS_BIT)
LABEL(_sigtrap)
- PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_SIGTRAP, r30)
- DONE(DEBUG_SIGTRAP_BIT)
+ PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_SIGTRAP, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
LABEL(_stepbpt)
- PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_STEPBPT, r30)
- DONE(DEBUG_SIGTRAP_BIT)
+ PREP("stepbpt", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_STEPBPT, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
LABEL(_userbpt)
- PREP("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_USERBPT, r30)
- DONE(DEBUG_SIGTRAP_BIT)
+ PREP("userbpt", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_USERBPT, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
#if DDB
LABEL(break)
- PREP("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_KDB_BREAK, r30)
- DONE(DEBUG_BREAK_BIT)
+ PREP("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_KDB_BREAK, r30)
+ DONE(DEBUG_BREAK_BIT)
LABEL(trace)
- PREP("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_KDB_TRACE, r30)
- DONE(DEBUG_TRACE_BIT)
+ PREP("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_KDB_TRACE, r30)
+ DONE(DEBUG_TRACE_BIT)
LABEL(_entry)
- PREP("kdb", 132, DEBUG_KDB_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_KDB_ENTRY, r30)
- DONE(DEBUG_KDB_BIT)
+ PREP("kdb", 132, DEBUG_KDB_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_KDB_ENTRY, r30)
+ DONE(DEBUG_KDB_BIT)
#else /* else not DDB */
LABEL(break)
- PREP("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_UNKNOWNFLT, r30)
- DONE(DEBUG_BREAK_BIT)
+ PREP("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_BREAK_BIT)
LABEL(trace)
- PREP("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_UNKNOWNFLT, r30)
- DONE(DEBUG_TRACE_BIT)
+ PREP("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_TRACE_BIT)
LABEL(_entry)
- PREP("unknown", 132, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
- CALL(_trap, T_UNKNOWNFLT, r30)
- DONE(DEBUG_KDB_BIT)
-#endif /* DDB */
-
+ PREP("unknown", 132, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_KDB_BIT)
+#endif /* DDB */
/*--------------------------------------------------------------------------*/
@@ -578,26 +580,26 @@ LABEL(_userbpt)
LABEL(_error_handler)
/* pick up the slavestack */
or r26, r0, r31 /* save old stack */
- or.u r31, r0, hi16(_intstack_end)
- or r31, r31, lo16(_intstack_end)
+ or.u r31, r0, hi16(_intstack_end)
+ or r31, r31, lo16(_intstack_end)
/* zero the stack, so we'll know what we're lookin' at */
- or.u r27, r0, hi16(_intstack)
- or r27, r27, lo16(_intstack)
- 1: cmp r28, r27, r31
- bb1 ge, r28, 2f /* branch if at the end of the stack */
- st r0, r0, r27
- br.n 1b
- addu r27, r27, 4 /* bump up */
- 2: /* stack has been cleared */
-
- /* ensure that stack is 8-byte aligned */
+ or.u r27, r0, hi16(_intstack)
+ or r27, r27, lo16(_intstack)
+1: cmp r28, r27, r31
+ bb1 ge, r28, 2f /* branch if at the end of the stack */
+ st r0, r0, r27
+ br.n 1b
+ addu r27, r27, 4 /* bump up */
+2: /* stack has been cleared */
+
+ /* ensure that stack is 8-byte aligned */
clr r31, r31, 3<0> /* round down to 8-byte boundary */
- /* create exception frame on stack */
+ /* create exception frame on stack */
subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
- /* save old R31 and other R registers */
+ /* save old R31 and other R registers */
st.d r0 , r31, GENREG_OFF(0)
st.d r2 , r31, GENREG_OFF(2)
st.d r4 , r31, GENREG_OFF(4)
@@ -613,31 +615,32 @@ LABEL(_error_handler)
st.d r24, r31, GENREG_OFF(24)
st r30, r31, GENREG_OFF(30)
st r26, r31, GENREG_OFF(31)
-
- /* save shadow registers (are OLD, though) */
+
+	/* save shadow registers (these are the OLD values in error_handler, though) */
+ ldcr r10, EPSR
+ st r10, r31, REG_OFF(EF_EPSR)
ldcr r10, SXIP
st r10, r31, REG_OFF(EF_SXIP)
- ldcr r10, SFIP
- st r10, r31, REG_OFF(EF_SFIP)
ldcr r10, SNIP
st r10, r31, REG_OFF(EF_SNIP)
+ ldcr r10, SR1
+ st r10, r31, REG_OFF(EF_MODE)
+ ldcr r10, SFIP
+ st r10, r31, REG_OFF(EF_SFIP)
ldcr r10, SSBR
st r10, r31, REG_OFF(EF_SSBR)
- ldcr r10, EPSR
- st r10, r31, REG_OFF(EF_EPSR)
-
+ stcr r0, SSBR /* won't want shadow bits bothering us later */
ldcr r10, DMT0
st r10, r31, REG_OFF(EF_DMT0)
ldcr r11, DMD0
st r11, r31, REG_OFF(EF_DMD0)
ldcr r12, DMA0
st r12, r31, REG_OFF(EF_DMA0)
-
ldcr r10, DMT1
st r10, r31, REG_OFF(EF_DMT1)
- tb1 0, r0, 0
+ FLUSH_PIPELINE
ldcr r11, DMD1
- st r11, r31, REG_OFF(EF_DMD1)
+ st r11, r31, REG_OFF(EF_DMD1)
ldcr r12, DMA1
st r12, r31, REG_OFF(EF_DMA1)
@@ -647,10 +650,7 @@ LABEL(_error_handler)
st r11, r31, REG_OFF(EF_DMD2)
ldcr r12, DMA2
st r12, r31, REG_OFF(EF_DMA2)
-
- ldcr r10, SR1
- st r10, r31, REG_OFF(EF_MODE)
-
+
/* shove sr2 into EF_FPLS1 */
ldcr r10, SR2
st r10, r31, REG_OFF(EF_FPLS1)
@@ -662,26 +662,34 @@ LABEL(_error_handler)
/* error vector is zippo numero el'zeroooo */
st r0, r31, REG_OFF(EF_VECTOR)
- stcr r0, SSBR /* won't want shadow bits bothering us later */
-
+#ifdef MVME188
+#define IST_REG 0xfff84040 /* interrupt status addr */
+ /* check if it's a mvme188 */
+ or.u r10, r0, hi16(_cputyp)
+ ld r11, r10, lo16(_cputyp)
+ cmp r10, r11, 0x188
+ bb1 ne, r10, 3f
+ or.u r10, r0, hi16(IST_REG) /* interrupt status register */
+ ld r11, r10, lo16(IST_REG)
+ st r11, r31, REG_OFF(EF_MASK) /* put in EF_MASK for regdump */
+#endif /* MVME188 */
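
In C terms, the MVME188 fragment just added does roughly the following (illustrative; treating the exception frame as an array of words indexed by EF_MASK is an assumption):

	#define IST_REG 0xfff84040	/* MVME188 interrupt status register */

	/* Latch pending-interrupt status into the frame so the register
	 * dump can show what was pending when the error was taken. */
	void
	save_ist(unsigned *frame)
	{
		extern int cputyp;

		if (cputyp == 0x188)
			frame[EF_MASK] = *(volatile unsigned *)IST_REG;
	}
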
/*
* Cheap way to enable FPU and start shadowing again.
*/
- ldcr r10, PSR
+3: ldcr r10, PSR
clr r10, r10, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
clr r10, r10, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
-
stcr r10, PSR /* bang */
FLUSH_PIPELINE
/* put pointer to regs into r30... r31 will become a simple stack */
or r30, r31, r0
- subu r31, r31, 0x10 /* make some breathing space */
- st r30, r31, 0x0c /* store frame pointer on the st */
- st r30, r31, 0x08 /* store again for the debugger to recognize */
- or.u r20, r0, hi16(0x87654321)
- or r20, r20, lo16(0x87654321)
+ subu r31, r31, 0x10 /* make some breathing space */
+ st r30, r31, 0x0c /* store frame pointer on the st */
+ st r30, r31, 0x08 /* store again for the debugger to recognize */
+ or.u r20, r0, hi16(0x87654321)
+ or r20, r20, lo16(0x87654321)
st r20, r31, 0x04
st r20, r31, 0x00
@@ -689,20 +697,12 @@ LABEL(_error_handler)
 /* TURN INTERRUPTS back on */
ldcr r1, PSR
- clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
+ clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
stcr r1, PSR
FLUSH_PIPELINE
LABEL(_error_loop) bsr _error_loop
- /* never returns*/
-
-/*
- *----------------------------------------------------------------------------
- *----------------------------------------------------------------------------
- *----------------------------------------------------------------------------
- */
-
-/*--------------------------------------------------------------------------*/
+	/* never returns */
/*
* The reset exception handler.
@@ -726,53 +726,56 @@ LABEL(_error_loop) bsr _error_loop
LABEL(_reset_handler)
/* pick up the slavestack */
or r26, r0, r31 /* save old stack */
- or.u r31, r0, hi16(_intstack_end)
- or r31, r31, lo16(_intstack_end)
+ or.u r31, r0, hi16(_intstack_end)
+ or r31, r31, lo16(_intstack_end)
/* zero the stack, so we'll know what we're lookin' at */
- or.u r27, r0, hi16(_intstack)
- or r27, r27, lo16(_intstack)
- 1: cmp r28, r27, r31
- bb1 ge, r28, 2f /* branch if at the end of the stack */
- st r0, r0, r27
- br.n 1b
- addu r27, r27, 4 /* bump up */
- 2: /* stack has been cleared */
+ or.u r27, r0, hi16(_intstack)
+ or r27, r27, lo16(_intstack)
+1: cmp r28, r27, r31
+ bb1 ge, r28, 2f /* branch if at the end of the stack */
+ st r0, r0, r27
+ br.n 1b
+ addu r27, r27, 4 /* bump up */
+2: /* stack has been cleared */
/* ensure that stack is 8-byte aligned */
- clr r31, r31, 3<0> /* round down to 8-byte boundary */
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
/* create exception frame on stack */
- subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
/* save old R31 and other R registers */
- st.d r0 , r31, GENREG_OFF(0)
- st.d r2 , r31, GENREG_OFF(2)
- st.d r4 , r31, GENREG_OFF(4)
- st.d r6 , r31, GENREG_OFF(6)
- st.d r8 , r31, GENREG_OFF(8)
- st.d r10, r31, GENREG_OFF(10)
- st.d r12, r31, GENREG_OFF(12)
- st.d r14, r31, GENREG_OFF(14)
- st.d r16, r31, GENREG_OFF(16)
- st.d r18, r31, GENREG_OFF(18)
- st.d r20, r31, GENREG_OFF(20)
- st.d r22, r31, GENREG_OFF(22)
- st.d r24, r31, GENREG_OFF(24)
- st r30, r31, GENREG_OFF(30)
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st r30, r31, GENREG_OFF(30)
st r26, r31, GENREG_OFF(31)
/* save shadow registers */
+ ldcr r10, EPSR
+ st r10, r31, REG_OFF(EF_EPSR)
ldcr r10, SXIP
st r10, r31, REG_OFF(EF_SXIP)
- ldcr r10, SFIP
- st r10, r31, REG_OFF(EF_SFIP)
ldcr r10, SNIP
st r10, r31, REG_OFF(EF_SNIP)
+ ldcr r10, SR1
+ st r10, r31, REG_OFF(EF_MODE)
+ ldcr r10, SFIP
+ st r10, r31, REG_OFF(EF_SFIP)
ldcr r10, SSBR
st r10, r31, REG_OFF(EF_SSBR)
- ldcr r10, EPSR
- st r10, r31, REG_OFF(EF_EPSR)
+ stcr r0, SSBR /* won't want shadow bits bothering us later */
ldcr r10, DMT0
st r10, r31, REG_OFF(EF_DMT0)
@@ -783,9 +786,9 @@ LABEL(_reset_handler)
ldcr r10, DMT1
st r10, r31, REG_OFF(EF_DMT1)
- tb1 0, r0, 0
+ FLUSH_PIPELINE
ldcr r11, DMD1
- st r11, r31, REG_OFF(EF_DMD1)
+ st r11, r31, REG_OFF(EF_DMD1)
ldcr r12, DMA1
st r12, r31, REG_OFF(EF_DMA1)
@@ -796,9 +799,6 @@ LABEL(_reset_handler)
ldcr r12, DMA2
st r12, r31, REG_OFF(EF_DMA2)
- ldcr r10, SR1
- st r10, r31, REG_OFF(EF_MODE)
-
/* shove sr2 into EF_FPLS1 */
ldcr r10, SR2
st r10, r31, REG_OFF(EF_FPLS1)
@@ -810,43 +810,37 @@ LABEL(_reset_handler)
/* error vector is zippo numero el'zeroooo */
st r0, r31, REG_OFF(EF_VECTOR)
- stcr r0, SSBR /* won't want shadow bits bothering us later */
-
/*
* Cheap way to enable FPU and start shadowing again.
*/
ldcr r10, PSR
clr r10, r10, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
clr r10, r10, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
-
+
stcr r10, PSR /* bang */
FLUSH_PIPELINE
/* put pointer to regs into r30... r31 will become a simple stack */
or r30, r31, r0
- subu r31, r31, 0x10 /* make some breathing space */
- st r30, r31, 0x0c /* store frame pointer on the st */
- st r30, r31, 0x08 /* store again for the debugger to recognize */
- or.u r20, r0, hi16(0x87654321)
- or r20, r20, lo16(0x87654321)
+ subu r31, r31, 0x10 /* make some breathing space */
+ st r30, r31, 0x0c /* store frame pointer on the st */
+ st r30, r31, 0x08 /* store again for the debugger to recognize */
+ or.u r20, r0, hi16(0x87654321)
+ or r20, r20, lo16(0x87654321)
st r20, r31, 0x04
st r20, r31, 0x00
CALL(_error_reset, r30, r30)
- /* TURN INTERUPTS back on */
- ldcr r1, PSR
- clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r1, PSR
- FLUSH_PIPELINE
+	/* TURN INTERRUPTS back on */
+ ldcr r1, PSR
+ clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r1, PSR
+ FLUSH_PIPELINE
LABEL(_error_loop2) bsr _error_loop2
- /* never returns*/
-
-/*
- *----------------------------------------------------------------------------
-*/
+/* never returns */
/*
 * This is part of badaddr (below).
@@ -854,12 +848,12 @@ LABEL(_error_loop2) bsr _error_loop2
_LABEL(ignore_data_exception)
/******************************************************\
* SR0: pointer to the current thread structure *
- * SR1: previous FLAGS reg *
+ * SR1: previous FLAGS reg *
* SR2: free *
 * SR3: must preserve *
* FLAGS: CPU status flags *
\******************************************************/
- xcr FLAGS, FLAGS, SR1 /* replace SR1, FLAGS */
+ xcr FLAGS, FLAGS, SR1 /* replace SR1, FLAGS */
/*
* For more info, see badaddr() below.
@@ -873,13 +867,46 @@ _LABEL(ignore_data_exception)
/* the "+2" below is to set the VALID bit. */
or.u r2, r0, hi16(badaddr__return_nonzero + 2)
- or r2, r2, lo16(badaddr__return_nonzero + 2)
- stcr r2, SNIP /* Make it the next instruction to execute */
+ or r2, r2, lo16(badaddr__return_nonzero + 2)
+ stcr r2, SNIP /* Make it the next instruction to execute */
+
addu r2, r2, 4
- stcr r2, SFIP /* and the next one after that, too. */
- stcr r0, SSBR /* make the scoreboard happy. */
+ stcr r2, SFIP /* and the next one after that, too. */
+ stcr r0, SSBR /* make the scoreboard happy. */
+1:
+
+ /* the following jumps to "badaddr__return_nonzero" in below */
+ NOP
+ RTE
+/*
+ * This is part of badaddr (below).
+ */
+_LABEL(m197_ignore_data_exception)
+ /******************************************************\
+ * SR0: pointer to the current thread structure *
+ * SR1: previous FLAGS reg *
+ * SR2: free *
+ * SR3: must preserve *
+ * FLAGS: CPU status flags *
+ \******************************************************/
+ xcr FLAGS, FLAGS, SR1 /* replace SR1, FLAGS */
+
+ /*
+ * For more info, see badaddr() below.
+ *
+ * We just want to jump to "badaddr__return_nonzero" below.
+ *
+ * We don't worry about trashing R2 here because we're
+ * jumping back to the function badaddr() where we're allowed
+ * to blast r2..r9 as we see fit.
+ */
- /* the following jumps to "badaddr__return_nonzero" in below */
+ or.u r2, r0, hi16(badaddr__return_nonzero)
+ or r2, r2, lo16(badaddr__return_nonzero)
+ stcr r2, SXIP /* Make it the next instruction to execute */
+
+ /* the following jumps to "badaddr__return_nonzero" in below */
+ NOP
RTE
/*
@@ -904,10 +931,10 @@ _LABEL(ignore_data_exception)
*/
LABEL(_badaddr)
- /*
- * Disable interrupts ... don't want a context switch while we're
- * doing this! Also, save the old PSR in R8 to restore later.
- */
+ /*
+ * Disable interrupts ... don't want a context switch while we're
+ * doing this! Also, save the old PSR in R8 to restore later.
+ */
ldcr r8, PSR
set r4, r8, 1<PSR_INTERRUPT_DISABLE_BIT>
FLUSH_PIPELINE
@@ -921,76 +948,76 @@ LABEL(_badaddr)
* If it's a word we're doing, do that here. Otherwise,
* see if it's a halfword.....
*/
- sub r6, r3, 4
+ sub r6, r3, 4
bcnd.n ne0, r6, badaddr__maybe_halfword
- stcr r5, SR1
+ stcr r5, SR1
FLUSH_PIPELINE
/*
* It's a bad address if it's misaligned.
*/
- bb1 0, r2, badaddr__return_nonzero
- bb1 1, r2, badaddr__return_nonzero
+ bb1 0, r2, badaddr__return_nonzero
+ bb1 1, r2, badaddr__return_nonzero
/*
* The next line will either fault or not. If it faults, execution
* will go to: data_access_handler (see above)
* and then to: ignore_data_exception (see above)
* and then to: badaddr__return_nonzero (see below)
* which will return to the calling function.
- *
+ *
* If there is no fault, execution just continues as normal.
*/
- ld r5, r2, 0
+ ld r5, r2, 0
FLUSH_PIPELINE
- br.n badaddr__return
- or r2, r0, r0 /* indicate a zero (address not bad) return.*/
+ br.n badaddr__return
+ or r2, r0, r0 /* indicate a zero (address not bad) return.*/
- badaddr__maybe_halfword:
+badaddr__maybe_halfword:
/* More or less like the code for checking a word above */
- sub r6, r3, 2
- bcnd ne0, r6, badaddr__maybe_byte
+ sub r6, r3, 2
+ bcnd ne0, r6, badaddr__maybe_byte
/* it's bad if it's misaligned */
- bb1 0, r2, badaddr__return_nonzero
+ bb1 0, r2, badaddr__return_nonzero
FLUSH_PIPELINE
- ld.h r5, r2, 0
+ ld.h r5, r2, 0
FLUSH_PIPELINE
- br.n badaddr__return
- or r2, r0, r0
+ br.n badaddr__return
+ or r2, r0, r0
- badaddr__maybe_byte:
+badaddr__maybe_byte:
/* More or less like the code for checking a word above */
- sub r6, r3, 1
- bcnd ne0, r6, badaddr__unknown_size
+ sub r6, r3, 1
+ bcnd ne0, r6, badaddr__unknown_size
FLUSH_PIPELINE
- ld.b r5, r2, 0
+ ld.b r5, r2, 0
FLUSH_PIPELINE
- br.n badaddr__return
- or r2, r0, r0
- badaddr__unknown_size:
+ br.n badaddr__return
+ or r2, r0, r0
+badaddr__unknown_size:
#ifndef NDEBUG
data
- 1: string "bad length (%d) to badaddr() from 0x%x\n\000"
+1: string "bad length (%d) to badaddr() from 0x%x\n\000"
text
- or.u r2, r0, hi16(1b)
- or r2, r2, lo16(1b)
- or r4, r0, r1
- bsr _printf
- or.u r2, r0, hi16(1b)
- or r2, r2, lo16(1b)
- bsr _panic
+ or.u r2, r0, hi16(1b)
+ or r2, r2, lo16(1b)
+ or r4, r0, r1
+ bsr _printf
+ or.u r2, r0, hi16(1b)
+ or r2, r2, lo16(1b)
+ bsr _panic
/*NOTREACHED*/
#endif
_LABEL(badaddr__return_nonzero)
- or r2, r0, 1
+ or r2, r0, 1
/* fall through to badaddr__return */
_LABEL(badaddr__return)
- ldcr r4, SR1
- clr r4, r4, 1<FLAG_IGNORE_DATA_EXCEPTION>
- stcr r4, SR1
+ ldcr r4, SR1
+ clr r4, r4, 1<FLAG_IGNORE_DATA_EXCEPTION>
+ stcr r4, SR1
/*
* Restore the PSR to what it was before.
@@ -1000,9 +1027,8 @@ _LABEL(badaddr__return)
* where we saved it.
*/
FLUSH_PIPELINE
- stcr r8, PSR
- jmp r1
-
+ stcr r8, PSR
+ jmp r1
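
From C, badaddr() is the usual way to probe possibly-absent hardware before touching it; the register usage above (address in r2, size in r3, result in r2) implies roughly this interface (a sketch, not the declared prototype):

	int badaddr(unsigned addr, int size);	/* size is 1, 2 or 4 */

	/* Returns nonzero if the device's first register word is readable. */
	int
	dev_present(unsigned regbase)
	{
		return (badaddr(regbase, 4) == 0);	/* 0 means access is safe */
	}
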
/*
******************************************************************************
@@ -1012,746 +1038,802 @@ _LABEL(badaddr__return)
LABEL(setup_phase_one)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread (if any, null if not) *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: saved copy of exception-time r1 *
- * SR3: must be preserved .. may be the exception-time stack *
- * r1: return address to calling exception handler *
- * FLAGS: CPU status flags *
- *************************************************** *
- * immediate goal: *
- * Decide where we're going to put the exception frame. *
- * Might be at the end of R31, SR3, or the thread's *
- * pcb. *
- \***************************************************************/
-
- /* Check if we are coming in from a FPU restart exception.
- If so, the pcb will be in SR3 */
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: saved copy of exception-time r1 *
+ * SR3: must be preserved .. may be the exception-time stack *
+ * r1: return address to calling exception handler *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * Decide where we're going to put the exception frame. *
+ * Might be at the end of R31, SR3, or the thread's *
+ * pcb. *
+ \***************************************************************/
+
+	/* Check if we are coming in from an FPU restart exception.
+ If so, the pcb will be in SR3 */
NOP
- xcr r1, r1, SR2
- /*xcr r1, r1, SR2*/
+ xcr r1, r1, SR2
NOP
NOP
NOP
bb1 FLAG_ENABLING_FPU, FLAGS, use_SR3_pcb
- /*xcr r1, r1, SR0*/
- /* are we coming in from user mode? If so, pick up thread pcb */
+ /* are we coming in from user mode? If so, pick up thread pcb */
bb0 FLAG_FROM_KERNEL, FLAGS, pickup_stack
- /* Interrupt in kernel mode, not FPU restart */
- _LABEL(already_on_kernel_stack)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread (if any, null if not) *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: return address to the calling exception handler *
- * SR3: must be preserved; may be important for other exceptions *
- * FLAGS: CPU status flags *
- *************************************************** *
- * immediate goal: *
- * We're already on the kernel stack, but not having *
- * needed to use SR3. We can just make room on the *
- * stack (r31) for our exception frame. *
- \***************************************************************/
- subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
- st FLAGS, r31, REG_OFF(EF_FLAGS) /* save flags */
- st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
-
- ldcr r1, SR3 /* save previous SR3 */
- st r1, r31, REG_OFF(EF_SR3)
-
- addu r1, r31, SIZEOF_EF /* save previous r31 */
+ /* Interrupt in kernel mode, not FPU restart */
+_LABEL(already_on_kernel_stack)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: must be preserved; may be important for other exceptions *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * We're already on the kernel stack, but not having *
+ * needed to use SR3. We can just make room on the *
+ * stack (r31) for our exception frame. *
+ \***************************************************************/
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+ st FLAGS,r31, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
+
+ ldcr r1, SR3 /* save previous SR3 */
+ st r1, r31, REG_OFF(EF_SR3)
+
+ addu r1, r31, SIZEOF_EF /* save previous r31 */
br.n have_pcb
- st r1, r31, GENREG_OFF(31)
-
-
- _LABEL(use_SR3_pcb)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread (if any, null if not) *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: return address to the calling exception handler *
- * SR3: must be preserved; exception-time stack pointer *
- * FLAGS: CPU status flags *
- *************************************************** *
- * immediate goal: *
- * An exception occured while enabling the FPU. Since r31 *
- * is the user's r31 while enabling the FPU, we had put *
- * our pcb pointer into SR3, so make room from *
- * there for our stack pointer. *
- * We need to check if SR3 is the old stack pointer or the *
- * pointer off to the user pcb. If it pointing to the user *
- * pcb, we need to pick up the kernel stack. Otherwise *
- * we need to allocate a frame upon it. *
- * We look at the EPSR to see if it was from user mode *
- * Unfortunately, we have no registers free at the moment *
- * But we know register 0 in the pcb frame will always be *
- * zero, so we can use it as scratch storage. *
- * *
- * *
- \***************************************************************/
- xcr r30, r30, SR3 /* r30 = old exception frame */
- st r1, r30, GENREG_OFF(0) /* free up r1 */
- ld r1, r30, REG_OFF(EF_EPSR) /* get back the epsr */
- bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f /* if user mode */
- ld r1, r30, GENREG_OFF(0) /* restore r1 */
- /* we were in kernel mode - dump frame upon the stack */
- st r0, r30, GENREG_OFF(0) /* repair old frame */
- subu r30, r30, SIZEOF_EF /* r30 now our E.F. */
- st FLAGS, r30, REG_OFF(EF_FLAGS) /* save flags */
- st r1, r30, GENREG_OFF(1) /* save prev. r1 (now r1 free) */
-
- st r31, r30, GENREG_OFF(31) /* save previous r31 */
- or r31, r0, r30 /* make r31 our pointer. */
- addu r30, r30, SIZEOF_EF /* r30 now has previous SR3 */
- st r30, r31, REG_OFF(EF_SR3) /* save previous SR3 */
+ st r1, r31, GENREG_OFF(31)
+
+
+_LABEL(use_SR3_pcb)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: must be preserved; exception-time stack pointer *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+	 * An exception occurred while enabling the FPU. Since r31	*
+	 * is the user's r31 while enabling the FPU, we had put	*
+	 * our pcb pointer into SR3, so make room from there	 	*
+	 * for our stack pointer.				 	*
+	 * We need to check if SR3 is the old stack pointer or the	*
+	 * pointer off to the user pcb. If it is pointing to the	*
+	 * user pcb, we need to pick up the kernel stack. Otherwise	*
+	 * we need to allocate a frame upon it.			 	*
+	 * We look at the EPSR to see if it was from user mode.	 	*
+	 * Unfortunately, we have no registers free at the moment,	*
+	 * but we know register 0 in the pcb frame will always be	*
+	 * zero, so we can use it as scratch storage.		 	*
+ * *
+ * *
+ \***************************************************************/
+ xcr r30, r30, SR3 /* r30 = old exception frame */
+ st r1, r30, GENREG_OFF(0) /* free up r1 */
+ ld r1, r30, REG_OFF(EF_EPSR) /* get back the epsr */
+ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f /* if user mode */
+ ld r1, r30, GENREG_OFF(0) /* restore r1 */
+ /* we were in kernel mode - dump frame upon the stack */
+ st r0, r30, GENREG_OFF(0) /* repair old frame */
+ subu r30, r30, SIZEOF_EF /* r30 now our E.F. */
+	st	FLAGS, r30, REG_OFF(EF_FLAGS)	/* save flags */
+ st r1, r30, GENREG_OFF(1) /* save prev. r1 (now r1 free) */
+
+ st r31, r30, GENREG_OFF(31) /* save previous r31 */
+ or r31, r0, r30 /* make r31 our pointer. */
+ addu r30, r30, SIZEOF_EF /* r30 now has previous SR3 */
+ st r30, r31, REG_OFF(EF_SR3) /* save previous SR3 */
br.n have_pcb
- xcr r30, r30, SR3 /* restore r30 */
- 1:
- /* we took an exception while restarting the FPU from user space.
- Consequently, we never picked up a stack. Do so now.
- R1 is currently free (saved in the exception frame pointed at by
- r30) */
- or.u r1, r0, hi16(_kstack)
- ld r1, r1, lo16(_kstack)
- addu r1, r1, USIZE-SIZEOF_EF
- st FLAGS, r1, REG_OFF(EF_FLAGS) /* store flags */
- st r31, r1, GENREG_OFF(31) /* store r31 - now free */
- st r30, r1, REG_OFF(EF_SR3) /* store old SR3 (pcb) */
- or r31, r1, r0 /* make r31 our exception frame pointer */
- ld r1, r30, GENREG_OFF(0) /* restore old r1 */
- st r0, r30, GENREG_OFF(0) /* repair that frame */
- st r1, r31, GENREG_OFF(1) /* store r1 in its proper place */
+ xcr r30, r30, SR3 /* restore r30 */
+1:
+ /* we took an exception while restarting the FPU from user space.
+ * Consequently, we never picked up a stack. Do so now.
+ * R1 is currently free (saved in the exception frame pointed at by
+ * r30) */
+ or.u r1, r0, hi16(_kstack)
+ ld r1, r1, lo16(_kstack)
+ addu r1, r1, USIZE-SIZEOF_EF
+	st	FLAGS, r1, REG_OFF(EF_FLAGS)	/* store flags */
+ st r31, r1, GENREG_OFF(31) /* store r31 - now free */
+ st r30, r1, REG_OFF(EF_SR3) /* store old SR3 (pcb) */
+ or r31, r1, r0 /* make r31 our exception frame pointer */
+ ld r1, r30, GENREG_OFF(0) /* restore old r1 */
+ st r0, r30, GENREG_OFF(0) /* repair that frame */
+ st r1, r31, GENREG_OFF(1) /* store r1 in its proper place */
br.n have_pcb
- xcr r30, r30, SR3 /* restore r30 */
-
- _LABEL(pickup_stack)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: return address to the calling exception handler *
- * SR3: free *
- * FLAGS: CPU status flags *
- *************************************************** *
- * immediate goal: *
- * Since we're servicing an exception from user mode, we *
- * know that SR3 is free. We use it to free up a temp. *
- * register to be used in getting the thread's pcb *
- \***************************************************************/
- stcr r31, SR3 /* save previous r31 */
-
- /* switch to the thread's kernel stack. */
- or.u r31, r0, hi16(_curpcb)
- ld r31, r31, lo16(_curpcb)
- addu r31, r31, PCB_USER_STATE /* point to user save area */
- st FLAGS, r31, REG_OFF(EF_FLAGS) /* save flags */
- st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
- ldcr r1, SR3 /* save previous r31 */
- st r1, r31, GENREG_OFF(31)
- /*FALLTHROUGH */
-
- _LABEL(have_pcb)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: return address to the calling exception handler *
- * SR3: free *
- * r1: free *
- * FLAGS: CPU status flags *
- * r31: our exception frame *
- * Valid in the exception frame: *
- * Exception-time r1, r31, FLAGS. *
- * Exception SR3, if appropriate. *
- *************************************************** *
- * immediate goal: *
- * Save the shadow registers that need to be saved to *
- * the exception frame. *
- \***************************************************************/
- stcr TMP, SR3 /* free up TMP, TMP2, TMP3 */
+ xcr r30, r30, SR3 /* restore r30 */
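+	/*
+	 * Summary of the use_SR3_pcb paths above, as a hedged C sketch:
+	 *	if (old_ef->epsr & SUPERVISOR_MODE)
+	 *		build the frame below the old SR3 frame;
+	 *	else
+	 *		build it at _kstack + USIZE - SIZEOF_EF;
+	 */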
+
+_LABEL(pickup_stack)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * Since we're servicing an exception from user mode, we *
+ * know that SR3 is free. We use it to free up a temp. *
+ * register to be used in getting the thread's pcb *
+ \***************************************************************/
+ stcr r31, SR3 /* save previous r31 */
+
+ /* switch to the thread's kernel stack. */
+ or.u r31, r0, hi16(_curpcb)
+ ld r31, r31, lo16(_curpcb)
+ addu r31, r31, PCB_USER_STATE /* point to user save area */
+	st	FLAGS, r31, REG_OFF(EF_FLAGS)	/* save flags */
+ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
+ ldcr r1, SR3 /* save previous r31 */
+ st r1, r31, GENREG_OFF(31)
+ /*FALLTHROUGH */
+
+_LABEL(have_pcb)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * r1: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * Save the shadow registers that need to be saved to *
+ * the exception frame. *
+ \***************************************************************/
+ stcr TMP, SR3 /* free up TMP, TMP2, TMP3 */
SAVE_TMP2
SAVE_TMP3
- /* save some exception-time registers to the exception frame */
- ldcr TMP, EPSR
- ldcr TMP2, SFIP
+ /* save some exception-time registers to the exception frame */
+ ldcr TMP, EPSR
+ st TMP, r31, REG_OFF(EF_EPSR)
ldcr TMP3, SNIP
- st TMP, r31, REG_OFF(EF_EPSR)
- st TMP2, r31, REG_OFF(EF_SFIP)
- st TMP3, r31, REG_OFF(EF_SNIP)
-
- /*
- * Save Pbus fault status register from data and inst CMMU.
- */
-
- or.u TMP, r0, hi16(CMMU_I)
- ld TMP2, TMP, lo16(CMMU_I) + 0x108
- st TMP2, r31, REG_OFF(EF_IPFSR)
- or.u TMP, r0, hi16(CMMU_D)
- ld TMP2, TMP, lo16(CMMU_D) + 0x108
- st TMP2, r31, REG_OFF(EF_DPFSR)
-
- ldcr TMP, SSBR
+ st TMP3, r31, REG_OFF(EF_SNIP)
+ ldcr TMP2, SFIP
+ st TMP2, r31, REG_OFF(EF_SFIP)
+
+ /*
+ * Save Pbus fault status register from data and inst CMMU.
+ */
+#ifdef MVME188
+	/* check if it's an MVME188 */
+ or.u TMP, r0, hi16(_cputyp)
+ ld TMP2, TMP, lo16(_cputyp)
+ cmp TMP, TMP2, 0x188
+ bb1 ne, TMP, 4f
+ ldcr TMP, SR1
+ mak TMP, TMP, FLAG_CPU_FIELD_WIDTH<2> /* TMP = cpu# */
+ cmp TMP2, TMP, 0x0 /* CPU0 ? */
+ bb1 ne, TMP2, 1f
+ /* must be CPU0 */
+ or.u TMP, r0, hi16(VME_CMMU_I0)
+ ld TMP2, TMP, lo16(VME_CMMU_I0) + 0x108
+ st TMP2, r31, REG_OFF(EF_IPFSR)
+ or.u TMP, r0, hi16(VME_CMMU_D0)
+ ld TMP2, TMP, lo16(VME_CMMU_D0) + 0x108
+ st TMP2, r31, REG_OFF(EF_DPFSR)
+ br pfsr_done
+1:
+ cmp TMP2, TMP, 0x1 /* CPU1 ? */
+ bb1 ne, TMP2, 2f
+ /* must be CPU1 */
+ or.u TMP, r0, hi16(VME_CMMU_I1)
+ ld TMP2, TMP, lo16(VME_CMMU_I1) + 0x108
+ st TMP2, r31, REG_OFF(EF_IPFSR)
+ or.u TMP, r0, hi16(VME_CMMU_D1)
+ ld TMP2, TMP, lo16(VME_CMMU_D1) + 0x108
+ st TMP2, r31, REG_OFF(EF_DPFSR)
+ br pfsr_done
+2:
+ cmp TMP2, TMP, 0x2 /* CPU2 ? */
+ bb1 ne, TMP2, 3f
+ /* must be CPU2 */
+ or.u TMP, r0, hi16(VME_CMMU_I2)
+ ld TMP2, TMP, lo16(VME_CMMU_I2) + 0x108
+ st TMP2, r31, REG_OFF(EF_IPFSR)
+ or.u TMP, r0, hi16(VME_CMMU_D2)
+ ld TMP2, TMP, lo16(VME_CMMU_D2) + 0x108
+ st TMP2, r31, REG_OFF(EF_DPFSR)
+ br pfsr_done
+3:
+ /* must be CPU3 */
+ or.u TMP, r0, hi16(VME_CMMU_I3)
+ ld TMP2, TMP, lo16(VME_CMMU_I3) + 0x108
+ st TMP2, r31, REG_OFF(EF_IPFSR)
+ or.u TMP, r0, hi16(VME_CMMU_D3)
+ ld TMP2, TMP, lo16(VME_CMMU_D3) + 0x108
+ st TMP2, r31, REG_OFF(EF_DPFSR)
+ br pfsr_done
+4:
+#endif /* MVME188 */
+ /* it's a single processor SBC */
+ or.u TMP, r0, hi16(SBC_CMMU_I)
+ ld TMP2, TMP, lo16(SBC_CMMU_I) + 0x108
+ st TMP2, r31, REG_OFF(EF_IPFSR)
+ or.u TMP, r0, hi16(SBC_CMMU_D)
+ ld TMP2, TMP, lo16(SBC_CMMU_D) + 0x108
+ st TMP2, r31, REG_OFF(EF_DPFSR)
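+	/*
+	 * The PFSR dispatch above, condensed into a C-style sketch
+	 * (array notation is illustrative; cpu is the CPU number
+	 * extracted from the SR1 flags):
+	 *	base_i = is_mvme188 ? VME_CMMU_I[cpu] : SBC_CMMU_I;
+	 *	base_d = is_mvme188 ? VME_CMMU_D[cpu] : SBC_CMMU_D;
+	 *	ef->ipfsr = *(base_i + 0x108);
+	 *	ef->dpfsr = *(base_d + 0x108);
+	 */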
+
+_LABEL(pfsr_done)
+ ldcr TMP, SSBR
ldcr TMP2, SXIP
ldcr TMP3, DMT0
- st TMP2, r31, REG_OFF(EF_SXIP)
-
+ st TMP2, r31, REG_OFF(EF_SXIP)
+
#if 0
- /*
- * The following is a kludge so that
- * a core file will have a copy of
- * DMT0 so that 'sim' can display it
- * correctly.
- * After a data fault has been noticed,
- * the real EF_DTM0 is cleared, so I need
- * to throw this somewhere.
- * There's no special reason I chose this
- * register (FPIT)... it's just one of many
- * for which it causes no pain to do this.
- */
- st TMP3, r31, REG_OFF(EF_FPIT)
+ /*
+ * The following is a kludge so that
+ * a core file will have a copy of
+ * DMT0 so that 'sim' can display it
+ * correctly.
+ * After a data fault has been noticed,
+ * the real EF_DTM0 is cleared, so I need
+ * to throw this somewhere.
+ * There's no special reason I chose this
+ * register (FPIT)... it's just one of many
+ * for which it causes no pain to do this.
+ */
+ st TMP3, r31, REG_OFF(EF_FPIT)
#endif
- /*
- * The above shadow registers are obligatory for any and all
- * exceptions. Now, if the data access pipeline is not clear,
- * we must save the DMx shadow registers, as well as clear
- * the appropriate SSBR bits for the destination registers of
- * loads or xmems.
- */
+ /*
+ * The above shadow registers are obligatory for any and all
+ * exceptions. Now, if the data access pipeline is not clear,
+ * we must save the DMx shadow registers, as well as clear
+ * the appropriate SSBR bits for the destination registers of
+ * loads or xmems.
+ */
bb0.n DMT_VALID_BIT, TMP3, DMT_check_finished
- st TMP3, r31, REG_OFF(EF_DMT0)
+ st TMP3, r31, REG_OFF(EF_DMT0)
ldcr TMP2, DMT1
ldcr TMP3, DMT2
- st TMP2, r31, REG_OFF(EF_DMT1)
- st TMP3, r31, REG_OFF(EF_DMT2)
+ st TMP2, r31, REG_OFF(EF_DMT1)
+ st TMP3, r31, REG_OFF(EF_DMT2)
ldcr TMP2, DMA0
ldcr TMP3, DMA1
- st TMP2, r31, REG_OFF(EF_DMA0)
- st TMP3, r31, REG_OFF(EF_DMA1)
+ st TMP2, r31, REG_OFF(EF_DMA0)
+ st TMP3, r31, REG_OFF(EF_DMA1)
ldcr TMP2, DMA2
ldcr TMP3, DMD0
- st TMP2, r31, REG_OFF(EF_DMA2)
- st TMP3, r31, REG_OFF(EF_DMD0)
+ st TMP2, r31, REG_OFF(EF_DMA2)
+ st TMP3, r31, REG_OFF(EF_DMD0)
- tb1 0,r0,0
+ tb1 0, r0, 0
ldcr TMP2, DMD1
ldcr TMP3, DMD2
- st TMP2, r31, REG_OFF(EF_DMD1)
- st TMP3, r31, REG_OFF(EF_DMD2)
-
- /*
- *---------------------------------------------------------------
- * need to clear "appropriate" bits in the SSBR before
- * we restart the FPU
- */
+ st TMP2, r31, REG_OFF(EF_DMD1)
+ st TMP3, r31, REG_OFF(EF_DMD2)
+ /*
+ *---------------------------------------------------------------
+ * need to clear "appropriate" bits in the SSBR before
+ * we restart the FPU
+ */
- _LABEL(check_DMT0)
+_LABEL(check_DMT0)
ldcr TMP2, DMT0
bb0.n DMT_VALID_BIT, TMP2, DMT_check_finished
- stcr r0, DMT0 /* so an exception at fpu_enable doesn't see our DMT0*/
+ stcr r0, DMT0 /* so an exception at fpu_enable doesn't see our DMT0*/
bb1 DMT_LOCK_BIT, TMP2, do_DMT0
bb1 DMT_WRITE_BIT, TMP2, check_DMT1
- _LABEL(do_DMT0)
+
+_LABEL(do_DMT0)
extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
set TMP2, TMP2, 1<5>
- clr TMP, TMP, TMP2
+ clr TMP, TMP, TMP2
- _LABEL(check_DMT1)
+_LABEL(check_DMT1)
ldcr TMP2, DMT1
bb0 DMT_VALID_BIT, TMP2, check_DMT2
bb1 DMT_LOCK_BIT, TMP2, do_DMT1
bb1 DMT_WRITE_BIT, TMP2, check_DMT2
- _LABEL(do_DMT1)
+
+_LABEL(do_DMT1)
extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
set TMP2, TMP2, 1<5>
- clr TMP, TMP, TMP2
+ clr TMP, TMP, TMP2
- _LABEL(check_DMT2)
+_LABEL(check_DMT2)
ldcr TMP2, DMT2
bb0 DMT_VALID_BIT, TMP2, DMT_check_finished
bb1 DMT_LOCK_BIT, TMP2, do_DMT2_single
bb1 DMT_WRITE_BIT, TMP2, DMT_check_finished
bb1 DMT_DOUBLE_BIT,TMP2, do_DMT2_double
- _LABEL(do_DMT2_single)
+
+_LABEL(do_DMT2_single)
extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
br.n 1f
set TMP2, TMP2, 1<5>
- _LABEL(do_DMT2_double)
+
+_LABEL(do_DMT2_double)
extu TMP2, TMP2, DMT_DREG_WIDTH <DMT_DREG_OFFSET>
set TMP2, TMP2, 1<6>
-1: clr TMP, TMP, TMP2
-
- _LABEL(DMT_check_finished)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: return address to the calling exception handler *
- * SR3: saved TMP *
- * r1: free *
- * TMP: possibly revised SSBR *
- * TMP2: free *
- * TMP3: free *
- * FLAGS: CPU status flags *
- * r31: exception frame *
- * Valid in the exception frame: *
- * Exception-time r1, r31, FLAGS. *
- * Exception-time TMP2, TMP3. *
- * Exception-time espr, sfip, snip, sxip. *
- * Dmt0. *
- * Other data pipeline control registers, if appropriate. *
- * Exception SR3, if appropriate. *
- \***************************************************************/
- ldcr r1, SR2
+1: clr TMP, TMP, TMP2
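+	/*
+	 * Each DMTx check above behaves roughly like this C sketch
+	 * (width is 1, or 2 for the double case of DMT2):
+	 *	if ((dmt & VALID) && ((dmt & LOCK) || !(dmt & WRITE)))
+	 *		ssbr &= ~(((1 << width) - 1) << DREG(dmt));
+	 */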
+
+_LABEL(DMT_check_finished)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: saved TMP *
+ * r1: free *
+ * TMP: possibly revised SSBR *
+ * TMP2: free *
+ * TMP3: free *
+ * FLAGS: CPU status flags *
+ * r31: exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception-time TMP2, TMP3. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Dmt0. *
+ * Other data pipeline control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ \***************************************************************/
+ ldcr r1, SR2
jmp r1 /* return to allow the handler to clear more SSBR bits */
/************************************************************************/
/************************************************************************/
- _LABEL(clear_FPi_ssbr_bit)
- /*
- * Clear floatingpont-imprecise ssbr bits.
- * Also, save appropriate FPU control registers to the E.F.
- *
- * r1: return address to calling exception handler
- * TMP : (possibly) revised ssbr
- * TMP2 : free
- * TMP3 : free
- */
- fldcr TMP2, FPSR
+_LABEL(clear_FPi_ssbr_bit)
+ /*
+	 * Clear floating-point imprecise SSBR bits.
+ * Also, save appropriate FPU control registers to the E.F.
+ *
+ * r1: return address to calling exception handler
+ * TMP : (possibly) revised ssbr
+ * TMP2 : free
+ * TMP3 : free
+ */
+ fldcr TMP2, FPSR
fldcr TMP3, FPCR
- st TMP2, r31, REG_OFF(EF_FPSR)
- st TMP3, r31, REG_OFF(EF_FPCR)
+ st TMP2, r31, REG_OFF(EF_FPSR)
+ st TMP3, r31, REG_OFF(EF_FPCR)
- fldcr TMP2, FPECR
+ fldcr TMP2, FPECR
fldcr TMP3, FPRH
- st TMP2, r31, REG_OFF(EF_FPECR)
- st TMP3, r31, REG_OFF(EF_FPRH)
-
- fldcr TMP2, FPIT
- fldcr TMP3, FPRL
- st TMP2, r31, REG_OFF(EF_FPIT)
- st TMP3, r31, REG_OFF(EF_FPRL)
-
- /*
- * We only need clear the bit in the SSBR for the
- * 2nd reg of a double result [see section 6.8.5]
- */
- #define FPIT_SIZE_BIT 10
+ st TMP2, r31, REG_OFF(EF_FPECR)
+ st TMP3, r31, REG_OFF(EF_FPRH)
+
+ fldcr TMP2, FPIT
+ fldcr TMP3, FPRL
+ st TMP2, r31, REG_OFF(EF_FPIT)
+ st TMP3, r31, REG_OFF(EF_FPRL)
+
+ /*
+	 * We only need to clear the bit in the SSBR for the
+ * 2nd reg of a double result [see section 6.8.5]
+ */
+#define FPIT_SIZE_BIT 10
bb0 FPIT_SIZE_BIT, TMP2, not_double_fpi
extu TMP2, TMP2, 5<0> /* get the reg. */
- set TMP2, TMP2, 1<6> /* set width (width=2 will clear two bits) */
- clr TMP, TMP, TMP2
+ set TMP2, TMP2, 1<6> /* set width (width=2 will clear two bits) */
+ clr TMP, TMP, TMP2
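+	/*
+	 * i.e., roughly, when FPIT_SIZE_BIT is set:
+	 *	ssbr &= ~(3 << (fpit & 0x1f));
+	 * (a sketch; clears the bit pair for a double result)
+	 */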
- _LABEL(not_double_fpi)
- jmp r1
+_LABEL(not_double_fpi)
+ jmp r1
/************************************************************************/
/************************************************************************/
- _LABEL(clear_FPp_ssbr_bit)
- /*
- * Clear floating pont precise ssbr bits.
- * Also, save appropriate FPU control registers to the E.F.
- *
- * r1: return address to calling exception handler
- * TMP : (possibly) revised ssbr
- * TMP2 : free
- * TMP3 : free
- */
- fldcr TMP2, FPSR
- fldcr TMP3, FPCR
- st TMP2, r31, REG_OFF(EF_FPSR)
- st TMP3, r31, REG_OFF(EF_FPCR)
-
- fldcr TMP2, FPHS1
- fldcr TMP3, FPHS2
- st TMP2, r31, REG_OFF(EF_FPHS1)
- st TMP3, r31, REG_OFF(EF_FPHS2)
-
- fldcr TMP2, FPLS1
- fldcr TMP3, FPLS2
- st TMP2, r31, REG_OFF(EF_FPLS1)
- st TMP3, r31, REG_OFF(EF_FPLS2)
-
- fldcr TMP2, FPPT
- fldcr TMP3, FPECR
- st TMP2, r31, REG_OFF(EF_FPPT)
- st TMP3, r31, REG_OFF(EF_FPECR)
-
- #define FPPT_SIZE_BIT 5
- bb1.n FPPT_SIZE_BIT, TMP2, 1f
- extu TMP3, TMP2, 5<0> /* get FP operation dest reg */
- br.n 2f
- set TMP3, TMP3, 1<5> /* set size=1 -- clear one bit for "float" */
- 1: set TMP3, TMP3, 1<6> /* set size=2 -- clear two bit for "double" */
- 2:
- clr TMP, TMP, TMP3 /* clear bit(s) in ssbr. */
- jmp r1
+_LABEL(clear_FPp_ssbr_bit)
+ /*
+	 * Clear floating-point precise SSBR bits.
+ * Also, save appropriate FPU control registers to the E.F.
+ *
+ * r1: return address to calling exception handler
+ * TMP : (possibly) revised ssbr
+ * TMP2 : free
+ * TMP3 : free
+ */
+ fldcr TMP2, FPSR
+ fldcr TMP3, FPCR
+ st TMP2, r31, REG_OFF(EF_FPSR)
+ st TMP3, r31, REG_OFF(EF_FPCR)
+
+ fldcr TMP3, FPECR
+ st TMP3, r31, REG_OFF(EF_FPECR)
+ fldcr TMP2, FPHS1
+ fldcr TMP3, FPHS2
+ st TMP2, r31, REG_OFF(EF_FPHS1)
+ st TMP3, r31, REG_OFF(EF_FPHS2)
+
+ fldcr TMP2, FPLS1
+ fldcr TMP3, FPLS2
+ st TMP2, r31, REG_OFF(EF_FPLS1)
+ st TMP3, r31, REG_OFF(EF_FPLS2)
+
+ fldcr TMP2, FPPT
+ st TMP2, r31, REG_OFF(EF_FPPT)
+1:
+
+#define FPPT_SIZE_BIT 5
+ bb1.n FPPT_SIZE_BIT, TMP2, 2f
+ extu TMP3, TMP2, 5<0> /* get FP operation dest reg */
+ br.n 3f
+ set TMP3, TMP3, 1<5> /* set size=1 -- clear one bit for "float" */
+2: set TMP3, TMP3, 1<6> /* set size=2 -- clear two bit for "double" */
+3:
+ clr TMP, TMP, TMP3 /* clear bit(s) in ssbr. */
+4:
+ jmp r1
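+	/*
+	 * The FPPT decode above, as a hedged C sketch:
+	 *	reg   = fppt & 0x1f;
+	 *	width = (fppt & (1 << FPPT_SIZE_BIT)) ? 2 : 1;
+	 *	ssbr &= ~(((1 << width) - 1) << reg);
+	 */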
/************************************************************************/
/************************************************************************/
- _LABEL(clear_dest_ssbr_bit)
- /*
- * There are various cases where an exception can leave the
- * destination register's bit in the SB set.
- * Examples:
- * misaligned or privilege exception on a LD or XMEM
- * DIV or DIVU by zero.
- *
- * I think that if the instruction is LD.D, then two bits must
- * be cleared.
- *
- * Even though there are a number of instructions/exception
- * combinations that could fire this code up, it's only required
- * to be run for the above cases. However, I don't think it'll
- * ever be a problem to run this in other cases (ST instructions,
- * for example), so I don't bother checking. If we had to check
- * for every possible instruction, this code would be much larger.
- *
- * The only checking, then, is to see if it's a LD.D or not.
- *
- * At the moment....
- * r1: return address to calling exception handler
- * TMP : (possibly) revised ssbr
- * TMP2 : free
- * TMP3 : free
- */
- ldcr TMP3, EPSR /* going to check: user or system memory? */
- ldcr TMP2, SXIP /* get the instruction's address */
- bb1.n PSR_SUPERVISOR_MODE_BIT, TMP3, 2f
- clr TMP2, TMP2, 2<0> /* get rid of valid and error bits. */
-
- 1: /* user space load here */
+_LABEL(clear_dest_ssbr_bit)
+ /*
+ * There are various cases where an exception can leave the
+ * destination register's bit in the SB set.
+ * Examples:
+ * misaligned or privilege exception on a LD or XMEM
+ * DIV or DIVU by zero.
+ *
+ * I think that if the instruction is LD.D, then two bits must
+ * be cleared.
+ *
+ * Even though there are a number of instructions/exception
+ * combinations that could fire this code up, it's only required
+ * to be run for the above cases. However, I don't think it'll
+ * ever be a problem to run this in other cases (ST instructions,
+ * for example), so I don't bother checking. If we had to check
+ * for every possible instruction, this code would be much larger.
+ *
+ * The only checking, then, is to see if it's a LD.D or not.
+ *
+ * At the moment....
+ * r1: return address to calling exception handler
+ * TMP : (possibly) revised ssbr
+ * TMP2 : free
+ * TMP3 : free
+ */
+
+ ldcr TMP3, EPSR /* going to check: user or system memory? */
+ ldcr TMP2, SXIP /* get the instruction's address */
+ bb1.n PSR_SUPERVISOR_MODE_BIT, TMP3, 2f
+ clr TMP2, TMP2, 2<0> /* get rid of valid and error bits. */
+
+1: /* user space load here */
#if ERRATA__XXX_USR
- NOP
- ld.usr TMP2, TMP2, r0 /* get the instruction itself */
- NOP
- NOP
- NOP
- br 3f
+ NOP
+	ld.usr	TMP2, TMP2, r0	/* get the instruction itself */
+ NOP
+ NOP
+ NOP
+ br 3f
#else
- br.n 3f
- ld.usr TMP2, TMP2, r0 /* get the instruction itself */
+ br.n 3f
+	ld.usr	TMP2, TMP2, r0	/* get the instruction itself */
#endif
- 2: /* system space load here */
- ld TMP2, TMP2, r0 /* get the instruction itself */
-
- 3: /* now have the instruction..... */
- /*
- * Now see if it's a double load
- * There are three forms of double load [IMM16, scaled, unscaled],
- * which can be checked by matching against two templates:
- * -- 77776666555544443333222211110000 --
- * if (((instruction & 11111100000000000000000000000000) ==
- * 00010000000000000000000000000000) ;;
- * ((instruction & 11111100000000001111110011100000) ==
- * 11110100000000000001000000000000))
- * {
- * It's a load double, so
- * clear two SSBR bits.
- * }
- * else
- * {
- * It's not a load double.
- * Must be a load single, xmem, or st
- * Thus, clear one SSBR bit.
- * }
- */
- /* check the first pattern for ld.d */
- extu TMP3, TMP2, 16<16> /* get the upper 16 bits */
- mask TMP3, TMP3, 0xFC00 /* apply the mask */
- cmp TMP3, TMP3, 0x1000 /* if this is equal, it's a load double */
- bb1 eq, TMP3, misaligned_double
-
- /* still could be -- check the second pattern for ld.d */
- /* look at the upper 16 bits first */
- extu TMP3, TMP2, 16<16> /* get the upper 16 bits */
- mask TMP3, TMP3, 0xFC00 /* apply the mask */
- cmp TMP3, TMP3, 0xF400 /* if equal, it might be a load double */
- bb1 ne, TMP3, misaligned_single /* not equal, so must be single */
-
- /* now look at the lower 16 bits */
- extu TMP3, TMP2, 16<0> /* get the lower 16 bits */
- mask TMP3, TMP3, 0xFCE0 /* apply the mask */
- cmp TMP3, TMP3, 0x1000 /* if this is equal, it's a load double */
- bb1 eq, TMP3, misaligned_double
-
- _LABEL(misaligned_single)
- extu TMP2, TMP2, 5<21> /* get the destination register */
- br.n 1f
- set TMP2, TMP2, 1<5> /* set size=1 */
-
- _LABEL(misaligned_double)
- extu TMP2, TMP2, 5<21> /* get the destination register */
- set TMP2, TMP2, 1<6> /* set size=2 -- clear two bit for "ld.d" */
-
- 1: jmp.n r1
- clr TMP, TMP, TMP2 /* clear bit(s) in ssbr. */
+2: /* system space load here */
+ ld TMP2, TMP2, r0 /* get the instruction itself */
+
+3: /* now have the instruction..... */
+ /*
+ * Now see if it's a double load
+ * There are three forms of double load [IMM16, scaled, unscaled],
+ * which can be checked by matching against two templates:
+ * -- 77776666555544443333222211110000 --
+ * if (((instruction & 11111100000000000000000000000000) ==
+	 *	       00010000000000000000000000000000) ||
+ * ((instruction & 11111100000000001111110011100000) ==
+ * 11110100000000000001000000000000))
+ * {
+ * It's a load double, so
+ * clear two SSBR bits.
+ * } else {
+ * It's not a load double.
+ * Must be a load single, xmem, or st
+ * Thus, clear one SSBR bit.
+ * }
+ */
+ /* check the first pattern for ld.d */
+ extu TMP3, TMP2, 16<16> /* get the upper 16 bits */
+ mask TMP3, TMP3, 0xFC00 /* apply the mask */
+ cmp TMP3, TMP3, 0x1000 /* if this is equal, it's a load double */
+ bb1 eq, TMP3, misaligned_double
+
+ /* still could be -- check the second pattern for ld.d */
+ /* look at the upper 16 bits first */
+ extu TMP3, TMP2, 16<16> /* get the upper 16 bits */
+ mask TMP3, TMP3, 0xFC00 /* apply the mask */
+ cmp TMP3, TMP3, 0xF400 /* if equal, it might be a load double */
+ bb1 ne, TMP3, misaligned_single /* not equal, so must be single */
+
+ /* now look at the lower 16 bits */
+ extu TMP3, TMP2, 16<0> /* get the lower 16 bits */
+ mask TMP3, TMP3, 0xFCE0 /* apply the mask */
+ cmp TMP3, TMP3, 0x1000 /* if this is equal, it's a load double */
+ bb1 eq, TMP3, misaligned_double
+
+_LABEL(misaligned_single)
+ extu TMP2, TMP2, 5<21> /* get the destination register */
+ br.n 1f
+ set TMP2, TMP2, 1<5> /* set size=1 */
+
+_LABEL(misaligned_double)
+ extu TMP2, TMP2, 5<21> /* get the destination register */
+ set TMP2, TMP2, 1<6> /* set size=2 -- clear two bit for "ld.d" */
+
+1: jmp.n r1
+ clr TMP, TMP, TMP2 /* clear bit(s) in ssbr. */
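+	/*
+	 * The two-template test above condenses to this C sketch
+	 * ("insn" is the fetched instruction word):
+	 *	is_ld_d = ((insn & 0xfc000000) == 0x10000000) ||
+	 *		  ((insn & 0xfc00fce0) == 0xf4001000);
+	 *	dreg = (insn >> 21) & 0x1f;
+	 */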
/************************************************************************/
/************************************************************************/
- LABEL(setup_phase_two)
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: saved return address to calling exception handler *
- * SR1: saved copy of exception-time register now holding FLAGS *
- * SR2: free *
- * SR3: saved TMP *
- * r1: return address to calling exception handler *
- * TMP: possibly revised SSBR *
- * TMP2: free *
- * TMP3: free *
- * FLAGS: CPU status flags *
- * r31: our exception frame *
- * Valid in the exception frame: *
- * Exception-time r1, r31, FLAGS. *
- * Exception-time TMP2, TMP3. *
- * Exception-time espr, sfip, snip, sxip. *
- * Exception number (EF_VECTOR). *
- * Dmt0 *
- * Other data pipeline control registers, if appropriate. *
- * FPU control registers, if appropriate. *
- * Exception SR3, if appropriate. *
- *************************************************** *
- * immediate goal: *
- * restore the system to the exception-time state (except *
- * SR3 will be OUR stack pointer) so that we may resart the FPU. *
- \***************************************************************/
- /*stcr r1, SR0*/ /* save return address */
- stcr TMP, SSBR /* done with SSBR, TMP now free */
- RESTORE_TMP2 /* done with extra temp regs */
- RESTORE_TMP3 /* done with extra temp regs */
-
- /* Get the current PSR and modify for the rte to enable the FPU */
+LABEL(setup_phase_two)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: saved return address to calling exception handler *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: free *
+ * SR3: saved TMP *
+ * r1: return address to calling exception handler *
+ * TMP: possibly revised SSBR *
+ * TMP2: free *
+ * TMP3: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception-time TMP2, TMP3. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * restore the system to the exception-time state (except *
+	 * SR3 will be OUR stack pointer) so that we may restart the FPU. *
+ \***************************************************************/
+ /*stcr r1, SR0*/ /* save return address */
+
+ stcr TMP, SSBR /* done with SSBR, TMP now free */
+ RESTORE_TMP2 /* done with extra temp regs */
+ RESTORE_TMP3 /* done with extra temp regs */
+
+ /* Get the current PSR and modify for the rte to enable the FPU */
#if 1
- ldcr TMP, PSR
- clr TMP, TMP, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
- clr TMP, TMP, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
- stcr TMP, EPSR
-
- /* the "+2" below is to set the VALID_BIT */
- or.u TMP, r0, hi16(fpu_enable +2)
- or TMP, TMP, lo16(fpu_enable +2)
- stcr TMP, SNIP /* jump to here fpu_enable */
- addu TMP, TMP, 4
- stcr TMP, SFIP /* and then continue after that */
+ ldcr TMP, PSR
+ clr TMP, TMP, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
+ clr TMP, TMP, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
+ stcr TMP, EPSR
+
+ /* the "+2" below is to set the VALID_BIT */
+ or.u TMP, r0, hi16(fpu_enable +2)
+ or TMP, TMP, lo16(fpu_enable +2)
+ stcr TMP, SNIP /* jump to here fpu_enable */
+ addu TMP, TMP, 4
+ stcr TMP, SFIP /* and then continue after that */
#else
- ldcr TMP, PSR
- or.u TMP, TMP, 0x8000 /* set supervisor mode */
- and TMP, TMP, 0xfff7 /* also enable shadowing */
- stcr TMP, EPSR
- stcr r0, SXIP /* clear valid bit */
- stcr r0, SNIP /* clear valid bit */
- or.u TMP, r0, hi16(fpu_enable)
- or TMP, TMP, lo16(fpu_enable)
- or TMP, TMP, 0x2 /* set the VALID_BIT and clear Exception bit */
- stcr TMP, SFIP /* jump to here fpu_enable */
+ ldcr TMP, PSR
+ or.u TMP, TMP, 0x8000 /* set supervisor mode */
+ and TMP, TMP, 0xfff7 /* also enable shadowing */
+ stcr TMP, EPSR
+ stcr r0, SXIP /* clear valid bit */
+ stcr r0, SNIP /* clear valid bit */
+ or.u TMP, r0, hi16(fpu_enable)
+ or TMP, TMP, lo16(fpu_enable)
+ or TMP, TMP, 0x2 /* set the VALID_BIT and clear Exception bit */
+ stcr TMP, SFIP /* jump to here fpu_enable */
#endif
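+	/*
+	 * Net effect of the #if 1 branch above, as a hedged C sketch
+	 * (bit 1 is the VALID bit of the NIP/FIP words):
+	 *	epsr = psr & ~FPU_DISABLE & ~SHADOW_FREEZE;
+	 *	snip = fpu_enable | 2;
+	 *	sfip = snip + 4;
+	 * so the RTE below resumes at fpu_enable with the FPU enabled.
+	 */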
-
- set FLAGS, FLAGS, 1<FLAG_ENABLING_FPU> /* note what we're doing.*/
- xcr FLAGS, FLAGS, SR1
- st r1, r31, REG_OFF(EF_RET) /* save the return address */
- ld r1, r31, GENREG_OFF(1) /* get original r1 */
-
- xcr TMP, r31, SR3 /* TMP now restored. R31 now saved in SR3 */
- ld r31, r31, GENREG_OFF(31) /* get original r31 */
-
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread *
- * SR1: CPU flags *
- * SR2: free *
- * SR3: pointer to our exception frame (our stack pointer) *
- * r1 through r31: original exception-time values *
- * *
- * Valid in the exception frame: *
- * Exception-time FLAGS. *
- * Exception-time espr, sfip, snip, sxip. *
- * Exception number (EF_VECTOR). *
- * Dmt0 *
- * Other data pipeline control registers, if appropriate. *
- * FPU control registers, if appropriate. *
- * Exception SR3, if appropriate. *
- * Held temporarly in the exception frame: *
- * Return address to the calling excption handler. *
- *************************************************** *
- * immediate goal: *
- * Do an RTE to restart the fpu and jump to "fpu_enable" *
- * Another exception (or exceptions) may be raised in *
- * this, which is why FLAG_ENABLING_FPU is set in SR1. *
- \***************************************************************/
-
- RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */
-
- _LABEL(fpu_enable)
- FLUSH_PIPELINE
- xcr TMP, TMP, SR3 /* get E.F. pointer */
- st.d r30, TMP, GENREG_OFF(30) /* save previous r30, r31 */
- or r31, TMP, r0 /* transfer E.F. pointer to r31 */
- ld TMP, r31, REG_OFF(EF_SR3)/* get previous SR3; maybe important*/
- /* make sure that the FLAG_ENABLING_FPU bit is off */
- xcr FLAGS, FLAGS, SR1
- clr FLAGS, FLAGS, 1<FLAG_ENABLING_FPU>
- xcr FLAGS, FLAGS, SR1
-
- xcr TMP, TMP, SR3 /* replace TMP, SR3 */
-
- /* now save all regs to the exception frame. */
- st.d r0 , r31, GENREG_OFF(0)
- st.d r2 , r31, GENREG_OFF(2)
- st.d r4 , r31, GENREG_OFF(4)
- st.d r6 , r31, GENREG_OFF(6)
- st.d r8 , r31, GENREG_OFF(8)
- st.d r10, r31, GENREG_OFF(10)
- st.d r12, r31, GENREG_OFF(12)
- st.d r14, r31, GENREG_OFF(14)
- st.d r16, r31, GENREG_OFF(16)
- st.d r18, r31, GENREG_OFF(18)
- st.d r20, r31, GENREG_OFF(20)
- st.d r22, r31, GENREG_OFF(22)
- st.d r24, r31, GENREG_OFF(24)
- st.d r26, r31, GENREG_OFF(26)
- st.d r28, r31, GENREG_OFF(28)
+setup_phase_two_cont:
+ set FLAGS, FLAGS, 1<FLAG_ENABLING_FPU> /* note what we're doing.*/
+ xcr FLAGS, FLAGS, SR1
+ st r1, r31, REG_OFF(EF_RET) /* save the return address */
+ ld r1, r31, GENREG_OFF(1) /* get original r1 */
+
+ xcr TMP, r31, SR3 /* TMP now restored. R31 now saved in SR3 */
+ ld r31, r31, GENREG_OFF(31) /* get original r31 */
+
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: CPU flags *
+ * SR2: free *
+ * SR3: pointer to our exception frame (our stack pointer) *
+ * r1 through r31: original exception-time values *
+ * *
+ * Valid in the exception frame: *
+ * Exception-time FLAGS. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+	 * Held temporarily in the exception frame:			*
+	 *	Return address to the calling exception handler.	*
+ *************************************************** *
+ * immediate goal: *
+ * Do an RTE to restart the fpu and jump to "fpu_enable" *
+ * Another exception (or exceptions) may be raised in *
+ * this, which is why FLAG_ENABLING_FPU is set in SR1. *
+ \***************************************************************/
+
+ RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */
+
+_LABEL(fpu_enable)
+ FLUSH_PIPELINE
+ xcr TMP, TMP, SR3 /* get E.F. pointer */
+ st.d r30, TMP, GENREG_OFF(30) /* save previous r30, r31 */
+ or r31, TMP, r0 /* transfer E.F. pointer to r31 */
+ ld TMP, r31, REG_OFF(EF_SR3) /* get previous SR3; maybe important*/
+
+ /* make sure that the FLAG_ENABLING_FPU bit is off */
+	xcr	FLAGS, FLAGS, SR1
+	clr	FLAGS, FLAGS, 1<FLAG_ENABLING_FPU>
+	xcr	FLAGS, FLAGS, SR1
+
+ xcr TMP, TMP, SR3 /* replace TMP, SR3 */
+
+ /* now save all regs to the exception frame. */
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st.d r26, r31, GENREG_OFF(26)
+ st.d r28, r31, GENREG_OFF(28)
#ifdef JEFF_DEBUG
- /* mark beginning of frame with notable value */
- or.u r20, r0, hi16(0x12345678)
- or r20, r20, lo16(0x12345678)
- st r20, r31, GENREG_OFF(0)
+ /* mark beginning of frame with notable value */
+ or.u r20, r0, hi16(0x12345678)
+ or r20, r20, lo16(0x12345678)
+ st r20, r31, GENREG_OFF(0)
#endif
- /***************** REGISTER STATUS BLOCK ***********************\
- * SR0: current thread *
- * SR1: free *
- * SR2: free *
- * SR3: previous exception-time SR3 *
- * r1: return address to the calling exception handler *
- * r2 through r30: free *
- * r31: our exception frame *
- * *
- * Valid in the exception frame: *
- * Exception-time r0 through r31. *
- * Exception-time FLAGS. *
- * Exception-time espr, sfip, snip, sxip. *
- * Exception number (EF_VECTOR). *
- * Dmt0 *
- * Other data pipeline control registers, if appropriate. *
- * FPU control registers, if appropriate. *
- * Exception SR3, if appropriate. *
- *************************************************** *
- * immediate goal: *
- * Pick up a stack if we came in from user mode. Put *
- * A copy of the exception frame pointer into r30 *
- * bump the stack a doubleword and write the exception *
- * frame pointer. *
- * if not an interrupt exception, *
- * Turn on interrupts and service any outstanding *
- * data access exceptions. *
- * Return to calling exception handler to *
- * service the exception. *
- \***************************************************************/
-
- /*
- * If it's not the interrupt exception, enable interrupts and
- * take care of any data access exceptions......
- *
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: free *
+ * SR2: free *
+ * SR3: previous exception-time SR3 *
+ * r1: return address to the calling exception handler *
+ * r2 through r30: free *
+ * r31: our exception frame *
+ * *
+ * Valid in the exception frame: *
+ * Exception-time r0 through r31. *
+ * Exception-time FLAGS. *
+ * Exception-time espr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+	 *	Pick up a stack if we came in from user mode. Put	*
+	 *	a copy of the exception frame pointer into r30,	 	*
+	 *	bump the stack a doubleword, and write the exception	*
+	 *	frame pointer.					 	*
+ * if not an interrupt exception, *
+ * Turn on interrupts and service any outstanding *
+ * data access exceptions. *
+ * Return to calling exception handler to *
+ * service the exception. *
+ \***************************************************************/
+
+ /*
+ * If it's not the interrupt exception, enable interrupts and
+ * take care of any data access exceptions......
+ *
#if INTSTACK
- * If interrupt exception, switch to interrupt stack if not
- * already there. Else, switch to kernel stack.
+ * If interrupt exception, switch to interrupt stack if not
+ * already there. Else, switch to kernel stack.
#endif
- */
- or r30, r0, r31 /* get a copy of the e.f. pointer */
- ld r2, r31, REG_OFF(EF_EPSR)
- bb1 PSR_SUPERVISOR_MODE_BIT, r2, 1f /* If in kernel mode */
+ */
+ or r30, r0, r31 /* get a copy of the e.f. pointer */
+ ld r2, r31, REG_OFF(EF_EPSR)
+ bb1 PSR_SUPERVISOR_MODE_BIT, r2, 1f /* If in kernel mode */
#if INTSTACK
- ld r3, r31, REG_OFF(EF_VECTOR)
- cmp r3, r3, 1 /* is interrupt ? */
- bb0 eq, r3, 2f
- or.u r31, r0, hi16(_intstack_end) /* swith to int stack */
- or r31, r31, lo16(_intstack_end)
- br 3f
- 2:
+ ld r3, r31, REG_OFF(EF_VECTOR)
+ cmp r3, r3, 1 /* is interrupt ? */
+ bb0 eq, r3, 2f
+	or.u	r31, r0, hi16(_intstack_end) /* switch to int stack */
+ or r31, r31, lo16(_intstack_end)
+ br 3f
+2:
#endif
- or.u r31, r0, hi16(_kstack)
- ld r31, r31, lo16(_kstack)
- addu r31, r31, USIZE /* point at proper end */
- br 3f
- 1:
+ or.u r31, r0, hi16(_kstack)
+ ld r31, r31, lo16(_kstack)
+ addu r31, r31, USIZE /* point at proper end */
+ br 3f
+1:
#if INTSTACK
- ld r3, r31, REG_OFF(EF_VECTOR)
- cmp r3, r3, 1 /* is interrupt ? */
- bb0 eq, r3, 3f /* no, we will stay on kern stack */
- or.u r31, r0, hi16(_intstack_end) /* swith to int stack */
- or r31, r31, lo16(_intstack_end)
+ ld r3, r31, REG_OFF(EF_VECTOR)
+ cmp r3, r3, 1 /* is interrupt ? */
+ bb0 eq, r3, 3f /* no, we will stay on kern stack */
+	or.u	r31, r0, hi16(_intstack_end) /* switch to int stack */
+ or r31, r31, lo16(_intstack_end)
#endif /* INTSTACK */
- /* This label is here for debugging */
- exception_handler_has_ksp: global exception_handler_has_ksp
- 3: /*
- here - r30 holds a pointer to the exception frame.
- r31 is a pointer to the kernel stack/interrupt stack.
- */
- subu r31, r31, 8 /* make some breathing space */
- st r30, r31, 0 /* store frame pointer on the stack */
+ /* This label is here for debugging */
+exception_handler_has_ksp: global exception_handler_has_ksp
+3: /*
+ here - r30 holds a pointer to the exception frame.
+ r31 is a pointer to the kernel stack/interrupt stack.
+ */
+ subu r31, r31, 8 /* make some breathing space */
+ st r30, r31, 0 /* store frame pointer on the stack */
#if DDB
- st r30, r31, 4 /* store it again for the debugger to recognize */
+ st r30, r31, 4 /* store it again for the debugger to recognize */
#endif DDB
- ld r2, r30, REG_OFF(EF_VECTOR)
- bcnd.n eq0, r2, return_to_calling_exception_handler /* is error */
- ld r14, r30, REG_OFF(EF_RET)
- cmp r3, r2, 1 /* interrupt is exception #1 ;Is an interrupt? */
- bb1.n eq, r3, return_to_calling_exception_handler /* skip if so */
+ ld r2, r30, REG_OFF(EF_VECTOR)
+ bcnd.n eq0, r2, return_to_calling_exception_handler /* is error */
+ ld r14, r30, REG_OFF(EF_RET)
+ cmp r3, r2, 1 /* interrupt is exception #1 ;Is an interrupt? */
+ bb1.n eq, r3, return_to_calling_exception_handler /* skip if so */
#if DDB
- cmp r3, r2, 130 /* DDB break exception */
- bb1.n eq, r3, return_to_calling_exception_handler
+ cmp r3, r2, 130 /* DDB break exception */
+ bb1.n eq, r3, return_to_calling_exception_handler
- cmp r3, r2, 132 /* DDB entry exception */
- bb1.n eq, r3, return_to_calling_exception_handler
+ cmp r3, r2, 132 /* DDB entry exception */
+ bb1.n eq, r3, return_to_calling_exception_handler
#endif
- ldcr r2, PSR
- clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
- stcr r2, PSR
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
+ stcr r2, PSR
#if DDB
- FLUSH_PIPELINE
+ FLUSH_PIPELINE
#endif
- /* service any outstanding data pipeline stuff
- - check dmt0 anything outstanding?*/
+ /* service any outstanding data pipeline stuff
+ - check dmt0 anything outstanding?*/
- ld r3, r30, REG_OFF(EF_DMT0)
- bb0 DMT_VALID_BIT, r3, return_to_calling_exception_handler
+ ld r3, r30, REG_OFF(EF_DMT0)
+ bb0 DMT_VALID_BIT, r3, return_to_calling_exception_handler
/*
- r30 can be clobbered by calls. So stuff its value into a
- preserved register, say r15. R14 is in use (see return_to_... below).
+ r30 can be clobbered by calls. So stuff its value into a
+ preserved register, say r15. R14 is in use (see return_to_... below).
*/
- or r15, r0, r30
+ or r15, r0, r30
- CALL(_trap, T_DATAFLT, r15)
- CALL(_data_access_emulation, r15, r0)
+ CALL(_trap, T_DATAFLT, r15)
+ CALL(_data_access_emulation, r15, r0)
-/* restore it... */
- or r30, r0, r15
+/* restore it... */
+ or r30, r0, r15
- /* clear the dmt0 word in the E.F */
- st r0, r30, REG_OFF(EF_DMT0)
+ /* clear the dmt0 word in the E.F */
+ st r0, r30, REG_OFF(EF_DMT0)
- _LABEL(return_to_calling_exception_handler)
- jmp r14 /* loaded above */
+_LABEL(return_to_calling_exception_handler)
+ jmp r14 /* loaded above */
/*
@@ -1764,16 +1846,16 @@ LABEL(setup_phase_one)
*/
ENTRY(proc_trampoline)
- ld r1,r31,0 /* load func */
- ld r2,r31,4 /* load proc pointer */
- jsr.n r1
- subu r31,r31,40 /* create stack space for function */
- addu r31,r31,48 /* stack space above + ksigframe */
- ld r1, r31,0 /* load pc */
- ld r2, r31,4 /* & proc pointer from switch frame */
- jsr.n r1
- addu r31,r31,8
- bsr _panic
+ ld r1,r31,0 /* load func */
+ ld r2,r31,4 /* load proc pointer */
+ jsr.n r1
+ subu r31,r31,40 /* create stack space for function */
+ addu r31,r31,48 /* stack space above + ksigframe */
+ ld r1, r31,0 /* load pc */
+ ld r2, r31,4 /* & proc pointer from switch frame */
+ jsr.n r1
+ addu r31,r31,8
+ bsr _panic
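+	/*
+	 * In rough C terms (a sketch of the two calls above):
+	 *	(*func)(proc);		-- from the frame at the old r31
+	 *	(*pc)(proc);		-- from the switch frame above it
+	 *	panic(...);		-- if the second call ever returns
+	 */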
/*
* proc_do_uret
@@ -1784,204 +1866,1097 @@ ENTRY(proc_trampoline)
*/
ENTRY(proc_do_uret)
- ld r3,r2,P_ADDR /* p->p_addr */
- addu r3,r3,PCB_USER_STATE /* p->p_addr.u_pcb.user_state */
- st r3,r31,0 /* put it on the stack */
- br return_from_exception_handler
-
+ ld r3,r2,P_ADDR /* p->p_addr */
+ addu r3,r3,PCB_USER_STATE /* p->p_addr.u_pcb.user_state */
+ st r3,r31,0 /* put it on the stack */
+ br return_from_exception_handler
+
LABEL(return_from_exception_handler)
LABEL(_return_from_main)
- /*
- * Regs r1-r30 are free. R31 is pointing at the word
+ /*
+ * Regs r1-r30 are free. R31 is pointing at the word
* on the kernel stack where our pointer to the exception frame
	 * is stored. Reload it now.
- *
- * At this point, if EF_DMT0 is not zero, then
- * this must have been an interrupt where the fault didn't
- * get corrected above. We'll do that now.
- *
- * We load it into r14 since it is preserved across function
- * calls, and we may have to call some routines from within here.
- *
- * control is transfered here from obvious places in this file
+ *
+ * At this point, if EF_DMT0 is not zero, then
+ * this must have been an interrupt where the fault didn't
+ * get corrected above. We'll do that now.
+ *
+ * We load it into r14 since it is preserved across function
+ * calls, and we may have to call some routines from within here.
+ *
+	 * control is transferred here from obvious places in this file
* and thread_bootstrap in luna88k/locore.c.
- *
- */
+ *
+ */
+ or.u r2, r0, hi16(_cputyp)
+ ld r3, r2, lo16(_cputyp)
+ cmp r2, r3, 0x197
+ bb1 eq, r2, m197_return_code
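+	/* i.e., a sketch:  if (cputyp == 0x197) goto m197_return_code; */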
+
#define FPTR r14
- ld FPTR, r31, 0 /* grab exception frame pointer */
+ ld FPTR, r31, 0 /* grab exception frame pointer */
ld r3, FPTR, REG_OFF(EF_DMT0)
bb0 DMT_VALID_BIT, r3, _check_ast /*[Oh well, nothing to do here] */
#if 1
- /*
- * This might happen for non-interrupts If the user sets DMT0
- * in an exception handler.........
- */
- ld r2, FPTR, REG_OFF(EF_VECTOR)
- cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
- bb1 eq, r2, 1f
- LABEL(oops)
- or.u r4, r0, hi16(2f)
- or r4, r4, lo16(2f)
+ /*
+	 * This might happen for non-interrupts if the user sets DMT0
+	 * in an exception handler...
+ */
+ ld r2, FPTR, REG_OFF(EF_VECTOR)
+ cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
+ bb1 eq, r2, 1f
+LABEL(oops)
+ or.u r4, r0, hi16(2f)
+ or r4, r4, lo16(2f)
#if DDB
- CALL(_db_printf, r4, r0)
- tb0 0, r0, 132
+ CALL(_db_printf, r4, r0)
+ tb0 0, r0, 132
#endif
- br 1f
- data
- 2: string "OOPS: DMT0 not zero and not interrupt.\n\000"
- align 8
- text
- 1:
+ br 1f
+ data
+2: string "OOPS: DMT0 not zero and not interrupt.\n\000"
+ align 8
+ text
+1:
#endif
- /*
- * If it's the interrupt exception, enable interrupt.
- * Take care of any data access exception...... 90/8/15 add by yama
- */
-
- /*
- * Is it ever possible to have interrupt exception while EPSR has
- * it disabled? I don't think so.. XXX nivas
- */
- ld r2, FPTR, REG_OFF(EF_VECTOR)
- cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
- bb1 ne, r2, 1f /* If not so, skip */
-
- /* if EPSR has interrupts disabled, skip also */
- ld r2, FPTR, REG_OFF(EF_EPSR)
- bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if disabled */
- ldcr r2, PSR
- clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
+ /*
+ * If it's the interrupt exception, enable interrupt.
+ * Take care of any data access exception...... 90/8/15 add by yama
+ */
+
+ /*
+ * Is it ever possible to have interrupt exception while EPSR has
+ * it disabled? I don't think so.. XXX nivas
+ */
+ ld r2, FPTR, REG_OFF(EF_VECTOR)
+ cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
+ bb1 ne, r2, 1f /* If not so, skip */
+
+ /* if EPSR has interrupts disabled, skip also */
+ ld r2, FPTR, REG_OFF(EF_EPSR)
+ bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if disabled */
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
FLUSH_PIPELINE
- stcr r2, PSR
+ stcr r2, PSR
1:
- ld r2, FPTR, REG_OFF(EF_DMT0)
- bb0 DMT_VALID_BIT, r2, 2f
+ ld r2, FPTR, REG_OFF(EF_DMT0)
+ bb0 DMT_VALID_BIT, r2, 2f
- /*
- * if there happens to be a data fault that hasn't been serviced yet,
- * go off and service that...
- */
+ /*
+ * if there happens to be a data fault that hasn't been serviced yet,
+ * go off and service that...
+ */
CALL(_trap, T_DATAFLT, r30)
CALL(_data_access_emulation, r30, r0) /* really only 2 args */
- /* clear the dmt0 word in the E.F. */
- st r0 , FPTR, REG_OFF(EF_DMT0)
+ /* clear the dmt0 word in the E.F. */
+ st r0 , FPTR, REG_OFF(EF_DMT0)
2:
+ br _check_ast
+
+LABEL(m197_return_code)
+#define FPTR r14
+ ld FPTR, r31, 0 /* grab exception frame pointer */
+ ld r3, FPTR, REG_OFF(EF_DSR)
+ cmp r2, r3, 0x0
+ bb1 eq, r2, _check_ast /*[Oh well, nothing to do here] */
+
+#if 1
+ /*
+	 * This might happen for non-interrupts if the user sets DSR
+	 * in an exception handler...
+ */
+ ld r2, FPTR, REG_OFF(EF_VECTOR)
+ cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
+ bb1 eq, r2, 1f
+LABEL(oops2)
+ or.u r4, r0, hi16(2f)
+ or r4, r4, lo16(2f)
+#if DDB
+ CALL(_db_printf, r4, r0)
+ tb0 0, r0, 132
+#endif
+ br 1f
+ data
+2: string "OOPS: DSR not zero and not interrupt.\n\000"
+ align 8
+ text
+1:
+#endif
+ /*
+ * If it's the interrupt exception, enable interrupt.
+ * Take care of any data access exception...... 90/8/15 add by yama
+ */
+
+ /*
+ * Is it ever possible to have interrupt exception while EPSR has
+ * it disabled? I don't think so.. XXX nivas
+ */
+ ld r2, FPTR, REG_OFF(EF_VECTOR)
+ cmp r2, r2, 1 /* interrupt is exception #1 ; Is an interrupt? */
+ bb1 ne, r2, 1f /* If not so, skip */
+
+ /* if EPSR has interrupts disabled, skip also */
+ ld r2, FPTR, REG_OFF(EF_EPSR)
+ bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if disabled */
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
+ FLUSH_PIPELINE
+ stcr r2, PSR
+1:
+ ld r2, FPTR, REG_OFF(EF_DSR)
+ cmp r3, r2, 0x0
+ bb1 eq, r3, 2f
+
+ /*
+ * if there happens to be a data fault that hasn't been serviced yet,
+ * go off and service that...
+ */
+ CALL(_trap2, T_DATAFLT, r30)
+
+	/* clear the DSR word in the E.F. */
+ st r0, FPTR, REG_OFF(EF_DSR)
+2:
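+	/*
+	 * On this m197 path a nonzero EF_DSR plays the role that a
+	 * valid EF_DMT0 plays on the 88100: if it is set and this was
+	 * not an interrupt, the fault is serviced via trap2(T_DATAFLT)
+	 * above and DSR is then cleared.
+	 */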
/*
- * If the saved ipl is 0, then call dosoftint() to process soft
- * interrupts.
- * If returning to user land, look for ASTs
+ * If the saved ipl is 0, then call dosoftint() to process soft
+ * interrupts.
+ * If returning to user land, look for ASTs
*/
LABEL(_check_ast)
- ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
- bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if ints off */
- ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
- bcnd ne0, r2, 1f /* can't do softint's */
- bsr.n _setipl
- or r2,r0,1
- bsr _dosoftint
- /* is this needed? we are going to restore the ipl below XXX nivas */
- bsr.n _setipl
- or r2,r0,0 /* ints are enabled */
- /* at ipl 0 now */
- 1:
- ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
- bb1 PSR_SUPERVISOR_MODE_BIT, r2, no_ast /*skip if in system mode */
-
- /* should assert here - not in user mode with ints off XXX nivas */
- /* get and check want_ast */
- or.u r2, r0, hi16(_want_ast)
- ld r3, r2, lo16(_want_ast)
- bcnd eq0, r3, no_ast
-
- /*
- * trap(AST,...) will service ast's.
- */
+ ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
+ bb1 PSR_INTERRUPT_DISABLE_BIT, r2, 1f /* skip if ints off */
+ ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
+ bcnd ne0, r2, 1f /* can't do softint's */
+ bsr.n _setipl
+ or r2,r0,1
+ bsr _dosoftint
+ /* is this needed? we are going to restore the ipl below XXX nivas */
+ bsr.n _setipl
+ or r2,r0,0 /* ints are enabled */
+ /* at ipl 0 now */
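+	/*
+	 * The softint test above, roughly, in C (a sketch; setipl and
+	 * dosoftint are the bsr targets):
+	 *	if (!(ef->epsr & INT_DISABLE) && ef->mask == 0) {
+	 *		setipl(1);
+	 *		dosoftint();
+	 *		setipl(0);
+	 *	}
+	 */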
+1:
+ ld r2, FPTR, REG_OFF(EF_EPSR) /* get pre-exception PSR */
+	bb1	PSR_SUPERVISOR_MODE_BIT, r2, no_ast /* skip if in system mode */
+
+ /* should assert here - not in user mode with ints off XXX nivas */
+ /* get and check want_ast */
+ or.u r2, r0, hi16(_want_ast)
+ ld r3, r2, lo16(_want_ast)
+ bcnd eq0, r3, no_ast
+
+ /*
+ * trap(AST,...) will service ast's.
+ */
CALL(_trap, T_ASTFLT, FPTR)
#if 0
- /* assert that ipl is 0; if going back to user, it should be 0 */
+ /* assert that ipl is 0; if going back to user, it should be 0 */
- bsr _getipl
- bcnd eq0, r2, 2f
- bsr panic
- 2:
+ bsr _getipl
+ bcnd eq0, r2, 2f
+ bsr panic
+2:
#endif
_LABEL(no_ast)
- /* disable interrupts */
+ /* disable interrupts */
- ldcr r1, PSR
- set r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
+ ldcr r1, PSR
+ set r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
FLUSH_PIPELINE
- stcr r1, PSR
-
- /* now ready to return....*/
-
- /*
- * Transfer the frame pointer to r31, since we no longer need a stack.
- * No page faults here, and interrupts are disabled.
- */
-
- ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
- bsr _setipl
-
- or r31, r0, FPTR
- /* restore r1 later */
- ld.d r2 , r31, GENREG_OFF(2)
- ld.d r4 , r31, GENREG_OFF(4)
- ld.d r6 , r31, GENREG_OFF(6)
- ld.d r8 , r31, GENREG_OFF(8)
- ld.d r10, r31, GENREG_OFF(10)
- ld.d r12, r31, GENREG_OFF(12)
- ld.d r14, r31, GENREG_OFF(14)
- ld.d r16, r31, GENREG_OFF(16)
- ld.d r18, r31, GENREG_OFF(18)
- ld.d r20, r31, GENREG_OFF(20)
- ld.d r22, r31, GENREG_OFF(22)
- ld.d r24, r31, GENREG_OFF(24)
- ld.d r26, r31, GENREG_OFF(26)
- ld.d r28, r31, GENREG_OFF(28)
- /* restore r1, r30, r31 later */
-
- /* disable shadowing */
- ldcr r1, PSR
- set r1, r1, 1<PSR_SHADOW_FREEZE_BIT>
+ stcr r1, PSR
+
+ /* now ready to return....*/
+
+ /*
+ * Transfer the frame pointer to r31, since we no longer need a stack.
+ * No page faults here, and interrupts are disabled.
+ */
+
+ ld r2, FPTR, REG_OFF(EF_MASK) /* get pre-exception ipl */
+ bsr _setipl
+
+ or r31, r0, FPTR
+ /* restore r1 later */
+ ld.d r2 , r31, GENREG_OFF(2)
+ ld.d r4 , r31, GENREG_OFF(4)
+ ld.d r6 , r31, GENREG_OFF(6)
+ ld.d r8 , r31, GENREG_OFF(8)
+ ld.d r10, r31, GENREG_OFF(10)
+ ld.d r12, r31, GENREG_OFF(12)
+ ld.d r14, r31, GENREG_OFF(14)
+ ld.d r16, r31, GENREG_OFF(16)
+ ld.d r18, r31, GENREG_OFF(18)
+ ld.d r20, r31, GENREG_OFF(20)
+ ld.d r22, r31, GENREG_OFF(22)
+ ld.d r24, r31, GENREG_OFF(24)
+ ld.d r26, r31, GENREG_OFF(26)
+ ld.d r28, r31, GENREG_OFF(28)
+ /* restore r1, r30, r31 later */
+
+ /* disable shadowing */
+ ldcr r1, PSR
+ set r1, r1, 1<PSR_SHADOW_FREEZE_BIT>
+ FLUSH_PIPELINE
+ stcr r1, PSR
+
+ or.u r1, r0, hi16(_cputyp)
+ ld r1, r1, lo16(_cputyp)
+ cmp r1, r1, 0x197
+ bb0 eq, r1, 1f
+
+ ld r30, r31, REG_OFF(EF_SNIP)
+ ld r1, r31, REG_OFF(EF_SXIP)
+ stcr r30, SNIP
+ stcr r1, SXIP
+ br 2f
+1:
+ /* reload the control regs*/
+ st r0, r31, REG_OFF(EF_IPFSR)
+ st r0, r31, REG_OFF(EF_DPFSR)
+
+ /*
+ * Note: no need to restore the SXIP.
+	 * The "rte" causes execution to continue
+	 * first with the instruction pointed to by the NIP
+	 * and then with the one pointed to by the FIP.
+ *
+ * See MC88100 Risc Processor User's Manual, 2nd Edition,
+ * section 6.4.3.1.2-4
+ */
+ ld r30, r31, REG_OFF(EF_SNIP)
+ ld r1, r31, REG_OFF(EF_SFIP)
+ stcr r0, SSBR
+ stcr r30, SNIP
+ stcr r1, SFIP
+
+2:
+ ld r30, r31, REG_OFF(EF_EPSR)
+ ld r1, r31, REG_OFF(EF_MODE)
+ stcr r30, EPSR
+
+ /* Now restore r1, r30, and r31 */
+ ld r1, r31, GENREG_OFF(1)
+ ld.d r30, r31, GENREG_OFF(30)
+
+_LABEL(return_from_exception)
+ RTE
+
+#ifdef MVME197
+/*#########################################################################*/
+/*#### THE ACTUAL EXCEPTION HANDLER ENTRY POINTS - MVME197 ################*/
+/*#########################################################################*/
+
+/* unknown exception handler */
+LABEL(_m197_unknown_handler)
+ PREP2("unknown", 0, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_UNKNOWN_BIT)
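+/*
+ * Each MVME197 handler below follows the same three-step shape
+ * (macro internals are defined elsewhere in this file; this is
+ * just a reading aid):
+ *	PREP2(name, vector, dbgbit, ssbr, precheck)	-- build frame in r30
+ *	CALL(fn, arg, r30)				-- service it
+ *	DONE(dbgbit)					-- common return path
+ */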
+
+/* interrupt exception handler */
+LABEL(_m197_interrupt_handler)
+ PREP2("interrupt", 1, DEBUG_INTERRUPT_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_sbc_ext_int, 1, r30)
+ DONE(DEBUG_INTERRUPT_BIT)
+
+/* instruction access exception handler */
+LABEL(_m197_instruction_access_handler)
+ PREP2("inst", 2, DEBUG_INSTRUCTION_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_INSTFLT, r30)
+ DONE(DEBUG_INSTRUCTION_BIT)
+
+/*
+ * data access exception handler --
+ * See badaddr() below for info about Data_Precheck.
+ */
+LABEL(_m197_data_exception_handler)
+ PREP2("data", 3, DEBUG_DATA_BIT, No_SSBR_Stuff, M197_Data_Precheck)
+ DONE(DEBUG_DATA_BIT)
+
+/* misaligned access exception handler */
+LABEL(_m197_misaligned_handler)
+ PREP2("misalign", 4, DEBUG_MISALIGN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_MISALGNFLT, r30)
+ DONE(DEBUG_MISALIGN_BIT)
+
+/* unimplemented opcode exception handler */
+LABEL(_m197_unimplemented_handler)
+ PREP2("unimp", 5, DEBUG_UNIMPLEMENTED_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_ILLFLT, r30)
+ DONE(DEBUG_UNIMPLEMENTED_BIT)
+
+/* privilege exception handler */
+LABEL(_m197_privilege_handler)
+ PREP2("privilege", 6, DEBUG_PRIVILEGE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_PRIVINFLT, r30)
+ DONE(DEBUG_PRIVILEGE_BIT)
+
+/*
+ * I'm not sure what the trap(T_BNDFLT,...) does, but it doesn't send
+ * a signal to the process...
+ */
+LABEL(_m197_bounds_handler)
+ PREP2("bounds", 7, DEBUG_BOUNDS_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_BNDFLT, r30)
+ DONE(DEBUG_BOUNDS_BIT)
+
+/* integer divide-by-zero exception handler */
+LABEL(_m197_divide_handler)
+ PREP2("divide", 8, DEBUG_DIVIDE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_ZERODIV, r30)
+ DONE(DEBUG_DIVIDE_BIT)
+
+/* integer overflow exception handler */
+LABEL(_m197_overflow_handler)
+ PREP2("overflow", 9, DEBUG_OVERFLOW_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_OVFFLT, r30)
+ DONE(DEBUG_OVERFLOW_BIT)
+
+/* Floating-point precise handler */
+LABEL(_m197_fp_precise_handler)
+ PREP2("FPU precise", 114, DEBUG_FPp_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_m88110_Xfp_precise, r0, r30) /* call fp_precise(??, exception_frame)*/
+ DONE(DEBUG_FPp_BIT)
+
+/* MVME197 non-maskable interrupt handler */
+LABEL(_m197_nonmaskable)
+ PREP2("MVME197 non-mask", 11, DEBUG_NON_MASK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_NON_MASK, r30)
+ DONE(DEBUG_NON_MASK_BIT)
+
+/* MVME197 data MMU read miss handler */
+LABEL(_m197_data_read_miss)
+ PREP2("MVME197 read miss", 12, DEBUG_197_READ_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_197_READ, r30)
+ DONE(DEBUG_197_READ_BIT)
+
+/* MVME197 data MMU write miss handler */
+LABEL(_m197_data_write_miss)
+ PREP2("MVME197 write miss", 13, DEBUG_197_WRITE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_197_WRITE, r30)
+ DONE(DEBUG_197_WRITE_BIT)
+
+/* MVME197 inst MMU ATC miss handler */
+LABEL(_m197_inst_atc_miss)
+ PREP2("MVME197 inst miss", 14, DEBUG_197_INST_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_197_INST, r30)
+ DONE(DEBUG_197_INST_BIT)
+
+/* All standard system calls. */
+LABEL(_m197_syscall_handler)
+ PREP2("syscall", 128, DEBUG_SYSCALL_BIT, No_SSBR_Stuff, No_Precheck)
+ ld r13, r30, GENREG_OFF(13)
+ CALL(_m197_syscall, r13, r30) /* system call no. is in r13 */
+ DONE(DEBUG_SYSCALL_BIT)
+
+/* trap 496 comes here */
+LABEL(_m197_bugtrap)
+ PREP2("bugsyscall", 496, DEBUG_BUGCALL_BIT, No_SSBR_Stuff, No_Precheck)
+ ld r9, r30, GENREG_OFF(9)
+ CALL(_bugsyscall, r9, r30) /* system call no. is in r9 */
+ DONE(DEBUG_BUGCALL_BIT)
+
+LABEL(_m197_sigsys)
+ PREP2("sigsys", 0, DEBUG_SIGSYS_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_SIGSYS, r30)
+ DONE(DEBUG_SIGSYS_BIT)
+
+LABEL(_m197_sigtrap)
+ PREP2("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_SIGTRAP, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
+
+LABEL(_m197_stepbpt)
+ PREP2("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_STEPBPT, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
+
+LABEL(_m197_userbpt)
+ PREP2("sigtrap", 0, DEBUG_SIGTRAP_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_USERBPT, r30)
+ DONE(DEBUG_SIGTRAP_BIT)
+
+#if DDB
+ LABEL(_m197_break)
+ PREP2("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_KDB_BREAK, r30)
+ DONE(DEBUG_BREAK_BIT)
+ LABEL(_m197_trace)
+ PREP2("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_KDB_TRACE, r30)
+ DONE(DEBUG_TRACE_BIT)
+ LABEL(_m197_entry)
+ PREP2("kdb", 132, DEBUG_KDB_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_KDB_ENTRY, r30)
+ DONE(DEBUG_KDB_BIT)
+
+#else /* else not DDB */
+ LABEL(_m197_break)
+ PREP2("break", 130, DEBUG_BREAK_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_BREAK_BIT)
+ LABEL(_m197_trace)
+ PREP2("trace", 131, DEBUG_TRACE_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_TRACE_BIT)
+ LABEL(_m197_entry)
+ PREP2("unknown", 132, DEBUG_UNKNOWN_BIT, No_SSBR_Stuff, No_Precheck)
+ CALL(_trap2, T_UNKNOWNFLT, r30)
+ DONE(DEBUG_UNKNOWN_BIT)
+#endif /* DDB */
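
Each of the entry points above follows the same PREP2/CALL/DONE shape: build an exception frame, call a C handler with a fixed trap code, then restore and rte. A minimal C model of that dispatch (trap2() and the T_* codes are real and live in trap.c; the enum values, frame layout and handler name here are stand-ins for illustration):

    /* stand-in trap codes; the real values come from <machine/trap.h> */
    enum trap_code { T_UNKNOWNFLT, T_INSTFLT, T_MISALGNFLT, T_ILLFLT };

    struct exception_frame;         /* register save area built by PREP2 */

    /* the real trap2() decides whether to panic or post a signal */
    extern void trap2(enum trap_code, struct exception_frame *);

    /* what one PREP2/CALL/DONE entry point boils down to */
    static void misaligned_handler(struct exception_frame *ef)
    {
        /* PREP2: save registers and shadow state into ef (not shown) */
        trap2(T_MISALGNFLT, ef);
        /* DONE: reload registers from ef and rte back (not shown) */
    }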
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * The error exception handler.
+ * The error exception is raised when any other non-trap exception is raised
+ * while shadowing is off. This is Bad News.
+ *
+ * The shadow registers are not valid in this case (shadowing was off).
+ * R1-R31 may be interesting though, so we'll save them.
+ *
+ * We'll not worry about trashing r26-29 here,
+ * since they aren't generally used.
+ */
+LABEL(_m197_error_handler)
+ /* pick up the slavestack */
+ or r26, r0, r31 /* save old stack */
+ or.u r31, r0, hi16(_intstack_end)
+ or r31, r31, lo16(_intstack_end)
+
+ /* zero the stack, so we'll know what we're lookin' at */
+ or.u r27, r0, hi16(_intstack)
+ or r27, r27, lo16(_intstack)
+1: cmp r28, r27, r31
+ bb1 ge, r28, 2f /* branch if at the end of the stack */
+ st r0, r0, r27
+ br.n 1b
+ addu r27, r27, 4 /* bump up */
+2: /* stack has been cleared */
+
+ /* ensure that stack is 8-byte aligned */
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+
+ /* create exception frame on stack */
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+
+ /* save old R31 and other R registers */
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st r30, r31, GENREG_OFF(30)
+ st r26, r31, GENREG_OFF(31)
+
+ /* save shadow registers (note: stale, since shadowing was off) */
+ ldcr r10, EPSR
+ st r10, r31, REG_OFF(EF_EPSR)
+ ldcr r10, SXIP
+ st r10, r31, REG_OFF(EF_SXIP)
+ ldcr r10, SNIP
+ st r10, r31, REG_OFF(EF_SNIP)
+ ldcr r10, SR1
+ st r10, r31, REG_OFF(EF_MODE)
+
+ /* shove sr2 into EF_FPLS1 */
+ ldcr r10, SR2
+ st r10, r31, REG_OFF(EF_FPLS1)
+
+ /* shove sr3 into EF_FPHS2 */
+ ldcr r10, SR3
+ st r10, r31, REG_OFF(EF_FPHS2)
+
+ /* error vector is zippo numero el'zeroooo */
+ st r0, r31, REG_OFF(EF_VECTOR)
+
+ /*
+ * Cheap way to enable FPU and start shadowing again.
+ */
+ ldcr r10, PSR
+ clr r10, r10, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
+ clr r10, r10, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
+
+ stcr r10, PSR /* bang */
FLUSH_PIPELINE
- stcr r1, PSR
- /* reload the control regs*/
- st r0,r31, REG_OFF(EF_IPFSR)
- st r0,r31, REG_OFF(EF_DPFSR)
+ /* put pointer to regs into r30... r31 will become a simple stack */
+ or r30, r31, r0
+
+ subu r31, r31, 0x10 /* make some breathing space */
+ st r30, r31, 0x0c /* store frame pointer on the stack */
+ st r30, r31, 0x08 /* store again for the debugger to recognize */
+ or.u r20, r0, hi16(0x87654321)
+ or r20, r20, lo16(0x87654321)
+ st r20, r31, 0x04
+ st r20, r31, 0x00
+
+ CALL(_error_fault, r30, r30)
+
+ /* TURN INTERRUPTS back on */
+ ldcr r1, PSR
+ clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r1, PSR
+ FLUSH_PIPELINE
+
+LABEL(m197_error_loop) bsr m197_error_loop
+ /* never returns*/
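
The stack-clearing loop above, restated in C for reference (a sketch only; intstack and intstack_end stand for the _intstack/_intstack_end bounds, assumed word-aligned as locore.S arranges):

    /* zero the interrupt stack so a post-mortem sees only fresh data;
       mirrors the cmp/bb1/st/br.n loop above */
    extern unsigned int intstack[], intstack_end[];

    static void clear_intstack(void)
    {
        unsigned int *p;

        for (p = intstack; p < intstack_end; p++)
            *p = 0;
    }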
+
+/*
+ * The reset exception handler.
+ * The reset exception is raised when the RST signal is asserted (machine
+ * is reset), the value of VBR is changed after exceptions are enabled,
+ * or when a jmp or br/bsr to addr 0 is taken (accidents do happen :-)
+ *
+ * To tell the difference, you should check the value of r1 and the valid
+ * bit of SXIP.
+ *
+ * Upon a real reset, VBR is set to zero (0), so code must be at addr 0
+ * to handle it!!!
+ *
+ * This is totally different from _error_handler. Shadowing might or
+ * might not be on.
+ * R1-R31 could tell you a lot about what happened, so we'll save them.
+ *
+ * We'll not worry about trashing r26-29 here,
+ * since they aren't generally used.
+ */
+LABEL(_m197_reset_handler)
+ /* pick up the slavestack */
+ or r26, r0, r31 /* save old stack */
+ or.u r31, r0, hi16(_intstack_end)
+ or r31, r31, lo16(_intstack_end)
+
+ /* zero the stack, so we'll know what we're lookin' at */
+ or.u r27, r0, hi16(_intstack)
+ or r27, r27, lo16(_intstack)
+1: cmp r28, r27, r31
+ bb1 ge, r28, 2f /* branch if at the end of the stack */
+ st r0, r0, r27
+ br.n 1b
+ addu r27, r27, 4 /* bump up */
+2: /* stack has been cleared */
+
+ /* ensure that stack is 8-byte aligned */
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+
+ /* create exception frame on stack */
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+
+ /* save old R31 and other R registers */
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st r30, r31, GENREG_OFF(30)
+ st r26, r31, GENREG_OFF(31)
+
+ /* save shadow registers */
+ ldcr r10, EPSR
+ st r10, r31, REG_OFF(EF_EPSR)
+ ldcr r10, SXIP
+ st r10, r31, REG_OFF(EF_SXIP)
+ ldcr r10, SNIP
+ st r10, r31, REG_OFF(EF_SNIP)
+ ldcr r10, SR1
+ st r10, r31, REG_OFF(EF_MODE)
+
+ /* shove sr2 into EF_FPLS1 */
+ ldcr r10, SR2
+ st r10, r31, REG_OFF(EF_FPLS1)
+
+ /* shove sr3 into EF_FPHS2 */
+ ldcr r10, SR3
+ st r10, r31, REG_OFF(EF_FPHS2)
+
+ /* error vector is zippo numero el'zeroooo */
+ st r0, r31, REG_OFF(EF_VECTOR)
/*
- * Note: no need to restore the SXIP.
- * When the "rte" causes execution to continue
- * first with the instruction pointed to by the NIP
- * and then the FIP.
- *
- * See MC88100 Risc Processor User's Manual, 2nd Edition,
- * section 6.4.3.1.2-4
+ * Cheap way to enable FPU and start shadowing again.
*/
- ld r30, r31, REG_OFF(EF_SNIP)
- ld r1, r31, REG_OFF(EF_SFIP)
- stcr r0, SSBR
- stcr r30, SNIP
- stcr r1, SFIP
+ ldcr r10, PSR
+ clr r10, r10, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
+ clr r10, r10, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
+
+ stcr r10, PSR /* bang */
+ FLUSH_PIPELINE
+
+ /* put pointer to regs into r30... r31 will become a simple stack */
+ or r30, r31, r0
- ld r30, r31, REG_OFF(EF_EPSR)
- ld r1, r31, REG_OFF(EF_MODE)
- stcr r30, EPSR
+ subu r31, r31, 0x10 /* make some breathing space */
+ st r30, r31, 0x0c /* store frame pointer on the stack */
+ st r30, r31, 0x08 /* store again for the debugger to recognize */
+ or.u r20, r0, hi16(0x87654321)
+ or r20, r20, lo16(0x87654321)
+ st r20, r31, 0x04
+ st r20, r31, 0x00
- /* Now restore r1, r30, and r31 */
- ld r1, r31, GENREG_OFF(1)
- ld.d r30, r31, GENREG_OFF(30)
+ CALL(_error_reset, r30, r30)
- _LABEL(return_from_exception)
- RTE
+ /* TURN INTERRUPTS back on */
+ ldcr r1, PSR
+ clr r1, r1, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r1, PSR
+ FLUSH_PIPELINE
+
+LABEL(m197_error_loop2) bsr m197_error_loop2
+/* never returns*/
+
+
+LABEL(m197_setup_phase_one)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: saved copy of exception-time r1 *
+ * SR3: must be preserved .. may be the exception-time stack *
+ * r1: return address to calling exception handler *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * Decide where we're going to put the exception frame. *
+ * Might be at the end of R31, SR3, or the thread's *
+ * pcb. *
+ \***************************************************************/
+
+ /* Check if we are coming in from a FPU restart exception.
+ If so, the pcb will be in SR3 */
+ NOP
+ xcr r1, r1, SR2
+ NOP
+ NOP
+ NOP
+
+ bb1 FLAG_ENABLING_FPU, FLAGS, m197_use_SR3_pcb
+ /* are we coming in from user mode? If so, pick up thread pcb */
+ bb0 FLAG_FROM_KERNEL, FLAGS, m197_pickup_stack
+
+ /* Exception in kernel mode, not an FPU restart */
+_LABEL(m197_already_on_kernel_stack)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: must be preserved; may be important for other exceptions *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * We're already on the kernel stack, but not having *
+ * needed to use SR3. We can just make room on the *
+ * stack (r31) for our exception frame. *
+ \***************************************************************/
+ subu r31, r31, SIZEOF_EF /* r31 now our E.F. */
+ st FLAGS,r31, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
+
+ ldcr r1, SR3 /* save previous SR3 */
+ st r1, r31, REG_OFF(EF_SR3)
+
+ addu r1, r31, SIZEOF_EF /* save previous r31 */
+ br.n m197_have_pcb
+ st r1, r31, GENREG_OFF(31)
+
+
+_LABEL(m197_use_SR3_pcb)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread (if any, null if not) *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: must be preserved; exception-time stack pointer *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * An exception occurred while enabling the FPU. Since r31 *
+ * is the user's r31 while enabling the FPU, we had put *
+ * our pcb pointer into SR3, so make room from *
+ * there for our stack pointer. *
+ * We need to check if SR3 is the old stack pointer or the *
+ * pointer off to the user pcb. If it is pointing to the user *
+ * pcb, we need to pick up the kernel stack. Otherwise *
+ * we need to allocate a frame upon it. *
+ * We look at the EPSR to see if it was from user mode *
+ * Unfortunately, we have no registers free at the moment *
+ * But we know register 0 in the pcb frame will always be *
+ * zero, so we can use it as scratch storage. *
+ * *
+ * *
+ \***************************************************************/
+ xcr r30, r30, SR3 /* r30 = old exception frame */
+ st r1, r30, GENREG_OFF(0) /* free up r1 */
+ ld r1, r30, REG_OFF(EF_EPSR) /* get back the epsr */
+ bb0.n PSR_SUPERVISOR_MODE_BIT, r1, 1f /* if user mode */
+ ld r1, r30, GENREG_OFF(0) /* restore r1 */
+ /* we were in kernel mode - dump frame upon the stack */
+ st r0, r30, GENREG_OFF(0) /* repair old frame */
+ subu r30, r30, SIZEOF_EF /* r30 now our E.F. */
+ st FLAGS,r30, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r30, GENREG_OFF(1) /* save prev. r1 (now r1 free) */
+
+ st r31, r30, GENREG_OFF(31) /* save previous r31 */
+ or r31, r0, r30 /* make r31 our pointer. */
+ addu r30, r30, SIZEOF_EF /* r30 now has previous SR3 */
+ st r30, r31, REG_OFF(EF_SR3) /* save previous SR3 */
+ br.n m197_have_pcb
+ xcr r30, r30, SR3 /* restore r30 */
+1:
+ /* we took an exception while restarting the FPU from user space.
+ * Consequently, we never picked up a stack. Do so now.
+ * R1 is currently free (saved in the exception frame pointed at by
+ * r30) */
+ or.u r1, r0, hi16(_kstack)
+ ld r1, r1, lo16(_kstack)
+ addu r1, r1, USIZE-SIZEOF_EF
+ st FLAGS,r1, REG_OFF(EF_FLAGS) /* store flags */
+ st r31, r1, GENREG_OFF(31) /* store r31 - now free */
+ st r30, r1, REG_OFF(EF_SR3) /* store old SR3 (pcb) */
+ or r31, r1, r0 /* make r31 our exception frame pointer */
+ ld r1, r30, GENREG_OFF(0) /* restore old r1 */
+ st r0, r30, GENREG_OFF(0) /* repair that frame */
+ st r1, r31, GENREG_OFF(1) /* store r1 in its proper place */
+ br.n m197_have_pcb
+ xcr r30, r30, SR3 /* restore r30 */
+
+_LABEL(m197_pickup_stack)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * FLAGS: CPU status flags *
+ *************************************************** *
+ * immediate goal: *
+ * Since we're servicing an exception from user mode, we *
+ * know that SR3 is free. We use it to free up a temp. *
+ * register to be used in getting the thread's pcb *
+ \***************************************************************/
+ stcr r31, SR3 /* save previous r31 */
+
+ /* switch to the thread's kernel stack. */
+ or.u r31, r0, hi16(_curpcb)
+ ld r31, r31, lo16(_curpcb)
+ addu r31, r31, PCB_USER_STATE /* point to user save area */
+ st FLAGS,r31, REG_OFF(EF_FLAGS) /* save flags */
+ st r1, r31, GENREG_OFF(1) /* save prev. r1 (now r1 free)*/
+ ldcr r1, SR3 /* save previous r31 */
+ st r1, r31, GENREG_OFF(31)
+ /*FALLTHROUGH */
+
+_LABEL(m197_have_pcb)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: return address to the calling exception handler *
+ * SR3: free *
+ * r1: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * Save the shadow registers that need to be saved to *
+ * the exception frame. *
+ \***************************************************************/
+ stcr TMP, SR3 /* free up TMP, TMP2, TMP3 */
+ SAVE_TMP2
+ SAVE_TMP3
+
+ /* save some exception-time registers to the exception frame */
+ ldcr TMP, EPSR
+ st TMP, r31, REG_OFF(EF_EPSR)
+ ldcr TMP3, SNIP
+ st TMP3, r31, REG_OFF(EF_SNIP)
+
+ /*
+ * Save Pbus fault status register from data and inst CMMU.
+ */
+
+ ldcr TMP, ISR
+ ldcr TMP2, ILAR
+ ldcr TMP3, IPAR
+ st TMP, r31, REG_OFF(EF_ISR)
+ st TMP2, r31, REG_OFF(EF_ILAR)
+ st TMP3, r31, REG_OFF(EF_IPAR)
+ ldcr TMP, ISAP
+ st TMP, r31, REG_OFF(EF_DMT0) /* hack ef! */
+ ldcr TMP, DSR
+ ldcr TMP2, DLAR
+ ldcr TMP3, DPAR
+ st TMP, r31, REG_OFF(EF_DSR)
+ st TMP2, r31, REG_OFF(EF_DLAR)
+ st TMP3, r31, REG_OFF(EF_DPAR)
+ ldcr TMP, DSAP
+ st TMP, r31, REG_OFF(EF_DMT1) /* hack ef! */
+ ldcr TMP2, SXIP
+ st TMP2, r31, REG_OFF(EF_SXIP)
+
+ ldcr r1, SR2
+ jmp r1 /* return */
+
+/************************************************************************/
+/************************************************************************/
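
m197_setup_phase_one's frame-placement decision, restated in C: the bb1/bb0 pair above tests FLAG_ENABLING_FPU and FLAG_FROM_KERNEL and picks one of three homes for the exception frame. The enum and helper below are invented for this sketch:

    enum frame_home {
        ON_KERNEL_STACK,    /* from kernel mode: make room on r31 */
        ON_SR3_PCB,         /* FPU restart in progress: pcb pointer is in SR3 */
        ON_THREAD_PCB       /* from user mode: pick up curpcb's save area */
    };

    static enum frame_home
    pick_frame_home(int enabling_fpu, int from_kernel)
    {
        if (enabling_fpu)
            return ON_SR3_PCB;      /* m197_use_SR3_pcb */
        if (!from_kernel)
            return ON_THREAD_PCB;   /* m197_pickup_stack */
        return ON_KERNEL_STACK;     /* m197_already_on_kernel_stack */
    }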
+
+LABEL(m197_setup_phase_two)
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: saved return address to calling exception handler *
+ * SR1: saved copy of exception-time register now holding FLAGS *
+ * SR2: free *
+ * SR3: saved TMP *
+ * r1: return address to calling exception handler *
+ * TMP: possibly revised SSBR *
+ * TMP2: free *
+ * TMP3: free *
+ * FLAGS: CPU status flags *
+ * r31: our exception frame *
+ * Valid in the exception frame: *
+ * Exception-time r1, r31, FLAGS. *
+ * Exception-time TMP2, TMP3. *
+ * Exception-time epsr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * restore the system to the exception-time state (except *
+ * SR3 will be OUR stack pointer) so that we may restart the FPU. *
+ \***************************************************************/
+ /*stcr r1, SR0*/ /* save return address */
+
+ RESTORE_TMP2 /* done with extra temp regs */
+ RESTORE_TMP3 /* done with extra temp regs */
+
+ ldcr TMP, PSR
+ clr TMP, TMP, 1<PSR_FPU_DISABLE_BIT> /* enable the FPU */
+ clr TMP, TMP, 1<PSR_SHADOW_FREEZE_BIT> /* also enable shadowing */
+ stcr TMP, EPSR
+
+ or.u TMP, r0, hi16(m197_fpu_enable)
+ or TMP, TMP, lo16(m197_fpu_enable)
+ stcr TMP, EXIP /* rte will jump to m197_fpu_enable */
+ addu TMP, TMP, 4
+ stcr TMP, ENIP /* and then continue after that */
+
+ set FLAGS, FLAGS, 1<FLAG_ENABLING_FPU> /* note what we're doing.*/
+ xcr FLAGS, FLAGS, SR1
+ st r1, r31, REG_OFF(EF_RET) /* save the return address */
+ ld r1, r31, GENREG_OFF(1) /* get original r1 */
+
+ xcr TMP, r31, SR3 /* TMP now restored. R31 now saved in SR3 */
+ ld r31, r31, GENREG_OFF(31) /* get original r31 */
+
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: CPU flags *
+ * SR2: free *
+ * SR3: pointer to our exception frame (our stack pointer) *
+ * r1 through r31: original exception-time values *
+ * *
+ * Valid in the exception frame: *
+ * Exception-time FLAGS. *
+ * Exception-time epsr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ * Held temporarily in the exception frame: *
+ * Return address to the calling exception handler. *
+ *************************************************** *
+ * immediate goal: *
+ * Do an RTE to restart the fpu and jump to "fpu_enable" *
+ * Another exception (or exceptions) may be raised during *
+ * this, which is why FLAG_ENABLING_FPU is set in SR1. *
+ \***************************************************************/
+
+ RTE /* jumps to "fpu_enable" on the next line to enable the FPU. */
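
The point of the sequence above is to let the rte itself re-enable the FPU: EPSR is written with the FPU-disable and shadow-freeze bits cleared, and EXIP/ENIP are aimed at m197_fpu_enable, so the rte "returns" into the code below with the FPU running. A C model of the register setup (the struct is illustrative; the bit numbers are the PSR_* values defined in locore.S):

    struct cpu_shadow {                 /* models EPSR/EXIP/ENIP */
        unsigned int epsr, exip, enip;
    };

    #define PSR_SHADOW_FREEZE_BIT 0
    #define PSR_FPU_DISABLE_BIT   3

    static void setup_rte_to(struct cpu_shadow *cpu, unsigned int target)
    {
        cpu->epsr &= ~(1u << PSR_FPU_DISABLE_BIT);    /* enable the FPU */
        cpu->epsr &= ~(1u << PSR_SHADOW_FREEZE_BIT);  /* resume shadowing */
        cpu->exip = target;         /* rte resumes here... */
        cpu->enip = target + 4;     /* ...and continues here */
    }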
+
+_LABEL(m197_fpu_enable)
+ FLUSH_PIPELINE
+ xcr TMP, TMP, SR3 /* get E.F. pointer */
+ st.d r30, TMP, GENREG_OFF(30) /* save previous r30, r31 */
+ or r31, TMP, r0 /* transfer E.F. pointer to r31 */
+ ld TMP, r31, REG_OFF(EF_SR3) /* get previous SR3; maybe important*/
+
+ /* make sure that the FLAG_ENABLING_FPU bit is off */
+ xcr FLAGS,FLAGS,SR1
+ clr FLAGS,FLAGS,1<FLAG_ENABLING_FPU>
+ xcr FLAGS,FLAGS,SR1
+
+ xcr TMP, TMP, SR3 /* replace TMP, SR3 */
+
+ /* now save all regs to the exception frame. */
+ st.d r0 , r31, GENREG_OFF(0)
+ st.d r2 , r31, GENREG_OFF(2)
+ st.d r4 , r31, GENREG_OFF(4)
+ st.d r6 , r31, GENREG_OFF(6)
+ st.d r8 , r31, GENREG_OFF(8)
+ st.d r10, r31, GENREG_OFF(10)
+ st.d r12, r31, GENREG_OFF(12)
+ st.d r14, r31, GENREG_OFF(14)
+ st.d r16, r31, GENREG_OFF(16)
+ st.d r18, r31, GENREG_OFF(18)
+ st.d r20, r31, GENREG_OFF(20)
+ st.d r22, r31, GENREG_OFF(22)
+ st.d r24, r31, GENREG_OFF(24)
+ st.d r26, r31, GENREG_OFF(26)
+ st.d r28, r31, GENREG_OFF(28)
+#ifdef JEFF_DEBUG
+ /* mark beginning of frame with notable value */
+ or.u r20, r0, hi16(0x12345678)
+ or r20, r20, lo16(0x12345678)
+ st r20, r31, GENREG_OFF(0)
+#endif
+
+ /***************** REGISTER STATUS BLOCK ***********************\
+ * SR0: current thread *
+ * SR1: free *
+ * SR2: free *
+ * SR3: previous exception-time SR3 *
+ * r1: return address to the calling exception handler *
+ * r2 through r30: free *
+ * r31: our exception frame *
+ * *
+ * Valid in the exception frame: *
+ * Exception-time r0 through r31. *
+ * Exception-time FLAGS. *
+ * Exception-time epsr, sfip, snip, sxip. *
+ * Exception number (EF_VECTOR). *
+ * Dmt0 *
+ * Other data pipeline control registers, if appropriate. *
+ * FPU control registers, if appropriate. *
+ * Exception SR3, if appropriate. *
+ *************************************************** *
+ * immediate goal: *
+ * Pick up a stack if we came in from user mode, put *
+ * a copy of the exception frame pointer into r30, *
+ * bump the stack a doubleword and write the exception *
+ * frame pointer. *
+ * if not an interrupt exception, *
+ * Turn on interrupts and service any outstanding *
+ * data access exceptions. *
+ * Return to calling exception handler to *
+ * service the exception. *
+ \***************************************************************/
+
+ /*
+ * If it's not the interrupt exception, enable interrupts and
+ * take care of any data access exceptions......
+ *
+#if INTSTACK
+ * If interrupt exception, switch to interrupt stack if not
+ * already there. Else, switch to kernel stack.
+#endif
+ */
+ or r30, r0, r31 /* get a copy of the e.f. pointer */
+ ld r2, r31, REG_OFF(EF_EPSR)
+ bb1 PSR_SUPERVISOR_MODE_BIT, r2, 1f /* If in kernel mode */
+
+#if INTSTACK
+ ld r3, r31, REG_OFF(EF_VECTOR)
+ cmp r3, r3, 1 /* is interrupt ? */
+ bb0 eq, r3, 2f
+ or.u r31, r0, hi16(_intstack_end) /* switch to int stack */
+ or r31, r31, lo16(_intstack_end)
+ br 3f
+2:
+#endif
+ or.u r31, r0, hi16(_kstack)
+ ld r31, r31, lo16(_kstack)
+ addu r31, r31, USIZE /* point at proper end */
+ br 3f
+1:
+#if INTSTACK
+ ld r3, r31, REG_OFF(EF_VECTOR)
+ cmp r3, r3, 1 /* is interrupt ? */
+ bb0 eq, r3, 3f /* no, we will stay on kern stack */
+ or.u r31, r0, hi16(_intstack_end) /* switch to int stack */
+ or r31, r31, lo16(_intstack_end)
+#endif /* INTSTACK */
+ /* This label is here for debugging */
+m197_exception_handler_has_ksp: global m197_exception_handler_has_ksp
+3: /*
+ here - r30 holds a pointer to the exception frame.
+ r31 is a pointer to the kernel stack/interrupt stack.
+ */
+ subu r31, r31, 8 /* make some breathing space */
+ st r30, r31, 0 /* store frame pointer on the stack */
+#if DDB
+ st r30, r31, 4 /* store it again for the debugger to recognize */
+#endif /* DDB */
+
+ ld r2, r30, REG_OFF(EF_VECTOR)
+ bcnd.n eq0, r2, m197_return_to_calling_exception_handler /* is error */
+ ld r14, r30, REG_OFF(EF_RET)
+ cmp r3, r2, 1 /* interrupt is exception #1 -- is this an interrupt? */
+ bb1.n eq, r3, m197_return_to_calling_exception_handler /* skip if so */
+
+#if DDB
+ cmp r3, r2, 130 /* DDB break exception */
+ bb1.n eq, r3, m197_return_to_calling_exception_handler
+
+ cmp r3, r2, 132 /* DDB entry exception */
+ bb1.n eq, r3, m197_return_to_calling_exception_handler
+#endif
+
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT> /* enable interrupts */
+ stcr r2, PSR
+#if DDB
+ FLUSH_PIPELINE
+#endif
+
+ /* service any outstanding data pipeline stuff
+ - check dsr... anything outstanding?*/
+
+ ld r3, r30, REG_OFF(EF_DSR)
+ cmp r3, r3, 0
+ bb1 eq, r3, m197_return_to_calling_exception_handler
+
+/*
+ r30 can be clobbered by calls. So stuff its value into a
+ preserved register, say r15. R14 is in use (see return_to_... below).
+ */
+ or r15, r0, r30
+
+ CALL(_trap2, T_DATAFLT, r15)
+
+/* restore it... */
+ or r30, r0, r15
+
+ /* clear the dsr word in the E.F */
+ st r0, r30, REG_OFF(EF_DSR)
+
+_LABEL(m197_return_to_calling_exception_handler)
+ jmp r14 /* loaded above */
+
+#endif
+
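The tail of the m197 handler path above makes a single decision: re-enable interrupts and service a pending data fault unless this is the error, interrupt or (with DDB) a debugger vector. The same predicate in C (vector numbers as in the code above; the function name is invented):

    static int should_enable_interrupts(int vector)
    {
        if (vector == 0)            /* error exception */
            return 0;
        if (vector == 1)            /* interrupt exception */
            return 0;
    #ifdef DDB
        if (vector == 130 || vector == 132) /* DDB break / entry */
            return 0;
    #endif
        return 1;
    }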
diff --git a/sys/arch/mvme88k/mvme88k/genassym.c b/sys/arch/mvme88k/mvme88k/genassym.c
index 7ef19c706b7..8090849ed48 100644
--- a/sys/arch/mvme88k/mvme88k/genassym.c
+++ b/sys/arch/mvme88k/mvme88k/genassym.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: genassym.c,v 1.4 1999/02/09 06:36:28 smurph Exp $ */
+/* $OpenBSD: genassym.c,v 1.5 1999/09/27 19:13:22 smurph Exp $ */
/*
* Copyright (c) 1982, 1990 The Regents of the University of California.
* All rights reserved.
@@ -32,7 +32,7 @@
* SUCH DAMAGE.
*
* @(#)genassym.c 7.8 (Berkeley) 5/7/91
- * $Id: genassym.c,v 1.4 1999/02/09 06:36:28 smurph Exp $
+ * $Id: genassym.c,v 1.5 1999/09/27 19:13:22 smurph Exp $
*/
#ifndef KERNEL
@@ -110,6 +110,8 @@ main()
pair("EF_EPSR", int_offset_of_element(ss->epsr));
pair("EF_SXIP", int_offset_of_element(ss->sxip));
pair("EF_SFIP", int_offset_of_element(ss->sfip));
+ pair("EF_EXIP", int_offset_of_element(ss->sxip)); /* MVME197 */
+ pair("EF_EFIP", int_offset_of_element(ss->sfip)); /* MVME197 */
pair("EF_SNIP", int_offset_of_element(ss->snip));
pair("EF_SSBR", int_offset_of_element(ss->ssbr));
pair("EF_DMT0", int_offset_of_element(ss->dmt0));
@@ -122,6 +124,8 @@ main()
pair("EF_DMD2", int_offset_of_element(ss->dmd2));
pair("EF_DMA2", int_offset_of_element(ss->dma2));
pair("EF_FPECR", int_offset_of_element(ss->fpecr));
+ pair("EF_FPCR", int_offset_of_element(ss->fpcr)); /* MVME197 */
+ pair("EF_FPSR", int_offset_of_element(ss->fpsr)); /* MVME197 */
pair("EF_FPHS1", int_offset_of_element(ss->fphs1));
pair("EF_FPLS1", int_offset_of_element(ss->fpls1));
pair("EF_FPHS2", int_offset_of_element(ss->fphs2));
@@ -137,8 +141,17 @@ main()
pair("EF_RET", int_offset_of_element(ss->scratch1));
pair("EF_IPFSR",int_offset_of_element(ss->ipfsr));
pair("EF_DPFSR",int_offset_of_element(ss->dpfsr));
+ pair("EF_DSR",int_offset_of_element(ss->dsr)); /* MVME197 */
+ pair("EF_DLAR",int_offset_of_element(ss->dlar)); /* MVME197 */
+ pair("EF_DPAR",int_offset_of_element(ss->dpar)); /* MVME197 */
+ pair("EF_ISR",int_offset_of_element(ss->dsr)); /* MVME197 */
+ pair("EF_ILAR",int_offset_of_element(ss->ilar)); /* MVME197 */
+ pair("EF_IPAR",int_offset_of_element(ss->ipar)); /* MVME197 */
+ pair("EF_SRX",int_offset_of_element(ss->dpfsr));
pair("EF_NREGS", sizeof(*ss)/sizeof(int));
+/* end MVME197 only */
+
/* make a sanity check */
if (sizeof(*ss) & 7)
{
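
For context, genassym's pair() exists to turn C struct layouts into cpp defines the assembler can share; it simply prints one define per offset. A minimal standalone version (the real int_offset_of_element measures offsets in words, which the sketch mimics; the struct fields are stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    struct frame { int epsr; int sxip; };   /* stand-in for the trap frame */

    static void pair(const char *name, long value)
    {
        printf("#define %s %ld\n", name, value);
    }

    int main(void)
    {
        pair("EF_EPSR", (long)(offsetof(struct frame, epsr) / sizeof(int)));
        pair("EF_SXIP", (long)(offsetof(struct frame, sxip) / sizeof(int)));
        return 0;
    }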
diff --git a/sys/arch/mvme88k/mvme88k/locore.S b/sys/arch/mvme88k/mvme88k/locore.S
index 498331426a1..7b102554934 100644
--- a/sys/arch/mvme88k/mvme88k/locore.S
+++ b/sys/arch/mvme88k/mvme88k/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.8 1999/05/29 04:41:46 smurph Exp $ */
+/* $OpenBSD: locore.S,v 1.9 1999/09/27 19:13:22 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -89,18 +89,24 @@ LABEL(_kernel_text)
LABEL(_start)
LABEL(start)
br _start_text
-#if 0
- .align 4096 /* VBR points to page aligned list */
- LABEL(_vector_list) /* references memory BELOW this line */
- #include "machine/exception_vectors.h"
- word END_OF_VECTOR_LIST
-
-#endif
+ br _start_text
+ br _start_text
+ br _start_text
ENTRY(doboot)
/*
* Try hitting the SRST bit in VMEchip2 to reset the system.
*/
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 1f
+ bsr _m188_reset
+ br m188_doboot_fail
+#endif /* MVME188 */
+1:
or.u r3,r0, 0xfff4
ld r4,r3, 0x0060 /* read offset (LCSR +0x60) */
set r4,r4,1<23> /* set SYSRST bit - bit 23 */
@@ -117,6 +123,7 @@ ENTRY(doboot)
*/
/* Should we use idle_u instead? XXX nivas */
+m188_doboot_fail:
or.u r31, r0, hi16(_intstack_end)
or r31, r31, lo16(_intstack_end)
clr r31, r31, 3<0> /* round down to 8-byte boundary */
@@ -126,28 +133,6 @@ ENTRY(doboot)
/**************************************************************************/
LABEL(_start_text) /* This is the *real* start upon poweron or reset */
-#ifdef OLD_BOOT_LOADER
- /*
- * Args passed by boot loader
- * r2 howto
- * r3 first_addr (first available address)
- * r4 ((Clun << 8) ; Dlun & FF) -> bootdev
- * r5 esym
- * r6 miniroot
- */
- or.u r13, r0, hi16(_boothowto)
- st r2, r13, lo16(_boothowto)
- or.u r13, r0, hi16(_first_addr)
- st r3, r13, lo16(_first_addr)
-#if 0
- or.u r13, r0, hi16(_bootdev)
- st r4, r13, lo16(_bootdev)
-#endif
- or.u r13, r0, hi16(_esym)
- st r5, r13, lo16(_esym)
- or.u r13, r0, hi16(_miniroot)
- st r6, r13, lo16(_miniroot)
-#else /* OLD_BOOT_LOADER */
/*
* Args passed by boot loader
* r2 howto
@@ -155,7 +140,13 @@ LABEL(_start_text) /* This is the *real* start upon poweron or reset */
* r4 esym
* r5 start of mini
* r6 end miniroot
+ * r7 ((Clun << 8) ; Dlun & FF) -> bootdev
+ * r8 cpu type (0x187, 0x188, 0x197)
*/
+/*
+ * (*entry)(flag, bugargs.ctrl_addr, cp, kernel.smini,
+ * kernel.emini, bootdev, cputyp);
+ */
or.u r13, r0, hi16(_boothowto)
st r2, r13, lo16(_boothowto)
or.u r13, r0, hi16(_bootaddr)
@@ -166,7 +157,11 @@ LABEL(_start_text) /* This is the *real* start upon poweron or reset */
st r4, r13, lo16(_esym)
or.u r13, r0, hi16(_miniroot)
st r5, r13, lo16(_miniroot)
-#endif /* OLD_BOOT_LOADER */
+ or.u r13, r0, hi16(_bootdev)
+ st r7, r13, lo16(_bootdev)
+ or.u r13, r0, hi16(_cputyp)
+ st r8, r13, lo16(_cputyp)
+
/*
* CPU Initialization
*
@@ -201,34 +196,103 @@ LABEL(_start_text) /* This is the *real* start upon poweron or reset */
*
* jfriedl@omron.co.jp
*/
+ cmp r2, r8, 0x197 /* r8 contains cputyp */
+ bb1 eq, r2, 1f /* if it's a '197, skip SSBR */
stcr r0, SSBR /* clear this for later */
-#if 0
+1:
stcr r0, SR0 /* clear "current thread" */
stcr r0, SR1 /* clear the CPU flags */
-#define PSR_SHADOW_FREEZE_BIT 0
-#define PSR_INTERRUPT_DISABLE_BIT 1
-#define PSR_FPU_DISABLE_BIT 3
-#define PSR_BIG_ENDIAN_MODE 30
-#define PSR_SUPERVISOR_MODE_BIT 31
- set r11, r11, 1<PSR_SHADOW_FREEZE_BIT>
- set r11, r11, 1<PSR_FPU_DISABLE_BIT>
-#endif
-#define PSR_MXM_DISABLE_BIT 2
-
+
set r11, r0, 1<PSR_SUPERVISOR_MODE_BIT>
set r11, r11, 1<PSR_INTERRUPT_DISABLE_BIT>
- set r11, r11, 1<PSR_MXM_DISABLE_BIT> /* a cheat! for now... */
+ set r11, r11, 1<4>
+ set r11, r11, 1<29>
+ set r11, r11, 1<25>
stcr r11, PSR
- FLUSH_PIPELINE
-
+ stcr r0, VBR /* set Vector Base Register to 0, ALWAYS! */
+ FLUSH_PIPELINE
+ cmp r2, r8, 0x197 /* r8 contains cputyp */
+ bb1 eq, r2, master_start /* if it's a '197, skip to master_start */
#if 0
- or.u r11, r0, hi16(_vector_table)
- or r11, r11, lo16(_vector_table)
- stcr r11, VBR
-#else
- stcr r0, VBR
-#endif /* 0 */
+ /* clear BSS. Boot loader might have already done this... */
+ or.u r2, r0, hi16(_edata)
+ or r2, r2, lo16(_edata)
+ or.u r4, r0, hi16(_end)
+ or r4, r4, lo16(_end)
+ bsr.n _bzero /* bzero(edata, end-edata) */
+ subu r3, r4, r2
+#endif
+ /*
+ * First, count how many CPUs there are to attach
+ */
+ or.u r11, r0, hi16(initialized_cpu_lock)
+ or r11, r11, lo16(initialized_cpu_lock)
+_LABEL(check_init_lock)
+ FLUSH_PIPELINE
+ or r22, r0, 1
+ xmem r22, r11, r0 /* If r22 gets 0, we have the lock.. */
+ bcnd eq0, r22, have_init_lock/* ..but if not, we must wait */
+
+_LABEL(wait_for_init_lock)
+ /* just watch the lock until it looks clear */
+ ld r22, r11, r0
+ bcnd eq0, r22, check_init_lock /* looks clear -- try to grab */
+ br wait_for_init_lock /* still held -- keep watching */
+
+_LABEL(have_init_lock)
+ FLUSH_PIPELINE
+ or.u r11, r0, hi16(_initialized_cpus)
+ ld r22, r11, lo16(_initialized_cpus)
+ add r23, r22, 1
+ st r23, r11, lo16(_initialized_cpus)
+
+ or.u r11, r0, hi16(initialized_cpu_lock)
+ st r0, r11, lo16(initialized_cpu_lock)
+ /*
+ * Now we vie with any other processors to see who's the master.
+ * We first try to obtain a lock to see who's allowed
+ * to check/set the master lock.
+ */
+ or.u r11, r0, hi16(_inter_processor_lock)
+ or r11, r11, lo16(_inter_processor_lock)
+_LABEL(check_ip_lock)
+ FLUSH_PIPELINE
+ or r22, r0, 1
+ xmem r22, r11, r0 /* If r22 gets 0, we have the lock.. */
+ bcnd eq0, r22, have_ip_lock /* ..but if not, we must wait */
+_LABEL(wait_for_ip_lock)
+ /* just watch the lock until it looks clear */
+ ld r22, r11, r0
+ bcnd ne0, r22, wait_for_ip_lock
+ /* since we can be here with caches off, spin a short delay loop to
+ keep the bus from getting overloaded */
+ or r2, r0, lo16(1000)
+_LABEL(ip_loop)
+ subu r2, r2, 1
+ bcnd ne0, r2, ip_loop /* spin until the count reaches zero */
+ br check_ip_lock /* looks clear -- try to grab */
+
+_LABEL(have_ip_lock)
+ /* now try to grab the master_processor_chosen prize */
+ FLUSH_PIPELINE
+ or.u r11, r0, hi16(master_processor_chosen)
+ or r11, r11, lo16(master_processor_chosen)
+ or r22, r0, 1
+ xmem r22, r11, r0
+
+ /*
+ * If r22 is not clear we're a slave,
+ * otherwise we're first and the master.
+ *
+ * Note that we haven't released the interprocessor lock....
+ * We'll do that when we're ready for another CPU to go.
+ * (if we're the master, we'll do that in master_start below.
+ * if we're a slave, we'll do it in slave_start below).
+ */
+ bcnd ne0, r22, slave_start
+ /* fall through to master start if that's appropriate */
+_LABEL(master_start)
/*
* Switch to interrupt stack
* Use idle_u's stack instead?
@@ -237,6 +301,17 @@ LABEL(_start_text) /* This is the *real* start upon poweron or reset */
or r31, r31, lo16(_intstack_end)
clr r31, r31, 3<0> /* round down to 8-byte boundary */
+#ifdef MVME197
+ cmp r2, r8, 0x197 /* r8 contains cputyp */
+ bb1 ne, r2, 1f /* if it's a '197, use different vectors */
+ or.u r3, r0, hi16(_m197_vector_list)
+ or r3, r3, lo16(_m197_vector_list)
+ bsr.n _vector_init
+ ldcr r2, VBR
+ br 2f
+#endif /* MVME197 */
+1:
+#if defined(MVME187) || defined(MVME188)
/*
* Want to make the call:
* vector_init(VBR, vector_list)
@@ -245,47 +320,31 @@ LABEL(_start_text) /* This is the *real* start upon poweron or reset */
or r3, r3, lo16(_vector_list)
bsr.n _vector_init
ldcr r2, VBR
-
-#if 0
- /* clear BSS. Boot loader might have already done this... */
- or.u r2, r0, hi16(_edata)
- or r2, r2, lo16(_edata)
- or.u r4, r0, hi16(_end)
- or r4, r4, lo16(_end)
- bsr.n _bzero /* bzero(edata, end-edata) */
- subu r3, r4, r2
-#endif
-
+#endif /* defined(MVME187) || defined(MVME188) */
+2:
/* still on int stack */
- bsr.n _m187_bootstrap
+ bsr.n _mvme_bootstrap
subu r31, r31, 40
addu r31, r31, 40
- /* switch to proc0 uarea */
+ /* we now know our cpu number, so we
+ * can set interrupt_stack[cpu_number()] = _intstack
+ */
+ ldcr r10, SR1
+ mak r10, r10, FLAG_CPU_FIELD_WIDTH<0> /* r10 <-- CPU# */
+ /* figure interrupt_stack[cpu_number()] */
+ or.u r11, r0, hi16(_interrupt_stack)
+ or r11, r11, lo16(_interrupt_stack)
+ or.u r12, r0, hi16(_intstack)
+ or r12, r12, lo16(_intstack)
+ st r12, r11 [r10]
+
+ /* switch to proc0 uarea */
or.u r10, r0, hi16(UADDR)
or r31, r10,lo16(UADDR)
addu r31, r31, USIZE - 8
- /*
- * Block clock interrupts for now. There is a problem with
- * clock interrupts when the first clock interrupt is received.
- * Hardclock() sees the base priority to be 0 and drops IPL to
- * splsofclock() before calling softclock(). This opens up other
- * clock interrupts to be received before the first one is ever
- * finished. Also, the first entry on calltodo list is stuck for
- * ever. As a work around, I will set the IPL to softclock so
- * that the CLKF_BASEPRI() check in hardclock() will return false.
- * XXX nivas
- */
-
-#if XXX
- bsr.n _setipl
- or r2, r0, IPL_SOFTCLOCK
- bsr _enable_interrupt
- bsr.n _setipl
- or r2, r0, IPL_HIGH
-#endif
/* make the call: main() */
or.u r2, r0, hi16(UADDR)
or r2, r2,lo16(UADDR)
@@ -295,29 +354,97 @@ LABEL(_start_text) /* This is the *real* start upon poweron or reset */
addu r31, r31, 40
bsr _panic
+/***********************************************************************
+ * slave CPUs starts here
+ */
+_LABEL(slave_start)
+ /* while holding the inter_processor_lock, the slave cpu
+ can use the slavestack to call slave_pre_main and
+ determine its cpu number. After that, however, it should
+ switch over to the interrupt stack associated with its
+ cpu */
+
+ /* r31 <-- slavestack */
+ or.u r31, r0, hi16(_slavestack_end)
+ or r31, r31, lo16(_slavestack_end)
+ clr r31, r31, 3<0> /* round down to 8-byte boundary */
+
+ bsr.n _slave_pre_main /* set cpu number */
+ subu r31, r31, 48 /* allocate frame */
+
+ bsr _get_slave_stack
+ addu r31, r2, INTSTACK_SIZE + 4096
+
+ /* SR1 now contains our cpu number. We can now release
+ the inter_processor_lock, as we are done with the
+ slavestack. We also have an interrupt stack */
+
+ or.u r10, r0, hi16(_inter_processor_lock)
+ st r0, r10, lo16(_inter_processor_lock)
+
+ br.n _slave_main /* does not return */
+ subu r31, r31, 40 /* allocate frame */
+
+_LABEL(_spin_cpu)
+ or.u r3, r0, hi16(_start_text)
+ or r3, r3, lo16(_start_text)
+ or r9, r0, 0x100 /* .FORKMPU */
+ tb0 0, r0, 0x200-16 /* call 188Bug */
+ jmp r1
+
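The master/slave election above works because xmem is an atomic exchange: every CPU swaps a 1 into master_processor_chosen, and only the first swapper reads back the old 0. The same protocol in C, with a GCC __atomic builtin standing in for xmem (a sketch, not kernel code):

    static volatile unsigned int master_processor_chosen;

    /* returns nonzero on the one CPU that wins the election */
    static int try_become_master(void)
    {
        return __atomic_exchange_n(&master_processor_chosen, 1,
            __ATOMIC_ACQ_REL) == 0;
    }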
/*****************************************************************************/
data
.align 4096 /* VBR points to page aligned list */
-LABEL(_vector_list) /* references memory BELOW this line */
+ global _vector_list, _vector_list_end
+#if defined(MVME187) || defined(MVME188)
+_vector_list: /* references memory BELOW this line */
#include "machine/exception_vectors.h"
- word END_OF_VECTOR_LIST
-
+_vector_list_end:
+ word END_OF_VECTOR_LIST
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ global _m197_vector_list, _m197_vector_list_end
+_m197_vector_list: /* references memory BELOW this line */
+#include "machine/exception_vectors2.h"
+_m197_vector_list_end:
+ word END_OF_VECTOR_LIST
+#endif /* MVME197 */
.align 4096 /* Vector table is a page aligned list */
LABEL(_vector_table) /* new vector table location, was addr 0 */
- space (0x1000) /* 16K */
+ space (0x1000) /* 4K */
+
+ .align 4096 /* SDT (segment descriptor table) */
+ global _kernel_sdt
+_kernel_sdt:
+ space (0x2000) /* 8K - 4K phys, 4K virt*/
global _ret_addr
_ret_addr:
word 0
global _msgsw
_msgsw:
word 0 /* Bits here turn on/off debugging somewhere */
+_LABEL(initialized_cpu_lock)
+ /* XMEM spin lock -- to count CPUs */
+ word 0
+LABEL(_initialized_cpus)
+ /* CPU counter to initialize */
+ word 0
+_LABEL(master_processor_chosen)
+ /* The first processor that XMEMs this becomes the master */
+ word 0
+LABEL(_inter_processor_lock)
+ /* XMEM spin lock -- controls access to master_processor_chosen */
+ word 0
+
.align 4096
- global _intstack
- global _intstack_end
+ global _intstack, _intstack_end, _slavestack, _slavestack_end
_intstack:
- space (4 * NBPG) /* 16K, just to be safe*/
+ space (4 * NBPG) /* 16K, just to be safe */
_intstack_end:
+_slavestack:
+ space (NBPG) /* 4K, small, interim stack */
+_slavestack_end:
/*
* When a process exits and its u. area goes away, we set curpcb to point
diff --git a/sys/arch/mvme88k/mvme88k/locore_asm_routines.S b/sys/arch/mvme88k/mvme88k/locore_asm_routines.S
index cb7857a4178..316011a63b3 100644
--- a/sys/arch/mvme88k/mvme88k/locore_asm_routines.S
+++ b/sys/arch/mvme88k/mvme88k/locore_asm_routines.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore_asm_routines.S,v 1.6 1999/02/09 06:36:28 smurph Exp $ */
+/* $OpenBSD: locore_asm_routines.S,v 1.7 1999/09/27 19:13:22 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1992 Carnegie Mellon University
@@ -42,11 +42,11 @@
#include "assym.s"
#include <machine/trap.h>
+#include <machine/cpu_number.h>
#include <machine/board.h>
#include <machine/asm.h>
#include <sys/errno.h>
-
/*****************************************************************************
* DO_LOAD_ADDRESS
*
@@ -179,6 +179,249 @@ ENTRY(do_xmem_byte) /* do_xmem_byte(address, data, supervisor) */
1: xmem.bu r3,r2,r0
2: jmp r1
+ENTRY(ff1)
+ jmp.n r1
+ ff1 r2, r2
+
+
+ .data
+
+/*
+ * reserve MAX_CPUS words apiece for lockinit, lockuse and lockpause;
+ * counters for simple_lock_init calls, lock acquisitions and lock pauses.
+ */
+
+LABEL(lockinit)
+ zero 4*MAX_CPUS
+
+LABEL(lockuse)
+ zero 4*MAX_CPUS
+
+LABEL(lockpause)
+ zero 4*MAX_CPUS
+
+ .text
+
+/*************************************************************************/
+/****************** SIMPLE LOCK OPERATIONS *****************************/
+/*************************************************************************/
+
+#ifdef done_in_kernel
+/*************************************************************
+ *************************************************************
+ **
+ ** void simple_lock_init(int *lock_data)
+ ** {
+ ** *lock_data = 0;
+ ** }
+ **
+ ** void simple_unlock(simple_lock_t *)
+ ** {
+ ** *lock_data = 0;
+ ** }
+ **/
+#undef simple_unlock
+ENTRY(simple_lock_init)
+ st r0, r2, 0 /* init */
+ ldcr r2, SR1 /* extract cpu number*/
+ clr r2, r2, 0<FLAG_CPU_FIELD_WIDTH>
+ mask r2, r2, 3 /* play it safe */
+ or.u r3, r0, hi16(lockinit)
+ or r3, r3, lo16(lockinit)
+ ld r4, r3[r2]
+ addu r4, r4, 1
+ jmp.n r1
+ st r4, r3[r2]
+
+ENTRY(simple_unlock)
+ jmp.n r1
+ st r0, r2, 0
+
+#if DDB
+/* version of simple_unlock for the debugger - should be identical to
+ simple_unlock, but should never have breakpoints inserted on it */
+ENTRY(db_simple_unlock)
+ jmp.n r1
+ st r0, r2, 0
+#endif
+
+/**
+ ** Simple_lock
+ **
+ ** Returns when the lock is taken. It also increments lockuse[cpu]
+ **/
+ENTRY(simple_lock)
+ /* do r3 = test_and_set(r2, 1) */
+ or r3, r0, 1
+ xmem r3, r2, r0
+ bcnd ne0, r3, 1f
+#if 0
+ ldcr r5, SR1 /* extract cpu number */
+ clr r5, r5, 0<FLAG_CPU_FIELD_WIDTH>
+ mask r5, r5, 3 /* play it safe */
+ or.u r3, r0, hi16(lockuse)
+ or r3, r3, lo16(lockuse)
+ ld r4, r3[r5]
+ addu r4, r4, 1
+ st r4, r3[r5]
+#endif
+ jmp r1
+
+1:
+ /* wait until the lock appears to be free */
+ or.u r4, r0, 0x0300
+2:
+ subu r4, r4, 1
+ bcnd eq0, r4, _simple_lock_too_long
+ ld r3, r2, 0 /* check lock */
+ bcnd ne0, r3, 2b
+ br _simple_lock /* looks free... check again with the xmem */
+
+ENTRY(simple_lock_too_long)
+#ifdef JEFF_DEBUG
+ /* just want to break here.... */
+ tb0 0, r0 , 0x84 /* gimmeabreak */
+#else
+ /* waited too long */
+ subu r31, r31, 0x40
+ st r1, r31, 0x30
+ st r30, r31, 0x34
+ st r2, r31, 0x38
+ or r3, r0, r1
+#if 0
+ bsr _report_lock_info
+#endif
+ ld r2, r31, 0x38
+ ld r30, r31, 0x34
+ ld r1, r31, 0x30
+ addu r31, r31, 0x40
+ br _simple_lock
+#endif /* JEFF_DEBUG */
+
+
+#if DDB
+/*
+ * Version of simple_lock for the kernel debugger; should never have
+ * breakpoints set on it. Should be kept consistent with simple_lock.
+ */
+ENTRY(db_simple_lock)
+ /* do r10 = test_and_set(r2, 1) */
+ or r10, r0, 1
+ xmem r10, r2, r0
+ bcnd ne0, r10, db_simple_lock_watch
+ ldcr r2, SR1 /* extract cpu number*/
+ clr r2, r2, 0<FLAG_CPU_FIELD_WIDTH>
+ mask r2, r2, 3 /* play it safe*/
+ or.u r3, r0, hi16(lockuse)
+ or r3, r3, lo16(lockuse)
+ ld r4, r3[r2]
+ addu r4, r4, 1
+ jmp.n r1
+ st r4, r3[r2]
+
+db_simple_lock_watch:
+ /* wait until the lock appears to be free */
+3:
+ ld r10, r2, 0
+ bcnd ne0, r10, 3b
+ br _db_simple_lock /* looks free... check again with the xmem */
+#endif /* DDB */
+
+/*************************************************************
+ *************************************************************
+ **
+ ** boolean_t simple_lock_try(simple_lock_t *);
+ **
+ ** Grab the lock if it's free. Return zero if the lock was
+ ** busy, non-zero if the lock has been taken.
+ **/
+ENTRY(simple_lock_try)
+ or r3, r0, 1 /* r3 := test_and_set(r2, 1) */
+ xmem r3, r2, r0
+ /* If r3 is now zero, we hold the lock -- return non-zero. */
+ /* If r3 is now one, we didn't get it -- return zero. */
+ /* Thus, we want to "return(!r3)" */
+ cmp r4, r3, r0
+ jmp.n r1
+ extu r2, r4, 1<2>
+
+#if DDB
+/* version for the kernel debugger - keep consistent with above */
+ENTRY(db_simple_lock_try)
+ or r3, r0, 1 /* r3 := test_and_set(r2, 1) */
+ xmem r3, r2, r0
+ /* If r3 is now zero, we hold the lock -- return non-zero. */
+ /* If r3 is now one, we didn't get it -- return zero. */
+ /* Thus, we want to "return(!r3)" */
+ cmp r4, r3, r0
+ jmp.n r1
+ extu r2, r4, 1<2>
+#endif
+
+#if DDB /* version for the debugger */
+ENTRY(db_simple_lock_held)
+ jmp.n r1
+ ld r2, r2, 0
+#endif
+
+/*
+ * void simple_lock_pause(void).
+ *
+ * This routine is called when we find a simple lock held that we wish to
+ * acquire, but cannot spin on because we are holding a lock that is in the
+ * wrong order to it with respect to the locking hierarchy. Once we drop the
+ * lock we are holding, however, we cannot assume the lock we were trying to
+ * acquire is not deallocated. Consequently, we drop the lock we are holding
+ * and wait for a while, then retry. This is the wait for a while routine.
+ *
+ * We define a array of counters[cpus], lockpause
+ * to tell use how many times this routine is called.
+ * We currently wait 128 cycles per call.
+ */
+
+ENTRY(simple_lock_pause)
+ ldcr r2, SR1 /* extract cpu number*/
+ clr r2, r2, 0<FLAG_CPU_FIELD_WIDTH>
+ mask r2, r2, 3 /* play it safe */
+ or.u r3, r0, hi16(lockpause)
+ or r3, r3, lo16(lockpause)
+ ld r4, r3[r2]
+ or r5, r0, 128 /* initialize delay counter */
+1: subu r5, r5, 1 /* count down */
+ bcnd ne0, r5, 1b
+ addu r4, r4, 1
+ jmp.n r1 /* return*/
+ st r4, r3[r2]
+
+#endif /* done_in_kernel */
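
The simple_lock above (currently compiled out) is the classic test-and-test-and-set spin lock: try the atomic exchange, and on failure watch the word with plain loads until it looks free before retrying, so the bus is not hammered with xmem traffic. A C sketch with __atomic builtins standing in for xmem (the bounded wait and the per-cpu lockuse/lockpause counters are omitted):

    typedef volatile unsigned int simple_lock_data_t;

    static void simple_lock_sketch(simple_lock_data_t *lk)
    {
        for (;;) {
            if (__atomic_exchange_n(lk, 1, __ATOMIC_ACQUIRE) == 0)
                return;         /* the exchange (xmem) took the lock */
            while (*lk != 0)
                ;               /* watch until it looks clear */
        }
    }

    static void simple_unlock_sketch(simple_lock_data_t *lk)
    {
        __atomic_store_n(lk, 0, __ATOMIC_RELEASE);
    }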
+
+#ifdef now_in_c
+/*************************************************************************
+ *************************************************************************
+ **
+ ** unsigned get_psr(void)
+ **
+ ** Returns the processor status register (of the executing cpu).
+ **/
+#undef get_psr
+ENTRY(get_psr)
+ ldcr r2, PSR
+ jmp r1
+
+/*************************************************************************
+ *************************************************************************
+ **
+ ** void set_psr(unsigned psr)
+ **
+ ** Sets the processor status register (of the executing cpu).
+ **/
+
+#undef set_psr
+ENTRY(set_psr)
+ stcr r2, PSR
+ FLUSH_PIPELINE
+ jmp r1
+
/*************************************************************************
*************************************************************************
**
@@ -225,10 +468,6 @@ ENTRY(disable_interrupt)
FLUSH_PIPELINE
jmp r1
-/* a version of disable_interrupt for the kernel debugger. Should never
- have breakpoints set in it. Make sure it stays consistent with
- disable_interrupt */
-
#if DDB
ENTRY(db_disable_interrupt)
ldcr r2, PSR
@@ -238,15 +477,23 @@ ENTRY(db_disable_interrupt)
jmp r1
#endif /* DDB */
-/* version for the debugger */
+#endif /* now_in_c */
+
+ENTRY(are_interrupts_disabled)
+ ldcr r2, PSR /* get the processor status word */
+ set r3, r0, 1<PSR_INTERRUPT_DISABLE_BIT> /* set mask */
+ jmp.n r1 /* delayed return */
+ and r2, r2, r3 /* r2 = r3 & r2 */
+
+/* version for the debugger */
#if DDB
ENTRY(db_are_interrupts_disabled)
- ldcr r2, PSR /* get the processor status word */
- set r3, r0, 1<PSR_INTERRUPT_DISABLE_BIT> /* set mask */
- jmp.n r1 /* delayed return */
- and r2, r2, r3 /* r2 = r3 & r2 */
+ ldcr r2, PSR /* get the processor status word */
+ set r3, r0, 1<PSR_INTERRUPT_DISABLE_BIT> /* set mask */
+ jmp.n r1 /* delayed return */
+ and r2, r2, r3 /* r2 = r3 & r2 */
#endif /* DDB */
LABEL(_FAULT_ERROR)
@@ -262,89 +509,89 @@ LABEL(_FAULT_ERROR)
ENTRY(fuword)
ENTRY(fuiword)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(fusu_fault)
- or r5, r5, lo16(fusu_fault)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
#if ERRATA__XXX_USR
NOP
- ld.usr r5, r0, r2
+ ld.usr r5, r0, r2
NOP
NOP
NOP
#else
- ld.usr r5, r0, r2
+ ld.usr r5, r0, r2
#endif
- or r2, r0, r5
- br fusu_ret
+ or r2, r0, r5
+ br fusu_ret
fusu_fault:
- subu r2, r0, 1
+ subu r2, r0, 1
fusu_ret:
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- st r0, r6, PCB_ONFAULT /* pcb_onfault = 0 */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ st r0, r6, PCB_ONFAULT /* pcb_onfault = 0 */
- jmp r1
+ jmp r1
ENTRY(fusword)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(fusu_fault)
- or r5, r5, lo16(fusu_fault)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
#if ERRATA__XXX_USR
NOP
- ld.h.usr r5, r0, r2
+ ld.h.usr r5, r0, r2
NOP
NOP
NOP
#else
- ld.h.usr r5, r0, r2
+ ld.h.usr r5, r0, r2
#endif
- or r2, r0, r5
- br fusu_ret
+ or r2, r0, r5
+ br fusu_ret
ENTRY(fubyte)
ENTRY(fuibyte)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(fusu_fault)
- or r5, r5, lo16(fusu_fault)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
#if ERRATA__XXX_USR
NOP
- ld.b.usr r5, r0, r2
+ ld.b.usr r5, r0, r2
NOP
NOP
NOP
#else
- ld.b.usr r5, r0, r2
+ ld.b.usr r5, r0, r2
#endif
- or r2, r0, r5
- br fusu_ret
+ or r2, r0, r5
+ br fusu_ret
ENTRY(fuswintr)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(_fubail)
- or r5, r5, lo16(_fubail)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fubail */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(_fubail)
+ or r5, r5, lo16(_fubail)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fubail */
#if ERRATA__XXX_USR
NOP
- ld.h.usr r5, r2, r0
+ ld.h.usr r5, r2, r0
NOP
NOP
NOP
#else
- ld.h.usr r5, r2, r0
+ ld.h.usr r5, r2, r0
#endif
- or r2, r0, r5
- br fusu_ret
+ or r2, r0, r5
+ br fusu_ret
ENTRY(fubail)
- subu r2, r0, 1
- br fusu_ret
+ subu r2, r0, 1
+ br fusu_ret
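
All the fu*/su* routines here share one protocol: arm pcb_onfault with a recovery address, touch user space exactly once, then disarm. If the access faults, trap() sees pcb_onfault set and resumes there instead of retrying the access. A C rendition of the control flow using setjmp as the recovery mechanism (the real pcb stores a plain PC, not a jmp_buf; names here are invented):

    #include <setjmp.h>

    struct pcb_sketch { jmp_buf *pcb_onfault; };
    extern struct pcb_sketch *curpcb_sketch;

    static long fuword_sketch(const long *uaddr)
    {
        jmp_buf env;
        long val;

        if (setjmp(env) != 0) {             /* the fusu_fault path */
            curpcb_sketch->pcb_onfault = 0;
            return -1;
        }
        curpcb_sketch->pcb_onfault = &env;  /* arm the handler */
        val = *uaddr;                       /* the ld.usr; may fault */
        curpcb_sketch->pcb_onfault = 0;     /* disarm */
        return val;
    }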
/*
* store to user space.
@@ -354,81 +601,81 @@ ENTRY(fubail)
ENTRY(suword)
ENTRY(suiword)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(fusu_fault)
- or r5, r5, lo16(fusu_fault)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
#if ERRATA__XXX_USR
NOP
- st.usr r3, r2, r0
+ st.usr r3, r2, r0
NOP
NOP
NOP
#else
- st.usr r3, r2, r0
+ st.usr r3, r2, r0
#endif
- or r2, r0, r0 /* return success */
- br fusu_ret
+ or r2, r0, r0 /* return success */
+ br fusu_ret
ENTRY(susword)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(fusu_fault)
- or r5, r5, lo16(fusu_fault)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
#if ERRATA__XXX_USR
NOP
- st.h.usr r3, r2, r0
+ st.h.usr r3, r2, r0
NOP
NOP
NOP
#else
- st.h.usr r3, r2, r0
+ st.h.usr r3, r2, r0
#endif
- or r2, r0, r0 /* return success */
- br fusu_ret
+ or r2, r0, r0 /* return success */
+ br fusu_ret
ENTRY(subyte)
ENTRY(suibyte)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(fusu_fault)
- or r5, r5, lo16(fusu_fault)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(fusu_fault)
+ or r5, r5, lo16(fusu_fault)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = fusu_fault */
#if ERRATA__XXX_USR
NOP
- st.b.usr r3, r2, r0
+ st.b.usr r3, r2, r0
NOP
NOP
NOP
#else
- st.b.usr r3, r2, r0
+ st.b.usr r3, r2, r0
#endif
- or r2, r0, r0 /* return success */
- br fusu_ret
+ or r2, r0, r0 /* return success */
+ br fusu_ret
ENTRY(suswintr)
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(_subail)
- or r5, r5, lo16(_subail)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = subail */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(_subail)
+ or r5, r5, lo16(_subail)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = subail */
#if ERRATA__XXX_USR
NOP
- st.h.usr r3, r2, r0
+ st.h.usr r3, r2, r0
NOP
NOP
NOP
#else
- st.h.usr r3, r2, r0
+ st.h.usr r3, r2, r0
#endif
- or r2, r0, r0 /* return success */
- br fusu_ret
+ or r2, r0, r0 /* return success */
+ br fusu_ret
ENTRY(subail)
- subu r2, r0, 1
- br fusu_ret
+ subu r2, r0, 1
+ br fusu_ret
#if 0
/*
@@ -479,172 +726,172 @@ Lcsdone:
ENTRY(copyin)
/* set up fault handler */
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(.Lciflt)
- or r5, r5, lo16(.Lciflt)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = .Lciflt */
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(.Lciflt)
+ or r5, r5, lo16(.Lciflt)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = .Lciflt */
- /*bcnd ne0, LEN, 1f ; XXX optimize len = 0 case */
- /*;or r2, r0, 0 */
- /*;br .Lcidone */
+ /*bcnd ne0, LEN, 1f ; XXX optimize len = 0 case */
+ /*;or r2, r0, 0 */
+ /*;br .Lcidone */
/*;1: ;bcnd lt0, LEN, .Lciflt ; EFAULT if len < 0 */
/* If it's a small length (less than 8), then do byte-by-byte */
- cmp r9, LEN, 8
- bb1 lt, r9, copyin_byte_only
+ cmp r9, LEN, 8
+ bb1 lt, r9, copyin_byte_only
 /* If they're not aligned similarly, use byte only... */
- xor r9, SRC, DEST
- mask r8, r9, 0x3
- bcnd ne0, r8, copyin_byte_only
+ xor r9, SRC, DEST
+ mask r8, r9, 0x3
+ bcnd ne0, r8, copyin_byte_only
/*
* At this point, we don't know if they're word aligned or not,
* but we know that what needs to be done to one to align
* it is what's needed for the other.
*/
- bb1 0, SRC, copyin_left_align_to_halfword
+ bb1 0, SRC, copyin_left_align_to_halfword
copyin_left_aligned_to_halfword:
- bb1 1, SRC, copyin_left_align_to_word
+ bb1 1, SRC, copyin_left_align_to_word
copyin_left_aligned_to_word:
- bb1 0, LEN, copyin_right_align_to_halfword
+ bb1 0, LEN, copyin_right_align_to_halfword
copyin_right_aligned_to_halfword:
- bb1 1, LEN, copyin_right_align_to_word
+ bb1 1, LEN, copyin_right_align_to_word
copyin_right_aligned_to_word:
/* At this point, both SRC and DEST are aligned to a word */
 /* boundary, and LEN is an even multiple of 4. */
- bb1.n 2, LEN, copyin_right_align_to_doubleword
- or r7, r0, 4
+ bb1.n 2, LEN, copyin_right_align_to_doubleword
+ or r7, r0, 4
copyin_right_aligned_to_doubleword:
#if ERRATA__XXX_USR
NOP
- ld.usr r5, SRC, r0
+ ld.usr r5, SRC, r0
NOP
NOP
NOP
- ld.usr r6, SRC, r7
+ ld.usr r6, SRC, r7
NOP
NOP
NOP
#else
- ld.usr r5, SRC, r0
- ld.usr r6, SRC, r7
+ ld.usr r5, SRC, r0
+ ld.usr r6, SRC, r7
#endif
- subu LEN, LEN, 8
- st r5, DEST, r0
- addu SRC, SRC, 8
- st r6, DEST, r7
- bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword
- addu DEST, DEST, 8
- or r2, r0, r0 /* successful return */
- br .Lcidone
+ subu LEN, LEN, 8
+ st r5, DEST, r0
+ addu SRC, SRC, 8
+ st r6, DEST, r7
+ bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword
+ addu DEST, DEST, 8
+ or r2, r0, r0 /* successful return */
+ br .Lcidone
/***************************************************/
copyin_left_align_to_halfword:
#if ERRATA__XXX_USR
NOP
- ld.b.usr r5, SRC, r0
+ ld.b.usr r5, SRC, r0
NOP
NOP
NOP
#else
- ld.b.usr r5, SRC, r0
+ ld.b.usr r5, SRC, r0
#endif
- subu LEN, LEN, 1
- st.b r5, DEST, r0
- addu SRC, SRC, 1
- br.n copyin_left_aligned_to_halfword
- addu DEST, DEST, 1
+ subu LEN, LEN, 1
+ st.b r5, DEST, r0
+ addu SRC, SRC, 1
+ br.n copyin_left_aligned_to_halfword
+ addu DEST, DEST, 1
copyin_left_align_to_word:
#if ERRATA__XXX_USR
NOP
- ld.h.usr r5, SRC, r0
+ ld.h.usr r5, SRC, r0
NOP
NOP
NOP
#else
- ld.h.usr r5, SRC, r0
+ ld.h.usr r5, SRC, r0
#endif
- subu LEN, LEN, 2
- st.h r5, DEST, r0
- addu SRC, SRC, 2
- br.n copyin_left_aligned_to_word
- addu DEST, DEST, 2
+ subu LEN, LEN, 2
+ st.h r5, DEST, r0
+ addu SRC, SRC, 2
+ br.n copyin_left_aligned_to_word
+ addu DEST, DEST, 2
copyin_right_align_to_halfword:
- subu LEN, LEN, 1
+ subu LEN, LEN, 1
#if ERRATA__XXX_USR
NOP
- ld.b.usr r5, SRC, LEN
+ ld.b.usr r5, SRC, LEN
NOP
NOP
NOP
#else
- ld.b.usr r5, SRC, LEN
+ ld.b.usr r5, SRC, LEN
#endif
- br.n copyin_right_aligned_to_halfword
- st.b r5, DEST, LEN
+ br.n copyin_right_aligned_to_halfword
+ st.b r5, DEST, LEN
copyin_right_align_to_word:
- subu LEN, LEN, 2
+ subu LEN, LEN, 2
#if ERRATA__XXX_USR
NOP
- ld.h.usr r5, SRC, LEN
+ ld.h.usr r5, SRC, LEN
NOP
NOP
NOP
#else
- ld.h.usr r5, SRC, LEN
+ ld.h.usr r5, SRC, LEN
#endif
- br.n copyin_right_aligned_to_word
- st.h r5, DEST, LEN
+ br.n copyin_right_aligned_to_word
+ st.h r5, DEST, LEN
copyin_right_align_to_doubleword:
- subu LEN, LEN, 4
+ subu LEN, LEN, 4
#if ERRATA__XXX_USR
NOP
- ld.usr r5, SRC, LEN
+ ld.usr r5, SRC, LEN
NOP
NOP
NOP
#else
- ld.usr r5, SRC, LEN
+ ld.usr r5, SRC, LEN
#endif
- bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword
- st r5, DEST, LEN
- or r2, r0, r0 /* successful return */
- br .Lcidone
+ bcnd.n ne0, LEN, copyin_right_aligned_to_doubleword
+ st r5, DEST, LEN
+ or r2, r0, r0 /* successful return */
+ br .Lcidone
copyin_byte_only:
- bcnd eq0, LEN, 2f
+ bcnd eq0, LEN, 2f
1:
- subu LEN, LEN, 1
+ subu LEN, LEN, 1
#if ERRATA__XXX_USR
NOP
- ld.b.usr r5, SRC, LEN
+ ld.b.usr r5, SRC, LEN
NOP
NOP
NOP
#else
- ld.b.usr r5, SRC, LEN
+ ld.b.usr r5, SRC, LEN
#endif
- bcnd.n ne0, LEN, 1b
- st.b r5, DEST, LEN
- 2: or r2, r0, r0 /* successful return */
- br .Lcidone
+ bcnd.n ne0, LEN, 1b
+ st.b r5, DEST, LEN
+ 2: or r2, r0, r0 /* successful return */
+ br .Lcidone
.Lcidone:
- or.u r5,r0,hi16(_curpcb)
- ld r6,r5,lo16(_curpcb)
- st r0,r6,PCB_ONFAULT
- jmp r1
+ or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT
+ jmp r1
.Lciflt:
- or r2, r0, EFAULT /* return fault */
- br .Lcidone
+ or r2, r0, EFAULT /* return fault */
+ br .Lcidone
#undef SRC
#undef DEST
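
[Editor's note: the dispatch logic above, a byte loop for short or mutually misaligned buffers, head alignment to a word boundary, a two-words-per-pass main loop, and tail bytes peeled from the end, can be mirrored in plain C. The sketch below is illustrative only: it shows the same strategy in userland and omits the pcb_onfault setup and the ld.usr/st.usr user-space accesses that are the whole point of the assembly version.]

    #include <stddef.h>
    #include <stdint.h>

    /* Userland analogue of the copyin dispatch above (no fault handling). */
    static void
    copy_sketch(const char *src, char *dst, size_t len)
    {
        /* Small or differently aligned buffers: byte-by-byte. */
        if (len < 8 || (((uintptr_t)src ^ (uintptr_t)dst) & 0x3)) {
            while (len--)
                *dst++ = *src++;
            return;
        }
        /* Align the head to a word boundary; the same shift fixes both. */
        if ((uintptr_t)src & 0x1) {
            *dst++ = *src++;
            len--;
        }
        if ((uintptr_t)src & 0x2) {
            *dst++ = *src++;
            *dst++ = *src++;
            len -= 2;
        }
        /* Peel tail bytes from the end until len is a multiple of 8. */
        while (len & 0x7) {
            len--;
            dst[len] = src[len];
        }
        /* Main loop: two aligned words per pass, as in the unrolled asm. */
        while (len) {
            *(uint32_t *)dst = *(const uint32_t *)src;
            *(uint32_t *)(dst + 4) = *(const uint32_t *)(src + 4);
            src += 8;
            dst += 8;
            len -= 8;
        }
    }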
@@ -668,47 +915,48 @@ copyin_byte_only:
#define CNT r4
#define LEN r5
-ENTRY(copyinstr)
+ENTRY(copyinstr)
+
/* setup fault handler */
- or.u r6, r0, hi16(_curpcb)
- ld r7, r6, lo16(_curpcb)
- or.u r6, r0, hi16(.Lcisflt)
- or r6, r6, lo16(.Lcisflt)
- st r6, r7, PCB_ONFAULT
- bcnd lt0, CNT, .Lcisflt
- bcnd eq0, CNT, .Lcisdone
- or r6, r0, 0
+ or.u r6, r0, hi16(_curpcb)
+ ld r7, r6, lo16(_curpcb)
+ or.u r6, r0, hi16(.Lcisflt)
+ or r6, r6, lo16(.Lcisflt)
+ st r6, r7, PCB_ONFAULT
+ bcnd lt0, CNT, .Lcisflt
+ bcnd eq0, CNT, .Lcisdone
+ or r6, r0, 0
1:
#if ERRATA__XXX_USR
NOP
- ld.bu.usr r7, SRC, r6
+ ld.bu.usr r7, SRC, r6
NOP
NOP
NOP
#else
ld.bu.usr r7, SRC, r6
#endif
- st.b r7, DEST, r6
- bcnd.n eq0, r7, 2f /* all done */
- addu r6, r6, 1
- cmp r7, r6, CNT
- bb1 lt, r7, 1b
- or r2, r0, ENAMETOOLONG /* over flow */
- br .Lcisdone
+ st.b r7, DEST, r6
+ bcnd.n eq0, r7, 2f /* all done */
+ addu r6, r6, 1
+ cmp r7, r6, CNT
+ bb1 lt, r7, 1b
+	or	r2, r0, ENAMETOOLONG /* overflow */
+ br .Lcisdone
2: /* all done */
- or r2, r0, 0
- br .Lcisdone
+ or r2, r0, 0
+ br .Lcisdone
.Lcisdone:
- bcnd eq0, LEN, 3f
- st r6, r0, LEN
- 3: or.u r5,r0,hi16(_curpcb)
- ld r6,r5,lo16(_curpcb)
- st r0,r6,PCB_ONFAULT /* clear the handler */
- jmp r1
+ bcnd eq0, LEN, 3f
+ st r6, r0, LEN
+ 3: or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT /* clear the handler */
+ jmp r1
.Lcisflt:
- or r2, r0, EFAULT /* return fault */
- br .Lcisdone
+ or r2, r0, EFAULT /* return fault */
+ br .Lcisdone
#undef SRC
#undef DEST
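
[Editor's note: in C, the copyinstr contract implemented above reads roughly as follows. This is a hedged userland sketch: it keeps the return conventions (0, ENAMETOOLONG, and a copied length that includes the NUL) but drops the fault handler and the ld.bu.usr user-space loads.]

    #include <errno.h>
    #include <stddef.h>

    static int
    copyinstr_sketch(const char *from, char *to, size_t maxlen,
        size_t *lencopied)
    {
        size_t i = 0;

        while (i < maxlen) {
            char c = from[i];           /* ld.bu.usr in the kernel */

            to[i++] = c;
            if (c == '\0') {            /* all done */
                if (lencopied != NULL)
                    *lencopied = i;     /* count includes the NUL */
                return (0);
            }
        }
        if (lencopied != NULL)
            *lencopied = i;
        return (ENAMETOOLONG);          /* buffer filled first */
    }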
@@ -730,181 +978,182 @@ ENTRY(copyinstr)
ENTRY(copyout)
/* setup fault handler */
/* tb0 0, r0, 132 entry trap */
- or.u r5, r0, hi16(_curpcb)
- ld r6, r5, lo16(_curpcb)
- or.u r5, r0, hi16(.Lcoflt)
- or r5, r5, lo16(.Lcoflt)
- st r5, r6, PCB_ONFAULT /* pcb_onfault = .Lcoflt */
-/* ;bcnd ne0, LEN, 1f ; XXX optimize len = 0 case */
-/* ;or r2, r0, 0 */
-/* ;br .Lcodone */
- /*;1: ;bcnd lt0, LEN, .Lcoflt ; EFAULT if len < 0 */
+/* SET_PCB_ONFAULT(r5, r6, .Lcoflt)*/
+ or.u r5, r0, hi16(_curpcb)
+ ld r6, r5, lo16(_curpcb)
+ or.u r5, r0, hi16(.Lcoflt)
+ or r5, r5, lo16(.Lcoflt)
+ st r5, r6, PCB_ONFAULT /* pcb_onfault = .Lcoflt */
+/* ;bcnd ne0, LEN, 1f ; XXX optimize len = 0 case */
+/* ;or r2, r0, 0 */
+/* ;br .Lcodone */
+ /*;1: ;bcnd lt0, LEN, .Lcoflt ; EFAULT if len < 0 */
/* If it's a small length (less than 8), then do byte-by-byte */
- cmp r9, LEN, 8
- bb1 lt, r9, copyout_byte_only
+ cmp r9, LEN, 8
+ bb1 lt, r9, copyout_byte_only
 /* If they're not aligned similarly, use byte only... */
- xor r9, SRC, DEST
- mask r8, r9, 0x3
- bcnd ne0, r8, copyout_byte_only
+ xor r9, SRC, DEST
+ mask r8, r9, 0x3
+ bcnd ne0, r8, copyout_byte_only
/*
* At this point, we don't know if they're word aligned or not,
* but we know that what needs to be done to one to align
* it is what's needed for the other.
*/
- bb1 0, SRC, copyout_left_align_to_halfword
+ bb1 0, SRC, copyout_left_align_to_halfword
copyout_left_aligned_to_halfword:
- bb1 1, SRC, copyout_left_align_to_word
+ bb1 1, SRC, copyout_left_align_to_word
copyout_left_aligned_to_word:
- bb1 0, LEN, copyout_right_align_to_halfword
+ bb1 0, LEN, copyout_right_align_to_halfword
copyout_right_aligned_to_halfword:
- bb1 1, LEN, copyout_right_align_to_word
+ bb1 1, LEN, copyout_right_align_to_word
copyout_right_aligned_to_word:
/*
* At this point, both SRC and DEST are aligned to a word
 * boundary, and LEN is an even multiple of 4.
*/
- bb1.n 2, LEN, copyout_right_align_to_doubleword
- or r7, r0, 4
+ bb1.n 2, LEN, copyout_right_align_to_doubleword
+ or r7, r0, 4
copyout_right_aligned_to_doubleword:
- ld r5, SRC, r0
- ld r6, SRC, r7
- subu LEN, LEN, 8
+ ld r5, SRC, r0
+ ld r6, SRC, r7
+ subu LEN, LEN, 8
#if ERRATA__XXX_USR
NOP
- st.usr r5, DEST, r0
+ st.usr r5, DEST, r0
NOP
NOP
NOP
#else
- st.usr r5, DEST, r0
+ st.usr r5, DEST, r0
#endif
- addu SRC, SRC, 8
+ addu SRC, SRC, 8
#if ERRATA__XXX_USR
NOP
- st.usr r6, DEST, r7
+ st.usr r6, DEST, r7
NOP
NOP
NOP
#else
- st.usr r6, DEST, r7
+ st.usr r6, DEST, r7
#endif
- bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword
- addu DEST, DEST, 8
- or r2, r0, r0 /* successful return */
- br .Lcodone
+ bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword
+ addu DEST, DEST, 8
+ or r2, r0, r0 /* successful return */
+ br .Lcodone
/***************************************************/
copyout_left_align_to_halfword:
- ld.b r5, SRC, r0
- subu LEN, LEN, 1
+ ld.b r5, SRC, r0
+ subu LEN, LEN, 1
#if ERRATA__XXX_USR
NOP
- st.b.usr r5, DEST, r0
+ st.b.usr r5, DEST, r0
NOP
NOP
NOP
#else
- st.b.usr r5, DEST, r0
+ st.b.usr r5, DEST, r0
#endif
- addu SRC, SRC, 1
- br.n copyout_left_aligned_to_halfword
- addu DEST, DEST, 1
+ addu SRC, SRC, 1
+ br.n copyout_left_aligned_to_halfword
+ addu DEST, DEST, 1
copyout_left_align_to_word:
- ld.h r5, SRC, r0
- subu LEN, LEN, 2
+ ld.h r5, SRC, r0
+ subu LEN, LEN, 2
#if ERRATA__XXX_USR
NOP
- st.h.usr r5, DEST, r0
+ st.h.usr r5, DEST, r0
NOP
NOP
NOP
#else
- st.h.usr r5, DEST, r0
+ st.h.usr r5, DEST, r0
#endif
- addu SRC, SRC, 2
- br.n copyout_left_aligned_to_word
- addu DEST, DEST, 2
+ addu SRC, SRC, 2
+ br.n copyout_left_aligned_to_word
+ addu DEST, DEST, 2
copyout_right_align_to_halfword:
- subu LEN, LEN, 1
- ld.b r5, SRC, LEN
+ subu LEN, LEN, 1
+ ld.b r5, SRC, LEN
#if ERRATA__XXX_USR
+ NOP
+ st.b.usr r5, DEST, LEN
NOP
- st.b.usr r5, DEST, LEN
NOP
NOP
- NOP
- br copyout_right_aligned_to_halfword
+ br copyout_right_aligned_to_halfword
#else
- br.n copyout_right_aligned_to_halfword
- st.b.usr r5, DEST, LEN
+ br.n copyout_right_aligned_to_halfword
+ st.b.usr r5, DEST, LEN
#endif
copyout_right_align_to_word:
- subu LEN, LEN, 2
- ld.h r5, SRC, LEN
+ subu LEN, LEN, 2
+ ld.h r5, SRC, LEN
#if ERRATA__XXX_USR
- NOP
- st.h.usr r5, DEST, LEN
+ NOP
+ st.h.usr r5, DEST, LEN
NOP
NOP
NOP
- br copyout_right_aligned_to_word
-#else
- br.n copyout_right_aligned_to_word
- st.h.usr r5, DEST, LEN
+ br copyout_right_aligned_to_word
+#else
+ br.n copyout_right_aligned_to_word
+ st.h.usr r5, DEST, LEN
#endif
copyout_right_align_to_doubleword:
- subu LEN, LEN, 4
- ld r5, SRC, LEN
+ subu LEN, LEN, 4
+ ld r5, SRC, LEN
#if ERRATA__XXX_USR
NOP
- st.usr r5, DEST, LEN
+ st.usr r5, DEST, LEN
NOP
NOP
NOP
- bcnd ne0, LEN, copyout_right_aligned_to_doubleword
+ bcnd ne0, LEN, copyout_right_aligned_to_doubleword
#else
- bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword
- st.usr r5, DEST, LEN
+ bcnd.n ne0, LEN, copyout_right_aligned_to_doubleword
+ st.usr r5, DEST, LEN
#endif
- or r2, r0, r0 /* successful return */
- br .Lcodone
+ or r2, r0, r0 /* successful return */
+ br .Lcodone
_LABEL(copyout_byte_only)
- bcnd eq0, LEN, 2f
+ bcnd eq0, LEN, 2f
1:
- subu LEN, LEN, 1
- ld.b r5, SRC, LEN
+ subu LEN, LEN, 1
+ ld.b r5, SRC, LEN
#if ERRATA__XXX_USR
NOP
- st.b.usr r5, DEST, LEN
+ st.b.usr r5, DEST, LEN
NOP
NOP
NOP
- bcnd ne0, LEN, 1b
+ bcnd ne0, LEN, 1b
# else
- bcnd.n ne0, LEN, 1b
- st.b.usr r5, DEST, LEN
+ bcnd.n ne0, LEN, 1b
+ st.b.usr r5, DEST, LEN
# endif
- 2: or r2, r0, r0 /* successful return */
- br .Lcodone
+ 2: or r2, r0, r0 /* successful return */
+ br .Lcodone
.Lcodone:
- or.u r5,r0,hi16(_curpcb)
- ld r6,r5,lo16(_curpcb)
- st r0,r6,PCB_ONFAULT /* clear the handler */
- jmp r1
+ or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT /* clear the handler */
+ jmp r1
.Lcoflt:
- or r2, r0, EFAULT /* return fault */
- br .Lcodone
+ or r2, r0, EFAULT /* return fault */
+ br .Lcodone
#undef SRC
#undef DEST
@@ -928,46 +1177,46 @@ _LABEL(copyout_byte_only)
ENTRY(copyoutstr)
/* setup fault handler */
- or.u r6, r0, hi16(_curpcb)
- ld r7, r6, lo16(_curpcb)
- or.u r6, r0, hi16(.Lcosflt)
- or r6, r6, lo16(.Lcosflt)
- st r6, r7, PCB_ONFAULT
- bcnd lt0, CNT, .Lcosflt
- bcnd eq0, CNT, .Lcosdone
- or r6, r0, 0
+ or.u r6, r0, hi16(_curpcb)
+ ld r7, r6, lo16(_curpcb)
+ or.u r6, r0, hi16(.Lcosflt)
+ or r6, r6, lo16(.Lcosflt)
+ st r6, r7, PCB_ONFAULT
+ bcnd lt0, CNT, .Lcosflt
+ bcnd eq0, CNT, .Lcosdone
+ or r6, r0, 0
1:
- ld.bu r7, SRC, r6
+ ld.bu r7, SRC, r6
#if ERRATA__XXX_USR
NOP
- st.b.usr r7, DEST, r6
+ st.b.usr r7, DEST, r6
NOP
NOP
NOP
#else
- st.b.usr r7, DEST, r6
+ st.b.usr r7, DEST, r6
#endif
- bcnd.n eq0, r7, 2f /* all done */
- addu r6, r6, 1
- cmp r7, r6, CNT
- bb1 lt, r7, 1b
- or r2, r0, ENAMETOOLONG /* over flow */
- br .Lcosdone
+ bcnd.n eq0, r7, 2f /* all done */
+ addu r6, r6, 1
+ cmp r7, r6, CNT
+ bb1 lt, r7, 1b
+	or	r2, r0, ENAMETOOLONG /* overflow */
+ br .Lcosdone
2: /* all done */
- or r2, r0, 0
- br .Lcosdone
+ or r2, r0, 0
+ br .Lcosdone
.Lcosflt:
- or r2, r0, EFAULT /* return fault */
- br .Lcosdone
+ or r2, r0, EFAULT /* return fault */
+ br .Lcosdone
.Lcosdone:
- bcnd eq0, LEN, 3f
- st r6, r0, LEN
- 3: or.u r5,r0,hi16(_curpcb)
- ld r6,r5,lo16(_curpcb)
- st r0,r6,PCB_ONFAULT /* clear the handler */
- jmp r1
+ bcnd eq0, LEN, 3f
+ st r6, r0, LEN
+ 3: or.u r5,r0,hi16(_curpcb)
+ ld r6,r5,lo16(_curpcb)
+ st r0,r6,PCB_ONFAULT /* clear the handler */
+ jmp r1
#undef SRC
#undef DEST
@@ -1338,7 +1587,7 @@ ENTRY(bzero)
or R_mark_address, R_mark_address, lo16(mark)
top_of_main_loop:
-# define MAX_AT_ONE_TIME 128
+#define MAX_AT_ONE_TIME 128
/*
* Now we find out how many words we can zero-fill in a row.
* We do this by doing something like:
@@ -1361,20 +1610,20 @@ ENTRY(bzero)
cmp R_temp, R_bytes, MAX_AT_ONE_TIME
bb1 lt, R_temp, 1f
- /*
- * Since we're doing the max, we know exactly where we're
- * jumping (the first one in the list!), so we can jump
- * right there. However, we've still got to adjust
- * the length, so we'll jump to where we ajust the length
- * which just happens to fall through to the first store zero
- * in the list.
- *
- * Note, however, that we're jumping to an instruction that
- * would be in the delay slot for the jump in front of it,
- * so if you change things here, WATCH OUT.
- */
- br.n do_max
- or R_bytes, r0, MAX_AT_ONE_TIME
+ /*
+ * Since we're doing the max, we know exactly where we're
+ * jumping (the first one in the list!), so we can jump
+ * right there. However, we've still got to adjust
+	 * the length, so we'll jump to where we adjust the length
+ * which just happens to fall through to the first store zero
+ * in the list.
+ *
+ * Note, however, that we're jumping to an instruction that
+ * would be in the delay slot for the jump in front of it,
+ * so if you change things here, WATCH OUT.
+ */
+ br.n do_max
+ or R_bytes, r0, MAX_AT_ONE_TIME
1:
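
[Editor's note: the jump-into-the-store-list trick the comment above describes is the assembly form of Duff's device: compute how many stores are needed and enter a linear list of them partway down. A scaled-down C sketch, 8 words instead of the 32 that MAX_AT_ONE_TIME allows:]

    #include <stddef.h>
    #include <stdint.h>

    /* Zero up to 32 bytes by entering a list of word stores partway down. */
    static void
    zero_words_sketch(uint32_t *p, size_t nbytes)
    {
        switch (nbytes / 4) {       /* all cases fall through */
        case 8: p[7] = 0;
        case 7: p[6] = 0;
        case 6: p[5] = 0;
        case 5: p[4] = 0;
        case 4: p[3] = 0;
        case 3: p[2] = 0;
        case 2: p[1] = 0;
        case 1: p[0] = 0;
        case 0: break;
        }
    }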
@@ -1551,6 +1800,21 @@ ENTRY(getsp)
jmp r1
/*
+ * invalidate_pte(pte)
+ *
+ * This function invalidates the specified pte indivisibly,
+ * to avoid the write-back of the used and/or modified bits into
+ * that pte. It also returns the pte found in the table.
+ */
+ENTRY(invalidate_pte)
+ or r3,r0,r0
+ xmem r3,r2,r0
+ tb1 0,r0,0
+ jmp.n r1
+ or r2,r3,r0
+
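[Editor's note: the xmem above is what makes the invalidation indivisible: the zero is swapped into the pte in one bus transaction, so a concurrent write-back of the used/modified bits cannot slip in between a separate read and write. A portable C rendering of the same idea, as a sketch only; the kernel itself must use xmem:]

    #include <stdatomic.h>

    typedef unsigned int pt_entry_t;

    /* Atomically clear *pte and return its previous contents. */
    static pt_entry_t
    invalidate_pte_sketch(_Atomic pt_entry_t *pte)
    {
        return atomic_exchange(pte, 0);
    }
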
+#ifdef now_in_c
+/*
* This has to be cleaned - we should not use INT_MASK_LEVEL. Should
* use pcc2_int_lvl instead. XXX nivas
*/
@@ -1566,20 +1830,39 @@ ENTRY(getsp)
ENTRY(spln)
ldcr r10,PSR
or r11,r0,r10
- or r5,r2,r0
+ or r5,r2,r0 /* copy of r2 */
bb1 PSR_INTERRUPT_DISABLE_BIT,r10,1f
set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
stcr r10,PSR
FLUSH_PIPELINE
1:
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 2f
+ or.u r2, r0, hi16(IEN0_REG)
+ bsr.n _m188_get_mask
+ or r2, r2, lo16(IEN0_REG)
+ or r4,r2,r0 /* old mask in r4 */
+ or r3,r5,r0 /* new mask in r3 */
+ or.u r2, r0, hi16(IEN0_REG)
+ bsr.n _m188_set_mask
+ or r2, r2, lo16(IEN0_REG)
+ or r2,r4,r0 /* old mask in r2 */
+ br m188_spln_done
+#endif /* MVME188 */
+ 2:
or.u r3,r0,hi16(INT_MASK_LEVEL)
or r3,r3,lo16(INT_MASK_LEVEL)
xmem.bu r2,r3,r0
- bcnd ne0, r5, 2f
+m188_spln_done:
+ bcnd ne0, r5, 3f
clr r11, r11, 1<PSR_INTERRUPT_DISABLE_BIT>
stcr r11,PSR
FLUSH_PIPELINE
- 2:
+ 3:
jmp r1
ENTRY(getipl)
@@ -1591,8 +1874,23 @@ ENTRY(spl)
stcr r10,PSR
FLUSH_PIPELINE
1:
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 2f
+ /* get the current mask value mvme188 */
+ or.u r2, r0, hi16(IEN0_REG)
+ bsr.n _m188_get_mask
+ or r2, r2, lo16(IEN0_REG)
+ br m188_spl_done
+#endif /* MVME188 */
+ 2:
+ /* get the current mask value mvme1x7 */
or.u r3,r0,hi16(INT_MASK_LEVEL)
ld.b r2,r3,lo16(INT_MASK_LEVEL)
+m188_spl_done:
stcr r11,PSR
FLUSH_PIPELINE
jmp r1
@@ -1610,50 +1908,104 @@ ENTRY(setipl)
bb1 PSR_INTERRUPT_DISABLE_BIT, r10, 1f
set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
stcr r10,PSR /* disable ints, if needed */
- FLUSH_PIPELINE
- 1:
- /* get the current mask value */
+ FLUSH_PIPELINE
+1:
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 2f
+ or r3, r0, r2 /* r3 now new mask value */
+ /* get the current mask value mvme188 */
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_get_mask
+ or r2,r2,lo16(IEN0_REG)
+ or r4, r0, r2 /* r4 now current mask value */
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_set_mask
+ or r2,r2,lo16(IEN0_REG)
+ or r2, r0, r4 /* r2 now the old value */
+ br m188_setipl_done
+#endif /* MVME188 */
+ 2:
+ /* get the current mask value mvme1x7 */
or.u r3,r0,hi16(INT_MASK_LEVEL)
or r3,r3,lo16(INT_MASK_LEVEL)
xmem.bu r2,r3,r0 /* xchng the new mask value */
+m188_setipl_done:
FLUSH_PIPELINE
stcr r11,PSR /* restore psr */
FLUSH_PIPELINE
jmp r1 /* and return the old value */
-/*
- * invalidate_pte(pte)
- *
- * This function will invalidate specified pte indivisibly
- * to avoid the write-back of used-bit and/or modify-bit into
- * that pte. It also returns the pte found in the table.
- */
-ENTRY(invalidate_pte)
- or r3,r0,r0
- xmem r3,r2,r0
- tb1 0,r0,0
- jmp.n r1
- or r2,r3,r0
-
#if DDB
-
ENTRY(db_spln)
ldcr r10,PSR
or r11,r0,r10
- or r5,r2,r0
+ or r5,r2,r0 /* copy of r2 */
bb1 PSR_INTERRUPT_DISABLE_BIT,r10,1f
set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
stcr r10,PSR
FLUSH_PIPELINE
1:
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 2f
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_get_mask
+ or r2,r2,lo16(IEN0_REG)
+ or r4,r2,r0 /* old mask in r4 */
+ or r3,r5,r0 /* new mask in r3 */
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_set_mask
+ or r2,r2,lo16(IEN0_REG)
+ or r2,r4,r0 /* old mask in r2 */
+ br m188_db_spln_done
+#endif /* MVME188 */
+ 2:
or.u r3,r0,hi16(INT_MASK_LEVEL)
or r3,r3,lo16(INT_MASK_LEVEL)
xmem.bu r2,r3,r0
- bcnd ne0, r5, 2f
+m188_db_spln_done:
+ bcnd ne0, r5, 3f
clr r11, r11, 1<PSR_INTERRUPT_DISABLE_BIT>
stcr r11,PSR
FLUSH_PIPELINE
- 2:
+ 3:
+ jmp r1
+
+ENTRY(db_getipl)
+ENTRY(db_spl)
+ ldcr r10,PSR
+ or r11,r0,r10
+ bb1 PSR_INTERRUPT_DISABLE_BIT, r10, 1f
+ set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r10,PSR
+ FLUSH_PIPELINE
+ 1:
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 2f
+ /* get the current mask value mvme188 */
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_get_mask
+ or r2,r2,lo16(IEN0_REG)
+ br m188_db_spl_done
+#endif /* MVME188 */
+ 2:
+ /* get the current mask value mvme1x7 */
+ or.u r3,r0,hi16(INT_MASK_LEVEL)
+ ld.b r2,r3,lo16(INT_MASK_LEVEL)
+m188_db_spl_done:
+ stcr r11,PSR
+ FLUSH_PIPELINE
jmp r1
ENTRY(db_setipl)
@@ -1664,29 +2016,38 @@ ENTRY(db_setipl)
stcr r10,PSR /* disable ints, if needed */
FLUSH_PIPELINE
1:
- /* get the current mask value */
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 2f
+ or r3, r0, r2 /* r3 now new mask value */
+ /* get the current mask value mvme188 */
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_get_mask
+ or r2,r2,lo16(IEN0_REG)
+ or r4, r0, r2 /* r4 now current mask value */
+ or.u r2,r0,hi16(IEN0_REG)
+ bsr.n _m188_set_mask
+ or r2,r2,lo16(IEN0_REG)
+ or r2, r0, r4 /* r2 now the old value */
+ br m188_db_setipl_done
+#endif /* MVME188 */
+ 2:
+ /* get the current mask value mvme1x7 */
or.u r3,r0,hi16(INT_MASK_LEVEL)
or r3,r3,lo16(INT_MASK_LEVEL)
xmem.bu r2,r3,r0 /* xchng the new mask value */
+m188_db_setipl_done:
FLUSH_PIPELINE
stcr r11,PSR /* restore psr */
FLUSH_PIPELINE
jmp r1 /* and return the old value */
+#endif /* DDB */
+#endif /* now_in_c */
-ENTRY(db_getipl)
-ENTRY(db_spl)
- ldcr r10,PSR
- or r11,r0,r10
- bb1 PSR_INTERRUPT_DISABLE_BIT, r10, 1f
- set r10,r10,1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r10,PSR
- 1:
- or.u r3,r0,hi16(INT_MASK_LEVEL)
- ld.b r2,r3,lo16(INT_MASK_LEVEL)
- stcr r11,PSR
- FLUSH_PIPELINE
- jmp r1
-
+#if DDB
ENTRY(db_flush_pipeline)
FLUSH_PIPELINE
jmp r1
@@ -1738,3 +2099,78 @@ ENTRY(guarded_access_bad)
ENTRY(guarded_access_end)
jmp.n r1
or r2,r0,0
+
+#if 0 /* There is an inline version of this in
+machine/cpu_number.h but it doesn't work right for some reason. */
+/*************************************************************************
+ *************************************************************************
+ **
+ ** int cpu_number(void)
+ **
+ ** Returns the number of the current cpu.
+ **/
+#undef cpu_number
+ENTRY(cpu_number)
+ or r2, r0, r0 /* clear return value */
+#ifdef MVME188
+ /* check if it's a mvme188 */
+ or.u r4, r0, hi16(_cputyp)
+ ld r3, r4, lo16(_cputyp)
+ cmp r4, r3, 0x188
+ bb1 ne, r4, 1f
+ ldcr r2, SR1
+#endif /* MVME188 */
+1:
+ jmp.n r1
+ clr r2, r2, 28<FLAG_CPU_FIELD_WIDTH>/* clears all but the CPU num */
+#endif
+
+/*************************************************************************
+ *************************************************************************
+ **
+ ** void set_cpu_number(unsigned number);
+ **
+ ** Sets the kernel cpu number for this cpu to the given value.
+ **
+ ** Input:
+ ** r1 return address
+ ** r2 the number (should be 0, 1, 2, or 3).
+ **
+ ** Other registers used:
+ ** r3 temp
+ ** r4 original PSR
+ ** r5 temporary new PSR
+ **
+ ** Output:
+ ** none
+ **/
+ENTRY(set_cpu_number)
+ /* make sure the CPU number is valid */
+ clr r3, r2, FLAG_CPU_FIELD_WIDTH<0>
+ bcnd ne0, r3, 1f /* bad cpu number */
+
+ /* going to change a control register -- disable interrupts */
+ ldcr r4, PSR
+ set r5, r4, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r5, PSR
+ tcnd ne0,r0,10 /* make sure interrupts are really disabled */
+ /* if they are not, branch to error_handler() */
+ /* put in the cpu number */
+ ldcr r3, SR1 /* get the flags */
+ clr r3, r3, FLAG_CPU_FIELD_WIDTH<0> /* clean the slate */
+ or r3, r3, r2 /* add the cpu number */
+ stcr r3, SR1 /* put back */
+
+ /* put back the PSR to what it was before and return */
+ stcr r4, PSR
+ jmp r1
+
+ 1:	/* bad cpu number */
+ or.u r2, r0, hi16(1f)
+ bsr.n _panic
+ or r2, r2, lo16(1f)
+ 1: string "bad CPU number\0"
+ align 4
+ /* will not return */
+
+
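[Editor's note: set_cpu_number is a read-modify-write on the low field of SR1: clear the cpu-number bits, OR in the new value, put the word back. The same insert in C, with a hypothetical 2-bit width standing in for FLAG_CPU_FIELD_WIDTH:]

    #define CPU_FIELD_WIDTH 2                   /* hypothetical stand-in */
    #define CPU_FIELD_MASK  ((1u << CPU_FIELD_WIDTH) - 1)

    static unsigned
    set_cpu_field(unsigned flags, unsigned cpu)
    {
        flags &= ~CPU_FIELD_MASK;               /* clean the slate */
        flags |= (cpu & CPU_FIELD_MASK);        /* add the cpu number */
        return flags;
    }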
diff --git a/sys/arch/mvme88k/mvme88k/locore_c_routines.c b/sys/arch/mvme88k/mvme88k/locore_c_routines.c
index 37b91989349..ab494b2b335 100644
--- a/sys/arch/mvme88k/mvme88k/locore_c_routines.c
+++ b/sys/arch/mvme88k/mvme88k/locore_c_routines.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore_c_routines.c,v 1.5 1999/02/09 06:36:28 smurph Exp $ */
+/* $OpenBSD: locore_c_routines.c,v 1.6 1999/09/27 19:13:22 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993-1991 Carnegie Mellon University
@@ -30,23 +30,26 @@
*****************************************************************RCS**/
/* This file created by Omron Corporation, 1990. */
+#include <machine/cpu_number.h> /* cpu_number() */
+#include <machine/board.h> /* m188 bit defines */
#include <machine/m88100.h> /* DMT_VALID */
#include <assym.s> /* EF_NREGS, etc. */
#include <machine/asm.h> /* END_OF_VECTOR_LIST, etc. */
+#include <machine/asm_macro.h> /* enable/disable interrupts */
#ifdef DDB
- #include <ddb/db_output.h> /* db_printf() */
+ #include <ddb/db_output.h> /* db_printf() */
#endif /* DDB */
#if defined(DDB) && defined(JEFF_DEBUG)
-# define DATA_DEBUG 1
+ #define DATA_DEBUG 1
#endif
#if DDB
-# define DEBUG_MSG db_printf
+ #define DEBUG_MSG db_printf
#else
-# define DEBUG_MSG printf
+ #define DEBUG_MSG printf
#endif /* DDB */
/*
@@ -56,220 +59,252 @@
#define DMT_HALF 2
#define DMT_WORD 4
-static struct
-{
- unsigned char offset;
- unsigned char size;
+extern volatile unsigned int * int_mask_reg[MAX_CPUS]; /* in machdep.c */
+extern u_char *int_mask_level; /* in machdep.c */
+extern unsigned master_cpu; /* in cmmu.c */
+
+static struct {
+ unsigned char offset;
+ unsigned char size;
} dmt_en_info[16] =
{
- {0, 0}, {3, DMT_BYTE}, {2, DMT_BYTE}, {2, DMT_HALF},
- {1, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
- {0, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
- {0, DMT_HALF}, {0, 0}, {0, 0}, {0, DMT_WORD}
+ {0, 0}, {3, DMT_BYTE}, {2, DMT_BYTE}, {2, DMT_HALF},
+ {1, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
+ {0, DMT_BYTE}, {0, 0}, {0, 0}, {0, 0},
+ {0, DMT_HALF}, {0, 0}, {0, 0}, {0, DMT_WORD}
};
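
[Editor's note: dmt_en_info decodes the 4-bit byte-enable field of a DMT word into a byte offset and an access size; for example, en 0x3 (the low two byte lanes) is a halfword at offset 2, and en 0xf is a full word at offset 0. A standalone check of the mapping, with the DMT_* sizes written out as byte counts (DMT_BYTE is assumed to be 1, matching DMT_HALF 2 and DMT_WORD 4 above):]

    #include <stdio.h>

    int
    main(void)
    {
        /* Same contents as dmt_en_info above; size is in bytes. */
        static const struct { unsigned char offset, size; } en_info[16] = {
            {0, 0}, {3, 1}, {2, 1}, {2, 2},
            {1, 1}, {0, 0}, {0, 0}, {0, 0},
            {0, 1}, {0, 0}, {0, 0}, {0, 0},
            {0, 2}, {0, 0}, {0, 0}, {0, 4}
        };
        unsigned en = 0x3;          /* low two byte lanes enabled */

        printf("en=0x%x -> offset %u, size %u\n",
            en, en_info[en].offset, en_info[en].size);
        return 0;
    }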
#if DATA_DEBUG
- int data_access_emulation_debug = 0;
+int data_access_emulation_debug = 0;
+static char *bytes[] =
+{
+ "____", "___x", "__x_", "__xx",
+ "_x__", "_x_x", "_xx_", "_xxx",
+ "x___", "x__x", "x_x_", "x_xx",
+ "xx__", "xx_x", "xxx_", "xxxx",
+};
+ #define DAE_DEBUG(stuff) { \
+ if (data_access_emulation_debug != 0) { stuff ;} }
+#else
+ #define DAE_DEBUG(stuff)
+#endif
+
+void
+dae_print(unsigned *eframe)
+{
+ register int x;
+ register struct dmt_reg *dmtx;
+ register unsigned dmax, dmdx;
+ register unsigned v, reg;
static char *bytes[] =
{
- "____", "___x", "__x_", "__xx",
- "_x__", "_x_x", "_xx_", "_xxx",
- "x___", "x__x", "x_x_", "x_xx",
- "xx__", "xx_x", "xxx_", "xxxx",
+ "____", "___x", "__x_", "__xx",
+ "_x__", "_x_x", "_xx_", "_xxx",
+ "x___", "x__x", "x_x_", "x_xx",
+ "xx__", "xx_x", "xxx_", "xxxx",
};
- #define DAE_DEBUG(stuff) { \
- if ((data_access_emulation_debug != 0) && ( \
- data_access_emulation_debug == 0xffffffff)) { stuff ;} }
-#else
- #define DAE_DEBUG(stuff)
-#endif
+ if (!(eframe[EF_DMT0] & DMT_VALID))
+ return;
+
+ for (x = 0; x < 3; x++) {
+ dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
+
+ if (!dmtx->dmt_valid)
+ continue;
+
+ dmdx = eframe[EF_DMD0+x*3];
+ dmax = eframe[EF_DMA0+x*3];
+
+ if (dmtx->dmt_write)
+ printf("[DMT%d=%x: st.%c %x to %x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmdx, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+ else
+ printf("[DMT%d=%x: ld.%c r%d <- %x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmtx->dmt_dreg, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+
+ }
+}
+#if defined(MVME187) || defined(MVME188)
void data_access_emulation(unsigned *eframe)
{
- register int x;
- register struct dmt_reg *dmtx;
- register unsigned dmax, dmdx;
- register unsigned v, reg;
-
- if (!(eframe[EF_DMT0] & DMT_VALID))
- return;
-
- for (x = 0; x < 3; x++)
- {
- dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
-
- if (!dmtx->dmt_valid)
- continue;
-
- dmdx = eframe[EF_DMD0+x*3];
- dmax = eframe[EF_DMA0+x*3];
-
- DAE_DEBUG
- (
- if (dmtx->dmt_write)
- DEBUG_MSG("[DMT%d=%x: st.%c %x to %x as [%s] %s %s]\n",
- x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
- dmdx, dmax, bytes[dmtx->dmt_en],
- dmtx->dmt_doub1 ? "double": "not double",
- dmtx->dmt_lockbar ? "xmem": "not xmem");
- else
- DEBUG_MSG("[DMT%d=%x: ld.%c r%d<-%x as [%s] %s %s]\n",
- x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
- dmtx->dmt_dreg, dmax, bytes[dmtx->dmt_en],
- dmtx->dmt_doub1 ? "double": "not double",
- dmtx->dmt_lockbar ? "xmem": "not xmem");
- )
-
- dmax += dmt_en_info[dmtx->dmt_en].offset;
- reg = dmtx->dmt_dreg;
-
- if ( ! dmtx->dmt_lockbar)
- {
- /* the fault is not during an XMEM */
-
- if (x == 2 && dmtx->dmt_doub1)
- {
- /* pipeline 2 (earliest stage) for a double */
-
- if (dmtx->dmt_write)
- {
- /* STORE DOUBLE WILL BE RE-INITIATED BY rte */
- }
- else
- {
- /* EMULATE ld.d INSTRUCTION */
- v = do_load_word(dmax, dmtx->dmt_das);
- if (reg != 0)
- eframe[EF_R0 + reg] = v;
- v = do_load_word(dmax ^ 4, dmtx->dmt_das);
- if (reg != 31)
- eframe[EF_R0 + reg + 1] = v;
- }
- }
- else /* not pipeline #2 with a double */
- {
- if (dmtx->dmt_write) switch (dmt_en_info[dmtx->dmt_en].size)
- {
- case DMT_BYTE:
- DAE_DEBUG(DEBUG_MSG("[byte %x -> [%x(%c)]\n",
- dmdx & 0xff, dmax, dmtx->dmt_das ? 's' : 'u'))
- do_store_byte(dmax, dmdx, dmtx->dmt_das);
- break;
- case DMT_HALF:
- DAE_DEBUG(DEBUG_MSG("[half %x -> [%x(%c)]\n",
- dmdx & 0xffff, dmax, dmtx->dmt_das ? 's' : 'u'))
- do_store_half(dmax, dmdx, dmtx->dmt_das);
- break;
- case DMT_WORD:
- DAE_DEBUG(DEBUG_MSG("[word %x -> [%x(%c)]\n",
- dmdx, dmax, dmtx->dmt_das ? 's' : 'u'))
- do_store_word(dmax, dmdx, dmtx->dmt_das);
- break;
- }
- else /* else it's a read */
- {
- switch (dmt_en_info[dmtx->dmt_en].size)
- {
- case DMT_BYTE:
- v = do_load_byte(dmax, dmtx->dmt_das);
- if (!dmtx->dmt_signed)
- v &= 0x000000ff;
- break;
- case DMT_HALF:
- v = do_load_half(dmax, dmtx->dmt_das);
- if (!dmtx->dmt_signed)
- v &= 0x0000ffff;
- break;
- case DMT_WORD:
- default: /* 'default' just to shut up lint */
- v = do_load_word(dmax, dmtx->dmt_das);
- break;
- }
- if (reg == 0) {
- DAE_DEBUG(DEBUG_MSG("[no write to r0 done]\n"));
- }
- else
- {
- DAE_DEBUG(DEBUG_MSG("[r%d <- %x]\n",
- reg, v));
- eframe[EF_R0 + reg] = v;
- }
- }
- }
- }
- else /* if lockbar is set... it's part of an XMEM */
- {
- /*
- * According to Motorola's "General Information",
- * the dmt_doub1 bit is never set in this case, as it should be.
- * They call this "general information" - I call it a f*cking bug!
- *
- * Anyway, if lockbar is set (as it is if we're here) and if
- * the write is not set, then it's the same as if doub1
- * was set...
- */
- if ( ! dmtx->dmt_write)
- {
- if (x != 2)
- {
- /* RERUN xmem WITH DMD(x+1) */
- x++;
- dmdx = eframe[EF_DMD0 + x*3];
- }
- else
- {
- /* RERUN xmem WITH DMD2 */
- }
-
- if (dmt_en_info[dmtx->dmt_en].size == DMT_WORD)
- v = do_xmem_word(dmax, dmdx, dmtx->dmt_das);
- else
- v = do_xmem_byte(dmax, dmdx, dmtx->dmt_das);
- eframe[EF_R0 + reg] = v;
- }
- else
- {
- if (x == 0)
- {
- eframe[EF_R0 + reg] = dmdx;
- eframe[EF_SFIP] = eframe[EF_SNIP];
- eframe[EF_SNIP] = eframe[EF_SXIP];
- eframe[EF_SXIP] = 0;
- /* xmem RERUN ON rte */
- eframe[EF_DMT0] = 0;
- return;
- }
- }
- }
- }
- eframe[EF_DMT0] = 0;
+ register int x;
+ register struct dmt_reg *dmtx;
+ register unsigned dmax, dmdx;
+ register unsigned v, reg;
+
+ if (!(eframe[EF_DMT0] & DMT_VALID))
+ return;
+
+ for (x = 0; x < 3; x++) {
+ dmtx = (struct dmt_reg *)&eframe[EF_DMT0+x*3];
+
+ if (!dmtx->dmt_valid)
+ continue;
+
+ dmdx = eframe[EF_DMD0+x*3];
+ dmax = eframe[EF_DMA0+x*3];
+
+ DAE_DEBUG(
+ if (dmtx->dmt_write)
+ DEBUG_MSG("[DMT%d=%x: st.%c %x to %x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmdx, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+ else
+ DEBUG_MSG("[DMT%d=%x: ld.%c r%d<-%x as [%s] %s %s]\n",
+ x, eframe[EF_DMT0+x*3], dmtx->dmt_das ? 's' : 'u',
+ dmtx->dmt_dreg, dmax, bytes[dmtx->dmt_en],
+ dmtx->dmt_doub1 ? "double": "not double",
+ dmtx->dmt_lockbar ? "xmem": "not xmem");
+ )
+
+ dmax += dmt_en_info[dmtx->dmt_en].offset;
+ reg = dmtx->dmt_dreg;
+
+ if ( ! dmtx->dmt_lockbar) {
+ /* the fault is not during an XMEM */
+
+ if (x == 2 && dmtx->dmt_doub1) {
+ /* pipeline 2 (earliest stage) for a double */
+
+ if (dmtx->dmt_write) {
+ /* STORE DOUBLE WILL BE RE-INITIATED BY rte */
+
+ }
+
+ else {
+ /* EMULATE ld.d INSTRUCTION */
+ v = do_load_word(dmax, dmtx->dmt_das);
+ if (reg != 0)
+ eframe[EF_R0 + reg] = v;
+ v = do_load_word(dmax ^ 4, dmtx->dmt_das);
+ if (reg != 31)
+ eframe[EF_R0 + reg + 1] = v;
+ }
+ } else { /* not pipeline #2 with a double */
+ if (dmtx->dmt_write) {
+ switch (dmt_en_info[dmtx->dmt_en].size) {
+ case DMT_BYTE:
+ DAE_DEBUG(DEBUG_MSG("[byte %x -> [%x(%c)]\n",
+ dmdx & 0xff, dmax, dmtx->dmt_das ? 's' : 'u'))
+ do_store_byte(dmax, dmdx, dmtx->dmt_das);
+ break;
+ case DMT_HALF:
+ DAE_DEBUG(DEBUG_MSG("[half %x -> [%x(%c)]\n",
+ dmdx & 0xffff, dmax, dmtx->dmt_das ? 's' : 'u'))
+ do_store_half(dmax, dmdx, dmtx->dmt_das);
+ break;
+ case DMT_WORD:
+ DAE_DEBUG(DEBUG_MSG("[word %x -> [%x(%c)]\n",
+ dmdx, dmax, dmtx->dmt_das ? 's' : 'u'))
+ do_store_word(dmax, dmdx, dmtx->dmt_das);
+ break;
+ }
+ } else { /* else it's a read */
+ switch (dmt_en_info[dmtx->dmt_en].size) {
+ case DMT_BYTE:
+ v = do_load_byte(dmax, dmtx->dmt_das);
+ if (!dmtx->dmt_signed)
+ v &= 0x000000ff;
+ break;
+ case DMT_HALF:
+ v = do_load_half(dmax, dmtx->dmt_das);
+ if (!dmtx->dmt_signed)
+ v &= 0x0000ffff;
+ break;
+ case DMT_WORD:
+ default: /* 'default' just to shut up lint */
+ v = do_load_word(dmax, dmtx->dmt_das);
+ break;
+ }
+ if (reg == 0) {
+ DAE_DEBUG(DEBUG_MSG("[no write to r0 done]\n"));
+ } else {
+ DAE_DEBUG(DEBUG_MSG("[r%d <- %x]\n", reg, v));
+ eframe[EF_R0 + reg] = v;
+ }
+ }
+ }
+ } else { /* if lockbar is set... it's part of an XMEM */
+ /*
+ * According to Motorola's "General Information",
+ * the dmt_doub1 bit is never set in this case, as it should be.
+ * They call this "general information" - I call it a f*cking bug!
+ *
+ * Anyway, if lockbar is set (as it is if we're here) and if
+ * the write is not set, then it's the same as if doub1
+ * was set...
+ */
+ if ( ! dmtx->dmt_write) {
+ if (x != 2) {
+ /* RERUN xmem WITH DMD(x+1) */
+ x++;
+ dmdx = eframe[EF_DMD0 + x*3];
+ } else {
+ /* RERUN xmem WITH DMD2 */
+
+ }
+
+ if (dmt_en_info[dmtx->dmt_en].size == DMT_WORD)
+ v = do_xmem_word(dmax, dmdx, dmtx->dmt_das);
+ else
+ v = do_xmem_byte(dmax, dmdx, dmtx->dmt_das);
+ eframe[EF_R0 + reg] = v;
+ } else {
+ if (x == 0) {
+ eframe[EF_R0 + reg] = dmdx;
+ eframe[EF_SFIP] = eframe[EF_SNIP];
+ eframe[EF_SNIP] = eframe[EF_SXIP];
+ eframe[EF_SXIP] = 0;
+ /* xmem RERUN ON rte */
+ eframe[EF_DMT0] = 0;
+ return;
+ }
+ }
+ }
+ }
+ eframe[EF_DMT0] = 0;
}
+#endif /* defined(MVME187) || defined(MVME188) */
/*
***********************************************************************
***********************************************************************
*/
#define SIGSYS_MAX 501
-#define SIGTRAP_MAX 511
+#define SIGTRAP_MAX 510
#define EMPTY_BR 0xC0000000U /* empty "br" instruction */
#define NO_OP 0xf4005800U /* "or r0, r0, r0" */
-typedef struct
-{
- unsigned word_one,
- word_two;
+typedef struct {
+ unsigned word_one,
+ word_two;
} m88k_exception_vector_area;
#define BRANCH(FROM, TO) (EMPTY_BR | ((unsigned)(TO) - (unsigned)(FROM)) >> 2)
-#define SET_VECTOR(NUM, to, VALUE) { \
+#if 0
+ #define SET_VECTOR(NUM, to, VALUE) { \
unsigned _NUM = (unsigned)(NUM); \
unsigned _VALUE = (unsigned)(VALUE); \
vector[_NUM].word_one = NO_OP; \
vector[_NUM].word_two = BRANCH(&vector[_NUM].word_two, _VALUE); \
}
-
+#else
+ #define SET_VECTOR(NUM, to, VALUE) { \
+ vector[NUM].word_one = NO_OP; \
+ vector[NUM].word_two = BRANCH(&vector[NUM].word_two, VALUE); \
+}
+#endif
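[Editor's note: each vector slot is two words, a no-op followed by an unconditional br whose displacement is counted in words from the branch instruction itself. A standalone check of the encoding BRANCH produces, with illustrative addresses; the real displacement field is a signed 26-bit quantity, which the macro does not mask:]

    #include <stdio.h>

    #define EMPTY_BR 0xC0000000U    /* "br" with zero displacement */

    static unsigned
    branch_encode(unsigned from, unsigned to)
    {
        return EMPTY_BR | ((to - from) >> 2);   /* word displacement */
    }

    int
    main(void)
    {
        /* hypothetical: slot's second word at 0x1008, handler at 0x4000 */
        printf("0x%08x\n", branch_encode(0x1008, 0x4000));
        return 0;
    }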
/*
* vector_init(vector, vector_init_list)
*
@@ -279,35 +314,330 @@ typedef struct
* XXX clean this - nivas
*/
void vector_init(
- m88k_exception_vector_area *vector,
- unsigned *vector_init_list)
+ m88k_exception_vector_area *vector,
+ unsigned *vector_init_list)
{
- register unsigned num;
- register unsigned vec;
- extern void sigsys(), sigtrap(), stepbpt(), userbpt();
- extern void syscall_handler();
-
- for (num = 0; (vec = vector_init_list[num]) != END_OF_VECTOR_LIST; num++)
- {
- if (vec != PREDEFINED_BY_ROM)
- SET_VECTOR(num, to, vec);
- }
- while (num < 496)
- SET_VECTOR(num++, to, sigsys);
- num++; /* skip 496, BUG ROM vector */
-
- SET_VECTOR(450, to, syscall_handler);
-
- while (num <= SIGSYS_MAX)
- SET_VECTOR(num++, to, sigsys);
-
- while (num <= SIGTRAP_MAX)
- SET_VECTOR(num++, to, sigtrap);
-
- SET_VECTOR(504, to, stepbpt);
- SET_VECTOR(511, to, userbpt);
-#if 0
- vector[496].word_one = 496 * 4;
- vector[497].word_two = 497 * 4;
-#endif
+ unsigned num;
+ unsigned vec;
+#if defined(MVME187) || defined(MVME188)
+ extern void sigsys(), sigtrap(), stepbpt(), userbpt();
+ extern void syscall_handler();
+#endif /* defined(MVME187) || defined(MVME188) */
+#ifdef MVME197
+ extern void m197_sigsys(), m197_sigtrap(), m197_stepbpt(), m197_userbpt();
+ extern void m197_syscall_handler();
+#endif /* MVME197 */
+
+ for (num = 0; (vec = vector_init_list[num]) != END_OF_VECTOR_LIST; num++) {
+ if (vec != PREDEFINED_BY_ROM)
+ SET_VECTOR(num, to, vec);
+ asm ("or r0, r0, r0");
+ asm ("or r0, r0, r0");
+ asm ("or r0, r0, r0");
+ asm ("or r0, r0, r0");
+ }
+
+ switch (cputyp) {
+#ifdef MVME197
+ case CPU_197:
+ while (num < 496){
+ SET_VECTOR(num, to, m197_sigsys);
+ num++;
+ }
+ num++; /* skip 496, BUG ROM vector */
+ SET_VECTOR(450, to, m197_syscall_handler);
+
+ while (num <= SIGSYS_MAX)
+ SET_VECTOR(num++, to, m197_sigsys);
+
+ while (num <= SIGTRAP_MAX)
+ SET_VECTOR(num++, to, m197_sigtrap);
+
+ SET_VECTOR(504, to, m197_stepbpt);
+ SET_VECTOR(511, to, m197_userbpt);
+ break;
+#endif /* MVME197 */
+#if defined(MVME187) || defined(MVME188)
+ case CPU_187:
+ case CPU_188:
+ while (num < 496){
+ SET_VECTOR(num, to, sigsys);
+ num++;
+ }
+ num++; /* skip 496, BUG ROM vector */
+
+ SET_VECTOR(450, to, syscall_handler);
+
+ while (num <= SIGSYS_MAX)
+ SET_VECTOR(num++, to, sigsys);
+
+ while (num <= SIGTRAP_MAX)
+ SET_VECTOR(num++, to, sigtrap);
+
+ SET_VECTOR(504, to, stepbpt);
+ SET_VECTOR(511, to, userbpt);
+ break;
+#endif /* defined(MVME187) || defined(MVME188) */
+ }
}
+
+#ifdef MVME188
+unsigned int int_mask_shadow[MAX_CPUS] = {0,0,0,0};
+unsigned int m188_curspl[MAX_CPUS] = {0,0,0,0};
+unsigned int blocked_interrupts_mask;
+
+unsigned int int_mask_val[INT_LEVEL] = {
+ MASK_LVL_0,
+ MASK_LVL_1,
+ MASK_LVL_2,
+ MASK_LVL_3,
+ MASK_LVL_4,
+ MASK_LVL_5,
+ MASK_LVL_6,
+ MASK_LVL_7
+};
+
+
+/*
+ * return next safe spl to reenable interrupts.
+ */
+unsigned int
+safe_level(mask, curlevel)
+unsigned mask;
+unsigned curlevel;
+{
+ register int i;
+
+ for (i = curlevel; i < 8; i++)
+ if (! (int_mask_val[i] & mask))
+ return i;
+ printf("safe_level: no safe level for mask 0x%08x level %d found\n",
+ mask, curlevel);
+ panic("safe_level");
+}
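+
[Editor's note: safe_level walks up from the current level and returns the first one whose enable mask no longer contains any bit of the still-pending source. The sketch below shows that behavior with made-up per-level masks; the real MASK_LVL_* values come from machine/board.h. A source that stays enabled through level 3 gets 4 as its safe level:]

    #include <stdio.h>

    /* Hypothetical enable masks, fewer bits enabled as the level rises. */
    static const unsigned level_mask[8] = {
        0xff, 0x7f, 0x3f, 0x1f, 0x0f, 0x07, 0x03, 0x00
    };

    static int
    safe_level_sketch(unsigned mask, int curlevel)
    {
        int i;

        for (i = curlevel; i < 8; i++)
            if (!(level_mask[i] & mask))
                return i;
        return -1;      /* the kernel panics here instead */
    }

    int
    main(void)
    {
        printf("%d\n", safe_level_sketch(0x10, 0));     /* prints 4 */
        return 0;
    }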
+
+void
+setlevel(int level)
+{
+ m88k_psr_type psr;
+ register unsigned int mask;
+ register int cpu = 0; /* cpu_number(); */
+
+ mask = int_mask_val[level];
+
+ if (cpu != master_cpu)
+ mask &= SLAVE_MASK;
+
+ mask &= ISR_SOFTINT_EXCEPT_MASK(cpu);
+
+ mask &= ~blocked_interrupts_mask;
+
+ *int_mask_reg[cpu] = mask;
+ int_mask_shadow[cpu] = mask;
+
+ m188_curspl[cpu] = level;
+}
+
+#ifdef DDB
+void
+db_setlevel(int level)
+{
+ m88k_psr_type psr;
+ register unsigned int mask;
+ register int cpu = 0; /* cpu_number(); */
+
+ mask = int_mask_val[level];
+
+ if (cpu != master_cpu)
+ mask &= SLAVE_MASK;
+
+ mask &= ISR_SOFTINT_EXCEPT_MASK(cpu);
+
+ mask &= ~blocked_interrupts_mask;
+
+ *int_mask_reg[cpu] = mask;
+ int_mask_shadow[cpu] = mask;
+
+ m188_curspl[cpu] = level;
+}
+#endif /* DDB */
+
+void block_obio_interrupt(unsigned mask)
+{
+ blocked_interrupts_mask |= mask;
+}
+
+void unblock_obio_interrupt(unsigned mask)
+{
+	blocked_interrupts_mask &= ~mask;
+}
+#endif /* MVME188 */
+
+unsigned spl(void)
+{
+ unsigned curspl;
+	m88k_psr_type psr; /* processor status register */
+ int cpu = 0;
+
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
+#ifdef MVME188
+ case CPU_188:
+ /*cpu = cpu_number();*/
+ curspl = m188_curspl[cpu];
+ break;
+#endif /* MVME188 */
+#if defined(MVME187) || defined(MVME197)
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ break;
+#endif /* defined(MVME187) || defined(MVME197) */
+ default:
+ panic("spl: Can't determine cpu type!");
+ }
+ set_psr(psr);
+ return curspl;
+}
+
+#if DDB
+unsigned db_spl(void)
+{
+ unsigned curspl;
+	m88k_psr_type psr; /* processor status register */
+ int cpu = 0;
+
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
+ #ifdef MVME188
+ case CPU_188:
+ /*cpu = cpu_number();*/
+ curspl = m188_curspl[cpu];
+ break;
+ #endif /* MVME188 */
+ #if defined(MVME187) || defined(MVME197)
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ break;
+ #endif /* defined(MVME187) || defined(MVME197) */
+ default:
+ panic("db_spl: Can't determine cpu type!");
+ }
+ set_psr(psr);
+ return curspl;
+}
+#endif /* DDB */
+
+unsigned getipl(void)
+{
+ return (spl());
+}
+
+#if DDB
+unsigned db_getipl(void)
+{
+ return (db_spl());
+}
+#endif /* DDB */
+
+unsigned setipl(unsigned level)
+{
+ unsigned curspl;
+	m88k_psr_type psr; /* processor status register */
+ int cpu = 0;
+
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
+#ifdef MVME188
+ case CPU_188:
+ /*cpu = cpu_number();*/
+ curspl = m188_curspl[cpu];
+ setlevel(level);
+ break;
+#endif /* MVME188 */
+#if defined(MVME187) || defined(MVME197)
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ *int_mask_level = level;
+ break;
+#endif /* defined(MVME187) || defined(MVME197) */
+ default:
+ panic("setipl: Can't determine cpu type!");
+ }
+
+ flush_pipeline();
+
+	/* The flush pipeline is required to make sure the above write gets
+	 * through the data pipe and to the hardware; otherwise, the next
+	 * bunch of instructions could execute at the wrong spl protection
+	 * level.
+	 */
+ set_psr(psr);
+ return curspl;
+}
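[Editor's note: setipl follows the usual spl protocol: it installs the new level and hands back the old one, so critical sections nest by restoring the saved value. A sketched caller, with setipl stubbed so the example stands alone; IPL_TTY is a hypothetical level name:]

    #include <stdio.h>

    #define IPL_TTY 3                   /* hypothetical level */

    static unsigned cur_level;

    static unsigned
    setipl(unsigned level)              /* stub of the routine above */
    {
        unsigned old = cur_level;

        cur_level = level;
        return old;
    }

    int
    main(void)
    {
        unsigned s = setipl(IPL_TTY);   /* raise, remember old level */

        /* ... code that must not be interrupted at this level ... */
        setipl(s);                      /* restore on the way out */
        printf("restored to %u\n", cur_level);
        return 0;
    }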
+
+#ifdef DDB
+unsigned db_setipl(unsigned level)
+{
+ unsigned curspl;
+	m88k_psr_type psr; /* processor status register */
+ int cpu = 0;
+
+ psr = disable_interrupts_return_psr();
+ switch (cputyp) {
+#ifdef MVME188
+ case CPU_188:
+ /*cpu = cpu_number();*/
+ curspl = m188_curspl[cpu];
+ db_setlevel(level);
+ break;
+#endif /* MVME188 */
+#if defined(MVME187) || defined(MVME197)
+ case CPU_187:
+ case CPU_197:
+ curspl = *int_mask_level;
+ *int_mask_level = level;
+ break;
+#endif /* defined(MVME187) || defined(MVME197) */
+ default:
+ panic("db_setipl: Can't determine cpu type!");
+ }
+
+ flush_pipeline();
+
+	/* The flush pipeline is required to make sure the above write gets
+	 * through the data pipe and to the hardware; otherwise, the next
+	 * bunch of instructions could execute at the wrong spl protection
+	 * level.
+	 */
+ set_psr(psr);
+ return curspl;
+}
+#endif /* DDB */
+
+#if NCPUS > 1
+ #include <sys/simplelock.h>
+void
+simple_lock_init(lkp)
+__volatile struct simplelock *lkp;
+{
+ lkp->lock_data = 0;
+}
+
+int test_and_set(lock)
+__volatile int *lock;
+{
+	int oldlock = *lock;
+
+	/* XXX not atomic; an xmem-based version is needed for real MP use */
+	if (oldlock == 0)
+		*lock = 1;
+	return (oldlock);
+}
+#endif
+
+
diff --git a/sys/arch/mvme88k/mvme88k/m18x_cmmu.c b/sys/arch/mvme88k/mvme88k/m18x_cmmu.c
new file mode 100644
index 00000000000..bae0c4ea69d
--- /dev/null
+++ b/sys/arch/mvme88k/mvme88k/m18x_cmmu.c
@@ -0,0 +1,2283 @@
+/*
+ * Copyright (c) 1998 Steve Murphree, Jr.
+ * Copyright (c) 1996 Nivas Madhur
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Nivas Madhur.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: m18x_cmmu.c,v 1.1 1999/09/27 19:13:22 smurph Exp $
+ */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/simplelock.h>
+#include <machine/board.h>
+#include <machine/cpus.h>
+#include <machine/cpu_number.h>
+#include <machine/m882xx.h>
+
+/* On some versions of the 88200, page-size flushes don't work. I am using
+ * the sledgehammer approach until I find out for sure which ones are bad XXX nivas */
+#define BROKEN_MMU_MASK
+#define CMMU_DEBUG 1
+
+#ifdef DEBUG
+ #define DB_CMMU 0x4000 /* MMU debug */
+unsigned int debuglevel = 0;
+ #define dprintf(_L_,_X_) { if (debuglevel & (_L_)) { unsigned int psr = disable_interrupts_return_psr(); printf("%d: ", cpu_number()); printf _X_; set_psr(psr); } }
+#else
+ #define dprintf(_L_,_X_)
+#endif
+#undef SHADOW_BATC /* don't use BATCs for now XXX nivas */
+
+struct cmmu_regs {
+ /* base + $000 */
+ volatile unsigned idr;
+ /* base + $004 */volatile unsigned scr;
+ /* base + $008 */volatile unsigned ssr;
+ /* base + $00C */volatile unsigned sar;
+ /* */unsigned padding1[0x3D];
+ /* base + $104 */volatile unsigned sctr;
+ /* base + $108 */volatile unsigned pfSTATUSr;
+ /* base + $10C */volatile unsigned pfADDRr;
+ /* */unsigned padding2[0x3C];
+ /* base + $200 */volatile unsigned sapr;
+ /* base + $204 */volatile unsigned uapr;
+ /* */unsigned padding3[0x7E];
+ /* base + $400 */volatile unsigned bwp[8];
+ /* */unsigned padding4[0xF8];
+ /* base + $800 */volatile unsigned cdp[4];
+ /* */unsigned padding5[0x0C];
+ /* base + $840 */volatile unsigned ctp[4];
+ /* */unsigned padding6[0x0C];
+ /* base + $880 */volatile unsigned cssp;
+
+ /* The rest for the 88204 */
+#define cssp0 cssp
+ /* */ unsigned padding7[0x03];
+ /* base + $890 */volatile unsigned cssp1;
+ /* */unsigned padding8[0x03];
+ /* base + $8A0 */volatile unsigned cssp2;
+ /* */unsigned padding9[0x03];
+ /* base + $8B0 */volatile unsigned cssp3;
+};
+
+struct cmmu {
+ struct cmmu_regs *cmmu_regs; /* CMMU "base" area */
+ unsigned char cmmu_cpu; /* cpu number it is attached to */
+ unsigned char which; /* either INST_CMMU || DATA_CMMU */
+ unsigned char cmmu_access; /* either CMMU_ACS_{SUPER,USER,BOTH} */
+ unsigned char cmmu_alive;
+#define CMMU_DEAD 0 /* This cmmu not there */
+#define CMMU_AVAILABLE	1	/* It's there, but which cpu owns it? */
+#define CMMU_ALIVE 1 /* It's there. */
+#define CMMU_MARRIED 2 /* Know which cpu it belongs to. */
+ vm_offset_t cmmu_addr; /* address range */
+ vm_offset_t cmmu_addr_mask; /* address mask */
+ int cmmu_addr_match; /* return value of address comparison */
+#if SHADOW_BATC
+ unsigned batc[8];
+#endif
+};
+/*
+ * We rely upon and use INST_CMMU == 0 and DATA_CMMU == 1
+ */
+#if INST_CMMU != 0 || DATA_CMMU != 1
+error("ack gag barf!");
+#endif
+
+/*
+ * CMMU(cpu,data) is the cmmu struct for the named cpu's indicated cmmu.
+ * REGS(cpu,data) is the actual register structure.
+ */
+
+#define CMMU(cpu, data) cpu_cmmu[(cpu)].pair[(data)?DATA_CMMU:INST_CMMU]
+#define REGS(cpu, data) (*CMMU(cpu, data)->cmmu_regs)
+
+/*
+ * This lock protects the cmmu SAR and SCR's; other ports
+ * can be accessed without locking it
+ *
+ * May be used from "db_interface.c".
+ */
+
+extern unsigned cache_policy;
+extern unsigned cpu_sets[];
+extern unsigned number_cpus;
+extern unsigned master_cpu;
+extern int max_cpus, max_cmmus;
+extern int cpu_cmmu_ratio;
+
+int vme188_config;
+
+/* FORWARDS */
+void m18x_setup_cmmu_config(void);
+void m18x_setup_board_config(void);
+
+#ifdef CMMU_DEBUG
+void
+m18x_show_apr(unsigned value)
+{
+ union apr_template apr_template;
+ apr_template.bits = value;
+
+ printf("table @ 0x%x000", apr_template.field.st_base);
+ if (apr_template.field.wt) printf(", writethrough");
+ if (apr_template.field.g) printf(", global");
+ if (apr_template.field.ci) printf(", cache inhibit");
+ if (apr_template.field.te) printf(", valid");
+ else printf(", not valid");
+ printf("\n");
+}
+
+void
+m18x_show_sctr(unsigned value)
+{
+ union {
+ unsigned bits;
+ struct {
+ unsigned :16,
+ pe: 1,
+ se: 1,
+ pr: 1,
+ :13;
+ } fields;
+ } sctr;
+ sctr.bits = value;
+ printf("%spe, %sse %spr]\n",
+ sctr.fields.pe ? "" : "!",
+ sctr.fields.se ? "" : "!",
+ sctr.fields.pr ? "" : "!");
+}
+#endif
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The cmmu.c module was initially designed for the Omron Luna 88K
+ * layout consisting of 4 CPUs with 2 CMMUs each, one for data
+ * and one for instructions.
+ *
+ * Trying to support a few more board configurations for the
+ * Motorola MVME188 we have these layouts:
+ *
+ * - config 0: 4 CPUs, 8 CMMUs
+ * - config 1: 2 CPUs, 8 CMMUs
+ * - config 2: 1 CPU, 8 CMMUs
+ * - config 5: 2 CPUs, 4 CMMUs
+ * - config 6: 1 CPU, 4 CMMUs
+ * - config A: 1 CPU, 2 CMMUs
+ *
+ * We use these split-up schemes:
+ * - split between data and instructions (always enabled)
+ * - split between user/spv (and A14 in config 2)
+ * - split because of A12 (and A14 in config 2)
+ * - one CMMU for supervisor SRAM, the other for the rest
+ * - one CMMU for all of SRAM, the other for the rest
+ *
+ * The main problem is to find the best-suited CMMU for a given
+ * CPU number in these configurations.
+ * em, 10.5.94
+ *
+ * WARNING: the code was never tested on a uniprocessor
+ * system. All effort was made to support these configurations,
+ * but the kernel never ran on such a system.
+ *
+ * em, 12.7.94
+ */
+
+/*
+ * This structure describes the CMMU per CPU split strategies
+ * used for data and instruction CMMUs.
+ */
+struct cmmu_strategy {
+ int inst;
+ int data;
+} cpu_cmmu_strategy[] = {
+ /* inst data */
+ { CMMU_SPLIT_SPV, CMMU_SPLIT_SPV}, /* CPU 0 */
+ { CMMU_SPLIT_SPV, CMMU_SPLIT_SPV}, /* CPU 1 */
+ { CMMU_SPLIT_ADDRESS, CMMU_SPLIT_ADDRESS}, /* CPU 2 */
+ { CMMU_SPLIT_ADDRESS, CMMU_SPLIT_ADDRESS} /* CPU 3 */
+};
+
+/*
+ * The following list of structs describe the different
+ * MVME188 configurations which are supported by this module.
+ */
+struct board_config {
+ int supported;
+ int ncpus;
+ int ncmmus;
+} bd_config[] =
+{
+ /* sup, CPU MMU */
+ { 1, 4, 8}, /* 4P128 - 4P512 */
+ { 1, 2, 8}, /* 2P128 - 2P512 */
+ { 1, 1, 8}, /* 1P128 - 1P512 */
+ { -1, -1, -1},
+ { -1, -1, -1},
+ { 1, 2, 4}, /* 2P64 - 2P256 */
+ { 1, 1, 4}, /* 1P64 - 1P256 */
+ { -1, -1, -1},
+ { -1, -1, -1},
+ { -1, -1, -1},
+ { 1, 1, 2}, /* 1P32 - 1P128 */
+ { -1, -1, -1},
+ { -1, -1, -1},
+ { -1, -1, -1},
+ { -1, -1, -1},
+ { -1, -1, -1}
+};
+
+/*
+ * Structure for accessing MMUS properly.
+ */
+
+struct cmmu cmmu[MAX_CMMUS] =
+{
+ /* addr cpu mode access
+ alive addr mask */
+ {(void *)VME_CMMU_I0, -1, INST_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_D0, -1, DATA_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_I1, -1, INST_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_D1, -1, DATA_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_I2, -1, INST_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_D2, -1, DATA_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_I3, -1, INST_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0},
+ {(void *)VME_CMMU_D3, -1, DATA_CMMU, CMMU_ACS_BOTH,
+ CMMU_DEAD, 0, 0}
+};
+
+struct cpu_cmmu {
+ struct cmmu *pair[2];
+} cpu_cmmu[MAX_CPUS];
+
+void
+m18x_setup_board_config(void)
+{
+ volatile unsigned long *whoami;
+
+ master_cpu = 0; /* temp to get things going */
+ switch (cputyp) {
+ case CPU_187:
+ case CPU_197:
+ vme188_config = 10; /* There is no WHOAMI reg on MVME1x7 - fake it... */
+ cmmu[0].cmmu_regs = (void *)SBC_CMMU_I;
+ cmmu[0].cmmu_cpu = 0;
+ cmmu[1].cmmu_regs = (void *)SBC_CMMU_D;
+ cmmu[1].cmmu_cpu = 0;
+ cmmu[2].cmmu_regs = (void *)NULL;
+ cmmu[3].cmmu_regs = (void *)NULL;
+ cmmu[4].cmmu_regs = (void *)NULL;
+ cmmu[5].cmmu_regs = (void *)NULL;
+ cmmu[6].cmmu_regs = (void *)NULL;
+ cmmu[7].cmmu_regs = (void *)NULL;
+ max_cpus = 1;
+ max_cmmus = 2;
+ break;
+ case CPU_188:
+ whoami = (volatile unsigned long *)MVME188_WHOAMI;
+ vme188_config = (*whoami & 0xf0) >> 4;
+ dprintf(DB_CMMU,("m18x_setup_board_config: WHOAMI @ 0x%08x holds value 0x%08x\n",
+ whoami, *whoami));
+ max_cpus = bd_config[vme188_config].ncpus;
+ max_cmmus = bd_config[vme188_config].ncmmus;
+ break;
+ default:
+ panic("m18x_setup_board_config: Unknown CPU type.");
+ }
+ cpu_cmmu_ratio = max_cmmus / max_cpus;
+ switch (bd_config[vme188_config].supported) {
+ case 0:
+ printf("MVME%x board configuration #%X: %d CPUs %d CMMUs\n", cputyp,
+ vme188_config, max_cpus, max_cmmus);
+ panic("This configuration is not supported - go and get another OS.\n");
+ /* NOTREACHED */
+ break;
+ case 1:
+ printf("MVME%x board configuration #%X: %d CPUs %d CMMUs\n", cputyp,
+ vme188_config, max_cpus, max_cmmus);
+ m18x_setup_cmmu_config();
+ break;
+ default:
+ panic("UNKNOWN MVME%x board configuration: WHOAMI = 0x%02x\n", cputyp, *whoami);
+ /* NOTREACHED */
+ break;
+ }
+ return;
+}
+
+/*
+ * This routine sets up the CPU/CMMU tables used in the
+ * motorola/m88k/m88100/cmmu.c module.
+ */
+void
+m18x_setup_cmmu_config(void)
+{
+ volatile unsigned long *pcnfa;
+ volatile unsigned long *pcnfb;
+
+ register int num, cmmu_num, val1, val2;
+
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: initializing with %d CPU(s) and %d CMMU(s)\n",
+ max_cpus, max_cmmus));
+
+ /*
+ * Probe for available MMUs
+ */
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++)
+ if (!badwordaddr((vm_offset_t)cmmu[cmmu_num].cmmu_regs)) {
+ union cpupid id;
+
+ id.cpupid = cmmu[cmmu_num].cmmu_regs->idr;
+ if (id.m88200.type != M88200 && id.m88200.type != M88204) {
+ printf("WARNING: non M8820x circuit found at CMMU address 0x%08x\n",
+ cmmu[cmmu_num].cmmu_regs);
+ continue;
+ }
+ cmmu[cmmu_num].cmmu_alive = CMMU_ALIVE;
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: CMMU %d found at 0x%08x\n",
+ cmmu_num, cmmu[cmmu_num].cmmu_regs));
+ }
+
+ /*
+ * Now that we know which CMMUs are there, let's report on which
+ * CPU/CMMU sets seem complete (hopefully all)
+ */
+ for (num = 0; num < max_cpus; num++) {
+ register int i;
+ union cpupid id;
+
+ for (i = 0; i < cpu_cmmu_ratio; i++) {
+ dprintf(DB_CMMU,("cmmu_init: testing CMMU %d for CPU %d\n",
+ num*cpu_cmmu_ratio+i, num));
+ if (!m18x_cmmu_alive(num*cpu_cmmu_ratio + i)) {
+				printf("CMMU %d attached to CPU %d is not working\n",
+				       num*cpu_cmmu_ratio + i, num);
+ panic("m18x_setup_cmmu_config");
+ }
+ }
+ cpu_sets[num] = 1; /* This cpu installed... */
+ id.cpupid = cmmu[num*cpu_cmmu_ratio].cmmu_regs->idr;
+
+ if (id.m88200.type == M88204)
+ printf("CPU%d is attached with %d MC88204 CMMUs\n",
+ num, cpu_cmmu_ratio);
+ else
+ printf("CPU%d is attached with %d MC88200 CMMUs\n",
+ num, cpu_cmmu_ratio);
+ }
+
+ for (num = 0; num < max_cpus; num++) {
+ cpu_cmmu_strategy[num].inst &= CMMU_SPLIT_MASK;
+ cpu_cmmu_strategy[num].data &= CMMU_SPLIT_MASK;
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: CPU %d inst strat %d data strat %d\n",
+ num, cpu_cmmu_strategy[num].inst, cpu_cmmu_strategy[num].data));
+ }
+
+ switch (vme188_config) {
+ /*
+ * These configurations have hardwired CPU/CMMU configurations.
+ */
+ case CONFIG_0:
+ case CONFIG_5:
+ case CONFIG_A:
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: resetting strategies\n"));
+ for (num = 0; num < max_cpus; num++)
+ cpu_cmmu_strategy[num].inst = cpu_cmmu_strategy[num].data =
+ CMMU_SPLIT_ADDRESS;
+ break;
+ /*
+ * Configure CPU/CMMU strategy into PCNFA and PCNFB board registers.
+ */
+ case CONFIG_1:
+ pcnfa = (volatile unsigned long *)MVME188_PCNFA;
+ pcnfb = (volatile unsigned long *)MVME188_PCNFB;
+ val1 = (cpu_cmmu_strategy[0].inst << 2) | cpu_cmmu_strategy[0].data;
+ val2 = (cpu_cmmu_strategy[1].inst << 2) | cpu_cmmu_strategy[1].data;
+ *pcnfa = val1;
+ *pcnfb = val2;
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: 2P128: PCNFA = 0x%x, PCNFB = 0x%x\n", val1, val2));
+ break;
+ case CONFIG_2:
+ pcnfa = (volatile unsigned long *)MVME188_PCNFA;
+ pcnfb = (volatile unsigned long *)MVME188_PCNFB;
+ val1 = (cpu_cmmu_strategy[0].inst << 2) | cpu_cmmu_strategy[0].inst;
+ val2 = (cpu_cmmu_strategy[0].data << 2) | cpu_cmmu_strategy[0].data;
+ *pcnfa = val1;
+ *pcnfb = val2;
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: 1P128: PCNFA = 0x%x, PCNFB = 0x%x\n", val1, val2));
+ break;
+ case CONFIG_6:
+ pcnfa = (volatile unsigned long *)MVME188_PCNFA;
+ val1 = (cpu_cmmu_strategy[0].inst << 2) | cpu_cmmu_strategy[0].data;
+ *pcnfa = val1;
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: 1P64: PCNFA = 0x%x\n", val1));
+ break;
+ default:
+ panic("m18x_setup_cmmu_config");
+ break;
+ }
+
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: PCNFA = 0x%x, PCNFB = 0x%x\n", *pcnfa, *pcnfb));
+
+ /*
+ * Calculate the CMMU<->CPU connections
+ */
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++) {
+ cmmu[cmmu_num].cmmu_cpu =
+ (int) (((float) cmmu_num) * ((float) max_cpus) / ((float) max_cmmus));
+ dprintf(DB_CMMU,("m18x_setup_cmmu_config: CMMU %d connected with CPU %d\n",
+ cmmu_num, cmmu[cmmu_num].cmmu_cpu));
+ }
+
+ /*
+ * Now set cmmu[].cmmu_access and addr
+ */
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++) {
+ /*
+ * We don't set up anything for the hardwired configurations.
+ */
+ if (cpu_cmmu_ratio == 2) {
+ cmmu[cmmu_num].cmmu_addr =
+ cmmu[cmmu_num].cmmu_addr_mask = 0;
+ cmmu[cmmu_num].cmmu_addr_match = 1;
+ cmmu[cmmu_num].cmmu_access = CMMU_ACS_BOTH;
+ continue;
+ }
+
+ /*
+ * First we set the address/mask pairs for the exact address
+ * matches.
+ */
+ switch ((cmmu[cmmu_num].which == INST_CMMU) ?
+ cpu_cmmu_strategy[cmmu[cmmu_num].cmmu_cpu].inst :
+ cpu_cmmu_strategy[cmmu[cmmu_num].cmmu_cpu].data) {
+ case CMMU_SPLIT_ADDRESS:
+ cmmu[cmmu_num].cmmu_addr = ((cmmu_num & 0x2) ^ 0x2) << 11;
+ cmmu[cmmu_num].cmmu_addr_mask = CMMU_A12_MASK;
+ cmmu[cmmu_num].cmmu_addr_match = 1;
+ break;
+ case CMMU_SPLIT_SPV:
+ cmmu[cmmu_num].cmmu_addr =
+ cmmu[cmmu_num].cmmu_addr_mask = 0;
+ cmmu[cmmu_num].cmmu_addr_match = 1;
+ break;
+ case CMMU_SPLIT_SRAM_ALL:
+ cmmu[cmmu_num].cmmu_addr = CMMU_SRAM;
+ cmmu[cmmu_num].cmmu_addr_mask = CMMU_SRAM_MASK;
+ cmmu[cmmu_num].cmmu_addr_match = (cmmu_num & 0x2) ? 1 : 0;
+ break;
+ case CMMU_SPLIT_SRAM_SPV:
+ if (cmmu_num & 0x2) {
+ cmmu[cmmu_num].cmmu_addr = CMMU_SRAM;
+ cmmu[cmmu_num].cmmu_addr_mask = CMMU_SRAM_MASK;
+ } else {
+ cmmu[cmmu_num].cmmu_addr =
+ cmmu[cmmu_num].cmmu_addr_mask = 0;
+ }
+ cmmu[cmmu_num].cmmu_addr_match = 1;
+ break;
+ }
+
+ /*
+ * For MVME188 single processors, we've got to look at A14.
+		 * This bit splits the CMMUs independently of the enabled strategy.
+ *
+ * NOT TESTED!!! - em
+ */
+ if (cpu_cmmu_ratio > 4) {
+ cmmu[cmmu_num].cmmu_addr |= ((cmmu_num & 0x4) ^ 0x4) << 12;
+ cmmu[cmmu_num].cmmu_addr_mask |= CMMU_A14_MASK;
+ }
+
+ /*
+ * Next we cope with the various access modes.
+ */
+ switch ((cmmu[cmmu_num].which == INST_CMMU) ?
+ cpu_cmmu_strategy[cmmu[cmmu_num].cmmu_cpu].inst :
+ cpu_cmmu_strategy[cmmu[cmmu_num].cmmu_cpu].data) {
+ case CMMU_SPLIT_SPV:
+ cmmu[cmmu_num].cmmu_access =
+ (cmmu_num & 0x2 ) ? CMMU_ACS_USER : CMMU_ACS_SUPER;
+ break;
+ case CMMU_SPLIT_SRAM_SPV:
+ cmmu[cmmu_num].cmmu_access =
+ (cmmu_num & 0x2 ) ? CMMU_ACS_SUPER : CMMU_ACS_BOTH;
+ break;
+ default:
+ cmmu[cmmu_num].cmmu_access = CMMU_ACS_BOTH;
+ break;
+ }
+ }
+ return;
+}
+
+static char *cmmu_strat_string[] = {
+ "address split ",
+ "user/spv split",
+ "spv SRAM split",
+ "all SRAM split"
+};
+
+void m18x_cmmu_dump_config(void)
+{
+
+ volatile unsigned long *pcnfa;
+ volatile unsigned long *pcnfb;
+ register int cmmu_num;
+
+ if (cputyp != CPU_188) return;
+
+ db_printf("Current CPU/CMMU configuration:\n\n");
+
+	pcnfa = (volatile unsigned long *)MVME188_PCNFA;
+	pcnfb = (volatile unsigned long *)MVME188_PCNFB;
+	db_printf("VME188 address decoder: PCNFA = 0x%1x, PCNFB = 0x%1x\n\n", *pcnfa & 0xf, *pcnfb & 0xf);
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++) {
+ db_printf("CMMU #%d: %s CMMU for CPU %d:\n Strategy: %s\n %s access addr 0x%08x mask 0x%08x match %s\n",
+ cmmu_num,
+ (cmmu[cmmu_num].which == INST_CMMU) ? "inst" : "data",
+ cmmu[cmmu_num].cmmu_cpu,
+ cmmu_strat_string[(cmmu[cmmu_num].which == INST_CMMU) ?
+ cpu_cmmu_strategy[cmmu[cmmu_num].cmmu_cpu].inst :
+ cpu_cmmu_strategy[cmmu[cmmu_num].cmmu_cpu].data],
+ (cmmu[cmmu_num].cmmu_access == CMMU_ACS_BOTH) ? "User and spv" :
+ ((cmmu[cmmu_num].cmmu_access == CMMU_ACS_USER) ? "User " :
+ "Supervisor "),
+ cmmu[cmmu_num].cmmu_addr,
+ cmmu[cmmu_num].cmmu_addr_mask,
+ cmmu[cmmu_num].cmmu_addr_match ? "TRUE" : "FALSE");
+ }
+}
+
+/* To be implemented as a macro for speedup - XXX-em */
+static void
+m18x_cmmu_store(int mmu, int reg, unsigned val)
+{
+ *(volatile unsigned *)(reg + (char*)(cmmu[mmu].cmmu_regs)) = val;
+}
+
+int m18x_cmmu_alive(int mmu)
+{
+ return (cmmu[mmu].cmmu_alive == CMMU_ALIVE);
+}
+
+unsigned m18x_cmmu_get(int mmu, int reg)
+{
+ return *(volatile unsigned *)(reg + (char*)(cmmu[mmu].cmmu_regs));
+}
+
+/*
+ * This function is called by the MMU module and pokes values
+ * into the CMMU's registers.
+ */
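+/*
+ * The flags argument selects the target CMMUs:
+ * NUM_CMMU   - 'num' is a CMMU index; write to that CMMU only.
+ * Otherwise 'num' is a CPU number and its CMMUs are scanned, where
+ * MODE_VAL   - match only CMMUs of the given mode (inst/data),
+ * ACCESS_VAL - match only CMMUs serving the given access (or both),
+ * ADDR_VAL   - match only CMMUs claiming the given address.
+ */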
+void m18x_cmmu_set(int reg, unsigned val, int flags,
+ int num, int mode, int access, vm_offset_t addr)
+{
+ register int mmu;
+
+ if (flags & NUM_CMMU) {
+ /*
+ * Special case: user supplied CMMU number directly as argument.
+ * Simply store the value away.
+ */
+ /* assert(num < max_cmmus); */
+ m18x_cmmu_store(num, reg, val);
+ return;
+ }
+
+ /*
+ * We scan all CMMUs to find the matching ones and store the
+ * values there.
+ */
+ for (mmu = num*cpu_cmmu_ratio; mmu < (num+1)*cpu_cmmu_ratio; mmu++) {
+ if (((flags & MODE_VAL)) &&
+ (cmmu[mmu].which != mode))
+ continue;
+ if (((flags & ACCESS_VAL)) &&
+ (cmmu[mmu].cmmu_access != access) &&
+ (cmmu[mmu].cmmu_access != CMMU_ACS_BOTH))
+ continue;
+ if (flags & ADDR_VAL) {
+ if (((addr & cmmu[mmu].cmmu_addr_mask) == cmmu[mmu].cmmu_addr)
+ != cmmu[mmu].cmmu_addr_match) {
+ continue;
+ }
+ }
+ m18x_cmmu_store(mmu, reg, val);
+ }
+}
+
+#ifdef DDB
+/*
+ * Used by DDB for cache probe functions
+ */
+unsigned m18x_cmmu_get_by_mode(int cpu, int mode)
+{
+ register int mmu;
+
+ for (mmu = cpu*cpu_cmmu_ratio; mmu < (cpu+1)*cpu_cmmu_ratio; mmu++)
+ if (cmmu[mmu].which == mode)
+ return mmu;
+ printf("can't figure out first %s CMMU for CPU %d\n",
+ (mode == DATA_CMMU) ? "data" : "instruction", cpu);
+ panic("m18x_cmmu_get_by_mode");
+}
+#endif
+
+static char *mmutypes[8] = {
+ "Unknown (0)",
+ "Unknown (1)",
+ "Unknown (2)",
+ "Unknown (3)",
+ "Unknown (4)",
+ "M88200 (16K)",
+ "M88204 (64K)",
+ "Unknown (7)"
+};
+
+/*
+ * Should only be called after the calling cpu knows its cpu
+ * number and master/slave status.  Should be called first
+ * by the master, before the slaves are started.
+*/
+void m18x_cpu_configuration_print(int master)
+{
+ int pid = read_processor_identification_register();
+ int proctype = (pid & 0xff00) >> 8;
+ int procvers = (pid & 0xe) >> 1;
+ int mmu, cpu = cpu_number();
+ struct simplelock print_lock;
+
+ if (master)
+ simple_lock_init(&print_lock);
+
+ simple_lock(&print_lock);
+
+ printf("Processor %d: ", cpu);
+ if (proctype)
+ printf("Architectural Revision 0x%x UNKNOWN CPU TYPE Version 0x%x\n",
+ proctype, procvers);
+ else
+ printf("M88100 Version 0x%x\n", procvers);
+
+#if ERRATA__XXX_USR == 0
+ if (procvers < 2)
+ printf("WARNING: M88100 bug workaround code not enabled!!!\n");
+#endif
+
+ for (mmu = cpu*cpu_cmmu_ratio; mmu < (cpu+1)*cpu_cmmu_ratio; mmu++) {
+ int idr = m18x_cmmu_get(mmu, CMMU_IDR);
+ int mmuid = (0xe00000 & idr)>>21;
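+		/* IDR bits 23-21 encode the CMMU type, bits 20-16 its version */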
+
+ printf(" %s %s Cache: ",
+ (cmmu[mmu].cmmu_access == CMMU_ACS_BOTH) ? "Spv and User" :
+ ((cmmu[mmu].cmmu_access == CMMU_ACS_USER) ? "User " :
+ "Supervisor "),
+ (cmmu[mmu].which == INST_CMMU) ? "Instruction" :
+ "Data ");
+ if (mmutypes[mmuid][0] == 'U')
+ printf("Type 0x%x ", mmuid);
+ else
+ printf("%s ", mmutypes[mmuid]);
+ printf("Version 0x%x\n", (idr & 0x1f0000)>>16);
+ }
+ printf (" Configured as %s and started\n", master ? "master" : "slave");
+
+ simple_unlock(&print_lock);
+}
+
+/*
+ * CMMU initialization routine
+ */
+void
+m18x_cmmu_init(void)
+{
+ unsigned tmp, cmmu_num;
+ union cpupid id;
+ int cpu;
+
+ for (cpu = 0; cpu < max_cpus; cpu++) {
+ cpu_cmmu[cpu].pair[INST_CMMU] = cpu_cmmu[cpu].pair[DATA_CMMU] = 0;
+ }
+
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++)
+ if (m18x_cmmu_alive(cmmu_num)) {
+ id.cpupid = cmmu[cmmu_num].cmmu_regs->idr;
+
+ cpu_cmmu[cmmu[cmmu_num].cmmu_cpu].pair[cmmu[cmmu_num].which] =
+ &cmmu[cmmu_num];
+ /*
+ * Reset cache data....
+ * as per M88200 Manual (2nd Ed.) section 3.11.
+ */
+			for (tmp = 0; tmp < 256; tmp++) {	/* all 256 sets */
+ cmmu[cmmu_num].cmmu_regs->sar = tmp << 4;
+ cmmu[cmmu_num].cmmu_regs->cssp = 0x3f0ff000;
+ }
+
+ /* 88204 has additional cache to clear */
+ if (id.m88200.type == M88204) {
+				for (tmp = 0; tmp < 256; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp<<4;
+ cmmu[cmmu_num].cmmu_regs->cssp1 = 0x3f0ff000;
+ }
+				for (tmp = 0; tmp < 256; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp<<4;
+ cmmu[cmmu_num].cmmu_regs->cssp2 = 0x3f0ff000;
+ }
+				for (tmp = 0; tmp < 256; tmp++) {
+ cmmu[cmmu_num].cmmu_regs->sar = tmp<<4;
+ cmmu[cmmu_num].cmmu_regs->cssp3 = 0x3f0ff000;
+ }
+ }
+
+ /*
+ * Set the SCTR, SAPR, and UAPR to some known state
+ * (I don't trust the reset to do it).
+ */
+ tmp =
+ ! CMMU_SCTR_PE | /* not parity enable */
+ ! CMMU_SCTR_SE | /* not snoop enable */
+ ! CMMU_SCTR_PR ; /* not priority arbitration */
+ cmmu[cmmu_num].cmmu_regs->sctr = tmp;
+
+ tmp =
+ (0x00000 << 12) | /* segment table base address */
+ AREA_D_WT | /* write through */
+ AREA_D_G | /* global */
+ AREA_D_CI | /* cache inhibit */
+ ! AREA_D_TE ; /* not translation enable */
+ cmmu[cmmu_num].cmmu_regs->sapr =
+ cmmu[cmmu_num].cmmu_regs->uapr = tmp;
+
+
+#if SHADOW_BATC
+ cmmu[cmmu_num].batc[0] =
+ cmmu[cmmu_num].batc[1] =
+ cmmu[cmmu_num].batc[2] =
+ cmmu[cmmu_num].batc[3] =
+ cmmu[cmmu_num].batc[4] =
+ cmmu[cmmu_num].batc[5] =
+ cmmu[cmmu_num].batc[6] =
+ cmmu[cmmu_num].batc[7] = 0;
+#endif
+ cmmu[cmmu_num].cmmu_regs->bwp[0] =
+ cmmu[cmmu_num].cmmu_regs->bwp[1] =
+ cmmu[cmmu_num].cmmu_regs->bwp[2] =
+ cmmu[cmmu_num].cmmu_regs->bwp[3] =
+ cmmu[cmmu_num].cmmu_regs->bwp[4] =
+ cmmu[cmmu_num].cmmu_regs->bwp[5] =
+ cmmu[cmmu_num].cmmu_regs->bwp[6] =
+ cmmu[cmmu_num].cmmu_regs->bwp[7] = 0;
+ cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_CACHE_INV_ALL;
+ cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_SUPER_ALL;
+ cmmu[cmmu_num].cmmu_regs->scr = CMMU_FLUSH_USER_ALL;
+ }
+
+ /*
+ * Enable snooping...
+ */
+ for (cpu = 0; cpu < max_cpus; cpu++) {
+ if (!cpu_sets[cpu])
+ continue;
+
+ /*
+ * Enable snooping.
+ * We enable it for instruction cmmus as well so that we can have
+ * breakpoints, etc, and modify code.
+ */
+ if (cputyp == CPU_188) {
+ tmp =
+ ! CMMU_SCTR_PE | /* not parity enable */
+ CMMU_SCTR_SE | /* snoop enable */
+ ! CMMU_SCTR_PR ; /* not priority arbitration */
+ } else {
+ tmp =
+ ! CMMU_SCTR_PE | /* not parity enable */
+ ! CMMU_SCTR_PR ; /* not priority arbitration */
+ }
+ m18x_cmmu_set(CMMU_SCTR, tmp, 0, cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCTR, tmp, 0, cpu, INST_CMMU, 0, 0);
+
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_SUPER_ALL, ACCESS_VAL,
+ cpu, DATA_CMMU, CMMU_ACS_SUPER, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_SUPER_ALL, ACCESS_VAL,
+ cpu, INST_CMMU, CMMU_ACS_SUPER, 0);
+ }
+
+ /*
+ * Turn on some cache.
+ */
+ for (cpu = 0; cpu < max_cpus; cpu++) {
+ if (!cpu_sets[cpu])
+ continue;
+ /*
+ * Enable some caching for the instruction stream.
+		 * Can't cache data yet because device addresses can never
+		 * be cached, and we don't have the no-caching zones
+		 * set up yet.
+ */
+ tmp =
+ (0x00000 << 12) | /* segment table base address */
+ AREA_D_WT | /* write through */
+ AREA_D_G | /* global */
+ AREA_D_CI | /* cache inhibit */
+ ! AREA_D_TE ; /* not translation enable */
+ /*
+ REGS(cpu, INST_CMMU).sapr = tmp;
+ */
+ m18x_cmmu_set(CMMU_SAPR, tmp, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_SUPER_ALL, ACCESS_VAL|MODE_VAL,
+ cpu, DATA_CMMU, CMMU_ACS_SUPER, 0);
+ }
+}
+
+
+/*
+ * Just before poweroff or reset....
+ */
+void
+m18x_cmmu_shutdown_now(void)
+{
+ unsigned tmp;
+ unsigned cmmu_num;
+
+ /*
+ * Now set some state as we like...
+ */
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++) {
+ if (cputyp == CPU_188) {
+ tmp =
+			! CMMU_SCTR_PE |   /* not parity enable */
+			! CMMU_SCTR_SE |   /* not snoop enable */
+			! CMMU_SCTR_PR ;   /* not priority arbitration */
+		} else {
+			tmp =
+			! CMMU_SCTR_PE |   /* not parity enable */
+			! CMMU_SCTR_PR ;   /* not priority arbitration */
+ }
+
+ cmmu[cmmu_num].cmmu_regs->sctr = tmp;
+
+ tmp =
+ (0x00000 << 12) | /* segment table base address */
+		! AREA_D_WT | /* not write through */
+		! AREA_D_G | /* not global */
+		AREA_D_CI | /* cache inhibit */
+		! AREA_D_TE ; /* not translation enable */
+
+ cmmu[cmmu_num].cmmu_regs->sapr = tmp;
+ cmmu[cmmu_num].cmmu_regs->uapr = tmp;
+ }
+}
+
+#define PARITY_ENABLE
+/*
+ * enable parity
+ */
+void m18x_cmmu_parity_enable(void)
+{
+#ifdef PARITY_ENABLE
+ register int cmmu_num;
+
+ for (cmmu_num = 0; cmmu_num < max_cmmus; cmmu_num++) {
+ if (m18x_cmmu_alive(cmmu_num)) {
+ register unsigned val1 = m18x_cmmu_get(cmmu_num, CMMU_SCTR);
+
+ /*
+ cmmu[cmmu_num].cmmu_regs->sctr |= CMMU_SCTR_PE;
+ */
+ m18x_cmmu_set(CMMU_SCTR, val1 | CMMU_SCTR_PE, NUM_CMMU,
+ cmmu_num, 0, 0, 0);
+ }
+ }
+#endif /* PARITY_ENABLE */
+}
+
+/*
+ * Find out the CPU number from accessing CMMU
+ * Better be at splhigh, or even better, with interrupts
+ * disabled.
+ */
+#define ILLADDRESS U(0x0F000000) /* any faulty address */
+
+unsigned m18x_cmmu_cpu_number(void)
+{
+ register unsigned cmmu_no;
+ int i;
+
+
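+	/*
+	 * Strategy: clear every data CMMU's p-bus fault status, touch an
+	 * address that is guaranteed to fault, and see which CMMU latched
+	 * the error - that CMMU belongs to the CPU we are running on.
+	 */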
+ for (i=0; i < 10; i++) {
+ /* clear CMMU p-bus status registers */
+ for (cmmu_no = 0; cmmu_no < MAX_CMMUS; cmmu_no++) {
+ if (cmmu[cmmu_no].cmmu_alive == CMMU_AVAILABLE &&
+ cmmu[cmmu_no].which == DATA_CMMU)
+ cmmu[cmmu_no].cmmu_regs->pfSTATUSr = 0;
+ }
+
+ /* access faulting address */
+ badwordaddr((void *)ILLADDRESS);
+
+		/* check which CMMU reported the fault */
+		for (cmmu_no = 0; cmmu_no < MAX_CMMUS; cmmu_no++) {
+			if (cmmu[cmmu_no].cmmu_alive == CMMU_AVAILABLE &&
+			    cmmu[cmmu_no].which == DATA_CMMU &&
+			    cmmu[cmmu_no].cmmu_regs->pfSTATUSr & 0x70000) {
+				cmmu[cmmu_no].cmmu_regs->pfSTATUSr = 0; /* to be clean */
+				cmmu[cmmu_no].cmmu_alive = CMMU_MARRIED;
+				return cmmu[cmmu_no].cmmu_cpu;
+			}
+		}
+ }
+ panic("m18x_cmmu_cpu_number: could not determine my cpu number");
+ return 0; /* to make compiler happy */
+}
+
+/**
+ ** Functions that actually modify CMMU registers.
+ **/
+
+#if !DDB
+static
+#endif
+void
+m18x_cmmu_remote_set(unsigned cpu, unsigned r, unsigned data, unsigned x)
+{
+ *(volatile unsigned *)(r + (char*)&REGS(cpu,data)) = x;
+}
+
+/*
+ * cmmu_cpu_lock should be held when called if reading
+ * the CMMU_SCR or CMMU_SAR.
+ */
+#if !DDB
+static
+#endif
+unsigned
+m18x_cmmu_remote_get(unsigned cpu, unsigned r, unsigned data)
+{
+ return (*(volatile unsigned *)(r + (char*)&REGS(cpu,data)));
+}
+
+/* Needs no locking - read only registers */
+unsigned
+m18x_cmmu_get_idr(unsigned data)
+{
+ int cpu;
+ cpu = cpu_number();
+ return REGS(cpu,data).idr;
+}
+
+void
+m18x_cmmu_set_sapr(unsigned ap)
+{
+ int cpu;
+ cpu = cpu_number();
+
+ if (cache_policy & CACHE_INH)
+ ap |= AREA_D_CI;
+ /*
+ REGS(cpu, INST_CMMU).sapr = ap;
+ REGS(cpu, DATA_CMMU).sapr = ap;
+ */
+ m18x_cmmu_set(CMMU_SAPR, ap, ACCESS_VAL,
+ cpu, 0, CMMU_ACS_SUPER, 0);
+}
+
+void
+m18x_cmmu_remote_set_sapr(unsigned cpu, unsigned ap)
+{
+ if (cache_policy & CACHE_INH)
+ ap |= AREA_D_CI;
+
+ /*
+ REGS(cpu, INST_CMMU).sapr = ap;
+ REGS(cpu, DATA_CMMU).sapr = ap;
+ */
+ m18x_cmmu_set(CMMU_SAPR, ap, ACCESS_VAL,
+ cpu, 0, CMMU_ACS_SUPER, 0);
+}
+
+void
+m18x_cmmu_set_uapr(unsigned ap)
+{
+ int cpu;
+ cpu = cpu_number();
+
+	/* this functionality is also mimicked in m18x_cmmu_pmap_activate() */
+ /*
+ REGS(cpu, INST_CMMU).uapr = ap;
+ REGS(cpu, DATA_CMMU).uapr = ap;
+ */
+ m18x_cmmu_set(CMMU_UAPR, ap, ACCESS_VAL,
+ cpu, 0, CMMU_ACS_USER, 0);
+}
+
+/*
+ * Set batc entry number entry_no to value in
+ * the data or instruction cache depending on data.
+ *
+ * Except for the cmmu_init, this function, m18x_cmmu_set_pair_batc_entry,
+ * and m18x_cmmu_pmap_activate are the only functions which may set the
+ * batc values.
+ */
+void
+m18x_cmmu_set_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned data, /* 1 = data, 0 = instruction */
+ unsigned value) /* the value to stuff into the batc */
+{
+ /*
+ REGS(cpu,data).bwp[entry_no] = value;
+ */
+ m18x_cmmu_set(CMMU_BWP(entry_no), value, MODE_VAL|ACCESS_VAL,
+ cpu, data, CMMU_ACS_USER, 0);
+#if SHADOW_BATC
+ CMMU(cpu,data)->batc[entry_no] = value;
+#endif
+#if 0 /* was for debugging piece (peace?) of mind */
+ REGS(cpu,data).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu,data).scr = CMMU_FLUSH_USER_ALL;
+#endif
+}
+
+/*
+ * Set batc entry number entry_no to value in
+ * the data and instruction cache for the named CPU.
+ */
+void
+m18x_cmmu_set_pair_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned value) /* the value to stuff into the batc */
+{
+
+ /*
+ REGS(cpu,DATA_CMMU).bwp[entry_no] = value;
+ */
+ m18x_cmmu_set(CMMU_BWP(entry_no), value, MODE_VAL|ACCESS_VAL,
+ cpu, DATA_CMMU, CMMU_ACS_USER, 0);
+#if SHADOW_BATC
+ CMMU(cpu,DATA_CMMU)->batc[entry_no] = value;
+#endif
+ /*
+ REGS(cpu,INST_CMMU).bwp[entry_no] = value;
+ */
+ m18x_cmmu_set(CMMU_BWP(entry_no), value, MODE_VAL|ACCESS_VAL,
+ cpu, INST_CMMU, CMMU_ACS_USER, 0);
+#if SHADOW_BATC
+ CMMU(cpu,INST_CMMU)->batc[entry_no] = value;
+#endif
+
+#if 0 /* was for debugging piece (peace?) of mind */
+ REGS(cpu,INST_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu,INST_CMMU).scr = CMMU_FLUSH_USER_ALL;
+ REGS(cpu,DATA_CMMU).scr = CMMU_FLUSH_SUPER_ALL;
+ REGS(cpu,DATA_CMMU).scr = CMMU_FLUSH_USER_ALL;
+#endif
+}
+
+/**
+ ** Functions that invalidate TLB entries.
+ **/
+
+/*
+ * flush any TLB.
+ * Some functionality is mimicked in m18x_cmmu_pmap_activate.
+ */
+void
+m18x_cmmu_flush_remote_tlb(unsigned cpu, unsigned kernel, vm_offset_t vaddr, int size)
+{
+ register s = splhigh();
+
+ if (cpu > max_cpus) {
+ cpu = cpu_number();
+ }
+
+ if ((unsigned)size > M88K_PGBYTES) {
+ /*
+ REGS(cpu, INST_CMMU).scr =
+ REGS(cpu, DATA_CMMU).scr =
+ kernel ? CMMU_FLUSH_SUPER_ALL : CMMU_FLUSH_USER_ALL;
+ */
+
+ m18x_cmmu_set(CMMU_SCR, kernel ? CMMU_FLUSH_SUPER_ALL : CMMU_FLUSH_USER_ALL, ACCESS_VAL,
+ cpu, 0, kernel ? CMMU_ACS_SUPER : CMMU_ACS_USER, 0);
+ } else { /* a page or smaller */
+
+
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)vaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)vaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, vaddr, ADDR_VAL|ACCESS_VAL,
+ cpu, 0, kernel ? CMMU_ACS_SUPER : CMMU_ACS_USER, vaddr);
+
+ /*
+ REGS(cpu, INST_CMMU).scr =
+ REGS(cpu, DATA_CMMU).scr =
+ kernel ? CMMU_FLUSH_SUPER_PAGE : CMMU_FLUSH_USER_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SCR, kernel ? CMMU_FLUSH_SUPER_PAGE : CMMU_FLUSH_USER_PAGE, ADDR_VAL|ACCESS_VAL,
+ cpu, 0, kernel ? CMMU_ACS_SUPER : CMMU_ACS_USER, vaddr);
+ }
+
+ splx(s);
+}
+
+/*
+ * flush my personal tlb
+ */
+void
+m18x_cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size)
+{
+ int cpu;
+ cpu = cpu_number();
+ m18x_cmmu_flush_remote_tlb(cpu, kernel, vaddr, size);
+}
+
+/*
+ * New fast stuff for pmap_activate.
+ * Does what a few calls used to do.
+ * Only called from pmap.c's _pmap_activate().
+ */
+void
+m18x_cmmu_pmap_activate(
+ unsigned cpu,
+ unsigned uapr,
+ batc_template_t i_batc[BATC_MAX],
+ batc_template_t d_batc[BATC_MAX])
+{
+ int entry_no;
+
+
+ /* the following is from m18x_cmmu_set_uapr */
+ /*
+ REGS(cpu, INST_CMMU).uapr = uapr;
+ REGS(cpu, DATA_CMMU).uapr = uapr;
+ */
+ m18x_cmmu_set(CMMU_UAPR, uapr, ACCESS_VAL,
+ cpu, 0, CMMU_ACS_USER, 0);
+
+ for (entry_no = 0; entry_no < BATC_MAX; entry_no++) {
+ /*
+ REGS(cpu,INST_CMMU).bwp[entry_no] = i_batc[entry_no].bits;
+ REGS(cpu,DATA_CMMU).bwp[entry_no] = d_batc[entry_no].bits;
+ */
+ m18x_cmmu_set(CMMU_BWP(entry_no), i_batc[entry_no].bits, MODE_VAL|ACCESS_VAL,
+ cpu, INST_CMMU, CMMU_ACS_USER, 0);
+ m18x_cmmu_set(CMMU_BWP(entry_no), d_batc[entry_no].bits, MODE_VAL|ACCESS_VAL,
+ cpu, DATA_CMMU, CMMU_ACS_USER, 0);
+#if SHADOW_BATC
+ CMMU(cpu,INST_CMMU)->batc[entry_no] = i_batc[entry_no].bits;
+ CMMU(cpu,DATA_CMMU)->batc[entry_no] = d_batc[entry_no].bits;
+#endif
+ }
+
+
+ /*
+ * Flush the user TLB.
+ * IF THE KERNEL WILL EVER CARE ABOUT THE BATC ENTRIES,
+	 * THE SUPERVISOR TLBs SHOULD BE FLUSHED AS WELL.
+ */
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_USER_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_USER_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_USER_ALL, ACCESS_VAL,
+ cpu, 0, CMMU_ACS_USER, 0);
+}
+
+/**
+ ** Functions that invalidate caches.
+ **
+ ** Cache invalidates require physical addresses. Care must be exercised when
+ ** using segment invalidates. This implies that the starting physical address
+ ** plus the segment length should be invalidated. A typical mistake is to
+ ** extract the first physical page of a segment from a virtual address, and
+ ** then expecting to invalidate when the pages are not physically contiguous.
+ **
+ ** We don't push Instruction Caches prior to invalidate because they are not
+ ** snooped and never modified (so it doesn't matter which form of the
+ ** command we use).
+ **/
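+/*
+ * Each flush routine below picks the smallest operation covering the
+ * request: a cache line (size <= 16), a page (<= NBPG), a segment, or
+ * the whole cache when the size is negative or larger than NBSG.
+ */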
+/*
+ * flush both Instruction and Data caches
+ */
+void
+m18x_cmmu_flush_remote_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+
+#if !defined(BROKEN_MMU_MASK)
+
+ if (size < 0 || size > NBSG ) {
+
+
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, 0,
+ cpu, 0, 0, 0);
+ } else if (size <= 16) {
+
+
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, ADDR_VAL,
+ cpu, 0, 0, (unsigned)physaddr);
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_LINE , ADDR_VAL,
+ cpu, 0, 0, (unsigned)physaddr);
+ } else if (size <= NBPG) {
+
+
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, ADDR_VAL,
+ cpu, 0, 0, (unsigned)physaddr);
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_PAGE , ADDR_VAL,
+ cpu, 0, 0, (unsigned)physaddr);
+ } else {
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, 0,
+ cpu, 0, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_SEGMENT, 0,
+ cpu, 0, 0, 0);
+ }
+
+#else
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, 0,
+ cpu, 0, 0, 0);
+#endif /* !BROKEN_MMU_MASK */
+
+
+ splx(s);
+}
+
+/*
+ * flush both Instruction and Data caches
+ */
+void
+m18x_cmmu_flush_cache(vm_offset_t physaddr, int size)
+{
+ int cpu = cpu_number();
+ m18x_cmmu_flush_remote_cache(cpu, physaddr, size);
+}
+
+/*
+ * flush Instruction caches
+ */
+void
+m18x_cmmu_flush_remote_inst_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+
+
+#if !defined(BROKEN_MMU_MASK)
+ if (size < 0 || size > NBSG ) {
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ } else if (size <= 16) {
+
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_LINE, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+ } else if (size <= NBPG) {
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_PAGE, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+ } else {
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_SEGMENT, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ }
+#else
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+#endif /* !BROKEN_MMU_MASK */
+
+ splx(s);
+}
+
+/*
+ * flush Instruction caches
+ */
+void
+m18x_cmmu_flush_inst_cache(vm_offset_t physaddr, int size)
+{
+ int cpu;
+ cpu = cpu_number();
+ m18x_cmmu_flush_remote_inst_cache(cpu, physaddr, size);
+}
+
+void
+m18x_cmmu_flush_remote_data_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+
+
+#if !defined(BROKEN_MMU_MASK)
+ if (size < 0 || size > NBSG ) {
+
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else if (size <= 16) {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_LINE, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+
+ } else if (size <= NBPG) {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_PAGE, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+ } else {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_SEGMENT, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ }
+#else
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+#endif /* !BROKEN_MMU_MASK */
+
+
+
+ splx(s);
+}
+
+/*
+ * flush data cache
+ */
+void
+m18x_cmmu_flush_data_cache(vm_offset_t physaddr, int size)
+{
+ int cpu;
+ cpu = cpu_number();
+ m18x_cmmu_flush_remote_data_cache(cpu, physaddr, size);
+}
+
+/*
+ * sync dcache (and icache too)
+ */
+void
+m18x_cmmu_sync_cache(vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ int cpu;
+ cpu = cpu_number();
+
+
+
+#if !defined(BROKEN_MMU_MASK)
+ if (size < 0 || size > NBSG ) {
+ /*
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ } else if (size <= 16) {
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_LINE, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_LINE, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else if (size <= NBPG) {
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_PAGE, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_PAGE, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else {
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CB_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_SEGMENT, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_SEGMENT, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ }
+#else
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CB_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CB_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+#endif /* !BROKEN_MMU_MASK */
+
+
+
+ splx(s);
+}
+
+void
+m18x_cmmu_sync_inval_cache(vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ int cpu;
+ cpu = cpu_number();
+
+
+
+#if !defined(BROKEN_MMU_MASK)
+ if (size < 0 || size > NBSG ) {
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ } else if (size <= 16) {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_LINE, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_LINE, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else if (size <= NBPG) {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_PAGE, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_PAGE, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_SEGMENT, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_SEGMENT, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ }
+
+#else
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_CBI_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_CBI_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+#endif /* !BROKEN_MMU_MASK */
+
+
+
+ splx(s);
+}
+
+void
+m18x_cmmu_inval_cache(vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ int cpu;
+ cpu = cpu_number();
+
+
+
+#if !defined(BROKEN_MMU_MASK)
+ if (size < 0 || size > NBSG ) {
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ } else if (size <= 16) {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_LINE, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_LINE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_LINE, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else if (size <= NBPG) {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_PAGE, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_PAGE;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_PAGE, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ } else {
+ /*
+ REGS(cpu, DATA_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, INST_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_SEGMENT, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+ /*
+ REGS(cpu, INST_CMMU).sar = (unsigned)physaddr;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_SEGMENT;
+ */
+ m18x_cmmu_set(CMMU_SAR, (unsigned)physaddr, MODE_VAL|ADDR_VAL,
+ cpu, DATA_CMMU, 0, (unsigned)physaddr);
+		m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_SEGMENT, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ }
+#else
+ /*
+ REGS(cpu, DATA_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
+ REGS(cpu, INST_CMMU).scr = CMMU_FLUSH_CACHE_INV_ALL;
+ */
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_ALL, MODE_VAL,
+ cpu, DATA_CMMU, 0, 0);
+ m18x_cmmu_set(CMMU_SCR, CMMU_FLUSH_CACHE_INV_ALL, MODE_VAL,
+ cpu, INST_CMMU, 0, 0);
+#endif /* !BROKEN_MMU_MASK */
+
+
+
+ splx(s);
+}
+
+void
+m18x_dma_cachectl(vm_offset_t va, int size, int op)
+{
+ int count;
+
+#if !defined(BROKEN_MMU_MASK)
+ while (size) {
+
+ count = NBPG - ((int)va & PGOFSET);
+
+ if (size < count)
+ count = size;
+
+ if (op == DMA_CACHE_SYNC)
+ m18x_cmmu_sync_cache(kvtop(va), count);
+ else if (op == DMA_CACHE_SYNC_INVAL)
+ m18x_cmmu_sync_inval_cache(kvtop(va), count);
+ else
+ m18x_cmmu_inval_cache(kvtop(va), count);
+
+ va = (vm_offset_t)((int)va + count);
+ size -= count;
+ }
+#else
+
+ if (op == DMA_CACHE_SYNC)
+ m18x_cmmu_sync_cache(kvtop(va), size);
+ else if (op == DMA_CACHE_SYNC_INVAL)
+ m18x_cmmu_sync_inval_cache(kvtop(va), size);
+ else
+ m18x_cmmu_inval_cache(kvtop(va), size);
+#endif /* !BROKEN_MMU_MASK */
+}
+
+#if DDB
+union ssr {
+ unsigned bits;
+ struct {
+ unsigned :16,
+ ce:1,
+ be:1,
+ :4,
+ wt:1,
+ sp:1,
+ g:1,
+ ci:1,
+ :1,
+ m:1,
+ u:1,
+ wp:1,
+ bh:1,
+ v:1;
+ } field;
+};
+
+union cssp {
+ unsigned bits;
+ struct {
+ unsigned : 2,
+ l: 6,
+ d3: 1,
+ d2: 1,
+ d1: 1,
+ d0: 1,
+ vv3: 2,
+ vv2: 2,
+ vv1: 2,
+ vv0: 2,
+ :12;
+ } field;
+};
+
+union batcu {
+ unsigned bits;
+ struct { /* block address translation register */
+ unsigned int
+ lba:13, /* logical block address */
+ pba:13, /* physical block address */
+ s:1, /* supervisor */
+		wt:1,	/* write through */
+ g:1, /* global */
+ ci:1, /* cache inhibit */
+ wp:1, /* write protect */
+ v:1; /* valid */
+ } field;
+};
+
+ #define VV_EX_UNMOD 0
+ #define VV_EX_MOD 1
+ #define VV_SHARED_UNMOD 2
+ #define VV_INVALID 3
+
+ #define D(UNION, LINE) \
+ ((LINE) == 3 ? (UNION).field.d3 : \
+ ((LINE) == 2 ? (UNION).field.d2 : \
+ ((LINE) == 1 ? (UNION).field.d1 : \
+ ((LINE) == 0 ? (UNION).field.d0 : ~0))))
+ #define VV(UNION, LINE) \
+ ((LINE) == 3 ? (UNION).field.vv3 : \
+ ((LINE) == 2 ? (UNION).field.vv2 : \
+ ((LINE) == 1 ? (UNION).field.vv1 : \
+ ((LINE) == 0 ? (UNION).field.vv0 : ~0))))
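+
+ /* D() and VV() extract the per-line disable bit and line-validity
+    state (the VV_* values above) for cache line 0-3 of a cssp image. */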
+
+
+ #undef VEQR_ADDR
+ #define VEQR_ADDR 0
+
+/*
+ * Show (for debugging) how the given CMMU translates the given ADDRESS.
+ * If cmmu == -1, the data cmmu for the current cpu is used.
+ */
+void
+m18x_cmmu_show_translation(
+ unsigned address,
+ unsigned supervisor_flag,
+ unsigned verbose_flag,
+ int cmmu_num)
+{
+ /*
+ * A virtual address is split into three fields. Two are used as
+	 * indices into tables (segment and page), and one is an offset into
+ * a page of memory.
+ */
+ union {
+ unsigned bits;
+ struct {
+ unsigned segment_table_index:10,
+ page_table_index:10,
+ page_offset:12;
+ } field;
+ } virtual_address;
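+	/* e.g. va 0xC0321ABC -> segment index 0x300, page index 0x321,
+	   page offset 0xABC */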
+ unsigned value;
+
+ if (verbose_flag)
+ db_printf("-------------------------------------------\n");
+
+
+
+ /****** ACCESS PROPER CMMU or THREAD ***********/
+ #if 0 /* no thread */
+ if (thread != 0) {
+ /* the following tidbit from _pmap_activate in m88k/pmap.c */
+ register apr_template_t apr_data;
+ supervisor_flag = 0; /* thread implies user */
+
+ if (thread->task == 0) {
+ db_printf("[thread %x has empty task pointer]\n", thread);
+ return;
+ } else if (thread->task->map == 0) {
+ db_printf("[thread/task %x/%x has empty map pointer]\n",
+ thread, thread->task);
+ return;
+ } else if (thread->task->map->pmap == 0) {
+ db_printf("[thread/task/map %x/%x/%x has empty pmap pointer]\n",
+ thread, thread->task, thread->task->map);
+ return;
+ }
+ if (thread->task->map->pmap->lock.lock_data) {
+ db_printf("[Warning: thread %x's task %x's map %x's "
+ "pmap %x is locked]\n", thread, thread->task,
+ thread->task->map, thread->task->map->pmap);
+ }
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(thread->task->map->pmap->sdt_paddr);
+ apr_data.field.wt = 0;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 0;
+ apr_data.field.te = 1;
+ value = apr_data.bits;
+ if (verbose_flag) {
+ db_printf("[thread %x task %x map %x pmap %x UAPR is %x]\n",
+ thread, thread->task, thread->task->map,
+ thread->task->map->pmap, value);
+ }
+ } else
+ #endif /* 0 */
+ {
+ if (cmmu_num == -1) {
+ if (cpu_cmmu[0].pair[DATA_CMMU] == 0) {
+ db_printf("ack! can't figure my own data cmmu number.\n");
+ return;
+ }
+ cmmu_num = cpu_cmmu[0].pair[DATA_CMMU] - cmmu;
+ if (verbose_flag)
+ db_printf("The data cmmu for cpu#%d is cmmu#%d.\n",
+ 0, cmmu_num);
+ } else if (cmmu_num < 0 || cmmu_num >= MAX_CMMUS) {
+ db_printf("invalid cpu number [%d]... must be in range [0..%d]\n",
+ cmmu_num, MAX_CMMUS - 1);
+
+ return;
+ }
+
+ if (cmmu[cmmu_num].cmmu_alive == 0) {
+ db_printf("warning: cmmu %d is not alive.\n", cmmu_num);
+ #if 0
+
+ return;
+ #endif
+ }
+
+ if (!verbose_flag) {
+ if (!(cmmu[cmmu_num].cmmu_regs->sctr & CMMU_SCTR_SE))
+ db_printf("WARNING: snooping not enabled for CMMU#%d.\n",
+ cmmu_num);
+ } else {
+ int i;
+ for (i=0; i<MAX_CMMUS; i++)
+ if ((i == cmmu_num || cmmu[i].cmmu_alive) &&
+ (verbose_flag>1 || !(cmmu[i].cmmu_regs->sctr&CMMU_SCTR_SE))) {
+ db_printf("CMMU#%d (cpu %d %s) snooping %s\n", i,
+ cmmu[i].cmmu_cpu, cmmu[i].which ? "data" : "inst",
+ (cmmu[i].cmmu_regs->sctr & CMMU_SCTR_SE) ? "on":"OFF");
+ }
+ }
+
+ if (supervisor_flag)
+ value = cmmu[cmmu_num].cmmu_regs->sapr;
+ else
+ value = cmmu[cmmu_num].cmmu_regs->uapr;
+
+ }
+
+ /******* LOOK AT THE BATC ** (if not a thread) **************/
+ #if 0
+ #if SHADOW_BATC
+ if (thread == 0) {
+ int i;
+ union batcu batc;
+ for (i = 0; i < 8; i++) {
+ batc.bits = cmmu[cmmu_num].batc[i];
+ if (batc.field.v == 0) {
+ if (verbose_flag>1)
+ db_printf("cmmu #%d batc[%d] invalid.\n", cmmu_num, i);
+ } else {
+ db_printf("cmmu#%d batc[%d] v%08x p%08x", cmmu_num, i,
+ batc.field.lba << 18, batc.field.pba);
+ if (batc.field.s) db_printf(", supervisor");
+ if (batc.field.wt) db_printf(", wt.th");
+ if (batc.field.g) db_printf(", global");
+ if (batc.field.ci) db_printf(", cache inhibit");
+ if (batc.field.wp) db_printf(", write protect");
+ }
+ }
+ }
+ #endif
+ #endif /* 0 */
+
+ /******* SEE WHAT A PROBE SAYS (if not a thread) ***********/
+ #if 0
+ if (thread == 0)
+ #endif /* 0 */
+ {
+ union ssr ssr;
+ struct cmmu_regs *cmmu_regs = cmmu[cmmu_num].cmmu_regs;
+ cmmu_regs->sar = address;
+ cmmu_regs->scr = supervisor_flag ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
+ ssr.bits = cmmu_regs->ssr;
+ if (verbose_flag > 1)
+ db_printf("probe of 0x%08x returns ssr=0x%08x\n",
+ address, ssr.bits);
+ if (ssr.field.v)
+ db_printf("PROBE of 0x%08x returns phys=0x%x",
+ address, cmmu_regs->sar);
+ else
+ db_printf("PROBE fault at 0x%x", cmmu_regs->pfADDRr);
+ if (ssr.field.ce) db_printf(", copyback err");
+ if (ssr.field.be) db_printf(", bus err");
+ if (ssr.field.wt) db_printf(", writethrough");
+ if (ssr.field.sp) db_printf(", sup prot");
+ if (ssr.field.g) db_printf(", global");
+ if (ssr.field.ci) db_printf(", cache inhibit");
+ if (ssr.field.m) db_printf(", modified");
+ if (ssr.field.u) db_printf(", used");
+ if (ssr.field.wp) db_printf(", write prot");
+ if (ssr.field.bh) db_printf(", BATC");
+ db_printf(".\n");
+ }
+
+ /******* INTERPRET AREA DESCRIPTOR *********/
+ {
+ union apr_template apr_template;
+ apr_template.bits = value;
+ if (verbose_flag > 1) {
+ db_printf("CMMU#%d", cmmu_num);
+ #if 0
+ if (thread == 0)
+ db_printf("CMMU#%d", cmmu_num);
+ else
+ db_printf("THREAD %x", thread);
+ #endif /* 0 */
+ db_printf(" %cAPR is 0x%08x\n",
+ supervisor_flag ? 'S' : 'U', apr_template.bits);
+ }
+ db_printf("CMMU#%d", cmmu_num);
+ #if 0
+ if (thread == 0)
+ db_printf("CMMU#%d", cmmu_num);
+ else
+ db_printf("THREAD %x", thread);
+ #endif /* 0 */
+ db_printf(" %cAPR: SegTbl: 0x%x000p",
+ supervisor_flag ? 'S' : 'U', apr_template.field.st_base);
+ if (apr_template.field.wt) db_printf(", WTHRU");
+ else db_printf(", !wthru");
+ if (apr_template.field.g) db_printf(", GLOBAL");
+ else db_printf(", !global");
+ if (apr_template.field.ci) db_printf(", $INHIBIT");
+ else db_printf(", $ok");
+ if (apr_template.field.te) db_printf(", VALID");
+ else db_printf(", !valid");
+ db_printf(".\n");
+
+ /* if not valid, done now */
+ if (apr_template.field.te == 0) {
+ db_printf("<would report an error, valid bit not set>\n");
+
+ return;
+ }
+
+ value = apr_template.field.st_base << 12; /* now point to seg page */
+ }
+
+ /* translate value from physical to virtual */
+ if (verbose_flag)
+ db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
+ value += VEQR_ADDR;
+
+ virtual_address.bits = address;
+
+ /****** ACCESS SEGMENT TABLE AND INTERPRET SEGMENT DESCRIPTOR *******/
+ {
+ union sdt_entry_template std_template;
+ if (verbose_flag)
+ db_printf("will follow to entry %d of page at 0x%x...\n",
+ virtual_address.field.segment_table_index, value);
+ value |= virtual_address.field.segment_table_index *
+ sizeof(struct sdt_entry);
+
+ if (badwordaddr(value)) {
+ db_printf("ERROR: unable to access page at 0x%08x.\n", value);
+
+ return;
+ }
+
+ std_template.bits = *(unsigned *)value;
+ if (verbose_flag > 1)
+ db_printf("SEG DESC @0x%x is 0x%08x\n", value, std_template.bits);
+ db_printf("SEG DESC @0x%x: PgTbl: 0x%x000",
+ value, std_template.sdt_desc.table_addr);
+ if (std_template.sdt_desc.wt) db_printf(", WTHRU");
+ else db_printf(", !wthru");
+ if (std_template.sdt_desc.sup) db_printf(", S-PROT");
+ else db_printf(", UserOk");
+ if (std_template.sdt_desc.g) db_printf(", GLOBAL");
+ else db_printf(", !global");
+ if (std_template.sdt_desc.no_cache) db_printf(", $INHIBIT");
+ else db_printf(", $ok");
+ if (std_template.sdt_desc.prot) db_printf(", W-PROT");
+ else db_printf(", WriteOk");
+ if (std_template.sdt_desc.dtype) db_printf(", VALID");
+ else db_printf(", !valid");
+ db_printf(".\n");
+
+ /* if not valid, done now */
+ if (std_template.sdt_desc.dtype == 0) {
+ db_printf("<would report an error, STD entry not valid>\n");
+
+ return;
+ }
+
+ value = std_template.sdt_desc.table_addr << 12;
+ }
+
+ /* translate value from physical to virtual */
+ if (verbose_flag)
+ db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
+ value += VEQR_ADDR;
+
+ /******* PAGE TABLE *********/
+ {
+ union pte_template pte_template;
+ if (verbose_flag)
+ db_printf("will follow to entry %d of page at 0x%x...\n",
+ virtual_address.field.page_table_index, value);
+ value |= virtual_address.field.page_table_index *
+ sizeof(struct pt_entry);
+
+ if (badwordaddr(value)) {
+ db_printf("error: unable to access page at 0x%08x.\n", value);
+
+ return;
+ }
+
+ pte_template.bits = *(unsigned *)value;
+ if (verbose_flag > 1)
+ db_printf("PAGE DESC @0x%x is 0x%08x.\n", value, pte_template.bits);
+ db_printf("PAGE DESC @0x%x: page @%x000",
+ value, pte_template.pte.pfn);
+ if (pte_template.pte.wired) db_printf(", WIRE");
+ else db_printf(", !wire");
+ if (pte_template.pte.wt) db_printf(", WTHRU");
+ else db_printf(", !wthru");
+ if (pte_template.pte.sup) db_printf(", S-PROT");
+ else db_printf(", UserOk");
+ if (pte_template.pte.g) db_printf(", GLOBAL");
+ else db_printf(", !global");
+ if (pte_template.pte.ci) db_printf(", $INHIBIT");
+ else db_printf(", $ok");
+ if (pte_template.pte.modified) db_printf(", MOD");
+ else db_printf(", !mod");
+ if (pte_template.pte.pg_used) db_printf(", USED");
+ else db_printf(", !used");
+ if (pte_template.pte.prot) db_printf(", W-PROT");
+ else db_printf(", WriteOk");
+ if (pte_template.pte.dtype) db_printf(", VALID");
+ else db_printf(", !valid");
+ db_printf(".\n");
+
+ /* if not valid, done now */
+ if (pte_template.pte.dtype == 0) {
+ db_printf("<would report an error, PTE entry not valid>\n");
+
+ return;
+ }
+
+ value = pte_template.pte.pfn << 12;
+ if (verbose_flag)
+ db_printf("will follow to byte %d of page at 0x%x...\n",
+ virtual_address.field.page_offset, value);
+ value |= virtual_address.field.page_offset;
+
+ if (badwordaddr(value)) {
+ db_printf("error: unable to access page at 0x%08x.\n", value);
+
+ return;
+ }
+ }
+
+ /* translate value from physical to virtual */
+ if (verbose_flag)
+ db_printf("[%x physical is %x virtual]\n", value, value + VEQR_ADDR);
+ value += VEQR_ADDR;
+
+ db_printf("WORD at 0x%x is 0x%08x.\n", value, *(unsigned *)value);
+
+}
+
+
+void
+m18x_cmmu_cache_state(unsigned addr, unsigned supervisor_flag)
+{
+ static char *vv_name[4] =
+ {"exclu-unmod", "exclu-mod", "shared-unmod", "invalid"};
+ int cmmu_num;
+
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++) {
+ union ssr ssr;
+ union cssp cssp;
+ struct cmmu_regs *R;
+ unsigned tag, line;
+ if (!cmmu[cmmu_num].cmmu_alive)
+ continue;
+ R = cmmu[cmmu_num].cmmu_regs;
+ db_printf("cmmu #%d %s cmmu for cpu %d.\n", cmmu_num,
+ cmmu[cmmu_num].which ? "data" : "inst",
+ cmmu[cmmu_num].cmmu_cpu);
+ R->sar = addr;
+ R->scr = supervisor_flag ? CMMU_PROBE_SUPER : CMMU_PROBE_USER;
+
+ ssr.bits = R->ssr;
+ if (!ssr.field.v) {
+ db_printf("PROBE of 0x%08x faults.\n",addr);
+ continue;
+ }
+ db_printf("PROBE of 0x%08x returns phys=0x%x", addr, R->sar);
+
+ tag = R->sar & ~0xfff;
+ cssp.bits = R->cssp;
+
+ /* check to see if any of the tags for the set match the address */
+ for (line = 0; line < 4; line++) {
+ if (VV(cssp, line) == VV_INVALID) {
+ db_printf("line %d invalid.\n", line);
+ continue; /* line is invalid */
+ }
+ if (D(cssp, line)) {
+ db_printf("line %d disabled.\n", line);
+ continue; /* line is disabled */
+ }
+
+ if ((R->ctp[line] & ~0xfff) != tag) {
+ db_printf("line %d address tag is %x.\n", line,
+ (R->ctp[line] & ~0xfff));
+ continue;
+ }
+ db_printf("found in line %d as %08x (%s).\n",
+ line, R->cdp[line], vv_name[VV(cssp, line)]);
+ }
+ }
+
+}
+
+void
+m18x_show_cmmu_info(unsigned addr)
+{
+ int cmmu_num;
+ m18x_cmmu_cache_state(addr, 1);
+
+ for (cmmu_num = 0; cmmu_num < MAX_CMMUS; cmmu_num++)
+ if (cmmu[cmmu_num].cmmu_alive) {
+ db_printf("cmmu #%d %s cmmu for cpu %d: ", cmmu_num,
+ cmmu[cmmu_num].which ? "data" : "inst",
+ cmmu[cmmu_num].cmmu_cpu);
+ m18x_cmmu_show_translation(addr, 1, 0, cmmu_num);
+ }
+}
+#endif /* end if DDB */
diff --git a/sys/arch/mvme88k/mvme88k/m197_cmmu.c b/sys/arch/mvme88k/mvme88k/m197_cmmu.c
new file mode 100644
index 00000000000..f104b42a9d9
--- /dev/null
+++ b/sys/arch/mvme88k/mvme88k/m197_cmmu.c
@@ -0,0 +1,809 @@
+/*
+ * Copyright (c) 1998 Steve Murphree, Jr.
+ * Copyright (c) 1996 Nivas Madhur
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Nivas Madhur.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id: m197_cmmu.c,v 1.1 1999/09/27 19:13:22 smurph Exp $
+ */
+
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1991 Carnegie Mellon University
+ * Copyright (c) 1991 OMRON Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON AND OMRON ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND OMRON DISCLAIM ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+#ifdef MVME197
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/simplelock.h>
+#include <machine/board.h>
+#include <machine/cpus.h>
+#include <machine/cpu_number.h>
+#include <machine/m88110.h>
+
+#define CMMU_DEBUG 1
+
+#ifdef DEBUG
+ #define DB_CMMU 0x4000 /* MMU debug */
+unsigned int debuglevel = 0;
+ #define dprintf(_L_,_X_) { if (debuglevel & (_L_)) { unsigned int psr = disable_interrupts_return_psr(); printf("%d: ", cpu_number()); printf _X_; set_psr(psr); } }
+#else
+ #define dprintf(_L_,_X_)
+#endif
+#undef SHADOW_BATC /* don't use BATCs for now XXX nivas */
+
+/*
+ * CMMU(cpu,data) Is the cmmu struct for the named cpu's indicated cmmu.
+ * REGS(cpu,data) is the actual register structure.
+ */
+
+#define CMMU(cpu, data) cpu_cmmu[(cpu)].pair[(data)?DATA_CMMU:INST_CMMU]
+#define REGS(cpu, data) (*CMMU(cpu, data)->cmmu_regs)
+
+/*
+ * This lock protects the cmmu SAR and SCR's; other ports
+ * can be accessed without locking it
+ *
+ * May be used from "db_interface.c".
+ */
+
+extern unsigned cache_policy;
+extern unsigned cpu_sets[];
+extern unsigned number_cpus;
+extern unsigned master_cpu;
+extern int max_cpus, max_cmmus;
+extern int cpu_cmmu_ratio;
+int init_done;
+
+/* FORWARDS */
+void m197_setup_cmmu_config(void);
+void m197_setup_board_config(void);
+
+#ifdef CMMU_DEBUG
+void
+m197_show_apr(unsigned value)
+{
+ union apr_template apr_template;
+ apr_template.bits = value;
+
+ printf("table @ 0x%x000", apr_template.field.st_base);
+ if (apr_template.field.wt) printf(", writethrough");
+ if (apr_template.field.g) printf(", global");
+ if (apr_template.field.ci) printf(", cache inhibit");
+ if (apr_template.field.te) printf(", valid");
+ else printf(", not valid");
+ printf("\n");
+}
+
+void
+m197_show_sctr(unsigned value)
+{
+ union {
+ unsigned bits;
+ struct {
+ unsigned :16,
+ pe: 1,
+ se: 1,
+ pr: 1,
+ :13;
+ } fields;
+ } sctr;
+ sctr.bits = value;
+ printf("%spe, %sse %spr]\n",
+ sctr.fields.pe ? "" : "!",
+ sctr.fields.se ? "" : "!",
+ sctr.fields.pr ? "" : "!");
+}
+#endif
+
+void
+m197_setup_board_config(void)
+{
+ /* dummy routine */
+ m197_setup_cmmu_config();
+ return;
+}
+
+void
+m197_setup_cmmu_config(void)
+{
+ /* we can print something here... */
+ cpu_sets[0] = 1; /* This cpu installed... */
+ return;
+}
+
+void m197_cmmu_dump_config(void)
+{
+ /* dummy routine */
+ return;
+}
+
+/* To be implemented as a macro for speedup - XXX-smurph */
+static void
+m197_cmmu_store(int mmu, int reg, unsigned val)
+{
+}
+
+int m197_cmmu_alive(int mmu)
+{
+ return 1;
+}
+
+unsigned m197_cmmu_get(int mmu, int reg)
+{
+	return 0;	/* XXX stub - nothing to fetch yet */
+}
+
+/*
+ * This function is called by the MMU module and pokes values
+ * into the CMMU's registers.
+ */
+void m197_cmmu_set(int reg, unsigned val, int flags,
+ int num, int mode, int access, vm_offset_t addr)
+{
+ return;
+}
+
+#ifdef DDB
+/*
+ * Used by DDB for cache probe functions
+ */
+unsigned m197_cmmu_get_by_mode(int cpu, int mode)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Should only be called after the calling cpu knows its cpu
+ * number and master/slave status.  Should be called first
+ * by the master, before the slaves are started.
+*/
+void m197_cpu_configuration_print(int master)
+{
+ int pid = read_processor_identification_register();
+ int proctype = (pid & 0xff00) >> 8;
+ int procvers = (pid & 0xe) >> 1;
+ int mmu, cpu = cpu_number();
+ struct simplelock print_lock;
+
+ if (master)
+ simple_lock_init(&print_lock);
+
+ simple_lock(&print_lock);
+
+ printf("Processor %d: ", cpu);
+ if (proctype)
+ printf("Architectural Revision 0x%x UNKNOWN CPU TYPE Version 0x%x\n",
+ proctype, procvers);
+ else
+ printf("M88110 Version 0x%x\n", procvers);
+
+ simple_unlock(&print_lock);
+ return;
+}
+
+/*
+ * CMMU initialization routine
+ */
+void m197_load_patc(int entry, vm_offset_t vaddr, vm_offset_t paddr, int kernel);
+
+void
+m197_cmmu_init(void)
+{
+ int i;
+ unsigned tmp;
+ extern void *kernel_sdt;
+ unsigned lba, pba, value;
+ init_done = 0;
+
+ /* clear BATCs */
+ for (i=0; i<8; i++) {
+ m197_cmmu_set_pair_batc_entry(0, i, 0);
+ }
+ /* clear PATCs */
+ for (i=0; i<32; i++) {
+ m197_load_patc(i, 0, 0, 0);
+ }
+ set_ictl(CMMU_ICTL_DID /* Double instruction disable */
+ | CMMU_ICTL_MEN
+ | CMMU_ICTL_HTEN);
+
+
+ set_dctl(CMMU_DCTL_MEN
+ | CMMU_DCTL_HTEN);
+
+ set_icmd(CMMU_ICMD_INV_ITIC); /* clear instruction cache */
+ set_dcmd(CMMU_DCMD_INV_ALL); /* clear data cache */
+
+ tmp = (0x00000 << 12) | /* segment table base address */
+ AREA_D_WT | /* write through */
+ AREA_D_G | /* global */
+ ! AREA_D_TE ; /* not translation enable */
+
+ set_isap(tmp);
+ set_dsap(tmp);
+
+ set_isr(0);
+ set_ilar(0);
+ set_ipar(0);
+ set_dsr(0);
+ set_dlar(0);
+ set_dpar(0);
+
+ lba = pba = (unsigned)&kernel_sdt;
+ lba &= ~0x7FFFF;
+ pba = pba >> 13;
+ pba &= ~0x3F;
+ value = lba | pba | 0x20 | 0x01;
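+	/* 0x20 = supervisor, 0x01 = valid, per the BATC word layout */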
+
+ m197_cmmu_set_pair_batc_entry(0, 0, value);
+
+}
+
+
+/*
+ * Just before poweroff or reset....
+ */
+void
+m197_cmmu_shutdown_now(void)
+{
+	/* nothing to do */
+}
+
+/*
+ * enable parity
+ */
+void m197_cmmu_parity_enable(void)
+{
+#ifdef PARITY_ENABLE
+#endif /* PARITY_ENABLE */
+}
+
+/*
+ * Find out the CPU number from accessing CMMU
+ * Better be at splhigh, or even better, with interrupts
+ * disabled.
+ */
+#define ILLADDRESS U(0x0F000000) /* any faulty address */
+
+unsigned m197_cmmu_cpu_number(void)
+{
+ return 0; /* to make compiler happy */
+}
+
+/**
+ ** Functions that actually modify CMMU registers.
+ **/
+#if !DDB
+static
+#endif
+void
+m197_cmmu_remote_set(unsigned cpu, unsigned r, unsigned data, unsigned x)
+{
+ panic("m197_cmmu_remote_set() called!\n");
+}
+
+/*
+ * cmmu_cpu_lock should be held when called if reading
+ * the CMMU_SCR or CMMU_SAR.
+ */
+#if !DDB
+static
+#endif
+unsigned
+m197_cmmu_remote_get(unsigned cpu, unsigned r, unsigned data)
+{
+ panic("m197_cmmu_remote_get() called!\n");
+ return 0;
+}
+
+/* Needs no locking - read only registers */
+unsigned
+m197_cmmu_get_idr(unsigned data)
+{
+ return 0; /* todo */
+}
+
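+/*
+ * Probe the MMU for a supervisor mapping of va.
+ * Returns 2 on a BATC hit (instruction side only), 1 on an ATC/table
+ * hit, 0 if the probe faults.
+ */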
+int
+probe_mmu(vm_offset_t va, int data)
+{
+ unsigned result;
+ if (data) {
+ set_dsar((unsigned)va);
+ set_dcmd(CMMU_DCMD_PRB_SUPR);
+ result = get_dsr();
+ if (result & CMMU_DSR_PH)
+ return 1;
+ else
+ return 0;
+ } else {
+ set_isar((unsigned)va);
+ set_icmd(CMMU_ICMD_PRB_SUPR);
+ result = get_isr();
+ if (result & CMMU_ISR_BH)
+ return 2;
+ else if (result & CMMU_ISR_PH)
+ return 1;
+ else
+ return 0;
+ }
+ return 0;
+}
+
+void
+m197_cmmu_set_sapr(unsigned ap)
+{
+ int result;
+ set_icmd(CMMU_ICMD_INV_SATC);
+ set_dcmd(CMMU_DCMD_INV_SATC);
+ /* load an entry pointing to seg table into PATC */
+ /* Don't forget to set it valid */
+
+ m197_load_patc(0, (vm_offset_t)ap, (vm_offset_t)(ap | 0x1), 1);
+	if (!(result = probe_mmu((vm_offset_t)ap, 1))) {
+		printf("m197_cmmu_set_sapr: probe of area pointer failed!\n");
+ return;
+ } else {
+ if (result == 2)
+ printf("area pointer is in BATC.\n");
+ if (result == 1)
+ printf("area pointer is in PATC.\n");
+ }
+
+ set_isap(ap);
+ set_dsap(ap);
+}
+
+void
+m197_cmmu_remote_set_sapr(unsigned cpu, unsigned ap)
+{
+ m197_cmmu_set_sapr(ap);
+}
+
+void
+m197_cmmu_set_uapr(unsigned ap)
+{
+ set_iuap(ap);
+ set_duap(ap);
+}
+
+/*
+ * Set batc entry number entry_no to value in
+ * the data or instruction cache depending on data.
+ *
+ * Except for the cmmu_init, this function, m197_cmmu_set_pair_batc_entry,
+ * and m197_cmmu_pmap_activate are the only functions which may set the
+ * batc values.
+ */
+void
+m197_cmmu_set_batc_entry(
+ unsigned cpu,
+ unsigned entry_no,
+ unsigned data, /* 1 = data, 0 = instruction */
+ unsigned value) /* the value to stuff */
+{
+ if (data) {
+ set_dir(entry_no);
+ set_dbp(value);
+ } else {
+ set_iir(entry_no);
+ set_ibp(value);
+ }
+}
+
+/*
+ * Set batc entry number entry_no to value in
+ * the data and instruction cache for the named CPU.
+ */
+void
+m197_cmmu_set_pair_batc_entry(unsigned cpu, unsigned entry_no, unsigned value)
+/* the value to stuff into the batc */
+{
+ m197_cmmu_set_batc_entry(cpu, entry_no, 1, value);
+ m197_cmmu_set_batc_entry(cpu, entry_no, 0, value);
+}
+
+/**
+ ** Functions that invalidate TLB entries.
+ **/
+
+/*
+ * flush any tlb
+ * Some of this functionality is mimicked in m197_cmmu_pmap_activate.
+ */
+void
+m197_cmmu_flush_remote_tlb(unsigned cpu, unsigned kernel, vm_offset_t vaddr, int size)
+{
+ register s = splhigh();
+ if (kernel) {
+ set_icmd(CMMU_ICMD_INV_SATC);
+ set_dcmd(CMMU_DCMD_INV_SATC);
+ } else {
+ set_icmd(CMMU_ICMD_INV_UATC);
+ set_dcmd(CMMU_DCMD_INV_UATC);
+ }
+ splx(s);
+}
+
+/*
+ * flush my personal tlb
+ */
+void
+m197_cmmu_flush_tlb(unsigned kernel, vm_offset_t vaddr, int size)
+{
+ int cpu;
+ cpu = cpu_number();
+ m197_cmmu_flush_remote_tlb(cpu, kernel, vaddr, size);
+}
+
+/*
+ * New fast stuff for pmap_activate.
+ * Does what a few calls used to do.
+ * Only called from pmap.c's _pmap_activate().
+ */
+void
+m197_cmmu_pmap_activate(
+ unsigned cpu,
+ unsigned uapr,
+ batc_template_t i_batc[BATC_MAX],
+ batc_template_t d_batc[BATC_MAX])
+{
+ int entry_no;
+
+ m197_cmmu_set_uapr(uapr);
+
+ /*
+ for (entry_no = 0; entry_no < 8; entry_no++) {
+ m197_cmmu_set_batc_entry(cpu, entry_no, 0, i_batc[entry_no].bits);
+ m197_cmmu_set_batc_entry(cpu, entry_no, 1, d_batc[entry_no].bits);
+ }
+ */
+ /*
+ * Flush the user TLB.
+ * IF THE KERNEL WILL EVER CARE ABOUT THE BATC ENTRIES,
+ * THE SUPERVISOR TLBs SHOULD BE FLUSHED AS WELL.
+ */
+ set_icmd(CMMU_ICMD_INV_UATC);
+ set_dcmd(CMMU_DCMD_INV_UATC);
+}
+
+/**
+ ** Functions that invalidate caches.
+ **
+ ** Cache invalidates require physical addresses. Care must be exercised when
+ ** using segment invalidates. This implies that the starting physical address
+ ** plus the segment length should be invalidated. A typical mistake is to
+ ** extract the first physical page of a segment from a virtual address and
+ ** then expect the whole range to be invalidated, even though the pages
+ ** are not physically contiguous.
+ **
+ ** We don't push Instruction Caches prior to invalidate because they are not
+ ** snooped and never modified (so I guess it doesn't matter which form
+ ** of the command we use).
+ **/
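+/*
+ * A sketch of the safe pattern implied above (illustration only, not
+ * called anywhere): translate and flush page by page, since virtually
+ * contiguous pages need not be physically contiguous:
+ *
+ *	for (off = 0; off < size; off += NBPG)
+ *		m197_cmmu_flush_data_cache(kvtop(va + off), NBPG);
+ */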
+/*
+ * flush both Instruction and Data caches
+ */
+void
+m197_cmmu_flush_remote_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ set_icmd(CMMU_ICMD_INV_ITIC);
+ set_dcmd(CMMU_DCMD_FLUSH_ALL_INV);
+ splx(s);
+}
+
+/*
+ * flush both Instruction and Data caches
+ */
+void
+m197_cmmu_flush_cache(vm_offset_t physaddr, int size)
+{
+ int cpu = cpu_number();
+ m197_cmmu_flush_remote_cache(cpu, physaddr, size);
+}
+
+/*
+ * flush Instruction caches
+ */
+void
+m197_cmmu_flush_remote_inst_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+
+ set_icmd(CMMU_ICMD_INV_ITIC);
+
+ splx(s);
+}
+
+/*
+ * flush Instruction caches
+ */
+void
+m197_cmmu_flush_inst_cache(vm_offset_t physaddr, int size)
+{
+ int cpu;
+ cpu = cpu_number();
+ m197_cmmu_flush_remote_inst_cache(cpu, physaddr, size);
+}
+
+/*
+ * flush data cache
+ */
+void
+m197_cmmu_flush_remote_data_cache(int cpu, vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ set_dcmd(CMMU_DCMD_FLUSH_ALL_INV);
+ splx(s);
+}
+
+/*
+ * flush data cache
+ */
+void
+m197_cmmu_flush_data_cache(vm_offset_t physaddr, int size)
+{
+ int cpu;
+ cpu = cpu_number();
+ m197_cmmu_flush_remote_data_cache(cpu, physaddr, size);
+}
+
+/*
+ * sync dcache (and icache too)
+ */
+void
+m197_cmmu_sync_cache(vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ int cpu;
+ cpu = cpu_number();
+ /* set_mmureg(CMMU_ICTL, CMMU_ICMD_INV_TIC); */
+ set_dcmd(CMMU_DCMD_FLUSH_ALL);
+
+ splx(s);
+}
+
+void
+m197_cmmu_sync_inval_cache(vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ int cpu;
+ cpu = cpu_number();
+
+ set_dcmd(CMMU_DCMD_FLUSH_ALL_INV);
+ splx(s);
+}
+
+void
+m197_cmmu_inval_cache(vm_offset_t physaddr, int size)
+{
+ register s = splhigh();
+ int cpu;
+ cpu = cpu_number();
+ set_icmd(CMMU_ICMD_INV_ITIC);
+ set_dcmd(CMMU_DCMD_INV_ALL);
+ splx(s);
+}
+
+void
+m197_dma_cachectl(vm_offset_t va, int size, int op)
+{
+ int count;
+ if (op == DMA_CACHE_SYNC)
+ m197_cmmu_sync_cache(kvtop(va), size);
+ else if (op == DMA_CACHE_SYNC_INVAL)
+ m197_cmmu_sync_inval_cache(kvtop(va), size);
+ else
+ m197_cmmu_inval_cache(kvtop(va), size);
+}
+
+#ifdef DDB
+
+ #define VV_EX_UNMOD 0
+ #define VV_EX_MOD 1
+ #define VV_SHARED_UNMOD 2
+ #define VV_INVALID 3
+
+ #define D(UNION, LINE) \
+ ((LINE) == 3 ? (UNION).field.d3 : \
+ ((LINE) == 2 ? (UNION).field.d2 : \
+ ((LINE) == 1 ? (UNION).field.d1 : \
+ ((LINE) == 0 ? (UNION).field.d0 : ~0))))
+ #define VV(UNION, LINE) \
+ ((LINE) == 3 ? (UNION).field.vv3 : \
+ ((LINE) == 2 ? (UNION).field.vv2 : \
+ ((LINE) == 1 ? (UNION).field.vv1 : \
+ ((LINE) == 0 ? (UNION).field.vv0 : ~0))))
+
+
+ #undef VEQR_ADDR
+ #define VEQR_ADDR 0
+
+/*
+ * Show (for debugging) how the given CMMU translates the given ADDRESS.
+ * If cmmu == -1, the data cmmu for the current cpu is used.
+ */
+void
+m197_cmmu_show_translation(
+ unsigned address,
+ unsigned supervisor_flag,
+ unsigned verbose_flag,
+ int cmmu_num)
+{
+ /*
+ * A virtual address is split into three fields. Two are used as
+	 * indices into tables (segment and page), and one is an offset into
+ * a page of memory.
+ */
+ union {
+ unsigned bits;
+ struct {
+ unsigned segment_table_index:10,
+ page_table_index:10,
+ page_offset:12;
+ } field;
+ } virtual_address;
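+	/*
+	 * Worked example of the 10/10/12 split: va 0xC0012345 has
+	 * segment_table_index 0x300 (va >> 22), page_table_index 0x012
+	 * ((va >> 12) & 0x3ff) and page_offset 0x345 (va & 0xfff).
+	 */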
+ unsigned value;
+
+ if (verbose_flag)
+ db_printf("-------------------------------------------\n");
+
+}
+
+
+void
+m197_cmmu_cache_state(unsigned addr, unsigned supervisor_flag)
+{
+ static char *vv_name[4] =
+ {"exclu-unmod", "exclu-mod", "shared-unmod", "invalid"};
+ int cmmu_num;
+}
+
+void
+m197_show_cmmu_info(unsigned addr)
+{
+ int cmmu_num;
+ m197_cmmu_cache_state(addr, 1);
+}
+#endif /* end if DDB */
+
+#define MSDTENT(addr, va) ((sdt_entry_t *)(addr + SDTIDX(va)))
+#define MPDTENT(addr, va) ((sdt_entry_t *)(addr + PDTIDX(va)))
+void
+m197_load_patc(int entry, vm_offset_t vaddr, vm_offset_t paddr, int kernel)
+{
+ unsigned lpa, pfa, i;
+
+ lpa = (unsigned)vaddr & 0xFFFFF000;
+ if (kernel) {
+ lpa |= 0x01;
+ }
+ pfa = (unsigned)paddr;
+ i = entry << 5;
+ set_iir(i);
+ set_ippu(lpa);
+ set_ippl(pfa);
+ set_dir(i);
+ set_dppu(lpa);
+	set_dppl(pfa);
+}
+
+#define SDT_WP(sd_ptr) ((sd_ptr)->prot != 0)
+#define SDT_SUP(sd_ptr) ((sd_ptr)->sup != 0)
+#define PDT_WP(pte_ptr) ((pte_ptr)->prot != 0)
+#define PDT_SUP(pte_ptr) ((pte_ptr)->sup != 0)
+
+int
+m197_table_search(pmap_t map, vm_offset_t virt, int write, int kernel, int data)
+{
+ sdt_entry_t *sdt;
+ pt_entry_t *pte;
+	unsigned lpa, i;
+	static int entry_num = 0;
+
+ if (map == (pmap_t)0)
+ panic("m197_table_search: pmap is NULL");
+
+ sdt = SDTENT(map, virt);
+
+ /*
+	 * Check whether the page table exists.
+ */
+ if (!SDT_VALID(sdt))
+ return (4); /* seg fault */
+
+ /* OK, it's valid. Now check permissions. */
+	if (!kernel && SDT_SUP(sdt))
+		return (6);	/* Supervisor Violation */
+	if (write && SDT_WP(sdt))
+		return (7);	/* Write Violation */
+
+	pte = (pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr) << PDT_SHIFT) + PDTIDX(virt);
+ /*
+	 * Check whether the page frame exists.
+ */
+ if (!PDT_VALID(pte))
+ return (5); /* Page Fault */
+
+ /* OK, it's valid. Now check permissions. */
+	if (!kernel && PDT_SUP(pte))
+		return (6);	/* Supervisor Violation */
+	if (write && PDT_WP(pte))
+		return (7);	/* Write Violation */
+ /* If we get here, load the PATC. */
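+	/*
+	 * PATC slots are recycled round-robin via the static counter;
+	 * the index register takes the slot number in bits 5 and up,
+	 * as in m197_load_patc() above.
+	 */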
+	if (entry_num >= 32)
+		entry_num = 0;
+ lpa = (unsigned)virt & 0xFFFFF000;
+ if (kernel)
+ lpa |= 0x01;
+ i = entry_num << 5;
+ if (data) {
+ set_dir(i); /* set PATC index */
+ set_dppu(lpa); /* set logical address */
+		set_dppl((unsigned)pte);	/* set page frame address */
+ } else {
+ set_iir(i);
+ set_ippu(lpa);
+ set_ippl((unsigned)pte);
+ }
+	entry_num++;			/* advance the round-robin cursor */
+	return 0;
+}
+
+#endif /* MVME197 */
+
+
diff --git a/sys/arch/mvme88k/mvme88k/m88100_fp.S b/sys/arch/mvme88k/mvme88k/m88100_fp.S
index 54bdf2e737d..c80d4eeb15f 100644
--- a/sys/arch/mvme88k/mvme88k/m88100_fp.S
+++ b/sys/arch/mvme88k/mvme88k/m88100_fp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: m88100_fp.S,v 1.5 1999/02/09 06:36:29 smurph Exp $ */
+/* $OpenBSD: m88100_fp.S,v 1.6 1999/09/27 19:13:22 smurph Exp $ */
/*
* Mach Operating System
* Copyright (c) 1991 Carnegie Mellon University
@@ -36,6 +36,7 @@
#include <machine/trap.h>
#include <machine/asm.h>
+#if defined(MVME187) || defined(MVME188)
#define psr cr1
#define spsr cr2
#define ssb cr3
@@ -124,8 +125,8 @@
text
align 8
- global _Xfp_precise
-_Xfp_precise:
+ global _m88100_Xfp_precise
+_m88100_Xfp_precise:
or r29, r3, r0 /* r29 is now the E.F. */
subu r31, r31, 40
st r1, r31, 32
@@ -2303,3 +2304,4 @@ Iwritesingle:
st r11, r3 [r2]
/* Return.. */
jmp r1
+#endif /* defined(MVME187) || defined(MVME188) */
diff --git a/sys/arch/mvme88k/mvme88k/m88110_fp.S b/sys/arch/mvme88k/mvme88k/m88110_fp.S
new file mode 100644
index 00000000000..365e74ad12f
--- /dev/null
+++ b/sys/arch/mvme88k/mvme88k/m88110_fp.S
@@ -0,0 +1,266 @@
+/* $OpenBSD: m88110_fp.S,v 1.1 1999/09/27 19:13:23 smurph Exp $ */
+/*
+ * Copyright (c) 1999 Steve Murphree, Jr.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Floating point trouble routines */
+/*
+ * August 1, 1999
+ * smurph@OpenBSD.org
+ *
+ * Additions to support MVME197 (mc88110) floating point routines.
+ */
+
+/*
+ * This is cheesy. I'm using the TCFP features of the mc88110
+ * because it was easy. It is not 100% IEEE. But it may be
+ * close enough. We shall see... XXXsmurph
+ * Err... TCFP == "Time Critical Floating Point"
+ *
+ * The only two SFU1 exceptions that can occur in TCFP mode are:
+ * 1) Unimplemented Floating Point Instruction
+ * 2) Floating Point Privilege Violation
+ */
+
+#define LOCORE
+#define ASSEMBLER
+
+#include "assym.s"
+#include <machine/trap.h>
+#include <machine/asm.h>
+
+#ifdef MVME197
+ .text
+ENTRY(m88110_Xfp_precise)
+ or r29, r3, r0 /* r29 is now the E.F. */
+ subu r31, r31, 40
+ st r1, r31, 32
+ st r29, r31, 36
+
+ ld r2, r29, EF_FPSR * 4
+ ld r3, r29, EF_FPCR * 4
+ ld r4, r29, EF_FPECR * 4
+
+	/* Load into r1 the return address for the exception handlers.      */
+	/* Looking at FPECR, branch to the appropriate exception handler.   */
+	/* However, if none of the exception bits are set, then a floating  */
+	/* point instruction was issued with the floating point unit        */
+	/* disabled.  This will cause an unimplemented opcode exception.    */
+
+1:	bb0	6,r4, 2f	/* branch to m88110_FPuimp if bit set */
+ br m88110_FPuimp
+2: bb0 5,r4, 3f /* branch to m88110_FPpriviol if bit set */
+ br m88110_FPpriviol
+3:
+ or.u r4, r4, 0xffff
+
+LABEL(m88110_FPuimp)
+ subu r31,r31,40 /* allocate stack */
+ st r1,r31,36 /* save return address */
+ st r3,r31,32 /* save exception frame */
+ or r2,r0,T_FPEPFLT /* load trap type */
+ or r3, r29, r0
+ bsr _trap /* trap */
+ ld r1,r31,36 /* recover return address */
+ addu r31,r31,40 /* deallocate stack */
+ jmp r1
+
+LABEL(m88110_FPpriviol)
+ subu r31,r31,40 /* allocate stack */
+ st r1,r31,36 /* save return address */
+ st r3,r31,32 /* save exception frame */
+ or r2,r0,T_PRIVINFLT /* load trap type */
+ or r3, r29, r0
+ bsr _trap /* trap */
+ ld r1,r31,36 /* recover return address */
+ addu r31,r31,40 /* deallocate stack */
+ jmp r1
+
+ENTRY(set_tcfp)
+
+ or.u r2, r0, hi16(0x200000)
+ or r2, r2, lo16(0x200000)
+ jmp.n r1
+ fstcr r2, fcr0
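+	/*
+	 * 0x200000 is assumed to be the TCFP-enable bit in fcr0 (FPCR);
+	 * with it set the FPU delivers default results instead of taking
+	 * the full set of IEEE exception traps.
+	 */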
+
+
+/*************************************************************************
+ *************************************************************************
+ **
+ ** void set_mmureg(unsigned reg_const, unsigned value);
+ **
+ ** Sets the given mmu register on the mc88110 chip to the given value.
+ **
+ ** Input:
+ ** r1 return address
+ ** r2 the register
+ ** r3 the value
+ **
+ ** Other registers used:
+ ** r5 jumptable address
+ ** r6 calculated jumptable address
+ **
+ ** Output:
+ ** none
+ **/
+ENTRY(set_mmureg)
+ /* calculate address to jump to */
+ or.u r5, r0, hi16(regmark)
+ or r5, r5, lo16(regmark)
+ mul r2, r2, 0x08
+ /* and go there (after adjusting the offset via ".n") */
+ jmp.n r6
+ subu r6, r5, r2
+
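+	/*
+	 * Each table entry below is 8 bytes (jmp.n + stcr) and is indexed
+	 * backwards from regmark: reg_const 0 lands on cr25, 1 on cr26,
+	 * ... 11 on cr36, then 12 on cr40 and so on up to 23 for cr51
+	 * (the mc88110 has no cr37-cr39, hence the gap).
+	 */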
+ jmp.n r1
+ stcr r3, cr51
+ jmp.n r1
+ stcr r3, cr50
+ jmp.n r1
+ stcr r3, cr49
+ jmp.n r1
+ stcr r3, cr48
+ jmp.n r1
+ stcr r3, cr47
+ jmp.n r1
+ stcr r3, cr46
+ jmp.n r1
+ stcr r3, cr45
+ jmp.n r1
+ stcr r3, cr44
+ jmp.n r1
+ stcr r3, cr43
+ jmp.n r1
+ stcr r3, cr42
+ jmp.n r1
+ stcr r3, cr41
+ jmp.n r1
+ stcr r3, cr40
+ jmp.n r1
+ stcr r3, cr36
+ jmp.n r1
+ stcr r3, cr35
+ jmp.n r1
+ stcr r3, cr34
+ jmp.n r1
+ stcr r3, cr33
+ jmp.n r1
+ stcr r3, cr32
+ jmp.n r1
+ stcr r3, cr31
+ jmp.n r1
+ stcr r3, cr30
+ jmp.n r1
+ stcr r3, cr29
+ jmp.n r1
+ stcr r3, cr28
+ jmp.n r1
+ stcr r3, cr27
+ jmp.n r1
+ stcr r3, cr26
+regmark: jmp.n r1
+ stcr r3, cr25
+
+/*************************************************************************
+ *************************************************************************
+ **
+ ** unsigned get_mmureg(unsigned reg_const);
+ **
+ ** Get the given mmu register's value.
+ **
+ ** Input:
+ ** r1 return address
+ ** r2 the register/return value
+ **
+ ** Other registers used:
+ ** r5 jumptable address
+ ** r6 calculated jumptable address
+ **
+ ** Output:
+ ** r2 return value
+ **/
+ENTRY(get_mmureg)
+ /* calculate address to jump to */
+ or.u r5, r0, hi16(regmark2)
+ or r5, r5, lo16(regmark2)
+ mul r2, r2, 0x08
+ /* and go there (after adjusting the offset via ".n") */
+ jmp.n r6
+ subu r6, r5, r2
+
+ jmp.n r1
+ ldcr r2, cr51
+ jmp.n r1
+ ldcr r2, cr50
+ jmp.n r1
+ ldcr r2, cr49
+ jmp.n r1
+ ldcr r2, cr48
+ jmp.n r1
+ ldcr r2, cr47
+ jmp.n r1
+ ldcr r2, cr46
+ jmp.n r1
+ ldcr r2, cr45
+ jmp.n r1
+ ldcr r2, cr44
+ jmp.n r1
+ ldcr r2, cr43
+ jmp.n r1
+ ldcr r2, cr42
+ jmp.n r1
+ ldcr r2, cr41
+ jmp.n r1
+ ldcr r2, cr40
+ jmp.n r1
+ ldcr r2, cr36
+ jmp.n r1
+ ldcr r2, cr35
+ jmp.n r1
+ ldcr r2, cr34
+ jmp.n r1
+ ldcr r2, cr33
+ jmp.n r1
+ ldcr r2, cr32
+ jmp.n r1
+ ldcr r2, cr31
+ jmp.n r1
+ ldcr r2, cr30
+ jmp.n r1
+ ldcr r2, cr29
+ jmp.n r1
+ ldcr r2, cr28
+ jmp.n r1
+ ldcr r2, cr27
+ jmp.n r1
+ ldcr r2, cr26
+regmark2: jmp.n r1
+ ldcr r2, cr25
+#endif /* MVME197 */
+
diff --git a/sys/arch/mvme88k/mvme88k/m88110_mmu.S b/sys/arch/mvme88k/mvme88k/m88110_mmu.S
new file mode 100644
index 00000000000..61519af53c7
--- /dev/null
+++ b/sys/arch/mvme88k/mvme88k/m88110_mmu.S
@@ -0,0 +1,159 @@
+#include <machine/asm.h>
+
+#ifdef MVME197
+/* set routines */
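+/*
+ * These helpers rely on the m88k delayed branch: "jmp.n r1" executes the
+ * ldcr/stcr in the delay slot before control returns to the caller.  The
+ * area/user pointer updates (set_isap and friends) flush the pipeline
+ * first so no stale translations are in flight when the pointer changes.
+ */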
+ENTRY(set_icmd)
+ jmp.n r1
+ stcr r2, ICMD
+ENTRY(set_ictl)
+ jmp.n r1
+ stcr r2, ICTL
+ENTRY(set_isar)
+ jmp.n r1
+ stcr r2, ISAR
+ENTRY(set_isap)
+ FLUSH_PIPELINE
+ stcr r2, ISAP
+ jmp r1
+ENTRY(set_iuap)
+ FLUSH_PIPELINE
+ stcr r2, IUAP
+ jmp r1
+ENTRY(set_iir)
+ jmp.n r1
+ stcr r2, IIR
+ENTRY(set_ibp)
+ jmp.n r1
+ stcr r2, IBP
+ENTRY(set_ippu)
+ jmp.n r1
+ stcr r2, IPPU
+ENTRY(set_ippl)
+ jmp.n r1
+ stcr r2, IPPL
+ENTRY(set_isr)
+ jmp.n r1
+ stcr r2, ISR
+ENTRY(set_ilar)
+ jmp.n r1
+ stcr r2, ILAR
+ENTRY(set_ipar)
+ jmp.n r1
+ stcr r2, IPAR
+ENTRY(set_dcmd)
+ jmp.n r1
+ stcr r2, DCMD
+ENTRY(set_dctl)
+ jmp.n r1
+ stcr r2, DCTL
+ENTRY(set_dsar)
+ stcr r2, DSAR
+ NOP
+ jmp r1
+ENTRY(set_dsap)
+ FLUSH_PIPELINE
+ NOP
+ stcr r2, DSAP
+ jmp r1
+ENTRY(set_duap)
+ FLUSH_PIPELINE
+ NOP
+ stcr r2, DUAP
+ jmp r1
+ENTRY(set_dir)
+ jmp.n r1
+ stcr r2, DIR
+ENTRY(set_dbp)
+ jmp.n r1
+ stcr r2, DBP
+ENTRY(set_dppu)
+ jmp.n r1
+ stcr r2, DPPU
+ENTRY(set_dppl)
+ jmp.n r1
+ stcr r2, DPPL
+ENTRY(set_dsr)
+ jmp.n r1
+ stcr r2, DSR
+ENTRY(set_dlar)
+ jmp.n r1
+ stcr r2, DLAR
+ENTRY(set_dpar)
+ jmp.n r1
+ stcr r2, DPAR
+
+/* get routines */
+ENTRY(get_icmd)
+ jmp.n r1
+ ldcr r2, ICMD
+ENTRY(get_ictl)
+ jmp.n r1
+ ldcr r2, ICTL
+ENTRY(get_isar)
+ jmp.n r1
+ ldcr r2, ISAR
+ENTRY(get_isap)
+ jmp.n r1
+ ldcr r2, ISAP
+ENTRY(get_iuap)
+ jmp.n r1
+ ldcr r2, IUAP
+ENTRY(get_iir)
+ jmp.n r1
+ ldcr r2, IIR
+ENTRY(get_ibp)
+ jmp.n r1
+ ldcr r2, IBP
+ENTRY(get_ippu)
+ jmp.n r1
+ ldcr r2, IPPU
+ENTRY(get_ippl)
+ jmp.n r1
+ ldcr r2, IPPL
+ENTRY(get_isr)
+ jmp.n r1
+ ldcr r2, ISR
+ENTRY(get_ilar)
+ jmp.n r1
+ ldcr r2, ILAR
+ENTRY(get_ipar)
+ jmp.n r1
+ ldcr r2, IPAR
+ENTRY(get_dcmd)
+ jmp.n r1
+ ldcr r2, DCMD
+ENTRY(get_dctl)
+ jmp.n r1
+ ldcr r2, DCTL
+ENTRY(get_dsar)
+ jmp.n r1
+ ldcr r2, DSAR
+ENTRY(get_dsap)
+ jmp.n r1
+ ldcr r2, DSAP
+ENTRY(get_duap)
+ jmp.n r1
+ ldcr r2, DUAP
+ENTRY(get_dir)
+ jmp.n r1
+ ldcr r2, DIR
+ENTRY(get_dbp)
+ jmp.n r1
+ ldcr r2, DBP
+ENTRY(get_dppu)
+ jmp.n r1
+ ldcr r2, DPPU
+ENTRY(get_dppl)
+ jmp.n r1
+ ldcr r2, DPPL
+ENTRY(get_dsr)
+ jmp.n r1
+ ldcr r2, DSR
+ENTRY(get_dlar)
+ jmp.n r1
+ ldcr r2, DLAR
+ENTRY(get_dpar)
+ jmp.n r1
+ ldcr r2, DPAR
+#endif /* MVME197 */
+
diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c
index 8b3c35570a1..247f163b31c 100644
--- a/sys/arch/mvme88k/mvme88k/machdep.c
+++ b/sys/arch/mvme88k/mvme88k/machdep.c
@@ -1,6 +1,6 @@
-/* $OpenBSD: machdep.c,v 1.16 1999/09/03 18:01:30 art Exp $ */
+/* $OpenBSD: machdep.c,v 1.17 1999/09/27 19:13:23 smurph Exp $ */
/*
- * Copyright (c) 1998 Steve Murphree, Jr.
+ * Copyright (c) 1998, 1999 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
*
@@ -62,13 +62,13 @@
#include <sys/msgbuf.h>
#include <sys/syscallargs.h>
#ifdef SYSVMSG
-#include <sys/msg.h>
+ #include <sys/msg.h>
#endif
#ifdef SYSVSEM
-#include <sys/sem.h>
+ #include <sys/sem.h>
#endif
#ifdef SYSVSHM
-#include <sys/shm.h>
+ #include <sys/shm.h>
#endif
#include <sys/ioctl.h>
#include <sys/exec.h>
@@ -76,11 +76,12 @@
#include <sys/errno.h>
#include <net/netisr.h>
+#include <mvme88k/dev/sysconreg.h>
#include <mvme88k/dev/pcctworeg.h>
#include <machine/cpu.h>
+#include <machine/cpu_number.h>
+#include <machine/asm_macro.h> /* enable/disable interrupts */
#include <machine/reg.h>
-#include <machine/psl.h>
-#include <machine/board.h>
#include <machine/trap.h>
#include <machine/bug.h>
#include <machine/prom.h>
@@ -96,29 +97,50 @@
#include <machine/m88100.h> /* DMT_VALID */
#include <machine/m882xx.h> /* CMMU stuff */
#if DDB
-# include <machine/db_machdep.h>
+ #include <machine/db_machdep.h>
#endif /* DDB */
#if DDB
-#define DEBUG_MSG db_printf
+ #define DEBUG_MSG db_printf
#else
-#define DEBUG_MSG printf
+ #define DEBUG_MSG printf
#endif /* DDB */
static int waittime = -1;
struct intrhand *intr_handlers[256];
+vm_offset_t interrupt_stack[MAX_CPUS] = {0};
+
+/* machine dependant function pointers. */
+struct funcp mdfp;
+
+/* forwards */
+void m88100_Xfp_precise(void);
+void m88110_Xfp_precise(void);
+void setupiackvectors(void);
unsigned char *ivec[] = {
+ (unsigned char *)0xFFFE0003, /* not used, no such thing as int 0 */
+ (unsigned char *)0xFFFE0007,
+ (unsigned char *)0xFFFE000B,
+ (unsigned char *)0xFFFE000F,
+ (unsigned char *)0xFFFE0013,
+ (unsigned char *)0xFFFE0017,
+ (unsigned char *)0xFFFE001B,
+ (unsigned char *)0xFFFE001F,
+};
- (unsigned char *)0xFFFE0003, /* not used, no such thing as int 0 */
- (unsigned char *)0xFFFE0007,
- (unsigned char *)0xFFFE000B,
- (unsigned char *)0xFFFE000F,
- (unsigned char *)0xFFFE0013,
- (unsigned char *)0xFFFE0017,
- (unsigned char *)0xFFFE001B,
- (unsigned char *)0xFFFE001F,
+#ifdef MVME188
+/*
+ * *int_mask_reg[CPU]
+ * Points to the hardware interrupt status register for each CPU.
+ */
+volatile unsigned int *int_mask_reg[MAX_CPUS] = {
+ (volatile unsigned int *)IEN0_REG,
+ (volatile unsigned int *)IEN1_REG,
+ (volatile unsigned int *)IEN2_REG,
+ (volatile unsigned int *)IEN3_REG
};
+#endif /* MVME188 */
u_char *int_mask_level = (u_char *)INT_MASK_LEVEL;
u_char *int_pri_level = (u_char *)INT_PRI_LEVEL;
@@ -126,19 +148,18 @@ u_char *iackaddr;
volatile u_char *pcc2intr_mask;
volatile u_char *pcc2intr_ipl;
volatile vm_offset_t bugromva;
+volatile vm_offset_t kernelva;
+volatile vm_offset_t utilva;
volatile vm_offset_t sramva;
volatile vm_offset_t obiova;
volatile vm_offset_t extiova;
-volatile vm_offset_t vmea16va;
-volatile vm_offset_t vmea32d16va;
-
-int physmem; /* available physical memory, in pages */
-int cold;
+int physmem; /* available physical memory, in pages */
+int cold; /* boot process flag */
vm_offset_t avail_end, avail_start, avail_next;
int msgbufmapped = 0;
-int foodebug = 0;
-int longformat = 1;
+int foodebug = 0; /* for size_memory() */
+int longformat = 1; /* for regdump() */
int BugWorks = 0;
/*
* safepri is a safe priority for sleep to set for a spin-wait
@@ -146,101 +167,98 @@ int BugWorks = 0;
*/
int safepri = 0;
-#if 1 /*XXX_FUTURE*/
/*
* iomap stuff is for managing chunks of virtual address space that
* can be allocated to IO devices.
- * XXX none of the drivers use this at this time. IO address is mapped
- * so that pa == va. XXX nivas
+ * VMEbus drivers use this now.  Only on-board IO devices' addresses
+ * are mapped so that pa == va. XXX smurph.
*/
-vm_offset_t iomapbase;
+void *iomapbase;
struct map *iomap;
vm_map_t iomap_map;
-int niomap;
-#endif
+int niomap;
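+/*
+ * Assumed usage sketch (not part of this diff): a VMEbus driver carves
+ * IO virtual space out of the resource map with
+ *
+ *	va = rmalloc(iomap, (long)len);
+ *
+ * and returns it with rmfree(iomap, (long)len, va).
+ */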
/*
* Declare these as initialized data so we can patch them.
*/
-int nswbuf = 0;
+int nswbuf = 0;
#ifdef NBUF
-int nbuf = NBUF;
+int nbuf = NBUF;
#else
-int nbuf = 0;
+int nbuf = 0;
#endif
#ifdef BUFPAGES
-int bufpages = BUFPAGES;
+int bufpages = BUFPAGES;
#else
-int bufpages = 0;
+int bufpages = 0;
#endif
int *nofault;
caddr_t allocsys __P((caddr_t));
-
+
/*
* Info for CTL_HW
*/
-char machine[] = "mvme88k"; /* cpu "architecture" */
-char cpu_model[120];
-extern char version[];
+char machine[] = "mvme88k"; /* cpu "architecture" */
+char cpu_model[120];
+extern unsigned master_cpu;
+extern char version[];
struct bugenv bugargs;
-struct kernel{
- void *entry;
- void *symtab;
- void *esym;
- int bflags;
- int bdev;
- char *kname;
- void *smini;
- void *emini;
- void *end_load;
+struct kernel {
+ void *entry;
+ void *symtab;
+ void *esym;
+ int bflags;
+ int bdev;
+ char *kname;
+ void *smini;
+ void *emini;
+ void *end_load;
+} kflags;
char *esym;
-int boothowto; /* read in kern/bootstrap */
+int boothowto; /* read in locore.S */
+int bootdev; /* read in locore.S */
int cputyp;
-int cpuspeed = 25; /* 25 MHZ XXX should be read from NVRAM */
+int cpuspeed = 25; /* 25 MHZ XXX should be read from NVRAM */
#ifndef roundup
-#define roundup(value, stride) (((unsigned)(value) + (stride) - 1) & ~((stride)-1))
+ #define roundup(value, stride) (((unsigned)(value) + (stride) - 1) & ~((stride)-1))
#endif /* roundup */
-vm_size_t mem_size;
-vm_size_t rawmem_size;
-vm_offset_t first_addr = 0;
-vm_offset_t last_addr = 0;
+vm_size_t mem_size;
+vm_size_t rawmem_size;
+vm_offset_t first_addr = 0;
+vm_offset_t last_addr = 0;
-vm_offset_t avail_start, avail_next, avail_end;
-vm_offset_t virtual_avail, virtual_end;
-vm_offset_t pcc2consvaddr, clconsvaddr;
-vm_offset_t miniroot;
+vm_offset_t avail_start, avail_next, avail_end;
+vm_offset_t virtual_avail, virtual_end;
+vm_offset_t pcc2consvaddr, clconsvaddr;
+vm_offset_t miniroot;
-void *end_loaded;
-int bootdev;
-int no_symbols = 1;
-
-struct proc *lastproc;
-pcb_t curpcb;
+void *end_loaded;
+int bootdev;
+int no_symbols = 1;
+struct proc *lastproc;
+pcb_t curpcb;
extern struct user *proc0paddr;
-extern void *etherbuf;
-extern int etherlen;
-
-/* XXX this is to fake out the console routines, while booting. */
-#include "bugtty.h"
-#if NBUGTTY > 0
- int bugttycnprobe __P((struct consdev *));
- int bugttycninit __P((struct consdev *));
- void bugttycnputc __P((dev_t, int));
- int bugttycngetc __P((dev_t));
- extern void nullcnpollc __P((dev_t, int));
- static struct consdev bugcons =
- { NULL, NULL, bugttycngetc, bugttycnputc,
- nullcnpollc, makedev(14,0), 1 };
-#endif /* NBUGTTY */
-
-void cmmu_init(void);
+
+/*
+ * XXX this is to fake out the console routines, while
+ * booting. New and improved! :-) smurph
+ */
+int bootcnprobe __P((struct consdev *));
+int bootcninit __P((struct consdev *));
+void bootcnputc __P((dev_t, char));
+int bootcngetc __P((dev_t));
+extern void nullcnpollc __P((dev_t, int));
+#define bootcnpollc nullcnpollc
+static struct consdev bootcons = {
+	NULL, NULL, bootcngetc, bootcnputc,
+	bootcnpollc, makedev(14,0), 1
+};
+void cmmu_init(void);
/*
* Console initialization: called early on from main,
* before vm init or startup. Do enough configuration
@@ -249,18 +267,18 @@ void cmmu_init(void);
void
consinit()
{
- extern struct consdev *cn_tab;
- /*
- * Initialize the console before we print anything out.
- */
+ extern struct consdev *cn_tab;
+ /*
+ * Initialize the console before we print anything out.
+ */
- cn_tab = NULL;
- cninit();
+ cn_tab = NULL;
+ cninit();
#if defined (DDB)
- kdb_init();
- if (boothowto & RB_KDB)
- Debugger();
+ kdb_init();
+ if (boothowto & RB_KDB)
+ Debugger();
#endif
}
@@ -272,96 +290,99 @@ consinit()
vm_offset_t
size_memory(void)
{
- volatile unsigned int *look;
- unsigned int *max;
- extern char *end;
+ volatile unsigned int *look;
+ unsigned int *max;
+ extern char *end;
#define PATTERN 0x5a5a5a5a
#define STRIDE (4*1024) /* 4k at a time */
#define Roundup(value, stride) (((unsigned)(value) + (stride) - 1) & ~((stride)-1))
-
- /*
- * count it up.
- */
- max = (void*)MAXPHYSMEM;
- for (look = (void*)Roundup(end, STRIDE); look < max;
- look = (int*)((unsigned)look + STRIDE)) {
- unsigned save;
-
- /* if can't access, we've reached the end */
- if (foodebug) printf("%x\n", look);
- if (badwordaddr((vm_offset_t)look)) {
+#if 1
+ /*
+ * count it up.
+ */
+ max = (void*)MAXPHYSMEM;
+ for (look = (void*)Roundup(end, STRIDE); look < max;
+ look = (int*)((unsigned)look + STRIDE)) {
+ unsigned save;
+
+ /* if can't access, we've reached the end */
+ if (foodebug) printf("%x\n", look);
+ if (badwordaddr((vm_offset_t)look)) {
#if defined(DEBUG)
- printf("%x\n", look);
+ printf("%x\n", look);
#endif
- look = (int *)((int)look - STRIDE);
- break;
- }
-
- /*
- * If we write a value, we expect to read the same value back.
- * We'll do this twice, the 2nd time with the opposite bit
- * pattern from the first, to make sure we check all bits.
- */
- save = *look;
- if (*look = PATTERN, *look != PATTERN)
- break;
- if (*look = ~PATTERN, *look != ~PATTERN)
- break;
- *look = save;
- }
-
- physmem = btoc(trunc_page((unsigned)look)); /* in pages */
- return(trunc_page((unsigned)look));
+ look = (int *)((int)look - STRIDE);
+ break;
+ }
+
+ /*
+ * If we write a value, we expect to read the same value back.
+ * We'll do this twice, the 2nd time with the opposite bit
+ * pattern from the first, to make sure we check all bits.
+ */
+ save = *look;
+ if (*look = PATTERN, *look != PATTERN)
+ break;
+ if (*look = ~PATTERN, *look != ~PATTERN)
+ break;
+ *look = save;
+ }
+#else
+ look = (unsigned int *)0x03FFF000; /* temp hack to fake 32Meg on MVME188 */
+#endif
+ physmem = btoc(trunc_page((unsigned)look)); /* in pages */
+ return (trunc_page((unsigned)look));
}
int
getcpuspeed(void)
{
- struct bugbrdid brdid;
- int speed = 0;
- int i, c;
- bugbrdid(&brdid);
- for (i=0; i<4; i++){
- c=(unsigned char)brdid.speed[i];
- c-= '0';
- speed *=10;
- speed +=c;
- }
- speed = speed / 100;
- return(speed);
+ struct bugbrdid brdid;
+ int speed = 0;
+ int i, c;
+ bugbrdid(&brdid);
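+	/*
+	 * The BUG board-ID speed field is four ASCII digits, e.g. "2500";
+	 * the divide by 100 below turns that into 25 (MHz).
+	 */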
+ for (i=0; i<4; i++) {
+ c=(unsigned char)brdid.speed[i];
+ c-= '0';
+ speed *=10;
+ speed +=c;
+ }
+ speed = speed / 100;
+ return (speed);
}
int
getscsiid(void)
{
- struct bugbrdid brdid;
- int scsiid = 0;
- int i, c;
- bugbrdid(&brdid);
- for (i=0; i<2; i++){
- c=(unsigned char)brdid.scsiid[i];
- c-= '0';
- scsiid *=10;
- scsiid +=c;
- }
- return(7);
+ struct bugbrdid brdid;
+ int scsiid = 0;
+ int i, c;
+ bugbrdid(&brdid);
+ for (i=0; i<2; i++) {
+ c=(unsigned char)brdid.scsiid[i];
+ scsiid *=10;
+ c-= '0';
+ scsiid +=c;
+ }
+ printf("SCSI ID = %d\n", scsiid);
+ return (7); /* hack! */
}
void
identifycpu()
{
- cpuspeed = getcpuspeed();
- sprintf(cpu_model, "Motorola MVME%x %dMhz", cputyp, cpuspeed);
- printf("\nModel: %s\n", cpu_model);
+ cpuspeed = getcpuspeed();
+	sprintf(cpu_model, "Motorola MVME%x %dMHz", cputyp, cpuspeed);
+ printf("\nModel: %s\n", cpu_model);
}
-/* The following two functions assume UPAGES == 3 */
+/* The following two functions assume UPAGES == 4 */
#if UPAGES != 4
-#error "UPAGES changed?"
+ #error "UPAGES changed?"
#endif
#if USPACE != (UPAGES * NBPG)
-#error "USPACE changed?"
+ #error "USPACE changed?"
#endif
/*
@@ -372,259 +393,293 @@ void
save_u_area(struct proc *p, vm_offset_t va)
{
int i;
- for (i=0; i<UPAGES; i++) {
- p->p_md.md_upte[i] = kvtopte(va + (i * NBPG))->bits;
- }
+ for (i=0; i<UPAGES; i++) {
+ p->p_md.md_upte[i] = kvtopte(va + (i * NBPG))->bits;
+ }
}
void
load_u_area(struct proc *p)
{
- pte_template_t *t;
-
- int i;
- for (i=0; i<UPAGES; i++) {
- t = kvtopte(UADDR + (i * NBPG));
- t->bits = p->p_md.md_upte[i];
- }
- for (i=0; i<UPAGES; i++) {
- cmmu_flush_tlb(1, UADDR + (i * NBPG), NBPG);
- }
+ pte_template_t *t;
+
+ int i;
+ for (i=0; i<UPAGES; i++) {
+ t = kvtopte(UADDR + (i * NBPG));
+ t->bits = p->p_md.md_upte[i];
+ }
+ for (i=0; i<UPAGES; i++) {
+ cmmu_flush_tlb(1, UADDR + (i * NBPG), NBPG);
+ }
+}
+
+/*
+ * Set up real-time clocks.
+ * These function pointers are set in dev/clock.c and dev/sclock.c
+ */
+void
+cpu_initclocks(void)
+{
+#ifdef DEBUG
+ printf("cpu_initclocks(): ");
+#endif
+ if (mdfp.clock_init_func != NULL){
+#ifdef DEBUG
+ printf("[interval clock] ");
+#endif
+ (*mdfp.clock_init_func)();
+ }
+ if (mdfp.statclock_init_func != NULL){
+#ifdef DEBUG
+ printf("[statistics clock]");
+#endif
+ (*mdfp.statclock_init_func)();
+ }
+#ifdef DEBUG
+ printf("\n");
+#endif
+}
+
+void
+setstatclockrate(int newhz)
+{
+ /* function stub */
}
+
void
cpu_startup()
{
- caddr_t v;
- int sz, i;
- vm_size_t size;
- int base, residual;
- vm_offset_t minaddr, maxaddr, uarea_pages;
- extern vm_offset_t miniroot;
- /*
- * Initialize error message buffer (at end of core).
- * avail_end was pre-decremented in m1x7_bootstrap().
- */
-
- for (i = 0; i < btoc(sizeof(struct msgbuf)); i++)
- pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
+ caddr_t v;
+ int sz, i;
+ vm_size_t size;
+ int base, residual;
+ vm_offset_t minaddr, maxaddr, uarea_pages;
+ extern vm_offset_t miniroot;
+ /*
+ * Initialize error message buffer (at end of core).
+ * avail_end was pre-decremented in mvme_bootstrap().
+ */
+
+ for (i = 0; i < btoc(sizeof(struct msgbuf)); i++)
+ pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
avail_end + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE, TRUE);
- msgbufmapped = 1;
-
- printf(version);
- identifycpu();
- printf("real mem = %d\n", ctob(physmem));
-
- /*
- * Find out how much space we need, allocate it,
- * and then give everything true virtual addresses.
- */
- sz = (int)allocsys((caddr_t)0);
- if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
- panic("startup: no room for tables");
- if (allocsys(v) - v != sz)
- panic("startup: table size inconsistency");
-
- /*
- * Grab UADDR virtual address
- */
-
- uarea_pages = UADDR;
-
- vm_map_find(kernel_map, vm_object_allocate(USPACE), 0,
- (vm_offset_t *)&uarea_pages, USPACE, TRUE);
-
- if (uarea_pages != UADDR) {
- printf("uarea_pages %x: UADDR not free\n", uarea_pages);
- panic("bad UADDR");
- }
-
- /*
- * Grab the BUGROM space that we hardwired in pmap_bootstrap
- */
-
- bugromva = BUGROM_START;
-
- vm_map_find(kernel_map, vm_object_allocate(BUGROM_SIZE), 0,
- (vm_offset_t *)&bugromva, BUGROM_SIZE, TRUE);
-
- if (bugromva != BUGROM_START) {
- printf("bugromva %x: BUGROM not free\n", bugromva);
- panic("bad bugromva");
- }
-
- /*
- * Grab the SRAM space that we hardwired in pmap_bootstrap
- */
-
- sramva = SRAM_START;
-
- vm_map_find(kernel_map, vm_object_allocate(SRAM_SIZE), 0,
- (vm_offset_t *)&sramva, SRAM_SIZE, TRUE);
-
- if (sramva != SRAM_START) {
- printf("sramva %x: SRAM not free\n", sramva);
- panic("bad sramva");
- }
-
- /*
- * Grab the OBIO space that we hardwired in pmap_bootstrap
- */
-
- obiova = OBIO_START;
-
- vm_map_find(kernel_map, vm_object_allocate(OBIO_SIZE), 0,
- (vm_offset_t *)&obiova, OBIO_SIZE, TRUE);
-
- if (obiova != OBIO_START) {
- printf("obiova %x: OBIO not free\n", obiova);
- panic("bad OBIO");
- }
-
-#if 0
+ msgbufmapped = 1;
+
+ printf("real mem = %d\n", ctob(physmem));
+
/*
- * Grab the EXTIO space that we hardwired in pmap_bootstrap
- */
+ * Find out how much space we need, allocate it,
+ * and then give everything true virtual addresses.
+ */
+ sz = (int)allocsys((caddr_t)0);
+ if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
+ panic("startup: no room for tables");
+ if (allocsys(v) - v != sz)
+ panic("startup: table size inconsistency");
- extiova = IOMAP_MAP_START;
+ /*
+ * Grab UADDR virtual address
+ */
+
+ uarea_pages = UADDR;
+
+ vm_map_find(kernel_map, vm_object_allocate(USPACE), 0,
+ (vm_offset_t *)&uarea_pages, USPACE, TRUE);
+
+ if (uarea_pages != UADDR) {
+ printf("uarea_pages %x: UADDR not free\n", uarea_pages);
+ panic("bad UADDR");
+ }
+
+	if (cputyp != CPU_188) {
+
+ /*
+ * Grab the BUGROM space that we hardwired in pmap_bootstrap
+ */
+ bugromva = BUGROM_START;
+
+ vm_map_find(kernel_map, vm_object_allocate(BUGROM_SIZE), 0,
+ (vm_offset_t *)&bugromva, BUGROM_SIZE, TRUE);
+
+ if (bugromva != BUGROM_START) {
+ printf("bugromva %x: BUGROM not free\n", bugromva);
+ panic("bad bugromva");
+ }
+
+ /*
+ * Grab the SRAM space that we hardwired in pmap_bootstrap
+ */
+ sramva = SRAM_START;
+
+ vm_map_find(kernel_map, vm_object_allocate(SRAM_SIZE), 0,
+ (vm_offset_t *)&sramva, SRAM_SIZE, TRUE);
+
+ if (sramva != SRAM_START) {
+ printf("sramva %x: SRAM not free\n", sramva);
+ panic("bad sramva");
+ }
+
+ /*
+ * Grab the OBIO space that we hardwired in pmap_bootstrap
+ */
+ obiova = OBIO_START;
+
+ vm_map_find(kernel_map, vm_object_allocate(OBIO_SIZE), 0,
+ (vm_offset_t *)&obiova, OBIO_SIZE, TRUE);
+
+ if (obiova != OBIO_START) {
+ printf("obiova %x: OBIO not free\n", obiova);
+ panic("bad OBIO");
+ }
+ } else { /* cputyp == CPU_188 */
+ /*
+ * Grab the UTIL space that we hardwired in pmap_bootstrap
+ */
+ utilva = MVME188_UTILITY;
+
+ vm_map_find(kernel_map, vm_object_allocate(MVME188_UTILITY_SIZE), 0,
+ (vm_offset_t *)&utilva, MVME188_UTILITY_SIZE, TRUE);
+
+ if (utilva != MVME188_UTILITY) {
+ printf("utilva %x: UTILITY area not free\n", utilva);
+ panic("bad utilva");
+ }
+ }
- vm_map_find(kernel_map, vm_object_allocate(IOMAP_SIZE), 0,
- (vm_offset_t *)&extiova, IOMAP_SIZE, TRUE);
+ /*
+ * Now allocate buffers proper. They are different than the above
+ * in that they usually occupy more virtual memory than physical.
+ */
+
+ size = MAXBSIZE * nbuf;
+ buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
+ &maxaddr, size, TRUE);
+ minaddr = (vm_offset_t)buffers;
+ if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
+ (vm_offset_t *)&minaddr, size, FALSE) != KERN_SUCCESS) {
+ panic("startup: cannot allocate buffers");
+ }
+ if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
+ /* don't want to alloc more physical mem than needed */
+ bufpages = btoc(MAXBSIZE) * nbuf;
+ }
+ base = bufpages / nbuf;
+ residual = bufpages % nbuf;
+
+ for (i = 0; i < nbuf; i++) {
+ vm_size_t curbufsize;
+ vm_offset_t curbuf;
+
+ /*
+ * First <residual> buffers get (base+1) physical pages
+ * allocated for them. The rest get (base) physical pages.
+ *
+ * The rest of each buffer occupies virtual space,
+ * but has no physical memory allocated for it.
+ */
+ curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
+ curbufsize = CLBYTES * (i < residual ? base+1 : base);
+
+ /* this faults in the required physical pages */
+ vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
+
+ vm_map_simplify(buffer_map, curbuf);
+ }
- if (extiova != IOMAP_MAP_START) {
- printf("extiova %x: EXTIO not free\n", extiova);
- panic("bad EXTIO");
- }
+ /*
+ * Allocate a submap for exec arguments. This map effectively
+ * limits the number of processes exec'ing at any time.
+ */
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ 16*NCARGS, TRUE);
+#ifdef DEBUG
+	printf("exec_map from 0x%x to 0x%x\n", (unsigned)minaddr, (unsigned)maxaddr);
#endif
-
- /*
- * Now allocate buffers proper. They are different than the above
- * in that they usually occupy more virtual memory than physical.
- */
-
- size = MAXBSIZE * nbuf;
- buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
- &maxaddr, size, TRUE);
- minaddr = (vm_offset_t)buffers;
- if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
- (vm_offset_t *)&minaddr, size, FALSE) != KERN_SUCCESS) {
- panic("startup: cannot allocate buffers");
- }
- if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
- /* don't want to alloc more physical mem than needed */
- bufpages = btoc(MAXBSIZE) * nbuf;
- }
- base = bufpages / nbuf;
- residual = bufpages % nbuf;
-
- for (i = 0; i < nbuf; i++) {
- vm_size_t curbufsize;
- vm_offset_t curbuf;
-
- /*
- * First <residual> buffers get (base+1) physical pages
- * allocated for them. The rest get (base) physical pages.
- *
- * The rest of each buffer occupies virtual space,
- * but has no physical memory allocated for it.
- */
- curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
- curbufsize = CLBYTES * (i < residual ? base+1 : base);
-
- /* this faults in the required physical pages */
- vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
-
- vm_map_simplify(buffer_map, curbuf);
- }
-
- /*
- * Allocate a submap for exec arguments. This map effectively
- * limits the number of processes exec'ing at any time.
- */
- exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- 16*NCARGS, TRUE);
-
- /*
- * Allocate map for physio.
- */
-
- phys_map = vm_map_create(kernel_pmap, PHYSIO_MAP_START,
- PHYSIO_MAP_START + PHYSIO_MAP_SIZE, TRUE);
- if (phys_map == NULL) {
- panic("cpu_startup: unable to create phys_map");
- }
+ /*
+ * Allocate map for physio.
+ */
+
+ phys_map = vm_map_create(kernel_pmap, PHYSIO_MAP_START,
+ PHYSIO_MAP_START + PHYSIO_MAP_SIZE, TRUE);
+ if (phys_map == NULL) {
+ panic("cpu_startup: unable to create phys_map");
+ }
+
+ /*
+ * Allocate map for external I/O XXX new code - smurph
+ */
+ /*
+ * IOMAP_MAP_START was used for the base address of this map, but
+ * IOMAP_MAP_START == 0xEF000000, which is larger than a signed
+ * long (int on 88k). This causes rminit() to break when DIAGNOSTIC is
+ * defined, as it checks (long)addr < 0. So as a workaround, I use
+ * 0x10000000 as a base address. XXX smurph
+ */
+
+ iomap_map = vm_map_create(kernel_pmap, (u_long)0x10000000,
+ (u_long)0x10000000 + IOMAP_SIZE, TRUE);
+ if (iomap_map == NULL) {
+ panic("cpu_startup: unable to create iomap_map");
+ }
+ iomapbase = (void *)kmem_alloc_wait(iomap_map, IOMAP_SIZE);
+ rminit(iomap, IOMAP_SIZE, (u_long)iomapbase, "iomap", NIOPMAP);
-#if 1 /*XXX_FUTURE*/
- iomap_map = vm_map_create(kernel_pmap, IOMAP_MAP_START,
- IOMAP_MAP_START + IOMAP_SIZE, TRUE);
- if (iomap_map == NULL) {
- panic("cpu_startup: unable to create iomap_map");
- }
-
- /*
- * Allocate space from iomap for a (privately managed) pool
- * of addresses for IO mappings.
- */
-
- iomapbase = kmem_alloc_wait(iomap_map, IOMAP_SIZE);
- rminit(iomap, IOMAP_SIZE, (u_long)iomapbase, "iomap", niomap);
-#endif
+ /*
+ * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
+ * we use the more space efficient malloc in place of kmem_alloc.
+ */
+ mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
+ M_MBUF, M_NOWAIT);
+ bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
+ mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
+ VM_MBUF_SIZE, FALSE);
- /*
- * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
- * we use the more space efficient malloc in place of kmem_alloc.
- */
- mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
- M_MBUF, M_NOWAIT);
- bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
- mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
- VM_MBUF_SIZE, FALSE);
-
- /*
- * Initialize callouts
- */
- callfree = callout;
- for (i = 1; i < ncallout; i++)
- callout[i-1].c_next = &callout[i];
- callout[i-1].c_next = NULL;
-
- printf("avail mem = %d\n", ptoa(cnt.v_free_count));
- printf("using %d buffers containing %d bytes of memory\n",
- nbuf, bufpages * CLBYTES);
-
-#ifdef MFS
- /*
- * Check to see if a mini-root was loaded into memory. It resides
- * at the start of the next page just after the end of BSS.
- */
- {
- extern void *smini;
-
- if (miniroot && (boothowto & RB_MINIROOT)) {
- boothowto |= RB_DFLTROOT;
- mfs_initminiroot(miniroot);
- }
- }
+ /*
+ * Initialize callouts
+ */
+ callfree = callout;
+ for (i = 1; i < ncallout; i++)
+ callout[i-1].c_next = &callout[i];
+ callout[i-1].c_next = NULL;
+
+ printf("avail mem = %d\n", ptoa(cnt.v_free_count));
+ printf("using %d buffers containing %d bytes of memory\n",
+ nbuf, bufpages * CLBYTES);
+
+#if 0 /* #ifdef MFS */
+ /*
+ * Check to see if a mini-root was loaded into memory. It resides
+ * at the start of the next page just after the end of BSS.
+ */
+ {
+ extern void *smini;
+
+ if (miniroot && (boothowto & RB_MINIROOT)) {
+ boothowto |= RB_DFLTROOT;
+ mfs_initminiroot(miniroot);
+ }
+ }
#endif
- /*
- * Set up buffers, so they can be used to read disk labels.
- */
- bufinit();
-
- /*
- * Configure the system.
- */
- nofault = NULL;
+ /*
+ * Set up buffers, so they can be used to read disk labels.
+ */
+ bufinit();
- /*
- * zero out intr_handlers
- */
- bzero((void *)intr_handlers, 256 * sizeof(struct intrhand *));
+ /*
+ * Configure the system.
+ */
+ nofault = NULL;
- configure();
+ /*
+ * zero out intr_handlers
+ */
+ bzero((void *)intr_handlers, 256 * sizeof(struct intrhand *));
+ setupiackvectors();
+ configure();
}
/*
@@ -638,130 +693,136 @@ cpu_startup()
*/
caddr_t
allocsys(v)
- register caddr_t v;
+register caddr_t v;
{
#define valloc(name, type, num) \
v = (caddr_t)(((name) = (type *)v) + (num))
#ifdef REAL_CLISTS
- valloc(cfree, struct cblock, nclist);
+ valloc(cfree, struct cblock, nclist);
#endif
- valloc(callout, struct callout, ncallout);
+ valloc(callout, struct callout, ncallout);
+#if 0
+ valloc(swapmap, struct map, nswapmap = maxproc * 2);
+#endif
#ifdef SYSVSHM
- valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
+ valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
- valloc(sema, struct semid_ds, seminfo.semmni);
- valloc(sem, struct sem, seminfo.semmns);
- /* This is pretty disgusting! */
- valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
+ valloc(sema, struct semid_ds, seminfo.semmni);
+ valloc(sem, struct sem, seminfo.semmns);
+ /* This is pretty disgusting! */
+ valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
- valloc(msgpool, char, msginfo.msgmax);
- valloc(msgmaps, struct msgmap, msginfo.msgseg);
- valloc(msghdrs, struct msg, msginfo.msgtql);
- valloc(msqids, struct msqid_ds, msginfo.msgmni);
+ valloc(msgpool, char, msginfo.msgmax);
+ valloc(msgmaps, struct msgmap, msginfo.msgseg);
+ valloc(msghdrs, struct msg, msginfo.msgtql);
+ valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif
- /*
- * Determine how many buffers to allocate (enough to
- * hold 5% of total physical memory, but at least 16).
- * Allocate 1/2 as many swap buffer headers as file i/o buffers.
- */
- if (bufpages == 0)
- if (physmem < btoc(2 * 1024 * 1024))
- bufpages = (physmem / 10) / CLSIZE;
- else
- bufpages = (physmem / 20) / CLSIZE;
- if (nbuf == 0) {
- nbuf = bufpages;
- if (nbuf < 16)
- nbuf = 16;
- }
- if (nswbuf == 0) {
- nswbuf = (nbuf / 2) &~ 1; /* force even */
- if (nswbuf > 256)
- nswbuf = 256; /* sanity */
- }
- valloc(swbuf, struct buf, nswbuf);
- valloc(buf, struct buf, nbuf);
+ /*
+ * Determine how many buffers to allocate (enough to
+ * hold 5% of total physical memory, but at least 16).
+ * Allocate 1/2 as many swap buffer headers as file i/o buffers.
+ */
+ if (bufpages == 0)
+ if (physmem < btoc(2 * 1024 * 1024))
+ bufpages = (physmem / 10) / CLSIZE;
+ else
+ bufpages = (physmem / 20) / CLSIZE;
+ if (nbuf == 0) {
+ nbuf = bufpages;
+ if (nbuf < 16)
+ nbuf = 16;
+ }
+ if (nswbuf == 0) {
+ nswbuf = (nbuf / 2) &~ 1; /* force even */
+ if (nswbuf > 256)
+ nswbuf = 256; /* sanity */
+ }
+ valloc(swbuf, struct buf, nswbuf);
+ valloc(buf, struct buf, nbuf);
#if 1 /*XXX_FUTURE*/
- /*
- * Arbitrarily limit the number of devices mapping
- * the IO space at a given time to NIOPMAP (= 32, default).
- */
- valloc(iomap, struct map, niomap = NIOPMAP);
+ /*
+ * Arbitrarily limit the number of devices mapping
+ * the IO space at a given time to NIOPMAP (= 32, default).
+ */
+ valloc(iomap, struct map, niomap = NIOPMAP);
#endif
- return v;
+ return v;
}
/*
* Set registers on exec.
* Clear all except sp and pc.
*/
+
+/* MVME197 TODO list :-) smurph */
+
void
setregs(p, pack, stack, retval)
- struct proc *p;
- struct exec_package *pack;
- u_long stack;
- int retval[2];
+struct proc *p;
+struct exec_package *pack;
+u_long stack;
+int retval[2];
{
- register struct trapframe *tf = USER_REGS(p);
+ register struct trapframe *tf = USER_REGS(p);
/* printf("stack at %x\n", stack);
- printf("%x - %x\n", USRSTACK - MAXSSIZ, USRSTACK);
+ printf("%x - %x\n", USRSTACK - MAXSSIZ, USRSTACK);
*/
- /*
- * The syscall will ``return'' to snip; set it.
- * argc, argv, envp are placed on the stack by copyregs.
- * Point r2 to the stack. crt0 should extract envp from
- * argc & argv before calling user's main.
- */
+ /*
+ * The syscall will ``return'' to snip; set it.
+ * argc, argv, envp are placed on the stack by copyregs.
+ * Point r2 to the stack. crt0 should extract envp from
+ * argc & argv before calling user's main.
+ */
#if 0
- /*
- * I don't think I need to mess with fpstate on 88k because
- * we make sure the floating point pipeline is drained in
- * the trap handlers. Should check on this later. XXX Nivas.
- */
-
- if ((fs = p->p_md.md_fpstate) != NULL) {
- /*
- * We hold an FPU state. If we own *the* FPU chip state
- * we must get rid of it, and the only way to do that is
- * to save it. In any case, get rid of our FPU state.
- */
- if (p == fpproc) {
- savefpstate(fs);
- fpproc = NULL;
- }
- free((void *)fs, M_SUBPROC);
- p->p_md.md_fpstate = NULL;
- }
+ /*
+ * I don't think I need to mess with fpstate on 88k because
+ * we make sure the floating point pipeline is drained in
+ * the trap handlers. Should check on this later. XXX Nivas.
+ */
+
+ if ((fs = p->p_md.md_fpstate) != NULL) {
+ /*
+ * We hold an FPU state. If we own *the* FPU chip state
+ * we must get rid of it, and the only way to do that is
+ * to save it. In any case, get rid of our FPU state.
+ */
+ if (p == fpproc) {
+ savefpstate(fs);
+ fpproc = NULL;
+ }
+ free((void *)fs, M_SUBPROC);
+ p->p_md.md_fpstate = NULL;
+ }
#endif /* 0 */
- bzero((caddr_t)tf, sizeof *tf);
- tf->epsr = 0x3f0; /* user mode, interrupts enabled, fp enabled */
+ bzero((caddr_t)tf, sizeof *tf);
+ tf->epsr = 0x3f0; /* user mode, interrupts enabled, fp enabled */
/* tf->epsr = 0x3f4;*/ /* user mode, interrupts enabled, fp enabled, MXM Mask */
- /*
- * We want to start executing at pack->ep_entry. The way to
- * do this is force the processor to fetch from ep_entry. Set
- * NIP to something bogus and invalid so that it will be a NOOP.
- * And set sfip to ep_entry with valid bit on so that it will be
- * fetched.
- */
-
- tf->snip = pack->ep_entry & ~3;
- tf->sfip = (pack->ep_entry & ~3) | FIP_V;
- tf->r[2] = stack;
- tf->r[31] = stack;
- retval[1] = 0;
+ /*
+ * We want to start executing at pack->ep_entry. The way to
+ * do this is force the processor to fetch from ep_entry. Set
+ * NIP to something bogus and invalid so that it will be a NOOP.
+ * And set sfip to ep_entry with valid bit on so that it will be
+ * fetched.
+ */
+
+ tf->snip = pack->ep_entry & ~3;
+ tf->sfip = (pack->ep_entry & ~3) | FIP_V;
+ tf->r[2] = stack;
+ tf->r[31] = stack;
+ retval[1] = 0;
}
struct sigstate {
- int ss_flags; /* which of the following are valid */
- struct trapframe ss_frame; /* original exception frame */
+ int ss_flags; /* which of the following are valid */
+ struct trapframe ss_frame; /* original exception frame */
};
/*
@@ -769,143 +830,143 @@ struct sigstate {
* thru sf_handler so... don't screw with them!
*/
struct sigframe {
- int sf_signo; /* signo for handler */
- siginfo_t *sf_sip;
- struct sigcontext *sf_scp; /* context ptr for handler */
- sig_t sf_handler; /* handler addr for u_sigc */
- struct sigcontext sf_sc; /* actual context */
- siginfo_t sf_si;
+ int sf_signo; /* signo for handler */
+ siginfo_t *sf_sip;
+ struct sigcontext *sf_scp; /* context ptr for handler */
+ sig_t sf_handler; /* handler addr for u_sigc */
+ struct sigcontext sf_sc; /* actual context */
+ siginfo_t sf_si;
};
#ifdef DEBUG
int sigdebug = 0;
int sigpid = 0;
-#define SDB_FOLLOW 0x01
-#define SDB_KSTACK 0x02
-#define SDB_FPSTATE 0x04
+ #define SDB_FOLLOW 0x01
+ #define SDB_KSTACK 0x02
+ #define SDB_FPSTATE 0x04
#endif
/*
* Send an interrupt to process.
*/
+/* MVME197 TODO list :-) smurph */
void
sendsig(catcher, sig, mask, code, type, val)
- sig_t catcher;
- int sig, mask;
- unsigned long code;
- int type;
- union sigval val;
+sig_t catcher;
+int sig, mask;
+unsigned long code;
+int type;
+union sigval val;
{
- register struct proc *p = curproc;
- register struct trapframe *tf;
- register struct sigacts *psp = p->p_sigacts;
- struct sigframe *fp;
- int oonstack, fsize;
- struct sigframe sf;
- int addr;
- extern char sigcode[], esigcode[];
+ register struct proc *p = curproc;
+ register struct trapframe *tf;
+ register struct sigacts *psp = p->p_sigacts;
+ struct sigframe *fp;
+ int oonstack, fsize;
+ struct sigframe sf;
+ int addr;
+ extern char sigcode[], esigcode[];
#define szsigcode (esigcode - sigcode)
- tf = p->p_md.md_tf;
- oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
- /*
- * Allocate and validate space for the signal handler
- * context. Note that if the stack is in data space, the
- * call to grow() is a nop, and the copyout()
- * will fail if the process has not already allocated
- * the space with a `brk'.
- */
- fsize = sizeof(struct sigframe);
- if ((psp->ps_flags & SAS_ALTSTACK) &&
- (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
- (psp->ps_sigonstack & sigmask(sig))) {
- fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
- psp->ps_sigstk.ss_size - fsize);
- psp->ps_sigstk.ss_flags |= SA_ONSTACK;
- } else
- fp = (struct sigframe *)(tf->r[31] - fsize);
- if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
- (void)grow(p, (unsigned)fp);
+ tf = p->p_md.md_tf;
+ oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
+ /*
+ * Allocate and validate space for the signal handler
+ * context. Note that if the stack is in data space, the
+ * call to grow() is a nop, and the copyout()
+ * will fail if the process has not already allocated
+ * the space with a `brk'.
+ */
+ fsize = sizeof(struct sigframe);
+ if ((psp->ps_flags & SAS_ALTSTACK) &&
+ (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
+ (psp->ps_sigonstack & sigmask(sig))) {
+ fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
+ psp->ps_sigstk.ss_size - fsize);
+ psp->ps_sigstk.ss_flags |= SA_ONSTACK;
+ } else
+ fp = (struct sigframe *)(tf->r[31] - fsize);
+ if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
+ (void)grow(p, (unsigned)fp);
#ifdef DEBUG
- if ((sigdebug & SDB_FOLLOW) ||
- (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
- printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n",
- p->p_pid, sig, &oonstack, fp, &fp->sf_sc);
+ if ((sigdebug & SDB_FOLLOW) ||
+ (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
+ printf("sendsig(%d): sig %d ssp %x usp %x scp %x\n",
+ p->p_pid, sig, &oonstack, fp, &fp->sf_sc);
#endif
- /*
- * Build the signal context to be used by sigreturn.
- */
- sf.sf_signo = sig;
- sf.sf_scp = &fp->sf_sc;
- sf.sf_handler = catcher;
- sf.sf_sc.sc_onstack = oonstack;
- sf.sf_sc.sc_mask = mask;
-
- if (psp->ps_siginfo & sigmask(sig)) {
- sf.sf_sip = &fp->sf_si;
- initsiginfo(&sf.sf_si, sig, code, type, val);
- }
-
-
- /*
- * Copy the whole user context into signal context that we
- * are building.
- */
- bcopy((caddr_t)tf->r, (caddr_t)sf.sf_sc.sc_regs,
- sizeof(sf.sf_sc.sc_regs));
- sf.sf_sc.sc_xip = tf->sxip & ~3;
- sf.sf_sc.sc_nip = tf->snip & ~3;
- sf.sf_sc.sc_fip = tf->sfip & ~3;
- sf.sf_sc.sc_ps = tf->epsr;
- sf.sf_sc.sc_sp = tf->r[31];
- sf.sf_sc.sc_fpsr = tf->fpsr;
- sf.sf_sc.sc_fpcr = tf->fpcr;
- sf.sf_sc.sc_ssbr = tf->ssbr;
- sf.sf_sc.sc_dmt0 = tf->dmt0;
- sf.sf_sc.sc_dmd0 = tf->dmd0;
- sf.sf_sc.sc_dma0 = tf->dma0;
- sf.sf_sc.sc_dmt1 = tf->dmt1;
- sf.sf_sc.sc_dmd1 = tf->dmd1;
- sf.sf_sc.sc_dma1 = tf->dma1;
- sf.sf_sc.sc_dmt2 = tf->dmt2;
- sf.sf_sc.sc_dmd2 = tf->dmd2;
- sf.sf_sc.sc_dma2 = tf->dma2;
- sf.sf_sc.sc_fpecr = tf->fpecr;
- sf.sf_sc.sc_fphs1 = tf->fphs1;
- sf.sf_sc.sc_fpls1 = tf->fpls1;
- sf.sf_sc.sc_fphs2 = tf->fphs2;
- sf.sf_sc.sc_fpls2 = tf->fpls2;
- sf.sf_sc.sc_fppt = tf->fppt;
- sf.sf_sc.sc_fprh = tf->fprh;
- sf.sf_sc.sc_fprl = tf->fprl;
- sf.sf_sc.sc_fpit = tf->fpit;
- if (copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf)) {
- /*
- * Process has trashed its stack; give it an illegal
- * instruction to halt it in its tracks.
- */
- SIGACTION(p, SIGILL) = SIG_DFL;
- sig = sigmask(SIGILL);
- p->p_sigignore &= ~sig;
- p->p_sigcatch &= ~sig;
- p->p_sigmask &= ~sig;
- psignal(p, SIGILL);
- return;
- }
- /*
- * Build the argument list for the signal handler.
- * Signal trampoline code is at base of user stack.
- */
- addr = (int)PS_STRINGS - szsigcode;
- tf->snip = (addr & ~3) | NIP_V;
- tf->sfip = (tf->snip + 4) | FIP_V;
- tf->r[31] = (unsigned)fp;
+ /*
+ * Build the signal context to be used by sigreturn.
+ */
+ sf.sf_signo = sig;
+ sf.sf_scp = &fp->sf_sc;
+ sf.sf_handler = catcher;
+ sf.sf_sc.sc_onstack = oonstack;
+ sf.sf_sc.sc_mask = mask;
+
+ if (psp->ps_siginfo & sigmask(sig)) {
+ sf.sf_sip = &fp->sf_si;
+ initsiginfo(&sf.sf_si, sig, code, type, val);
+ }
+
+
+ /*
+ * Copy the whole user context into signal context that we
+ * are building.
+ */
+ bcopy((caddr_t)tf->r, (caddr_t)sf.sf_sc.sc_regs,
+ sizeof(sf.sf_sc.sc_regs));
+ sf.sf_sc.sc_xip = tf->sxip & ~3;
+ sf.sf_sc.sc_nip = tf->snip & ~3;
+ sf.sf_sc.sc_fip = tf->sfip & ~3;
+ sf.sf_sc.sc_ps = tf->epsr;
+ sf.sf_sc.sc_sp = tf->r[31];
+ sf.sf_sc.sc_fpsr = tf->fpsr;
+ sf.sf_sc.sc_fpcr = tf->fpcr;
+ sf.sf_sc.sc_ssbr = tf->ssbr;
+ sf.sf_sc.sc_dmt0 = tf->dmt0;
+ sf.sf_sc.sc_dmd0 = tf->dmd0;
+ sf.sf_sc.sc_dma0 = tf->dma0;
+ sf.sf_sc.sc_dmt1 = tf->dmt1;
+ sf.sf_sc.sc_dmd1 = tf->dmd1;
+ sf.sf_sc.sc_dma1 = tf->dma1;
+ sf.sf_sc.sc_dmt2 = tf->dmt2;
+ sf.sf_sc.sc_dmd2 = tf->dmd2;
+ sf.sf_sc.sc_dma2 = tf->dma2;
+ sf.sf_sc.sc_fpecr = tf->fpecr;
+ sf.sf_sc.sc_fphs1 = tf->fphs1;
+ sf.sf_sc.sc_fpls1 = tf->fpls1;
+ sf.sf_sc.sc_fphs2 = tf->fphs2;
+ sf.sf_sc.sc_fpls2 = tf->fpls2;
+ sf.sf_sc.sc_fppt = tf->fppt;
+ sf.sf_sc.sc_fprh = tf->fprh;
+ sf.sf_sc.sc_fprl = tf->fprl;
+ sf.sf_sc.sc_fpit = tf->fpit;
+ if (copyout((caddr_t)&sf, (caddr_t)fp, sizeof sf)) {
+ /*
+ * Process has trashed its stack; give it an illegal
+ * instruction to halt it in its tracks.
+ */
+ SIGACTION(p, SIGILL) = SIG_DFL;
+ sig = sigmask(SIGILL);
+ p->p_sigignore &= ~sig;
+ p->p_sigcatch &= ~sig;
+ p->p_sigmask &= ~sig;
+ psignal(p, SIGILL);
+ return;
+ }
+ /*
+ * Build the argument list for the signal handler.
+ * Signal trampoline code is at base of user stack.
+ */
+ addr = (int)PS_STRINGS - szsigcode;
+ tf->snip = (addr & ~3) | NIP_V;
+ tf->sfip = (tf->snip + 4) | FIP_V;
+ tf->r[31] = (unsigned)fp;
#ifdef DEBUG
- if ((sigdebug & SDB_FOLLOW) ||
- (sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
- printf("sendsig(%d): sig %d returns\n",
- p->p_pid, sig);
+	if ((sigdebug & SDB_FOLLOW) ||
+	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
+ printf("sendsig(%d): sig %d returns\n", p->p_pid, sig);
#endif
}
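For orientation, the frame built above is unwound in userland by the signal trampoline that sendsig() points snip at. A minimal C rendering of the trampoline's job (a sketch only; the real sigcode is a few m88k instructions copied to PS_STRINGS - szsigcode):

    /* Sketch: what the trampoline does with the sigframe fields set above. */
    void
    sigcode_sketch(struct sigframe *sf)
    {
            /* invoke the handler with the arguments sendsig() stored */
            (*sf->sf_handler)(sf->sf_signo, sf->sf_sip, sf->sf_scp);
            /* restore the interrupted context; only returns on error */
            sigreturn(sf->sf_scp);
    }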
@@ -920,158 +981,176 @@ sendsig(catcher, sig, mask, code, type, val)
* a machine fault.
*/
/* ARGSUSED */
+
+/* MVME197 TODO list :-) smurph */
+
sys_sigreturn(p, v, retval)
- struct proc *p;
- void *v;
- register_t *retval;
+struct proc *p;
+void *v;
+register_t *retval;
{
- struct sys_sigreturn_args /* {
- syscallarg(struct sigcontext *) sigcntxp;
- } */ *uap = v;
- register struct sigcontext *scp;
- register struct trapframe *tf;
- struct sigcontext ksc;
- int error;
-
- scp = (struct sigcontext *)SCARG(uap, sigcntxp);
+ struct sys_sigreturn_args /* {
+ syscallarg(struct sigcontext *) sigcntxp;
+ } */ *uap = v;
+ register struct sigcontext *scp;
+ register struct trapframe *tf;
+ struct sigcontext ksc;
+ int error;
+
+ scp = (struct sigcontext *)SCARG(uap, sigcntxp);
#ifdef DEBUG
- if (sigdebug & SDB_FOLLOW)
- printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
+ if (sigdebug & SDB_FOLLOW)
+ printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
#endif
- if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0 ||
- copyin((caddr_t)scp, (caddr_t)&ksc, sizeof(struct sigcontext)))
- return (EINVAL);
-
- tf = p->p_md.md_tf;
- scp = &ksc;
- /*
- * xip, nip and fip must be multiples of 4. This is all
- * that is required; if it holds, just do it.
- */
+ if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0 ||
+ copyin((caddr_t)scp, (caddr_t)&ksc, sizeof(struct sigcontext)))
+ return (EINVAL);
+
+ tf = p->p_md.md_tf;
+ scp = &ksc;
+ /*
+ * xip, nip and fip must be multiples of 4. This is all
+ * that is required; if it holds, just do it.
+ */
#if 0
- if (((scp->sc_xip | scp->sc_nip | scp->sc_fip) & 3) != 0)
- return (EINVAL);
+ if (((scp->sc_xip | scp->sc_nip | scp->sc_fip) & 3) != 0)
+ return (EINVAL);
#endif /* 0 */
- if (((scp->sc_xip | scp->sc_nip | scp->sc_fip) & 3) != 0)
- printf("xip %x nip %x fip %x\n",
- scp->sc_xip, scp->sc_nip, scp->sc_fip);
-
-
- /*
- * this can be improved by doing
- * bcopy(sc_reg to tf, sizeof sigcontext - 2 words)
- * XXX nivas
- */
-
- bcopy((caddr_t)scp->sc_regs, (caddr_t)tf->r,
- sizeof(scp->sc_regs));
- tf->sxip = (scp->sc_xip) | XIP_V;
- tf->snip = (scp->sc_nip) | NIP_V;
- tf->sfip = (scp->sc_fip) | FIP_V;
- tf->epsr = scp->sc_ps;
- tf->r[31] = scp->sc_sp;
- tf->fpsr = scp->sc_fpsr;
- tf->fpcr = scp->sc_fpcr;
- tf->ssbr = scp->sc_ssbr;
- tf->dmt0 = scp->sc_dmt0;
- tf->dmd0 = scp->sc_dmd0;
- tf->dma0 = scp->sc_dma0;
- tf->dmt1 = scp->sc_dmt1;
- tf->dmd1 = scp->sc_dmd1;
- tf->dma1 = scp->sc_dma1;
- tf->dmt2 = scp->sc_dmt2;
- tf->dmd2 = scp->sc_dmd2;
- tf->dma2 = scp->sc_dma2;
- tf->fpecr = scp->sc_fpecr;
- tf->fphs1 = scp->sc_fphs1;
- tf->fpls1 = scp->sc_fpls1;
- tf->fphs2 = scp->sc_fphs2;
- tf->fpls2 = scp->sc_fpls2;
- tf->fppt = scp->sc_fppt;
- tf->fprh = scp->sc_fprh;
- tf->fprl = scp->sc_fprl;
- tf->fpit = scp->sc_fpit;
-
- tf->epsr = scp->sc_ps;
- /*
- * Restore the user supplied information
- */
- if (scp->sc_onstack & 01)
- p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
- else
- p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
- p->p_sigmask = scp->sc_mask & ~sigcantmask;
- return (EJUSTRETURN);
+ if (((scp->sc_xip | scp->sc_nip | scp->sc_fip) & 3) != 0)
+ printf("xip %x nip %x fip %x\n",
+ scp->sc_xip, scp->sc_nip, scp->sc_fip);
+
+ /*
+ * this can be improved by doing
+ * bcopy(sc_reg to tf, sizeof sigcontext - 2 words)
+ * XXX nivas
+ */
+
+ bcopy((caddr_t)scp->sc_regs, (caddr_t)tf->r, sizeof(scp->sc_regs));
+ tf->sxip = (scp->sc_xip) | XIP_V;
+ tf->snip = (scp->sc_nip) | NIP_V;
+ tf->sfip = (scp->sc_fip) | FIP_V;
+ tf->epsr = scp->sc_ps;
+ tf->r[31] = scp->sc_sp;
+ tf->fpsr = scp->sc_fpsr;
+ tf->fpcr = scp->sc_fpcr;
+ tf->ssbr = scp->sc_ssbr;
+ tf->dmt0 = scp->sc_dmt0;
+ tf->dmd0 = scp->sc_dmd0;
+ tf->dma0 = scp->sc_dma0;
+ tf->dmt1 = scp->sc_dmt1;
+ tf->dmd1 = scp->sc_dmd1;
+ tf->dma1 = scp->sc_dma1;
+ tf->dmt2 = scp->sc_dmt2;
+ tf->dmd2 = scp->sc_dmd2;
+ tf->dma2 = scp->sc_dma2;
+ tf->fpecr = scp->sc_fpecr;
+ tf->fphs1 = scp->sc_fphs1;
+ tf->fpls1 = scp->sc_fpls1;
+ tf->fphs2 = scp->sc_fphs2;
+ tf->fpls2 = scp->sc_fpls2;
+ tf->fppt = scp->sc_fppt;
+ tf->fprh = scp->sc_fprh;
+ tf->fprl = scp->sc_fprl;
+ tf->fpit = scp->sc_fpit;
+
+ tf->epsr = scp->sc_ps;
+ /*
+ * Restore the user supplied information
+ */
+ if (scp->sc_onstack & 01)
+ p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
+ else
+ p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
+ p->p_sigmask = scp->sc_mask & ~sigcantmask;
+ return (EJUSTRETURN);
}
_doboot()
{
- cmmu_shutdown_now();
- bugreturn();
+ cmmu_shutdown_now();
+ bugreturn();
}
void
boot(howto)
- register int howto;
+register int howto;
+{
+ /* take a snap shot before clobbering any registers */
+ if (curproc)
+ savectx(curproc->p_addr, 0);
+
+ boothowto = howto;
+ if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
+ extern struct proc proc0;
+
+ /* protect against curproc->p_stats.foo refs in sync() XXX */
+ if (curproc == NULL)
+ curproc = &proc0;
+
+ waittime = 0;
+ vfs_shutdown();
+
+ /*
+ * If we've been adjusting the clock, the todr
+ * will be out of synch; adjust it now.
+ */
+ resettodr();
+ }
+ splhigh(); /* extreme priority */
+ if (howto & RB_HALT) {
+ printf("halted\n\n");
+ bugreturn();
+ } else {
+ if (howto & RB_DUMP)
+ dumpsys();
+ doboot();
+ /*NOTREACHED*/
+ }
+ /*NOTREACHED*/
+ while (1); /* to keep compiler happy, and me from going crazy */
+}
+
+#ifdef MVME188
+void
+m188_reset(void)
{
- /* take a snap shot before clobbering any registers */
- if (curproc)
- savectx(curproc->p_addr, 0);
-
- boothowto = howto;
- if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
-
- extern struct proc proc0;
-
- /* protect against curproc->p_stats.foo refs in sync() XXX */
- if (curproc == NULL)
- curproc = &proc0;
-
- waittime = 0;
- vfs_shutdown();
-
- /*
- * If we've been adjusting the clock, the todr
- * will be out of synch; adjust it now.
- */
- resettodr();
- }
- splhigh(); /* extreme priority */
- if (howto & RB_HALT) {
- printf("halted\n\n");
- bugreturn();
- } else {
- if (howto & RB_DUMP)
- dumpsys();
- doboot();
- /*NOTREACHED*/
- }
- /*NOTREACHED*/
- while (1); /* to keep compiler happy, and me from going crazy */
+ volatile int cnt;
+
+ *sys_syscon->ien0 = 0;
+ *sys_syscon->ien1 = 0;
+ *sys_syscon->ien2 = 0;
+ *sys_syscon->ien3 = 0;
+ *sys_syscon->glbres = 1; /* system reset */
+ *sys_syscon->ucsr |= 0x2000; /* clear SYSFAIL* */
+ for (cnt = 0; cnt < 5*1024*1024; cnt++)
+ ;
+ *sys_syscon->ucsr |= 0x2000; /* clear SYSFAIL* */
}
+#endif /* MVME188 */
-unsigned dumpmag = 0x8fca0101; /* magic number for savecore */
-int dumpsize = 0; /* also for savecore */
-long dumplo = 0;
+unsigned dumpmag = 0x8fca0101; /* magic number for savecore */
+int dumpsize = 0; /* also for savecore */
+long dumplo = 0;
dumpconf()
{
- int nblks;
-
- dumpsize = physmem;
- if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
- nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
- if (dumpsize > btoc(dbtob(nblks - dumplo)))
- dumpsize = btoc(dbtob(nblks - dumplo));
- else if (dumplo == 0)
- dumplo = nblks - btodb(ctob(physmem));
- }
- /*
- * Don't dump on the first CLBYTES (why CLBYTES?)
- * in case the dump device includes a disk label.
- */
- if (dumplo < btodb(CLBYTES))
- dumplo = btodb(CLBYTES);
+ int nblks;
+
+ dumpsize = physmem;
+ if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
+ nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
+ if (dumpsize > btoc(dbtob(nblks - dumplo)))
+ dumpsize = btoc(dbtob(nblks - dumplo));
+ else if (dumplo == 0)
+ dumplo = nblks - btodb(ctob(physmem));
+ }
+ /*
+ * Don't dump on the first CLBYTES (why CLBYTES?)
+ * in case the dump device includes a disk label.
+ */
+ if (dumplo < btodb(CLBYTES))
+ dumplo = btodb(CLBYTES);
}
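A worked example of the placement arithmetic above, with hypothetical numbers (assuming 4KB pages and 512-byte disk blocks, so ctob(1) == 4096 and btodb(n) == n / 512):

    /* 32MB of RAM (physmem == 8192 pages) dumping to a 64MB device:  */
    /* nblks = 131072; dumpsize = 8192; btoc(dbtob(131072)) == 16384, */
    /* which exceeds dumpsize, and dumplo == 0, so the dump is placed */
    /* at the end of the device:                                      */
    /* dumplo = 131072 - btodb(ctob(8192)) = 131072 - 65536 = 65536   */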
/*
@@ -1081,42 +1160,41 @@ dumpconf()
*/
dumpsys()
{
-
- msgbufmapped = 0;
- if (dumpdev == NODEV)
- return;
- /*
- * For dumps during autoconfiguration,
- * if dump device has already configured...
- */
- if (dumpsize == 0)
- dumpconf();
- if (dumplo < 0)
- return;
- printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
- printf("dump ");
- switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
-
- case ENXIO:
- printf("device bad\n");
- break;
-
- case EFAULT:
- printf("device not ready\n");
- break;
-
- case EINVAL:
- printf("area improper\n");
- break;
-
- case EIO:
- printf("i/o error\n");
- break;
-
- default:
- printf("succeeded\n");
- break;
- }
+ msgbufmapped = 0;
+ if (dumpdev == NODEV)
+ return;
+ /*
+ * For dumps during autoconfiguration,
+ * if dump device has already configured...
+ */
+ if (dumpsize == 0)
+ dumpconf();
+ if (dumplo < 0)
+ return;
+ printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
+ printf("dump ");
+ switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
+
+ case ENXIO:
+ printf("device bad\n");
+ break;
+
+ case EFAULT:
+ printf("device not ready\n");
+ break;
+
+ case EINVAL:
+ printf("area improper\n");
+ break;
+
+ case EIO:
+ printf("i/o error\n");
+ break;
+
+ default:
+ printf("succeeded\n");
+ break;
+ }
}
/*
@@ -1125,146 +1203,149 @@ dumpsys()
void
setupiackvectors()
{
- register u_char *vaddr;
-#ifdef XXX_FUTURE
- extern vm_offset_t iomap_mapin(vm_offset_t, vm_size_t, boolean_t);
+ register u_char *vaddr;
+#undef MAP_VEC /* Switching to new virtual addresses XXX smurph */
+#ifdef MAP_VEC
+ extern vm_offset_t iomap_mapin(vm_offset_t, vm_size_t, boolean_t);
#endif
- /*
- * map a page in for phys address 0xfffe0000 and set the
- * addresses for various levels.
- */
-#ifdef XXX_FUTURE
- vaddr = (u_char *)iomap_mapin(0xfffe0000, NBPG, 1);
+ /*
+ * map a page in for phys address 0xfffe0000 and set the
+ * addresses for various levels.
+ */
+ switch (cputyp) {
+ case CPU_187:
+#ifdef MAP_VEC /* do for MVME188 too */
+ vaddr = (u_char *)iomap_mapin(M187_IACK, NBPG, 1);
#else
- vaddr = (u_char *)0xfffe0000;
+ vaddr = (u_char *)M187_IACK;
#endif
-#if 0
- (unsigned char *)0xFFFE0003, /* not used, no such thing as int 0 */
- (unsigned char *)0xFFFE0007,
- (unsigned char *)0xFFFE000B,
- (unsigned char *)0xFFFE000F,
- (unsigned char *)0xFFFE0013,
- (unsigned char *)0xFFFE0017,
- (unsigned char *)0xFFFE001B,
- (unsigned char *)0xFFFE001F,
+ break;
+ case CPU_188:
+#ifdef MAP_VEC /* do for MVME188 too */
+ vaddr = (u_char *)iomap_mapin(M188_IACK, NBPG, 1);
+#else
+ vaddr = (u_char *)M188_IACK;
#endif
- ivec[0] = vaddr + 0x03;
- ivec[1] = vaddr + 0x07;
- ivec[2] = vaddr + 0x0b;
- ivec[3] = vaddr + 0x0f;
- ivec[4] = vaddr + 0x13;
- ivec[5] = vaddr + 0x17;
- ivec[6] = vaddr + 0x1b;
- ivec[7] = vaddr + 0x1f;
+ break;
+ case CPU_197:
+#ifdef MAP_VEC /* do for MVME188 too */
+ vaddr = (u_char *)iomap_mapin(M197_IACK, NBPG, 1);
+#else
+ vaddr = (u_char *)M197_IACK;
+#endif
+ break;
+ }
+#ifdef DEBUG
+ printf("interrupt ACK address mapped at 0x%x\n", vaddr);
+#endif
+ ivec[0] = vaddr + 0x03;
+ ivec[1] = vaddr + 0x07;
+ ivec[2] = vaddr + 0x0b;
+ ivec[3] = vaddr + 0x0f;
+ ivec[4] = vaddr + 0x13;
+ ivec[5] = vaddr + 0x17;
+ ivec[6] = vaddr + 0x1b;
+ ivec[7] = vaddr + 0x1f;
}
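The + 0x03, + 0x07, ... offsets follow from the big-endian bus layout: each interrupt level owns one 32-bit word in the IACK space and the 8-bit vector appears in the word's lowest byte, so ivec[n] == vaddr + 4*n + 3. A sketch of the consumer side (the real fetch in the interrupt handlers goes through guarded_access() to survive bus errors):

    /* Illustrative only -- a raw fetch of level 'lvl's vector: */
    u_char
    iack_fetch(int lvl)
    {
            return (*ivec[lvl]);    /* reads byte vaddr + 4*lvl + 3 */
    }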
-/*
- * find a useable interrupt vector in the range start, end. It starts at
- * the end of the range, and searches backwards (to increase the chances
- * of not conflicting with more normal users)
- */
+/* gets an interrupt stack for slave processors */
+vm_offset_t
+get_slave_stack(void)
+{
+ vm_offset_t addr = 0;
+
+ addr = (vm_offset_t)kmem_alloc(kernel_map, INTSTACK_SIZE + 4096);
+
+ if (addr == NULL)
+ panic("Cannot allocate slave stack");
+
+ if (interrupt_stack[0] == 0)
+ interrupt_stack[0] = (vm_offset_t) intstack;
+ interrupt_stack[cpu_number()] = addr;
+ return addr;
+}
+
+/* dummy main routine for slave processors */
int
-intr_findvec(start, end)
- int start, end;
+slave_main(void)
{
- extern u_long *vector_list[], interrupt_handler, unknown_handler;
- int vec;
-
- if (start < 0 || end > 255 || start > end)
- return (-1);
- for (vec = end; vec > start; --vec)
- if (vector_list[vec] == &unknown_handler
-/* || vector_list[vec] == &interrupt_handler */)
- return (vec);
- return (-1);
+ printf("slave CPU%d started\n", cpu_number());
+	while (1);	/* spin forever */
+ return 0;
}
/*
- * Chain the interrupt handler in. But first check if the vector
- * offset chosen is legal. It either must be a badtrap (not allocated
- * for a `system' purpose), or it must be a hardtrap (ie. already
- * allocated to deal with chained interrupt handlers).
+ * find a useable interrupt vector in the range start, end. It starts at
+ * the end of the range, and searches backwards (to increase the chances
+ * of not conflicting with more normal users)
+ *
+ * XXX This is not used yet. It will provide a facility to 'autovector'
+ * VME boards. smurph
*/
-#if 0
-
int
-intr_establish(vec, ih)
- int vec;
- struct intrhand *ih;
+intr_findvec(start, end)
+int start, end;
{
- extern u_long *vector_list[], interrupt_handler, unknown_handler;
- struct intrhand *ihx;
-
- if (vector_list[vec] != &interrupt_handler && vector_list[vec] != &unknown_handler) {
- printf("intr_establish: vec %d unavailable\n", vec);
- return (-1);
- }
- vector_list[vec] = &interrupt_handler;
-#if DIAGNOSTIC
- printf("assigning vec %x to interrupt_handler\n", vec);
-#endif
- ih->ih_next = NULL; /* just in case */
-
- /* attach at tail */
- if (ihx = intr_handlers[vec]) {
- while (ihx->ih_next)
- ihx = ihx->ih_next;
- ihx->ih_next = ih;
- } else
- intr_handlers[vec] = ih;
- return (INTR_EST_SUCC);
+ register struct intrhand *intr;
+ int vec;
+
+ if (start < 0 || end > 255 || start > end)
+ return (-1);
+ for (vec = end; vec > start; --vec)
+ if (intr_handlers[vec] == (struct intrhand *)0)
+ return (vec);
+ return (-1);
}
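Per the XXX note above there is no caller yet; a hypothetical VME 'autovector' user might look like this (the 0x40..0xff range is invented for illustration):

    int vec;

    if ((vec = intr_findvec(0x40, 0xff)) == -1)
            panic("no free interrupt vector");
    /* program the board to respond with 'vec', then intr_establish(vec, ih) */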
-#else
/*
* Insert ihand in the list of handlers at vector vec.
* Return return different error codes for the different
* errors and let the caller decide what to do.
*/
-
int
intr_establish(int vec, struct intrhand *ihand)
{
- register struct intrhand *intr;
-
- if (vec < 0 || vec > 255) {
-#if DIAGNOSTIC
- panic("intr_establish: vec (%x) not between 0 and 0xff",
- vec);
-#endif /* DIAGNOSTIC */
- return (INTR_EST_BADVEC);
- }
-
- if (intr = intr_handlers[vec]) {
- if (intr->ih_ipl != ihand->ih_ipl) {
-#if DIAGNOSTIC
- panic("intr_establish: there are other handlers with vec (%x) at ipl %x, but you want it at %x",
- intr->ih_ipl, vec, ihand->ih_ipl);
-#endif /* DIAGNOSTIC */
- return (INTR_EST_BADIPL);
- }
-
- /*
- * Go to the end of the chain
- */
- while (intr->ih_next)
- intr = intr->ih_next;
- }
-
- ihand->ih_next = 0;
-
- if (intr)
- intr->ih_next = ihand;
- else
- intr_handlers[vec] = ihand;
-
- return (INTR_EST_SUCC);
+ register struct intrhand *intr;
+
+ if (vec < 0 || vec > 255) {
+ #if DIAGNOSTIC
+ panic("intr_establish: vec (%x) not between 0 and 0xff",
+ vec);
+ #endif /* DIAGNOSTIC */
+ return (INTR_EST_BADVEC);
+ }
+
+ if (intr = intr_handlers[vec]) {
+ if (intr->ih_ipl != ihand->ih_ipl) {
+ #if DIAGNOSTIC
+			panic("intr_establish: there are other handlers with vec (%x) at ipl %x, but you want it at %x",
+			      vec, intr->ih_ipl, ihand->ih_ipl);
+ #endif /* DIAGNOSTIC */
+ return (INTR_EST_BADIPL);
+ }
+
+ /*
+ * Go to the end of the chain
+ */
+ while (intr->ih_next)
+ intr = intr->ih_next;
+ }
+
+ ihand->ih_next = 0;
+
+ if (intr)
+ intr->ih_next = ihand;
+ else
+ intr_handlers[vec] = ihand;
+
+ return (INTR_EST_SUCC);
}
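A typical registration from a driver attach routine, using the intrhand fields referenced above (the driver names here are hypothetical):

    static struct intrhand foo_ih;

    foo_ih.ih_fn = foo_intr;        /* int foo_intr(void *arg); */
    foo_ih.ih_arg = sc;
    foo_ih.ih_ipl = 3;              /* must match handlers already chained */
    foo_ih.ih_wantframe = 0;        /* nonzero to also receive the frame */
    if (intr_establish(vec, &foo_ih) != INTR_EST_SUCC)
            printf("foo: can't establish interrupt at vec 0x%x\n", vec);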
-#endif
+
+#ifdef MVME188
/*
- * Device interrupt handler
+ * Device interrupt handler for MVME188
*
* when we enter, interrupts are disabled;
* when we leave, they should be disabled,
@@ -1272,193 +1353,349 @@ intr_establish(int vec, struct intrhand *ihand)
* the routine.
*/
-void
-ext_int(u_int v, struct m88100_saved_state *eframe)
+/* Hard coded vector table for onboard devices. */
+unsigned obio_vec[32] = {SYSCV_ABRT,SYSCV_ACF,0,SYSCV_TIMER1,0,0,0,0,
+ 0,0,SYSCV_TIMER2,SYSCV_SYSF,0,0,SYSCV_SCC,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+};
+#define GET_MASK(cpu, val)	(*int_mask_reg[cpu] & (val))
+
+void
+m188_ext_int(u_int v, struct m88100_saved_state *eframe)
{
- register u_char mask, level, xxxvec;
- register struct intrhand *intr;
- int ret;
- u_char vec;
-
- /* get level and mask */
-
- asm volatile("ld.b %0,%1" : "=r" (mask) : "" (*pcc2intr_mask));
- asm volatile("ld.b %0,%1" : "=r" (level) : "" (*pcc2intr_ipl));
-
- /*
- * It is really bizarre for the mask and level to the be the same.
- * pcc2 for 187 blocks all interrupts at and below the mask value,
- * so we should not be getting an interrupt at the level that is
- * already blocked. I can't explain this case XXX nivas
- */
-
- if ((mask == level) && level) {
- printf("mask == level, %d\n", level);
- goto beatit;
- }
-
- /*
- * Interrupting level cannot be 0--0 doesn't produce an interrupt.
- * Weird! XXX nivas
- */
-
- if (level == 0) {
- printf("Bogons... level %x and mask %x\n", level, mask);
- goto beatit;
- }
-
- /* and block interrupts at level or lower */
- setipl((u_char)level);
- /* and stash it away in the trap frame */
- eframe->mask = mask;
+ register int cpu = 0; /*cpu_number();*/
+ register unsigned int cur_mask;
+ register unsigned int level, old_spl;
+ register struct intrhand *intr;
+ int ret, intnum;
+ unsigned vec;
+
+ cur_mask = ISR_GET_CURRENT_MASK(cpu);
+ old_spl = m188_curspl[cpu];
+ eframe->mask = old_spl;
+
+ if (! cur_mask) {
+ /*
+ * Spurious interrupts - may be caused by debug output clearing
+ * DUART interrupts.
+ */
+ flush_pipeline();
+ return;
+ }
+
+	/*
+	 * We want to service all interrupts marked in the IST register.
+	 * They are all valid because the mask would have prevented them
+	 * from being generated otherwise. We will service them in order
+	 * of priority.
+	 */
+ do {
+ /*
+ printf("interrupt: mask = 0x%08x spl = %d imr = 0x%x\n", ISR_GET_CURRENT_MASK(cpu),
+ old_spl, *int_mask_reg[cpu]);
+ */
+ level = safe_level(cur_mask, old_spl);
+
+ if (old_spl >= level) {
+ register int i;
+
+ printf("safe level %d <= old level %d\n", level, old_spl);
+ printf("cur_mask = 0x%b\n", cur_mask, IST_STRING);
+ for (i = 0; i < 4; i++)
+ printf("IEN%d = 0x%b ", i, *int_mask_reg[i], IST_STRING);
+ printf("\nCPU0 spl %d CPU1 spl %d CPU2 spl %d CPU3 spl %d\n",
+ m188_curspl[0], m188_curspl[1],
+ m188_curspl[2], m188_curspl[3]);
+ for (i = 0; i < 8; i++)
+ printf("int_mask[%d] = 0x%08x\n", i, int_mask_val[i]);
+ printf("--CPU %d halted--", cpu_number());
+ spl7();
+ while (1)
+ ;
+ }
+
+ setipl((u_char)level);
+
+ if (level > 7 || (char)level < 0) {
+ panic("int level (%x) is not between 0 and 7", level);
+ }
+
+ /* generate IACK and get the vector */
+
+ /*
+ * This is tricky. If you don't catch all the
+ * interrupts, you die. Game over. Insert coin...
+ * XXX smurph
+ */
+
+ intnum = ff1(cur_mask);
+		if ((1 << intnum) & OBIO_INTERRUPT_MASK) {
+			vec = obio_vec[intnum];
+			if (vec == 0) {
+				printf("unknown onboard interrupt: mask = 0x%b\n", 1 << intnum, IST_STRING);
+				panic("m188_ext_int");
+			}
+		} else if ((1 << intnum) & HW_FAILURE_MASK) {
+			vec = obio_vec[intnum];
+			if (vec == 0) {
+				printf("unknown hardware failure: mask = 0x%b\n", 1 << intnum, IST_STRING);
+				panic("m188_ext_int");
+			}
+		} else if ((1 << intnum) & VME_INTERRUPT_MASK) {
+ asm volatile("tb1 0, r0, 0");
+ if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
+ printf("Unable to get vector for this vmebus interrupt (level %x)\n", level);
+ goto out_m188;
+ }
+ } else {
+ printf("unknown interrupt: mask = 0x%b\n", 1 << intnum, IST_STRING);
+ panic("m188_ext_int");
+ }
#if 0
- asm volatile("st.b %1,%0" : "=m" (*pcc2intr_mask) : "r" (level));
+ if (cur_mask & ABRT_BIT) { /* abort button interrupt */
+ vec = 110;
+ } else if (cur_mask & DTI_BIT) { /* interval timer interrupt */
+ vec = SYSCV_TIMER1;
+ } else if (cur_mask & CIOI_BIT) { /* statistics timer interrupt */
+ vec = SYSCV_TIMER2;
+ } else if (cur_mask & DI_BIT) { /* duart interrupt */
+ vec = SYSCV_SCC;
+ } else { /* vmebus interrupt */
+ asm volatile("tb1 0, r0, 0");
+ if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
+ printf("Unable to get vector for this vmebus interrupt (level %x)\n", level);
+ goto out_m188;
+ }
+ }
#endif
- if (level > 7 || (char)level < 0) {
- panic("int level (%x) is not between 0 and 7", level);
- }
-
- /* generate IACK and get the vector */
-
-#if XXX
- asm volatile("ld.b %0,%1" : "=r" (vec) : "" (*ivec[level]));
- asm volatile("tb1 0, r0, 0");
- asm volatile("tb1 0, r0, 0");
- asm volatile("tb1 0, r0, 0");
-
- asm volatile("tb1 0, r0, 0");
-
- if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
- printf("Unable to get vector for this interrupt (level %x)\n",
- level);
- goto out;
- }
-#endif XXX
-
- asm volatile("tb1 0, r0, 0");
- if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
- printf("Unable to get vector for this interrupt (level %x)\n",
- level);
- goto out;
- }
- asm volatile("tb1 0, r0, 0");
- asm volatile("tb1 0, r0, 0");
- asm volatile("tb1 0, r0, 0");
- /*vec = xxxvec;*/
-
- if (vec > 0xFF) {
- panic("interrupt vector %x greater than 255", vec);
- }
-
- enable_interrupt();
-
- if ((intr = intr_handlers[vec]) == 0) {
- printf("Spurious interrupt (level %x and vec %x)\n",
- level, vec);
- }
- if (intr && intr->ih_ipl != level) {
- panic("Handler ipl %x not the same as level %x",
- intr->ih_ipl, level);
- }
-
- /*
- * Walk through all interrupt handlers in the chain for the
- * given vector, calling each handler in turn, till some handler
- * returns a value != 0.
- */
-
- for (ret = 0; intr; intr = intr->ih_next) {
- if (intr->ih_wantframe)
- ret = (*intr->ih_fn)(intr->ih_arg, (void *)eframe);
- else
- ret = (*intr->ih_fn)(intr->ih_arg);
- if (ret)
- break;
- }
-
- if (ret == 0) {
- printf("Unclaimed interrupt (level %x and vec %x)\n",
- level, vec);
- }
-
- /*
- * process any remaining data access exceptions before
- * returning to assembler
- */
- disable_interrupt();
+ asm volatile("tb1 0, r0, 0");
+ asm volatile("tb1 0, r0, 0");
+ asm volatile("tb1 0, r0, 0");
+ if (vec > 0xFF) {
+ panic("interrupt vector %x greater than 255", vec);
+ }
+#if 0
+ enable_interrupt(); /* should we ?? */
+#endif
+
+ if ((intr = intr_handlers[vec]) == 0)
+ printf("Spurious interrupt (level %x and vec %x)\n", level, vec);
+
+ /*
+ * Walk through all interrupt handlers in the chain for the
+ * given vector, calling each handler in turn, till some handler
+ * returns a value != 0.
+ */
+ for (ret = 0; intr; intr = intr->ih_next) {
+ if (intr->ih_wantframe)
+ ret = (*intr->ih_fn)(intr->ih_arg, (void *)eframe);
+ else
+ ret = (*intr->ih_fn)(intr->ih_arg);
+ if (ret)
+ break;
+ }
+ if (ret == 0)
+ printf("Unclaimed interrupt (level %x and vec %x)\n", level, vec);
+ } while (cur_mask = ISR_GET_CURRENT_MASK(cpu));
+
+
+ /*
+ * process any remaining data access exceptions before
+ * returning to assembler
+ */
+ disable_interrupt();
+out_m188:
+ if (eframe->dmt0 & DMT_VALID) {
+ trap(T_DATAFLT, eframe);
+ data_access_emulation(eframe);
+ eframe->dmt0 &= ~DMT_VALID;
+ }
+
+ /*
+ * Restore the mask level to what it was when the interrupt
+ * was taken.
+ */
+ setipl((u_char)eframe->mask);
+ flush_pipeline();
+ return;
+}
+
+#endif /* MVME188 */
+
+/*
+ * Device interrupt handler for MVME1x7
+ *
+ * when we enter, interrupts are disabled;
+ * when we leave, they should be disabled,
+ * but they need not be disabled throughout
+ * the routine.
+ */
+
+#if defined(MVME187) || defined(MVME197)
+void
+sbc_ext_int(u_int v, struct m88100_saved_state *eframe)
+{
+ register u_char mask, level, xxxvec;
+ register struct intrhand *intr;
+ int ret;
+ u_char vec;
+
+ /* get level and mask */
+ asm volatile("ld.b %0,%1" : "=r" (mask) : "" (*pcc2intr_mask));
+ asm volatile("ld.b %0,%1" : "=r" (level) : "" (*pcc2intr_ipl));
+
+ /*
+	 * It is really bizarre for the mask and level to be the same.
+ * pcc2 for 187 blocks all interrupts at and below the mask value,
+ * so we should not be getting an interrupt at the level that is
+ * already blocked. I can't explain this case XXX nivas
+ */
+
+ if ((mask == level) && level) {
+ printf("mask == level, %d\n", level);
+ goto beatit;
+ }
+
+ /*
+ * Interrupting level cannot be 0--0 doesn't produce an interrupt.
+ * Weird! XXX nivas
+ */
+
+ if (level == 0) {
+ printf("Bogons... level %x and mask %x\n", level, mask);
+ goto beatit;
+ }
+
+ /* and block interrupts at level or lower */
+ setipl((u_char)level);
+ /* and stash it away in the trap frame */
+ eframe->mask = mask;
+
+ if (level > 7 || (char)level < 0) {
+ panic("int level (%x) is not between 0 and 7", level);
+ }
+
+ /* generate IACK and get the vector */
+ asm volatile("tb1 0, r0, 0");
+ if (guarded_access(ivec[level], 1, &vec) == EFAULT) {
+ printf("Unable to get vector for this interrupt (level %x)\n", level);
+ goto out;
+ }
+ asm volatile("tb1 0, r0, 0");
+ asm volatile("tb1 0, r0, 0");
+ asm volatile("tb1 0, r0, 0");
+ /*vec = xxxvec;*/
+
+ if (vec > 0xFF) {
+ panic("interrupt vector %x greater than 255", vec);
+ }
+
+ enable_interrupt();
+
+ if ((intr = intr_handlers[vec]) == 0) {
+ printf("Spurious interrupt (level %x and vec %x)\n",
+ level, vec);
+ }
+ if (intr && intr->ih_ipl != level) {
+ panic("Handler ipl %x not the same as level %x. vec = 0x%x",
+ intr->ih_ipl, level, vec);
+ }
+
+ /*
+ * Walk through all interrupt handlers in the chain for the
+ * given vector, calling each handler in turn, till some handler
+ * returns a value != 0.
+ */
+
+ for (ret = 0; intr; intr = intr->ih_next) {
+ if (intr->ih_wantframe)
+ ret = (*intr->ih_fn)(intr->ih_arg, (void *)eframe);
+ else
+ ret = (*intr->ih_fn)(intr->ih_arg);
+ if (ret)
+ break;
+ }
+
+ if (ret == 0) {
+ printf("Unclaimed interrupt (level %x and vec %x)\n",
+ level, vec);
+ }
+
+ /*
+ * process any remaining data access exceptions before
+ * returning to assembler
+ */
+ disable_interrupt();
out:
- if (eframe->dmt0 & DMT_VALID) {
- trap(T_DATAFLT, eframe);
- data_access_emulation(eframe);
- eframe->dmt0 &= ~DMT_VALID;
- }
- mask = eframe->mask;
-
- /*
- * Restore the mask level to what it was when the interrupt
- * was taken.
- */
- setipl((u_char)mask);
-#if 0
- asm volatile("st.b %1,%0" : "=m" (*pcc2intr_mask) : "r" (mask));
-#endif
-#if 0
- splx((u_char)mask);
-#endif /* 0 */
+ if (cputyp != CPU_197) {
+ if (eframe->dmt0 & DMT_VALID) {
+ trap(T_DATAFLT, eframe);
+ data_access_emulation(eframe);
+ eframe->dmt0 &= ~DMT_VALID;
+ }
+ }
+ mask = eframe->mask;
+
+ /*
+ * Restore the mask level to what it was when the interrupt
+ * was taken.
+ */
+ setipl((u_char)mask);
beatit:
- return;
+ return;
}
+#endif /* defined(MVME187) || defined(MVME197) */
cpu_exec_aout_makecmds(p, epp)
- struct proc *p;
- struct exec_package *epp;
+struct proc *p;
+struct exec_package *epp;
{
- return ENOEXEC;
+ return ENOEXEC;
}
sys_sysarch(p, v, retval)
- struct proc *p;
- void *v;
- register_t *retval;
+struct proc *p;
+void *v;
+register_t *retval;
{
- struct sys_sysarch_args /* {
- syscallarg(int) op;
- syscallarg(char *) parm;
- } */ *uap = v;
- int error = 0;
-
- switch((int)SCARG(uap, op)) {
- default:
- error = EINVAL;
- break;
- }
- return(error);
+ struct sys_sysarch_args /* {
+ syscallarg(int) op;
+ syscallarg(char *) parm;
+ } */ *uap = v;
+ int error = 0;
+
+ switch ((int)SCARG(uap, op)) {
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
}
/*
* machine dependent system variables.
*/
+
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
- int *name;
- u_int namelen;
- void *oldp;
- size_t *oldlenp;
- void *newp;
- size_t newlen;
- struct proc *p;
+int *name;
+u_int namelen;
+void *oldp;
+size_t *oldlenp;
+void *newp;
+size_t newlen;
+struct proc *p;
{
- /* all sysctl names are this level are terminal */
- if (namelen != 1)
- return (ENOTDIR); /* overloaded */
+	/* all sysctl names at this level are terminal */
+ if (namelen != 1)
+ return (ENOTDIR); /* overloaded */
- switch (name[0]) {
- default:
- return (EOPNOTSUPP);
- }
- /*NOTREACHED*/
+ switch (name[0]) {
+ default:
+ return (EOPNOTSUPP);
+ }
+ /*NOTREACHED*/
}
/*
@@ -1467,15 +1704,15 @@ cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
void
_insque(velement, vhead)
- void *velement, *vhead;
+void *velement, *vhead;
{
- register struct prochd *element, *head;
- element = velement;
- head = vhead;
- element->ph_link = head->ph_link;
- head->ph_link = (struct proc *)element;
- element->ph_rlink = (struct proc *)head;
- ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
+ register struct prochd *element, *head;
+ element = velement;
+ head = vhead;
+ element->ph_link = head->ph_link;
+ head->ph_link = (struct proc *)element;
+ element->ph_rlink = (struct proc *)head;
+ ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
}
/*
@@ -1484,57 +1721,57 @@ _insque(velement, vhead)
void
_remque(velement)
- void *velement;
+void *velement;
{
- register struct prochd *element;
- element = velement;
- ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
- ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
- element->ph_rlink = (struct proc *)0;
+ register struct prochd *element;
+ element = velement;
+ ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
+ ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
+ element->ph_rlink = (struct proc *)0;
}
int
copystr(fromaddr, toaddr, maxlength, lencopied)
- const void *fromaddr;
- void *toaddr;
- size_t maxlength;
- size_t *lencopied;
+const void *fromaddr;
+void *toaddr;
+size_t maxlength;
+size_t *lencopied;
{
- u_int tally;
+ u_int tally;
- tally = 0;
+ tally = 0;
- while (maxlength--) {
- *(u_char *)toaddr = *(u_char *)fromaddr++;
- tally++;
- if (*(u_char *)toaddr++ == 0) {
- if(lencopied) *lencopied = tally;
- return(0);
- }
- }
+ while (maxlength--) {
+ *(u_char *)toaddr = *(u_char *)fromaddr++;
+ tally++;
+ if (*(u_char *)toaddr++ == 0) {
+ if (lencopied) *lencopied = tally;
+ return (0);
+ }
+ }
- if (lencopied)
- *lencopied = tally;
+ if (lencopied)
+ *lencopied = tally;
- return(ENAMETOOLONG);
+ return (ENAMETOOLONG);
}
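Note that the returned length includes the terminating NUL, since tally is bumped before the terminator test:

    char dst[16];
    size_t len;

    if (copystr("sd0a", dst, sizeof(dst), &len) == 0) {
            /* dst holds "sd0a", len == 5 (four characters plus the NUL) */
    }
    /* a source longer than maxlength yields ENAMETOOLONG and leaves
       dst unterminated */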
void
setrunqueue(p)
- register struct proc *p;
+register struct proc *p;
{
- register struct prochd *q;
- register struct proc *oldlast;
- register int which = p->p_priority >> 2;
-
- if (p->p_back != NULL)
- panic("setrunqueue %x", p);
- q = &qs[which];
- whichqs |= 1 << which;
- p->p_forw = (struct proc *)q;
- p->p_back = oldlast = q->ph_rlink;
- q->ph_rlink = p;
- oldlast->p_forw = p;
+ register struct prochd *q;
+ register struct proc *oldlast;
+ register int which = p->p_priority >> 2;
+
+ if (p->p_back != NULL)
+ panic("setrunqueue %x", p);
+ q = &qs[which];
+ whichqs |= 1 << which;
+ p->p_forw = (struct proc *)q;
+ p->p_back = oldlast = q->ph_rlink;
+ q->ph_rlink = p;
+ oldlast->p_forw = p;
}
/*
@@ -1543,20 +1780,20 @@ setrunqueue(p)
*/
void
remrunqueue(vp)
- struct proc *vp;
+struct proc *vp;
{
- register struct proc *p = vp;
- register int which = p->p_priority >> 2;
- register struct prochd *q;
-
- if ((whichqs & (1 << which)) == 0)
- panic("remrq %x", p);
- p->p_forw->p_back = p->p_back;
- p->p_back->p_forw = p->p_forw;
- p->p_back = NULL;
- q = &qs[which];
- if (q->ph_link == (struct proc *)q)
- whichqs &= ~(1 << which);
+ register struct proc *p = vp;
+ register int which = p->p_priority >> 2;
+ register struct prochd *q;
+
+ if ((whichqs & (1 << which)) == 0)
+ panic("remrq %x", p);
+ p->p_forw->p_back = p->p_back;
+ p->p_back->p_forw = p->p_forw;
+ p->p_back = NULL;
+ q = &qs[which];
+ if (q->ph_link == (struct proc *)q)
+ whichqs &= ~(1 << which);
}
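In both routines above, p_priority >> 2 folds the 0-127 priority space onto 32 run queues, and whichqs carries one summary bit per queue so the switch code can find work with a find-first-set scan:

    /* e.g. a process at p_priority 50 goes on queue 50 >> 2 == 12, */
    /* and setrunqueue() marks it:  whichqs |= 1 << 12              */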
/* dummies for now */
@@ -1567,217 +1804,229 @@ bugsyscall()
void
myetheraddr(cp)
- u_char *cp;
+u_char *cp;
{
- struct bugniocall niocall;
- struct bugbrdid brdid;
- bugbrdid(&brdid);
- bcopy(&brdid.etheraddr, cp, 6);
-/*
- niocall.clun = 0;
- niocall.dlun = 0;
- niocall.ci = 0;
- niocall.cd = 0;
- niocall.cid = NETCTRL_GETHDW;
- niocall.memaddr = (unsigned long)cp;
- niocall.nbytes = 6;
- bugnetctrl(&niocall);
-*/
-
-/* if (cp[0] == '\0') {
- strncpy(cp, cp2, 6);
- } */
+ struct bugniocall niocall;
+ struct bugbrdid brdid;
+ bugbrdid(&brdid);
+ bcopy(&brdid.etheraddr, cp, 6);
}
void netintr()
{
#ifdef INET
- if (netisr & (1 << NETISR_ARP)) {
- netisr &= ~(1 << NETISR_ARP);
- arpintr();
- }
- if (netisr & (1 << NETISR_IP)) {
- netisr &= ~(1 << NETISR_IP);
- ipintr();
- }
+ if (netisr & (1 << NETISR_ARP)) {
+ netisr &= ~(1 << NETISR_ARP);
+ arpintr();
+ }
+ if (netisr & (1 << NETISR_IP)) {
+ netisr &= ~(1 << NETISR_IP);
+ ipintr();
+ }
#endif
#ifdef INET6
- if (netisr & (1 << NETISR_IPV6)) {
- netisr &= ~(1 << NETISR_IPV6);
- ipv6intr();
- }
+ if (netisr & (1 << NETISR_IPV6)) {
+ netisr &= ~(1 << NETISR_IPV6);
+ ipv6intr();
+ }
#endif
#ifdef NETATALK
- if (netisr & (1 << NETISR_ATALK)) {
- netisr &= ~(1 << NETISR_ATALK);
- atintr();
- }
+ if (netisr & (1 << NETISR_ATALK)) {
+ netisr &= ~(1 << NETISR_ATALK);
+ atintr();
+ }
#endif
#ifdef NS
- if (netisr & (1 << NETISR_NS)) {
- netisr &= ~(1 << NETISR_NS);
- nsintr();
- }
+ if (netisr & (1 << NETISR_NS)) {
+ netisr &= ~(1 << NETISR_NS);
+ nsintr();
+ }
#endif
#ifdef ISO
- if (netisr & (1 << NETISR_ISO)) {
- netisr &= ~(1 << NETISR_ISO);
- clnlintr();
- }
+ if (netisr & (1 << NETISR_ISO)) {
+ netisr &= ~(1 << NETISR_ISO);
+ clnlintr();
+ }
#endif
#ifdef CCITT
- if (netisr & (1 << NETISR_CCITT)) {
- netisr &= ~(1 << NETISR_CCITT);
- ccittintr();
- }
+ if (netisr & (1 << NETISR_CCITT)) {
+ netisr &= ~(1 << NETISR_CCITT);
+ ccittintr();
+ }
#endif
#include "ppp.h"
#if NPPP > 0
- if (netisr & (1 << NETISR_PPP)) {
- netisr &= ~(1 << NETISR_PPP);
- pppintr();
- }
+ if (netisr & (1 << NETISR_PPP)) {
+ netisr &= ~(1 << NETISR_PPP);
+ pppintr();
+ }
#endif
#include "bridge.h"
#if NBRIDGE > 0
- if (netisr & (1 << NETISR_BRIDGE)) {
- netisr &= ~(1 << NETISR_BRIDGE);
- bridgeintr();
- }
+ if (netisr & (1 << NETISR_BRIDGE)) {
+ netisr &= ~(1 << NETISR_BRIDGE);
+ bridgeintr();
+ }
#endif
}
void
dosoftint()
{
- if (ssir & SIR_NET) {
- siroff(SIR_NET);
- cnt.v_soft++;
- netintr();
- }
-
- if (ssir & SIR_CLOCK) {
- siroff(SIR_CLOCK);
- cnt.v_soft++;
- softclock();
- }
-
- return;
+ if (ssir & SIR_NET) {
+ siroff(SIR_NET);
+ cnt.v_soft++;
+ netintr();
+ }
+
+ if (ssir & SIR_CLOCK) {
+ siroff(SIR_CLOCK);
+ cnt.v_soft++;
+ softclock();
+ }
+
+ return;
}
int
spl0()
{
- int x;
- int level = 0;
- x = splsoftclock();
+ int x;
+ int level = 0;
+ x = splsoftclock();
- if (ssir) {
- dosoftint();
- }
+ if (ssir) {
+ dosoftint();
+ }
- setipl(0);
+ setipl(0);
- return(x);
+ return (x);
}
badwordaddr(void *addr)
{
- return badaddr((vm_offset_t)addr, 4);
+ return badaddr((vm_offset_t)addr, 4);
}
MY_info(f, p, flags, s)
- struct trapframe *f;
- caddr_t p;
- int flags;
- char *s;
+struct trapframe *f;
+caddr_t p;
+int flags;
+char *s;
{
- regdump(f);
- printf("proc %x flags %x type %s\n", p, flags, s);
-}
+ regdump(f);
+ printf("proc %x flags %x type %s\n", p, flags, s);
+}
MY_info_done(f, flags)
- struct trapframe *f;
- int flags;
+struct trapframe *f;
+int flags;
{
- regdump(f);
-}
+ regdump(f);
+}
void
nmihand(void *framep)
{
- struct m88100_saved_state *frame = framep;
+ struct m88100_saved_state *frame = framep;
#if DDB
- DEBUG_MSG("Abort Pressed\n");
- Debugger();
+ DEBUG_MSG("Abort Pressed\n");
+ Debugger();
#else
- DEBUG_MSG("Spurious NMI?\n");
+ DEBUG_MSG("Spurious NMI?\n");
#endif /* DDB */
}
regdump(struct trapframe *f)
{
#define R(i) f->r[i]
- printf("R00-05: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- R(0),R(1),R(2),R(3),R(4),R(5));
- printf("R06-11: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- R(6),R(7),R(8),R(9),R(10),R(11));
- printf("R12-17: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- R(12),R(13),R(14),R(15),R(16),R(17));
- printf("R18-23: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- R(18),R(19),R(20),R(21),R(22),R(23));
- printf("R24-29: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- R(24),R(25),R(26),R(27),R(28),R(29));
- printf("R30-31: 0x%08x 0x%08x\n",R(30),R(31));
- printf("sxip %x snip %x sfip %x\n", f->sxip, f->snip, f->sfip);
- if (f->vector == 0x3) { /* print dmt stuff for data access fault */
- printf("dmt0 %x dmd0 %x dma0 %x\n", f->dmt0, f->dmd0, f->dma0);
- printf("dmt1 %x dmd1 %x dma1 %x\n", f->dmt1, f->dmd1, f->dma1);
- printf("dmt2 %x dmd2 %x dma2 %x\n", f->dmt2, f->dmd2, f->dma2);
- }
- if (longformat) {
- printf("fpsr %x ", f->fpsr);
- printf("fpcr %x ", f->fpcr);
- printf("epsr %x ", f->epsr);
- printf("ssbr %x\n", f->ssbr);
- printf("dmt0 %x ", f->dmt0);
- printf("dmd0 %x ", f->dmd0);
- printf("dma0 %x ", f->dma0);
- printf("dmt1 %x ", f->dmt1);
- printf("dmd1 %x ", f->dmd1);
- printf("dma1 %x ", f->dma1);
- printf("dmt2 %x ", f->dmt2);
- printf("dmd2 %x ", f->dmd2);
- printf("dma2 %x\n", f->dma2);
- printf("fpecr %x ", f->fpecr);
- printf("fphs1 %x ", f->fphs1);
- printf("fpls1 %x ", f->fpls1);
- printf("fphs2 %x ", f->fphs2);
- printf("fpls2 %x ", f->fpls2);
- printf("fppt %x ", f->fppt);
- printf("fprh %x ", f->fprh);
- printf("fprl %x ", f->fprl);
- printf("fpit %x\n", f->fpit);
- printf("vector %x ", f->vector);
- printf("mask %x ", f->mask);
- printf("mode %x ", f->mode);
- printf("scratch1 %x ", f->scratch1);
- printf("pad %x\n", f->pad);
- }
+ printf("R00-05: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(0),R(1),R(2),R(3),R(4),R(5));
+ printf("R06-11: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(6),R(7),R(8),R(9),R(10),R(11));
+ printf("R12-17: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(12),R(13),R(14),R(15),R(16),R(17));
+ printf("R18-23: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(18),R(19),R(20),R(21),R(22),R(23));
+ printf("R24-29: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ R(24),R(25),R(26),R(27),R(28),R(29));
+ printf("R30-31: 0x%08x 0x%08x\n",R(30),R(31));
+ if (cputyp == CPU_197) {
+ printf("exip %x enip %x\n", f->sxip, f->snip);
+ } else {
+ printf("sxip %x snip %x sfip %x\n", f->sxip, f->snip, f->sfip);
+ }
+ if (f->vector == 0x3 && cputyp != CPU_197) {
+ /* print dmt stuff for data access fault */
+ printf("dmt0 %x dmd0 %x dma0 %x\n", f->dmt0, f->dmd0, f->dma0);
+ printf("dmt1 %x dmd1 %x dma1 %x\n", f->dmt1, f->dmd1, f->dma1);
+ printf("dmt2 %x dmd2 %x dma2 %x\n", f->dmt2, f->dmd2, f->dma2);
+ printf("fault type %d\n", (f->dpfsr >> 16) & 0x7);
+ dae_print(f);
+ }
+ if (longformat && cputyp != CPU_197) {
+ printf("fpsr %x ", f->fpsr);
+ printf("fpcr %x ", f->fpcr);
+ printf("epsr %x ", f->epsr);
+ printf("ssbr %x\n", f->ssbr);
+ printf("fpecr %x ", f->fpecr);
+ printf("fphs1 %x ", f->fphs1);
+ printf("fpls1 %x ", f->fpls1);
+ printf("fphs2 %x ", f->fphs2);
+ printf("fpls2 %x\n", f->fpls2);
+ printf("fppt %x ", f->fppt);
+ printf("fprh %x ", f->fprh);
+ printf("fprl %x ", f->fprl);
+ printf("fpit %x\n", f->fpit);
+ printf("vector %x ", f->vector);
+ printf("mask %x ", f->mask);
+ printf("mode %x ", f->mode);
+ printf("scratch1 %x ", f->scratch1);
+ printf("pad %x\n", f->pad);
+ }
+ if (longformat && cputyp == CPU_197) {
+ printf("fpsr %x ", f->fpsr);
+ printf("fpcr %x ", f->fpcr);
+ printf("fpecr %x ", f->fpecr);
+ printf("epsr %x\n", f->epsr);
+ printf("dsap %x ", f->dmt1);
+ printf("dsr %x ", f->dsr);
+ printf("dlar %x ", f->dlar);
+ printf("dpar %x\n", f->dpar);
+ printf("isap %x ", f->dmt0);
+ printf("isr %x ", f->isr);
+ printf("ilar %x ", f->ilar);
+ printf("ipar %x\n", f->ipar);
+ printf("vector %x ", f->vector);
+ printf("mask %x ", f->mask);
+ printf("mode %x ", f->mode);
+ printf("scratch1 %x ", f->scratch1);
+ printf("pad %x\n", f->pad);
+ }
+ if (cputyp == CPU_188 ) {
+ unsigned int istr, cur_mask;
+
+ istr = *(volatile int *)IST_REG;
+ cur_mask = GET_MASK(0, istr);
+ printf("emask = 0x%b\n", f->mask, IST_STRING);
+ printf("istr = 0x%b\n", istr, IST_STRING);
+ printf("cmask = 0x%b\n", cur_mask, IST_STRING);
+ }
}
#if DDB
inline int
db_splhigh(void)
{
- return (db_setipl(IPL_HIGH));
+ return (db_setipl(IPL_HIGH));
}
inline int
db_splx(int s)
{
- return (db_setipl(s));
+ return (db_setipl(s));
}
#endif /* DDB */
@@ -1789,69 +2038,164 @@ db_splx(int s)
void
-m187_bootstrap(void)
+mvme_bootstrap(void)
{
- extern char version[];
- extern char *edata, *end;
- extern int cold;
- extern int kernelstart;
- extern vm_offset_t size_memory(void);
- extern struct consdev *cn_tab;
- struct bugbrdid brdid;
-
- cold = 1; /* we are still booting */
-#if NBUGTTY > 0
- cn_tab = &bugcons;
-#endif
- buginit();
-
- bugbrdid(&brdid);
- cputyp = brdid.brdno;
+ extern char *edata, *end;
+ extern int cold;
+ extern unsigned number_cpus;
+ extern int kernelstart;
+ extern int lock_wait_time;
+ extern vm_offset_t size_memory(void);
+ extern struct consdev *cn_tab;
+ extern unsigned vector_list;
+ struct bugbrdid brdid;
+
+ cold = 1; /* we are still booting */
+
+	/* zero out the machine dependent function pointers */
+ bzero(&mdfp, sizeof(struct funcp));
+
+ buginit(); /* init the bug routines */
+ bugbrdid(&brdid);
+ cputyp = brdid.brdno;
+
+	/* to support the M8120; it is based on the MVME187 */
+ if (cputyp == 0x8120)
+ cputyp = CPU_187;
+
+ /*
+ * set up interrupt and fp exception handlers
+ * based on the machine.
+ */
+ switch (cputyp) {
+#ifdef MVME188
+ case CPU_188:
+ mdfp.interrupt_func = &m188_ext_int;
+ mdfp.fp_precise_func = &m88100_Xfp_precise;
+ /* clear and disable all interrupts */
+ *int_mask_reg[0] = 0;
+ *int_mask_reg[1] = 0;
+ *int_mask_reg[2] = 0;
+ *int_mask_reg[3] = 0;
+ break;
+#endif /* MVME188 */
+#ifdef MVME187
+ case CPU_187:
+ mdfp.interrupt_func = &sbc_ext_int;
+ mdfp.fp_precise_func = &m88100_Xfp_precise;
+ break;
+#endif /* MVME187 */
+#ifdef MVME197
+ case CPU_197:
+ mdfp.interrupt_func = &sbc_ext_int;
+ mdfp.fp_precise_func = &m88110_Xfp_precise;
+ set_tcfp(); /* Set Time Critical Floating Point Mode */
+ break;
+#endif /* MVME197 */
+ default:
+ panic("mvme_bootstrap: Can't determine cpu type.");
+ }
+
+	/* start up a fake console driver; it will be replaced by consinit() */
+ cn_tab = &bootcons;
+
+ vm_set_page_size();
+
+ first_addr = m88k_round_page(first_addr);
+
+ if (!no_symbols) boothowto |= RB_KDB;
+
+ last_addr = size_memory();
+ cmmu_parity_enable();
+
+ printf("%s",version);
+ identifycpu();
+ setup_board_config();
+ cmmu_init();
+ master_cpu = cmmu_cpu_number();
+ set_cpu_number(master_cpu);
+ printf("CPU%d is master CPU\n", master_cpu);
+
+#ifdef notevenclose
+ if (cputyp == CPU_188 && (boothowto & RB_MINIROOT)) {
+ int i;
+ for (i=0; i<MAX_CPUS; i++) {
+ if(!spin_cpu(i))
+ printf("CPU%d started\n", i);
+ }
+ }
+#endif
+ avail_start = first_addr;
+ avail_end = last_addr;
+#ifdef DEBUG
+ printf("MVME%x boot: memory from 0x%x to 0x%x\n", cputyp, avail_start, avail_end);
+#endif
+ /*
+ * Steal one page at the top of physical memory for msgbuf
+ */
+ avail_end -= PAGE_SIZE;
+ pmap_bootstrap((vm_offset_t)M88K_TRUNC_PAGE((unsigned)&kernelstart) /* = loadpt */,
+ &avail_start, &avail_end, &virtual_avail,
+ &virtual_end);
- vm_set_page_size();
+ /*
+ * Must initialize p_addr before autoconfig or
+ * the fault handler will get a NULL reference.
+ */
+ proc0.p_addr = proc0paddr;
+ curproc = &proc0;
+ curpcb = &proc0paddr->u_pcb;
- first_addr = m88k_round_page(first_addr);
+ /* Initialize cached PTEs for u-area mapping. */
+ save_u_area(&proc0, (vm_offset_t)proc0paddr);
- if (!no_symbols) boothowto |= RB_KDB;
+ /*
+ * Map proc0's u-area at the standard address (UADDR).
+ */
+ load_u_area(&proc0);
- last_addr = size_memory();
+ /* Initialize the "u-area" pages. */
+ bzero((caddr_t)UADDR, UPAGES*NBPG);
+#ifdef DEBUG
+ printf("leaving mvme_bootstrap()\n");
+#endif
+}
- cmmu_init();
+/*
+ * Boot console routines:
+ * Enables printing of boot messages before consinit().
+ */
+int
+bootcnprobe(cp)
+struct consdev *cp;
+{
+ cp->cn_dev = makedev(14, 0);
+ cp->cn_pri = CN_NORMAL;
+ return (1);
+}
- avail_start = first_addr;
- avail_end = last_addr;
- /*printf("%s",version);*/
- printf("M187 boot: memory from 0x%x to 0x%x\n", avail_start, avail_end);
- printf("M187 boot: howto 0x%x\n", boothowto);
+int
+bootcninit(cp)
+struct consdev *cp;
+{
+ /* Nothing to do */
+}
- /*
- * Steal one page at the top of physical memory for msgbuf
- */
- avail_end -= PAGE_SIZE;
+int
+bootcngetc(dev)
+dev_t dev;
+{
+ return (buginchr());
+}
-#if 1
- pmap_bootstrap((vm_offset_t)M88K_TRUNC_PAGE((unsigned)&kernelstart) /* = loadpt */,
- &avail_start, &avail_end, &virtual_avail,
- &virtual_end);
-#endif
+void
+bootcnputc(dev, c)
+dev_t dev;
+char c;
+{
+ int s;
- /*
- * Must initialize p_addr before autoconfig or
- * the fault handler will get a NULL reference.
- */
- proc0.p_addr = proc0paddr;
- curproc = &proc0;
- curpcb = &proc0paddr->u_pcb;
-
- /* Initialize cached PTEs for u-area mapping. */
- save_u_area(&proc0, (vm_offset_t)proc0paddr);
-
- /*
- * Map proc0's u-area at the standard address (UADDR).
- */
- load_u_area(&proc0);
-
- /* Initialize the "u-area" pages. */
- bzero((caddr_t)UADDR, UPAGES*NBPG);
-
+ if (c == '\n')
+ bugoutchr('\r');
+ bugoutchr(c);
}
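These four routines back the bootcons consdev installed by mvme_bootstrap(). A sketch of the glue (the field order shown follows the usual struct consdev layout; the actual bootcons initializer lives elsewhere in the port):

    struct consdev bootcons = {
            bootcnprobe,            /* cn_probe */
            bootcninit,             /* cn_init  */
            bootcngetc,             /* cn_getc  */
            bootcnputc,             /* cn_putc  */
    };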
diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c
index d519d7ad3ea..08c1fb8991b 100644
--- a/sys/arch/mvme88k/mvme88k/pmap.c
+++ b/sys/arch/mvme88k/mvme88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.11 1999/09/03 18:01:31 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.12 1999/09/27 19:13:23 smurph Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
@@ -45,10 +45,11 @@
/* don't want to make them general yet. */
#ifdef luna88k
-# define OMRON_PMAP
+ #define OMRON_PMAP
#endif
-# define OMRON_PMAP
+#define OMRON_PMAP
+/*#define DEBUG 1*/
#include <sys/types.h>
#include <machine/board.h>
#include <sys/param.h>
@@ -56,88 +57,99 @@
#include <vm/vm.h>
#include <vm/vm_kern.h> /* vm/vm_kern.h */
+#include <sys/simplelock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <machine/assert.h>
+#include <machine/cpu_number.h>
+#include <machine/pmap_table.h>
#include <mvme88k/dev/pcctworeg.h>
#include <mvme88k/dev/clreg.h>
- /*
- * VM externals
- */
+/*
+ * VM externals
+ */
extern vm_offset_t avail_start, avail_next, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
-extern vm_offset_t pcc2consvaddr;
-extern vm_offset_t clconsvaddr;
+extern vm_offset_t pcc2consvaddr;
+extern vm_offset_t clconsvaddr;
-char *iiomapbase;
+extern void *iomapbase;
int iiomapsize;
+extern int max_cpus;
+/*
+ * Macros to operate cpus_using field
+ */
+#define SETBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) |= (1 << (cpu_number));
+#define CLRBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) &= ~(1 << (cpu_number));
+
/*
* Static variables, functions and variables for debugging
*/
#ifdef DEBUG
-#define STATIC
+ #define STATIC
/*
* conditional debugging
*/
-#define CD_NORM 0x01
-#define CD_FULL 0x02
-
-#define CD_ACTIVATE 0x0000004 /* _pmap_activate */
-#define CD_KMAP 0x0000008 /* pmap_expand_kmap */
-#define CD_MAP 0x0000010 /* pmap_map */
-#define CD_MAPB 0x0000020 /* pmap_map_batc */
-#define CD_CACHE 0x0000040 /* pmap_cache_ctrl */
-#define CD_BOOT 0x0000080 /* pmap_bootstrap */
-#define CD_INIT 0x0000100 /* pmap_init */
-#define CD_CREAT 0x0000200 /* pmap_create */
-#define CD_FREE 0x0000400 /* pmap_free_tables */
-#define CD_DESTR 0x0000800 /* pmap_destroy */
-#define CD_RM 0x0001000 /* pmap_remove */
-#define CD_RMAL 0x0002000 /* pmap_remove_all */
-#define CD_COW 0x0004000 /* pmap_copy_on_write */
-#define CD_PROT 0x0008000 /* pmap_protect */
-#define CD_EXP 0x0010000 /* pmap_expand */
-#define CD_ENT 0x0020000 /* pmap_enter */
-#define CD_UPD 0x0040000 /* pmap_update */
-#define CD_COL 0x0080000 /* pmap_collect */
-#define CD_CMOD 0x0100000 /* pmap_clear_modify */
-#define CD_IMOD 0x0200000 /* pmap_is_modified */
-#define CD_CREF 0x0400000 /* pmap_clear_reference */
-#define CD_PGMV 0x0800000 /* pagemove */
-#define CD_CHKPV 0x1000000 /* check_pv_list */
-#define CD_CHKPM 0x2000000 /* check_pmap_consistency */
-#define CD_CHKM 0x4000000 /* check_map */
-#define CD_ALL 0x0FFFFFC
-
-/*int pmap_con_dbg = CD_ALL | CD_FULL | CD_COW | CD_BOOT;*/
-int pmap_con_dbg = CD_NORM;
+ #define CD_NORM 0x01
+ #define CD_FULL 0x02
+
+ #define CD_ACTIVATE 0x0000004 /* _pmap_activate */
+ #define CD_KMAP 0x0000008 /* pmap_expand_kmap */
+ #define CD_MAP 0x0000010 /* pmap_map */
+ #define CD_MAPB 0x0000020 /* pmap_map_batc */
+ #define CD_CACHE 0x0000040 /* pmap_cache_ctrl */
+ #define CD_BOOT 0x0000080 /* pmap_bootstrap */
+ #define CD_INIT 0x0000100 /* pmap_init */
+ #define CD_CREAT 0x0000200 /* pmap_create */
+ #define CD_FREE 0x0000400 /* pmap_free_tables */
+ #define CD_DESTR 0x0000800 /* pmap_destroy */
+ #define CD_RM 0x0001000 /* pmap_remove */
+ #define CD_RMAL 0x0002000 /* pmap_remove_all */
+ #define CD_COW 0x0004000 /* pmap_copy_on_write */
+ #define CD_PROT 0x0008000 /* pmap_protect */
+ #define CD_EXP 0x0010000 /* pmap_expand */
+ #define CD_ENT 0x0020000 /* pmap_enter */
+ #define CD_UPD 0x0040000 /* pmap_update */
+ #define CD_COL 0x0080000 /* pmap_collect */
+ #define CD_CMOD 0x0100000 /* pmap_clear_modify */
+ #define CD_IMOD 0x0200000 /* pmap_is_modified */
+ #define CD_CREF 0x0400000 /* pmap_clear_reference */
+ #define CD_PGMV 0x0800000 /* pagemove */
+ #define CD_CHKPV 0x1000000 /* check_pv_list */
+ #define CD_CHKPM 0x2000000 /* check_pmap_consistency */
+ #define CD_CHKM 0x4000000 /* check_map */
+ #define CD_ALL 0x0FFFFFC
+int pmap_con_dbg = CD_FULL | CD_ALL;
+/*
+int pmap_con_dbg = CD_FULL| CD_NORM | CD_PROT | CD_BOOT | CD_CHKPV | CD_CHKPM | CD_CHKM;
+int pmap_con_dbg = CD_NORM;*/
#else
-#define STATIC static
+ #define STATIC static
#endif /* DEBUG */
-caddr_t vmmap;
-pt_entry_t *vmpte, *msgbufmap;
+caddr_t vmmap;
+pt_entry_t *vmpte, *msgbufmap;
-STATIC struct pmap kernel_pmap_store;
+STATIC struct pmap kernel_pmap_store;
pmap_t kernel_pmap = &kernel_pmap_store;
typedef struct kpdt_entry *kpdt_entry_t;
struct kpdt_entry {
- kpdt_entry_t next;
- vm_offset_t phys;
+ kpdt_entry_t next;
+ vm_offset_t phys;
};
#define KPDT_ENTRY_NULL ((kpdt_entry_t)0)
-STATIC kpdt_entry_t kpdt_free;
+STATIC kpdt_entry_t kpdt_free;
/*
* MAX_KERNEL_VA_SIZE must fit into the virtual address space between
@@ -162,9 +174,9 @@ STATIC kpdt_entry_t kpdt_free;
* Two pages of scratch space.
* Used in copy_to_phys(), pmap_copy_page() and pmap_zero_page().
*/
-vm_offset_t phys_map_vaddr1, phys_map_vaddr2;
+vm_offset_t phys_map_vaddr1, phys_map_vaddr2;
-int ptes_per_vm_page; /* no. of ptes required to map one VM page */
+int ptes_per_vm_page; /* no. of ptes required to map one VM page */
#define PMAP_MAX 512
@@ -175,7 +187,7 @@ int ptes_per_vm_page; /* no. of ptes required to map one VM page */
 * of modified flags for pages which are no longer contained in any
* pmap. (for mapped pages, the modified flags are in the PTE.)
*/
-char *pmap_modify_list;
+char *pmap_modify_list;
/* The PV (Physical to virtual) List.
@@ -185,16 +197,17 @@ char *pmap_modify_list;
* pv_head_table. This is used by things like pmap_remove, when we must
* find and remove all mappings for a particular physical page.
*/
-typedef struct pv_entry {
- struct pv_entry *next; /* next pv_entry */
- pmap_t pmap; /* pmap where mapping lies */
- vm_offset_t va; /* virtual address for mapping */
+typedef struct pv_entry {
+ struct pv_entry *next; /* next pv_entry */
+ pmap_t pmap; /* pmap where mapping lies */
+ vm_offset_t va; /* virtual address for mapping */
} *pv_entry_t;
#define PV_ENTRY_NULL ((pv_entry_t) 0)
-static pv_entry_t pv_head_table; /* array of entries, one per page */
+static struct simplelock *pv_lock_table; /* array */
+static pv_entry_t pv_head_table; /* array of entries, one per page */
/*
* Index into pv_head table, its lock bits, and the modify bits
* starting at pmap_phys_start.
@@ -245,20 +258,28 @@ static pv_entry_t pv_head_table; /* array of entries, one per page */
* pv_list lock, then try to get the pmap lock, but if they can't,
* they release the pv_list lock and retry the whole operation.
*/
-
+/*
+ * We raise the interrupt level to splvm, to block interprocessor
+ * interrupts during pmap operations.
+ */
#define SPLVM(spl) { spl = splvm(); }
#define SPLX(spl) { splx(spl); }
+#define PMAP_LOCK(pmap,spl) { \
+ SPLVM(spl); \
+ simple_lock(&(pmap)->lock); \
+}
+#define PMAP_UNLOCK(pmap, spl) { \
+ simple_unlock(&(pmap)->lock); \
+ SPLX(spl); \
+}
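A typical critical section in the routines below then nests the interrupt-level raise and the per-pmap simple lock:

    int spl;

    PMAP_LOCK(pmap, spl);           /* splvm(), then simple_lock   */
    /* ... inspect or modify the page tables ... */
    PMAP_UNLOCK(pmap, spl);         /* simple_unlock, then splx    */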
-#define PMAP_LOCK(pmap, spl) SPLVM(spl)
-#define PMAP_UNLOCK(pmap, spl) SPLX(spl)
-
-#define PV_LOCK_TABLE_SIZE(n) 0
-#define LOCK_PVH(index)
-#define UNLOCK_PVH(index)
+#define PV_LOCK_TABLE_SIZE(n) ((vm_size_t)((n) * sizeof(struct simplelock)))
+#define LOCK_PVH(index) simple_lock(&(pv_lock_table[index]))
+#define UNLOCK_PVH(index) simple_unlock(&(pv_lock_table[index]))
#define ETHERPAGES 16
-void *etherbuf=NULL;
-int etherlen;
+void *etherbuf=NULL;
+int etherlen;
/*
* First and last physical address that we maintain any information
@@ -266,8 +287,8 @@ int etherlen;
* pmap_init won't touch any non-existent structures.
*/
-static vm_offset_t pmap_phys_start = (vm_offset_t) 0;
-static vm_offset_t pmap_phys_end = (vm_offset_t) 0;
+static vm_offset_t pmap_phys_start = (vm_offset_t) 0;
+static vm_offset_t pmap_phys_end = (vm_offset_t) 0;
#define PMAP_MANAGED(pa) (pmap_initialized && ((pa) >= pmap_phys_start && (pa) < pmap_phys_end))
@@ -277,7 +298,7 @@ static vm_offset_t pmap_phys_end = (vm_offset_t) 0;
* pmap_init initialize this.
* '90.7.17 Fuzzy
*/
-boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
+boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
/*
* Consistency checks.
@@ -290,39 +311,71 @@ boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
static void check_pv_list __P((vm_offset_t, pv_entry_t, char *));
static void check_pmap_consistency __P((char *));
-#define CHECK_PV_LIST(phys,pv_h,who) \
+ #define CHECK_PV_LIST(phys,pv_h,who) \
if (pmap_con_dbg & CD_CHKPV) check_pv_list(phys,pv_h,who)
-#define CHECK_PMAP_CONSISTENCY(who) \
+ #define CHECK_PMAP_CONSISTENCY(who) \
if (pmap_con_dbg & CD_CHKPM) check_pmap_consistency(who)
#else
-#define CHECK_PV_LIST(phys,pv_h,who)
-#define CHECK_PMAP_CONSISTENCY(who)
+ #define CHECK_PV_LIST(phys,pv_h,who)
+ #define CHECK_PMAP_CONSISTENCY(who)
#endif /* DEBUG */
/*
* number of BATC entries used
*/
-int batc_used;
+int batc_used;
/*
* keep track BATC mapping
*/
batc_entry_t batc_entry[BATC_MAX];
-int maxcmmu_pb = 4; /* max number of CMMUs per processors pbus */
-int n_cmmus_pb = 1; /* number of CMMUs per processors pbus */
-
-#define cpu_number() 0 /* just being lazy, should be taken out -nivas*/
+int maxcmmu_pb = 4; /* max number of CMMUs per processors pbus */
+int n_cmmus_pb = 1; /* number of CMMUs per processors pbus */
vm_offset_t kmapva = 0;
extern vm_offset_t bugromva;
extern vm_offset_t sramva;
extern vm_offset_t obiova;
-STATIC void
-flush_atc_entry(unsigned users, vm_offset_t va, int kernel)
+/*
+ * Routine:	FLUSH_ATC_ENTRY
+ *
+ * Author:	N. Sugai
+ *
+ * Function:
+ *	Flush the ATC (TLB) entries that map the given virtual address
+ *	on each of the CPUs specified by 'users'.
+ *
+ * Parameters:
+ *	users	bit pattern of the CPUs whose TLB entries should be
+ *		flushed
+ *	va	virtual address that should be flushed
+ *	kernel	nonzero if the supervisor (kernel) mapping is meant
+ *
+ * Special Assumptions:
+ *	'users' only has bits set for CPUs marked alive in cpu_sets[];
+ *	the loop below re-checks this before issuing each flush.
+ *
+ */
+void
+flush_atc_entry(long users, vm_offset_t va, int kernel)
{
- cmmu_flush_remote_tlb(cpu_number(), kernel, va, M88K_PGBYTES);
+ register int cpu;
+ long tusers = users;
+
+#if 0
+ if (ff1(tusers) > 4) { /* can't be more than 4 */
+ printf("ff1 users = %d!\n", ff1(tusers));
+ panic("bogus amount of users!!!");
+ }
+#endif
+ while ((cpu = ff1(tusers)) != 32) {
+ if (cpu_sets[cpu]) { /* just checking to make sure */
+ cmmu_flush_remote_tlb(cpu, kernel, va, M88K_PGBYTES);
+ }
+ tusers &= ~(1 << cpu);
+ }
}
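A representative call site, shooting down one user mapping on every CPU that currently has the pmap active (the cpus_using mask is the one maintained by the CPUSET macros above):

    /* invalidate 'va' in the user TLBs of all CPUs using this pmap */
    flush_atc_entry(pmap->cpus_using, va, 0 /* user, not kernel */);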
/*
@@ -354,65 +407,67 @@ flush_atc_entry(unsigned users, vm_offset_t va, int kernel)
void
_pmap_activate(pmap_t pmap, pcb_t pcb, int my_cpu)
{
- apr_template_t apr_data;
- int n;
+ apr_template_t apr_data;
+ int n;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ACTIVATE | CD_FULL)) == (CD_ACTIVATE | CD_NORM))
- printf("(_pmap_activate :%x) pmap 0x%x\n", curproc, (unsigned)pmap);
+ if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
+ printf("(_pmap_activate :%x) pmap 0x%x\n", curproc, (unsigned)pmap);
#endif
-
- if (pmap != kernel_pmap) {
- /*
- * Lock the pmap to put this cpu in its active set.
- */
- simple_lock(&pmap->lock);
-
- apr_data.bits = 0;
- apr_data.field.st_base = M88K_BTOP(pmap->sdt_paddr);
- apr_data.field.wt = 0;
- apr_data.field.g = 1;
- apr_data.field.ci = 0;
- apr_data.field.te = 1;
+
+ if (pmap != kernel_pmap) {
+ /*
+ * Lock the pmap to put this cpu in its active set.
+ */
+ simple_lock(&pmap->lock);
+
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(pmap->sdt_paddr);
+ apr_data.field.wt = 0;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 0;
+ apr_data.field.te = 1;
#ifdef notyet
-#ifdef OMRON_PMAP
- /*
- * cmmu_pmap_activate will set the uapr and the batc entries, then
- * flush the *USER* TLB. IF THE KERNEL WILL EVER CARE ABOUT THE
- * BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE FLUSHED AS WELL.
- */
- cmmu_pmap_activate(my_cpu, apr_data.bits, pmap->i_batc, pmap->d_batc);
- for (n = 0; n < BATC_MAX; n++)
- *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
-#else
- cmmu_set_uapr(apr_data.bits);
- cmmu_flush_tlb(0, 0, -1);
-#endif
+ #ifdef OMRON_PMAP
+ /*
+ * cmmu_pmap_activate will set the uapr and the batc entries, then
+ * flush the *USER* TLB. IF THE KERNEL WILL EVER CARE ABOUT THE
+	 * BATC ENTRIES, THE SUPERVISOR TLBs SHOULD BE FLUSHED AS WELL.
+ */
+ cmmu_pmap_activate(my_cpu, apr_data.bits, pmap->i_batc, pmap->d_batc);
+ for (n = 0; n < BATC_MAX; n++)
+ *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits;
+ #else
+ cmmu_set_uapr(apr_data.bits);
+ cmmu_flush_tlb(0, 0, -1);
+ #endif
#endif /* notyet */
- /*
- * I am forcing it to not program the BATC at all. pmap.c module
- * needs major, major cleanup. XXX nivas
- */
- cmmu_set_uapr(apr_data.bits);
- cmmu_flush_tlb(0, 0, -1);
+ /*
+ * I am forcing it to not program the BATC at all. pmap.c module
+ * needs major, major cleanup. XXX nivas
+ */
+ cmmu_set_uapr(apr_data.bits);
+ cmmu_flush_tlb(0, 0, -1);
+
+ /*
+ * Mark that this cpu is using the pmap.
+ */
+ SETBIT_CPUSET(my_cpu, &(pmap->cpus_using));
- /*
- * Mark that this cpu is using the pmap.
- */
- simple_unlock(&pmap->lock);
+ simple_unlock(&pmap->lock);
- } else {
+ } else {
- /*
- * kernel_pmap must be always active.
- */
+ /*
+ * kernel_pmap must be always active.
+ */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
- printf("(_pmap_activate :%x) called for kernel_pmap\n", curproc);
+ if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM))
+ printf("(_pmap_activate :%x) called for kernel_pmap\n", curproc);
#endif
- }
+ }
} /* _pmap_activate */
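/*
 * _pmap_activate() composes the area pointer register word field by
 * field before handing the raw bits to cmmu_set_uapr().  The sketch
 * below shows the union-with-bitfields pattern; the field order and
 * widths here are illustrative assumptions, not the exact m88k
 * apr_template_t declaration.
 */
#include <stdio.h>

typedef union {
	unsigned bits;
	struct {
		unsigned te      : 1;	/* translation enable */
		unsigned rsvd1   : 4;
		unsigned ci      : 1;	/* cache inhibit */
		unsigned g       : 1;	/* global */
		unsigned wt      : 1;	/* write-through */
		unsigned rsvd2   : 4;
		unsigned st_base : 20;	/* segment table base page number */
	} field;
} apr_sketch_t;

int
main(void)
{
	apr_sketch_t apr;

	apr.bits = 0;
	apr.field.st_base = 0x1234;	/* as M88K_BTOP(pmap->sdt_paddr) */
	apr.field.g = 1;
	apr.field.te = 1;
	printf("apr word = 0x%08x\n", apr.bits);	/* cmmu_set_uapr(apr.bits) */
	return (0);
}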
/*
@@ -439,9 +494,15 @@ _pmap_activate(pmap_t pmap, pcb_t pcb, int my_cpu)
void
_pmap_deactivate(pmap_t pmap, pcb_t pcb, int my_cpu)
{
- if (pmap != kernel_pmap) {
- /* Nothing to do */
- }
+ if (pmap != kernel_pmap) {
+
+ /*
+ * we expect the spl is already raised to sched level.
+ */
+ simple_lock(&pmap->lock);
+ CLRBIT_CPUSET(my_cpu, &(pmap->cpus_using));
+ simple_unlock(&pmap->lock);
+ }
}
/*
@@ -459,12 +520,12 @@ _pmap_deactivate(pmap_t pmap, pcb_t pcb, int my_cpu)
STATIC unsigned int
m88k_protection(pmap_t map, vm_prot_t prot)
{
- pte_template_t p;
+ pte_template_t p;
- p.bits = 0;
- p.pte.prot = (prot & VM_PROT_WRITE) ? 0 : 1;
+ p.bits = 0;
+ p.pte.prot = (prot & VM_PROT_WRITE) ? 0 : 1;
- return(p.bits);
+ return (p.bits);
} /* m88k_protection */
@@ -494,23 +555,23 @@ m88k_protection(pmap_t map, vm_prot_t prot)
pt_entry_t *
pmap_pte(pmap_t map, vm_offset_t virt)
{
- sdt_entry_t *sdt;
+ sdt_entry_t *sdt;
- /*XXX will this change if physical memory is not contiguous? */
- /* take a look at PDTIDX XXXnivas */
- if (map == PMAP_NULL)
- panic("pmap_pte: pmap is NULL");
+ /*XXX will this change if physical memory is not contiguous? */
+ /* take a look at PDTIDX XXXnivas */
+ if (map == PMAP_NULL)
+ panic("pmap_pte: pmap is NULL");
- sdt = SDTENT(map,virt);
+ sdt = SDTENT(map,virt);
- /*
- * Check whether page table is exist or not.
- */
- if (!SDT_VALID(sdt))
- return(PT_ENTRY_NULL);
- else
- return((pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) +
- PDTIDX(virt));
+ /*
+	 * Check whether the page table exists or not.
+ */
+ if (!SDT_VALID(sdt))
+ return (PT_ENTRY_NULL);
+ else
+ return ((pt_entry_t *)(((sdt + SDT_ENTRIES)->table_addr)<<PDT_SHIFT) +
+ PDTIDX(virt));
} /* pmap_pte */
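/*
 * pmap_pte() is a straight two-level walk: the top virtual address
 * bits index the segment table, the middle bits index the page table
 * that the shadow segment entry points at.  A compilable sketch of the
 * index arithmetic follows, assuming 4KB pages and 1024-entry tables;
 * the real constants come from SDTENT/PDTIDX in the m88k headers.
 */
#include <stdio.h>

#define SEG_IDX(va)	(((va) >> 22) & 0x3ff)	/* segment table index */
#define PAGE_IDX(va)	(((va) >> 12) & 0x3ff)	/* page table index */

int
main(void)
{
	unsigned long va = 0x10003000UL;

	printf("va 0x%08lx -> sdt[%lu], pdt[%lu]\n",
	    va, SEG_IDX(va), PAGE_IDX(va));
	return (0);
}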
@@ -554,36 +615,36 @@ pmap_pte(pmap_t map, vm_offset_t virt)
STATIC pt_entry_t *
pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
{
- int aprot;
- sdt_entry_t *sdt;
- kpdt_entry_t kpdt_ent;
- pmap_t map = kernel_pmap;
+ int aprot;
+ sdt_entry_t *sdt;
+ kpdt_entry_t kpdt_ent;
+ pmap_t map = kernel_pmap;
#if DEBUG
- if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
- printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
+ if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
+ printf("(pmap_expand_kmap :%x) v %x\n", curproc,virt);
#endif
- aprot = m88k_protection (map, prot);
+ aprot = m88k_protection (map, prot);
- /* segment table entry derivate from map and virt. */
- sdt = SDTENT(map, virt);
- if (SDT_VALID(sdt))
- panic("pmap_expand_kmap: segment table entry VALID");
+	/* derive the segment table entry from map and virt. */
+ sdt = SDTENT(map, virt);
+ if (SDT_VALID(sdt))
+ panic("pmap_expand_kmap: segment table entry VALID");
- kpdt_ent = kpdt_free;
- if (kpdt_ent == KPDT_ENTRY_NULL) {
- printf("pmap_expand_kmap: Ran out of kernel pte tables\n");
- return(PT_ENTRY_NULL);
- }
- kpdt_free = kpdt_free->next;
+ kpdt_ent = kpdt_free;
+ if (kpdt_ent == KPDT_ENTRY_NULL) {
+ printf("pmap_expand_kmap: Ran out of kernel pte tables\n");
+ return (PT_ENTRY_NULL);
+ }
+ kpdt_free = kpdt_free->next;
- ((sdt_entry_template_t *)sdt)->bits = kpdt_ent->phys | aprot | DT_VALID;
- ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = (vm_offset_t)kpdt_ent | aprot | DT_VALID;
- (unsigned)(kpdt_ent->phys) = 0;
- (unsigned)(kpdt_ent->next) = 0;
+ ((sdt_entry_template_t *)sdt)->bits = kpdt_ent->phys | aprot | DT_VALID;
+ ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = (vm_offset_t)kpdt_ent | aprot | DT_VALID;
+ (unsigned)(kpdt_ent->phys) = 0;
+ (unsigned)(kpdt_ent->next) = 0;
- return((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
+ return ((pt_entry_t *)(kpdt_ent) + PDTIDX(virt));
}/* pmap_expand_kmap() */
/*
@@ -624,57 +685,67 @@ pmap_expand_kmap(vm_offset_t virt, vm_prot_t prot)
* }
*
*/
+void m197_load_patc(int, vm_offset_t, vm_offset_t, int);
+
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
- int aprot;
- unsigned npages;
- unsigned num_phys_pages;
- unsigned cmode;
- pt_entry_t *pte;
- pte_template_t template;
-
- /*
- * cache mode is passed in the top 16 bits.
- * extract it from there. And clear the top
- * 16 bits from prot.
- */
- cmode = (prot & 0xffff0000) >> 16;
- prot &= 0x0000ffff;
+ int aprot;
+ unsigned npages;
+ unsigned num_phys_pages;
+ unsigned cmode;
+ pt_entry_t *pte;
+ pte_template_t template;
+ static unsigned i = 0;
+ /*
+ * cache mode is passed in the top 16 bits.
+ * extract it from there. And clear the top
+ * 16 bits from prot.
+ */
+ cmode = (prot & 0xffff0000) >> 16;
+ prot &= 0x0000ffff;
#if DEBUG
- if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
- printf ("(pmap_map :%x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
- curproc, start, end, virt, prot, cmode);
+ if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
+ printf ("(pmap_map :%x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
+ curproc, start, end, virt, prot, cmode);
#endif
- if (start > end)
- panic("pmap_map: start greater than end address");
+ if (start > end)
+ panic("pmap_map: start greater than end address");
- aprot = m88k_protection (kernel_pmap, prot);
+ aprot = m88k_protection (kernel_pmap, prot);
- template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | cmode | DT_VALID;
- npages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
+ npages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
- for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
+ for (num_phys_pages = npages; num_phys_pages > 0; num_phys_pages--) {
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic ("pmap_map: Cannot allocate pte table");
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
+ panic ("pmap_map: Cannot allocate pte table");
#ifdef DEBUG
- if (pmap_con_dbg & CD_MAP)
- if (pte->dtype)
- printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+ if (pmap_con_dbg & CD_MAP)
+ if (pte->dtype)
+ printf("(pmap_map :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
#endif
- *pte = template.pte;
- virt += M88K_PGBYTES;
- template.bits += M88K_PGBYTES;
- }
+ *pte = template.pte;
+ /* hack for MVME197 */
+ if (cputyp == CPU_197) {
+ if (i < 32) {
+ m197_load_patc(i, virt, (vm_offset_t)template.bits, 1);
+ i++;
+ }
+ }
+
+ virt += M88K_PGBYTES;
+ template.bits += M88K_PGBYTES;
+ }
- return(virt);
+ return (virt);
} /* pmap_map() */
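/*
 * As the comment in pmap_map() notes, callers fold the cache control
 * bits into the top 16 bits of 'prot'.  A minimal sketch of the
 * packing and unpacking follows; the constant values are placeholders
 * for the sketch, not the kernel's definitions.
 */
#include <stdio.h>

#define VM_PROT_READ	0x1	/* placeholder value */
#define VM_PROT_WRITE	0x2	/* placeholder value */
#define CACHE_INH	0x40	/* placeholder cache-inhibit bit */

int
main(void)
{
	unsigned prot, cmode;

	/* caller: fold the cache mode into the upper half */
	prot = (VM_PROT_READ | VM_PROT_WRITE) | (CACHE_INH << 16);

	/* pmap_map(): split them apart again */
	cmode = (prot & 0xffff0000) >> 16;
	prot &= 0x0000ffff;

	printf("prot 0x%x cmode 0x%x\n", prot, cmode);
	return (0);
}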
@@ -725,98 +796,100 @@ pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
*/
vm_offset_t
pmap_map_batc(vm_offset_t virt, vm_offset_t start, vm_offset_t end,
- vm_prot_t prot, unsigned cmode)
+ vm_prot_t prot, unsigned cmode)
{
- int aprot;
- unsigned num_phys_pages;
- vm_offset_t phys;
- pt_entry_t *pte;
- pte_template_t template;
- batc_template_t batctmp;
- register int i;
+ int aprot;
+ unsigned num_phys_pages;
+ vm_offset_t phys;
+ pt_entry_t *pte;
+ pte_template_t template;
+ batc_template_t batctmp;
+ register int i;
#if DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
+ if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
printf ("(pmap_map_batc :%x) phys address from %x to %x mapped at virtual %x, prot %x\n", curproc,
- start, end, virt, prot);
+ start, end, virt, prot);
#endif
- if (start > end)
+ if (start > end)
panic("pmap_map_batc: start greater than end address");
- aprot = m88k_protection (kernel_pmap, prot);
- template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
- phys = start;
- batctmp.bits = 0;
- batctmp.field.sup = 1; /* supervisor */
- batctmp.field.wt = template.pte.wt; /* write through */
- batctmp.field.g = template.pte.g; /* global */
- batctmp.field.ci = template.pte.ci; /* cache inhibit */
- batctmp.field.wp = template.pte.prot; /* protection */
- batctmp.field.v = 1; /* valid */
+ aprot = m88k_protection (kernel_pmap, prot);
+ template.bits = M88K_TRUNC_PAGE(start) | aprot | DT_VALID | cmode;
+ phys = start;
+ batctmp.bits = 0;
+ batctmp.field.sup = 1; /* supervisor */
+ batctmp.field.wt = template.pte.wt; /* write through */
+ batctmp.field.g = template.pte.g; /* global */
+ batctmp.field.ci = template.pte.ci; /* cache inhibit */
+ batctmp.field.wp = template.pte.prot; /* protection */
+ batctmp.field.v = 1; /* valid */
- num_phys_pages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
+ num_phys_pages = M88K_BTOP(M88K_ROUND_PAGE(end) - M88K_TRUNC_PAGE(start));
- while (num_phys_pages > 0) {
+ while (num_phys_pages > 0) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
- printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, aligne V=%d, phys=%x, aligne P=%d\n", curproc,
- num_phys_pages, virt, BATC_BLK_ALIGNED(virt), phys, BATC_BLK_ALIGNED(phys));
+ if ((pmap_con_dbg & (CD_MAPB | CD_FULL)) == (CD_MAPB | CD_FULL))
+ printf("(pmap_map_batc :%x) num_phys_pg=%x, virt=%x, aligne V=%d, phys=%x, aligne P=%d\n", curproc,
+ num_phys_pages, virt, BATC_BLK_ALIGNED(virt), phys, BATC_BLK_ALIGNED(phys));
#endif
- if ( BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(phys) &&
- num_phys_pages >= BATC_BLKBYTES/M88K_PGBYTES &&
- batc_used < BATC_MAX ) {
+ if ( BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(phys) &&
+ num_phys_pages >= BATC_BLKBYTES/M88K_PGBYTES &&
+ batc_used < BATC_MAX ) {
- /*
- * map by BATC
- */
- batctmp.field.lba = M88K_BTOBLK(virt);
- batctmp.field.pba = M88K_BTOBLK(phys);
+ /*
+ * map by BATC
+ */
+ batctmp.field.lba = M88K_BTOBLK(virt);
+ batctmp.field.pba = M88K_BTOBLK(phys);
- cmmu_set_pair_batc_entry(0, batc_used, batctmp.bits);
+ for ( i = 0; i < max_cpus; i++)
+ if (cpu_sets[i])
+ cmmu_set_pair_batc_entry(i, batc_used, batctmp.bits);
- batc_entry[batc_used] = batctmp.field;
+ batc_entry[batc_used] = batctmp.field;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_MAPB | CD_NORM)) == (CD_MAPB | CD_NORM)) {
- printf("(pmap_map_batc :%x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
- }
- if (pmap_con_dbg & CD_MAPB) {
-
- for (i = 0; i < BATC_BLKBYTES; i += M88K_PGBYTES ) {
- pte = pmap_pte(kernel_pmap, virt+i);
- if (pte->dtype)
- printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, ((pte_template_t *)pte)->bits);
- }
- }
+ if ((pmap_con_dbg & (CD_MAPB | CD_NORM)) == (CD_MAPB | CD_NORM)) {
+ printf("(pmap_map_batc :%x) BATC used=%d, data=%x\n", curproc, batc_used, batctmp.bits);
+ }
+ if (pmap_con_dbg & CD_MAPB) {
+
+ for (i = 0; i < BATC_BLKBYTES; i += M88K_PGBYTES ) {
+ pte = pmap_pte(kernel_pmap, virt+i);
+ if (pte->dtype)
+ printf("(pmap_map_batc :%x) va %x is already mapped : pte %x\n", curproc, virt+i, ((pte_template_t *)pte)->bits);
+ }
+ }
#endif
- batc_used++;
- virt += BATC_BLKBYTES;
- phys += BATC_BLKBYTES;
- template.pte.pfn = M88K_BTOP(phys);
- num_phys_pages -= BATC_BLKBYTES/M88K_PGBYTES;
- continue;
- }
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
- panic ("pmap_map_batc: Cannot allocate pte table");
+ batc_used++;
+ virt += BATC_BLKBYTES;
+ phys += BATC_BLKBYTES;
+ template.pte.pfn = M88K_BTOP(phys);
+ num_phys_pages -= BATC_BLKBYTES/M88K_PGBYTES;
+ continue;
+ }
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ if ((pte = pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE)) == PT_ENTRY_NULL)
+ panic ("pmap_map_batc: Cannot allocate pte table");
#ifdef DEBUG
- if (pmap_con_dbg & CD_MAPB)
- if (pte->dtype)
- printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
+ if (pmap_con_dbg & CD_MAPB)
+ if (pte->dtype)
+ printf("(pmap_map_batc :%x) pte @ 0x%x already valid\n", curproc, (unsigned)pte);
#endif
- *pte = template.pte;
- virt += M88K_PGBYTES;
- phys += M88K_PGBYTES;
- template.bits += M88K_PGBYTES;
- num_phys_pages--;
- }
+ *pte = template.pte;
+ virt += M88K_PGBYTES;
+ phys += M88K_PGBYTES;
+ template.bits += M88K_PGBYTES;
+ num_phys_pages--;
+ }
- return(M88K_ROUND_PAGE(virt));
+ return (M88K_ROUND_PAGE(virt));
} /* pmap_map_batc() */
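/*
 * pmap_map_batc() promotes a run of pages to a BATC entry only when
 * both cursors sit on a block boundary, at least one whole block
 * remains, and a BATC slot is free.  A sketch of the alignment test,
 * assuming 512KB BATC blocks and 4KB pages (the kernel's
 * BATC_BLKBYTES and M88K_PGBYTES):
 */
#include <stdio.h>

#define PGBYTES		0x1000UL	/* 4KB page, assumed */
#define BATC_BLKBYTES	0x80000UL	/* 512KB block, assumed per 88200 */
#define BLK_ALIGNED(a)	(((a) & (BATC_BLKBYTES - 1)) == 0)

int
main(void)
{
	unsigned long virt = 0x00080000UL, phys = 0x00200000UL;
	unsigned long npages = 256;	/* pages left to map */

	if (BLK_ALIGNED(virt) && BLK_ALIGNED(phys) &&
	    npages >= BATC_BLKBYTES / PGBYTES)
		printf("map this run with one BATC entry\n");
	else
		printf("fall back to per-page ptes\n");
	return (0);
}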
@@ -855,65 +928,73 @@ pmap_map_batc(vm_offset_t virt, vm_offset_t start, vm_offset_t end,
void
pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
{
- int spl, spl_sav;
- pt_entry_t *pte;
- vm_offset_t va;
- int kflush;
- int cpu;
- register pte_template_t opte;
+ int spl, spl_sav;
+ pt_entry_t *pte;
+ vm_offset_t va;
+ int kflush;
+ int cpu;
+ register unsigned users;
+ register pte_template_t opte;
#ifdef DEBUG
- if ( mode & CACHE_MASK ) {
- printf("(cache_ctrl) illegal mode %x\n",mode);
- return;
- }
- if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
- printf("(pmap_cache_ctrl :%x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
- }
+ if ( mode & CACHE_MASK ) {
+ printf("(cache_ctrl) illegal mode %x\n",mode);
+ return;
+ }
+ if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
+ printf("(pmap_cache_ctrl :%x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
+ }
#endif /* DEBUG */
- if ( pmap == PMAP_NULL ) {
- panic("pmap_cache_ctrl: pmap is NULL");
- }
+ if ( pmap == PMAP_NULL ) {
+ panic("pmap_cache_ctrl: pmap is NULL");
+ }
- PMAP_LOCK(pmap, spl);
+ PMAP_LOCK(pmap, spl);
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
+	/*
+	 * Record which CPUs are using this pmap, for the TLB flushes below.
+	 */
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
- for (va = s; va < e; va += M88K_PGBYTES) {
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- continue;
+ for (va = s; va < e; va += M88K_PGBYTES) {
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ continue;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
- printf("(cache_ctrl) pte@0x%08x\n",(unsigned)pte);
- }
+ if ((pmap_con_dbg & (CD_CACHE | CD_NORM)) == (CD_CACHE | CD_NORM)) {
+ printf("(cache_ctrl) pte@0x%08x\n",(unsigned)pte);
+ }
#endif /* DEBUG */
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- * XXX
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- ((pte_template_t *)pte)->bits = (opte.bits & CACHE_MASK) | mode;
- flush_atc_entry(0, va, kflush);
- splx(spl_sav);
-
- /*
- * Data cache should be copied back and invalidated.
- */
- cmmu_flush_remote_cache(0, M88K_PTOB(pte->pfn), M88K_PGBYTES);
- }
-
- PMAP_UNLOCK(pmap, spl);
+ /*
+		 * Invalidate the pte temporarily so that another cpu cannot
+		 * write back the modified and/or referenced bits meanwhile.
+ * XXX
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ ((pte_template_t *)pte)->bits = (opte.bits & CACHE_MASK) | mode;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+
+ /*
+ * Data cache should be copied back and invalidated.
+ */
+ for (cpu=0; cpu<max_cpus; cpu++)
+ if (cpu_sets[cpu])
+ /*cmmu_flush_remote_data_cache(cpu, M88K_PTOB(pte->pfn),M88K_PGBYTES);*/
+ cmmu_flush_remote_cache(cpu, M88K_PTOB(pte->pfn), M88K_PGBYTES);
-} /* pmap_cache_ctrl */
+ }
+
+ PMAP_UNLOCK(pmap, spl);
+} /* pmap_cache_ctrl */
/*
* Routine: PMAP_BOOTSTRAP
@@ -964,219 +1045,241 @@ pmap_cache_ctrl(pmap_t pmap, vm_offset_t s, vm_offset_t e, unsigned mode)
*/
void
-pmap_bootstrap(vm_offset_t load_start, /* IN */
- vm_offset_t *phys_start, /* IN/OUT */
- vm_offset_t *phys_end, /* IN */
- vm_offset_t *virt_start, /* OUT */
- vm_offset_t *virt_end) /* OUT */
+pmap_bootstrap(vm_offset_t load_start, /* IN */
+ vm_offset_t *phys_start, /* IN/OUT */
+ vm_offset_t *phys_end, /* IN */
+ vm_offset_t *virt_start, /* OUT */
+ vm_offset_t *virt_end) /* OUT */
{
- kpdt_entry_t kpdt_virt;
- sdt_entry_t *kmap;
- vm_offset_t vaddr,
- virt,
- kpdt_phys,
- s_text,
- e_text,
- kernel_pmap_size,
- etherpa;
- apr_template_t apr_data;
- pt_entry_t *pte;
- int i;
- u_long foo;
- extern char *kernelstart, *etext;
- extern void cmmu_go_virt(void);
+ kpdt_entry_t kpdt_virt;
+ sdt_entry_t *kmap;
+ vm_offset_t vaddr,
+ virt,
+ kpdt_phys,
+ s_text,
+ e_text,
+ kernel_pmap_size,
+ etherpa;
+ apr_template_t apr_data;
+ pt_entry_t *pte;
+ int i;
+ u_long foo;
+ pmap_table_t ptable;
+ extern char *kernelstart, *etext;
+ extern char *kernel_sdt;
+ extern void cmmu_go_virt(void);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_NORM)) == (CD_BOOT | CD_NORM)) {
- printf("pmap_bootstrap : \"load_start\" 0x%x\n", load_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_NORM)) == (CD_BOOT | CD_NORM)) {
+ printf("pmap_bootstrap : \"load_start\" 0x%x\n", load_start);
+ }
#endif
- ptes_per_vm_page = PAGE_SIZE >> M88K_PGSHIFT;
- if (ptes_per_vm_page == 0){
- panic("pmap_bootstrap: VM page size < MACHINE page size");
- }
- if (!PAGE_ALIGNED(load_start)) {
- panic("pmap_bootstrap : \"load_start\" not on the m88k page boundary : 0x%x", load_start);
- }
-
- /*
- * Allocate the kernel page table from the front of available
- * physical memory,
- * i.e. just after where the kernel image was loaded.
- */
- /*
- * The calling sequence is
- * ...
- * pmap_bootstrap(&kernelstart,...)
- * kernelstart is the first symbol in the load image.
- * We link the kernel such that &kernelstart == 0x10000 (size of
- * BUG ROM)
- * The expression (&kernelstart - load_start) will end up as
- * 0, making *virt_start == *phys_start, giving a 1-to-1 map)
- */
+ ptes_per_vm_page = PAGE_SIZE >> M88K_PGSHIFT;
+ if (ptes_per_vm_page == 0) {
+ panic("pmap_bootstrap: VM page size < MACHINE page size");
+ }
+ if (!PAGE_ALIGNED(load_start)) {
+ panic("pmap_bootstrap : \"load_start\" not on the m88k page boundary : 0x%x", load_start);
+ }
- *phys_start = M88K_ROUND_PAGE(*phys_start);
- *virt_start = *phys_start +
- (M88K_TRUNC_PAGE((unsigned)&kernelstart) - load_start);
+ simple_lock_init(&kernel_pmap->lock);
- /*
- * Initialilze kernel_pmap structure
- */
- kernel_pmap->ref_count = 1;
- kernel_pmap->sdt_paddr = kmap = (sdt_entry_t *)(*phys_start);
- kernel_pmap->sdt_vaddr = (sdt_entry_t *)(*virt_start);
- kmapva = *virt_start;
+ /*
+ * Allocate the kernel page table from the front of available
+ * physical memory,
+ * i.e. just after where the kernel image was loaded.
+ */
+ /*
+ * The calling sequence is
+ * ...
+ * pmap_bootstrap(&kernelstart,...)
+ * kernelstart is the first symbol in the load image.
+ * We link the kernel such that &kernelstart == 0x10000 (size of
+ * BUG ROM)
+ * The expression (&kernelstart - load_start) will end up as
+ * 0, making *virt_start == *phys_start, giving a 1-to-1 map)
+ */
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("kernel_pmap->sdt_paddr = %x\n",kernel_pmap->sdt_paddr);
- printf("kernel_pmap->sdt_vaddr = %x\n",kernel_pmap->sdt_vaddr);
- }
- /* init double-linked list of pmap structure */
- kernel_pmap->next = kernel_pmap;
- kernel_pmap->prev = kernel_pmap;
-#endif
+ *phys_start = M88K_ROUND_PAGE(*phys_start);
+ *virt_start = *phys_start +
+ (M88K_TRUNC_PAGE((unsigned)&kernelstart) - load_start);
- /*
- * Reserve space for segment table entries.
- * One for the regular segment table and one for the shadow table
- * The shadow table keeps track of the virtual address of page
- * tables. This is used in virtual-to-physical address translation
- * functions. Remember, MMU cares only for physical addresses of
- * segment and page table addresses. For kernel page tables, we
- * really don't need this virtual stuff (since the kernel will
- * be mapped 1-to-1) but for user page tables, this is required.
- * Just to be consistent, we will maintain the shadow table for
- * kernel pmap also.
- */
+ /*
+	 * Initialize the kernel_pmap structure
+ */
+ kernel_pmap->ref_count = 1;
+ kernel_pmap->cpus_using = 0;
+ kernel_pmap->sdt_paddr = kmap = (sdt_entry_t *)(*phys_start);
+ kernel_pmap->sdt_vaddr = (sdt_entry_t *)(*virt_start);
+ kmapva = *virt_start;
- kernel_pmap_size = 2*SDT_SIZE;
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("kernel_pmap->sdt_paddr = %x\n",kernel_pmap->sdt_paddr);
+ printf("kernel_pmap->sdt_vaddr = %x\n",kernel_pmap->sdt_vaddr);
+ }
+ /* init double-linked list of pmap structure */
+ kernel_pmap->next = kernel_pmap;
+ kernel_pmap->prev = kernel_pmap;
+#endif
- /* save pointers to where page table entries start in physical memory */
- kpdt_phys = (*phys_start + kernel_pmap_size);
- kpdt_virt = (kpdt_entry_t)(*virt_start + kernel_pmap_size);
- kernel_pmap_size += MAX_KERNEL_PDT_SIZE;
- *phys_start += kernel_pmap_size;
- *virt_start += kernel_pmap_size;
+ /*
+ * Reserve space for segment table entries.
+ * One for the regular segment table and one for the shadow table
+ * The shadow table keeps track of the virtual address of page
+ * tables. This is used in virtual-to-physical address translation
+ * functions. Remember, MMU cares only for physical addresses of
+ * segment and page table addresses. For kernel page tables, we
+ * really don't need this virtual stuff (since the kernel will
+ * be mapped 1-to-1) but for user page tables, this is required.
+ * Just to be consistent, we will maintain the shadow table for
+ * kernel pmap also.
+ */
- /* init all segment and page descriptor to zero */
- bzero(kernel_pmap->sdt_vaddr, kernel_pmap_size);
+ kernel_pmap_size = 2*SDT_SIZE;
+#ifdef DEBUG
+ printf("kernel segment table from 0x%x to 0x%x\n", kernel_pmap->sdt_vaddr,
+ kernel_pmap->sdt_vaddr + kernel_pmap_size);
+#endif
+ /* save pointers to where page table entries start in physical memory */
+ kpdt_phys = (*phys_start + kernel_pmap_size);
+ kpdt_virt = (kpdt_entry_t)(*virt_start + kernel_pmap_size);
+ kernel_pmap_size += MAX_KERNEL_PDT_SIZE;
+ *phys_start += kernel_pmap_size;
+ *virt_start += kernel_pmap_size;
+
+ /* init all segment and page descriptor to zero */
+ bzero(kernel_pmap->sdt_vaddr, kernel_pmap_size);
+#ifdef DEBUG
+ printf("kernel page table to 0x%x\n", kernel_pmap->sdt_vaddr + kernel_pmap_size);
+#endif
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("kpdt_phys = %x\n",kpdt_phys);
- printf("kpdt_virt = %x\n",kpdt_virt);
- printf("end of kpdt at (virt)0x%08x ; (phys)0x%08x\n",
- *virt_start,*phys_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("kpdt_phys = %x\n",kpdt_phys);
+ printf("kpdt_virt = %x\n",kpdt_virt);
+ printf("end of kpdt at (virt)0x%08x ; (phys)0x%08x\n",
+ *virt_start,*phys_start);
+ }
#endif
- /*
- * init the kpdt queue
- */
- kpdt_free = kpdt_virt;
- for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i>0; i--) {
- kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE);
- kpdt_virt->phys = kpdt_phys;
- kpdt_virt = kpdt_virt->next;
- kpdt_phys += PDT_SIZE;
- }
- kpdt_virt->next = KPDT_ENTRY_NULL; /* terminate the list */
-
- /*
- * Map the kernel image into virtual space
- */
+ /*
+ * init the kpdt queue
+ */
+ kpdt_free = kpdt_virt;
+ for (i = MAX_KERNEL_PDT_SIZE/PDT_SIZE; i>0; i--) {
+ kpdt_virt->next = (kpdt_entry_t)((vm_offset_t)kpdt_virt + PDT_SIZE);
+ kpdt_virt->phys = kpdt_phys;
+ kpdt_virt = kpdt_virt->next;
+ kpdt_phys += PDT_SIZE;
+ }
+ kpdt_virt->next = KPDT_ENTRY_NULL; /* terminate the list */
- s_text = load_start; /* paddr of text */
- e_text = load_start + ((unsigned)&etext -
- M88K_TRUNC_PAGE((unsigned)&kernelstart));
- /* paddr of end of text section*/
- e_text = M88K_ROUND_PAGE(e_text);
+ /*
+ * Map the kernel image into virtual space
+ */
+
+ s_text = load_start; /* paddr of text */
+ e_text = load_start + ((unsigned)&etext -
+ M88K_TRUNC_PAGE((unsigned)&kernelstart));
+ /* paddr of end of text section*/
+ e_text = M88K_ROUND_PAGE(e_text);
#ifdef OMRON_PMAP
-#define PMAPER pmap_map
+ #define PMAPER pmap_map
#else
-#define PMAPER pmap_map_batc
+ #define PMAPER pmap_map_batc
#endif
- /* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
- vaddr = PMAPER(
- 0,
- 0,
- 0x10000,
- (VM_PROT_WRITE | VM_PROT_READ)|(CACHE_INH <<16));
-
- assert(vaddr == M88K_TRUNC_PAGE((unsigned)&kernelstart));
-
- vaddr = PMAPER(
- (vm_offset_t)M88K_TRUNC_PAGE(((unsigned)&kernelstart)),
- s_text,
- e_text,
- VM_PROT_WRITE | VM_PROT_READ|(CACHE_GLOBAL<<16)); /* shouldn't it be RO? XXX*/
-
- vaddr = PMAPER(
- vaddr,
- e_text,
- (vm_offset_t)kmap,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL << 16));
-
- /*
- * Map system segment & page tables - should be cache inhibited?
- * 88200 manual says that CI bit is driven on the Mbus while accessing
- * the translation tree. I don't think we need to map it CACHE_INH
- * here...
- */
+#if 1 /* defined(MVME187) || defined (MVME197) */
+ /* map the first 64k (BUG ROM) read only, cache inhibited (? XXX) */
+ if (cputyp != CPU_188) { /* != CPU_188 */
+ vaddr = PMAPER(
+ 0,
+ 0,
+ 0x10000,
+ (VM_PROT_WRITE | VM_PROT_READ)|(CACHE_INH <<16));
+ assert(vaddr == M88K_TRUNC_PAGE((unsigned)&kernelstart));
+ }
+#endif /* defined(MVME187) || defined (MVME197) */
+
+ vaddr = PMAPER(
+ (vm_offset_t)M88K_TRUNC_PAGE(((unsigned)&kernelstart)),
+ s_text,
+ e_text,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL<<16)); /* shouldn't it be RO? XXX*/
+
+ vaddr = PMAPER(
+ vaddr,
+ e_text,
+ (vm_offset_t)kmap,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL << 16));
+
+ /*
+ * Map system segment & page tables - should be cache inhibited?
+ * 88200 manual says that CI bit is driven on the Mbus while accessing
+ * the translation tree. I don't think we need to map it CACHE_INH
+ * here...
+ */
if (kmapva != vaddr) {
#ifdef DEBUG
if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("(pmap_bootstrap) correcting vaddr\n");
- }
+ printf("(pmap_bootstrap) correcting vaddr\n");
+ }
#endif
- while (vaddr < (*virt_start - kernel_pmap_size))
- vaddr = M88K_ROUND_PAGE(vaddr + 1);
+ while (vaddr < (*virt_start - kernel_pmap_size))
+ vaddr = M88K_ROUND_PAGE(vaddr + 1);
}
+ vaddr = PMAPER(
+ vaddr,
+ (vm_offset_t)kmap,
+ *phys_start,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
- vaddr = PMAPER(vaddr,(vm_offset_t)kmap,*phys_start,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_GLOBAL << 16));
-
if (vaddr != *virt_start) {
- #ifdef DEBUG
+#ifdef DEBUG
if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
printf("1:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
- *virt_start, *phys_start);
+ *virt_start, *phys_start);
}
- #endif
+#endif
*virt_start = vaddr;
*phys_start = round_page(*phys_start);
}
-
- /*
- * Get ethernet buffer - need etherlen bytes physically contiguous.
- * 1 to 1 mapped as well???. There is actually a bug in the macros
- * used by the 1x7 ethernet driver. Remove this when that is fixed.
- * XXX -nivas
- */
- etherlen = (ETHERPAGES * NBPG);
- *phys_start = vaddr;
-
- etherbuf = (void *)vaddr;
-
- vaddr = PMAPER(vaddr,*phys_start,*phys_start + etherlen,
- (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
- *virt_start += etherlen;
- *phys_start += etherlen;
-
- if (vaddr != *virt_start) {
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("2:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
- *virt_start, *phys_start);
- }
-#endif
- *virt_start = vaddr;
- *phys_start = round_page(*phys_start);
- }
+#if defined(MVME187) || defined (MVME197)
+ /*
+ * Get ethernet buffer - need etherlen bytes physically contiguous.
+ * 1 to 1 mapped as well???. There is actually a bug in the macros
+ * used by the 1x7 ethernet driver. Remove this when that is fixed.
+ * XXX -nivas
+ */
+ if (cputyp != CPU_188) { /* != CPU_188 */
+ *phys_start = vaddr;
+ etherlen = ETHERPAGES * NBPG;
+ etherbuf = (void *)vaddr;
+
+ vaddr = PMAPER(
+ vaddr,
+ *phys_start,
+ *phys_start + etherlen,
+ (VM_PROT_WRITE|VM_PROT_READ)|(CACHE_INH << 16));
+
+ *virt_start += etherlen;
+ *phys_start += etherlen;
+
+ if (vaddr != *virt_start) {
+ #ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("2:vaddr %x *virt_start %x *phys_start %x\n", vaddr,
+ *virt_start, *phys_start);
+ }
+ #endif
+ *virt_start = vaddr;
+ *phys_start = round_page(*phys_start);
+ }
+ }
+#endif /* defined(MVME187) || defined (MVME197) */
*virt_start = round_page(*virt_start);
*virt_end = VM_MAX_KERNEL_ADDRESS;
@@ -1186,164 +1289,149 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */
*/
phys_map_vaddr1 = round_page(*virt_start);
- phys_map_vaddr2 = phys_map_vaddr1 + PAGE_SIZE;
+ phys_map_vaddr2 = phys_map_vaddr1 + PAGE_SIZE * max_cpus;
/*
* To make 1:1 mapping of virt:phys, throw away a few phys pages.
* XXX what is this? nivas
*/
-
- *phys_start += 2 * PAGE_SIZE;
- *virt_start += 2 * PAGE_SIZE;
+
+ *phys_start += 2 * PAGE_SIZE * max_cpus;
+ *virt_start += 2 * PAGE_SIZE * max_cpus;
/*
* Map all IO space 1-to-1. Ideally, I would like to not do this
* but have va for the given IO address dynamically allocated. But
- * on the 88200, 2 of the BATCs are hardwired to do map the IO space
+ * on the 88200, 2 of the BATCs are hardwired to map the IO space
* 1-to-1; I decided to map the rest of the IO space 1-to-1.
* And bug ROM & the SRAM need to be mapped 1-to-1 if we ever want to
* execute bug system calls after the MMU has been turned on.
* OBIO should be mapped cache inhibited.
*/
- PMAPER(
- BUGROM_START,
- BUGROM_START,
- BUGROM_START + BUGROM_SIZE,
- VM_PROT_WRITE|VM_PROT_READ|(CACHE_INH << 16));
-
- PMAPER(
- SRAM_START,
- SRAM_START,
- SRAM_START + SRAM_SIZE,
- VM_PROT_WRITE|VM_PROT_READ|(CACHE_GLOBAL << 16));
-
- PMAPER(
- OBIO_START,
- OBIO_START,
- OBIO_START + OBIO_SIZE,
- VM_PROT_WRITE|VM_PROT_READ|(CACHE_INH << 16));
-
-#if 0
- PMAPER(
- VMEA16,
- VMEA16,
- VMEA16 + VMEA16_SIZE,
- VM_PROT_WRITE|VM_PROT_READ|(CACHE_INH << 16));
-
- PMAPER(
- VMEA32D16,
- VMEA32D16,
- VMEA32D16 + VMEA32D16_SIZE,
- VM_PROT_WRITE|VM_PROT_READ|(CACHE_INH << 16));
-
- PMAPER(
- IOMAP_MAP_START,
- IOMAP_MAP_START,
- IOMAP_MAP_START + IOMAP_SIZE,
- VM_PROT_WRITE|VM_PROT_READ|(CACHE_INH << 16));
-#endif /* 0 */
+ ptable = pmap_table_build(avail_end); /* see pmap_table.c for details */
+#ifdef DEBUG
+ printf("pmap_bootstrap: -> pmap_table_build\n");
+#endif
+ for ( ; ptable->size != 0xffffffffU; ptable++)
+ if (ptable->size) {
+ /*
+			 * size-1, because pmap_map rounds up to the next page boundary
+ */
+ PMAPER(ptable->virt_start,
+ ptable->phys_start,
+ ptable->phys_start + (ptable->size - 1),
+ ptable->prot|(ptable->cacheability << 16));
+ }
- /*
- * Allocate all the submaps we need. Note that SYSMAP just allocates
- * kernel virtual address with no physical backing memory. The idea
- * is physical memory will be mapped at this va before using that va.
- * This means that if different physcal pages are going to be mapped
- * at different times, we better do a tlb flush before using it -
+ /*
+ * Allocate all the submaps we need. Note that SYSMAP just allocates
+ * kernel virtual address with no physical backing memory. The idea
+ * is physical memory will be mapped at this va before using that va.
+	 * This means that if different physical pages are going to be mapped
+ * at different times, we better do a tlb flush before using it -
* else we will be referencing the wrong page.
- */
+ */
#define SYSMAP(c, p, v, n) \
({ \
v = (c)virt; \
if ((p = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL) \
- pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16)); \
+ pmap_expand_kmap(virt, (VM_PROT_READ|VM_PROT_WRITE)|(CACHE_GLOBAL << 16)); \
virt += ((n)*NBPG); \
})
- virt = *virt_start;
+ virt = *virt_start;
- SYSMAP(caddr_t, vmpte , vmmap, 1);
- SYSMAP(struct msgbuf *, msgbufmap ,msgbufp, 1);
+ SYSMAP(caddr_t, vmpte , vmmap, 1);
+ SYSMAP(struct msgbuf *, msgbufmap ,msgbufp, 1);
- vmpte->pfn = -1;
- vmpte->dtype = DT_INVALID;
-
- *virt_start = virt;
+ vmpte->pfn = -1;
+ vmpte->dtype = DT_INVALID;
- /*
- * Set translation for UPAGES at UADDR. The idea is we want to
- * have translations set up for UADDR. Later on, the ptes for
- * for this address will be set so that kstack will refer
- * to the u area. Make sure pmap knows about this virtual
- * address by doing vm_findspace on kernel_map.
- */
+ *virt_start = virt;
- for (i = 0, virt = UADDR; i < UPAGES; i++, virt += PAGE_SIZE) {
+ /*
+ * Set translation for UPAGES at UADDR. The idea is we want to
+ * have translations set up for UADDR. Later on, the ptes for
+ * for this address will be set so that kstack will refer
+ * to the u area. Make sure pmap knows about this virtual
+ * address by doing vm_findspace on kernel_map.
+ */
+
+ for (i = 0, virt = UADDR; i < UPAGES; i++, virt += PAGE_SIZE) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("setting up mapping for Upage %d @ %x\n", i, virt);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("setting up mapping for Upage %d @ %x\n", i, virt);
+ }
#endif
- if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
- pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
- }
+ if ((pte = pmap_pte(kernel_pmap, virt)) == PT_ENTRY_NULL)
+ pmap_expand_kmap(virt, VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
+ }
- /*
- * Switch to using new page tables
- */
+ /*
+ * Switch to using new page tables
+ */
- apr_data.bits = 0;
- apr_data.field.st_base = M88K_BTOP(kernel_pmap->sdt_paddr);
- apr_data.field.wt = 1;
- apr_data.field.g = 1;
- apr_data.field.ci = 0;
- apr_data.field.te = 1; /* Translation enable */
+ apr_data.bits = 0;
+ apr_data.field.st_base = M88K_BTOP(kernel_pmap->sdt_paddr);
+ apr_data.field.wt = 1;
+ apr_data.field.g = 1;
+ apr_data.field.ci = 0;
+ apr_data.field.te = 1; /* Translation enable */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- void show_apr(unsigned value);
- show_apr(apr_data.bits);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ void show_apr(unsigned value);
+ show_apr(apr_data.bits);
+ }
#endif
- /* Invalidate entire kernel TLB. */
+ /* Invalidate entire kernel TLB. */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("invalidating tlb %x\n", apr_data.bits);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("invalidating tlb %x\n", apr_data.bits);
+ }
#endif
- cmmu_flush_remote_tlb(0, 1, 0, -1);
-
+ for (i = 0; i < MAX_CPUS; i++)
+ if (cpu_sets[i]) {
+ /* Invalidate entire kernel TLB. */
+ cmmu_flush_remote_tlb(i, 1, 0, -1);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("done invalidating tlb %x\n", apr_data.bits);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("After cmmu_flush_remote_tlb()\n");
+ }
#endif
+ /* still physical */
+ /*
+ * Set valid bit to DT_INVALID so that the very first pmap_enter()
+ * on these won't barf in pmap_remove_range().
+ */
+ pte = pmap_pte(kernel_pmap, phys_map_vaddr1);
+ pte->pfn = -1;
+ pte->dtype = DT_INVALID;
+ pte = pmap_pte(kernel_pmap, phys_map_vaddr2);
+ pte->dtype = DT_INVALID;
+ pte->pfn = -1;
+ /* Load supervisor pointer to segment table. */
+ cmmu_remote_set_sapr(i, apr_data.bits);
+#ifdef DEBUG
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("After cmmu_remote_set_sapr()\n");
+ }
+#endif
+ SETBIT_CPUSET(i, &kernel_pmap->cpus_using);
+ /* Load supervisor pointer to segment table. */
+ }
- /*
- * Set valid bit to DT_INVALID so that the very first pmap_enter()
- * on these won't barf in pmap_remove_range().
- */
- pte = pmap_pte(kernel_pmap, phys_map_vaddr1);
- pte->pfn = -1;
- pte->dtype = DT_INVALID;
- pte = pmap_pte(kernel_pmap, phys_map_vaddr2);
- pte->dtype = DT_INVALID;
- pte->pfn = -1;
-
- /* still physical */
- /* Load supervisor pointer to segment table. */
- cmmu_remote_set_sapr(0, apr_data.bits);
- /* virtual now on */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("running virtual - avail_next 0x%x\n", *phys_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("running virtual - avail_next 0x%x\n", *phys_start);
+ }
#endif
- avail_next = *phys_start;
+ avail_next = *phys_start;
+
+ return;
- return;
-
} /* pmap_bootstrap() */
/*
@@ -1361,22 +1449,22 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */
void *
pmap_bootstrap_alloc(int size)
{
- register void *mem;
-
- size = round_page(size);
- mem = (void *)virtual_avail;
- virtual_avail = pmap_map(virtual_avail, avail_start,
- avail_start + size,
- VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
- avail_start += size;
+ register void *mem;
+
+ size = round_page(size);
+ mem = (void *)virtual_avail;
+ virtual_avail = pmap_map(virtual_avail, avail_start,
+ avail_start + size,
+ VM_PROT_READ|VM_PROT_WRITE|(CACHE_GLOBAL << 16));
+ avail_start += size;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
- printf("pmap_bootstrap_alloc: size %x virtual_avail %x avail_start %x\n",
- size, virtual_avail, avail_start);
- }
+ if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) {
+ printf("pmap_bootstrap_alloc: size %x virtual_avail %x avail_start %x\n",
+ size, virtual_avail, avail_start);
+ }
#endif
- bzero((void *)mem, size);
- return (mem);
+ bzero((void *)mem, size);
+ return (mem);
}
/*
@@ -1409,7 +1497,7 @@ pmap_bootstrap_alloc(int size)
* structures, and segment tables.
*
* Last, it sets the pmap_phys_start and pmap_phys_end global
- * variables. These define the range of pages 'managed' be pmap. These
+ * variables. These define the range of pages 'managed' by pmap. These
* are pages for which pmap must maintain the PV list and the modify
* list. (All other pages are kernel-specific and are permanently
* wired.)
@@ -1424,53 +1512,66 @@ pmap_bootstrap_alloc(int size)
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
- register long npages;
- register vm_offset_t addr;
- register vm_size_t s;
- register int i;
- vm_size_t pvl_table_size;
+ register long npages;
+ register vm_offset_t addr;
+ register vm_size_t s;
+ register int i;
+ vm_size_t pvl_table_size;
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
- printf("(pmap_init) phys_start %x phys_end %x\n", phys_start, phys_end);
+ if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM))
+ printf("(pmap_init) phys_start %x phys_end %x\n", phys_start, phys_end);
#endif
- /*
- * Allocate memory for the pv_head_table,
- * the modify bit array, and the pte_page table.
- */
- npages = atop(phys_end - phys_start);
- pvl_table_size = PV_LOCK_TABLE_SIZE(npages);
- s = (vm_size_t)(npages * sizeof(struct pv_entry) /* pv_list */
- + npages); /* pmap_modify_list */
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the pte_page table.
+ */
+ npages = atop(phys_end - phys_start);
+ pvl_table_size = PV_LOCK_TABLE_SIZE(npages);
+ s = (vm_size_t)(npages * sizeof(struct pv_entry) /* pv_list */
+ + pvl_table_size /* pv_lock_table */
+ + npages); /* pmap_modify_list */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
- printf("(pmap_init) nbr of managed pages = %x\n", npages);
- printf("(pmap_init) size of pv_list = %x\n",
- npages * sizeof(struct pv_entry));
- }
+ if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) {
+ printf("(pmap_init) nbr of managed pages = %x\n", npages);
+ printf("(pmap_init) size of pv_list = %x\n",
+ npages * sizeof(struct pv_entry));
+ }
#endif
- s = round_page(s);
- addr = (vm_offset_t)kmem_alloc(kernel_map, s);
+ s = round_page(s);
+ addr = (vm_offset_t)kmem_alloc(kernel_map, s);
- pv_head_table = (pv_entry_t)addr;
- addr = (vm_offset_t)(pv_head_table + npages);
+ pv_head_table = (pv_entry_t)addr;
+ addr = (vm_offset_t)(pv_head_table + npages);
- pmap_modify_list = (char *)addr;
+ /*
+ * Assume that 'simple_lock' is used to lock pv_lock_table
+ */
+ pv_lock_table = (struct simplelock *)addr; /* XXX */
+ addr = (vm_offset_t)pv_lock_table + pvl_table_size;
- /*
+ pmap_modify_list = (char *)addr;
+
+ /*
+ * Initialize pv_lock_table
+ */
+ for (i = 0; i < npages; i++)
+ simple_lock_init(&(pv_lock_table[i]));
+
+ /*
* Only now, when all of the data structures are allocated,
* can we set pmap_phys_start and pmap_phys_end. If we set them
* too soon, the kmem_alloc above will blow up when it causes
* a call to pmap_enter, and pmap_enter tries to manipulate the
* (not yet existing) pv_list.
*/
- pmap_phys_start = phys_start;
- pmap_phys_end = phys_end;
+ pmap_phys_start = phys_start;
+ pmap_phys_end = phys_end;
- pmap_initialized = TRUE;
+ pmap_initialized = TRUE;
} /* pmap_init() */
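/*
 * pmap_init() sizes one allocation and carves it into pv_head_table,
 * pv_lock_table and pmap_modify_list back to back.  The sketch below
 * reproduces that carving with plain malloc and stand-in types;
 * PV_LOCK_TABLE_SIZE is approximated here as one lock per page.
 */
#include <stdio.h>
#include <stdlib.h>

struct pv_sketch { void *pmap; unsigned long va; };	/* stand-in pv_entry */
struct lock_sketch { int locked; };			/* stand-in simplelock */

int
main(void)
{
	long npages = 1024;		/* as atop(phys_end - phys_start) */
	size_t s;
	char *addr;
	struct pv_sketch *pv_head;
	struct lock_sketch *pv_lock;
	char *modify_list;

	s = npages * sizeof(*pv_head)		/* pv_list */
	    + npages * sizeof(*pv_lock)		/* pv_lock_table */
	    + npages;				/* pmap_modify_list */
	if ((addr = malloc(s)) == NULL)
		return (1);

	pv_head = (struct pv_sketch *)addr;
	pv_lock = (struct lock_sketch *)(pv_head + npages);
	modify_list = (char *)(pv_lock + npages);

	printf("%zu bytes: pv %p lock %p mod %p\n", s,
	    (void *)pv_head, (void *)pv_lock, (void *)modify_list);
	free(addr);
	return (0);
}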
@@ -1514,32 +1615,31 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
void
pmap_zero_page(vm_offset_t phys)
{
- vm_offset_t srcva;
- pte_template_t template;
- unsigned int i;
- unsigned int spl_sav;
-
- register int my_cpu = cpu_number();
- pt_entry_t *srcpte;
-
- srcva = (vm_offset_t)(phys_map_vaddr1 + (my_cpu * PAGE_SIZE));
- srcpte = pmap_pte(kernel_pmap, srcva);
-
- for (i = 0; i < ptes_per_vm_page; i++, phys += M88K_PGBYTES)
- {
- template.bits = M88K_TRUNC_PAGE(phys)
- | m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
- | DT_VALID | CACHE_GLOBAL;
-
-
- spl_sav = splimp();
- cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
- *srcpte = template.pte;
- splx(spl_sav);
- bzero (srcva, M88K_PGBYTES);
- /* force the data out */
- cmmu_flush_remote_data_cache(my_cpu,phys, M88K_PGBYTES);
- }
+ vm_offset_t srcva;
+ pte_template_t template ;
+ unsigned int i;
+ unsigned int spl_sav;
+ int my_cpu;
+ pt_entry_t *srcpte;
+
+ my_cpu = cpu_number();
+ srcva = (vm_offset_t)(phys_map_vaddr1 + (my_cpu * PAGE_SIZE));
+ srcpte = pmap_pte(kernel_pmap, srcva);
+
+ for (i = 0; i < ptes_per_vm_page; i++, phys += M88K_PGBYTES) {
+ template.bits = M88K_TRUNC_PAGE(phys)
+ | m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE)
+ | DT_VALID | CACHE_GLOBAL;
+
+
+ spl_sav = splimp();
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+ splx(spl_sav);
+ bzero (srcva, M88K_PGBYTES);
+ /* force the data out */
+ cmmu_flush_remote_data_cache(my_cpu,phys, M88K_PGBYTES);
+ }
} /* pmap_zero_page() */
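/*
 * Each CPU zeroes through its own window: phys_map_vaddr1 plus the CPU
 * number times PAGE_SIZE, so no two processors ever remap the same
 * virtual page.  A trivial sketch of the va selection; the base
 * address is illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE_SK	0x1000UL
#define PHYS_MAP_VADDR1	0xffe00000UL	/* illustrative base */

int
main(void)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %d zeroes through va 0x%08lx\n",
		    cpu, PHYS_MAP_VADDR1 + cpu * PAGE_SIZE_SK);
	return (0);
}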
@@ -1574,113 +1674,115 @@ pmap_zero_page(vm_offset_t phys)
pmap_t
pmap_create(vm_size_t size)
{
- pmap_t p;
+ pmap_t p;
- /*
- * A software use-only map doesn't even need a map.
- */
- if (size != 0)
- return(PMAP_NULL);
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+ if (size != 0)
+ return (PMAP_NULL);
- CHECK_PMAP_CONSISTENCY("pmap_create");
+ CHECK_PMAP_CONSISTENCY("pmap_create");
- p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
- if (p == PMAP_NULL) {
- panic("pmap_create: cannot allocate a pmap");
- }
+ p = (pmap_t)malloc(sizeof(*p), M_VMPMAP, M_WAITOK);
+ if (p == PMAP_NULL) {
+ panic("pmap_create: cannot allocate a pmap");
+ }
- bzero(p, sizeof(*p));
- pmap_pinit(p);
- return(p);
+ bzero(p, sizeof(*p));
+ pmap_pinit(p);
+ return (p);
} /* pmap_create() */
void
pmap_pinit(pmap_t p)
{
- pmap_statistics_t stats;
- sdt_entry_t *segdt;
- int i;
+ pmap_statistics_t stats;
+ sdt_entry_t *segdt;
+ int i;
- /*
- * Allocate memory for *actual* segment table and *shadow* table.
- */
- segdt = (sdt_entry_t *)kmem_alloc(kernel_map, 2 * SDT_SIZE);
- if (segdt == NULL)
- panic("pmap_create: kmem_alloc failure");
+ /*
+ * Allocate memory for *actual* segment table and *shadow* table.
+ */
+ segdt = (sdt_entry_t *)kmem_alloc(kernel_map, 2 * SDT_SIZE);
+ if (segdt == NULL)
+ panic("pmap_create: kmem_alloc failure");
#if 0
- /* maybe, we can use bzero to zero out the segdt. XXX nivas */
- bzero(segdt, 2 * SDT_SIZE); */
+ /* maybe, we can use bzero to zero out the segdt. XXX nivas */
+ bzero(segdt, 2 * SDT_SIZE);
#endif /* 0 */
- /* use pmap zero page to zero it out */
- pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt));
- if (PAGE_SIZE == SDT_SIZE) /* only got half */
+ /* use pmap zero page to zero it out */
+ pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt));
+ if (PAGE_SIZE == SDT_SIZE) /* only got half */
pmap_zero_page(pmap_extract(kernel_pmap,(vm_offset_t)segdt+PAGE_SIZE));
- if (PAGE_SIZE < 2*SDT_SIZE) /* get remainder */
+ if (PAGE_SIZE < 2*SDT_SIZE) /* get remainder */
bzero((vm_offset_t)segdt+PAGE_SIZE, (2*SDT_SIZE)-PAGE_SIZE);
- /*
- * Initialize pointer to segment table both virtual and physical.
- */
- p->sdt_vaddr = segdt;
- p->sdt_paddr = (sdt_entry_t *)pmap_extract(kernel_pmap,(vm_offset_t)segdt);
+ /*
+ * Initialize pointer to segment table both virtual and physical.
+ */
+ p->sdt_vaddr = segdt;
+ p->sdt_paddr = (sdt_entry_t *)pmap_extract(kernel_pmap,(vm_offset_t)segdt);
- if (!PAGE_ALIGNED(p->sdt_paddr)) {
- printf("pmap_create: std table = %x\n",(int)p->sdt_paddr);
- panic("pmap_create: sdt_table not aligned on page boundary");
- }
+ if (!PAGE_ALIGNED(p->sdt_paddr)) {
+ printf("pmap_create: std table = %x\n",(int)p->sdt_paddr);
+ panic("pmap_create: sdt_table not aligned on page boundary");
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
- printf("(pmap_create :%x) pmap=0x%x, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
- curproc, (unsigned)p, p->sdt_vaddr, p->sdt_paddr);
- }
+ if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) {
+ printf("(pmap_create :%x) pmap=0x%x, sdt_vaddr=0x%x, sdt_paddr=0x%x\n",
+ curproc, (unsigned)p, p->sdt_vaddr, p->sdt_paddr);
+ }
#endif
#if notneeded
- /*
- * memory for page tables should be CACHE DISABLED?
- */
- pmap_cache_ctrl(kernel_pmap,
- (vm_offset_t)segdt,
- (vm_offset_t)segdt+SDT_SIZE,
- CACHE_INH);
+ /*
+ * memory for page tables should be CACHE DISABLED?
+ */
+ pmap_cache_ctrl(kernel_pmap,
+ (vm_offset_t)segdt,
+ (vm_offset_t)segdt+ (SDT_SIZE*2),
+ CACHE_INH);
#endif
- /*
- * Initalize SDT_ENTRIES.
- */
- /*
- * There is no need to clear segment table, since kmem_alloc would
- * provides us clean pages.
- */
+ /*
+	 * Initialize SDT_ENTRIES.
+	 */
+	/*
+	 * There is no need to clear the segment table, since kmem_alloc
+	 * provides us clean pages.
+ */
- /*
- * Initialize pmap structure.
- */
- p->ref_count = 1;
+ /*
+ * Initialize pmap structure.
+ */
+ p->ref_count = 1;
+ simple_lock_init(&p->lock);
+ p->cpus_using = 0;
#ifdef OMRON_PMAP
- /* initialize block address translation cache */
- for (i = 0; i < BATC_MAX; i++) {
- p->i_batc[i].bits = 0;
- p->d_batc[i].bits = 0;
- }
+ /* initialize block address translation cache */
+ for (i = 0; i < BATC_MAX; i++) {
+ p->i_batc[i].bits = 0;
+ p->d_batc[i].bits = 0;
+ }
#endif
- /*
- * Initialize statistics.
- */
- stats = &p->stats;
- stats->resident_count = 0;
- stats->wired_count = 0;
+ /*
+ * Initialize statistics.
+ */
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
#ifdef DEBUG
- /* link into list of pmaps, just after kernel pmap */
- p->next = kernel_pmap->next;
- p->prev = kernel_pmap;
- kernel_pmap->next = p;
- p->next->prev = p;
+ /* link into list of pmaps, just after kernel pmap */
+ p->next = kernel_pmap->next;
+ p->prev = kernel_pmap;
+ kernel_pmap->next = p;
+ p->next->prev = p;
#endif
} /* pmap_pinit() */
@@ -1713,49 +1815,48 @@ pmap_pinit(pmap_t p)
STATIC void
pmap_free_tables(pmap_t pmap)
{
- unsigned long sdt_va; /* outer loop index */
- sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
- pt_entry_t *gdttbl; /* ptr to first entry in a page table */
- unsigned int i,j;
+ unsigned long sdt_va; /* outer loop index */
+ sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ unsigned int i,j;
#if DEBUG
- if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
- printf("(pmap_free_tables :%x) pmap %x\n", curproc, pmap);
+ if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM))
+ printf("(pmap_free_tables :%x) pmap %x\n", curproc, pmap);
#endif
- sdttbl = pmap->sdt_vaddr; /* addr of segment table */
+ sdttbl = pmap->sdt_vaddr; /* addr of segment table */
- /*
- This contortion is here instead of the natural loop
- because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
- */
+ /*
+ This contortion is here instead of the natural loop
+ because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
+ */
- i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- if ( j < 1024 ) j++;
+ i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ if ( j < 1024 ) j++;
- /* Segment table Loop */
- for ( ; i < j; i++)
- {
- sdt_va = PDT_TABLE_GROUP_VA_SPACE*i;
- if ((gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va)) != PT_ENTRY_NULL) {
+ /* Segment table Loop */
+ for ( ; i < j; i++) {
+ sdt_va = PDT_TABLE_GROUP_VA_SPACE*i;
+ if ((gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va)) != PT_ENTRY_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
- printf("(pmap_free_tables :%x) free page table = 0x%x\n", curproc, gdttbl);
+ if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+ printf("(pmap_free_tables :%x) free page table = 0x%x\n", curproc, gdttbl);
#endif
- PT_FREE(gdttbl);
- }
+ PT_FREE(gdttbl);
+ }
- } /* Segment Loop */
+ } /* Segment Loop */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
+ if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
printf("(pmap_free_tables :%x) free segment table = 0x%x\n", curproc, sdttbl);
#endif
- /*
- * Freeing both *actual* and *shadow* segment tables
- */
- kmem_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
+ /*
+ * Freeing both *actual* and *shadow* segment tables
+ */
+ kmem_free(kernel_map, (vm_offset_t)sdttbl, 2*SDT_SIZE);
} /* pmap_free_tables() */
@@ -1763,13 +1864,13 @@ pmap_free_tables(pmap_t pmap)
void
pmap_release(register pmap_t p)
{
- pmap_free_tables(p);
+ pmap_free_tables(p);
#ifdef DBG
- DEBUG ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
- printf("(pmap_destroy :%x) ref_count = 0\n", curproc);
- /* unlink from list of pmap structs */
- p->prev->next = p->next;
- p->next->prev = p->prev;
+	if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
+ printf("(pmap_destroy :%x) ref_count = 0\n", curproc);
+ /* unlink from list of pmap structs */
+ p->prev->next = p->next;
+ p->next->prev = p->prev;
#endif
}
@@ -1801,30 +1902,30 @@ pmap_release(register pmap_t p)
void
pmap_destroy(pmap_t p)
{
- register int c, s;
+ register int c, s;
- if (p == PMAP_NULL) {
+ if (p == PMAP_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
- printf("(pmap_destroy :%x) pmap is NULL\n", curproc);
+ if ((pmap_con_dbg & (CD_DESTR | CD_NORM)) == (CD_DESTR | CD_NORM))
+ printf("(pmap_destroy :%x) pmap is NULL\n", curproc);
#endif
- return;
- }
+ return;
+ }
- if (p == kernel_pmap) {
- panic("pmap_destroy: Attempt to destroy kernel pmap");
- }
+ if (p == kernel_pmap) {
+ panic("pmap_destroy: Attempt to destroy kernel pmap");
+ }
- CHECK_PMAP_CONSISTENCY("pmap_destroy");
+ CHECK_PMAP_CONSISTENCY("pmap_destroy");
- PMAP_LOCK(p, s);
- c = --p->ref_count;
- PMAP_UNLOCK(p, s);
+ PMAP_LOCK(p, s);
+ c = --p->ref_count;
+ PMAP_UNLOCK(p, s);
- if (c == 0) {
- pmap_release(p);
- free((caddr_t)p,M_VMPMAP);
- }
+ if (c == 0) {
+ pmap_release(p);
+ free((caddr_t)p,M_VMPMAP);
+ }
} /* pmap_destroy() */
@@ -1847,13 +1948,13 @@ pmap_destroy(pmap_t p)
void
pmap_reference(pmap_t p)
{
- int s;
+ int s;
- if (p != PMAP_NULL) {
- PMAP_LOCK(p, s);
- p->ref_count++;
- PMAP_UNLOCK(p, s);
- }
+ if (p != PMAP_NULL) {
+ PMAP_LOCK(p, s);
+ p->ref_count++;
+ PMAP_UNLOCK(p, s);
+ }
} /* pmap_reference */
@@ -1916,151 +2017,154 @@ pmap_reference(pmap_t p)
STATIC void
pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
{
- int pfi;
- int pfn;
- int num_removed = 0,
- num_unwired = 0;
- register int i;
- pt_entry_t *pte;
- pv_entry_t prev, cur;
- pv_entry_t pvl;
- vm_offset_t pa, va, tva;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (e < s)
+ int pfi;
+ int pfn;
+ int num_removed = 0,
+ num_unwired = 0;
+ register int i;
+ pt_entry_t *pte;
+ pv_entry_t prev, cur;
+ pv_entry_t pvl;
+ vm_offset_t pa, va, tva;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (e < s)
panic("pmap_remove_range: end < start");
- /*
- * Pmap has been locked by pmap_remove.
- */
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- /*
- * Loop through the range in vm_page_size increments.
- * Do not assume that either start or end fail on any
- * kind of page boundary (though this may be true!?).
- */
+ /*
+ * Pmap has been locked by pmap_remove.
+ */
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
- CHECK_PAGE_ALIGN(s, "pmap_remove_range - start addr");
-
- for (va = s; va < e; va += PAGE_SIZE) {
-
- sdt_entry_t *sdt;
-
- sdt = SDTENT(pmap,va);
-
- if (!SDT_VALID(sdt)) {
- va &= SDT_MASK; /* align to segment */
- if (va <= e - (1<<SDT_SHIFT))
- va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
- else /* wrap around */
- break;
- continue;
- }
-
- pte = pmap_pte(pmap,va);
-
- if (!PDT_VALID(pte)) {
- continue; /* no page mapping */
- }
-
- num_removed++;
-
- if (pte->wired)
- num_unwired++;
-
- pfn = pte->pfn;
- pa = M88K_PTOB(pfn);
-
- if (PMAP_MANAGED(pa)) {
- pfi = PFIDX(pa);
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- */
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST(pa, pvl, "pmap_remove_range before");
-
- if (pvl->pmap == PMAP_NULL)
- panic("pmap_remove: null pv_list");
-
- if (pvl->va == va && pvl->pmap == pmap) {
-
- /*
- * Hander is the pv_entry. Copy the next one
- * to hander and free the next one (we can't
- * free the hander)
- */
- cur = pvl->next;
- if (cur != PV_ENTRY_NULL) {
- *pvl = *cur;
- free((caddr_t)cur, M_VMPVENT);
- } else {
- pvl->pmap = PMAP_NULL;
- }
-
- } else {
-
- for (prev = pvl; (cur = prev->next) != PV_ENTRY_NULL; prev = cur) {
- if (cur->va == va && cur->pmap == pmap) {
- break;
- }
- }
- if (cur == PV_ENTRY_NULL) {
- printf("pmap_remove_range: looking for VA "
- "0x%x (pa 0x%x) PV list at 0x%x\n", va, pa, (unsigned)pvl);
- panic("pmap_remove_range: mapping not in pv_list");
- }
-
- prev->next = cur->next;
- free((caddr_t)cur, M_VMPVENT);
- }
-
- CHECK_PV_LIST(pa, pvl, "pmap_remove_range after");
-
- } /* if PAGE_MANAGED */
-
- /*
- * For each pte in vm_page (NOTE: vm_page, not
- * M88K (machine dependent) page !! ), reflect
- * modify bits to pager and zero (invalidate,
- * remove) the pte entry.
- */
- tva = va;
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- opte.bits = invalidate_pte(pte);
- flush_atc_entry(0, tva, kflush);
-
- if (opte.pte.modified) {
- if (IS_VM_PHYSADDR(pa)) {
- vm_page_set_modified(PHYS_TO_VM_PAGE(opte.bits & M88K_PGMASK));
- }
- /* keep track ourselves too */
- if (PMAP_MANAGED(pa))
- pmap_modify_list[pfi] = 1;
- }
- pte++;
- tva += M88K_PGBYTES;
- }
-
- } /* end for ( va = s; ...) */
-
- /*
- * Update the counts
- */
- pmap->stats.resident_count -= num_removed;
- pmap->stats.wired_count -= num_unwired;
+ /*
+ * Loop through the range in vm_page_size increments.
+	 * Do not assume that either start or end fall on any
+ * kind of page boundary (though this may be true!?).
+ */
+
+ CHECK_PAGE_ALIGN(s, "pmap_remove_range - start addr");
+
+ for (va = s; va < e; va += PAGE_SIZE) {
+
+ sdt_entry_t *sdt;
+
+ sdt = SDTENT(pmap,va);
+
+ if (!SDT_VALID(sdt)) {
+ va &= SDT_MASK; /* align to segment */
+ if (va <= e - (1<<SDT_SHIFT))
+ va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
+ else /* wrap around */
+ break;
+ continue;
+ }
+
+ pte = pmap_pte(pmap,va);
+
+ if (!PDT_VALID(pte)) {
+ continue; /* no page mapping */
+ }
+
+ num_removed++;
+
+ if (pte->wired)
+ num_unwired++;
+
+ pfn = pte->pfn;
+ pa = M88K_PTOB(pfn);
+
+ if (PMAP_MANAGED(pa)) {
+ pfi = PFIDX(pa);
+ LOCK_PVH(pfi);
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ */
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(pa, pvl, "pmap_remove_range before");
+
+ if (pvl->pmap == PMAP_NULL)
+ panic("pmap_remove_range: null pv_list");
+
+ if (pvl->va == va && pvl->pmap == pmap) {
+
+ /*
+			 * The header is the pv_entry. Copy the next one
+			 * into the header and free the next one (we can't
+			 * free the header)
+ */
+ cur = pvl->next;
+ if (cur != PV_ENTRY_NULL) {
+ *pvl = *cur;
+ free((caddr_t)cur, M_VMPVENT);
+ } else {
+ pvl->pmap = PMAP_NULL;
+ }
+
+ } else {
+
+ for (prev = pvl; (cur = prev->next) != PV_ENTRY_NULL; prev = cur) {
+ if (cur->va == va && cur->pmap == pmap) {
+ break;
+ }
+ }
+ if (cur == PV_ENTRY_NULL) {
+ printf("pmap_remove_range: looking for VA "
+ "0x%x (pa 0x%x) PV list at 0x%x\n", va, pa, (unsigned)pvl);
+ panic("pmap_remove_range: mapping not in pv_list");
+ }
+
+ prev->next = cur->next;
+ free((caddr_t)cur, M_VMPVENT);
+ }
+
+ CHECK_PV_LIST(pa, pvl, "pmap_remove_range after");
+ UNLOCK_PVH(pfi);
+
+ } /* if PAGE_MANAGED */
+
+ /*
+ * For each pte in vm_page (NOTE: vm_page, not
+ * M88K (machine dependent) page !! ), reflect
+ * modify bits to pager and zero (invalidate,
+ * remove) the pte entry.
+ */
+ tva = va;
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily so that another cpu
+			 * cannot write the modified/referenced bits back into it.
+ */
+ opte.bits = invalidate_pte(pte);
+ flush_atc_entry(users, tva, kflush);
+
+ if (opte.pte.modified) {
+ if (IS_VM_PHYSADDR(pa)) {
+ vm_page_set_modified(PHYS_TO_VM_PAGE(opte.bits & M88K_PGMASK));
+ }
+ /* keep track ourselves too */
+ if (PMAP_MANAGED(pa))
+ pmap_modify_list[pfi] = 1;
+ }
+ pte++;
+ tva += M88K_PGBYTES;
+ }
+
+ } /* end for ( va = s; ...) */
+
+ /*
+ * Update the counts
+ */
+ pmap->stats.resident_count -= num_removed;
+ pmap->stats.wired_count -= num_unwired;
} /* pmap_remove_range */
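
The pv-list handling above depends on the head entry being embedded in the
pv_head_table, so the head itself can never be freed: removing the head's own
mapping means copying its successor into the head slot and freeing the
successor instead. A stand-alone sketch of that unlink logic, using an int key
as a stand-in for the (pmap, va) pair and -1 for the PMAP_NULL "empty" marker:

#include <stdio.h>
#include <stdlib.h>

struct pv {				/* illustrative stand-in for pv_entry */
	int va;
	struct pv *next;
};

static void
pv_remove(struct pv *head, int va)
{
	struct pv *prev, *cur;

	if (head->va == va) {
		cur = head->next;
		if (cur != NULL) {	/* copy successor into the head slot */
			*head = *cur;
			free(cur);
		} else
			head->va = -1;	/* list is now empty */
		return;
	}
	for (prev = head; (cur = prev->next) != NULL; prev = cur)
		if (cur->va == va)
			break;
	if (cur == NULL)
		return;			/* the kernel code panics here */
	prev->next = cur->next;		/* ordinary mid-list unlink */
	free(cur);
}

int
main(void)
{
	struct pv head = { 1, NULL };
	struct pv *e = malloc(sizeof *e);

	e->va = 2;
	e->next = NULL;
	head.next = e;
	pv_remove(&head, 1);			/* head's mapping goes away */
	printf("head now holds va=%d\n", head.va);	/* prints 2 */
	pv_remove(&head, 2);
	printf("list empty: %d\n", head.va == -1);	/* prints 1 */
	return 0;
}
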
@@ -2089,23 +2193,25 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e)
void
pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
{
- int spl;
+ int spl;
- if (map == PMAP_NULL) {
- return;
- }
+ if (map == PMAP_NULL) {
+ return;
+ }
#if DEBUG
- if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
- printf("(pmap_remove :%x) map %x s %x e %x\n", curproc, map, s, e);
+ if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
+ printf("(pmap_remove :%x) map %x s %x e %x\n", curproc, map, s, e);
#endif
- CHECK_PAGE_ALIGN(s, "pmap_remove start addr");
+ CHECK_PAGE_ALIGN(s, "pmap_remove start addr");
- if (s>e)
- panic("pmap_remove: start greater than end address");
+ if (s>e)
+ panic("pmap_remove: start greater than end address");
- pmap_remove_range(map, s, e);
+ PMAP_LOCK(map, spl);
+ pmap_remove_range(map, s, e);
+ PMAP_UNLOCK(map, spl);
} /* pmap_remove() */
@@ -2154,110 +2260,117 @@ pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e)
void
pmap_remove_all(vm_offset_t phys)
{
- pv_entry_t pvl, cur;
- register pt_entry_t *pte;
- int pfi;
- register int i;
- register vm_offset_t va;
- register pmap_t pmap;
- int spl;
- int dbgcnt = 0;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
- /* not a managed page. */
+ pv_entry_t pvl, cur;
+ register pt_entry_t *pte;
+ int pfi;
+ register int i;
+ register vm_offset_t va;
+ register pmap_t pmap;
+ int spl;
+ int dbgcnt = 0;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
+ /* not a managed page. */
#ifdef DEBUG
- if (pmap_con_dbg & CD_RMAL)
- printf("(pmap_remove_all :%x) phys addr 0x%x not a managed page\n", curproc, phys);
+ if (pmap_con_dbg & CD_RMAL)
+ printf("(pmap_remove_all :%x) phys addr 0x%x not a managed page\n", curproc, phys);
#endif
- return;
- }
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
- /*
- * Walk down PV list, removing all mappings.
- * We have to do the same work as in pmap_remove_pte_page
- * since that routine locks the pv_head. We don't have
- * to lock the pv_head, since we have the entire pmap system.
- */
-remove_all_Retry:
+ /*
+ * Walk down PV list, removing all mappings.
+ * We have to do the same work as in pmap_remove_pte_page
+ * since that routine locks the pv_head. We don't have
+ * to lock the pv_head, since we have the entire pmap system.
+ */
+ remove_all_Retry:
- pfi = PFIDX(phys);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST(phys, pvl, "pmap_remove_all before");
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(phys, pvl, "pmap_remove_all before");
+ LOCK_PVH(pfi);
- /*
- * Loop for each entry on the pv list
- */
- while ((pmap = pvl->pmap) != PMAP_NULL) {
- va = pvl->va;
- users = 0;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- pte = pmap_pte(pmap, va);
-
- /*
- * Do a few consistency checks to make sure
- * the PV list and the pmap are in synch.
- */
- if (pte == PT_ENTRY_NULL) {
- printf("(pmap_remove_all :%x) phys %x pmap %x va %x dbgcnt %x\n",
- (unsigned)curproc, phys, (unsigned)pmap, va, dbgcnt);
- panic("pmap_remove_all: pte NULL");
- }
- if (!PDT_VALID(pte))
- panic("pmap_remove_all: pte invalid");
- if (M88K_PTOB(pte->pfn) != phys)
- panic("pmap_remove_all: pte doesn't point to page");
- if (pte->wired)
- panic("pmap_remove_all: removing a wired page");
-
- pmap->stats.resident_count--;
-
- if ((cur = pvl->next) != PV_ENTRY_NULL) {
- *pvl = *cur;
- free((caddr_t)cur, M_VMPVENT);
- }
- else
- pvl->pmap = PMAP_NULL;
-
- /*
- * Reflect modified pages to pager.
- */
- for (i = ptes_per_vm_page; i>0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- opte.bits = invalidate_pte(pte);
- flush_atc_entry(users, va, kflush);
-
- if (opte.pte.modified) {
- vm_page_set_modified((vm_page_t)PHYS_TO_VM_PAGE(phys));
- /* keep track ourselves too */
- pmap_modify_list[pfi] = 1;
- }
- pte++;
- va += M88K_PGBYTES;
- }
-
- /*
- * Do not free any page tables,
- * leaves that for when VM calls pmap_collect().
- */
- dbgcnt++;
- }
- CHECK_PV_LIST(phys, pvl, "pmap_remove_all after");
-
- SPLX(spl);
+ /*
+ * Loop for each entry on the pv list
+ */
+ while ((pmap = pvl->pmap) != PMAP_NULL) {
+ va = pvl->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto remove_all_Retry;
+ }
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Do a few consistency checks to make sure
+ * the PV list and the pmap are in synch.
+ */
+ if (pte == PT_ENTRY_NULL) {
+ printf("(pmap_remove_all :%x) phys %x pmap %x va %x dbgcnt %x\n",
+ (unsigned)curproc, phys, (unsigned)pmap, va, dbgcnt);
+ panic("pmap_remove_all: pte NULL");
+ }
+ if (!PDT_VALID(pte))
+ panic("pmap_remove_all: pte invalid");
+ if (M88K_PTOB(pte->pfn) != phys)
+ panic("pmap_remove_all: pte doesn't point to page");
+ if (pte->wired)
+ panic("pmap_remove_all: removing a wired page");
+
+ pmap->stats.resident_count--;
+
+ if ((cur = pvl->next) != PV_ENTRY_NULL) {
+ *pvl = *cur;
+ free((caddr_t)cur, M_VMPVENT);
+ } else
+ pvl->pmap = PMAP_NULL;
+
+ /*
+ * Reflect modified pages to pager.
+ */
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily so that another cpu
+			 * cannot write the modified/referenced bits back into it.
+ */
+ opte.bits = invalidate_pte(pte);
+ flush_atc_entry(users, va, kflush);
+
+ if (opte.pte.modified) {
+ vm_page_set_modified((vm_page_t)PHYS_TO_VM_PAGE(phys));
+ /* keep track ourselves too */
+ pmap_modify_list[pfi] = 1;
+ }
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ /*
+ * Do not free any page tables,
+ * leaves that for when VM calls pmap_collect().
+ */
+ simple_unlock(&pmap->lock);
+ dbgcnt++;
+ }
+ CHECK_PV_LIST(phys, pvl, "pmap_remove_all after");
+
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
} /* pmap_remove_all() */
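
Note the locking direction in pmap_remove_all(): the canonical order elsewhere
is pmap lock first, then pv-head lock, but this routine arrives holding the
pv-head lock and still needs each pmap's lock. Taking it unconditionally could
deadlock, hence simple_lock_try() and the backout to remove_all_Retry. A
compact sketch of that try-lock/retry discipline, with pthread mutexes standing
in for the kernel's simple locks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pvh_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;

static void
remove_all(void)
{
retry:
	pthread_mutex_lock(&pvh_lock);	/* wrong-order acquisition... */
	if (pthread_mutex_trylock(&pmap_lock) != 0) {
		/* ...so only try the second lock; back out and restart */
		pthread_mutex_unlock(&pvh_lock);
		goto retry;
	}
	/* both locks held: unlink the mapping here */
	pthread_mutex_unlock(&pmap_lock);
	pthread_mutex_unlock(&pvh_lock);
}

int
main(void)
{
	remove_all();
	printf("ok\n");
	return 0;
}

Dropping the pv-head lock before retrying is what lets whoever holds the pmap
lock make progress and eventually release it.
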
@@ -2290,93 +2403,104 @@ remove_all_Retry:
STATIC void
pmap_copy_on_write(vm_offset_t phys)
{
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
- register int i;
- int spl, spl_sav;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ register int i;
+ int spl, spl_sav;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DEBUG
- if (pmap_con_dbg & CD_CMOD)
- printf("(pmap_copy_on_write :%x) phys addr 0x%x not managed \n", curproc, phys);
+ if (pmap_con_dbg & CD_CMOD)
+ printf("(pmap_copy_on_write :%x) phys addr 0x%x not managed \n", curproc, phys);
#endif
- return;
- }
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
- pv_e = PFIDX_TO_PVH(PFIDX(phys));
- CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before");
- if (pv_e->pmap == PMAP_NULL) {
+ copy_on_write_Retry:
+ pv_e = PFIDX_TO_PVH(PFIDX(phys));
+ CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before");
+ LOCK_PVH(PFIDX(phys));
+ if (pv_e->pmap == PMAP_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM))
- printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys);
+ if ((pmap_con_dbg & (CD_COW | CD_NORM)) == (CD_COW | CD_NORM))
+ printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- SPLX(spl);
+ UNLOCK_PVH(PFIDX(phys));
+ SPLX(spl);
+
+ return; /* no mappings */
+ }
- return; /* no mappings */
- }
+ /*
+ * Run down the list of mappings to this physical page,
+ * disabling write privileges on each one.
+ */
- /*
- * Run down the list of mappings to this physical page,
- * disabling write privileges on each one.
- */
+ while (pv_e != PV_ENTRY_NULL) {
+ pmap_t pmap;
+ vm_offset_t va;
- while (pv_e != PV_ENTRY_NULL) {
- pmap_t pmap;
- vm_offset_t va;
-
- pmap = pv_e->pmap;
- va = pv_e->va;
-
- users = 0;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- /*
- * Check for existing and valid pte
- */
- pte = pmap_pte(pmap, va);
- if (pte == PT_ENTRY_NULL)
- panic("pmap_copy_on_write: pte from pv_list not in map");
- if (!PDT_VALID(pte))
- panic("pmap_copy_on_write: invalid pte");
- if (M88K_PTOB(pte->pfn) != phys)
- panic("pmap_copy_on_write: pte doesn't point to page");
-
- /*
- * Flush TLBs of which cpus using pmap.
- */
-
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = M88K_RO;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va += M88K_PGBYTES;
- }
-
- pv_e = pv_e->next;
- }
- CHECK_PV_LIST(phys, PFIDX_TO_PVH(PFIDX(phys)), "pmap_copy_on_write");
-
- SPLX(spl);
+ pmap = pv_e->pmap;
+ va = pv_e->va;
+
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(PFIDX(phys));
+ goto copy_on_write_Retry;
+ }
+
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * Check for existing and valid pte
+ */
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_copy_on_write: pte from pv_list not in map");
+ if (!PDT_VALID(pte))
+ panic("pmap_copy_on_write: invalid pte");
+ if (M88K_PTOB(pte->pfn) != phys)
+ panic("pmap_copy_on_write: pte doesn't point to page");
+
+ /*
+ * Flush TLBs of which cpus using pmap.
+ */
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily so that another cpu
+			 * cannot write the modified/referenced bits back into it.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = M88K_RO;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ simple_unlock(&pmap->lock);
+ pv_e = pv_e->next;
+ }
+ CHECK_PV_LIST(phys, PFIDX_TO_PVH(PFIDX(phys)), "pmap_copy_on_write");
+
+ UNLOCK_PVH(PFIDX(phys));
+ SPLX(spl);
} /* pmap_copy_on_write */
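
The invalidate-then-rewrite sequence above recurs throughout this file: the pte
is first swapped for an invalid one, so no other cpu's MMU can write the
modified/referenced bits back into it mid-update; the saved image is then
edited, republished, and the va flushed from the relevant TLBs. A user-space
approximation of the sequence; the pte layout below is illustrative, not the
real m88k one:

#include <stdio.h>

typedef union {
	unsigned bits;
	struct {
		unsigned valid    : 1;
		unsigned prot     : 1;	/* 1 = read-only */
		unsigned modified : 1;
		unsigned pfn      : 29;
	} pte;
} pte_template_t;

#define RO	1

/* Swap the live pte for an invalid one and return the old image; once
 * the entry is invalid the hardware can no longer update its bits.  A
 * real kernel needs an atomic exchange here (xmem on the m88k). */
static unsigned
invalidate_pte(volatile pte_template_t *p)
{
	unsigned old = p->bits;

	p->bits = 0;
	return old;
}

static void
make_readonly(volatile pte_template_t *p)
{
	pte_template_t opte;

	opte.bits = invalidate_pte(p);	/* window: entry is invalid */
	opte.pte.prot = RO;		/* edit the saved image */
	p->bits = opte.bits;		/* publish the read-only pte */
	/* ...then flush this va from the TLBs of the cpus using the pmap */
}

int
main(void)
{
	pte_template_t t;

	t.bits = 0;
	t.pte.valid = 1;
	t.pte.pfn = 42;
	make_readonly(&t);
	printf("valid=%u prot=%u pfn=%u\n",
	    t.pte.valid, t.pte.prot, t.pte.pfn);
	return 0;
}
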
@@ -2410,92 +2534,94 @@ pmap_copy_on_write(vm_offset_t phys)
void
pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
{
- pte_template_t maprot;
- unsigned ap;
- int spl, spl_sav;
- register int i;
- pt_entry_t *pte;
- vm_offset_t va, tva;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
- return;
- if ((prot & VM_PROT_READ) == 0) {
- pmap_remove(pmap, s, e);
- return;
- }
- if (s > e)
- panic("pmap_protect: start grater than end address");
-
- maprot.bits = m88k_protection(pmap, prot);
- ap = maprot.pte.prot;
-
- PMAP_LOCK(pmap, spl);
-
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- CHECK_PAGE_ALIGN(s, "pmap_protect");
-
- /*
- * Loop through the range in vm_page_size increment.
- * Do not assume that either start or end fall on any
- * kind of page boundary (though this may be true ?!).
- */
- for (va = s; va <= e; va += PAGE_SIZE) {
+ pte_template_t maprot;
+ unsigned ap;
+ int spl, spl_sav;
+ register int i;
+ pt_entry_t *pte;
+ vm_offset_t va, tva;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (pmap == PMAP_NULL || prot & VM_PROT_WRITE)
+ return;
+ if ((prot & VM_PROT_READ) == 0) {
+ pmap_remove(pmap, s, e);
+ return;
+ }
+
+ if (s > e)
+		panic("pmap_protect: start greater than end address");
- pte = pmap_pte(pmap, va);
+ maprot.bits = m88k_protection(pmap, prot);
+ ap = maprot.pte.prot;
- if (pte == PT_ENTRY_NULL) {
+ PMAP_LOCK(pmap, spl);
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ CHECK_PAGE_ALIGN(s, "pmap_protect");
+
+ /*
+	 * Loop through the range in vm_page_size increments.
+ * Do not assume that either start or end fall on any
+ * kind of page boundary (though this may be true ?!).
+ */
+ for (va = s; va <= e; va += PAGE_SIZE) {
+
+ pte = pmap_pte(pmap, va);
+
+ if (pte == PT_ENTRY_NULL) {
+
+ va &= SDT_MASK; /* align to segment */
+ if (va <= e - (1<<SDT_SHIFT))
+ va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
+ else /* wrap around */
+ break;
- va &= SDT_MASK; /* align to segment */
- if (va <= e - (1<<SDT_SHIFT))
- va += (1<<SDT_SHIFT) - PAGE_SIZE; /* no page table, skip to next seg entry */
- else /* wrap around */
- break;
-
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
- printf("(pmap_protect :%x) no page table :: skip to 0x%x\n", curproc, va + PAGE_SIZE);
+ if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
+ printf("(pmap_protect :%x) no page table :: skip to 0x%x\n", curproc, va + PAGE_SIZE);
#endif
- continue;
- }
+ continue;
+ }
- if (!PDT_VALID(pte)) {
+ if (!PDT_VALID(pte)) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
- printf("(pmap_protect :%x) pte invalid pte @ 0x%x\n", curproc, pte);
+ if ((pmap_con_dbg & (CD_PROT | CD_FULL)) == (CD_PROT | CD_FULL))
+ printf("(pmap_protect :%x) pte invalid pte @ 0x%x\n", curproc, pte);
#endif
- continue; /* no page mapping */
- }
+ continue; /* no page mapping */
+ }
#if 0
- printf("(pmap_protect :%x) pte good\n", curproc);
+ printf("(pmap_protect :%x) pte good\n", curproc);
#endif
- tva = va;
- for (i = ptes_per_vm_page; i>0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = ap;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(0, tva, kflush);
- splx(spl_sav);
- pte++;
- tva += M88K_PGBYTES;
- }
- }
-
- PMAP_UNLOCK(pmap, spl);
+ tva = va;
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily so that another cpu
+			 * cannot write the modified/referenced bits back into it.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = ap;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, tva, kflush);
+ splx(spl_sav);
+ pte++;
+ tva += M88K_PGBYTES;
+ }
+ }
+
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_protect() */
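
When a segment has no page table, the scan loops above jump to the next
segment boundary instead of probing every page, and the "wrap around" branch
stops the scan when adding a whole segment would run past the end address. The
skip arithmetic in isolation, with illustrative page and segment sizes and a
fake mapped-segment predicate:

#include <stdio.h>

#define PAGE_SIZE	0x1000u		/* 4 KB pages (illustrative) */
#define SDT_SHIFT	22		/* 4 MB segments (illustrative) */
#define SDT_MASK	(~((1u << SDT_SHIFT) - 1))

static int
segment_has_table(unsigned va)
{
	return (va >> SDT_SHIFT) & 1;	/* pretend odd segments are mapped */
}

int
main(void)
{
	unsigned s = 0x00300000u, e = 0x00c00000u, va;

	for (va = s; va < e; va += PAGE_SIZE) {
		if (!segment_has_table(va)) {
			va &= SDT_MASK;			/* align to segment */
			if (va <= e - (1u << SDT_SHIFT))
				va += (1u << SDT_SHIFT) - PAGE_SIZE;	/* skip segment */
			else				/* would wrap past e */
				break;
			continue;
		}
		/* per-page work for a mapped va would go here */
	}
	printf("scan stopped at 0x%x\n", va);
	return 0;
}

The "- PAGE_SIZE" compensates for the "va += PAGE_SIZE" that the loop header
performs on the next iteration.
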
@@ -2548,102 +2674,102 @@ pmap_protect(pmap_t pmap, vm_offset_t s, vm_offset_t e, vm_prot_t prot)
STATIC void
pmap_expand(pmap_t map, vm_offset_t v)
{
- int i,
- spl;
- vm_offset_t pdt_vaddr,
- pdt_paddr;
+ int i,
+ spl;
+ vm_offset_t pdt_vaddr,
+ pdt_paddr;
- sdt_entry_t *sdt;
- pt_entry_t *pte;
- vm_offset_t pmap_extract();
+ sdt_entry_t *sdt;
+ pt_entry_t *pte;
+ vm_offset_t pmap_extract();
- if (map == PMAP_NULL) {
- panic("pmap_expand: pmap is NULL");
- }
+ if (map == PMAP_NULL) {
+ panic("pmap_expand: pmap is NULL");
+ }
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM))
- printf ("(pmap_expand :%x) map %x v %x\n", curproc, map, v);
+ if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM))
+ printf ("(pmap_expand :%x) map %x v %x\n", curproc, map, v);
#endif
- CHECK_PAGE_ALIGN (v, "pmap_expand");
+ CHECK_PAGE_ALIGN (v, "pmap_expand");
- /*
- * Handle kernel pmap in pmap_expand_kmap().
- */
- if (map == kernel_pmap) {
- PMAP_LOCK(map, spl);
- if (pmap_expand_kmap(v, VM_PROT_READ|VM_PROT_WRITE) == PT_ENTRY_NULL)
- panic ("pmap_expand: Cannot allocate kernel pte table");
- PMAP_UNLOCK(map, spl);
+ /*
+ * Handle kernel pmap in pmap_expand_kmap().
+ */
+ if (map == kernel_pmap) {
+ PMAP_LOCK(map, spl);
+ if (pmap_expand_kmap(v, VM_PROT_READ|VM_PROT_WRITE) == PT_ENTRY_NULL)
+ panic ("pmap_expand: Cannot allocate kernel pte table");
+ PMAP_UNLOCK(map, spl);
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_EXP | CD_FULL)) == (CD_EXP | CD_FULL))
- printf("(pmap_expand :%x) kernel_pmap\n", curproc);
+ if ((pmap_con_dbg & (CD_EXP | CD_FULL)) == (CD_EXP | CD_FULL))
+ printf("(pmap_expand :%x) kernel_pmap\n", curproc);
#endif
- return;
- }
+ return;
+ }
- /* XXX */
+ /* XXX */
#ifdef MACH_KERNEL
- if (kmem_alloc_wired(kernel_map, &pdt_vaddr, PAGE_SIZE) != KERN_SUCCESS)
- panic("pmap_enter: kmem_alloc failure");
- pmap_zero_page(pmap_extract(kernel_pmap, pdt_vaddr));
+ if (kmem_alloc_wired(kernel_map, &pdt_vaddr, PAGE_SIZE) != KERN_SUCCESS)
+ panic("pmap_enter: kmem_alloc failure");
+ pmap_zero_page(pmap_extract(kernel_pmap, pdt_vaddr));
#else
- pdt_vaddr = kmem_alloc (kernel_map, PAGE_SIZE);
+ pdt_vaddr = kmem_alloc (kernel_map, PAGE_SIZE);
#endif
- pdt_paddr = pmap_extract(kernel_pmap, pdt_vaddr);
+ pdt_paddr = pmap_extract(kernel_pmap, pdt_vaddr);
#if notneeded
- /*
- * the page for page tables should be CACHE DISABLED
- */
- pmap_cache_ctrl(kernel_pmap, pdt_vaddr, pdt_vaddr+PAGE_SIZE, CACHE_INH);
+ /*
+ * the page for page tables should be CACHE DISABLED
+ */
+ pmap_cache_ctrl(kernel_pmap, pdt_vaddr, pdt_vaddr+PAGE_SIZE, CACHE_INH);
#endif
- PMAP_LOCK(map, spl);
+ PMAP_LOCK(map, spl);
- if ((pte = pmap_pte(map, v)) != PT_ENTRY_NULL) {
- /*
- * Someone else caused us to expand
- * during our vm_allocate.
- */
- PMAP_UNLOCK(map, spl);
- /* XXX */
- kmem_free (kernel_map, pdt_vaddr, PAGE_SIZE);
+ if ((pte = pmap_pte(map, v)) != PT_ENTRY_NULL) {
+ /*
+ * Someone else caused us to expand
+ * during our vm_allocate.
+ */
+ PMAP_UNLOCK(map, spl);
+ /* XXX */
+ kmem_free (kernel_map, pdt_vaddr, PAGE_SIZE);
#ifdef DEBUG
- if (pmap_con_dbg & CD_EXP)
- printf("(pmap_expand :%x) table has already allocated\n", curproc);
+ if (pmap_con_dbg & CD_EXP)
+			printf("(pmap_expand :%x) table has already been allocated\n", curproc);
#endif
- return;
- }
-
- /*
- * Apply a mask to V to obtain the vaddr of the beginning of
- * its containing page 'table group',i.e. the group of
- * page tables that fit eithin a single VM page.
- * Using that, obtain the segment table pointer that references the
- * first page table in the group, and initilize all the
- * segment table descriptions for the page 'table group'.
- */
- v &= ~((1<<(LOG2_PDT_TABLE_GROUP_SIZE+PDT_BITS+PG_BITS))-1);
-
- sdt = SDTENT(map,v);
-
- /*
- * Init each of the segment entries to point the freshly allocated
- * page tables.
- */
-
- for (i = PDT_TABLE_GROUP_SIZE; i>0; i--) {
- ((sdt_entry_template_t *)sdt)->bits = pdt_paddr | M88K_RW | DT_VALID;
- ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = pdt_vaddr | M88K_RW | DT_VALID;
- sdt++;
- pdt_paddr += PDT_SIZE;
- pdt_vaddr += PDT_SIZE;
- }
-
- PMAP_UNLOCK(map, spl);
+ return;
+ }
+
+ /*
+ * Apply a mask to V to obtain the vaddr of the beginning of
+	 * its containing page 'table group', i.e. the group of
+	 * page tables that fit within a single VM page.
+	 * Using that, obtain the segment table pointer that references the
+	 * first page table in the group, and initialize all the
+	 * segment table descriptors for the page 'table group'.
+ */
+ v &= ~((1<<(LOG2_PDT_TABLE_GROUP_SIZE+PDT_BITS+PG_BITS))-1);
+
+ sdt = SDTENT(map,v);
+
+ /*
+	 * Init each of the segment entries to point to the freshly allocated
+ * page tables.
+ */
+
+ for (i = PDT_TABLE_GROUP_SIZE; i>0; i--) {
+ ((sdt_entry_template_t *)sdt)->bits = pdt_paddr | M88K_RW | DT_VALID;
+ ((sdt_entry_template_t *)(sdt + SDT_ENTRIES))->bits = pdt_vaddr | M88K_RW | DT_VALID;
+ sdt++;
+ pdt_paddr += PDT_SIZE;
+ pdt_vaddr += PDT_SIZE;
+ }
+
+ PMAP_UNLOCK(map, spl);
} /* pmap_expand() */
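
pmap_expand() allocates the new page table with no locks held, since the
allocation may sleep, and then re-checks the slot under the lock: if another
thread expanded the same range in the meantime, the freshly allocated table is
simply given back. The same allocate-unlocked/recheck-locked pattern in
miniature, with a single pointer standing in for the segment table entry:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;			/* stands in for the sdt entry */

static void
expand(void)
{
	void *table = malloc(4096);	/* no locks held: may sleep */

	pthread_mutex_lock(&map_lock);
	if (slot != NULL) {
		/* lost the race: someone expanded while we allocated */
		pthread_mutex_unlock(&map_lock);
		free(table);
		return;
	}
	slot = table;			/* won the race: install our table */
	pthread_mutex_unlock(&map_lock);
}

int
main(void)
{
	expand();
	expand();			/* second call takes the backout path */
	printf("slot %s\n", slot != NULL ? "installed" : "empty");
	return 0;
}
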
@@ -2729,232 +2855,235 @@ pmap_expand(pmap_t map, vm_offset_t v)
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
- vm_prot_t prot, boolean_t wired,
- vm_prot_t access_type)
+ vm_prot_t prot, boolean_t wired,
+ vm_prot_t access_type)
{
- int ap;
- int spl, spl_sav;
- pv_entry_t pv_e;
- pt_entry_t *pte;
- vm_offset_t old_pa;
- pte_template_t template;
- register int i;
- int pfi;
- pv_entry_t pvl;
- register unsigned users;
- register pte_template_t opte;
- int kflush;
-
- if (pmap == PMAP_NULL) {
- panic("pmap_enter: pmap is NULL");
- }
-
- CHECK_PAGE_ALIGN (va, "pmap_entry - VA");
- CHECK_PAGE_ALIGN (pa, "pmap_entry - PA");
-
- /*
- * Range check no longer use, since we use whole address space
- */
+ int ap;
+ int spl, spl_sav;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ vm_offset_t old_pa;
+ pte_template_t template;
+ register int i;
+ int pfi;
+ pv_entry_t pvl;
+ register unsigned users;
+ register pte_template_t opte;
+ int kflush;
+
+ if (pmap == PMAP_NULL) {
+ panic("pmap_enter: pmap is NULL");
+ }
+
+ CHECK_PAGE_ALIGN (va, "pmap_entry - VA");
+ CHECK_PAGE_ALIGN (pa, "pmap_entry - PA");
+
+ /*
+	 * Range check no longer used, since we use the whole address space
+ */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (pmap == kernel_pmap)
- printf ("(pmap_enter :%x) pmap kernel va %x pa %x\n", curproc, va, pa);
- else
- printf ("(pmap_enter :%x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
- }
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (pmap == kernel_pmap)
+ printf ("(pmap_enter :%x) pmap kernel va %x pa %x\n", curproc, va, pa);
+ else
+ printf ("(pmap_enter :%x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
+ }
#endif
- ap = m88k_protection (pmap, prot);
+ ap = m88k_protection (pmap, prot);
- /*
- * Must allocate a new pvlist entry while we're unlocked;
- * zalloc may cause pageout (which will lock the pmap system).
- * If we determine we need a pvlist entry, we will unlock
- * and allocate one. Then will retry, throwing away
- * the allocated entry later (if we no longer need it).
- */
- pv_e = PV_ENTRY_NULL;
- Retry:
+ /*
+ * Must allocate a new pvlist entry while we're unlocked;
+ * zalloc may cause pageout (which will lock the pmap system).
+ * If we determine we need a pvlist entry, we will unlock
+	 * and allocate one. Then we will retry, throwing away
+ * the allocated entry later (if we no longer need it).
+ */
+ pv_e = PV_ENTRY_NULL;
+ Retry:
- PMAP_LOCK(pmap, spl);
+ PMAP_LOCK(pmap, spl);
- /*
- * Expand pmap to include this pte. Assume that
- * pmap is always expanded to include enough M88K
- * pages to map one VM page.
- */
- while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
- /*
- * Must unlock to expand the pmap.
- */
- PMAP_UNLOCK(pmap, spl);
- pmap_expand(pmap, va);
- PMAP_LOCK(pmap, spl);
- }
-
- /*
- * Special case if the physical page is already mapped
- * at this address.
- */
- old_pa = M88K_PTOB(pte->pfn);
- if (old_pa == pa) {
-
- users = 0;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- /*
- * May be changing its wired attributes or protection
- */
-
- if (wired && !pte->wired)
- pmap->stats.wired_count++;
- else if (!wired && pte->wired)
- pmap->stats.wired_count--;
-
- if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
- else
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
- if (wired)
- template.pte.wired = 1;
-
- /*
- * If there is a same mapping, we have nothing to do.
- */
- if ( !PDT_VALID(pte) || (pte->wired != template.pte.wired)
- || (pte->prot != template.pte.prot)) {
-
- for (i = ptes_per_vm_page; i>0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- template.pte.modified = opte.pte.modified;
- *pte++ = template.pte;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- template.bits += M88K_PGBYTES;
- va += M88K_PGBYTES;
- }
- }
-
- } else { /* if ( pa == old_pa) */
-
- /*
- * Remove old mapping from the PV list if necessary.
- */
- if (old_pa != (vm_offset_t)-1) {
- /*
- * Invalidate the translation buffer,
- * then remove the mapping.
- */
+ /*
+ * Expand pmap to include this pte. Assume that
+ * pmap is always expanded to include enough M88K
+ * pages to map one VM page.
+ */
+ while ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
+ /*
+ * Must unlock to expand the pmap.
+ */
+ PMAP_UNLOCK(pmap, spl);
+ pmap_expand(pmap, va);
+ PMAP_LOCK(pmap, spl);
+ }
+
+ /*
+ * Special case if the physical page is already mapped
+ * at this address.
+ */
+ old_pa = M88K_PTOB(pte->pfn);
+ if (old_pa == pa) {
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ /*
+ * May be changing its wired attributes or protection
+ */
+
+ if (wired && !pte->wired)
+ pmap->stats.wired_count++;
+ else if (!wired && pte->wired)
+ pmap->stats.wired_count--;
+
+ if ((unsigned long)pa >= MAXPHYSMEM)
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
+ else
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
+ if (wired)
+ template.pte.wired = 1;
+
+ /*
+		 * If the mapping is unchanged, we have nothing to do.
+ */
+ if ( !PDT_VALID(pte) || (pte->wired != template.pte.wired)
+ || (pte->prot != template.pte.prot)) {
+
+ for (i = ptes_per_vm_page; i>0; i--) {
+
+ /*
+				 * Invalidate the pte temporarily so that another cpu
+				 * cannot write the modified/referenced bits back into it.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ template.pte.modified = opte.pte.modified;
+ *pte++ = template.pte;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ template.bits += M88K_PGBYTES;
+ va += M88K_PGBYTES;
+ }
+ }
+
+ } else { /* if ( pa == old_pa) */
+
+ /*
+ * Remove old mapping from the PV list if necessary.
+ */
+ if (old_pa != (vm_offset_t)-1) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n",
- phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
- PMAP_MANAGED(pa) ? 1 : 0);
- printf("pte %x pfn %x valid %x\n",
- pte, pte->pfn, pte->dtype);
- }
- }
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n",
+ phys_map_vaddr1, phys_map_vaddr2, va, old_pa,
+ PMAP_MANAGED(pa) ? 1 : 0);
+ printf("pte %x pfn %x valid %x\n",
+ pte, pte->pfn, pte->dtype);
+ }
+ }
#endif
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- flush_atc_entry(0, va, 1);
- } else {
- pmap_remove_range(pmap, va, va + PAGE_SIZE);
- }
- }
-
- if (PMAP_MANAGED(pa)) {
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ flush_atc_entry(users, va, 1);
+ } else {
+ pmap_remove_range(pmap, va, va + PAGE_SIZE);
+ }
+ }
+
+ if (PMAP_MANAGED(pa)) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
- printf("va 0x%x and managed pa 0x%x\n", va, pa);
- }
- }
+ if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2) {
+ printf("va 0x%x and managed pa 0x%x\n", va, pa);
+ }
+ }
#endif
- /*
- * Enter the mappimg in the PV list for this
- * physical page.
- */
- pfi = PFIDX(pa);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST (pa, pvl, "pmap_enter before");
-
- if (pvl->pmap == PMAP_NULL) {
-
- /*
- * No mappings yet
- */
- pvl->va = va;
- pvl->pmap = pmap;
- pvl->next = PV_ENTRY_NULL;
-
- } else {
+ /*
+ * Enter the mappimg in the PV list for this
+			 * Enter the mapping in the PV list for this
+ */
+ pfi = PFIDX(pa);
+ LOCK_PVH(pfi);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST (pa, pvl, "pmap_enter before");
+
+ if (pvl->pmap == PMAP_NULL) {
+
+ /*
+ * No mappings yet
+ */
+ pvl->va = va;
+ pvl->pmap = pmap;
+ pvl->next = PV_ENTRY_NULL;
+
+ } else {
#ifdef DEBUG
- /*
- * check that this mapping is not already there
- */
- {
- pv_entry_t e = pvl;
- while (e != PV_ENTRY_NULL) {
- if (e->pmap == pmap && e->va == va)
- panic ("pmap_enter: already in pv_list");
- e = e->next;
- }
- }
+ /*
+ * check that this mapping is not already there
+ */
+ {
+ pv_entry_t e = pvl;
+ while (e != PV_ENTRY_NULL) {
+ if (e->pmap == pmap && e->va == va)
+ panic ("pmap_enter: already in pv_list");
+ e = e->next;
+ }
+ }
#endif
- /*
- * Add new pv_entry after header.
- */
- if (pv_e == PV_ENTRY_NULL) {
- PMAP_UNLOCK(pmap, spl);
- pv_e = (pv_entry_t) malloc(sizeof *pv_e, M_VMPVENT,
- M_NOWAIT);
- goto Retry;
- }
- pv_e->va = va;
- pv_e->pmap = pmap;
- pv_e->next = pvl->next;
- pvl->next = pv_e;
- /*
- * Remeber that we used the pvlist entry.
- */
- pv_e = PV_ENTRY_NULL;
- }
- }
-
- /*
- * And count the mapping.
- */
- pmap->stats.resident_count++;
- if (wired)
- pmap->stats.wired_count++;
-
- if ((unsigned long)pa >= MAXPHYSMEM)
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
- else
- template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
-
- if (wired)
- template.pte.wired = 1;
-
- DO_PTES (pte, template.bits);
-
- } /* if ( pa == old_pa ) ... else */
-
- PMAP_UNLOCK(pmap, spl);
-
- if (pv_e != PV_ENTRY_NULL)
+ /*
+ * Add new pv_entry after header.
+ */
+ if (pv_e == PV_ENTRY_NULL) {
+ UNLOCK_PVH(pfi);
+ PMAP_UNLOCK(pmap, spl);
+ pv_e = (pv_entry_t) malloc(sizeof *pv_e, M_VMPVENT,
+ M_NOWAIT);
+ goto Retry;
+ }
+ pv_e->va = va;
+ pv_e->pmap = pmap;
+ pv_e->next = pvl->next;
+ pvl->next = pv_e;
+ /*
+				 * Remember that we used the pvlist entry.
+ */
+ pv_e = PV_ENTRY_NULL;
+ }
+ UNLOCK_PVH(pfi);
+ }
+
+ /*
+ * And count the mapping.
+ */
+ pmap->stats.resident_count++;
+ if (wired)
+ pmap->stats.wired_count++;
+
+ if ((unsigned long)pa >= MAXPHYSMEM)
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_INH;
+ else
+ template.bits = DT_VALID | ap | M88K_TRUNC_PAGE(pa) | CACHE_GLOBAL;
+
+ if (wired)
+ template.pte.wired = 1;
+
+ DO_PTES (pte, template.bits);
+
+ } /* if ( pa == old_pa ) ... else */
+
+ PMAP_UNLOCK(pmap, spl);
+
+ if (pv_e != PV_ENTRY_NULL)
free((caddr_t) pv_e, M_VMPVENT);
} /* pmap_enter */
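
pmap_enter() handles the same cannot-allocate-while-locked problem with a
retry loop: if a pv entry is needed and none is in hand, it unlocks, allocates
one, and restarts from Retry; an entry that turns out to be unneeded is freed
on the way out. A stripped-down sketch of that loop, using a bare va list
rather than the kernel's pv structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pv {
	int va;
	struct pv *next;
};

static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pv head = { -1, NULL };	/* -1 marks an empty head */

static void
enter(int va)
{
	struct pv *pv_e = NULL;

retry:
	pthread_mutex_lock(&pmap_lock);
	if (head.va == -1) {
		head.va = va;		/* first mapping: use the head slot */
	} else {
		if (pv_e == NULL) {
			/* need an entry but can't allocate locked */
			pthread_mutex_unlock(&pmap_lock);
			pv_e = malloc(sizeof *pv_e);
			goto retry;
		}
		pv_e->va = va;		/* chain the new entry after the head */
		pv_e->next = head.next;
		head.next = pv_e;
		pv_e = NULL;		/* remember that we used it */
	}
	pthread_mutex_unlock(&pmap_lock);
	if (pv_e != NULL)
		free(pv_e);		/* allocated but not needed after all */
}

int
main(void)
{
	enter(1);
	enter(2);
	printf("head=%d, next=%d\n", head.va, head.next->va);
	return 0;
}
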
@@ -2987,31 +3116,31 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
void
pmap_change_wiring(pmap_t map, vm_offset_t v, boolean_t wired)
{
- pt_entry_t *pte;
- int i;
- int spl;
+ pt_entry_t *pte;
+ int i;
+ int spl;
- PMAP_LOCK(map, spl);
+ PMAP_LOCK(map, spl);
- if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
- panic ("pmap_change_wiring: pte missing");
+ if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+ panic ("pmap_change_wiring: pte missing");
- if (wired && !pte->wired)
- /*
- * wiring mapping
- */
- map->stats.wired_count++;
+ if (wired && !pte->wired)
+ /*
+ * wiring mapping
+ */
+ map->stats.wired_count++;
- else if (!wired && pte->wired)
- /*
- * unwired mapping
- */
- map->stats.wired_count--;
+ else if (!wired && pte->wired)
+ /*
+ * unwired mapping
+ */
+ map->stats.wired_count--;
- for (i = ptes_per_vm_page; i>0; i--)
- (pte++)->wired = wired;
+ for (i = ptes_per_vm_page; i>0; i--)
+ (pte++)->wired = wired;
- PMAP_UNLOCK(map, spl);
+ PMAP_UNLOCK(map, spl);
} /* pmap_change_wiring() */
@@ -3047,45 +3176,45 @@ pmap_change_wiring(pmap_t map, vm_offset_t v, boolean_t wired)
vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
- register pt_entry_t *pte;
- register vm_offset_t pa;
- register int i;
- int spl;
-
- if (pmap == PMAP_NULL)
- panic("pmap_extract: pmap is NULL");
-
- /*
- * check BATC first
- */
- if (pmap == kernel_pmap && batc_used > 0)
- for (i = batc_used-1; i > 0; i--)
- if (batc_entry[i].lba == M88K_BTOBLK(va)) {
- pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
- return(pa);
- }
-
- PMAP_LOCK(pmap, spl);
-
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t) 0;
- else {
- if (PDT_VALID(pte))
- pa = M88K_PTOB(pte->pfn);
- else
- pa = (vm_offset_t) 0;
- }
-
- if (pa)
- pa |= (va & M88K_PGOFSET); /* offset within page */
-
- PMAP_UNLOCK(pmap, spl);
-
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+ register int i;
+ int spl;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_extract: pmap is NULL");
+
+ /*
+ * check BATC first
+ */
+ if (pmap == kernel_pmap && batc_used > 0)
+ for (i = batc_used-1; i > 0; i--)
+ if (batc_entry[i].lba == M88K_BTOBLK(va)) {
+ pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
+ return (pa);
+ }
+
+ PMAP_LOCK(pmap, spl);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else {
+ if (PDT_VALID(pte))
+ pa = M88K_PTOB(pte->pfn);
+ else
+ pa = (vm_offset_t) 0;
+ }
+
+ if (pa)
+ pa |= (va & M88K_PGOFSET); /* offset within page */
+
+ PMAP_UNLOCK(pmap, spl);
+
#if 0
- printf("pmap_extract ret %x\n", pa);
+ printf("pmap_extract ret %x\n", pa);
#endif /* 0 */
- return(pa);
-
+ return (pa);
+
} /* pmap_extract() */
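
The BATC probe at the top short-circuits the table walk: if the block address
translation cache maps the block containing va, the physical address is the
cached physical block number shifted up and or'd with the offset within the
block. A sketch of that lookup, assuming 512KB blocks and a fabricated table:

#include <stdio.h>

#define BATC_BLKSHIFT	19		/* 512 KB blocks (assumed) */
#define BATC_BLKMASK	((1u << BATC_BLKSHIFT) - 1)

struct batc {
	unsigned lba;			/* logical (virtual) block number */
	unsigned pba;			/* physical block number */
};

static struct batc batc_entry[] = {
	{ 0x1, 0x7 },			/* virtual block 1 -> physical block 7 */
};

static int
batc_lookup(unsigned va, unsigned *pa)
{
	unsigned i, blk = va >> BATC_BLKSHIFT;

	for (i = 0; i < sizeof batc_entry / sizeof batc_entry[0]; i++)
		if (batc_entry[i].lba == blk) {
			*pa = (batc_entry[i].pba << BATC_BLKSHIFT) |
			    (va & BATC_BLKMASK);
			return 1;	/* hit: no table walk needed */
		}
	return 0;			/* miss: fall back to the page tables */
}

int
main(void)
{
	unsigned pa;

	if (batc_lookup(0x80123u, &pa))		/* offset 0x123 in block 1 */
		printf("pa = 0x%x\n", pa);	/* prints 0x380123 */
	return 0;
}
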
/*
@@ -3095,37 +3224,37 @@ pmap_extract(pmap_t pmap, vm_offset_t va)
vm_offset_t
pmap_extract_unlocked(pmap_t pmap, vm_offset_t va)
{
- pt_entry_t *pte;
- vm_offset_t pa;
- int i;
-
- if (pmap == PMAP_NULL)
- panic("pmap_extract: pmap is NULL");
-
- /*
- * check BATC first
- */
- if (pmap == kernel_pmap && batc_used > 0)
- for (i = batc_used-1; i > 0; i--)
- if (batc_entry[i].lba == M88K_BTOBLK(va)) {
- pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
- return(pa);
- }
-
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t) 0;
- else {
- if (PDT_VALID(pte))
- pa = M88K_PTOB(pte->pfn);
- else
- pa = (vm_offset_t) 0;
- }
-
- if (pa)
- pa |= (va & M88K_PGOFSET); /* offset within page */
-
- return(pa);
-
+ pt_entry_t *pte;
+ vm_offset_t pa;
+ int i;
+
+ if (pmap == PMAP_NULL)
+ panic("pmap_extract: pmap is NULL");
+
+ /*
+ * check BATC first
+ */
+ if (pmap == kernel_pmap && batc_used > 0)
+ for (i = batc_used-1; i > 0; i--)
+ if (batc_entry[i].lba == M88K_BTOBLK(va)) {
+ pa = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK );
+ return (pa);
+ }
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else {
+ if (PDT_VALID(pte))
+ pa = M88K_PTOB(pte->pfn);
+ else
+ pa = (vm_offset_t) 0;
+ }
+
+ if (pa)
+ pa |= (va & M88K_PGOFSET); /* offset within page */
+
+ return (pa);
+
} /* pmap_extract_unlocked() */
@@ -3150,10 +3279,10 @@ pmap_extract_unlocked(pmap_t pmap, vm_offset_t va)
*/
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
- vm_size_t len, vm_offset_t src_addr)
+ vm_size_t len, vm_offset_t src_addr)
{
#ifdef lint
- dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
+ dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
#endif
@@ -3185,8 +3314,8 @@ void
pmap_update(void)
{
#ifdef DBG
- if ((pmap_con_dbg & (CD_UPD | CD_FULL)) == (CD_UPD | CD_FULL))
- printf("(pmap_update :%x) Called \n", curproc);
+ if ((pmap_con_dbg & (CD_UPD | CD_FULL)) == (CD_UPD | CD_FULL))
+ printf("(pmap_update :%x) Called \n", curproc);
#endif
}/* pmap_update() */
@@ -3236,112 +3365,111 @@ void
pmap_collect(pmap_t pmap)
{
- vm_offset_t sdt_va; /* outer loop index */
- vm_offset_t sdt_vt; /* end of segment */
- sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
- sdt_entry_t *sdtp; /* ptr to index into segment table */
- sdt_entry_t *sdt; /* ptr to index into segment table */
- pt_entry_t *gdttbl; /* ptr to first entry in a page table */
- pt_entry_t *gdttblend; /* ptr to byte after last entry in table group */
- pt_entry_t *gdtp; /* ptr to index into a page table */
- boolean_t found_gdt_wired; /* flag indicating a wired page exists in */
- /* a page table's address range */
- int spl;
- unsigned int i,j;
-
-
-
- if (pmap == PMAP_NULL) {
- panic("pmap_collect: pmap is NULL");
- }
- if (pmap == kernel_pmap) {
+ vm_offset_t sdt_va; /* outer loop index */
+ vm_offset_t sdt_vt; /* end of segment */
+ sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */
+ sdt_entry_t *sdtp; /* ptr to index into segment table */
+ sdt_entry_t *sdt; /* ptr to index into segment table */
+ pt_entry_t *gdttbl; /* ptr to first entry in a page table */
+ pt_entry_t *gdttblend; /* ptr to byte after last entry in table group */
+ pt_entry_t *gdtp; /* ptr to index into a page table */
+ boolean_t found_gdt_wired; /* flag indicating a wired page exists in */
+ /* a page table's address range */
+ int spl;
+ unsigned int i,j;
+
+
+
+ if (pmap == PMAP_NULL) {
+ panic("pmap_collect: pmap is NULL");
+ }
+ if (pmap == kernel_pmap) {
#ifdef MACH_KERNEL
- return;
+ return;
#else
- panic("pmap_collect attempted on kernel pmap");
+ panic("pmap_collect attempted on kernel pmap");
#endif
- }
-
- CHECK_PMAP_CONSISTENCY ("pmap_collect");
+ }
+
+ CHECK_PMAP_CONSISTENCY ("pmap_collect");
#if DBG
- if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
+ if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
printf ("(pmap_collect :%x) pmap %x\n", curproc, pmap);
#endif
- PMAP_LOCK(pmap, spl);
+ PMAP_LOCK(pmap, spl);
- sdttbl = pmap->sdt_vaddr; /* addr of segment table */
- sdtp = sdttbl;
+ sdttbl = pmap->sdt_vaddr; /* addr of segment table */
+ sdtp = sdttbl;
- /*
- This contortion is here instead of the natural loop
- because of integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff
- */
+ /*
+	 * This contortion is here instead of the natural loop because of
+	 * integer overflow/wraparound if VM_MAX_USER_ADDRESS is near 0xffffffff.
+ */
- i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
- if ( j < 1024 ) j++;
+ i = VM_MIN_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ j = VM_MAX_USER_ADDRESS / PDT_TABLE_GROUP_VA_SPACE;
+ if ( j < 1024 ) j++;
- /* Segment table loop */
- for ( ; i < j; i++, sdtp += PDT_TABLE_GROUP_SIZE)
- {
- sdt_va = VM_MIN_USER_ADDRESS + PDT_TABLE_GROUP_VA_SPACE*i;
+ /* Segment table loop */
+ for ( ; i < j; i++, sdtp += PDT_TABLE_GROUP_SIZE) {
+ sdt_va = VM_MIN_USER_ADDRESS + PDT_TABLE_GROUP_VA_SPACE*i;
- gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va);
+ gdttbl = pmap_pte(pmap, (vm_offset_t)sdt_va);
- if (gdttbl == PT_ENTRY_NULL)
- continue; /* no maps in this range */
+ if (gdttbl == PT_ENTRY_NULL)
+ continue; /* no maps in this range */
- gdttblend = gdttbl + (PDT_ENTRIES * PDT_TABLE_GROUP_SIZE);
+ gdttblend = gdttbl + (PDT_ENTRIES * PDT_TABLE_GROUP_SIZE);
- /* scan page maps for wired pages */
- found_gdt_wired = FALSE;
- for (gdtp=gdttbl; gdtp <gdttblend; gdtp++) {
- if (gdtp->wired) {
- found_gdt_wired = TRUE;
- break;
- }
- }
+ /* scan page maps for wired pages */
+ found_gdt_wired = FALSE;
+ for (gdtp=gdttbl; gdtp <gdttblend; gdtp++) {
+ if (gdtp->wired) {
+ found_gdt_wired = TRUE;
+ break;
+ }
+ }
- if (found_gdt_wired)
- continue; /* can't free this range */
+ if (found_gdt_wired)
+ continue; /* can't free this range */
- /* figure out end of range. Watch for wraparound */
+ /* figure out end of range. Watch for wraparound */
- sdt_vt = sdt_va <= VM_MAX_USER_ADDRESS-PDT_TABLE_GROUP_VA_SPACE ?
- sdt_va+PDT_TABLE_GROUP_VA_SPACE :
- VM_MAX_USER_ADDRESS;
+ sdt_vt = sdt_va <= VM_MAX_USER_ADDRESS-PDT_TABLE_GROUP_VA_SPACE ?
+ sdt_va+PDT_TABLE_GROUP_VA_SPACE :
+ VM_MAX_USER_ADDRESS;
- /* invalidate all maps in this range */
- pmap_remove_range (pmap, (vm_offset_t)sdt_va, (vm_offset_t)sdt_vt);
+ /* invalidate all maps in this range */
+ pmap_remove_range (pmap, (vm_offset_t)sdt_va, (vm_offset_t)sdt_vt);
- /*
- * we can safely deallocated the page map(s)
- */
- for (sdt = sdtp; sdt < (sdtp+PDT_TABLE_GROUP_SIZE); sdt++) {
- ((sdt_entry_template_t *) sdt) -> bits = 0;
- ((sdt_entry_template_t *) sdt+SDT_ENTRIES) -> bits = 0;
- }
+ /*
+		 * we can safely deallocate the page map(s)
+ */
+ for (sdt = sdtp; sdt < (sdtp+PDT_TABLE_GROUP_SIZE); sdt++) {
+ ((sdt_entry_template_t *) sdt) -> bits = 0;
+ ((sdt_entry_template_t *) sdt+SDT_ENTRIES) -> bits = 0;
+ }
- /*
- * we have to unlock before freeing the table, since PT_FREE
- * calls kmem_free or zfree, which will invoke another pmap routine
- */
- PMAP_UNLOCK(pmap, spl);
- PT_FREE(gdttbl);
- PMAP_LOCK(pmap, spl);
+ /*
+ * we have to unlock before freeing the table, since PT_FREE
+ * calls kmem_free or zfree, which will invoke another pmap routine
+ */
+ PMAP_UNLOCK(pmap, spl);
+ PT_FREE(gdttbl);
+ PMAP_LOCK(pmap, spl);
- } /* Segment table Loop */
+ } /* Segment table Loop */
- PMAP_UNLOCK(pmap, spl);
+ PMAP_UNLOCK(pmap, spl);
#if DBG
- if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
+ if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM))
printf ("(pmap_collect :%x) done \n", curproc);
#endif
- CHECK_PMAP_CONSISTENCY("pmap_collect");
+ CHECK_PMAP_CONSISTENCY("pmap_collect");
} /* pmap_collect() */
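
The "contortion" comment above deserves unpacking: if VM_MAX_USER_ADDRESS sits
near 0xffffffff, stepping a va by one table group's worth of address space
wraps to a small value and the natural "va < end" test never fails. Counting
table groups in an ordinary integer index avoids the wraparound. A
demonstration with illustrative constants:

#include <stdio.h>

#define GROUP_VA_SPACE	0x400000u	/* VA covered by one table group */

int
main(void)
{
	unsigned start = 0u, end = 0xffc00000u;
	unsigned i, j, va, groups = 0;

	i = start / GROUP_VA_SPACE;
	j = end / GROUP_VA_SPACE;
	if (j < 1024)
		j++;			/* make the final group inclusive */

	for (; i < j; i++) {
		va = start + GROUP_VA_SPACE * i;	/* derive va from index */
		groups++;
		(void)va;		/* per-group work would use va here */
	}
	printf("visited %u groups\n", groups);
	return 0;
}

With "for (va = start; va < end; va += GROUP_VA_SPACE)" and end at the very
top of the address space, the last increment would wrap va back below end and
the loop would never terminate.
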
@@ -3363,12 +3491,13 @@ pmap_collect(pmap_t pmap)
* cpu CPU number
*/
void
-pmap_activate(pmap_t pmap, pcb_t pcb)
+pmap_activate(pmap_t pmap, pcb_t pcb, int cpu)
{
#ifdef lint
- my_cpu++;
+ my_cpu++;
#endif
- PMAP_ACTIVATE(pmap, pcb, 0);
+ cpu = cpu_number(); /* hack to fix bogus cpu number */
+ PMAP_ACTIVATE(pmap, pcb, cpu);
} /* pmap_activate() */
@@ -3384,14 +3513,15 @@ pmap_activate(pmap_t pmap, pcb_t pcb)
* Parameters:
* pmap pointer to pmap structure
* pcb pointer to pcb
+ * cpu CPU number
*/
void
-pmap_deactivate(pmap_t pmap, pcb_t pcb)
+pmap_deactivate(pmap_t pmap, pcb_t pcb,int cpu)
{
#ifdef lint
- pmap++; th++; which_cpu++;
+ pmap++; th++; which_cpu++;
#endif
- PMAP_DEACTIVATE(pmap, pcb, 0);
+ PMAP_DEACTIVATE(pmap, pcb, cpu);
} /* pmap_deactivate() */
@@ -3405,7 +3535,7 @@ pmap_deactivate(pmap_t pmap, pcb_t pcb)
pmap_t
pmap_kernel(void)
{
- return (kernel_pmap);
+ return (kernel_pmap);
}/* pmap_kernel() */
@@ -3440,47 +3570,46 @@ pmap_kernel(void)
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
- vm_offset_t dstva, srcva;
- unsigned int spl_sav;
- int i;
- int aprot;
- pte_template_t template;
- pt_entry_t *dstpte, *srcpte;
- int my_cpu = cpu_number();
-
- /*
- * Map source physical address.
- */
- aprot = m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
-
- srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu_number() * PAGE_SIZE));
- dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
-
- srcpte = pmap_pte(kernel_pmap, srcva);
- dstpte = pmap_pte(kernel_pmap, dstva);
-
- for (i=0; i < ptes_per_vm_page; i++, src += M88K_PGBYTES, dst += M88K_PGBYTES)
- {
- template.bits = M88K_TRUNC_PAGE(src) | aprot | DT_VALID | CACHE_GLOBAL;
-
- /* do we need to write back dirty bits */
- spl_sav = splimp();
- cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
- *srcpte = template.pte;
-
- /*
- * Map destination physical address.
- */
- template.bits = M88K_TRUNC_PAGE(dst) | aprot | CACHE_GLOBAL | DT_VALID;
- cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
- *dstpte = template.pte;
- splx(spl_sav);
-
- bcopy((void*)srcva, (void*)dstva, M88K_PGBYTES);
- /* flush source, dest out of cache? */
- cmmu_flush_remote_data_cache(my_cpu, src, M88K_PGBYTES);
- cmmu_flush_remote_data_cache(my_cpu, dst, M88K_PGBYTES);
- }
+ vm_offset_t dstva, srcva;
+ unsigned int spl_sav;
+ int i;
+ int aprot;
+ pte_template_t template;
+ pt_entry_t *dstpte, *srcpte;
+ int my_cpu = cpu_number();
+
+ /*
+ * Map source physical address.
+ */
+ aprot = m88k_protection (kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+
+ srcva = (vm_offset_t)(phys_map_vaddr1 + (cpu_number() * PAGE_SIZE));
+ dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+
+ srcpte = pmap_pte(kernel_pmap, srcva);
+ dstpte = pmap_pte(kernel_pmap, dstva);
+
+ for (i=0; i < ptes_per_vm_page; i++, src += M88K_PGBYTES, dst += M88K_PGBYTES) {
+ template.bits = M88K_TRUNC_PAGE(src) | aprot | DT_VALID | CACHE_GLOBAL;
+
+ /* do we need to write back dirty bits */
+ spl_sav = splimp();
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+
+ /*
+ * Map destination physical address.
+ */
+ template.bits = M88K_TRUNC_PAGE(dst) | aprot | CACHE_GLOBAL | DT_VALID;
+ cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
+ *dstpte = template.pte;
+ splx(spl_sav);
+
+ bcopy((void*)srcva, (void*)dstva, M88K_PGBYTES);
+ /* flush source, dest out of cache? */
+ cmmu_flush_remote_data_cache(my_cpu, src, M88K_PGBYTES);
+ cmmu_flush_remote_data_cache(my_cpu, dst, M88K_PGBYTES);
+ }
} /* pmap_copy_page() */
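
pmap_copy_page() sidesteps contention on the temporary mapping windows by
giving each cpu a private page-sized slot inside two reserved VA ranges, one
for the source and one for the destination. The window arithmetic on its own,
with fabricated base addresses:

#include <stdio.h>

#define PAGE_SIZE	0x1000u
#define NCPUS		4

static unsigned phys_map_vaddr1 = 0xff800000u;	/* source windows (fabricated) */
static unsigned phys_map_vaddr2 = 0xff810000u;	/* destination windows (fabricated) */

int
main(void)
{
	unsigned cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%u: src window 0x%x, dst window 0x%x\n", cpu,
		    phys_map_vaddr1 + cpu * PAGE_SIZE,
		    phys_map_vaddr2 + cpu * PAGE_SIZE);
	return 0;
}

Because no two cpus share a slot, each can retarget its own window with a
local TLB flush rather than coordinating with the others.
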
@@ -3511,45 +3640,44 @@ pmap_copy_page(vm_offset_t src, vm_offset_t dst)
void
copy_to_phys(vm_offset_t srcva, vm_offset_t dstpa, int bytecount)
{
- vm_offset_t dstva;
- pt_entry_t *dstpte;
- int copy_size,
- offset,
- aprot;
- unsigned int i;
- pte_template_t template;
-
- dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
- dstpte = pmap_pte(kernel_pmap, dstva);
- copy_size = M88K_PGBYTES;
- offset = dstpa - M88K_TRUNC_PAGE(dstpa);
- dstpa -= offset;
-
- aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
- while (bytecount > 0){
- copy_size = M88K_PGBYTES - offset;
- if (copy_size > bytecount)
- copy_size = bytecount;
-
- /*
- * Map distation physical address.
- */
-
- for (i = 0; i < ptes_per_vm_page; i++)
- {
- template.bits = M88K_TRUNC_PAGE(dstpa) | aprot | CACHE_WT | DT_VALID;
- cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
- *dstpte = template.pte;
-
- dstva += offset;
- bcopy((void*)srcva, (void*)dstva, copy_size);
- srcva += copy_size;
- dstva += copy_size;
- dstpa += M88K_PGBYTES;
- bytecount -= copy_size;
- offset = 0;
- }
+ vm_offset_t dstva;
+ pt_entry_t *dstpte;
+ int copy_size,
+ offset,
+ aprot;
+ unsigned int i;
+ pte_template_t template;
+
+ dstva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+ dstpte = pmap_pte(kernel_pmap, dstva);
+ copy_size = M88K_PGBYTES;
+ offset = dstpa - M88K_TRUNC_PAGE(dstpa);
+ dstpa -= offset;
+
+ aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+ while (bytecount > 0) {
+ copy_size = M88K_PGBYTES - offset;
+ if (copy_size > bytecount)
+ copy_size = bytecount;
+
+ /*
+		 * Map destination physical address.
+ */
+
+ for (i = 0; i < ptes_per_vm_page; i++) {
+ template.bits = M88K_TRUNC_PAGE(dstpa) | aprot | CACHE_WT | DT_VALID;
+ cmmu_flush_tlb(1, dstva, M88K_PGBYTES);
+ *dstpte = template.pte;
+
+ dstva += offset;
+ bcopy((void*)srcva, (void*)dstva, copy_size);
+ srcva += copy_size;
+ dstva += copy_size;
+ dstpa += M88K_PGBYTES;
+ bytecount -= copy_size;
+ offset = 0;
}
+ }
}
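
copy_to_phys() above and its sibling below cope with an unaligned physical
address by sizing the first chunk to the remainder of its page; every later
chunk then starts page-aligned. The chunking logic by itself, scaled down to a
toy 16-byte page so the arithmetic is easy to follow:

#include <stdio.h>
#include <string.h>

#define PG	16			/* toy page size */

int
main(void)
{
	char src[40], dst[64];
	int srcoff = 0, dstpa = 5;	/* destination starts mid-page */
	int bytecount = 40, offset, copy_size;

	memset(src, 'x', sizeof src);
	offset = dstpa % PG;		/* dstpa - trunc_page(dstpa) */
	dstpa -= offset;		/* back up to the page boundary */

	while (bytecount > 0) {
		copy_size = PG - offset;	/* tail of the current page */
		if (copy_size > bytecount)
			copy_size = bytecount;
		memcpy(dst + dstpa + offset, src + srcoff, copy_size);
		printf("copied %2d bytes at page %2d offset %2d\n",
		    copy_size, dstpa, offset);
		srcoff += copy_size;
		dstpa += PG;		/* next page */
		bytecount -= copy_size;
		offset = 0;		/* only the first chunk is offset */
	}
	return 0;
}

Output: 11 bytes at offset 5, then a full 16, then the remaining 13.
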
/*
@@ -3578,45 +3706,44 @@ copy_to_phys(vm_offset_t srcva, vm_offset_t dstpa, int bytecount)
void
copy_from_phys(vm_offset_t srcpa, vm_offset_t dstva, int bytecount)
{
- register vm_offset_t srcva;
- register pt_entry_t *srcpte;
- register int copy_size, offset;
- int aprot;
- unsigned int i;
- pte_template_t template;
-
- srcva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
- srcpte = pmap_pte(kernel_pmap, srcva);
- copy_size = M88K_PGBYTES;
- offset = srcpa - M88K_TRUNC_PAGE(srcpa);
- srcpa -= offset;
-
- aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
- while (bytecount > 0){
- copy_size = M88K_PGBYTES - offset;
- if (copy_size > bytecount)
- copy_size = bytecount;
-
- /*
- * Map destnation physical address.
- */
-
- for (i=0; i < ptes_per_vm_page; i++)
- {
- template.bits = M88K_TRUNC_PAGE(srcpa) | aprot | CACHE_WT | DT_VALID;
- cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
- *srcpte = template.pte;
-
- srcva += offset;
- bcopy((void*)srcva, (void*)dstva, copy_size);
- srcpa += M88K_PGBYTES;
- dstva += copy_size;
- srcva += copy_size;
- bytecount -= copy_size;
- offset = 0;
- /* cache flush source? */
- }
+ register vm_offset_t srcva;
+ register pt_entry_t *srcpte;
+ register int copy_size, offset;
+ int aprot;
+ unsigned int i;
+ pte_template_t template;
+
+ srcva = (vm_offset_t)(phys_map_vaddr2 + (cpu_number() * PAGE_SIZE));
+ srcpte = pmap_pte(kernel_pmap, srcva);
+ copy_size = M88K_PGBYTES;
+ offset = srcpa - M88K_TRUNC_PAGE(srcpa);
+ srcpa -= offset;
+
+ aprot = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE);
+ while (bytecount > 0) {
+ copy_size = M88K_PGBYTES - offset;
+ if (copy_size > bytecount)
+ copy_size = bytecount;
+
+ /*
+		 * Map source physical address.
+ */
+
+ for (i=0; i < ptes_per_vm_page; i++) {
+ template.bits = M88K_TRUNC_PAGE(srcpa) | aprot | CACHE_WT | DT_VALID;
+ cmmu_flush_tlb(1, srcva, M88K_PGBYTES);
+ *srcpte = template.pte;
+
+ srcva += offset;
+ bcopy((void*)srcva, (void*)dstva, copy_size);
+ srcpa += M88K_PGBYTES;
+ dstva += copy_size;
+ srcva += copy_size;
+ bytecount -= copy_size;
+ offset = 0;
+ /* cache flush source? */
}
+ }
}
/*
@@ -3642,10 +3769,10 @@ copy_from_phys(vm_offset_t srcpa, vm_offset_t dstva, int bytecount)
*/
void
pmap_pageable(pmap_t pmap, vm_offset_t start, vm_offset_t end,
- boolean_t pageable)
+ boolean_t pageable)
{
#ifdef lint
- pmap++; start++; end++; pageable++;
+ pmap++; start++; end++; pageable++;
#endif
} /* pmap_pageable() */
@@ -3678,41 +3805,41 @@ pmap_pageable(pmap_t pmap, vm_offset_t start, vm_offset_t end,
void
pmap_redzone(pmap_t pmap, vm_offset_t va)
{
- pt_entry_t *pte;
- int spl, spl_sav;
- int i;
- unsigned users;
- pte_template_t opte;
- int kflush;
-
- va = M88K_ROUND_PAGE(va);
- PMAP_LOCK(pmap, spl);
-
- users = 0;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL && PDT_VALID(pte))
+ pt_entry_t *pte;
+ int spl, spl_sav;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+
+ va = M88K_ROUND_PAGE(va);
+ PMAP_LOCK(pmap, spl);
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ if ((pte = pmap_pte(pmap, va)) != PT_ENTRY_NULL && PDT_VALID(pte))
for (i = ptes_per_vm_page; i > 0; i--) {
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- opte.pte.prot = M88K_RO;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va +=M88K_PGBYTES;
+ /*
+		 * Invalidate the pte temporarily so that another cpu
+		 * cannot write the modified/referenced bits back into it.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ opte.pte.prot = M88K_RO;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va +=M88K_PGBYTES;
}
- PMAP_UNLOCK(pmap, spl);
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_redzone() */
@@ -3750,89 +3877,94 @@ pmap_redzone(pmap_t pmap, vm_offset_t va)
void
pmap_clear_modify(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *pte;
- pmap_t pmap;
- int spl, spl_sav;
- vm_offset_t va;
- int i;
- unsigned users;
- pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ vm_offset_t va;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DBG
- if (pmap_con_dbg & CD_CMOD)
- printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys);
+ if (pmap_con_dbg & CD_CMOD)
+ printf("(pmap_clear_modify :%x) phys addr 0x%x not managed \n", curproc, phys);
#endif
- return;
- }
+ return;
+ }
+
+ SPLVM(spl);
- SPLVM(spl);
+ clear_modify_Retry:
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST (phys, pvl, "pmap_clear_modify");
+ LOCK_PVH(pfi);
-clear_modify_Retry:
- pfi = PFIDX(phys);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST (phys, pvl, "pmap_clear_modify");
- /* update correspoinding pmap_modify_list element */
- pmap_modify_list[pfi] = 0;
+	/* update corresponding pmap_modify_list element */
+ pmap_modify_list[pfi] = 0;
- if (pvl->pmap == PMAP_NULL) {
+ if (pvl->pmap == PMAP_NULL) {
#ifdef DEBUG
- if ((pmap_con_dbg & (CD_CMOD | CD_NORM)) == (CD_CMOD | CD_NORM))
- printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys);
+ if ((pmap_con_dbg & (CD_CMOD | CD_NORM)) == (CD_CMOD | CD_NORM))
+ printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- SPLX(spl);
- return;
- }
-
- /* for each listed pmap, trun off the page modified bit */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- pmap = pvep->pmap;
- va = pvep->va;
- if (!simple_lock_try(&pmap->lock)) {
- goto clear_modify_Retry;
- }
-
- users = 0;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- pte = pmap_pte(pmap, va);
- if (pte == PT_ENTRY_NULL)
- panic("pmap_clear_modify: bad pv list entry.");
-
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- /* clear modified bit */
- opte.pte.modified = 0;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va += M88K_PGBYTES;
- }
-
- simple_unlock(&pmap->lock);
-
- pvep = pvep->next;
- }
-
- SPLX(spl);
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return;
+ }
+
+	/* for each listed pmap, turn off the page modified bit */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ pmap = pvep->pmap;
+ va = pvep->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto clear_modify_Retry;
+ }
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_clear_modify: bad pv list entry.");
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+			 * Invalidate the pte temporarily so that another cpu
+			 * cannot write the modified/referenced bits back into it.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ /* clear modified bit */
+ opte.pte.modified = 0;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ simple_unlock(&pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
} /* pmap_clear_modify() */
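
pmap_modify_list acts as a software cache of the hardware modified bits:
whenever a pte is about to be destroyed or rewritten, its modified bit is
folded into the per-page array (the "keep track ourselves too" lines earlier
in this patch), so the information survives the mapping, and
pmap_clear_modify() must reset both copies. A sketch of the caching idea under
illustrative names:

#include <stdio.h>

#define NPAGES	8

static char pmap_modify_list[NPAGES];	/* software copy, one byte per page */
static int hw_modified[NPAGES];		/* stands in for the pte modified bits */

static void
page_unmapped(int pfi)
{
	if (hw_modified[pfi])
		pmap_modify_list[pfi] = 1;	/* keep track ourselves too */
	hw_modified[pfi] = 0;			/* the pte is gone */
}

static void
clear_modify(int pfi)
{
	pmap_modify_list[pfi] = 0;		/* reset the cached copy... */
	hw_modified[pfi] = 0;			/* ...and the live pte bit */
}

static int
is_modified(int pfi)
{
	if (pmap_modify_list[pfi])
		return 1;			/* cached flag: no pte walk */
	return hw_modified[pfi];		/* otherwise ask the mapping */
}

int
main(void)
{
	hw_modified[3] = 1;
	page_unmapped(3);
	printf("page 3 modified: %d\n", is_modified(3));	/* 1 */
	clear_modify(3);
	printf("after clear:     %d\n", is_modified(3));	/* 0 */
	return 0;
}
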
@@ -3877,84 +4009,88 @@ clear_modify_Retry:
boolean_t
pmap_is_modified(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *ptep;
- int spl;
- int i;
- boolean_t modified_flag;
-
- if (!PMAP_MANAGED(phys)) {
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ int i;
+ boolean_t modified_flag;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DBG
- if (pmap_con_dbg & CD_IMOD)
- printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys);
+ if (pmap_con_dbg & CD_IMOD)
+ printf("(pmap_is_modified :%x) phys addr 0x%x not managed\n", curproc, phys);
#endif
- return(FALSE);
- }
+ return (FALSE);
+ }
- SPLVM(spl);
+ SPLVM(spl);
- pfi = PFIDX(phys);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST (phys, pvl, "pmap_is_modified");
-is_mod_Retry:
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST (phys, pvl, "pmap_is_modified");
+ is_mod_Retry:
- if ((boolean_t) pmap_modify_list[pfi]) {
- /* we've already cached a modify flag for this page,
- no use looking further... */
+ if ((boolean_t) pmap_modify_list[pfi]) {
+ /* we've already cached a modify flag for this page,
+ no use looking further... */
#ifdef DBG
- if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
- printf("(pmap_is_modified :%x) already cached a modify flag for this page\n", curproc);
+ if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
+ printf("(pmap_is_modified :%x) already cached a modify flag for this page\n", curproc);
#endif
- SPLX(spl);
- return(TRUE);
- }
-
- if (pvl->pmap == PMAP_NULL) {
- /* unmapped page - get info from page_modified array
- maintained by pmap_remove_range/ pmap_remove_all */
- modified_flag = (boolean_t) pmap_modify_list[pfi];
+ SPLX(spl);
+ return (TRUE);
+ }
+ LOCK_PVH(pfi);
+
+ if (pvl->pmap == PMAP_NULL) {
+ /* unmapped page - get info from page_modified array
+ maintained by pmap_remove_range/ pmap_remove_all */
+ modified_flag = (boolean_t) pmap_modify_list[pfi];
#ifdef DBG
- if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
- printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys);
+ if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM))
+ printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys);
#endif
- SPLX(spl);
- return(modified_flag);
- }
-
- /* for each listed pmap, check modified bit for given page */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- if (!simple_lock_try(&pvep->pmap->lock)) {
- UNLOCK_PVH(pfi);
- goto is_mod_Retry;
- }
-
- ptep = pmap_pte(pvep->pmap, pvep->va);
- if (ptep == PT_ENTRY_NULL) {
- printf("pmap_is_modified: pte from pv_list not in map virt = 0x%x\n", pvep->va);
- panic("pmap_is_modified: bad pv list entry");
- }
- for (i = ptes_per_vm_page; i > 0; i--) {
- if (ptep->modified) {
- simple_unlock(&pvep->pmap->lock);
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return (modified_flag);
+ }
+
+ /* for each listed pmap, check modified bit for given page */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ if (!simple_lock_try(&pvep->pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto is_mod_Retry;
+ }
+
+ ptep = pmap_pte(pvep->pmap, pvep->va);
+ if (ptep == PT_ENTRY_NULL) {
+ printf("pmap_is_modified: pte from pv_list not in map virt = 0x%x\n", pvep->va);
+ panic("pmap_is_modified: bad pv list entry");
+ }
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ if (ptep->modified) {
+ simple_unlock(&pvep->pmap->lock);
#ifdef DBG
- if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL))
- printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
+ if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL))
+ printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep);
#endif
- SPLX(spl);
- return(TRUE);
- }
- ptep++;
- }
- simple_unlock(&pvep->pmap->lock);
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return (TRUE);
+ }
+ ptep++;
+ }
+ simple_unlock(&pvep->pmap->lock);
- pvep = pvep->next;
- }
+ pvep = pvep->next;
+ }
- SPLX(spl);
- return(FALSE);
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return (FALSE);
} /* pmap_is_modified() */
@@ -3996,87 +4132,91 @@ is_mod_Retry:
void
pmap_clear_reference(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *pte;
- pmap_t pmap;
- int spl, spl_sav;
- vm_offset_t va;
- int i;
- unsigned users;
- pte_template_t opte;
- int kflush;
-
- if (!PMAP_MANAGED(phys)) {
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *pte;
+ pmap_t pmap;
+ int spl, spl_sav;
+ vm_offset_t va;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+ int kflush;
+
+ if (!PMAP_MANAGED(phys)) {
#ifdef DBG
- if (pmap_con_dbg & CD_CREF) {
- printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys);
- }
+ if (pmap_con_dbg & CD_CREF) {
+ printf("(pmap_clear_reference :%x) phys addr 0x%x not managed\n", curproc,phys);
+ }
#endif
- return;
- }
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
-clear_reference_Retry:
- pfi = PFIDX(phys);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST(phys, pvl, "pmap_clear_reference");
+ clear_reference_Retry:
+ pfi = PFIDX(phys);
+ LOCK_PVH(pfi);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(phys, pvl, "pmap_clear_reference");
- if (pvl->pmap == PMAP_NULL) {
+ if (pvl->pmap == PMAP_NULL) {
#ifdef DBG
- if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM))
- printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys);
+ if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM))
+ printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys);
#endif
- SPLX(spl);
- return;
- }
-
- /* for each listed pmap, turn off the page refrenced bit */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- pmap = pvep->pmap;
- va = pvep->va;
- if (!simple_lock_try(&pmap->lock)) {
- goto clear_reference_Retry;
- }
-
- users = 0;
- if (pmap == kernel_pmap) {
- kflush = 1;
- } else {
- kflush = 0;
- }
-
- pte = pmap_pte(pmap, va);
- if (pte == PT_ENTRY_NULL)
- panic("pmap_clear_reference: bad pv list entry.");
-
- for (i = ptes_per_vm_page; i > 0; i--) {
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- spl_sav = splimp();
- opte.bits = invalidate_pte(pte);
- /* clear reference bit */
- opte.pte.pg_used = 0;
- ((pte_template_t *)pte)->bits = opte.bits;
- flush_atc_entry(users, va, kflush);
- splx(spl_sav);
- pte++;
- va += M88K_PGBYTES;
- }
-
- simple_unlock(&pmap->lock);
-
- pvep = pvep->next;
- }
-
- SPLX(spl);
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return;
+ }
+
+ /* for each listed pmap, turn off the page referenced bit */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ pmap = pvep->pmap;
+ va = pvep->va;
+ if (!simple_lock_try(&pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto clear_reference_Retry;
+ }
+
+ users = pmap->cpus_using;
+ if (pmap == kernel_pmap) {
+ kflush = 1;
+ } else {
+ kflush = 0;
+ }
+
+ pte = pmap_pte(pmap, va);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_clear_reference: bad pv list entry.");
+
+ for (i = ptes_per_vm_page; i > 0; i--) {
+
+ /*
+ * Invalidate the pte temporarily so that the modified and/or
+ * referenced bits cannot be written back by another cpu.
+ */
+ spl_sav = splimp();
+ opte.bits = invalidate_pte(pte);
+ /* clear reference bit */
+ opte.pte.pg_used = 0;
+ ((pte_template_t *)pte)->bits = opte.bits;
+ flush_atc_entry(users, va, kflush);
+ splx(spl_sav);
+ pte++;
+ va += M88K_PGBYTES;
+ }
+
+ simple_unlock(&pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
} /* pmap_clear_reference() */
@@ -4119,55 +4259,59 @@ clear_reference_Retry:
boolean_t
pmap_is_referenced(vm_offset_t phys)
{
- pv_entry_t pvl;
- int pfi;
- pv_entry_t pvep;
- pt_entry_t *ptep;
- int spl;
- int i;
-
- if (!PMAP_MANAGED(phys))
- return(FALSE);
-
- SPLVM(spl);
-
- pfi = PFIDX(phys);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST(phys, pvl, "pmap_is_referenced");
-
-is_ref_Retry:
-
- if (pvl->pmap == PMAP_NULL) {
- SPLX(spl);
- return(FALSE);
- }
-
- /* for each listed pmap, check used bit for given page */
- pvep = pvl;
- while (pvep != PV_ENTRY_NULL) {
- if (!simple_lock_try(&pvep->pmap->lock)) {
- UNLOCK_PVH(pfi);
- goto is_ref_Retry;
- }
-
- ptep = pmap_pte(pvep->pmap, pvep->va);
- if (ptep == PT_ENTRY_NULL)
- panic("pmap_is_referenced: bad pv list entry.");
- for (i = ptes_per_vm_page; i > 0; i--) {
- if (ptep->pg_used) {
- simple_unlock(&pvep->pmap->lock);
- SPLX(spl);
- return(TRUE);
- }
- ptep++;
- }
- simple_unlock(&pvep->pmap->lock);
-
- pvep = pvep->next;
- }
-
- SPLX(spl);
- return(FALSE);
+ pv_entry_t pvl;
+ int pfi;
+ pv_entry_t pvep;
+ pt_entry_t *ptep;
+ int spl;
+ int i;
+
+ if (!PMAP_MANAGED(phys))
+ return (FALSE);
+
+ SPLVM(spl);
+
+ pfi = PFIDX(phys);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(phys, pvl, "pmap_is_referenced");
+
+ is_ref_Retry:
+
+ if (pvl->pmap == PMAP_NULL) {
+ SPLX(spl);
+ return (FALSE);
+ }
+
+ LOCK_PVH(pfi);
+
+ /* for each listed pmap, check used bit for given page */
+ pvep = pvl;
+ while (pvep != PV_ENTRY_NULL) {
+ if (!simple_lock_try(&pvep->pmap->lock)) {
+ UNLOCK_PVH(pfi);
+ goto is_ref_Retry;
+ }
+
+ ptep = pmap_pte(pvep->pmap, pvep->va);
+ if (ptep == PT_ENTRY_NULL)
+ panic("pmap_is_referenced: bad pv list entry.");
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ if (ptep->pg_used) {
+ simple_unlock(&pvep->pmap->lock);
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return (TRUE);
+ }
+ ptep++;
+ }
+ simple_unlock(&pvep->pmap->lock);
+
+ pvep = pvep->next;
+ }
+
+ UNLOCK_PVH(pfi);
+ SPLX(spl);
+ return (FALSE);
} /* pmap_is_referenced() */
/*
@@ -4203,24 +4347,26 @@ is_ref_Retry:
boolean_t
pmap_verify_free(vm_offset_t phys)
{
- pv_entry_t pv_h;
- int spl;
- boolean_t result;
+ pv_entry_t pv_h;
+ int spl;
+ boolean_t result;
- if (!pmap_initialized)
- return(TRUE);
+ if (!pmap_initialized)
+ return (TRUE);
- if (!PMAP_MANAGED(phys))
- return(FALSE);
+ if (!PMAP_MANAGED(phys))
+ return (FALSE);
- SPLVM(spl);
+ SPLVM(spl);
- pv_h = PFIDX_TO_PVH(PFIDX(phys));
+ pv_h = PFIDX_TO_PVH(PFIDX(phys));
+ LOCK_PVH(PFIDX(phys));
- result = (pv_h->pmap == PMAP_NULL);
- SPLX(spl);
+ result = (pv_h->pmap == PMAP_NULL);
+ UNLOCK_PVH(PFIDX(phys));
+ SPLX(spl);
- return(result);
+ return (result);
} /* pmap_verify_free */
@@ -4235,10 +4381,10 @@ boolean_t
pmap_valid_page(vm_offset_t p)
{
#ifdef lint
- p++;
+ p++;
#endif
- return(TRUE);
-} /* pmap_valid_page() */
+ return (TRUE);
+} /* pmap_valid_page() */
/*
* Routine: PMAP_PAGE_PROTECT
@@ -4252,17 +4398,17 @@ pmap_valid_page(vm_offset_t p)
void
pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
{
- switch (prot) {
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
- pmap_copy_on_write(phys);
- break;
- case VM_PROT_ALL:
- break;
- default:
- pmap_remove_all(phys);
- break;
- }
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ pmap_copy_on_write(phys);
+ break;
+ case VM_PROT_ALL:
+ break;
+ default:
+ pmap_remove_all(phys);
+ break;
+ }
}
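
/*
 * A stub-driven sketch of the dispatch pmap_page_protect() performs:
 * read-only protections write-protect the page, VM_PROT_ALL takes
 * nothing away, and anything else (notably VM_PROT_NONE) removes the
 * mappings entirely.  The stubs stand in for pmap_copy_on_write() and
 * pmap_remove_all(); constants mirror the usual vm_prot values.
 */
#include <stdio.h>

#define VM_PROT_READ	0x1
#define VM_PROT_WRITE	0x2
#define VM_PROT_EXECUTE	0x4
#define VM_PROT_ALL	(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)

static void copy_on_write(unsigned pa) { printf("0x%x: write-protect\n", pa); }
static void remove_all(unsigned pa)    { printf("0x%x: unmap\n", pa); }

static void
page_protect(unsigned pa, int prot)
{
	switch (prot) {
	case VM_PROT_READ:
	case VM_PROT_READ | VM_PROT_EXECUTE:
		copy_on_write(pa);	/* downgrade to read-only */
		break;
	case VM_PROT_ALL:
		break;			/* nothing to take away */
	default:
		remove_all(pa);		/* e.g. VM_PROT_NONE */
		break;
	}
}

int
main(void)
{
	page_protect(0x4000, VM_PROT_READ);
	page_protect(0x5000, 0);
	return 0;
}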
#if FUTURE_MAYBE
@@ -4293,82 +4439,84 @@ pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
void
pagemove(vm_offset_t from, vm_offset_t to, int size)
{
- vm_offset_t pa;
- pt_entry_t *srcpte, *dstpte;
- int pfi;
- pv_entry_t pvl;
- int spl;
- int i;
- unsigned users;
- pte_template_t opte;
-
- PMAP_LOCK(kernel_pmap, spl);
-
- users = 0;
-
- while (size > 0) {
-
- /*
- * check if the source addr is mapped
- */
- if ((srcpte = pmap_pte(kernel_pmap, (vm_offset_t)from)) == PT_ENTRY_NULL) {
- printf("pagemove: source vaddr 0x%x\n", from);
- panic("pagemove: Source addr not mapped");
- }
-
- /*
- *
- */
- if ((dstpte = pmap_pte(kernel_pmap, (vm_offset_t)to)) == PT_ENTRY_NULL)
- if ((dstpte = pmap_expand_kmap((vm_offset_t)to, VM_PROT_READ | VM_PROT_WRITE))
- == PT_ENTRY_NULL)
- panic("pagemove: Cannot allocate distination pte");
- /*
- *
- */
- if (dstpte->dtype == DT_VALID) {
- printf("pagemove: distination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte));
- panic("pagemove: Distination pte already valid");
- }
+ vm_offset_t pa;
+ pt_entry_t *srcpte, *dstpte;
+ int pfi;
+ pv_entry_t pvl;
+ int spl;
+ int i;
+ unsigned users;
+ pte_template_t opte;
+
+ PMAP_LOCK(kernel_pmap, spl);
+
+ users = kernel_pmap->cpus_using;
+
+ while (size > 0) {
+
+ /*
+ * check if the source addr is mapped
+ */
+ if ((srcpte = pmap_pte(kernel_pmap, (vm_offset_t)from)) == PT_ENTRY_NULL) {
+ printf("pagemove: source vaddr 0x%x\n", from);
+ panic("pagemove: Source addr not mapped");
+ }
-#ifdef DEBUG
- if ((pmap_con_dbg & (CD_PGMV | CD_NORM)) == (CD_PGMV | CD_NORM))
- printf("(pagemove :%x) from 0x%x to 0x%x\n", curproc, from, to);
- if ((pmap_con_dbg & (CD_PGMV | CD_FULL)) == (CD_PGMV | CD_FULL))
- printf("(pagemove :%x) srcpte @ 0x%x = %x dstpte @ 0x%x = %x\n", curproc, (unsigned)srcpte, *(unsigned *)srcpte, (unsigned)dstpte, *(unsigned *)dstpte);
+ /*
+ * look up the destination pte, expanding the kernel map if needed
+ */
+ if ((dstpte = pmap_pte(kernel_pmap, (vm_offset_t)to)) == PT_ENTRY_NULL)
+ if ((dstpte = pmap_expand_kmap((vm_offset_t)to, VM_PROT_READ | VM_PROT_WRITE))
+ == PT_ENTRY_NULL)
+ panic("pagemove: Cannot allocate destination pte");
+ /*
+ * the destination must not already be mapped
+ */
+ if (dstpte->dtype == DT_VALID) {
+ printf("pagemove: destination vaddr 0x%x, pte = 0x%x\n", to, *((unsigned *)dstpte));
+ panic("pagemove: Destination pte already valid");
+ }
-#endif /* DEBUG */
+ #ifdef DEBUG
+ if ((pmap_con_dbg & (CD_PGMV | CD_NORM)) == (CD_PGMV | CD_NORM))
+ printf("(pagemove :%x) from 0x%x to 0x%x\n", curproc, from, to);
+ if ((pmap_con_dbg & (CD_PGMV | CD_FULL)) == (CD_PGMV | CD_FULL))
+ printf("(pagemove :%x) srcpte @ 0x%x = %x dstpte @ 0x%x = %x\n", curproc, (unsigned)srcpte, *(unsigned *)srcpte, (unsigned)dstpte, *(unsigned *)dstpte);
+
+ #endif /* DEBUG */
+
+ /*
+ * Update pv_list
+ */
+ pa = M88K_PTOB(srcpte->pfn);
+ if (PMAP_MANAGED(pa)) {
+ pfi = PFIDX(pa);
+ LOCK_PVH(pfi);
+ pvl = PFIDX_TO_PVH(pfi);
+ CHECK_PV_LIST(pa, pvl, "pagemove");
+ pvl->va = (vm_offset_t)to;
+ UNLOCK_PVH(pfi);
+ }
+
+ /*
+ * copy pte
+ */
+ for (i = ptes_per_vm_page; i > 0; i--) {
+ /*
+ * Invalidate the pte temporarily so that the modified and/or
+ * referenced bits cannot be written back by another cpu.
+ */
+ opte.bits = invalidate_pte(srcpte);
+ flush_atc_entry(users, from, 1);
+ ((pte_template_t *)dstpte)->bits = opte.bits;
+ from += M88K_PGBYTES;
+ to += M88K_PGBYTES;
+ srcpte++; dstpte++;
+ }
+ size -= PAGE_SIZE;
+ }
- /*
- * Update pv_list
- */
- pa = M88K_PTOB(srcpte->pfn);
- if (PMAP_MANAGED(pa)) {
- pfi = PFIDX(pa);
- pvl = PFIDX_TO_PVH(pfi);
- CHECK_PV_LIST(pa, pvl, "pagemove");
- pvl->va = (vm_offset_t)to;
- }
-
- /*
- * copy pte
- */
- for (i = ptes_per_vm_page; i > 0; i--) {
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by other cpu.
- */
- opte.bits = invalidate_pte(srcpte);
- flush_atc_entry(users, from, 1);
- ((pte_template_t *)dstpte)->bits = opte.bits;
- from += M88K_PGBYTES;
- to += M88K_PGBYTES;
- srcpte++; dstpte++;
- }
- size -= PAGE_SIZE;
- }
-
- PMAP_UNLOCK(kernel_pmap, spl);
+ PMAP_UNLOCK(kernel_pmap, spl);
} /* pagemove */
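
/*
 * The core move step in pagemove() is "atomically snapshot and
 * invalidate the source pte, then install the saved bits at the
 * destination".  A sketch with a plain word standing in for a pte;
 * invalidate_pte()'s exchange semantics are modelled with a local swap
 * (the real routine uses an atomic exchange on the m88k).
 */
#include <stdio.h>

static unsigned
invalidate_pte_model(unsigned *pte)
{
	unsigned old = *pte;	/* snapshot the current bits */
	*pte = 0;		/* leave the source entry invalid */
	return old;
}

int
main(void)
{
	unsigned src = 0x12345001, dst = 0;	/* low bit: valid */

	dst = invalidate_pte_model(&src);	/* move the mapping */
	printf("src=0x%08x dst=0x%08x\n", src, dst);
	return 0;
}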
@@ -4399,12 +4547,16 @@ pagemove(vm_offset_t from, vm_offset_t to, int size)
void
icache_flush(vm_offset_t pa)
{
- int i;
- int cpu = 0;
-
- for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
- cmmu_flush_remote_inst_cache(cpu, pa, M88K_PGBYTES);
- }
+ int i;
+ int cpu = 0;
+
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ for (cpu=0; cpu<max_cpus; cpu++) {
+ if (cpu_sets[cpu]) {
+ cmmu_flush_remote_inst_cache(cpu, pa, M88K_PGBYTES);
+ }
+ }
+ }
} /* icache_flush */
@@ -4429,21 +4581,21 @@ icache_flush(vm_offset_t pa)
void
pmap_dcache_flush(pmap_t pmap, vm_offset_t va)
{
- vm_offset_t pa;
- int i;
- int spl;
+ vm_offset_t pa;
+ int i;
+ int spl;
- if (pmap == PMAP_NULL)
- panic("pmap_dcache_flush: pmap is NULL");
+ if (pmap == PMAP_NULL)
+ panic("pmap_dcache_flush: pmap is NULL");
- PMAP_LOCK(pmap, spl);
+ PMAP_LOCK(pmap, spl);
- pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
- for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
- cmmu_flush_data_cache(pa, M88K_PGBYTES);
- }
+ pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cmmu_flush_data_cache(pa, M88K_PGBYTES);
+ }
- PMAP_UNLOCK(pmap, spl);
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_dcache_flush */
@@ -4451,54 +4603,55 @@ pmap_dcache_flush(pmap_t pmap, vm_offset_t va)
STATIC void
cache_flush_loop(int mode, vm_offset_t pa, int size)
{
- int i;
- int ncpus;
- void (*cfunc)(int cpu, vm_offset_t physaddr, int size);
-
- switch (mode) {
- default:
- panic("bad cache_flush_loop mode");
- return;
-
- case FLUSH_CACHE: /* All caches, all CPUs */
- ncpus = NCPUS;
- cfunc = cmmu_flush_remote_cache;
- break;
-
- case FLUSH_CODE_CACHE: /* Instruction caches, all CPUs */
- ncpus = NCPUS;
- cfunc = cmmu_flush_remote_inst_cache;
- break;
-
- case FLUSH_DATA_CACHE: /* Data caches, all CPUs */
- ncpus = NCPUS;
- cfunc = cmmu_flush_remote_data_cache;
- break;
-
- case FLUSH_LOCAL_CACHE: /* Both caches, my CPU */
- ncpus = 1;
- cfunc = cmmu_flush_remote_cache;
- break;
-
- case FLUSH_LOCAL_CODE_CACHE: /* Instruction cache, my CPU */
- ncpus = 1;
- cfunc = cmmu_flush_remote_inst_cache;
- break;
-
- case FLUSH_LOCAL_DATA_CACHE: /* Data cache, my CPU */
- ncpus = 1;
- cfunc = cmmu_flush_remote_data_cache;
- break;
- }
-
- if (ncpus == 1) {
- (*cfunc)(cpu_number(), pa, size);
- }
- else {
- for (i=0; i<NCPUS; i++) {
- (*cfunc)(i, pa, size);
- }
- }
+ int i;
+ int ncpus;
+ void (*cfunc)(int cpu, vm_offset_t physaddr, int size);
+
+ switch (mode) {
+ default:
+ panic("bad cache_flush_loop mode");
+ return;
+
+ case FLUSH_CACHE: /* All caches, all CPUs */
+ ncpus = max_cpus;
+ cfunc = cmmu_flush_remote_cache;
+ break;
+
+ case FLUSH_CODE_CACHE: /* Instruction caches, all CPUs */
+ ncpus = max_cpus;
+ cfunc = cmmu_flush_remote_inst_cache;
+ break;
+
+ case FLUSH_DATA_CACHE: /* Data caches, all CPUs */
+ ncpus = max_cpus;
+ cfunc = cmmu_flush_remote_data_cache;
+ break;
+
+ case FLUSH_LOCAL_CACHE: /* Both caches, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_cache;
+ break;
+
+ case FLUSH_LOCAL_CODE_CACHE: /* Instruction cache, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_inst_cache;
+ break;
+
+ case FLUSH_LOCAL_DATA_CACHE: /* Data cache, my CPU */
+ ncpus = 1;
+ cfunc = cmmu_flush_remote_data_cache;
+ break;
+ }
+
+ if (ncpus == 1) {
+ (*cfunc)(cpu_number(), pa, size);
+ } else {
+ for (i=0; i<max_cpus; i++) {
+ if (cpu_sets[i]) {
+ (*cfunc)(i, pa, size);
+ }
+ }
+ }
}
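
/*
 * The mode switch above boils down to "pick a flush routine, then apply
 * it to one CPU or to every CPU actually present".  A compilable sketch
 * of that dispatch, with stub flush functions and a cpu_sets[] presence
 * mask standing in for the CMMU routines; all names are illustrative.
 */
#include <stdio.h>

#define MAX_CPUS 4
static int cpu_sets[MAX_CPUS] = { 1, 1, 0, 0 };	/* CPUs 0 and 1 present */

static void flush_all(int cpu)  { printf("cpu%d: flush I+D\n", cpu); }
static void flush_inst(int cpu) { printf("cpu%d: flush I\n", cpu); }

enum mode { ALL_CPUS_ALL, ALL_CPUS_INST, LOCAL_ALL };

static void
flush_dispatch(enum mode m, int self)
{
	void (*cfunc)(int);
	int i, ncpus;

	switch (m) {
	case ALL_CPUS_ALL:  ncpus = MAX_CPUS; cfunc = flush_all;  break;
	case ALL_CPUS_INST: ncpus = MAX_CPUS; cfunc = flush_inst; break;
	case LOCAL_ALL:     ncpus = 1;        cfunc = flush_all;  break;
	default: return;
	}

	if (ncpus == 1)
		(*cfunc)(self);
	else
		for (i = 0; i < MAX_CPUS; i++)
			if (cpu_sets[i])	/* skip absent CPUs */
				(*cfunc)(i);
}

int
main(void)
{
	flush_dispatch(ALL_CPUS_INST, 0);
	flush_dispatch(LOCAL_ALL, 0);
	return 0;
}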
/*
@@ -4508,31 +4661,31 @@ cache_flush_loop(int mode, vm_offset_t pa, int size)
void
pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode)
{
- vm_offset_t pa;
- vm_offset_t va;
- int i;
- int spl;
+ vm_offset_t pa;
+ vm_offset_t va;
+ int i;
+ int spl;
- if (pmap == PMAP_NULL)
- panic("pmap_dcache_flush: NULL pmap");
+ if (pmap == PMAP_NULL)
+ panic("pmap_dcache_flush: NULL pmap");
- /*
- * If it is more than a couple of pages, just blow the whole cache
- * because of the number of cycles involved.
- */
- if (bytes > 2*M88K_PGBYTES) {
- cache_flush_loop(mode, 0, -1);
- return;
- }
-
- PMAP_LOCK(pmap, spl);
- for(va = virt; bytes > 0; bytes -= M88K_PGBYTES,va += M88K_PGBYTES) {
- pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
- for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
- cache_flush_loop(mode, pa, M88K_PGBYTES);
- }
- }
- PMAP_UNLOCK(pmap, spl);
+ /*
+ * If it is more than a couple of pages, just blow the whole cache
+ * because of the number of cycles involved.
+ */
+ if (bytes > 2*M88K_PGBYTES) {
+ cache_flush_loop(mode, 0, -1);
+ return;
+ }
+
+ PMAP_LOCK(pmap, spl);
+ for (va = virt; bytes > 0; bytes -= M88K_PGBYTES,va += M88K_PGBYTES) {
+ pa = M88K_PTOB((pmap_pte(pmap, va))->pfn);
+ for (i = ptes_per_vm_page; i > 0; i--, pa += M88K_PGBYTES) {
+ cache_flush_loop(mode, pa, M88K_PGBYTES);
+ }
+ }
+ PMAP_UNLOCK(pmap, spl);
} /* pmap_cache_flush */
#ifdef DEBUG
@@ -4576,49 +4729,48 @@ pmap_cache_flush(pmap_t pmap, vm_offset_t virt, int bytes, int mode)
STATIC void
check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
{
- pv_entry_t pv_e;
- pt_entry_t *pte;
- vm_offset_t pa;
-
- if (pv_h != PFIDX_TO_PVH(PFIDX(phys))) {
- printf("check_pv_list: incorrect pv_h supplied.\n");
- panic(who);
- }
-
- if (!PAGE_ALIGNED(phys)) {
- printf("check_pv_list: supplied phys addr not page aligned.\n");
- panic(who);
- }
-
- if (pv_h->pmap == PMAP_NULL) {
- if (pv_h->next != PV_ENTRY_NULL) {
- printf("check_pv_list: first entry has null pmap, but list non-empty.\n");
- panic(who);
- }
- else return; /* proper empry lst */
- }
-
- pv_e = pv_h;
- while (pv_e != PV_ENTRY_NULL) {
- if (!PAGE_ALIGNED(pv_e->va)) {
- printf("check_pv_list: non-aligned VA in entry at 0x%x.\n", pv_e);
- panic(who);
- }
- /*
- * We can't call pmap_extract since it requires lock.
- */
- if ((pte = pmap_pte(pv_e->pmap, pv_e->va)) == PT_ENTRY_NULL)
- pa = (vm_offset_t)0;
- else
- pa = M88K_PTOB(pte->pfn) | (pv_e->va & M88K_PGOFSET);
-
- if (pa != phys) {
- printf("check_pv_list: phys addr diff in entry at 0x%x.\n", pv_e);
- panic(who);
- }
-
- pv_e = pv_e->next;
- }
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ vm_offset_t pa;
+
+ if (pv_h != PFIDX_TO_PVH(PFIDX(phys))) {
+ printf("check_pv_list: incorrect pv_h supplied.\n");
+ panic(who);
+ }
+
+ if (!PAGE_ALIGNED(phys)) {
+ printf("check_pv_list: supplied phys addr not page aligned.\n");
+ panic(who);
+ }
+
+ if (pv_h->pmap == PMAP_NULL) {
+ if (pv_h->next != PV_ENTRY_NULL) {
+ printf("check_pv_list: first entry has null pmap, but list non-empty.\n");
+ panic(who);
+ } else return; /* proper empty list */
+ }
+
+ pv_e = pv_h;
+ while (pv_e != PV_ENTRY_NULL) {
+ if (!PAGE_ALIGNED(pv_e->va)) {
+ printf("check_pv_list: non-aligned VA in entry at 0x%x.\n", pv_e);
+ panic(who);
+ }
+ /*
+ * We can't call pmap_extract since it requires lock.
+ */
+ if ((pte = pmap_pte(pv_e->pmap, pv_e->va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t)0;
+ else
+ pa = M88K_PTOB(pte->pfn) | (pv_e->va & M88K_PGOFSET);
+
+ if (pa != phys) {
+ printf("check_pv_list: phys addr diff in entry at 0x%x.\n", pv_e);
+ panic(who);
+ }
+
+ pv_e = pv_e->next;
+ }
} /* check_pv_list() */
@@ -4656,102 +4808,101 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who)
STATIC void
check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
{
- vm_offset_t va,
- old_va,
- phys;
- pv_entry_t pv_h,
- pv_e,
- saved_pv_e;
- pt_entry_t *ptep;
- boolean_t found;
- int loopcnt;
-
-
- /*
- * for each page in the address space, check to see if there's
- * a valid mapping. If so makes sure it's listed in the PV_list.
- */
-
- if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
- printf("(check_map) checking map at 0x%x\n", map);
-
- old_va = s;
- for (va = s; va < e; va += PAGE_SIZE) {
- /* check for overflow - happens if e=0xffffffff */
- if (va < old_va)
- break;
- else
- old_va = va;
-
- if (va == phys_map_vaddr1 || va == phys_map_vaddr2)
- /* don't try anything with these */
- continue;
-
- ptep = pmap_pte(map, va);
-
- if (ptep == PT_ENTRY_NULL) {
- /* no page table, skip to next segment entry */
- va = SDT_NEXT(va)-PAGE_SIZE;
- continue;
- }
-
- if (!PDT_VALID(ptep))
- continue; /* no page mapping */
-
- phys = M88K_PTOB(ptep->pfn); /* pick up phys addr */
-
- if (!PMAP_MANAGED(phys))
- continue; /* no PV list */
-
- /* note: vm_page_startup allocates some memory for itself
- through pmap_map before pmap_init is run. However,
- it doesn't adjust the physical start of memory.
- So, pmap thinks those pages are managed - but they're
- not actually under it's control. So, the following
- conditional is a hack to avoid those addresses
- reserved by vm_page_startup */
- /* pmap_init also allocate some memory for itself. */
-
- if (map == kernel_pmap &&
- va < round_page((vm_offset_t)(pmap_modify_list + (pmap_phys_end - pmap_phys_start))))
- continue;
-
- pv_h = PFIDX_TO_PVH(PFIDX(phys));
- found = FALSE;
-
- if (pv_h->pmap != PMAP_NULL) {
-
- loopcnt = 10000; /* loop limit */
- pv_e = pv_h;
- while(pv_e != PV_ENTRY_NULL) {
-
- if (loopcnt-- < 0) {
- printf("check_map: loop in PV list at PVH 0x%x (for phys 0x%x)\n", pv_h, phys);
- panic(who);
- }
-
- if (pv_e->pmap == map && pv_e->va == va) {
- if (found) {
- printf("check_map: Duplicate PV list entries at 0x%x and 0x%x in PV list 0x%x.\n", saved_pv_e, pv_e, pv_h);
- printf("check_map: for pmap 0x%x, VA 0x%x,phys 0x%x.\n", map, va, phys);
- panic(who);
- }
- else {
- found = TRUE;
- saved_pv_e = pv_e;
- }
- }
- pv_e = pv_e->next;
- }
- }
-
- if (!found) {
- printf("check_map: Mapping for pmap 0x%x VA 0x%x Phys 0x%x does not appear in PV list 0x%x.\n", map, va, phys, pv_h);
- }
- }
-
- if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
- printf("(check_map) done \n");
+ vm_offset_t va,
+ old_va,
+ phys;
+ pv_entry_t pv_h,
+ pv_e,
+ saved_pv_e;
+ pt_entry_t *ptep;
+ boolean_t found;
+ int loopcnt;
+
+
+ /*
+ * for each page in the address space, check to see if there's
+ * a valid mapping. If so, make sure it's listed in the PV list.
+ */
+
+ if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
+ printf("(check_map) checking map at 0x%x\n", map);
+
+ old_va = s;
+ for (va = s; va < e; va += PAGE_SIZE) {
+ /* check for overflow - happens if e=0xffffffff */
+ if (va < old_va)
+ break;
+ else
+ old_va = va;
+
+ if (va == phys_map_vaddr1 || va == phys_map_vaddr2)
+ /* don't try anything with these */
+ continue;
+
+ ptep = pmap_pte(map, va);
+
+ if (ptep == PT_ENTRY_NULL) {
+ /* no page table, skip to next segment entry */
+ va = SDT_NEXT(va)-PAGE_SIZE;
+ continue;
+ }
+
+ if (!PDT_VALID(ptep))
+ continue; /* no page mapping */
+
+ phys = M88K_PTOB(ptep->pfn); /* pick up phys addr */
+
+ if (!PMAP_MANAGED(phys))
+ continue; /* no PV list */
+
+ /* note: vm_page_startup allocates some memory for itself
+ through pmap_map before pmap_init is run. However,
+ it doesn't adjust the physical start of memory.
+ So, pmap thinks those pages are managed - but they're
+ not actually under its control. So, the following
+ conditional is a hack to avoid those addresses
+ reserved by vm_page_startup */
+ /* pmap_init also allocates some memory for itself. */
+
+ if (map == kernel_pmap &&
+ va < round_page((vm_offset_t)(pmap_modify_list + (pmap_phys_end - pmap_phys_start))))
+ continue;
+
+ pv_h = PFIDX_TO_PVH(PFIDX(phys));
+ found = FALSE;
+
+ if (pv_h->pmap != PMAP_NULL) {
+
+ loopcnt = 10000; /* loop limit */
+ pv_e = pv_h;
+ while (pv_e != PV_ENTRY_NULL) {
+
+ if (loopcnt-- < 0) {
+ printf("check_map: loop in PV list at PVH 0x%x (for phys 0x%x)\n", pv_h, phys);
+ panic(who);
+ }
+
+ if (pv_e->pmap == map && pv_e->va == va) {
+ if (found) {
+ printf("check_map: Duplicate PV list entries at 0x%x and 0x%x in PV list 0x%x.\n", saved_pv_e, pv_e, pv_h);
+ printf("check_map: for pmap 0x%x, VA 0x%x,phys 0x%x.\n", map, va, phys);
+ panic(who);
+ } else {
+ found = TRUE;
+ saved_pv_e = pv_e;
+ }
+ }
+ pv_e = pv_e->next;
+ }
+ }
+
+ if (!found) {
+ printf("check_map: Mapping for pmap 0x%x VA 0x%x Phys 0x%x does not appear in PV list 0x%x.\n", map, va, phys, pv_h);
+ }
+ }
+
+ if ((pmap_con_dbg & (CD_CHKM | CD_NORM)) == (CD_CHKM | CD_NORM))
+ printf("(check_map) done \n");
} /* check_map() */
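
/*
 * A small sketch of the defensive walk check_map() does above: cap the
 * number of list steps so a corrupted (cyclic) PV list turns into a
 * clean panic instead of a silent hang.  Names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static void
walk_with_limit(struct node *head)
{
	int loopcnt = 10000;	/* generous upper bound on list length */
	struct node *n;

	for (n = head; n != NULL; n = n->next) {
		if (loopcnt-- < 0) {
			fprintf(stderr, "loop in list at %p\n", (void *)head);
			abort();	/* panic() in the kernel */
		}
		/* ... per-entry checks would go here ... */
	}
}

int
main(void)
{
	struct node c = { NULL }, b = { &c }, a = { &b };

	walk_with_limit(&a);
	printf("list ok\n");
	return 0;
}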
@@ -4792,46 +4943,46 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who)
STATIC void
check_pmap_consistency(char *who)
{
- pmap_t p;
- int i;
- vm_offset_t phys;
- pv_entry_t pv_h;
- int spl;
+ pmap_t p;
+ int i;
+ vm_offset_t phys;
+ pv_entry_t pv_h;
+ int spl;
- if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
- printf("check_pmap_consistency (%s :%x) start.\n", who, curproc);
+ if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
+ printf("check_pmap_consistency (%s :%x) start.\n", who, curproc);
- if (pv_head_table == PV_ENTRY_NULL) {
+ if (pv_head_table == PV_ENTRY_NULL) {
- printf("check_pmap_consistency (%s) PV head table not initialized.\n", who);
- return;
- }
+ printf("check_pmap_consistency (%s) PV head table not initialized.\n", who);
+ return;
+ }
- SPLVM(spl);
+ SPLVM(spl);
- p = kernel_pmap;
- check_map(p, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS, who);
+ p = kernel_pmap;
+ check_map(p, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS, who);
- /* run through all pmaps. check consistency of each one... */
- i = PMAP_MAX;
- for (p = kernel_pmap->next;p != kernel_pmap; p = p->next) {
- if (i == 0) { /* can not read pmap list */
- printf("check_pmap_consistency: pmap strcut loop error.\n");
- panic(who);
- }
- check_map(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS, who);
- }
+ /* run through all pmaps. check consistency of each one... */
+ i = PMAP_MAX;
+ for (p = kernel_pmap->next;p != kernel_pmap; p = p->next) {
+ if (i == 0) { /* can not read pmap list */
+ printf("check_pmap_consistency: pmap strcut loop error.\n");
+ panic(who);
+ }
+ check_map(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS, who);
+ }
- /* run through all managed paes, check pv_list for each one */
- for (phys = pmap_phys_start; phys < pmap_phys_end; phys += PAGE_SIZE) {
- pv_h = PFIDX_TO_PVH(PFIDX(phys));
- check_pv_list(phys, pv_h, who);
- }
+ /* run through all managed pages, check pv_list for each one */
+ for (phys = pmap_phys_start; phys < pmap_phys_end; phys += PAGE_SIZE) {
+ pv_h = PFIDX_TO_PVH(PFIDX(phys));
+ check_pv_list(phys, pv_h, who);
+ }
- SPLX(spl);
+ SPLX(spl);
- if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
- printf("check_pmap consistency (%s :%x): done.\n",who, curproc);
+ if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM))
+ printf("check_pmap consistency (%s :%x): done.\n",who, curproc);
} /* check_pmap_consistency() */
#endif /* DEBUG */
@@ -4880,64 +5031,63 @@ check_pmap_consistency(char *who)
void
pmap_print(pmap_t pmap)
{
- sdt_entry_t *sdtp;
- sdt_entry_t *sdtv;
- int i;
+ sdt_entry_t *sdtp;
+ sdt_entry_t *sdtv;
+ int i;
- printf("Pmap @ 0x%x:\n", (unsigned)pmap);
- sdtp = pmap->sdt_paddr;
- sdtv = pmap->sdt_vaddr;
- printf(" sdt_paddr: 0x%x; sdt_vaddr: 0x%x; ref_count: %d;\n",
- (unsigned)sdtp, (unsigned)sdtv,
- pmap->ref_count);
+ printf("Pmap @ 0x%x:\n", (unsigned)pmap);
+ sdtp = pmap->sdt_paddr;
+ sdtv = pmap->sdt_vaddr;
+ printf(" sdt_paddr: 0x%x; sdt_vaddr: 0x%x; ref_count: %d;\n",
+ (unsigned)sdtp, (unsigned)sdtv,
+ pmap->ref_count);
#ifdef statistics_not_yet_maintained
- printf(" statistics: pagesize %d: free_count %d; "
- "active_count %d; inactive_count %d; wire_count %d\n",
- pmap->stats.pagesize,
- pmap->stats.free_count,
- pmap->stats.active_count,
- pmap->stats.inactive_count,
- pmap->stats.wire_count);
-
- printf(" zero_fill_count %d; reactiveations %d; "
- "pageins %d; pageouts %d; faults %d\n",
- pmap->stats.zero_fill_count,
- pmap->stats.reactivations,
- pmap->stats.pageins,
- pmap->stats.pageouts,
- pmap->stats.fault);
-
- printf(" cow_faults %d, lookups %d, hits %d\n",
- pmap->stats.cow_faults,
- pmap->stats.loopups,
- pmap->stats.faults);
+ printf(" statistics: pagesize %d: free_count %d; "
+ "active_count %d; inactive_count %d; wire_count %d\n",
+ pmap->stats.pagesize,
+ pmap->stats.free_count,
+ pmap->stats.active_count,
+ pmap->stats.inactive_count,
+ pmap->stats.wire_count);
+
+ printf(" zero_fill_count %d; reactiveations %d; "
+ "pageins %d; pageouts %d; faults %d\n",
+ pmap->stats.zero_fill_count,
+ pmap->stats.reactivations,
+ pmap->stats.pageins,
+ pmap->stats.pageouts,
+ pmap->stats.fault);
+
+ printf(" cow_faults %d, lookups %d, hits %d\n",
+ pmap->stats.cow_faults,
+ pmap->stats.lookups,
+ pmap->stats.faults);
#endif
- sdtp = (sdt_entry_t *) pmap->sdt_vaddr; /* addr of physical table */
- sdtv = sdtp + SDT_ENTRIES; /* shadow table with virt address */
- if (sdtp == (sdt_entry_t *)0)
- printf("Error in pmap - sdt_paddr is null.\n");
- else {
- int count = 0;
- printf(" Segment table at 0x%x (0x%x):\n",
- (unsigned)sdtp, (unsigned)sdtv);
- for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
- if ((sdtp->table_addr != 0 ) || (sdtv->table_addr != 0)) {
- if (count != 0)
- printf("sdt entry %d skip !!\n", count);
- count = 0;
- printf(" (%x)phys: ", i);
- PRINT_SDT(sdtp);
- printf(" (%x)virt: ", i);
- PRINT_SDT(sdtv);
- }
- else
- count++;
- }
- if (count != 0)
- printf("sdt entry %d skip !!\n", count);
- }
+ sdtp = (sdt_entry_t *) pmap->sdt_vaddr; /* addr of physical table */
+ sdtv = sdtp + SDT_ENTRIES; /* shadow table with virt address */
+ if (sdtp == (sdt_entry_t *)0)
+ printf("Error in pmap - sdt_paddr is null.\n");
+ else {
+ int count = 0;
+ printf(" Segment table at 0x%x (0x%x):\n",
+ (unsigned)sdtp, (unsigned)sdtv);
+ for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
+ if ((sdtp->table_addr != 0 ) || (sdtv->table_addr != 0)) {
+ if (count != 0)
+ printf("sdt entry %d skip !!\n", count);
+ count = 0;
+ printf(" (%x)phys: ", i);
+ PRINT_SDT(sdtp);
+ printf(" (%x)virt: ", i);
+ PRINT_SDT(sdtv);
+ } else
+ count++;
+ }
+ if (count != 0)
+ printf("sdt entry %d skip !!\n", count);
+ }
} /* pmap_print() */
@@ -4964,113 +5114,113 @@ pmap_print(pmap_t pmap)
void
pmap_print_trace (pmap_t pmap, vm_offset_t va, boolean_t long_format)
{
- sdt_entry_t *sdtp; /* ptr to sdt table of physical addresses */
- sdt_entry_t *sdtv; /* ptr to sdt shadow table of virtual addresses */
- pt_entry_t *ptep; /* ptr to pte table of physical page addresses */
-
- int i; /* table loop index */
- unsigned long prev_entry; /* keep track of value of previous table entry */
- int n_dup_entries; /* count contiguous duplicate entries */
-
- printf("Trace of virtual address 0x%08x. Pmap @ 0x%08x.\n",
- va, (unsigned)pmap);
-
- /*** SDT TABLES ***/
- /* get addrs of sdt tables */
- sdtp = (sdt_entry_t *)pmap->sdt_vaddr;
- sdtv = sdtp + SDT_ENTRIES;
-
- if (sdtp == SDT_ENTRY_NULL) {
- printf(" Segment table pointer (pmap.sdt_paddr) null, trace stops.\n");
- return;
- }
-
- n_dup_entries = 0;
- prev_entry = 0xFFFFFFFF;
-
- if (long_format) {
- printf(" Segment table at 0x%08x (virt shadow at 0x%08x)\n",
- (unsigned)sdtp, (unsigned)sdtv);
- for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
- if (prev_entry == ((sdt_entry_template_t *)sdtp)->bits
- && SDTIDX(va) != i && i != SDT_ENTRIES-1) {
- n_dup_entries++;
- continue; /* suppress duplicate entry */
- }
- if (n_dup_entries != 0) {
- printf(" - %d duplicate entries skipped -\n",n_dup_entries);
- n_dup_entries = 0;
- }
- prev_entry = ((pte_template_t *)sdtp)->bits;
- if (SDTIDX(va) == i) {
- printf(" >> (%x)phys: ", i);
- } else {
- printf(" (%x)phys: ", i);
- }
- PRINT_SDT(sdtp);
- if (SDTIDX(va) == i) {
- printf(" >> (%x)virt: ", i);
- } else {
- printf(" (%x)virt: ", i);
- }
- PRINT_SDT(sdtv);
- } /* for */
- } else {
- /* index into both tables for given VA */
- sdtp += SDTIDX(va);
- sdtv += SDTIDX(va);
- printf(" SDT entry index 0x%x at 0x%x (virt shadow at 0x%x)\n",
- SDTIDX(va), (unsigned)sdtp, (unsigned)sdtv);
- printf(" phys: ");
- PRINT_SDT(sdtp);
- printf(" virt: ");
- PRINT_SDT(sdtv);
- }
-
- /*** PTE TABLES ***/
- /* get addrs of page (pte) table (no shadow table) */
-
- sdtp = ((sdt_entry_t *)pmap->sdt_vaddr) + SDTIDX(va);
- #ifdef DBG
- printf("*** DEBUG (sdtp) ");
- PRINT_SDT(sdtp);
- #endif
- sdtv = sdtp + SDT_ENTRIES;
- ptep = (pt_entry_t *)(M88K_PTOB(sdtv->table_addr));
- if (sdtp->dtype != DT_VALID) {
- printf(" segment table entry invlid, trace stops.\n");
- return;
- }
-
- n_dup_entries = 0;
- prev_entry = 0xFFFFFFFF;
- if (long_format) {
- printf(" page table (ptes) at 0x%x\n", (unsigned)ptep);
- for (i = 0; i < PDT_ENTRIES; i++, ptep++) {
- if (prev_entry == ((pte_template_t *)ptep)->bits
- && PDTIDX(va) != i && i != PDT_ENTRIES-1) {
- n_dup_entries++;
- continue; /* suppress suplicate entry */
- }
- if (n_dup_entries != 0) {
- printf(" - %d duplicate entries skipped -\n",n_dup_entries);
- n_dup_entries = 0;
- }
- prev_entry = ((pte_template_t *)ptep)->bits;
- if (PDTIDX(va) == i) {
- printf(" >> (%x)pte: ", i);
- } else {
- printf(" (%x)pte: ", i);
- }
- PRINT_PDT(ptep);
- } /* for */
- } else {
- /* index into page table */
- ptep += PDTIDX(va);
- printf(" pte index 0x%x\n", PDTIDX(va));
- printf(" pte: ");
- PRINT_PDT(ptep);
- }
+ sdt_entry_t *sdtp; /* ptr to sdt table of physical addresses */
+ sdt_entry_t *sdtv; /* ptr to sdt shadow table of virtual addresses */
+ pt_entry_t *ptep; /* ptr to pte table of physical page addresses */
+
+ int i; /* table loop index */
+ unsigned long prev_entry; /* keep track of value of previous table entry */
+ int n_dup_entries; /* count contiguous duplicate entries */
+
+ printf("Trace of virtual address 0x%08x. Pmap @ 0x%08x.\n",
+ va, (unsigned)pmap);
+
+ /*** SDT TABLES ***/
+ /* get addrs of sdt tables */
+ sdtp = (sdt_entry_t *)pmap->sdt_vaddr;
+ sdtv = sdtp + SDT_ENTRIES;
+
+ if (sdtp == SDT_ENTRY_NULL) {
+ printf(" Segment table pointer (pmap.sdt_paddr) null, trace stops.\n");
+ return;
+ }
+
+ n_dup_entries = 0;
+ prev_entry = 0xFFFFFFFF;
+
+ if (long_format) {
+ printf(" Segment table at 0x%08x (virt shadow at 0x%08x)\n",
+ (unsigned)sdtp, (unsigned)sdtv);
+ for (i = 0; i < SDT_ENTRIES; i++, sdtp++, sdtv++) {
+ if (prev_entry == ((sdt_entry_template_t *)sdtp)->bits
+ && SDTIDX(va) != i && i != SDT_ENTRIES-1) {
+ n_dup_entries++;
+ continue; /* suppress duplicate entry */
+ }
+ if (n_dup_entries != 0) {
+ printf(" - %d duplicate entries skipped -\n",n_dup_entries);
+ n_dup_entries = 0;
+ }
+ prev_entry = ((pte_template_t *)sdtp)->bits;
+ if (SDTIDX(va) == i) {
+ printf(" >> (%x)phys: ", i);
+ } else {
+ printf(" (%x)phys: ", i);
+ }
+ PRINT_SDT(sdtp);
+ if (SDTIDX(va) == i) {
+ printf(" >> (%x)virt: ", i);
+ } else {
+ printf(" (%x)virt: ", i);
+ }
+ PRINT_SDT(sdtv);
+ } /* for */
+ } else {
+ /* index into both tables for given VA */
+ sdtp += SDTIDX(va);
+ sdtv += SDTIDX(va);
+ printf(" SDT entry index 0x%x at 0x%x (virt shadow at 0x%x)\n",
+ SDTIDX(va), (unsigned)sdtp, (unsigned)sdtv);
+ printf(" phys: ");
+ PRINT_SDT(sdtp);
+ printf(" virt: ");
+ PRINT_SDT(sdtv);
+ }
+
+ /*** PTE TABLES ***/
+ /* get addrs of page (pte) table (no shadow table) */
+
+ sdtp = ((sdt_entry_t *)pmap->sdt_vaddr) + SDTIDX(va);
+#ifdef DBG
+ printf("*** DEBUG (sdtp) ");
+ PRINT_SDT(sdtp);
+#endif
+ sdtv = sdtp + SDT_ENTRIES;
+ ptep = (pt_entry_t *)(M88K_PTOB(sdtv->table_addr));
+ if (sdtp->dtype != DT_VALID) {
+ printf(" segment table entry invlid, trace stops.\n");
+ return;
+ }
+
+ n_dup_entries = 0;
+ prev_entry = 0xFFFFFFFF;
+ if (long_format) {
+ printf(" page table (ptes) at 0x%x\n", (unsigned)ptep);
+ for (i = 0; i < PDT_ENTRIES; i++, ptep++) {
+ if (prev_entry == ((pte_template_t *)ptep)->bits
+ && PDTIDX(va) != i && i != PDT_ENTRIES-1) {
+ n_dup_entries++;
+ continue; /* suppress duplicate entry */
+ }
+ if (n_dup_entries != 0) {
+ printf(" - %d duplicate entries skipped -\n",n_dup_entries);
+ n_dup_entries = 0;
+ }
+ prev_entry = ((pte_template_t *)ptep)->bits;
+ if (PDTIDX(va) == i) {
+ printf(" >> (%x)pte: ", i);
+ } else {
+ printf(" (%x)pte: ", i);
+ }
+ PRINT_PDT(ptep);
+ } /* for */
+ } else {
+ /* index into page table */
+ ptep += PDTIDX(va);
+ printf(" pte index 0x%x\n", PDTIDX(va));
+ printf(" pte: ");
+ PRINT_PDT(ptep);
+ }
} /* pmap_print_trace() */
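
/*
 * The table dumps above compress their output by remembering the
 * previous entry and counting consecutive duplicates.  A self-contained
 * sketch of that run-length display over a plain array of words:
 */
#include <stdio.h>

static void
dump_compressed(const unsigned *tab, int n)
{
	unsigned prev = 0xFFFFFFFF;	/* sentinel: matches nothing */
	int i, n_dup = 0;

	for (i = 0; i < n; i++) {
		if (tab[i] == prev && i != n - 1) {
			n_dup++;	/* suppress duplicate entry */
			continue;
		}
		if (n_dup != 0) {
			printf(" - %d duplicate entries skipped -\n", n_dup);
			n_dup = 0;
		}
		prev = tab[i];
		printf(" (%x): 0x%08x\n", i, tab[i]);
	}
}

int
main(void)
{
	unsigned tab[] = { 1, 0, 0, 0, 0, 2, 2, 3 };

	dump_compressed(tab, 8);
	return 0;
}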
/*
@@ -5083,43 +5233,43 @@ pmap_print_trace (pmap_t pmap, vm_offset_t va, boolean_t long_format)
boolean_t
pmap_check_transaction(pmap_t pmap, vm_offset_t va, vm_prot_t type)
{
- pt_entry_t *pte;
- sdt_entry_t *sdt;
- int spl;
-
- PMAP_LOCK(pmap, spl);
-
- if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
- PMAP_UNLOCK(pmap, spl);
- return FALSE;
- }
-
- if (!PDT_VALID(pte)) {
- PMAP_UNLOCK(pmap, spl);
- return FALSE;
- }
-
- /*
- * Valid pte. If the transaction was a read, there is no way it
- * could have been a fault, so return true. For now, assume
- * that a write transaction could have caused a fault. We need
- * to check pte and sdt entries for write permission to really
- * tell.
- */
+ pt_entry_t *pte;
+ sdt_entry_t *sdt;
+ int spl;
+
+ PMAP_LOCK(pmap, spl);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ }
- if (type == VM_PROT_READ) {
- PMAP_UNLOCK(pmap, spl);
- return TRUE;
- } else {
- sdt = SDTENT(pmap,va);
- if (sdt->prot || pte->prot) {
- PMAP_UNLOCK(pmap, spl);
- return FALSE;
- } else {
- PMAP_UNLOCK(pmap, spl);
- return TRUE;
- }
- }
+ if (!PDT_VALID(pte)) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ }
+
+ /*
+ * Valid pte. If the transaction was a read, there is no way it
+ * could have been a fault, so return true. For now, assume
+ * that a write transaction could have caused a fault. We need
+ * to check pte and sdt entries for write permission to really
+ * tell.
+ */
+
+ if (type == VM_PROT_READ) {
+ PMAP_UNLOCK(pmap, spl);
+ return TRUE;
+ } else {
+ sdt = SDTENT(pmap,va);
+ if (sdt->prot || pte->prot) {
+ PMAP_UNLOCK(pmap, spl);
+ return FALSE;
+ } else {
+ PMAP_UNLOCK(pmap, spl);
+ return TRUE;
+ }
+ }
}
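
/*
 * The write-fault test above relies on the m88k convention that the
 * "prot" bit in both the segment and page descriptors means
 * write-PROTECTED, so a write can only have succeeded when both bits
 * are clear.  A truth-table sketch with stand-in one-bit descriptors:
 */
#include <stdbool.h>
#include <stdio.h>

struct desc { unsigned prot : 1; };	/* 1 = write-protected */

static bool
write_would_succeed(struct desc sdt, struct desc pte)
{
	return !(sdt.prot || pte.prot);
}

int
main(void)
{
	struct desc sdt, pte;
	int s, p;

	for (s = 0; s < 2; s++)
		for (p = 0; p < 2; p++) {
			sdt.prot = s;
			pte.prot = p;
			printf("sdt.prot=%d pte.prot=%d -> %s\n", s, p,
			    write_would_succeed(sdt, pte) ? "write ok" : "fault");
		}
	return 0;
}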
/* New functions to satisfy rpd - contributed by danner */
@@ -5127,89 +5277,89 @@ pmap_check_transaction(pmap_t pmap, vm_offset_t va, vm_prot_t type)
void
pmap_virtual_space(vm_offset_t *startp, vm_offset_t *endp)
{
- *startp = virtual_avail;
- *endp = virtual_end;
+ *startp = virtual_avail;
+ *endp = virtual_end;
}
unsigned int
pmap_free_pages(void)
{
- return atop(avail_end - avail_next);
+ return atop(avail_end - avail_next);
}
boolean_t
pmap_next_page(vm_offset_t *addrp)
{
- if (avail_next == avail_end)
- return FALSE;
+ if (avail_next == avail_end)
+ return FALSE;
- *addrp = avail_next;
- avail_next += PAGE_SIZE;
- return TRUE;
+ *addrp = avail_next;
+ avail_next += PAGE_SIZE;
+ return TRUE;
}
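
/*
 * pmap_free_pages()/pmap_next_page() above implement a simple bump
 * allocator over [avail_next, avail_end).  A self-contained model of
 * the same interface; the addresses and the 4KB page size are purely
 * illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned avail_next = 0x00100000;	/* first free phys page */
static unsigned avail_end  = 0x00104000;	/* end of free memory  */

static unsigned
free_pages(void)
{
	return (avail_end - avail_next) / PAGE_SIZE;
}

static bool
next_page(unsigned *addrp)
{
	if (avail_next == avail_end)
		return false;			/* exhausted */
	*addrp = avail_next;
	avail_next += PAGE_SIZE;
	return true;
}

int
main(void)
{
	unsigned pa;

	printf("%u pages free\n", free_pages());
	while (next_page(&pa))
		printf("handed out page at 0x%x\n", pa);
	return 0;
}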
#if USING_BATC
-#ifdef OMRON_PMAP
+ #ifdef OMRON_PMAP
/*
* Set BATC
*/
void
pmap_set_batc(
- pmap_t pmap,
- boolean_t data,
- int i,
- vm_offset_t va,
- vm_offset_t pa,
- boolean_t super,
- boolean_t wt,
- boolean_t global,
- boolean_t ci,
- boolean_t wp,
- boolean_t valid)
+ pmap_t pmap,
+ boolean_t data,
+ int i,
+ vm_offset_t va,
+ vm_offset_t pa,
+ boolean_t super,
+ boolean_t wt,
+ boolean_t global,
+ boolean_t ci,
+ boolean_t wp,
+ boolean_t valid)
{
- register batc_template_t batctmp;
-
- if (i < 0 || i > (BATC_MAX - 1)) {
- panic("pmap_set_batc: illegal batc number");
- /* bad number */
- return;
- }
-
- batctmp.field.lba = va >> 19;
- batctmp.field.pba = pa >> 19;
- batctmp.field.sup = super;
- batctmp.field.wt = wt;
- batctmp.field.g = global;
- batctmp.field.ci = ci;
- batctmp.field.wp = wp;
- batctmp.field.v = valid;
-
- if (data) {
- pmap->d_batc[i].bits = batctmp.bits;
- } else {
- pmap->i_batc[i].bits = batctmp.bits;
- }
+ register batc_template_t batctmp;
+
+ if (i < 0 || i > (BATC_MAX - 1)) {
+ panic("pmap_set_batc: illegal batc number");
+ /* bad number */
+ return;
+ }
+
+ batctmp.field.lba = va >> 19;
+ batctmp.field.pba = pa >> 19;
+ batctmp.field.sup = super;
+ batctmp.field.wt = wt;
+ batctmp.field.g = global;
+ batctmp.field.ci = ci;
+ batctmp.field.wp = wp;
+ batctmp.field.v = valid;
+
+ if (data) {
+ pmap->d_batc[i].bits = batctmp.bits;
+ } else {
+ pmap->i_batc[i].bits = batctmp.bits;
+ }
}
void use_batc(
- task_t task,
- boolean_t data, /* for data-cmmu ? */
- int i, /* batc number */
- vm_offset_t va, /* virtual address */
- vm_offset_t pa, /* physical address */
- boolean_t s, /* for super-mode ? */
- boolean_t wt, /* is writethrough */
- boolean_t g, /* is global ? */
- boolean_t ci, /* is cache inhibited ? */
- boolean_t wp, /* is write-protected ? */
- boolean_t v) /* is valid ? */
+ task_t task,
+ boolean_t data, /* for data-cmmu ? */
+ int i, /* batc number */
+ vm_offset_t va, /* virtual address */
+ vm_offset_t pa, /* physical address */
+ boolean_t s, /* for super-mode ? */
+ boolean_t wt, /* is writethrough */
+ boolean_t g, /* is global ? */
+ boolean_t ci, /* is cache inhibited ? */
+ boolean_t wp, /* is write-protected ? */
+ boolean_t v) /* is valid ? */
{
- pmap_t pmap;
- pmap = vm_map_pmap(task->map);
- pmap_set_batc(pmap, data, i, va, pa, s, wt, g, ci, wp, v);
+ pmap_t pmap;
+ pmap = vm_map_pmap(task->map);
+ pmap_set_batc(pmap, data, i, va, pa, s, wt, g, ci, wp, v);
}
-#endif
+ #endif
#endif /* USING_BATC */
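
/*
 * pmap_set_batc() above packs one 88200 block address translation
 * cache entry.  A sketch of that packing with an explicit union,
 * assuming the usual 512KB-block layout (13-bit logical and physical
 * block numbers, va/pa >> 19, plus six flag bits); the field order and
 * widths here are illustrative - the real template lives in the
 * m882xx headers, and C bitfield layout is implementation-defined.
 */
#include <stdio.h>

typedef union {
	unsigned bits;
	struct {
		unsigned v   : 1;	/* valid */
		unsigned wp  : 1;	/* write-protect */
		unsigned ci  : 1;	/* cache-inhibit */
		unsigned g   : 1;	/* global */
		unsigned wt  : 1;	/* write-through */
		unsigned sup : 1;	/* supervisor-only */
		unsigned pba : 13;	/* physical block (pa >> 19) */
		unsigned lba : 13;	/* logical block (va >> 19) */
	} field;
} batc_template_sketch_t;

int
main(void)
{
	batc_template_sketch_t b = { 0 };

	b.field.lba = 0xFFC00000u >> 19;	/* map one 512KB block */
	b.field.pba = 0xFF000000u >> 19;
	b.field.sup = 1;
	b.field.v = 1;
	printf("batc word: 0x%08x\n", b.bits);
	return 0;
}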
#if FUTURE_MAYBE
/*
@@ -5228,15 +5378,15 @@ void use_batc(
void
pmap_destroy_ranges(pmap_range_t *ranges)
{
- pmap_range_t this, next;
-
- this = *ranges;
- while (this != 0) {
- next = this->next;
- pmap_range_free(this);
- this = next;
- }
- *ranges = 0;
+ pmap_range_t this, next;
+
+ this = *ranges;
+ while (this != 0) {
+ next = this->next;
+ pmap_range_free(this);
+ this = next;
+ }
+ *ranges = 0;
}
/*
@@ -5245,15 +5395,15 @@ pmap_destroy_ranges(pmap_range_t *ranges)
boolean_t
pmap_range_lookup(pmap_range_t *ranges, vm_offset_t address)
{
- pmap_range_t range;
-
- for (range = *ranges; range != 0; range = range->next) {
- if (address < range->start)
- return FALSE;
- if (address < range->end)
- return TRUE;
- }
- return FALSE;
+ pmap_range_t range;
+
+ for (range = *ranges; range != 0; range = range->next) {
+ if (address < range->start)
+ return FALSE;
+ if (address < range->end)
+ return TRUE;
+ }
+ return FALSE;
}
/*
@@ -5263,52 +5413,52 @@ pmap_range_lookup(pmap_range_t *ranges, vm_offset_t address)
void
pmap_range_add(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
{
- pmap_range_t range, *prev;
+ pmap_range_t range, *prev;
- /* look for the start address */
+ /* look for the start address */
- for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
- if (start < range->start)
- break;
- if (start <= range->end)
- goto start_overlaps;
- }
+ for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
+ if (start < range->start)
+ break;
+ if (start <= range->end)
+ goto start_overlaps;
+ }
- /* start address is not present */
+ /* start address is not present */
- if ((range == 0) || (end < range->start)) {
- /* no overlap; allocate a new range */
+ if ((range == 0) || (end < range->start)) {
+ /* no overlap; allocate a new range */
- range = pmap_range_alloc();
- range->start = start;
- range->end = end;
- range->next = *prev;
- *prev = range;
- return;
- }
+ range = pmap_range_alloc();
+ range->start = start;
+ range->end = end;
+ range->next = *prev;
+ *prev = range;
+ return;
+ }
- /* extend existing range forward to start */
+ /* extend existing range forward to start */
- range->start = start;
+ range->start = start;
- start_overlaps:
- assert((range->start <= start) && (start <= range->end));
+ start_overlaps:
+ assert((range->start <= start) && (start <= range->end));
- /* delete redundant ranges */
+ /* delete redundant ranges */
- while ((range->next != 0) && (range->next->start <= end)) {
- pmap_range_t old;
+ while ((range->next != 0) && (range->next->start <= end)) {
+ pmap_range_t old;
- old = range->next;
- range->next = old->next;
- range->end = old->end;
- pmap_range_free(old);
- }
+ old = range->next;
+ range->next = old->next;
+ range->end = old->end;
+ pmap_range_free(old);
+ }
- /* extend existing range backward to end */
+ /* extend existing range backward to end */
- if (range->end < end)
- range->end = end;
+ if (range->end < end)
+ range->end = end;
}
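
/*
 * pmap_range_add() above keeps a sorted list of non-overlapping address
 * ranges, merging neighbours as it inserts.  A self-contained userland
 * version of the same merge logic over a malloc'd list:
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
	struct range *next;
	unsigned start, end;
};

static void
range_add(struct range **ranges, unsigned start, unsigned end)
{
	struct range *r, **prev;

	/* find the insertion point, watching for overlap with start */
	for (prev = ranges; (r = *prev) != NULL; prev = &r->next) {
		if (start < r->start)
			break;
		if (start <= r->end)
			goto overlaps;
	}

	if (r == NULL || end < r->start) {
		/* disjoint: make a fresh node */
		struct range *n = malloc(sizeof(*n));
		n->start = start;
		n->end = end;
		n->next = *prev;
		*prev = n;
		return;
	}

	r->start = start;	/* extend the existing range back to start */
overlaps:
	assert(r->start <= start && start <= r->end);

	/* swallow any ranges the widened interval now covers */
	while (r->next != NULL && r->next->start <= end) {
		struct range *old = r->next;
		r->next = old->next;
		r->end = old->end;
		free(old);
	}
	if (r->end < end)
		r->end = end;
}

int
main(void)
{
	struct range *list = NULL, *r;

	range_add(&list, 0x1000, 0x2000);
	range_add(&list, 0x3000, 0x4000);
	range_add(&list, 0x1800, 0x3800);	/* bridges both */
	for (r = list; r != NULL; r = r->next)
		printf("[0x%x, 0x%x]\n", r->start, r->end);
	return 0;
}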
/*
@@ -5318,44 +5468,45 @@ pmap_range_add(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
void
pmap_range_remove(pmap_range_t *ranges, vm_offset_t start, vm_offset_t end)
{
- pmap_range_t range, *prev;
+ pmap_range_t range, *prev;
- /* look for start address */
+ /* look for start address */
- for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
- if (start <= range->start)
- break;
- if (start < range->end) {
- if (end < range->end) {
- pmap_range_t new;
+ for (prev = ranges; (range = *prev) != 0; prev = &range->next) {
+ if (start <= range->start)
+ break;
+ if (start < range->end) {
+ if (end < range->end) {
+ pmap_range_t new;
- /* split this range */
+ /* split this range */
- new = pmap_range_alloc();
- new->next = range->next;
- new->start = end;
- new->end = range->end;
+ new = pmap_range_alloc();
+ new->next = range->next;
+ new->start = end;
+ new->end = range->end;
- range->next = new;
- range->end = start;
- return;
- }
+ range->next = new;
+ range->end = start;
+ return;
+ }
- /* truncate this range */
+ /* truncate this range */
- range->end = start;
- }
- }
+ range->end = start;
+ }
+ }
- /* start address is not in the middle of a range */
+ /* start address is not in the middle of a range */
- while ((range != 0) && (range->end <= end)) {
- *prev = range->next;
- pmap_range_free(range);
- range = *prev;
- }
+ while ((range != 0) && (range->end <= end)) {
+ *prev = range->next;
+ pmap_range_free(range);
+ range = *prev;
+ }
- if ((range != 0) && (range->start < end))
- range->start = end;
+ if ((range != 0) && (range->start < end))
+ range->start = end;
}
#endif /* FUTURE_MAYBE */
+
diff --git a/sys/arch/mvme88k/mvme88k/pmap_table.c b/sys/arch/mvme88k/mvme88k/pmap_table.c
new file mode 100644
index 00000000000..9f00f13ca18
--- /dev/null
+++ b/sys/arch/mvme88k/mvme88k/pmap_table.c
@@ -0,0 +1,118 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993-1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <sys/types.h>
+#include <machine/board.h>
+#include <sys/param.h>
+#include <machine/m882xx.h> /* CMMU stuff */
+#include <vm/vm.h>
+#include <vm/vm_kern.h> /* vm/vm_kern.h */
+#include <machine/pmap_table.h> /* pmap_table.h*/
+
+#define R VM_PROT_READ
+#define RW VM_PROT_READ|VM_PROT_WRITE
+#define C CACHE_DFL
+#define CW CACHE_WT
+#define CI CACHE_INH
+#define CG CACHE_GLOBAL
+#define PAGE M88K_PGBYTES
+#define SEG M88K_SGBYTES
+
+#define M188_UTILITY U(0xFF000000)
+#define M188_UTILITY_SIZE U(0x01000000)
+#if 0
+#undef VEQR_ADDR
+#define VEQR_ADDR 0
+#endif
+
+/* phys_start, virt_start, size, prot, cacheability */
+#ifdef MVME187
+static pmap_table_entry m187_board_table[] = {
+ { BUGROM_START, BUGROM_START, BUGROM_SIZE, RW, CI},
+ { SRAM_START , SRAM_START , SRAM_SIZE , RW, CG},
+ { OBIO_START , OBIO_START , OBIO_SIZE , RW, CI},
+ { 0 , 0 , 0xffffffff , 0 , 0},
+};
+#endif
+
+#ifdef MVME188
+static pmap_table_entry m188_board_table[] = {
+ { MVME188_UTILITY, MVME188_UTILITY, MVME188_UTILITY_SIZE, RW, CI},
+ { 0 , VEQR_ADDR , 0/*filled in later*/, RW, CG},
+ { 0 , 0 , 0/*filled in later*/, RW, CG},
+ { 0 , 0 , 0xffffffff , 0, 0},
+};
+#endif
+
+#ifdef MVME197
+static pmap_table_entry m197_board_table[] = {
+ { BUGROM_START, BUGROM_START, BUGROM_SIZE, RW, CI},
+ { SRAM_START , SRAM_START , SRAM_SIZE , RW, CG},
+ { OBIO_START , OBIO_START , OBIO_SIZE , RW, CG},
+ { 0 , 0 , 0xffffffff , 0 , 0},
+};
+#endif
+
+pmap_table_t pmap_table_build(unsigned memory_size)
+{
+ extern int kernelstart;
+ unsigned int i;
+ pmap_table_t bt, pbt;
+
+ switch (cputyp) {
+#ifdef MVME187
+ case CPU_187:
+ bt = m187_board_table;
+ break;
+#endif
+#ifdef MVME188
+ case CPU_188:
+ bt = m188_board_table;
+ /* fill in the veqr map entry */
+ m188_board_table[1].size = memory_size;
+ m188_board_table[2].size = (unsigned)&kernelstart;
+ break;
+#endif
+#ifdef MVME197
+ case CPU_197:
+ bt = m197_board_table;
+ break;
+#endif
+ default:
+ panic("pmap_table_build: Unknown CPU type.");
+ /* NOT REACHED */
+ }
+
+ /* round all entry sizes up to a page boundary */
+ pbt = bt;
+ for (i = 0; pbt->size != 0xffffffff; i++){
+ if (pbt->size>0)
+ pbt->size = (pbt->size + M88K_PGBYTES-1) & ~(M88K_PGBYTES-1);
+ pbt++;
+ }
+
+ return bt;
+}
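
/*
 * The table walk above rounds each entry's size up with
 * "(size + M88K_PGBYTES-1) & ~(M88K_PGBYTES-1)", the usual
 * power-of-two round-up.  A quick demonstration, assuming the m88k's
 * 4096-byte page:
 */
#include <stdio.h>

#define M88K_PGBYTES 4096u
#define ROUND_PG(x) (((x) + M88K_PGBYTES - 1) & ~(M88K_PGBYTES - 1))

int
main(void)
{
	unsigned sizes[] = { 1, 4096, 4097, 0x01000000 };
	int i;

	for (i = 0; i < (int)(sizeof(sizes) / sizeof(sizes[0])); i++)
		printf("0x%08x -> 0x%08x\n", sizes[i], ROUND_PG(sizes[i]));
	return 0;
}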
diff --git a/sys/arch/mvme88k/mvme88k/process_machdep.c b/sys/arch/mvme88k/mvme88k/process_machdep.c
index f8e66540bf8..b69af52c741 100644
--- a/sys/arch/mvme88k/mvme88k/process_machdep.c
+++ b/sys/arch/mvme88k/mvme88k/process_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: process_machdep.c,v 1.6 1999/05/29 04:41:47 smurph Exp $ */
+/* $OpenBSD: process_machdep.c,v 1.7 1999/09/27 19:13:24 smurph Exp $ */
/*
* Copyright (c) 1993 The Regents of the University of California.
@@ -81,7 +81,8 @@ process_read_regs(p, regs)
struct proc *p;
struct reg *regs;
{
- bcopy(p->p_md.md_tf, (caddr_t)regs, sizeof(struct reg));
+
+ bcopy((caddr_t)USER_REGS(p), (caddr_t)regs, sizeof(struct reg));
return (0);
}
@@ -90,13 +91,14 @@ process_write_regs(p, regs)
struct proc *p;
struct reg *regs;
{
- bcopy((caddr_t)regs, p->p_md.md_tf, sizeof(struct reg));
+ bcopy((caddr_t)regs, (caddr_t)USER_REGS(p), sizeof(struct reg));
return (0);
}
int
process_sstep(p, sstep)
struct proc *p;
+ int sstep;
{
if (sstep)
cpu_singlestep(p);
@@ -108,8 +110,15 @@ process_set_pc(p, addr)
struct proc *p;
caddr_t addr;
{
- p->p_md.md_tf->sxip = (u_int)addr;
+ struct reg *regs;
+
+ regs = USER_REGS(p);
+ regs->sxip = (u_int)addr;
+ regs->snip = (u_int)addr + 4;
+ /*
+ p->p_md.md_tf->sxip = (u_int)addr;
p->p_md.md_tf->snip = (u_int)addr + 4;
+ */
return (0);
}
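
/*
 * On the 88100, the executing and next instruction pointers travel as a
 * pair, so redirecting a process means writing both sxip and snip
 * (= addr + 4), as the new process_set_pc() does via USER_REGS().  A
 * compilable model with a stand-in register struct; the real struct reg
 * and USER_REGS live in the machine headers.
 */
#include <stdio.h>

struct fake_reg {
	unsigned sxip;	/* executing instruction pointer */
	unsigned snip;	/* next instruction pointer */
};

static void
set_pc(struct fake_reg *regs, unsigned addr)
{
	regs->sxip = addr;
	regs->snip = addr + 4;	/* keep the pipeline pair consistent */
}

int
main(void)
{
	struct fake_reg r = { 0, 0 };

	set_pc(&r, 0x1000);
	printf("sxip=0x%x snip=0x%x\n", r.sxip, r.snip);
	return 0;
}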
diff --git a/sys/arch/mvme88k/mvme88k/trap.c b/sys/arch/mvme88k/mvme88k/trap.c
index 57b276fd5dc..54ecb40c125 100644
--- a/sys/arch/mvme88k/mvme88k/trap.c
+++ b/sys/arch/mvme88k/mvme88k/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.7 1999/05/29 04:41:47 smurph Exp $ */
+/* $OpenBSD: trap.c,v 1.8 1999/09/27 19:13:24 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -54,7 +54,11 @@
#include <sys/syscall.h>
#include <sys/ktrace.h>
#include <machine/cpu.h> /* DMT_VALID, etc. */
+#include <machine/asm_macro.h> /* enable/disable interrupts */
#include <machine/m88100.h> /* DMT_VALID, etc. */
+#ifdef MVME197
+#include <machine/m88110.h> /* DMT_VALID, etc. */
+#endif
#include <machine/trap.h>
#include <machine/psl.h> /* FIP_E, etc. */
#include <machine/pcb.h> /* FIP_E, etc. */
@@ -62,12 +66,19 @@
#include <sys/systm.h>
#if (DDB)
-#include <machine/db_machdep.h>
+ #include <machine/db_machdep.h>
#else
-#define PC_REGS(regs) ((regs->sxip & 2) ? regs->sxip & ~3 : \
+ #define PC_REGS(regs) ((regs->sxip & 2) ? regs->sxip & ~3 : \
(regs->snip & 2 ? regs->snip & ~3 : regs->sfip & ~3))
+ #define inst_return(I) (((I)&0xfffffbffU) == 0xf400c001U ? TRUE : FALSE)
+ #define inst_call(I) ({ unsigned i = (I); \
+ ((((i) & 0xf8000000U) == 0xc8000000U || /*bsr*/ \
+ ((i) & 0xfffffbe0U) == 0xf400c800U) /*jsr*/ \
+ ? TRUE : FALSE) \
+ ;})
+
#endif /* DDB */
-#define BREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
+#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
#define TRAPTRACE
#if defined(TRAPTRACE)
@@ -75,9 +86,9 @@ unsigned traptrace = 0;
#endif
#if DDB
-#define DEBUG_MSG db_printf
+ #define DEBUG_MSG db_printf
#else
-#define DEBUG_MSG printf
+ #define DEBUG_MSG printf
#endif /* DDB */
#define USERMODE(PSR) (((struct psr*)&(PSR))->psr_mode == 0)
@@ -85,29 +96,29 @@ unsigned traptrace = 0;
/* XXX MAJOR CLEANUP REQUIRED TO PORT TO BSD */
-char *trap_type[] = {
- "Reset",
- "Interrupt Exception",
- "Instruction Access",
- "Data Access Exception",
- "Misaligned Access",
- "Unimplemented Opcode",
- "Privileg Violation",
- "Bounds Check Violation",
- "Illegal Integer Divide",
- "Integer Overflow",
- "Error Exception",
+char *trap_type[] = {
+ "Reset",
+ "Interrupt Exception",
+ "Instruction Access",
+ "Data Access Exception",
+ "Misaligned Access",
+ "Unimplemented Opcode",
+ "Privileg Violation",
+ "Bounds Check Violation",
+ "Illegal Integer Divide",
+ "Integer Overflow",
+ "Error Exception",
};
-char *pbus_exception_type[] = {
- "Success (No Fault)",
- "",
- "",
- "Bus Error",
- "Segment Fault",
- "Page Fault",
- "Supervisor Violation",
- "Write Violation",
+char *pbus_exception_type[] = {
+ "Success (No Fault)",
+ "unknown 1",
+ "unknown 2",
+ "Bus Error",
+ "Segment Fault",
+ "Page Fault",
+ "Supervisor Violation",
+ "Write Violation",
};
extern ret_addr;
#define NSIR 8
@@ -115,683 +126,1410 @@ void (*sir_routines[NSIR])();
void *sir_args[NSIR];
u_char next_sir;
-int trap_types = sizeof trap_type / sizeof trap_type[0];
+int trap_types = sizeof trap_type / sizeof trap_type[0];
static inline void
userret(struct proc *p, struct m88100_saved_state *frame, u_quad_t oticks)
{
- int sig;
- int s;
-
- /* take pending signals */
- while ((sig = CURSIG(p)) != 0)
- postsig(sig);
- p->p_priority = p->p_usrpri;
-
- if (want_resched) {
- /*
- * Since we are curproc, clock will normally just change
- * our priority without moving us from one queue to another
- * (since the running process is not on a queue.)
- * If that happened after we put ourselves on the run queue
- * but before we switched, we might not be on the queue
- * indicated by our priority.
- */
- s = splstatclock();
- setrunqueue(p);
- p->p_stats->p_ru.ru_nivcsw++;
- mi_switch();
- (void) splx(s);
- while ((sig = CURSIG(p)) != 0)
- postsig(sig);
- }
-
- /*
- * If profiling, charge recent system time to the trapped pc.
- */
- if (p->p_flag & P_PROFIL)
- addupc_task(p, frame->sxip & ~3,
- (int)(p->p_sticks - oticks));
-
- curpriority = p->p_priority;
+ int sig;
+ int s;
+
+ /* take pending signals */
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ p->p_priority = p->p_usrpri;
+
+ if (want_resched) {
+ /*
+ * Since we are curproc, clock will normally just change
+ * our priority without moving us from one queue to another
+ * (since the running process is not on a queue.)
+ * If that happened after we put ourselves on the run queue
+ * but before we switched, we might not be on the queue
+ * indicated by our priority.
+ */
+ s = splstatclock();
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nivcsw++;
+ mi_switch();
+ (void) splx(s);
+ while ((sig = CURSIG(p)) != 0)
+ postsig(sig);
+ }
+
+ /*
+ * If profiling, charge recent system time to the trapped pc.
+ */
+ if (p->p_flag & P_PROFIL)
+ addupc_task(p, frame->sxip & ~3,
+ (int)(p->p_sticks - oticks));
+
+ curpriority = p->p_priority;
}
void
panictrap(int type, struct m88100_saved_state *frame)
{
- static int panicing = 0;
-
- if (panicing++ == 0) {
- if (type == 2) { /* instruction exception */
- printf("Instr access fault (%s) v = %x, frame %x\n",
- pbus_exception_type[(frame->ipfsr >> 16) & 0x7],
- frame->sxip & ~3, frame);
- } else if (type == 3) { /* data access exception */
- printf("Data access fault (%s) v = %x, frame %x\n",
- pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
- frame->sxip & ~3, frame);
- } else
- printf("trap type %d, v = %x, frame %x\n", type, frame->sxip & ~3, frame);
- regdump(frame);
- }
- if ((u_int)type < trap_types)
- panic(trap_type[type]);
- panic("trap");
- /*NOTREACHED*/
+ static int panicing = 0;
+
+ if (panicing++ == 0) {
+ if (type == 2) { /* instruction exception */
+ DEBUG_MSG("\nInstr access fault (%s) v = %x, frame %x\n",
+ pbus_exception_type[(frame->ipfsr >> 16) & 0x7],
+ frame->sxip & ~3, frame);
+ } else if (type == 3) { /* data access exception */
+ DEBUG_MSG("\nData access fault (%s) v = %x, frame %x\n",
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
+ frame->sxip & ~3, frame);
+ } else
+ DEBUG_MSG("\ntrap type %d, v = %x, frame %x\n", type, frame->sxip & ~3, frame);
+ regdump(frame);
+ }
+ if ((u_int)type < trap_types)
+ panic(trap_type[type]);
+ panic("trap");
+ /*NOTREACHED*/
}
+
+#if defined(MVME187) || defined(MVME188)
+/* the last four distinct trap types, dumped by error_fault() below */
+unsigned last_trap[4] = {0,0,0,0};
/*ARGSUSED*/
void
trap(unsigned type, struct m88100_saved_state *frame)
{
- struct proc *p;
- u_quad_t sticks = 0;
- vm_map_t map;
- vm_offset_t va;
- vm_prot_t ftype;
- int fault_type;
- u_long fault_code;
- unsigned nss, fault_addr;
- struct vmspace *vm;
- union sigval sv;
- int result;
- int sig = 0;
- unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
-
- extern vm_map_t kernel_map;
- extern int fubail(), subail();
- extern unsigned guarded_access_start;
- extern unsigned guarded_access_end;
- extern unsigned guarded_access_bad;
-
- cnt.v_trap++;
- if ((p = curproc) == NULL)
- p = &proc0;
-
- if (USERMODE(frame->epsr)) {
- sticks = p->p_sticks;
- type += T_USER;
- p->p_md.md_tf = frame; /* for ptrace/signals */
- fault_type = 0;
- fault_code = 0;
- }
-
- switch(type)
- {
- default:
- panictrap(frame->vector, frame);
- /*NOTREACHED*/
+ struct proc *p;
+ u_quad_t sticks = 0;
+ vm_map_t map;
+ vm_offset_t va;
+ vm_prot_t ftype;
+ int fault_type;
+ u_long fault_code;
+ unsigned nss, fault_addr;
+ struct vmspace *vm;
+ union sigval sv;
+ int su = 0;
+ int result;
+ int sig = 0;
+ unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
+
+ extern vm_map_t kernel_map;
+ extern int fubail(), subail();
+ extern unsigned guarded_access_start;
+ extern unsigned guarded_access_end;
+ extern unsigned guarded_access_bad;
+
+ if (type != last_trap[3]) {
+ last_trap[0] = last_trap[1];
+ last_trap[1] = last_trap[2];
+ last_trap[2] = last_trap[3];
+ last_trap[3] = type;
+ }
+ cnt.v_trap++;
+ if ((p = curproc) == NULL)
+ p = &proc0;
+
+ if (USERMODE(frame->epsr)) {
+ sticks = p->p_sticks;
+ type += T_USER;
+ p->p_md.md_tf = frame; /* for ptrace/signals */
+ fault_type = 0;
+ fault_code = 0;
+ }
+/* printf("trap 0x%x ", type); */
+ switch (type) {
+ default:
+ panictrap(frame->vector, frame);
+ /*NOTREACHED*/
#if defined(DDB)
- case T_KDB_BREAK:
- /*FALLTHRU*/
- case T_KDB_BREAK+T_USER:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
- case T_KDB_ENTRY:
- /*FALLTHRU*/
- case T_KDB_ENTRY+T_USER:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
-
-#if 0
- case T_ILLFLT:
- {
- int s = db_splhigh();
- db_enable_interrupt();
- ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
- "error fault", (db_regs_t*)frame);
- db_disable_interrupt();
- db_splx(s);
- return;
- }
-#endif /* 0 */
+ case T_KDB_BREAK:
+ /*FALLTHRU*/
+ case T_KDB_BREAK+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+ case T_KDB_ENTRY:
+ /*FALLTHRU*/
+ case T_KDB_ENTRY+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+
+ #if 0
+ case T_ILLFLT:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
+ "error fault", (db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+ #endif /* 0 */
#endif /* DDB */
-
- case T_MISALGNFLT:
- DEBUG_MSG("kernel misalgined "
- "access exception @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
- break;
-
- case T_INSTFLT:
- /* kernel mode instruction access fault.
- * Should never, never happen for a non-paged kernel.
- */
- DEBUG_MSG("kernel mode instruction "
- "page fault @ 0x%08x\n", frame->sxip);
- panictrap(frame->vector, frame);
- break;
-
- case T_DATAFLT:
- /* kernel mode data fault */
- /*
- * If the faulting address is in user space, handle it in
- * the context of the user process. Else, use kernel map.
- */
-
- if (type == T_DATAFLT) {
- fault_addr = frame->dma0;
- if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
- } else {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
- } else {
- fault_addr = frame->sxip & XIP_ADDR;
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
-
- va = trunc_page((vm_offset_t)fault_addr);
-
- vm = p->p_vmspace;
- map = &vm->vm_map;
-
- /* data fault on a kernel address... */
- if (frame->dmt0 & DMT_DAS)
- map = kernel_map;
-
- /*
- * We don't want to call vm_fault() if it is fuwintr() or
- * suwintr(). These routines are for copying from interrupt
- * context and vm_fault() can potentially sleep. You may
- * wonder if it isn't bad karma for an interrupt handler to
- * touch the current process. Indeed it is, but clock interrupt
- * does it while doing profiling. It is OK in that context.
- */
-
- if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
- p->p_addr->u_pcb.pcb_onfault == (int)subail)
- goto outtahere;
-
- /* data fault on the user address */
- if (type == T_DATAFLT && (frame->dmt0 & DMT_DAS) == 0)
- {
- type = T_DATAFLT + T_USER;
- goto user_fault;
- }
-
- /*
- * If it is a guarded access, bus error is OK.
- */
-
- if ((frame->dpfsr >> 16 & 0x7) == 0x3) {
-#ifdef DIAGNOSTIC
-#if DDB
- printf("sxip %x dpfsr %x\n", frame->sxip, frame->dpfsr);
- gimmeabreak();
-#endif
-#endif
- }
-
- if ((frame->dpfsr >> 16 & 0x7) == 0x3 && /* bus error */
- (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
- (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
-
- frame->snip = ((unsigned)&guarded_access_bad ) | FIP_V;
- frame->sfip = ((unsigned)&guarded_access_bad + 4) | FIP_V;
- frame->sxip = 0;
- frame->dmt0 = 0;/* XXX what about other trans. in data unit */
- frame->dpfsr = 0;
- return;
- }
- /*
- * On a segment or a page fault, call vm_fault() to resolve
- * the fault.
- */
-
- if ((frame->dpfsr >> 16 & 0x7) == 0x4 /* seg fault */
- || (frame->dpfsr >> 16 & 0x7) == 0x5) { /* page fault */
- result = vm_fault(map, va, ftype, FALSE);
-
- if (result == KERN_SUCCESS) {
- /*
- * We could resolve the fault. Call
- * data_access_emulation to drain the data unit pipe
- * line and reset dmt0 so that trap won't get called
- * again. For inst faults, back up the pipe line.
- */
- if (type == T_DATAFLT) {
- data_access_emulation(frame);
- frame->dmt0 = 0;
- frame->dpfsr = 0;
- } else {
- frame->sfip = frame->snip & ~FIP_E;
- frame->snip = frame->sxip & ~NIP_E;
- }
- return;
- }
- }
-
- /*
- * if still the fault is not resolved ...
- */
- if (!p->p_addr->u_pcb.pcb_onfault)
- panictrap(frame->vector, frame);
-
- outtahere:
- frame->snip = ((unsigned)p->p_addr->u_pcb.pcb_onfault ) | FIP_V;
- frame->sfip = ((unsigned)p->p_addr->u_pcb.pcb_onfault + 4) | FIP_V;
- frame->sxip = 0;
- frame->dmt0 = 0; /* XXX what about other trans. in data unit */
- frame->dpfsr = 0;
- return;
-
- case T_INSTFLT+T_USER:
- /* User mode instruction access fault */
- /*FALLTHRU*/
- case T_DATAFLT+T_USER:
- user_fault:
-
- if (type == T_INSTFLT+T_USER){
- fault_addr = frame->sxip & XIP_ADDR;
- }else{
- fault_addr = frame->dma0;
- }
-
- if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
- } else {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
- }
-
- va = trunc_page((vm_offset_t)fault_addr);
-
- vm = p->p_vmspace;
- map = &vm->vm_map;
-
- /* Call vm_fault() to resolve non-bus error faults */
-
- if ((frame->ipfsr >> 16 & 0x7) != 0x3 &&
- (frame->dpfsr >> 16 & 0x7) != 0x3) {
-
- result = vm_fault(map, va, ftype, FALSE);
- frame->ipfsr = frame->dpfsr = 0;
-
-/* printf("vm_fault(%x, %x, %x, 0) -> %x\n",
- map, va, ftype, result);
+ case T_INT:
+ case T_INT+T_USER:
+		/* This function pointer is set in machdep.c.
+		   It calls m188_ext_int or sbc_ext_int, depending
+		   on the value of cputyp. - smurph */
+ (*mdfp.interrupt_func)(T_INT, frame);
+ return;
+
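Dispatching through mdfp.interrupt_func lets one trap() serve both boards without testing cputyp on every interrupt; the board-specific handler is installed once at boot. A hedged sketch of the pattern (the struct layout and setup function are assumptions based on the comment, not copied from machdep.c):

	/* Board-specific interrupt dispatch via a function pointer
	 * filled in once at boot. Names here are illustrative. */
	struct md_funcs {
		void (*interrupt_func)(unsigned, struct m88100_saved_state *);
	};

	struct md_funcs mdfp;

	void m188_ext_int(unsigned, struct m88100_saved_state *);
	void sbc_ext_int(unsigned, struct m88100_saved_state *);

	void
	interrupt_setup(int is_mvme188)
	{
		mdfp.interrupt_func = is_mvme188 ? m188_ext_int : sbc_ext_int;
	}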
+ case T_MISALGNFLT:
+		DEBUG_MSG("kernel misaligned "
+ "access exception @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_INSTFLT:
+ /* kernel mode instruction access fault.
+ * Should never, never happen for a non-paged kernel.
+ */
+ DEBUG_MSG("kernel mode instruction "
+ "page fault @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_DATAFLT:
+ /* kernel mode data fault */
+ /*
+ * If the faulting address is in user space, handle it in
+ * the context of the user process. Else, use kernel map.
+ */
+
+ if (type == T_DATAFLT) {
+ fault_addr = frame->dma0;
+ if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ } else {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+ } else {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* data fault on a kernel address... */
+ if (frame->dmt0 & DMT_DAS)
+ map = kernel_map;
+
+ /*
+ * We don't want to call vm_fault() if it is fuwintr() or
+ * suwintr(). These routines are for copying from interrupt
+ * context and vm_fault() can potentially sleep. You may
+ * wonder if it isn't bad karma for an interrupt handler to
+ * touch the current process. Indeed it is, but clock interrupt
+ * does it while doing profiling. It is OK in that context.
+ */
+
+ if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
+ p->p_addr->u_pcb.pcb_onfault == (int)subail)
+ goto outtahere;
+
+ /* data fault on the user address */
+ if (type == T_DATAFLT && (frame->dmt0 & DMT_DAS) == 0) {
+ type = T_DATAFLT + T_USER;
+ goto user_fault;
+ }
+
+ /*
+ * If it is a guarded access, bus error is OK.
+ */
+
+ if ((frame->dpfsr >> 16 & 0x7) == 0x3 && /* bus error */
+ (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
+ (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
+
+ frame->snip = ((unsigned)&guarded_access_bad ) | FIP_V;
+ frame->sfip = ((unsigned)&guarded_access_bad + 4) | FIP_V;
+ frame->sxip = 0;
+ frame->dmt0 = 0;/* XXX what about other trans. in data unit */
+ frame->dpfsr = 0;
+ return;
+ }
+
+ /*
+ * On a segment or a page fault, call vm_fault() to resolve
+ * the fault.
+ */
+ if ((frame->dpfsr >> 16 & 0x7) == 0x4 /* seg fault */
+ || (frame->dpfsr >> 16 & 0x7) == 0x5) { /* page fault */
+ result = vm_fault(map, va, ftype, FALSE);
+ /*
+ printf("vm_fault(map 0x%x, va 0x%x, ftype 0x%x, FALSE) -> %d (%s)\n",
+ map, va, ftype, result,
+ result ? "KERN_INVALID_ADDRESS" : "KERN_SUCCESS");
+ */
+ if (result == KERN_SUCCESS) {
+ /*
+ * We could resolve the fault. Call
+ * data_access_emulation to drain the data unit pipe
+ * line and reset dmt0 so that trap won't get called
+ * again. For inst faults, back up the pipe line.
+ */
+ if (type == T_DATAFLT) {
+ /*
+ printf("calling data_access_emulation()\n");
+ */
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ } else {
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ return;
+ }
+ }
+ /*
+ printf ("PBUS Fault %d (%s) va = 0x%x\n", ((frame->dpfsr >> 16) & 0x7),
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7], va);
+ */
+ /*
+ * if still the fault is not resolved ...
+ */
+ if (!p->p_addr->u_pcb.pcb_onfault)
+ panictrap(frame->vector, frame);
+
+ outtahere:
+ frame->snip = ((unsigned)p->p_addr->u_pcb.pcb_onfault ) | FIP_V;
+ frame->sfip = ((unsigned)p->p_addr->u_pcb.pcb_onfault + 4) | FIP_V;
+ frame->sxip = 0;
+ frame->dmt0 = 0; /* XXX what about other trans. in data unit */
+ frame->dpfsr = 0;
+ return;
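The pcb_onfault/outtahere pair is the kernel's recovery net for routines such as copyin() that may legitimately fault on a user address: they record a recovery label before the risky access, and the trap handler reroutes the pipeline there instead of panicking. A minimal user-space sketch of the idea, with setjmp/longjmp standing in for the pipeline rewrite (purely illustrative):

	#include <setjmp.h>
	#include <string.h>

	static jmp_buf faultbuf;
	static int onfault;		/* plays the role of pcb_onfault */

	/* Trap-handler side: resume at the label if one is armed,
	 * roughly what the outtahere code above does via snip/sfip. */
	void
	handle_fault(void)
	{
		if (onfault)
			longjmp(faultbuf, 1);
		/* otherwise: panictrap() */
	}

	/* copyin-style routine: arm the label around the risky copy. */
	int
	safe_copy(char *dst, const char *src, unsigned len)
	{
		if (setjmp(faultbuf)) {
			onfault = 0;
			return -1;	/* fault taken: report EFAULT */
		}
		onfault = 1;
		memcpy(dst, src, len);	/* may fault on a bad address */
		onfault = 0;
		return 0;
	}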
+ case T_INSTFLT+T_USER:
+ /* User mode instruction access fault */
+ /*FALLTHRU*/
+ case T_DATAFLT+T_USER:
+ user_fault:
+/* printf("\nUser Data access fault (%s) v = %x, frame %x\n",
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
+ frame->sxip & ~3, frame);
*/
- }
-
- if ((caddr_t)va >= vm->vm_maxsaddr) {
- if (result == KERN_SUCCESS) {
- nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
- if (nss > vm->vm_ssize)
- vm->vm_ssize = nss;
- } else if (result == KERN_PROTECTION_FAILURE)
- result = KERN_INVALID_ADDRESS;
- }
-
- if (result == KERN_SUCCESS) {
- if (type == T_DATAFLT+T_USER) {
- /*
- * We could resolve the fault. Call
- * data_access_emulation to drain the data unit
- * pipe line and reset dmt0 so that trap won't
- * get called again.
- */
- data_access_emulation(frame);
- frame->dmt0 = 0;
- frame->dpfsr = 0;
- } else {
- /* back up SXIP, SNIP clearing the the Error bit */
- frame->sfip = frame->snip & ~FIP_E;
- frame->snip = frame->sxip & ~NIP_E;
- }
- } else {
- sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
- fault_type = result == KERN_PROTECTION_FAILURE ? BUS_ADRERR
- : SEGV_MAPERR;
- }
-
- break;
-
- case T_MISALGNFLT+T_USER:
+
+ if (type == T_INSTFLT+T_USER) {
+ fault_addr = frame->sxip & XIP_ADDR;
+ } else {
+ fault_addr = frame->dma0;
+ }
+
+ if (frame->dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ } else {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* Call vm_fault() to resolve non-bus error faults */
+ if ((frame->ipfsr >> 16 & 0x7) != 0x3 &&
+ (frame->dpfsr >> 16 & 0x7) != 0x3) {
+
+ result = vm_fault(map, va, ftype, FALSE);
+ frame->ipfsr = frame->dpfsr = 0;
+ /*
+ printf("vm_fault(map 0x%x, va 0x%x, ftype 0x%x, FALSE) -> %d (%s)\n",
+ map, va, ftype, result,
+ result ? "KERN_INVALID_ADDRESS" : "KERN_SUCCESS");
+ */
+ }
+
+ if ((caddr_t)va >= vm->vm_maxsaddr) {
+ if (result == KERN_SUCCESS) {
+ nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (result == KERN_PROTECTION_FAILURE)
+ result = KERN_INVALID_ADDRESS;
+ }
+
+ if (result == KERN_SUCCESS) {
+ if (type == T_DATAFLT+T_USER) {
+ /*
+ printf("calling data_access_emulation()\n");
+ */
+ /*
+ * We could resolve the fault. Call
+ * data_access_emulation to drain the data unit
+ * pipe line and reset dmt0 so that trap won't
+ * get called again.
+ */
+ data_access_emulation(frame);
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ } else {
+				/* back up SXIP, SNIP clearing the Error bit */
+ frame->sfip = frame->snip & ~FIP_E;
+ frame->snip = frame->sxip & ~NIP_E;
+ }
+ } else {
+ sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
+ fault_type = result == KERN_PROTECTION_FAILURE ? BUS_ADRERR
+ : SEGV_MAPERR;
+ }
+ /*
+ printf("sig == %d, fault_type == %d\n", sig, fault_type);
+ */
+ break;
+
+ case T_MISALGNFLT+T_USER:
/* DEBUG_MSG("T_MISALGNFLT\n");*/
- sig = SIGBUS;
- fault_type = BUS_ADRALN;
+ sig = SIGBUS;
+ fault_type = BUS_ADRALN;
/* panictrap(fault_type, frame);*/
- break;
-
- case T_PRIVINFLT+T_USER:
- case T_ILLFLT+T_USER:
- sig = SIGILL;
- break;
-
- case T_BNDFLT+T_USER:
- sig = SIGFPE;
- break;
- case T_ZERODIV+T_USER:
- sig = SIGFPE;
- fault_type = FPE_INTDIV;
- break;
- case T_OVFFLT+T_USER:
- sig = SIGFPE;
- fault_type = FPE_INTOVF;
- break;
-
- case T_FPEPFLT+T_USER:
- case T_FPEIFLT+T_USER:
- sig = SIGFPE;
- break;
-
- case T_SIGTRAP+T_USER:
- sig = SIGTRAP;
- fault_type = TRAP_TRACE;
- break;
-
- case T_STEPBPT+T_USER:
- /*
- * This trap is used by the kernel to support single-step
- * debugging (although any user could generate this trap
- * which should probably be handled differently). When a
- * process is continued by a debugger with the PT_STEP
- * function of ptrace (single step), the kernel inserts
- * one or two breakpoints in the user process so that only
- * one instruction (or two in the case of a delayed branch)
- * is executed. When this breakpoint is hit, we get the
- * T_STEPBPT trap.
- */
+ break;
+
+ case T_PRIVINFLT+T_USER:
+ case T_ILLFLT+T_USER:
+ sig = SIGILL;
+ break;
+
+ case T_BNDFLT+T_USER:
+ sig = SIGFPE;
+ break;
+ case T_ZERODIV+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTDIV;
+ break;
+ case T_OVFFLT+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTOVF;
+ break;
+
+ case T_FPEPFLT+T_USER:
+ case T_FPEIFLT+T_USER:
+ sig = SIGFPE;
+ break;
+
+ case T_SIGTRAP+T_USER:
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+
+ case T_STEPBPT+T_USER:
+ /*
+ * This trap is used by the kernel to support single-step
+ * debugging (although any user could generate this trap
+ * which should probably be handled differently). When a
+ * process is continued by a debugger with the PT_STEP
+ * function of ptrace (single step), the kernel inserts
+ * one or two breakpoints in the user process so that only
+ * one instruction (or two in the case of a delayed branch)
+ * is executed. When this breakpoint is hit, we get the
+ * T_STEPBPT trap.
+ */
+
+ {
+ register unsigned va;
+ unsigned instr;
+ struct uio uio;
+ struct iovec iov;
+
+ /* compute address of break instruction */
+ va = pc;
+
+ /* read break instruction */
+ instr = fuiword((caddr_t)pc);
#if 0
- frame->sfip = frame->snip; /* set up next FIP */
- frame->snip = frame->sxip; /* set up next NIP */
- break;
+ printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
+ p->p_comm, p->p_pid, instr, pc,
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
- {
- register unsigned va;
- unsigned instr;
- struct uio uio;
- struct iovec iov;
-
- /* compute address of break instruction */
- va = pc;
-
- /* read break instruction */
- instr = fuiword((caddr_t)pc);
+ /* check and see if we got here by accident */
+ if ((p->p_md.md_ss_addr != pc &&
+ p->p_md.md_ss_taken_addr != pc) ||
+ instr != SSBREAKPOINT) {
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+ }
+ /* restore original instruction and clear BP */
+ instr = p->p_md.md_ss_instr;
+ va = p->p_md.md_ss_addr;
+ if (va != 0) {
+ iov.iov_base = (caddr_t)&instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ procfs_domem(p, p, NULL, &uio);
+ }
+
+ /* branch taken instruction */
+ instr = p->p_md.md_ss_taken_instr;
+ va = p->p_md.md_ss_taken_addr;
+ if (instr != 0) {
+ iov.iov_base = (caddr_t)&instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)va;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ procfs_domem(p, p, NULL, &uio);
+ }
#if 1
- printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
- p->p_comm, p->p_pid, instr, pc,
- p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = pc; /* set up next NIP */
+ frame->snip |= 2; /* set valid bit */
#endif
- /* check and see if we got here by accident */
-/*
- if (p->p_md.md_ss_addr != pc || instr != BREAKPOINT) {
- sig = SIGTRAP;
- fault_type = TRAP_TRACE;
- break;
- }
+ p->p_md.md_ss_addr = 0;
+ p->p_md.md_ss_instr = 0;
+ p->p_md.md_ss_taken_addr = 0;
+ p->p_md.md_ss_taken_instr = 0;
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ }
+ break;
+
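The restore sequence above undoes what cpu_singlestep() (at the bottom of this file) set up: one saved word at the fall-through address and possibly a second at the taken-branch target, both written back before the SIGTRAP is posted. A toy illustration of the save/plant/restore cycle on a plain buffer, with no ptrace machinery (names and values are illustrative):

	#include <assert.h>

	#define SSBREAKPOINT	0xF000D1F8U	/* same magic word */

	static unsigned text[4] = { 1, 2, 3, 4 };	/* fake user text */
	static unsigned saved_addr, saved_instr;

	static void
	plant(unsigned idx)
	{
		saved_addr = idx;
		saved_instr = text[idx];	/* ~ md_ss_instr */
		text[idx] = SSBREAKPOINT;	/* ~ ss_put_value() */
	}

	static void
	restore(void)
	{
		text[saved_addr] = saved_instr;	/* put the word back */
		saved_addr = saved_instr = 0;	/* ~ clearing md_ss_* */
	}

	int main(void)
	{
		plant(2);
		assert(text[2] == SSBREAKPOINT);
		restore();
		assert(text[2] == 3);
		return 0;
	}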
+ case T_USERBPT+T_USER:
+ /*
+ * This trap is meant to be used by debuggers to implement
+ * breakpoint debugging. When we get this trap, we just
+ * return a signal which gets caught by the debugger.
+ */
+ frame->sfip = frame->snip; /* set up the next FIP */
+ frame->snip = frame->sxip; /* set up the next NIP */
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ break;
+
+ case T_ASTFLT+T_USER:
+ want_ast = 0;
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ break;
+ }
+
+ /*
+ * If trap from supervisor mode, just return
+ */
+ if (SYSTEMMODE(frame->epsr))
+ return;
+
+ if (sig) {
+ sv.sival_int = fault_addr;
+ trapsignal(p, sig, fault_code, fault_type, sv);
+ /*
+ * don't want multiple faults - we are going to
+ * deliver signal.
+ */
+ frame->dmt0 = 0;
+ frame->dpfsr = 0;
+ }
+
+ userret(p, frame, sticks);
+}
+#endif /* defined(MVME187) || defined(MVME188) */
+/*ARGSUSED*/
+#ifdef MVME197
+void
+trap2(unsigned type, struct m88100_saved_state *frame)
+{
+ struct proc *p;
+ u_quad_t sticks = 0;
+ vm_map_t map;
+ vm_offset_t va;
+ vm_prot_t ftype;
+ int fault_type;
+ u_long fault_code;
+ unsigned nss, fault_addr;
+ struct vmspace *vm;
+ union sigval sv;
+ int su = 0;
+ int result;
+ int sig = 0;
+ unsigned pc = PC_REGS(frame); /* get program counter (sxip) */
+ unsigned dsr, isr, user = 0, write = 0, data = 0;
+
+ extern vm_map_t kernel_map;
+ extern int fubail(), subail();
+ extern unsigned guarded_access_start;
+ extern unsigned guarded_access_end;
+ extern unsigned guarded_access_bad;
+
+ cnt.v_trap++;
+ if ((p = curproc) == NULL)
+ p = &proc0;
+
+ if (USERMODE(frame->epsr)) {
+ sticks = p->p_sticks;
+ type += T_USER;
+ p->p_md.md_tf = frame; /* for ptrace/signals */
+ fault_type = 0;
+ fault_code = 0;
+ }
+ printf("m197_trap 0x%x ", type);
+ switch (type) {
+ default:
+ panictrap(frame->vector, frame);
+ /*NOTREACHED*/
+ case T_197_READ+T_USER:
+ user = 1;
+ case T_197_READ:
+ va = (vm_offset_t) frame->dlar;
+		/* if it was a user read, handle it in the context of the user process */
+ if ((frame->dsr & CMMU_DSR_SU) && !user) {
+ map = kernel_map;
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+ result = m197_table_search(map->pmap, va, CMMU_READ, user, CMMU_DATA);
+ if (result) {
+ switch (result) {
+ case 4: /* Seg Fault */
+ frame->dsr |= CMMU_DSR_SI | CMMU_DSR_RW;
+ break;
+ case 5: /* Page Fault */
+ frame->dsr |= CMMU_DSR_PI | CMMU_DSR_RW;
+ break;
+ case 6: /* Supervisor Violation */
+ frame->dsr |= CMMU_DSR_SP | CMMU_DSR_RW;
+ break;
+ }
+ /* table search failed and we are going to report a data fault */
+ if (user) {
+ type = T_DATAFLT+T_USER;
+ goto m197_user_fault;
+ } else {
+ type = T_DATAFLT;
+ goto m197_data_fault;
+ }
+ } else {
+			return; /* PATC successfully loaded */
+ }
+ break;
+ case T_197_WRITE+T_USER:
+ user = 1;
+ case T_197_WRITE:
+		/* if it was a user write, handle it in the context of the user process */
+ if ((frame->dsr & CMMU_DSR_SU) && !user) {
+ map = kernel_map;
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+ va = (vm_offset_t) frame->dlar;
+ result = m197_table_search(map->pmap, va, CMMU_WRITE, user, CMMU_DATA);
+ if (result) {
+ switch (result) {
+ case 4: /* Seg Fault */
+ frame->dsr |= CMMU_DSR_SI;
+ break;
+ case 5: /* Page Fault */
+ frame->dsr |= CMMU_DSR_PI;
+ break;
+ case 6: /* Supervisor Violation */
+ frame->dsr |= CMMU_DSR_SP;
+ break;
+ case 7: /* Write Violation */
+ frame->dsr |= CMMU_DSR_WE;
+ break;
+ }
+ /* table search failed and we are going to report a data fault */
+ if (user) {
+ type = T_DATAFLT+T_USER;
+ goto m197_user_fault;
+ } else {
+ type = T_DATAFLT;
+ goto m197_data_fault;
+ }
+ } else {
+			return; /* PATC successfully loaded */
+ }
+ break;
+ case T_197_INST+T_USER:
+ user = 1;
+ case T_197_INST:
+		/* if it was a user instruction fetch, handle it in the context of the user process */
+ if ((frame->isr & CMMU_ISR_SU) && !user) {
+ map = kernel_map;
+ } else {
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+ }
+ va = (vm_offset_t) frame->sxip;
+ result = m197_table_search(map->pmap, va, CMMU_READ, user, CMMU_INST);
+ if (result) {
+ switch (result) {
+ case 4: /* Seg Fault */
+ frame->isr |= CMMU_ISR_SI;
+ break;
+ case 5: /* Page Fault */
+ frame->isr |= CMMU_ISR_PI;
+ break;
+ case 6: /* Supervisor Violation */
+ frame->isr |= CMMU_ISR_SP;
+ break;
+ }
+			/* table search failed and we are going to report an instruction fault */
+ if (user) {
+ type = T_INSTFLT+T_USER;
+ goto m197_user_fault;
+ } else {
+ type = T_INSTFLT;
+ goto m197_inst_fault;
+ }
+ } else {
+			return; /* PATC successfully loaded */
+ }
+ break;
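All three T_197_* cases share one shape: retry the address translation in software with m197_table_search(), and on failure fold the numeric result (which matches the pbus_exception_type indices above) into the CMMU status register before falling through to the generic fault path. A hedged sketch of that translation for the read case (the bit values here are placeholders; the real ones come from machine/m88110.h):

	/* Placeholder values; see machine/m88110.h for the real bits. */
	#define CMMU_DSR_RW	0x01
	#define CMMU_DSR_SI	0x04
	#define CMMU_DSR_PI	0x08
	#define CMMU_DSR_SP	0x10

	/* Map an m197_table_search() result onto DSR bits for a read
	 * fault, as the T_197_READ case above does. */
	unsigned
	dsr_bits_for_read_fault(int result)
	{
		switch (result) {
		case 4:		/* Segment Fault */
			return CMMU_DSR_SI | CMMU_DSR_RW;
		case 5:		/* Page Fault */
			return CMMU_DSR_PI | CMMU_DSR_RW;
		case 6:		/* Supervisor Violation */
			return CMMU_DSR_SP | CMMU_DSR_RW;
		default:	/* 0: PATC loaded, nothing to report */
			return 0;
		}
	}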
+ #if defined(DDB)
+ case T_KDB_BREAK:
+ /*FALLTHRU*/
+ case T_KDB_BREAK+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_break_trap(T_KDB_BREAK,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+ case T_KDB_ENTRY:
+ /*FALLTHRU*/
+ case T_KDB_ENTRY+T_USER:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_entry_trap(T_KDB_ENTRY,(db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+
+ #if 0
+ case T_ILLFLT:
+ {
+ int s = db_splhigh();
+ db_enable_interrupt();
+ ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
+ "error fault", (db_regs_t*)frame);
+ db_disable_interrupt();
+ db_splx(s);
+ return;
+ }
+ #endif /* 0 */
+ #endif /* DDB */
+ case T_ILLFLT:
+		DEBUG_MSG("unimplemented opcode "
+			  "fault @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+
+ case T_MISALGNFLT:
+		DEBUG_MSG("kernel misaligned "
+ "access exception @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_INSTFLT:
+m197_inst_fault:
+ /* kernel mode instruction access fault.
+ * Should never, never happen for a non-paged kernel.
+ */
+ DEBUG_MSG("kernel mode instruction "
+ "page fault @ 0x%08x\n", frame->sxip);
+ panictrap(frame->vector, frame);
+ break;
+
+ case T_DATAFLT:
+ /* kernel mode data fault */
+ /*
+ * If the faulting address is in user space, handle it in
+ * the context of the user process. Else, use kernel map.
+ */
+m197_data_fault:
+ if (type == T_DATAFLT) {
+ fault_addr = frame->dlar;
+ if (frame->dsr & CMMU_DSR_RW) {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ } else {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ write = 1;
+ }
+ data = 1;
+ } else {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* data fault on a kernel address... */
+ if (type == T_DATAFLT) {
+ if (frame->dsr & CMMU_DSR_SU) {
+ map = kernel_map;
+ }
+ }
+
+ /*
+ * We don't want to call vm_fault() if it is fuwintr() or
+ * suwintr(). These routines are for copying from interrupt
+ * context and vm_fault() can potentially sleep. You may
+ * wonder if it isn't bad karma for an interrupt handler to
+ * touch the current process. Indeed it is, but clock interrupt
+ * does it while doing profiling. It is OK in that context.
+ */
+
+ if (p->p_addr->u_pcb.pcb_onfault == (int)fubail ||
+ p->p_addr->u_pcb.pcb_onfault == (int)subail)
+ goto m197_outtahere;
+
+ /* data fault on the user address */
+ if (type == T_DATAFLT && (frame->dsr & CMMU_DSR_SU) == 0) {
+ type = T_DATAFLT + T_USER;
+ goto m197_user_fault;
+ }
+
+ /*
+ * If it is a guarded access, bus error is OK.
+ */
+
+ if ((frame->dsr & CMMU_DSR_BE) && /* bus error */
+ (frame->sxip & ~3) >= (unsigned)&guarded_access_start &&
+ (frame->sxip & ~3) <= (unsigned)&guarded_access_end) {
+ return;
+ }
+
+ /*
+ * On a segment or a page fault, call vm_fault() to resolve
+ * the fault.
+ */
+ result = m197_table_search(map->pmap, va, write, 1, data);
+/* todo
+ switch (result) {
+ case :
+ }
+*/
+ if (type == T_DATAFLT) {
+ if ((frame->dsr & CMMU_DSR_SI) /* seg fault */
+ || (frame->dsr & CMMU_DSR_PI)) { /* page fault */
+ result = vm_fault(map, va, ftype, FALSE);
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ } else {
+ if ((frame->isr & CMMU_ISR_SI) /* seg fault */
+ || (frame->isr & CMMU_ISR_PI)) { /* page fault */
+ result = vm_fault(map, va, ftype, FALSE);
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ }
+
+ /*
+ printf ("PBUS Fault %d (%s) va = 0x%x\n", ((frame->dpfsr >> 16) & 0x7),
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7], va);
+ */
+ /*
+ * if still the fault is not resolved ...
+ */
+ if (!p->p_addr->u_pcb.pcb_onfault)
+ panictrap(frame->vector, frame);
+
+m197_outtahere:
+ frame->sxip = ((unsigned)p->p_addr->u_pcb.pcb_onfault);
+ return;
+ case T_INSTFLT+T_USER:
+ /* User mode instruction access fault */
+ /*FALLTHRU*/
+ case T_DATAFLT+T_USER:
+m197_user_fault:
+/* printf("\nUser Data access fault (%s) v = %x, frame %x\n",
+ pbus_exception_type[(frame->dpfsr >> 16) & 0x7],
+ frame->sxip & ~3, frame);
*/
- /* restore original instruction and clear BP */
- /*sig = suiword((caddr_t)pc, p->p_md.md_ss_instr);*/
- instr = p->p_md.md_ss_instr;
- if (instr == 0){
- printf("Warning: can't restore instruction at %x: %x\n",
- p->p_md.md_ss_addr, p->p_md.md_ss_instr);
- } else {
- iov.iov_base = (caddr_t)&instr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)pc;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_WRITE;
- uio.uio_procp = curproc;
- }
-
- frame->sfip = frame->snip; /* set up next FIP */
- frame->snip = frame->sxip; /* set up next NIP */
- frame->snip |= 2; /* set valid bit */
- p->p_md.md_ss_addr = 0;
- sig = SIGTRAP;
- fault_type = TRAP_BRKPT;
- break;
- }
-
- case T_USERBPT+T_USER:
- /*
- * This trap is meant to be used by debuggers to implement
- * breakpoint debugging. When we get this trap, we just
- * return a signal which gets caught by the debugger.
- */
- frame->sfip = frame->snip; /* set up the next FIP */
- frame->snip = frame->sxip; /* set up the next NIP */
- sig = SIGTRAP;
- fault_type = TRAP_BRKPT;
- break;
-
- case T_ASTFLT+T_USER:
- want_ast = 0;
- if (p->p_flag & P_OWEUPC) {
- p->p_flag &= ~P_OWEUPC;
- ADDUPROF(p);
- }
- break;
- }
-
- /*
- * If trap from supervisor mode, just return
- */
- if (SYSTEMMODE(frame->epsr))
- return;
-
- if (sig) {
-/* trapsignal(p, sig, fault_code, fault_type, (caddr_t)fault_addr); */
- sv.sival_int = fault_addr;
- trapsignal(p, sig, fault_code, fault_type, sv);
- /*
- * don't want multiple faults - we are going to
- * deliver signal.
- */
- frame->dmt0 = 0;
- frame->dpfsr = 0;
- }
- userret(p, frame, sticks);
+ if (type == T_INSTFLT+T_USER) {
+ fault_addr = frame->sxip & XIP_ADDR;
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ } else {
+ fault_addr = frame->dlar;
+ if (frame->dsr & CMMU_DSR_RW) {
+ ftype = VM_PROT_READ;
+ fault_code = VM_PROT_READ;
+ } else {
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ fault_code = VM_PROT_WRITE;
+ }
+ }
+
+ va = trunc_page((vm_offset_t)fault_addr);
+
+ vm = p->p_vmspace;
+ map = &vm->vm_map;
+
+ /* Call vm_fault() to resolve non-bus error faults */
+ if (type == T_DATAFLT+T_USER) {
+ if ((frame->dsr & CMMU_DSR_SI) /* seg fault */
+ || (frame->dsr & CMMU_DSR_PI)) { /* page fault */
+ result = vm_fault(map, va, ftype, FALSE);
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ } else {
+ if ((frame->isr & CMMU_ISR_SI) /* seg fault */
+ || (frame->isr & CMMU_ISR_PI)) { /* page fault */
+ result = vm_fault(map, va, ftype, FALSE);
+ if (result == KERN_SUCCESS) {
+ return;
+ }
+ }
+ }
+
+ if ((caddr_t)va >= vm->vm_maxsaddr) {
+ if (result == KERN_SUCCESS) {
+ nss = clrnd(btoc(USRSTACK - va));/* XXX check this */
+ if (nss > vm->vm_ssize)
+ vm->vm_ssize = nss;
+ } else if (result == KERN_PROTECTION_FAILURE)
+ result = KERN_INVALID_ADDRESS;
+ }
+
+ if (result != KERN_SUCCESS) {
+ sig = result == KERN_PROTECTION_FAILURE ? SIGBUS : SIGSEGV;
+ fault_type = result == KERN_PROTECTION_FAILURE ? BUS_ADRERR
+ : SEGV_MAPERR;
+ } else {
+ return;
+ }
+ /*
+ printf("sig == %d, fault_type == %d\n", sig, fault_type);
+ */
+ break;
+
+ case T_MISALGNFLT+T_USER:
+/* DEBUG_MSG("T_MISALGNFLT\n");*/
+ sig = SIGBUS;
+ fault_type = BUS_ADRALN;
+/* panictrap(fault_type, frame);*/
+ break;
+
+ case T_PRIVINFLT+T_USER:
+ case T_ILLFLT+T_USER:
+ sig = SIGILL;
+ break;
+
+ case T_BNDFLT+T_USER:
+ sig = SIGFPE;
+ break;
+ case T_ZERODIV+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTDIV;
+ break;
+ case T_OVFFLT+T_USER:
+ sig = SIGFPE;
+ fault_type = FPE_INTOVF;
+ break;
+
+ case T_FPEPFLT+T_USER:
+ case T_FPEIFLT+T_USER:
+ sig = SIGFPE;
+ break;
+
+ case T_SIGTRAP+T_USER:
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+
+ case T_STEPBPT+T_USER:
+ /*
+ * This trap is used by the kernel to support single-step
+ * debugging (although any user could generate this trap
+ * which should probably be handled differently). When a
+ * process is continued by a debugger with the PT_STEP
+ * function of ptrace (single step), the kernel inserts
+ * one or two breakpoints in the user process so that only
+ * one instruction (or two in the case of a delayed branch)
+ * is executed. When this breakpoint is hit, we get the
+ * T_STEPBPT trap.
+ */
+ #if 0
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = frame->sxip; /* set up next NIP */
+ break;
+ #endif
+ {
+ register unsigned va;
+ unsigned instr;
+ struct uio uio;
+ struct iovec iov;
+
+ /* compute address of break instruction */
+ va = pc;
+
+ /* read break instruction */
+ instr = fuiword((caddr_t)pc);
+ #if 1
+ printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
+ p->p_comm, p->p_pid, instr, pc,
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
+ #endif
+ /* check and see if we got here by accident */
+/*
+ if (p->p_md.md_ss_addr != pc || instr != SSBREAKPOINT) {
+ sig = SIGTRAP;
+ fault_type = TRAP_TRACE;
+ break;
+ }
+*/
+ /* restore original instruction and clear BP */
+ /*sig = suiword((caddr_t)pc, p->p_md.md_ss_instr);*/
+ instr = p->p_md.md_ss_instr;
+ if (instr == 0) {
+ printf("Warning: can't restore instruction at %x: %x\n",
+ p->p_md.md_ss_addr, p->p_md.md_ss_instr);
+ } else {
+ iov.iov_base = (caddr_t)&instr;
+ iov.iov_len = sizeof(int);
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)pc;
+ uio.uio_resid = sizeof(int);
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+				uio.uio_procp = curproc;
+				/* actually write the restored word back */
+				procfs_domem(p, p, NULL, &uio);
+			}
+
+ frame->sfip = frame->snip; /* set up next FIP */
+ frame->snip = frame->sxip; /* set up next NIP */
+ frame->snip |= 2; /* set valid bit */
+ p->p_md.md_ss_addr = 0;
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ break;
+ }
+
+ case T_USERBPT+T_USER:
+ /*
+ * This trap is meant to be used by debuggers to implement
+ * breakpoint debugging. When we get this trap, we just
+ * return a signal which gets caught by the debugger.
+ */
+ frame->sfip = frame->snip; /* set up the next FIP */
+ frame->snip = frame->sxip; /* set up the next NIP */
+ sig = SIGTRAP;
+ fault_type = TRAP_BRKPT;
+ break;
+
+ case T_ASTFLT+T_USER:
+ want_ast = 0;
+ if (p->p_flag & P_OWEUPC) {
+ p->p_flag &= ~P_OWEUPC;
+ ADDUPROF(p);
+ }
+ break;
+ }
+
+ /*
+ * If trap from supervisor mode, just return
+ */
+ if (SYSTEMMODE(frame->epsr))
+ return;
+
+ if (sig) {
+ sv.sival_int = fault_addr;
+ trapsignal(p, sig, fault_code, fault_type, sv);
+ /*
+ * don't want multiple faults - we are going to
+ * deliver signal.
+ */
+ frame->dsr = 0;
+ }
+ userret(p, frame, sticks);
+}
+#endif /* MVME197 */
+void
+test_trap2(int num, int m197)
+{
+ DEBUG_MSG("\n[test_trap (Good News[tm]) m197 = %d, vec = %d]\n", m197, num);
+ bugreturn();
}
void
test_trap(struct m88100_saved_state *frame)
{
- DEBUG_MSG("\n[test_trap (Good News[tm]) frame 0x%08x]\n", frame);
- regdump((struct trapframe*)frame);
- bugreturn();
+ DEBUG_MSG("\n[test_trap (Good News[tm]) frame 0x%08x]\n", frame);
+ regdump((struct trapframe*)frame);
+ bugreturn();
}
void
error_fault(struct m88100_saved_state *frame)
{
- DEBUG_MSG("\n[ERROR FAULT (Bad News[tm]) frame 0x%08x]\n", frame);
-
+ DEBUG_MSG("\n[ERROR EXCEPTION (Bad News[tm]) frame 0x%08x]\n", frame);
+ regdump((struct trapframe*)frame);
+ DEBUG_MSG("trap trace %x -> %x -> %x -> %x\n", last_trap[0], last_trap[1], last_trap[2], last_trap[3]);
#if DDB
- gimmeabreak();
- DEBUG_MSG("[you really can't restart after an error fault.]\n");
- gimmeabreak();
+ gimmeabreak();
+ DEBUG_MSG("[you really can't restart after an error exception.]\n");
+ gimmeabreak();
#endif /* DDB */
- bugreturn(); /* This gets us to Bug instead of a loop forever */
+ bugreturn(); /* This gets us to Bug instead of a loop forever */
}
void
error_reset(struct m88100_saved_state *frame)
{
- DEBUG_MSG("\n[ERROR RESET (Really Bad News[tm]) frame 0x%08x]\n", frame);
+ DEBUG_MSG("\n[RESET EXCEPTION (Really Bad News[tm]) frame 0x%08x]\n", frame);
+ DEBUG_MSG("This is usually caused by a branch to a NULL function pointer.\n");
+ DEBUG_MSG("Use the debugger trace command to track it down.\n");
#if DDB
- gimmeabreak();
- DEBUG_MSG("[It's useless to restart after an error reset. You might as well reboot.]\n");
- gimmeabreak();
+ gimmeabreak();
+ DEBUG_MSG("[It's useless to restart after a reset exception. You might as well reboot.]\n");
+ gimmeabreak();
#endif /* DDB */
- bugreturn(); /* This gets us to Bug instead of a loop forever */
+ bugreturn(); /* This gets us to Bug instead of a loop forever */
}
syscall(register_t code, struct m88100_saved_state *tf)
{
- register int i, nsys, *ap, nap;
- register struct sysent *callp;
- register struct proc *p;
- int error, new;
- struct args {
- int i[8];
- } args;
- int rval[2];
- u_quad_t sticks;
- extern struct pcb *curpcb;
+ register int i, nsys, *ap, nap;
+ register struct sysent *callp;
+ register struct proc *p;
+ int error, new;
+ struct args {
+ int i[8];
+ } args;
+ int rval[2];
+ u_quad_t sticks;
+ extern struct pcb *curpcb;
- cnt.v_syscall++;
+ cnt.v_syscall++;
- p = curproc;
+ p = curproc;
- callp = p->p_emul->e_sysent;
- nsys = p->p_emul->e_nsysent;
+ callp = p->p_emul->e_sysent;
+ nsys = p->p_emul->e_nsysent;
#ifdef DIAGNOSTIC
- if (USERMODE(tf->epsr) == 0)
- panic("syscall");
- if (curpcb != &p->p_addr->u_pcb)
- panic("syscall curpcb/ppcb");
- if (tf != (struct trapframe *)&curpcb->user_state)
- panic("syscall trapframe");
+ if (USERMODE(tf->epsr) == 0)
+ panic("syscall");
+ if (curpcb != &p->p_addr->u_pcb)
+ panic("syscall curpcb/ppcb");
+ if (tf != (struct trapframe *)&curpcb->user_state)
+ panic("syscall trapframe");
#endif
- sticks = p->p_sticks;
- p->p_md.md_tf = tf;
-
- /*
- * For 88k, all the arguments are passed in the registers (r2-r12)
- * For syscall (and __syscall), r2 (and r3) has the actual code.
- * __syscall takes a quad syscall number, so that other
- * arguments are at their natural alignments.
- */
- ap = &tf->r[2];
- nap = 6;
-
- switch (code) {
- case SYS_syscall:
- code = *ap++;
- nap--;
- break;
- case SYS___syscall:
- if (callp != sysent)
- break;
- code = ap[_QUAD_LOWWORD];
- ap += 2;
- nap -= 2;
- break;
- }
-
- /* Callp currently points to syscall, which returns ENOSYS. */
-
- if (code < 0 || code >= nsys)
- callp += p->p_emul->e_nosys;
- else {
- callp += code;
- i = callp->sy_argsize / sizeof(register_t);
- if (i > 8)
- panic("syscall nargs");
- /*
- * just copy them; syscall stub made sure all the
- * args are moved from user stack to registers.
- */
- bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
- }
+ sticks = p->p_sticks;
+ p->p_md.md_tf = tf;
+
+ /*
+ * For 88k, all the arguments are passed in the registers (r2-r12)
+ * For syscall (and __syscall), r2 (and r3) has the actual code.
+ * __syscall takes a quad syscall number, so that other
+ * arguments are at their natural alignments.
+ */
+ ap = &tf->r[2];
+ nap = 6;
+
+ switch (code) {
+ case SYS_syscall:
+ code = *ap++;
+ nap--;
+ break;
+ case SYS___syscall:
+ if (callp != sysent)
+ break;
+ code = ap[_QUAD_LOWWORD];
+ ap += 2;
+ nap -= 2;
+ break;
+ }
+
+ /* Callp currently points to syscall, which returns ENOSYS. */
+
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else {
+ callp += code;
+ i = callp->sy_argsize / sizeof(register_t);
+ if (i > 8)
+ panic("syscall nargs");
+ /*
+ * just copy them; syscall stub made sure all the
+ * args are moved from user stack to registers.
+ */
+ bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
+ }
+#ifdef SYSCALL_DEBUG
+ scdebug_call(p, code, args.i);
+#endif
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p->p_tracep, code, callp->sy_argsize, args.i);
+#endif
+ rval[0] = 0;
+ rval[1] = 0;
+ error = (*callp->sy_call)(p, &args, rval);
+ /*
+ * system call will look like:
+ * ld r10, r31, 32; r10,r11,r12 might be garbage.
+ * ld r11, r31, 36
+ * ld r12, r31, 40
+ * or r13, r0, <code>
+ * tb0 0, r0, <128> <- xip
+ * br err <- nip
+ * jmp r1 <- fip
+ * err: or.u r3, r0, hi16(errno)
+ * st r2, r3, lo16(errno)
+ * subu r2, r0, 1
+ * jmp r1
+ *
+ * So, when we take syscall trap, sxip/snip/sfip will be as
+ * shown above.
+ * Given this,
+ * 1. If the system call returned 0, need to skip nip.
+ * nip = fip, fip += 4
+ * (doesn't matter what fip + 4 will be but we will never
+ * execute this since jmp r1 at nip will change the execution flow.)
+ * 2. If the system call returned an errno > 0, plug the value
+ * in r2, and leave nip and fip unchanged. This will have us
+ * executing "br err" on return to user space.
+	 *    we need to re-execute the trap instruction. Back up the pipe
+ * we need to rexecute the trap instruction. Back up the pipe
+ * line.
+ * fip = nip, nip = xip
+ * 4. If the system call returned EJUSTRETURN, don't need to adjust
+ * any pointers.
+ */
+
+ if (error == 0) {
+ /*
+ * If fork succeeded and we are the child, our stack
+ * has moved and the pointer tf is no longer valid,
+ * and p is wrong. Compute the new trapframe pointer.
+ * (The trap frame invariably resides at the
+ * tippity-top of the u. area.)
+ */
+ p = curproc;
+ tf = USER_REGS(p);
+ tf->r[2] = rval[0];
+ tf->r[3] = rval[1];
+ tf->epsr &= ~PSR_C;
+ tf->snip = tf->sfip & ~FIP_E;
+ tf->sfip = tf->snip + 4;
+ } else if (error > 0) {
+ /* error != ERESTART && error != EJUSTRETURN*/
+ tf->r[2] = error;
+ tf->epsr |= PSR_C; /* fail */
+ tf->snip = tf->snip & ~NIP_E;
+ tf->sfip = tf->sfip & ~FIP_E;
+ } else if (error == ERESTART) {
+ /*
+ * If (error == ERESTART), back up the pipe line. This
+ * will end up reexecuting the trap.
+ */
+ tf->epsr &= ~PSR_C;
+ tf->sfip = tf->snip & ~NIP_E;
+ tf->snip = tf->sxip & ~NIP_E;
+ } else {
+ /* if (error == EJUSTRETURN), leave the ip's alone */
+ tf->epsr &= ~PSR_C;
+ }
+#ifdef SYSCALL_DEBUG
+ scdebug_ret(p, code, error, rval);
+#endif
+ userret(p, tf, sticks);
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p->p_tracep, code, error, rval[0]);
+#endif
+}
+
+/* Instruction pointers operate differently on the mc88110 */
+m197_syscall(register_t code, struct m88100_saved_state *tf)
+{
+ register int i, nsys, *ap, nap;
+ register struct sysent *callp;
+ register struct proc *p;
+ int error, new;
+ struct args {
+ int i[8];
+ } args;
+ int rval[2];
+ u_quad_t sticks;
+ extern struct pcb *curpcb;
+
+ cnt.v_syscall++;
+
+ p = curproc;
+
+ callp = p->p_emul->e_sysent;
+ nsys = p->p_emul->e_nsysent;
+
+#ifdef DIAGNOSTIC
+ if (USERMODE(tf->epsr) == 0)
+ panic("syscall");
+ if (curpcb != &p->p_addr->u_pcb)
+ panic("syscall curpcb/ppcb");
+ if (tf != (struct trapframe *)&curpcb->user_state)
+ panic("syscall trapframe");
+#endif
+
+ sticks = p->p_sticks;
+ p->p_md.md_tf = tf;
+
+ /*
+ * For 88k, all the arguments are passed in the registers (r2-r12)
+ * For syscall (and __syscall), r2 (and r3) has the actual code.
+ * __syscall takes a quad syscall number, so that other
+ * arguments are at their natural alignments.
+ */
+ ap = &tf->r[2];
+ nap = 6;
+
+ switch (code) {
+ case SYS_syscall:
+ code = *ap++;
+ nap--;
+ break;
+ case SYS___syscall:
+ if (callp != sysent)
+ break;
+ code = ap[_QUAD_LOWWORD];
+ ap += 2;
+ nap -= 2;
+ break;
+ }
+
+ /* Callp currently points to syscall, which returns ENOSYS. */
+
+ if (code < 0 || code >= nsys)
+ callp += p->p_emul->e_nosys;
+ else {
+ callp += code;
+ i = callp->sy_argsize / sizeof(register_t);
+ if (i > 8)
+ panic("syscall nargs");
+ /*
+ * just copy them; syscall stub made sure all the
+ * args are moved from user stack to registers.
+ */
+ bcopy((caddr_t)ap, (caddr_t)args.i, i * sizeof(register_t));
+ }
#ifdef SYSCALL_DEBUG
- scdebug_call(p, code, args.i);
+ scdebug_call(p, code, args.i);
#endif
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSCALL))
- ktrsyscall(p->p_tracep, code, callp->sy_argsize, args.i);
+ if (KTRPOINT(p, KTR_SYSCALL))
+ ktrsyscall(p->p_tracep, code, callp->sy_argsize, args.i);
#endif
- rval[0] = 0;
- rval[1] = 0;
- error = (*callp->sy_call)(p, &args, rval);
- /*
- * system call will look like:
- * ld r10, r31, 32; r10,r11,r12 might be garbage.
- * ld r11, r31, 36
- * ld r12, r31, 40
- * or r13, r0, <code>
- * tb0 0, r0, <128> <- xip
- * br err <- nip
- * jmp r1 <- fip
- * err: or.u r3, r0, hi16(errno)
- * st r2, r3, lo16(errno)
- * subu r2, r0, 1
- * jmp r1
- *
- * So, when we take syscall trap, sxip/snip/sfip will be as
- * shown above.
- * Given this,
- * 1. If the system call returned 0, need to skip nip.
- * nip = fip, fip += 4
- * (doesn't matter what fip + 4 will be but we will never
- * execute this since jmp r1 at nip will change the execution flow.)
- * 2. If the system call returned an errno > 0, plug the value
- * in r2, and leave nip and fip unchanged. This will have us
- * executing "br err" on return to user space.
- * 3. If the system call code returned ERESTART,
- * we need to rexecute the trap instruction. Back up the pipe
- * line.
- * fip = nip, nip = xip
- * 4. If the system call returned EJUSTRETURN, don't need to adjust
- * any pointers.
- */
-
- if (error == 0) {
- /*
- * If fork succeeded and we are the child, our stack
- * has moved and the pointer tf is no longer valid,
- * and p is wrong. Compute the new trapframe pointer.
- * (The trap frame invariably resides at the
- * tippity-top of the u. area.)
- */
- p = curproc;
- tf = USER_REGS(p);
- tf->r[2] = rval[0];
- tf->r[3] = rval[1];
- tf->epsr &= ~PSR_C;
- tf->snip = tf->sfip & ~FIP_E;
- tf->sfip = tf->snip + 4;
- } else if (error > 0) {
- /* error != ERESTART && error != EJUSTRETURN*/
- tf->r[2] = error;
- tf->epsr |= PSR_C; /* fail */
- tf->snip = tf->snip & ~NIP_E;
- tf->sfip = tf->sfip & ~FIP_E;
- } else if (error == ERESTART) {
- /*
- * If (error == ERESTART), back up the pipe line. This
- * will end up reexecuting the trap.
- */
- tf->epsr &= ~PSR_C;
- tf->sfip = tf->snip & ~NIP_E;
- tf->snip = tf->sxip & ~NIP_E;
- } else {
- /* if (error == EJUSTRETURN), leave the ip's alone */
- tf->epsr &= ~PSR_C;
- }
+ rval[0] = 0;
+ rval[1] = 0;
+ error = (*callp->sy_call)(p, &args, rval);
+ /*
+ * system call will look like:
+ * ld r10, r31, 32; r10,r11,r12 might be garbage.
+ * ld r11, r31, 36
+ * ld r12, r31, 40
+ * or r13, r0, <code>
+ * tb0 0, r0, <128> <- sxip
+ * br err <- snip
+ * jmp r1
+ * err: or.u r3, r0, hi16(errno)
+ * st r2, r3, lo16(errno)
+ * subu r2, r0, 1
+ * jmp r1
+ *
+ * So, when we take syscall trap, sxip/snip will be as
+ * shown above.
+ * Given this,
+ * 1. If the system call returned 0, need to jmp r1.
+ * sxip += 8
+ * 2. If the system call returned an errno > 0, increment
+ * sxip += 4 and plug the value in r2. This will have us
+ * executing "br err" on return to user space.
+ * 3. If the system call code returned ERESTART,
+	 *    we need to re-execute the trap instruction; leave sxip as is.
+ * 4. If the system call returned EJUSTRETURN, just return.
+ * sxip += 8
+ */
+
+ if (error == 0) {
+ /*
+ * If fork succeeded and we are the child, our stack
+ * has moved and the pointer tf is no longer valid,
+ * and p is wrong. Compute the new trapframe pointer.
+ * (The trap frame invariably resides at the
+ * tippity-top of the u. area.)
+ */
+ p = curproc;
+ tf = USER_REGS(p);
+ tf->r[2] = rval[0];
+ tf->r[3] = rval[1];
+ tf->epsr &= ~PSR_C;
+ tf->sxip += 8;
+ tf->sxip &= ~3;
+ } else if (error > 0) {
+ /* error != ERESTART && error != EJUSTRETURN*/
+ tf->r[2] = error;
+ tf->epsr |= PSR_C; /* fail */
+ tf->sxip += 4;
+ tf->sxip &= ~3;
+ } else if (error == ERESTART) {
+ /*
+ * If (error == ERESTART), back up the pipe line. This
+ * will end up reexecuting the trap.
+ */
+ tf->epsr &= ~PSR_C;
+ } else {
+ /* if (error == EJUSTRETURN) */
+ tf->epsr &= ~PSR_C;
+ tf->sxip += 8;
+ tf->sxip &= ~3;
+ }
#ifdef SYSCALL_DEBUG
- scdebug_ret(p, code, error, rval);
+ scdebug_ret(p, code, error, rval);
#endif
- userret(p, tf, sticks);
+ userret(p, tf, sticks);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
- ktrsysret(p->p_tracep, code, error, rval[0]);
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
}
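
The only substantive difference between syscall() and m197_syscall() is how the saved instruction pointers advance past the trap: the mc88100 exposes the sxip/snip/sfip pipeline, so success means promoting sfip into snip, while the mc88110 keeps a single exception pc and the handler simply steps sxip past the tb0 (and, on success, the br err too). child_return() below makes the same choice based on cputyp. A hedged sketch of that dispatch as one helper (the helper and the trimmed trapframe are illustrative, not from the source):

	struct tf_sketch {
		unsigned sxip, snip, sfip;
	};

	#define FIP_E	1	/* illustrative value of the error bit */

	/* Advance a trapframe past a successful syscall trap. */
	void
	syscall_success_return(struct tf_sketch *tf, int is_mc88110)
	{
		if (!is_mc88110) {
			/* mc88100: skip "br err" by promoting fip. */
			tf->snip = tf->sfip & ~FIP_E;
			tf->sfip = tf->snip + 4;
		} else {
			/* mc88110: step the single exception pc past
			 * "tb0; br err" straight to "jmp r1". */
			tf->sxip += 8;
			tf->sxip &= ~3;
		}
	}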
@@ -802,19 +1540,24 @@ syscall(register_t code, struct m88100_saved_state *tf)
void
child_return(struct proc *p)
{
- struct trapframe *tf;
-
- tf = USER_REGS(p);
- tf->r[2] = 0;
- tf->r[3] = 0;
- tf->epsr &= ~PSR_C;
- tf->snip = tf->sfip & ~3;
- tf->sfip = tf->snip + 4;
-
- userret(p, tf, p->p_sticks);
+ struct trapframe *tf;
+
+ tf = USER_REGS(p);
+ tf->r[2] = 0;
+ tf->r[3] = 0;
+ tf->epsr &= ~PSR_C;
+ if (cputyp != CPU_197) {
+ tf->snip = tf->sfip & ~3;
+ tf->sfip = tf->snip + 4;
+ } else {
+ tf->sxip += 8;
+ tf->sxip &= ~3;
+ }
+
+ userret(p, tf, p->p_sticks);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
- ktrsysret(p->p_tracep, SYS_fork, 0, 0);
+ if (KTRPOINT(p, KTR_SYSRET))
+ ktrsysret(p->p_tracep, SYS_fork, 0, 0);
#endif
}
@@ -823,27 +1566,27 @@ child_return(struct proc *p)
*/
u_long
allocate_sir(proc, arg)
- void (*proc)();
- void *arg;
+void (*proc)();
+void *arg;
{
- int bit;
-
- if (next_sir >= NSIR)
- panic("allocate_sir: none left");
- bit = next_sir++;
- sir_routines[bit] = proc;
- sir_args[bit] = arg;
- return (1 << bit);
+ int bit;
+
+ if (next_sir >= NSIR)
+ panic("allocate_sir: none left");
+ bit = next_sir++;
+ sir_routines[bit] = proc;
+ sir_args[bit] = arg;
+ return (1 << bit);
}
void
init_sir()
{
- extern void netintr();
+ extern void netintr();
- sir_routines[0] = netintr;
- sir_routines[1] = softclock;
- next_sir = 2;
+ sir_routines[0] = netintr;
+ sir_routines[1] = softclock;
+ next_sir = 2;
}
@@ -851,44 +1594,46 @@ init_sir()
* User Single Step Debugging Support *
\************************************/
-/*
- * Read bytes from address space for debugger.
- */
-void
-ss_read_bytes(addr, size, data)
- unsigned addr;
- register int size;
- register char *data;
+unsigned
+ss_get_value(struct proc *p, unsigned addr, int size)
{
- register char *src;
-
- src = (char *)addr;
-
- while(--size >= 0) {
- *data++ = *src++;
- }
+ struct uio uio;
+ struct iovec iov;
+ unsigned value;
+
+ iov.iov_base = (caddr_t)&value;
+ iov.iov_len = size;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)addr;
+ uio.uio_resid = size;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_procp = curproc;
+ procfs_domem(curproc, p, NULL, &uio);
+ return value;
}
-unsigned
-ss_get_value(unsigned addr, int size, int is_signed)
+int
+ss_put_value(struct proc *p, unsigned addr, unsigned value, int size)
{
- char data[sizeof(unsigned)];
- unsigned value, extend;
- int i;
-
- ss_read_bytes(addr, size, data);
-
- value = 0;
- extend = (~(db_expr_t)0) << (size * 8 - 1);
- for (i = 0; i < size; i++)
- value = (value << 8) + (data[i] & 0xFF);
-
- if (size < sizeof(unsigned) && is_signed && (value & extend))
- value |= extend;
- return (value);
+ struct uio uio;
+ struct iovec iov;
+ int i;
+
+ iov.iov_base = (caddr_t)&value;
+ iov.iov_len = size;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = (off_t)addr;
+ uio.uio_resid = size;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_procp = curproc;
+ i = procfs_domem(curproc, p, NULL, &uio);
+ return i;
}
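
ss_get_value() and ss_put_value() fold the eight lines of uio/iovec boilerplate, repeated throughout the breakpoint code in this file, into a single call each. With them, the save-and-plant step of cpu_singlestep() reduces to something like the following sketch (built only from the helpers above; not code from this commit):

	/* Save the word at va and replace it with the single-step
	 * breakpoint. */
	int
	plant_ss_breakpoint(struct proc *p, unsigned va)
	{
		p->p_md.md_ss_addr = va;
		p->p_md.md_ss_instr = ss_get_value(p, va, sizeof(int));
		if (ss_put_value(p, va, SSBREAKPOINT, sizeof(int)) < 0)
			return (EFAULT);
		return (0);
	}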
-
/*
* ss_branch_taken(instruction, program counter, func, func_data)
*
@@ -902,43 +1647,41 @@ ss_get_value(unsigned addr, int size, int is_signed)
*/
unsigned
ss_branch_taken(
- unsigned inst,
- unsigned pc,
- unsigned (*func)(unsigned int, struct trapframe *),
- struct trapframe *func_data) /* 'opaque' */
+ unsigned inst,
+ unsigned pc,
+ unsigned (*func)(unsigned int, struct trapframe *),
+ struct trapframe *func_data) /* 'opaque' */
{
- /* check if br/bsr */
- if ((inst & 0xf0000000U) == 0xc0000000U)
- {
+ /* check if br/bsr */
+ if ((inst & 0xf0000000U) == 0xc0000000U) {
/* signed 26 bit pc relative displacement, shift left two bits */
inst = (inst & 0x03ffffffU)<<2;
/* check if sign extension is needed */
if (inst & 0x08000000U)
- inst |= 0xf0000000U;
- return pc + inst;
- }
-
- /* check if bb0/bb1/bcnd case */
- switch ((inst & 0xf8000000U))
- {
- case 0xd0000000U: /* bb0 */
- case 0xd8000000U: /* bb1 */
- case 0xe8000000U: /* bcnd */
- /* signed 16 bit pc relative displacement, shift left two bits */
- inst = (inst & 0x0000ffffU)<<2;
- /* check if sign extension is needed */
- if (inst & 0x00020000U)
- inst |= 0xfffc0000U;
+ inst |= 0xf0000000U;
return pc + inst;
- }
-
- /* check jmp/jsr case */
- /* check bits 5-31, skipping 10 & 11 */
- if ((inst & 0xfffff3e0U) == 0xf400c000U)
- return (*func)(inst & 0x1f, func_data); /* the register value */
-
- return 0; /* keeps compiler happy */
+ }
+
+ /* check if bb0/bb1/bcnd case */
+ switch ((inst & 0xf8000000U)) {
+ case 0xd0000000U: /* bb0 */
+ case 0xd8000000U: /* bb1 */
+ case 0xe8000000U: /* bcnd */
+ /* signed 16 bit pc relative displacement, shift left two bits */
+ inst = (inst & 0x0000ffffU)<<2;
+ /* check if sign extension is needed */
+ if (inst & 0x00020000U)
+ inst |= 0xfffc0000U;
+ return pc + inst;
+ }
+
+ /* check jmp/jsr case */
+ /* check bits 5-31, skipping 10 & 11 */
+ if ((inst & 0xfffff3e0U) == 0xf400c000U)
+ return (*func)(inst & 0x1f, func_data); /* the register value */
+
+ return 0; /* keeps compiler happy */
}
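
For the br/bsr case the 26-bit displacement counts words, so it is shifted left two and sign-extended from bit 27 before being added to the pc. A standalone check of that arithmetic (the instruction words and pc are made-up examples):

	#include <assert.h>

	/* Decode a br/bsr displacement exactly as ss_branch_taken(). */
	static unsigned
	br_target(unsigned inst, unsigned pc)
	{
		inst = (inst & 0x03ffffffU) << 2;	/* disp in bytes */
		if (inst & 0x08000000U)			/* sign extend */
			inst |= 0xf0000000U;
		return pc + inst;
	}

	int main(void)
	{
		/* br .+16: displacement of 4 words forward */
		assert(br_target(0xc0000004U, 0x1000U) == 0x1010U);
		/* br .-4: displacement of -1 word (0x3ffffff) */
		assert(br_target(0xc3ffffffU, 0x1000U) == 0x0ffcU);
		return 0;
	}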
/*
@@ -949,102 +1692,140 @@ ss_branch_taken(
unsigned
ss_getreg_val(unsigned regno, struct trapframe *tf)
{
- if (regno == 0)
- return 0;
- else if (regno < 31)
- return tf->r[regno];
- else {
- panic("bad register number to ss_getreg_val.");
- return 0;/*to make compiler happy */
+ if (regno == 0)
+ return 0;
+ else if (regno < 31)
+ return tf->r[regno];
+ else {
+ panic("bad register number to ss_getreg_val.");
+ return 0;/*to make compiler happy */
+ }
+}
+
+boolean_t
+ss_inst_branch(unsigned ins)
+{
+ /* check high five bits */
+
+ switch (ins >> (32-5))
+ {
+ case 0x18: /* br */
+ case 0x1a: /* bb0 */
+ case 0x1b: /* bb1 */
+ case 0x1d: /* bcnd */
+ return TRUE;
+ break;
+ case 0x1e: /* could be jmp */
+ if ((ins & 0xfffffbe0U) == 0xf400c000U)
+ return TRUE;
}
+
+ return FALSE;
}
-unsigned
-ss_get_next_addr(struct trapframe *regs)
+
+/* ss_inst_delayed - this instruction is followed by a delay slot. Could be
+   br.n, bsr.n, bb0.n, bb1.n, bcnd.n, jmp.n or jsr.n */
+
+boolean_t
+ss_inst_delayed(unsigned ins)
{
- unsigned inst;
- unsigned pc = PC_REGS(regs);
- unsigned addr = 0;
-
- inst = ss_get_value(pc, sizeof(int), FALSE);
- addr = ss_branch_taken(inst, pc, ss_getreg_val, regs);
- if (addr) return(addr);
- return(pc + 4);
+ /* check the br, bsr, bb0, bb1, bcnd cases */
+ switch ((ins & 0xfc000000U)>>(32-6))
+ {
+ case 0x31: /* br */
+ case 0x33: /* bsr */
+ case 0x35: /* bb0 */
+ case 0x37: /* bb1 */
+ case 0x3b: /* bcnd */
+ return TRUE;
+ }
+
+ /* check the jmp, jsr cases */
+ /* mask out bits 0-4, bit 11 */
+ return ((ins & 0xfffff7e0U) == 0xf400c400U) ? TRUE : FALSE;
+}
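
Under the encodings this test assumes, bit 26 is the ".n" (delay-slot) bit for the pc-relative branches — the values matched above are the odd siblings of the plain forms (br = 0x30, br.n = 0x31, and so on) — while for jmp/jsr the ".n" bit is bit 10 (0x400), which the final mask keeps while wildcarding bit 11 (jmp vs jsr) and the register field. A small check:

#include <assert.h>

int
main(void)
{
	assert(0xc4000000U >> 26 == 0x31);	/* br.n   */
	assert(0xcc000000U >> 26 == 0x33);	/* bsr.n  */
	assert(0xec000000U >> 26 == 0x3b);	/* bcnd.n */
	/* jsr.n r2: register bits 0-4 and bit 11 are masked away */
	assert((0xf400cc02U & 0xfffff7e0U) == 0xf400c400U);
	return 0;
}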
+
+unsigned
+ss_next_instr_address(struct proc *p, unsigned pc, unsigned delay_slot)
+{
+ if (delay_slot == 0)
+ return pc + 4;
+	else {
+ if (ss_inst_delayed(ss_get_value(p, pc, sizeof(int))))
+ return pc + 4;
+ else
+ return pc;
+ }
}
int
cpu_singlestep(p)
- register struct proc *p;
+	register struct proc *p;
{
- register unsigned va;
- struct trapframe *sstf = p->p_md.md_tf;
- int i;
-
- int bpinstr = BREAKPOINT;
- int curinstr;
- struct uio uio;
- struct iovec iov;
-
- /*
- * Fetch what's at the current location.
- */
- iov.iov_base = (caddr_t)&curinstr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)sstf->sxip;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_READ;
- uio.uio_procp = curproc;
- procfs_domem(curproc, p, NULL, &uio);
-
- /* compute next address after current location */
- if(curinstr != 0) {
- va = ss_get_next_addr(sstf);
- printf("SS %s (%d): next breakpoint set at %x\n",
- p->p_comm, p->p_pid, va);
- }
- else {
- va = PC_REGS(sstf) + 4;
- }
- if (p->p_md.md_ss_addr) {
- printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
- p->p_comm, p->p_pid, p->p_md.md_ss_addr, va); /* XXX */
- return (EFAULT);
- }
-
- p->p_md.md_ss_addr = va;
-
- /*
- * Fetch what's at the current location.
- */
- iov.iov_base = (caddr_t)&p->p_md.md_ss_instr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)va;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_READ;
- uio.uio_procp = curproc;
- procfs_domem(curproc, p, NULL, &uio);
-
- /*
- * Store breakpoint instruction at the "next" location now.
- */
- iov.iov_base = (caddr_t)&bpinstr;
- iov.iov_len = sizeof(int);
- uio.uio_iov = &iov;
- uio.uio_iovcnt = 1;
- uio.uio_offset = (off_t)va;
- uio.uio_resid = sizeof(int);
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_rw = UIO_WRITE;
- uio.uio_procp = curproc;
- i = procfs_domem(curproc, p, NULL, &uio);
-
- if (i < 0) return (EFAULT);
- return (0);
+	struct trapframe *sstf = USER_REGS(p);	/* was p->p_md.md_tf */
+	unsigned pc, brpc;
+	int i;
+	int bpinstr = SSBREAKPOINT;
+	unsigned curinstr;
+
+ pc = PC_REGS(sstf);
+ /*
+	 * User was stopped at pc, i.e. the instruction
+ * at pc was not executed.
+ * Fetch what's at the current location.
+ */
+ curinstr = ss_get_value(p, pc, sizeof(int));
+
+ /* compute next address after current location */
+ if (curinstr != 0) {
+ if (ss_inst_branch(curinstr) || inst_call(curinstr) || inst_return(curinstr)) {
+ brpc = ss_branch_taken(curinstr, pc, ss_getreg_val, sstf);
+ if (brpc != pc) { /* self-branches are hopeless */
+#if 0
+ printf("SS %s (%d): next taken breakpoint set at %x\n",
+ p->p_comm, p->p_pid, brpc);
+#endif
+ p->p_md.md_ss_taken_addr = brpc;
+ p->p_md.md_ss_taken_instr = ss_get_value(p, brpc, sizeof(int));
+ /* Store breakpoint instruction at the "next" location now. */
+ i = ss_put_value(p, brpc, bpinstr, sizeof(int));
+ if (i < 0) return (EFAULT);
+ }
+ }
+ pc = ss_next_instr_address(p, pc, 0);
+#if 0
+ printf("SS %s (%d): next breakpoint set at %x\n",
+ p->p_comm, p->p_pid, pc);
+#endif
+ } else {
+ pc = PC_REGS(sstf) + 4;
+#if 0
+ printf("SS %s (%d): next breakpoint set at %x\n",
+ p->p_comm, p->p_pid, pc);
+#endif
+ }
+
+ if (p->p_md.md_ss_addr) {
+ printf("SS %s (%d): breakpoint already set at %x (va %x)\n",
+ p->p_comm, p->p_pid, p->p_md.md_ss_addr, pc); /* XXX */
+ return (EFAULT);
+ }
+
+ p->p_md.md_ss_addr = pc;
+
+ /* Fetch what's at the "next" location. */
+ p->p_md.md_ss_instr = ss_get_value(p, pc, sizeof(int));
+
+ /* Store breakpoint instruction at the "next" location now. */
+ i = ss_put_value(p, pc, bpinstr, sizeof(int));
+
+ if (i < 0) return (EFAULT);
+ return (0);
}
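
Taken together, cpu_singlestep() is the classic two-breakpoint software single-step: one breakpoint is planted at the fall-through address and, when the current instruction can transfer control, a second at the taken target, with the original word saved each time so it can be restored when the trap is taken. The save-and-overwrite step it performs twice could be factored as below (a sketch; plant_breakpoint() is hypothetical, not in the patch):

/* Remember the original word at va, then overwrite it with the
 * breakpoint instruction.  Like ss_put_value(), returns negative
 * on failure. */
static int
plant_breakpoint(struct proc *p, unsigned va,
    unsigned *saved_addr, unsigned *saved_instr)
{
	*saved_addr = va;
	*saved_instr = ss_get_value(p, va, sizeof(int));
	return ss_put_value(p, va, SSBREAKPOINT, sizeof(int));
}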
diff --git a/sys/arch/mvme88k/mvme88k/vm_machdep.c b/sys/arch/mvme88k/mvme88k/vm_machdep.c
index b5f06c33586..c3b3d79cef1 100644
--- a/sys/arch/mvme88k/mvme88k/vm_machdep.c
+++ b/sys/arch/mvme88k/mvme88k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.10 1999/09/03 18:01:33 art Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.11 1999/09/27 19:13:24 smurph Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -42,7 +42,7 @@
* from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
* from: @(#)vm_machdep.c 7.10 (Berkeley) 5/7/91
* vm_machdep.c,v 1.3 1993/07/07 07:09:32 cgd Exp
- * $Id: vm_machdep.c,v 1.10 1999/09/03 18:01:33 art Exp $
+ * $Id: vm_machdep.c,v 1.11 1999/09/27 19:13:24 smurph Exp $
*/
#include <sys/param.h>
@@ -59,6 +59,7 @@
#include <vm/vm_map.h>
#include <machine/cpu.h>
+#include <machine/cpu_number.h>
#include <machine/pte.h>
extern struct map *iomap;
@@ -73,6 +74,7 @@ extern vm_map_t iomap_map;
* address in each process; in the future we will probably relocate
* the frame pointers on the stack after copying.
*/
+#undef pcb_sp
#ifdef __FORK_BRAINDAMAGE
int
@@ -83,6 +85,7 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize)
{
struct switchframe *p2sf;
int off, ssz;
+ int cpu;
struct ksigframe {
void (*func)(struct proc *);
void *proc;
@@ -90,7 +93,8 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize)
extern void proc_do_uret(), child_return();
extern void proc_trampoline();
- savectx(p1->p_addr);
+ cpu = cpu_number();
+ savectx(p1->p_addr);
bcopy((void *)&p1->p_addr->u_pcb, (void *)&p2->p_addr->u_pcb, sizeof(struct pcb));
p2->p_addr->u_pcb.kernel_state.pcb_ipl = 0;
@@ -100,7 +104,7 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize)
/*XXX these may not be necessary nivas */
save_u_area(p2, p2->p_addr);
#ifdef notneeded
- PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &p2->p_addr->u_pcb, 0);
+ PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &p2->p_addr->u_pcb, cpu);
#endif /* notneeded */
/*
@@ -116,7 +120,7 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize)
* If specified, give the child a different stack.
*/
if (stack != NULL)
- USER_REGS(p2)->pcb_sp = (u_int)stack + stacksize;
+ USER_REGS(p2)->r[31] = (u_int)stack + stacksize;
ksfp = (struct ksigframe *)p2->p_addr->u_pcb.kernel_state.pcb_sp - 1;
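
Besides the reindent, this hunk carries two functional changes: PMAP_ACTIVATE now receives the CPU number from cpu_number() rather than a hard-coded 0 (the MVME188 is a multiprocessor board), and the child's user stack pointer is written through r[31] directly — r31 is the m88k stack pointer, and the #undef pcb_sp above suggests pcb_sp had been a macro alias for that slot. Condensed, the new behavior is (a sketch reusing the patch's names):

	int cpu = cpu_number();		/* CPU we are forking on */
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &p2->p_addr->u_pcb, cpu);
	if (stack != NULL)		/* stacks grow down: point r31 at the top */
		USER_REGS(p2)->r[31] = (u_int)stack + stacksize;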
@@ -332,11 +336,11 @@ iomap_mapin(vm_offset_t pa, vm_size_t len, boolean_t canwait)
ppa = trunc_page(ppa);
#ifndef NEW_MAPPING
- tva = iova;
+ tva = iova;
#else
tva = ppa;
#endif
-
+
while (len>0) {
pmap_enter(vm_map_pmap(iomap_map), tva, ppa,
VM_PROT_WRITE|VM_PROT_READ|(CACHE_INH << 16), 1, 0);
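
The pmap_enter() call above packs an MVME88k cache-control flag into the high half of the protection argument, for the pmap to recover with a 16-bit shift. A self-contained sketch of that convention (the flag values here are illustrative, not taken from the patch):

#include <assert.h>

#define VM_PROT_READ	0x01	/* standard Mach VM values */
#define VM_PROT_WRITE	0x02
#define CACHE_INH	0x40	/* illustrative value only */

int
main(void)
{
	unsigned prot = VM_PROT_WRITE | VM_PROT_READ | (CACHE_INH << 16);

	assert((prot & 0xffff) == (VM_PROT_WRITE | VM_PROT_READ));
	assert(prot >> 16 == CACHE_INH);	/* what the pmap extracts */
	return 0;
}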
@@ -437,18 +441,10 @@ int
badvaddr(vm_offset_t va, int size)
{
register int x;
- int i;
- int ret = 0;
-
- for (i=0; i<5; i++){
- ret = badaddr(va, size);
- if (ret)
- delay(500);
- else
- break;
+
+ if (badaddr(va, size)) {
+ return -1;
}
- if (ret)
- return -1;
switch (size) {
case 1: