Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         2
-rw-r--r--  arch/mips/kernel/apm.c          605
-rw-r--r--  arch/mips/kernel/asm-offsets.c   66
-rw-r--r--  arch/mips/kernel/branch.c         2
-rw-r--r--  arch/mips/kernel/i8259.c          4
-rw-r--r--  arch/mips/kernel/irixsig.c        2
-rw-r--r--  arch/mips/kernel/ptrace.c        26
-rw-r--r--  arch/mips/kernel/ptrace32.c      16
-rw-r--r--  arch/mips/kernel/r4k_switch.S    13
-rw-r--r--  arch/mips/kernel/setup.c         56
-rw-r--r--  arch/mips/kernel/traps.c          9
11 files changed, 712 insertions, 89 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 34e8a256765c..881c467c6982 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -13,6 +13,8 @@ binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
+obj-$(CONFIG_APM) += apm.o
+
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
new file mode 100644
index 000000000000..15f46b4471fd
--- /dev/null
+++ b/arch/mips/kernel/apm.c
@@ -0,0 +1,605 @@
+/*
+ * bios-less APM driver for MIPS Linux
+ * Jamey Hicks <jamey@crl.dec.com>
+ * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ * Intel Corporation, Microsoft Corporation. Advanced Power Management
+ * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * [This document is available from Microsoft at:
+ * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+
+#include <asm/apm.h> /* apm_power_info */
+#include <asm/system.h>
+
+/*
+ * The apm_bios device is one of the misc char devices.
+ * This is its minor number.
+ */
+#define APM_MINOR_DEV 134
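+
+/*
+ * Note (editorial): misc character devices share major number 10, so
+ * the node is conventionally created as "mknod /dev/apm_bios c 10 134".
+ */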
+
+/*
+ * See Documentation/Config.help for the configuration options.
+ *
+ * Various options can be changed at boot time as follows:
+ * (We allow underscores for compatibility with the modules code)
+ * apm=on/off enable/disable APM
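+ *
+ * e.g. booting with "apm=off" on the kernel command line disables
+ * this driver entirely (see apm_setup() below).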
+ */
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS 16
+
+struct apm_queue {
+ unsigned int event_head;
+ unsigned int event_tail;
+ apm_event_t events[APM_MAX_EVENTS];
+};
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+ struct list_head list;
+
+ unsigned int suser: 1;
+ unsigned int writer: 1;
+ unsigned int reader: 1;
+
+ int suspend_result;
+ unsigned int suspend_state;
+#define SUSPEND_NONE 0 /* no suspend pending */
+#define SUSPEND_PENDING 1 /* suspend pending read */
+#define SUSPEND_READ 2 /* suspend read, pending ack */
+#define SUSPEND_ACKED 3 /* suspend acked */
+#define SUSPEND_DONE 4 /* suspend completed */
+
+ struct apm_queue queue;
+};
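+
+/*
+ * suspend_state normally progresses SUSPEND_PENDING -> SUSPEND_READ ->
+ * SUSPEND_ACKED -> SUSPEND_DONE and is reset to SUSPEND_NONE once the
+ * acknowledging ioctl returns.
+ */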
+
+/*
+ * Local variables
+ */
+static int suspends_pending;
+static int apm_disabled;
+static int mips_apm_active;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info. kapmd provides us a process context to handle
+ * "APM" events within - specifically necessary if we're going
+ * to be suspending the system.
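+ *
+ * Platform code feeds events into this queue via apm_queue_event()
+ * at the bottom of this file.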
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DECLARE_COMPLETION(kapmd_exit);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+
+static const char driver_version[] = "1.13"; /* no spaces */
+
+
+
+/*
+ * Compatibility cruft until the IPAQ people move over to the new
+ * interface.
+ */
+static void __apm_get_power_status(struct apm_power_info *info)
+{
+}
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+EXPORT_SYMBOL(apm_get_power_status);
+
+
+/*
+ * APM event queue management.
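+ *
+ * The queue is a fixed-size ring buffer: both indices are advanced
+ * before use, and when the head catches up with the tail the oldest
+ * event is dropped (with a one-time warning).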
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+ return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ return q->events[q->event_tail];
+}
+
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+ q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+ if (q->event_head == q->event_tail) {
+ static int notified;
+
+ if (notified++ == 0)
+ printk(KERN_ERR "apm: an event queue overflowed\n");
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ }
+ q->events[q->event_head] = event;
+}
+
+static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+{
+ if (as->suser && as->writer) {
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_USER_SUSPEND:
+ /*
+ * If this user already has a suspend pending,
+ * don't queue another one.
+ */
+ if (as->suspend_state != SUSPEND_NONE)
+ return;
+
+ as->suspend_state = SUSPEND_PENDING;
+ suspends_pending++;
+ break;
+ }
+ }
+ queue_add_event(&as->queue, event);
+}
+
+static void queue_event(apm_event_t event, struct apm_user *sender)
+{
+ struct apm_user *as;
+
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as != sender && as->reader)
+ queue_event_one_user(as, event);
+ }
+ up_read(&user_list_lock);
+ wake_up_interruptible(&apm_waitqueue);
+}
+
+static void apm_suspend(void)
+{
+ struct apm_user *as;
+ int err = pm_suspend(PM_SUSPEND_MEM);
+
+ /*
+ * Anyone on the APM queues will think we're still suspended.
+ * Send a message so everyone knows we're now awake again.
+ */
+ queue_event(APM_NORMAL_RESUME, NULL);
+
+ /*
+ * Finally, wake up anyone who is sleeping on the suspend.
+ */
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ as->suspend_result = err;
+ as->suspend_state = SUSPEND_DONE;
+ }
+ up_read(&user_list_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apm_user *as = fp->private_data;
+ apm_event_t event;
+ int i = count, ret = 0;
+
+ if (count < sizeof(apm_event_t))
+ return -EINVAL;
+
+ if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+ while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+ event = queue_get_event(&as->queue);
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &event, sizeof(event)))
+ break;
+
+ if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+ as->suspend_state = SUSPEND_READ;
+
+ buf += sizeof(event);
+ i -= sizeof(event);
+ }
+
+ if (i < count)
+ ret = count - i;
+
+ return ret;
+}
+
+static unsigned int apm_poll(struct file *fp, poll_table * wait)
+{
+ struct apm_user *as = fp->private_data;
+
+ poll_wait(fp, &apm_waitqueue, wait);
+ return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ * This IOCTL is overloaded, and performs two functions. It is used to:
+ * - initiate a suspend
+ * - acknowledge a suspend read from /dev/apm_bios.
+ * Only when everyone who has opened /dev/apm_bios with write permission
+ * has acknowledged does the actual suspend happen.
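+ *
+ * Illustrative userspace sequence (not part of this driver):
+ *
+ *	fd = open("/dev/apm_bios", O_RDWR);
+ *	read(fd, &event, sizeof(apm_event_t));	(returns APM_USER_SUSPEND)
+ *	ioctl(fd, APM_IOC_SUSPEND);		(acknowledge; returns after resume)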
+ */
+static int
+apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+{
+ struct apm_user *as = filp->private_data;
+ unsigned long flags;
+ int err = -EINVAL;
+
+ if (!as->suser || !as->writer)
+ return -EPERM;
+
+ switch (cmd) {
+ case APM_IOC_SUSPEND:
+ as->suspend_result = -EINTR;
+
+ if (as->suspend_state == SUSPEND_READ) {
+ /*
+ * If we read a suspend command from /dev/apm_bios,
+ * then the corresponding APM_IOC_SUSPEND ioctl is
+ * interpreted as an acknowledge.
+ */
+ as->suspend_state = SUSPEND_ACKED;
+ suspends_pending--;
+ } else {
+ /*
+ * Otherwise it is a request to suspend the system.
+ * Queue an event for all readers, and expect an
+ * acknowledge from all writers who haven't already
+ * acknowledged.
+ */
+ queue_event(APM_USER_SUSPEND, as);
+ }
+
+ /*
+ * If there are no further acknowledges required, suspend
+ * the system.
+ */
+ if (suspends_pending == 0)
+ apm_suspend();
+
+ /*
+ * Wait for the suspend/resume to complete. If there are
+ * pending acknowledges, we wait here for them.
+ *
+ * Note that we need to ensure that the PM subsystem does
+ * not kick us out of the wait when it suspends the threads.
+ */
+ flags = current->flags;
+ current->flags |= PF_NOFREEZE;
+
+ /*
+ * Note: do not allow a thread which is acking the suspend
+ * to escape until the resume is complete.
+ */
+ if (as->suspend_state == SUSPEND_ACKED)
+ wait_event(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+ else
+ wait_event_interruptible(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+
+ current->flags = flags;
+ err = as->suspend_result;
+ as->suspend_state = SUSPEND_NONE;
+ break;
+ }
+
+ return err;
+}
+
+static int apm_release(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as = filp->private_data;
+ filp->private_data = NULL;
+
+ down_write(&user_list_lock);
+ list_del(&as->list);
+ up_write(&user_list_lock);
+
+ /*
+ * We are now unhooked from the chain. As far as new
+ * events are concerned, we no longer exist. However, we
+ * need to balance suspends_pending, which means the
+ * possibility of sleeping.
+ */
+ if (as->suspend_state != SUSPEND_NONE) {
+ suspends_pending -= 1;
+ if (suspends_pending == 0)
+ apm_suspend();
+ }
+
+ kfree(as);
+ return 0;
+}
+
+static int apm_open(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as;
+
+ as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL);
+ if (as) {
+ /*
+ * XXX - this is a tiny bit broken, when we consider BSD
+ * process accounting. If the device is opened by root, we
+ * instantly flag that we used superuser privs. Who knows,
+ * we might close the device immediately without doing a
+ * privileged operation -- cevans
+ */
+ as->suser = capable(CAP_SYS_ADMIN);
+ as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
+ as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
+
+ down_write(&user_list_lock);
+ list_add(&as->list, &apm_user_list);
+ up_write(&user_list_lock);
+
+ filp->private_data = as;
+ }
+
+ return as ? 0 : -ENOMEM;
+}
+
+static struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+ .ioctl = apm_ioctl,
+ .open = apm_open,
+ .release = apm_release,
+};
+
+static struct miscdevice apm_device = {
+ .minor = APM_MINOR_DEV,
+ .name = "apm_bios",
+ .fops = &apm_bios_fops
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Arguments, with symbols from linux/apm_bios.h.
+ *
+ * 0) Linux driver version (this will change if format changes)
+ * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
+ * 2) APM flags from APM Installation Check (0x00):
+ * bit 0: APM_16_BIT_SUPPORT
+ * bit 1: APM_32_BIT_SUPPORT
+ * bit 2: APM_IDLE_SLOWS_CLOCK
+ * bit 3: APM_BIOS_DISABLED
+ * bit 4: APM_BIOS_DISENGAGED
+ * 3) AC line status
+ * 0x00: Off-line
+ * 0x01: On-line
+ * 0x02: On backup power (BIOS >= 1.1 only)
+ * 0xff: Unknown
+ * 4) Battery status
+ * 0x00: High
+ * 0x01: Low
+ * 0x02: Critical
+ * 0x03: Charging
+ * 0x04: Selected battery not present (BIOS >= 1.2 only)
+ * 0xff: Unknown
+ * 5) Battery flag
+ * bit 0: High
+ * bit 1: Low
+ * bit 2: Critical
+ * bit 3: Charging
+ * bit 7: No system battery
+ * 0xff: Unknown
+ * 6) Remaining battery life (percentage of charge):
+ * 0-100: valid
+ * -1: Unknown
+ * 7) Remaining battery life (time units):
+ * Number of remaining minutes or seconds
+ * -1: Unknown
+ * 8) min = minutes; sec = seconds
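+ *
+ * When no platform handler fills in struct apm_power_info, the fields
+ * keep their "unknown" defaults and the line reads, illustratively:
+ *
+ *	1.13 1.2 0x02 0xff 0xff 0xff -1% -1 ?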
+ */
+static int apm_get_info(char *buf, char **start, off_t fpos, int length)
+{
+ struct apm_power_info info;
+ char *units;
+ int ret;
+
+ info.ac_line_status = 0xff;
+ info.battery_status = 0xff;
+ info.battery_flag = 0xff;
+ info.battery_life = -1;
+ info.time = -1;
+ info.units = -1;
+
+ if (apm_get_power_status)
+ apm_get_power_status(&info);
+
+ switch (info.units) {
+ default: units = "?"; break;
+ case 0: units = "min"; break;
+ case 1: units = "sec"; break;
+ }
+
+ ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+ driver_version, APM_32_BIT_SUPPORT,
+ info.ac_line_status, info.battery_status,
+ info.battery_flag, info.battery_life,
+ info.time, units);
+
+ return ret;
+}
+#endif
+
+static int kapmd(void *arg)
+{
+ daemonize("kapmd");
+ current->flags |= PF_NOFREEZE;
+
+ do {
+ apm_event_t event;
+
+ wait_event_interruptible(kapmd_wait,
+ !queue_empty(&kapmd_queue) || !mips_apm_active);
+
+ if (!mips_apm_active)
+ break;
+
+ spin_lock_irq(&kapmd_queue_lock);
+ event = 0;
+ if (!queue_empty(&kapmd_queue))
+ event = queue_get_event(&kapmd_queue);
+ spin_unlock_irq(&kapmd_queue_lock);
+
+ switch (event) {
+ case 0:
+ break;
+
+ case APM_LOW_BATTERY:
+ case APM_POWER_STATUS_CHANGE:
+ queue_event(event, NULL);
+ break;
+
+ case APM_USER_SUSPEND:
+ case APM_SYS_SUSPEND:
+ queue_event(event, NULL);
+ if (suspends_pending == 0)
+ apm_suspend();
+ break;
+
+ case APM_CRITICAL_SUSPEND:
+ apm_suspend();
+ break;
+ }
+ } while (1);
+
+ complete_and_exit(&kapmd_exit, 0);
+}
+
+static int __init apm_init(void)
+{
+ int ret;
+
+ if (apm_disabled) {
+ printk(KERN_NOTICE "apm: disabled on user request.\n");
+ return -ENODEV;
+ }
+
+ mips_apm_active = 1;
+
+ ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
+ if (ret < 0) {
+ mips_apm_active = 0;
+ return ret;
+ }
+
+#ifdef CONFIG_PROC_FS
+ create_proc_info_entry("apm", 0, NULL, apm_get_info);
+#endif
+
+ ret = misc_register(&apm_device);
+ if (ret != 0) {
+ remove_proc_entry("apm", NULL);
+
+ mips_apm_active = 0;
+ wake_up(&kapmd_wait);
+ wait_for_completion(&kapmd_exit);
+ }
+
+ return ret;
+}
+
+static void __exit apm_exit(void)
+{
+ misc_deregister(&apm_device);
+ remove_proc_entry("apm", NULL);
+
+ mips_apm_active = 0;
+ wake_up(&kapmd_wait);
+ wait_for_completion(&kapmd_exit);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init apm_setup(char *str)
+{
+ while ((str != NULL) && (*str != '\0')) {
+ if (strncmp(str, "off", 3) == 0)
+ apm_disabled = 1;
+ if (strncmp(str, "on", 2) == 0)
+ apm_disabled = 0;
+ str = strchr(str, ',');
+ if (str != NULL)
+ str += strspn(str, ", \t");
+ }
+ return 1;
+}
+
+__setup("apm=", apm_setup);
+#endif
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action. Only a subset of events are handled:
+ * %APM_LOW_BATTERY
+ * %APM_POWER_STATUS_CHANGE
+ * %APM_USER_SUSPEND
+ * %APM_SYS_SUSPEND
+ * %APM_CRITICAL_SUSPEND
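+ *
+ * May be called from interrupt context; the event is only queued here
+ * and is processed later by kapmd in process context.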
+ */
+void apm_queue_event(apm_event_t event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kapmd_queue_lock, flags);
+ queue_add_event(&kapmd_queue, event);
+ spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+ wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0facfaf4e950..f1bb6a2dc5fc 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -141,72 +141,72 @@ void output_thread_defines(void)
void output_thread_fpu_defines(void)
{
offset("#define THREAD_FPR0 ",
- struct task_struct, thread.fpu.hard.fpr[0]);
+ struct task_struct, thread.fpu.fpr[0]);
offset("#define THREAD_FPR1 ",
- struct task_struct, thread.fpu.hard.fpr[1]);
+ struct task_struct, thread.fpu.fpr[1]);
offset("#define THREAD_FPR2 ",
- struct task_struct, thread.fpu.hard.fpr[2]);
+ struct task_struct, thread.fpu.fpr[2]);
offset("#define THREAD_FPR3 ",
- struct task_struct, thread.fpu.hard.fpr[3]);
+ struct task_struct, thread.fpu.fpr[3]);
offset("#define THREAD_FPR4 ",
- struct task_struct, thread.fpu.hard.fpr[4]);
+ struct task_struct, thread.fpu.fpr[4]);
offset("#define THREAD_FPR5 ",
- struct task_struct, thread.fpu.hard.fpr[5]);
+ struct task_struct, thread.fpu.fpr[5]);
offset("#define THREAD_FPR6 ",
- struct task_struct, thread.fpu.hard.fpr[6]);
+ struct task_struct, thread.fpu.fpr[6]);
offset("#define THREAD_FPR7 ",
- struct task_struct, thread.fpu.hard.fpr[7]);
+ struct task_struct, thread.fpu.fpr[7]);
offset("#define THREAD_FPR8 ",
- struct task_struct, thread.fpu.hard.fpr[8]);
+ struct task_struct, thread.fpu.fpr[8]);
offset("#define THREAD_FPR9 ",
- struct task_struct, thread.fpu.hard.fpr[9]);
+ struct task_struct, thread.fpu.fpr[9]);
offset("#define THREAD_FPR10 ",
- struct task_struct, thread.fpu.hard.fpr[10]);
+ struct task_struct, thread.fpu.fpr[10]);
offset("#define THREAD_FPR11 ",
- struct task_struct, thread.fpu.hard.fpr[11]);
+ struct task_struct, thread.fpu.fpr[11]);
offset("#define THREAD_FPR12 ",
- struct task_struct, thread.fpu.hard.fpr[12]);
+ struct task_struct, thread.fpu.fpr[12]);
offset("#define THREAD_FPR13 ",
- struct task_struct, thread.fpu.hard.fpr[13]);
+ struct task_struct, thread.fpu.fpr[13]);
offset("#define THREAD_FPR14 ",
- struct task_struct, thread.fpu.hard.fpr[14]);
+ struct task_struct, thread.fpu.fpr[14]);
offset("#define THREAD_FPR15 ",
- struct task_struct, thread.fpu.hard.fpr[15]);
+ struct task_struct, thread.fpu.fpr[15]);
offset("#define THREAD_FPR16 ",
- struct task_struct, thread.fpu.hard.fpr[16]);
+ struct task_struct, thread.fpu.fpr[16]);
offset("#define THREAD_FPR17 ",
- struct task_struct, thread.fpu.hard.fpr[17]);
+ struct task_struct, thread.fpu.fpr[17]);
offset("#define THREAD_FPR18 ",
- struct task_struct, thread.fpu.hard.fpr[18]);
+ struct task_struct, thread.fpu.fpr[18]);
offset("#define THREAD_FPR19 ",
- struct task_struct, thread.fpu.hard.fpr[19]);
+ struct task_struct, thread.fpu.fpr[19]);
offset("#define THREAD_FPR20 ",
- struct task_struct, thread.fpu.hard.fpr[20]);
+ struct task_struct, thread.fpu.fpr[20]);
offset("#define THREAD_FPR21 ",
- struct task_struct, thread.fpu.hard.fpr[21]);
+ struct task_struct, thread.fpu.fpr[21]);
offset("#define THREAD_FPR22 ",
- struct task_struct, thread.fpu.hard.fpr[22]);
+ struct task_struct, thread.fpu.fpr[22]);
offset("#define THREAD_FPR23 ",
- struct task_struct, thread.fpu.hard.fpr[23]);
+ struct task_struct, thread.fpu.fpr[23]);
offset("#define THREAD_FPR24 ",
- struct task_struct, thread.fpu.hard.fpr[24]);
+ struct task_struct, thread.fpu.fpr[24]);
offset("#define THREAD_FPR25 ",
- struct task_struct, thread.fpu.hard.fpr[25]);
+ struct task_struct, thread.fpu.fpr[25]);
offset("#define THREAD_FPR26 ",
- struct task_struct, thread.fpu.hard.fpr[26]);
+ struct task_struct, thread.fpu.fpr[26]);
offset("#define THREAD_FPR27 ",
- struct task_struct, thread.fpu.hard.fpr[27]);
+ struct task_struct, thread.fpu.fpr[27]);
offset("#define THREAD_FPR28 ",
- struct task_struct, thread.fpu.hard.fpr[28]);
+ struct task_struct, thread.fpu.fpr[28]);
offset("#define THREAD_FPR29 ",
- struct task_struct, thread.fpu.hard.fpr[29]);
+ struct task_struct, thread.fpu.fpr[29]);
offset("#define THREAD_FPR30 ",
- struct task_struct, thread.fpu.hard.fpr[30]);
+ struct task_struct, thread.fpu.fpr[30]);
offset("#define THREAD_FPR31 ",
- struct task_struct, thread.fpu.hard.fpr[31]);
+ struct task_struct, thread.fpu.fpr[31]);
offset("#define THREAD_FCR31 ",
- struct task_struct, thread.fpu.hard.fcr31);
+ struct task_struct, thread.fpu.fcr31);
linefeed;
}
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index b6232d9033cb..76fd3f22c766 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -178,7 +178,7 @@ int __compute_return_epc(struct pt_regs *regs)
if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
else
- fcr31 = current->thread.fpu.hard.fcr31;
+ fcr31 = current->thread.fpu.fcr31;
preempt_enable();
bit = (insn.i_format.rt >> 2);
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 2125ba5f1d9b..0cb8ed5662f3 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -302,11 +302,11 @@ static struct irqaction irq2 = {
};
static struct resource pic1_io_resource = {
- "pic1", 0x20, 0x3f, IORESOURCE_BUSY
+ .name = "pic1", .start = 0x20, .end = 0x3f, .flags = IORESOURCE_BUSY
};
static struct resource pic2_io_resource = {
- "pic2", 0xa0, 0xbf, IORESOURCE_BUSY
+ .name = "pic2", .start = 0xa0, .end = 0xbf, .flags = IORESOURCE_BUSY
};
/*
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 8150f071f80a..a9bf6cc3abd1 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -260,7 +260,7 @@ irix_sigreturn(struct pt_regs *regs)
for(i = 0; i < 32; i++)
error |= __get_user(fregs[i], &context->fpregs[i]);
- error |= __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
+ error |= __get_user(current->thread.fpu.fcr31, &context->fpcsr);
}
/* XXX do sigstack crapola here... XXX */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9b4733c12395..1d44025188d8 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -120,11 +120,11 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
__put_user ((__u64) -1, i + (__u64 __user *) data);
}
+ __put_user (child->thread.fpu.fcr31, data + 64);
+
if (cpu_has_fpu) {
unsigned int flags, tmp;
- __put_user (child->thread.fpu.hard.fcr31, data + 64);
-
preempt_disable();
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
@@ -142,7 +142,6 @@ int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
preempt_enable();
__put_user (tmp, data + 65);
} else {
- __put_user (child->thread.fpu.soft.fcr31, data + 64);
__put_user ((__u32) 0, data + 65);
}
@@ -162,10 +161,7 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
for (i = 0; i < 32; i++)
__get_user (fregs[i], i + (__u64 __user *) data);
- if (cpu_has_fpu)
- __get_user (child->thread.fpu.hard.fcr31, data + 64);
- else
- __get_user (child->thread.fpu.soft.fcr31, data + 64);
+ __get_user (child->thread.fpu.fcr31, data + 64);
/* FIR may not be written. */
@@ -241,10 +237,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = regs->lo;
break;
case FPC_CSR:
- if (cpu_has_fpu)
- tmp = child->thread.fpu.hard.fcr31;
- else
- tmp = child->thread.fpu.soft.fcr31;
+ tmp = child->thread.fpu.fcr31;
break;
case FPC_EIR: { /* implementation / version register */
unsigned int flags;
@@ -336,9 +329,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (!tsk_used_math(child)) {
/* FP not yet used */
- memset(&child->thread.fpu.hard, ~0,
- sizeof(child->thread.fpu.hard));
- child->thread.fpu.hard.fcr31 = 0;
+ memset(&child->thread.fpu, ~0,
+ sizeof(child->thread.fpu));
+ child->thread.fpu.fcr31 = 0;
}
#ifdef CONFIG_32BIT
/*
@@ -369,10 +362,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
regs->lo = data;
break;
case FPC_CSR:
- if (cpu_has_fpu)
- child->thread.fpu.hard.fcr31 = data;
- else
- child->thread.fpu.soft.fcr31 = data;
+ child->thread.fpu.fcr31 = data;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 8704dc0496ea..f40ecd8be05f 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -166,10 +166,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
tmp = regs->lo;
break;
case FPC_CSR:
- if (cpu_has_fpu)
- tmp = child->thread.fpu.hard.fcr31;
- else
- tmp = child->thread.fpu.soft.fcr31;
+ tmp = child->thread.fpu.fcr31;
break;
case FPC_EIR: { /* implementation / version register */
unsigned int flags;
@@ -288,9 +285,9 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
if (!tsk_used_math(child)) {
/* FP not yet used */
- memset(&child->thread.fpu.hard, ~0,
- sizeof(child->thread.fpu.hard));
- child->thread.fpu.hard.fcr31 = 0;
+ memset(&child->thread.fpu, ~0,
+ sizeof(child->thread.fpu));
+ child->thread.fpu.fcr31 = 0;
}
/*
* The odd registers are actually the high order bits
@@ -318,10 +315,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
regs->lo = data;
break;
case FPC_CSR:
- if (cpu_has_fpu)
- child->thread.fpu.hard.fcr31 = data;
- else
- child->thread.fpu.soft.fcr31 = data;
+ child->thread.fpu.fcr31 = data;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 0b1b54acee9f..db94e556fc97 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -75,8 +75,8 @@
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
- fpu_save_double a0 t1 t0 t2 # c0_status passed in t1
- # clobbers t0 and t2
+ fpu_save_double a0 t0 t1 # c0_status passed in t0
+ # clobbers t1
1:
/*
@@ -129,9 +129,9 @@
*/
LEAF(_save_fp)
#ifdef CONFIG_64BIT
- mfc0 t1, CP0_STATUS
+ mfc0 t0, CP0_STATUS
#endif
- fpu_save_double a0 t1 t0 t2 # clobbers t1
+ fpu_save_double a0 t0 t1 # clobbers t1
jr ra
END(_save_fp)
@@ -139,7 +139,10 @@ LEAF(_save_fp)
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
- fpu_restore_double a0, t1 # clobbers t1
+#ifdef CONFIG_64BIT
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_restore_double a0 t0 t1 # clobbers t1
jr ra
END(_restore_fp)
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 397a70e651b5..bfcec8d9bfe4 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -442,6 +442,48 @@ static inline void bootmem_init(void)
#endif /* CONFIG_BLK_DEV_INITRD */
}
+/*
+ * arch_mem_init - initialize memory management subsystem
+ *
+ * o plat_mem_setup() detects the memory configuration and will record detected
+ * memory areas using add_memory_region.
+ * o parse_cmdline_early() parses the command line for mem= options which,
+ * iff detected, will override the results of the automatic detection.
+ *
+ * At this stage the memory configuration of the system is known to the
+ * kernel but the generic memory management system is still entirely uninitialized.
+ *
+ * o bootmem_init()
+ * o sparse_init()
+ * o paging_init()
+ *
+ * At this stage the bootmem allocator is ready to use.
+ *
+ * NOTE: historically plat_mem_setup did the entire platform initialization.
+ * This was rather impractical because it meant plat_mem_setup had to
+ * get away without any kind of memory allocator. To keep old code from
+ * breaking plat_setup was just renamed to plat_setup and a second platform
+ * initialization hook for anything else was introduced.
+ */
+
+extern void plat_mem_setup(void);
+
+static void __init arch_mem_init(char **cmdline_p)
+{
+ /* call board setup routine */
+ plat_mem_setup();
+
+ strlcpy(command_line, arcs_cmdline, sizeof(command_line));
+ strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+
+ *cmdline_p = command_line;
+
+ parse_cmdline_early();
+ bootmem_init();
+ sparse_init();
+ paging_init();
+}
+
static inline void resource_init(void)
{
int i;
@@ -495,8 +537,6 @@ static inline void resource_init(void)
#undef MAXMEM
#undef MAXMEM_PFN
-extern void plat_setup(void);
-
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
@@ -511,18 +551,8 @@ void __init setup_arch(char **cmdline_p)
#endif
#endif
- /* call board setup routine */
- plat_setup();
+ arch_mem_init(cmdline_p);
- strlcpy(command_line, arcs_cmdline, sizeof(command_line));
- strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
-
- *cmdline_p = command_line;
-
- parse_cmdline_early();
- bootmem_init();
- sparse_init();
- paging_init();
resource_init();
#ifdef CONFIG_SMP
plat_smp_setup();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a7564b08eb4d..ad16eceb24dd 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -65,7 +65,7 @@ extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
- struct mips_fpu_soft_struct *ctx);
+ struct mips_fpu_struct *ctx);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@@ -600,8 +600,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
preempt_enable();
/* Run the emulator */
- sig = fpu_emulator_cop1Handler (regs,
- &current->thread.fpu.soft);
+ sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu);
preempt_disable();
@@ -610,7 +609,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
*/
- current->thread.fpu.soft.fcr31 &= ~FPU_CSR_ALL_X;
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
restore_fp(current);
@@ -755,7 +754,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
if (!cpu_has_fpu) {
int sig = fpu_emulator_cop1Handler(regs,
- &current->thread.fpu.soft);
+ &current->thread.fpu);
if (sig)
force_sig(sig, current);
#ifdef CONFIG_MIPS_MT_FPAFF