author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 17:58:08 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-16 17:58:08 -0700
commit    489de30259e667d7bc47da9da44a0270b050cd97
tree      6807814f443fe2c5d041c3bc3fe3ca8d22a955ca /arch/powerpc/platforms/cell
parent    Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
parent    Merge branch 'for-2.6.23' into merge
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (209 commits)
  [POWERPC] Create add_rtc() function to enable the RTC CMOS driver
  [POWERPC] Add H_ILLAN_ATTRIBUTES hcall number
  [POWERPC] xilinxfb: Parameterize xilinxfb platform device registration
  [POWERPC] Oprofile support for Power 5++
  [POWERPC] Enable arbitary speed tty ioctls and split input/output speed
  [POWERPC] Make drivers/char/hvc_console.c:khvcd() static
  [POWERPC] Remove dead code for preventing pread() and pwrite() calls
  [POWERPC] Remove unnecessary #undef printk from prom.c
  [POWERPC] Fix typo in Ebony default DTS
  [POWERPC] Check for NULL ppc_md.init_IRQ() before calling
  [POWERPC] Remove extra return statement
  [POWERPC] pasemi: Don't auto-select CONFIG_EMBEDDED
  [POWERPC] pasemi: Rename platform
  [POWERPC] arch/powerpc/kernel/sysfs.c: Move NUMA exports
  [POWERPC] Add __read_mostly support for powerpc
  [POWERPC] Modify sched_clock() to make CONFIG_PRINTK_TIME more sane
  [POWERPC] Create a dummy zImage if no valid platform has been selected
  [POWERPC] PS3: Bootwrapper support.
  [POWERPC] powermac i2c: Use mutex
  [POWERPC] Schedule removal of arch/ppc
  ...

Fixed up conflicts manually in:
  Documentation/feature-removal-schedule.txt
  arch/powerpc/kernel/pci_32.c
  arch/powerpc/kernel/pci_64.c
  include/asm-powerpc/pci.h
and asked the powerpc people to double-check the result..
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c     |   2
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c           |  59
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c  |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c      |  15
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c        |  29
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c         | 149
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c        |  10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c          |  45
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c        | 476
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spu_restore.c  |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spu_save.c     |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h        |  84
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c       |  18
13 files changed, 754 insertions, 143 deletions
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index 7fb92f23f380..9d7c2ef940a8 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -102,7 +102,7 @@ static void spider_io_flush(const volatile void __iomem *addr)
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
/* Check if it's in allowed range for PIO */
- if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
+ if (vaddr < PHB_IO_BASE || vaddr > PHB_IO_END)
return;
/* Try to find a PTE. If not, clear the paddr, we'll do
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a7f5a7653c62..e4d0c9f42abd 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -183,7 +183,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
spu->slb_replace = 0;
spu_restart_dma(spu);
-
+ spu->stats.slb_flt++;
return 0;
}
@@ -332,6 +332,7 @@ spu_irq_class_2(int irq, void *data)
if (stat & 0x10) /* SPU mailbox threshold */
spu->wbox_callback(spu);
+ spu->stats.class2_intr++;
return stat ? IRQ_HANDLED : IRQ_NONE;
}
@@ -462,8 +463,18 @@ void spu_free(struct spu *spu)
}
EXPORT_SYMBOL_GPL(spu_free);
+static int spu_shutdown(struct sys_device *sysdev)
+{
+ struct spu *spu = container_of(sysdev, struct spu, sysdev);
+
+ spu_free_irqs(spu);
+ spu_destroy_spu(spu);
+ return 0;
+}
+
struct sysdev_class spu_sysdev_class = {
- set_kset_name("spu")
+ set_kset_name("spu"),
+ .shutdown = spu_shutdown,
};
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
@@ -574,6 +585,9 @@ static int __init create_spu(void *data)
spin_unlock_irqrestore(&spu_list_lock, flags);
mutex_unlock(&spu_mutex);
+ spu->stats.utilization_state = SPU_UTIL_IDLE;
+ spu->stats.tstamp = jiffies;
+
goto out;
out_free_irqs:
@@ -586,6 +600,45 @@ out:
return ret;
}
+static const char *spu_state_names[] = {
+ "user", "system", "iowait", "idle"
+};
+
+static unsigned long long spu_acct_time(struct spu *spu,
+ enum spu_utilization_state state)
+{
+ unsigned long long time = spu->stats.times[state];
+
+ if (spu->stats.utilization_state == state)
+ time += jiffies - spu->stats.tstamp;
+
+ return jiffies_to_msecs(time);
+}
+
+
+static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
+{
+ struct spu *spu = container_of(sysdev, struct spu, sysdev);
+
+ return sprintf(buf, "%s %llu %llu %llu %llu "
+ "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+ spu_state_names[spu->stats.utilization_state],
+ spu_acct_time(spu, SPU_UTIL_USER),
+ spu_acct_time(spu, SPU_UTIL_SYSTEM),
+ spu_acct_time(spu, SPU_UTIL_IOWAIT),
+ spu_acct_time(spu, SPU_UTIL_IDLE),
+ spu->stats.vol_ctx_switch,
+ spu->stats.invol_ctx_switch,
+ spu->stats.slb_flt,
+ spu->stats.hash_flt,
+ spu->stats.min_flt,
+ spu->stats.maj_flt,
+ spu->stats.class2_intr,
+ spu->stats.libassist);
+}
+
+static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
+
static int __init init_spu_base(void)
{
int i, ret = 0;
@@ -611,6 +664,8 @@ static int __init init_spu_base(void)
xmon_register_spus(&spu_full_list);
+ spu_add_sysdev_attr(&attr_stat);
+
return 0;
out_unregister_sysdev_class:
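
Note: the spu_stat_show() attribute above emits a single line of per-SPU accounting through a new "stat" sysdev file. Below is a minimal userspace sketch for reading it; the sysfs path is an assumption (the exact location depends on how the "spu" sysdev class is exposed on a given kernel), and the field order follows the sprintf() format above.

#include <stdio.h>

int main(void)
{
	char state[16];
	unsigned long long user_ms, system_ms, iowait_ms, idle_ms;
	unsigned long long vol_cs, invol_cs, slb_flt, hash_flt;
	unsigned long long min_flt, maj_flt, class2_intr, libassist;
	/* assumed path; adjust to wherever the spu sysdev class shows up */
	FILE *f = fopen("/sys/devices/system/spu/spu0/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%15s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		   state, &user_ms, &system_ms, &iowait_ms, &idle_ms,
		   &vol_cs, &invol_cs, &slb_flt, &hash_flt,
		   &min_flt, &maj_flt, &class2_intr, &libassist) == 13)
		printf("spu0: %s, %llu ms user, %llu class 2 interrupts\n",
		       state, user_ms, class2_intr);
	fclose(f);
	return 0;
}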
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index d32db9ffc6eb..07a0e815abf5 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -320,6 +320,12 @@ static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
/* FIXME: what are the side-effects of this? */
prob->dma_querymask_RW = mask;
prob->dma_querytype_RW = mode;
+ /* In the current implementation, the SPU context is always
+ * acquired in runnable state when new bits are added to the
+ * mask (tagwait), so it's sufficient just to mask
+ * dma_tagstatus_R with the 'mask' parameter here.
+ */
+ ctx->csa.prob.dma_tagstatus_R &= mask;
out:
spin_unlock(&ctx->csa.register_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 7c51cb54bca1..6d7bd60f5380 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -23,10 +23,14 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <asm/atomic.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
+
+atomic_t nr_spu_contexts = ATOMIC_INIT(0);
+
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
struct spu_context *ctx;
@@ -53,10 +57,12 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
INIT_LIST_HEAD(&ctx->rq);
if (gang)
spu_gang_add_ctx(gang, ctx);
- ctx->rt_priority = current->rt_priority;
- ctx->policy = current->policy;
- ctx->prio = current->prio;
- INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
+ ctx->cpus_allowed = current->cpus_allowed;
+ spu_set_timeslice(ctx);
+ ctx->stats.execution_state = SPUCTX_UTIL_USER;
+ ctx->stats.tstamp = jiffies;
+
+ atomic_inc(&nr_spu_contexts);
goto out;
out_free:
kfree(ctx);
@@ -76,6 +82,7 @@ void destroy_spu_context(struct kref *kref)
if (ctx->gang)
spu_gang_remove_ctx(ctx->gang, ctx);
BUG_ON(!list_empty(&ctx->rq));
+ atomic_dec(&nr_spu_contexts);
kfree(ctx);
}
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 0f75c07e29d8..e064d0c0d80e 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -33,7 +33,8 @@
* function. Currently, there are a few corner cases that we haven't had
* to handle fortunately.
*/
-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, unsigned *flt)
{
struct vm_area_struct *vma;
unsigned long is_write;
@@ -73,7 +74,8 @@ good_area:
goto bad_area;
}
ret = 0;
- switch (handle_mm_fault(mm, vma, ea, is_write)) {
+ *flt = handle_mm_fault(mm, vma, ea, is_write);
+ switch (*flt) {
case VM_FAULT_MINOR:
current->min_flt++;
break;
@@ -153,6 +155,7 @@ int spufs_handle_class1(struct spu_context *ctx)
{
u64 ea, dsisr, access;
unsigned long flags;
+ unsigned flt = 0;
int ret;
/*
@@ -178,9 +181,17 @@ int spufs_handle_class1(struct spu_context *ctx)
if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
return 0;
+ spuctx_switch_state(ctx, SPUCTX_UTIL_IOWAIT);
+
pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
dsisr, ctx->state);
+ ctx->stats.hash_flt++;
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ ctx->spu->stats.hash_flt++;
+ spu_switch_state(ctx->spu, SPU_UTIL_IOWAIT);
+ }
+
/* we must not hold the lock when entering spu_handle_mm_fault */
spu_release(ctx);
@@ -192,7 +203,7 @@ int spufs_handle_class1(struct spu_context *ctx)
/* hashing failed, so try the actual fault handler */
if (ret)
- ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+ ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
spu_acquire(ctx);
/*
@@ -201,11 +212,23 @@ int spufs_handle_class1(struct spu_context *ctx)
* In case of unhandled error report the problem to user space.
*/
if (!ret) {
+ if (flt == VM_FAULT_MINOR)
+ ctx->stats.min_flt++;
+ else
+ ctx->stats.maj_flt++;
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ if (flt == VM_FAULT_MINOR)
+ ctx->spu->stats.min_flt++;
+ else
+ ctx->spu->stats.maj_flt++;
+ }
+
if (ctx->spu)
ctx->ops->restart_dma(ctx);
} else
spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+ spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
return ret;
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b1e7e2f8a2e9..c2814ea96af2 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -28,6 +28,7 @@
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
+#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/semaphore.h>
@@ -39,6 +40,7 @@
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
+
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
@@ -216,12 +218,12 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
- .open = spufs_mem_open,
- .release = spufs_mem_release,
- .read = spufs_mem_read,
- .write = spufs_mem_write,
- .llseek = generic_file_llseek,
- .mmap = spufs_mem_mmap,
+ .open = spufs_mem_open,
+ .release = spufs_mem_release,
+ .read = spufs_mem_read,
+ .write = spufs_mem_write,
+ .llseek = generic_file_llseek,
+ .mmap = spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
.get_unmapped_area = spufs_get_unmapped_area,
#endif
@@ -1497,14 +1499,15 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
if (status)
ret = status;
}
- spu_release(ctx);
if (ret)
- goto out;
+ goto out_unlock;
ctx->tagwait |= 1 << cmd.tag;
ret = size;
+out_unlock:
+ spu_release(ctx);
out:
return ret;
}
@@ -1515,14 +1518,14 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
u32 free_elements, tagstatus;
unsigned int mask;
+ poll_wait(file, &ctx->mfc_wq, wait);
+
spu_acquire(ctx);
ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
free_elements = ctx->ops->get_mfc_free_elements(ctx);
tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
spu_release(ctx);
- poll_wait(file, &ctx->mfc_wq, wait);
-
mask = 0;
if (free_elements & 0xffff)
mask |= POLLOUT | POLLWRNORM;
@@ -1797,6 +1800,29 @@ static int spufs_info_open(struct inode *inode, struct file *file)
return 0;
}
+static int spufs_caps_show(struct seq_file *s, void *private)
+{
+ struct spu_context *ctx = s->private;
+
+ if (!(ctx->flags & SPU_CREATE_NOSCHED))
+ seq_puts(s, "sched\n");
+ if (!(ctx->flags & SPU_CREATE_ISOLATE))
+ seq_puts(s, "step\n");
+ return 0;
+}
+
+static int spufs_caps_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_caps_fops = {
+ .open = spufs_caps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
char __user *buf, size_t len, loff_t *pos)
{
@@ -2014,7 +2040,105 @@ static const struct file_operations spufs_proxydma_info_fops = {
.read = spufs_proxydma_info_read,
};
+static int spufs_show_tid(struct seq_file *s, void *private)
+{
+ struct spu_context *ctx = s->private;
+
+ seq_printf(s, "%d\n", ctx->tid);
+ return 0;
+}
+
+static int spufs_tid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_tid_fops = {
+ .open = spufs_tid_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const char *ctx_state_names[] = {
+ "user", "system", "iowait", "loaded"
+};
+
+static unsigned long long spufs_acct_time(struct spu_context *ctx,
+ enum spuctx_execution_state state)
+{
+ unsigned long time = ctx->stats.times[state];
+
+ if (ctx->stats.execution_state == state)
+ time += jiffies - ctx->stats.tstamp;
+
+ return jiffies_to_msecs(time);
+}
+
+static unsigned long long spufs_slb_flts(struct spu_context *ctx)
+{
+ unsigned long long slb_flts = ctx->stats.slb_flt;
+
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ slb_flts += (ctx->spu->stats.slb_flt -
+ ctx->stats.slb_flt_base);
+ }
+
+ return slb_flts;
+}
+
+static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
+{
+ unsigned long long class2_intrs = ctx->stats.class2_intr;
+
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ class2_intrs += (ctx->spu->stats.class2_intr -
+ ctx->stats.class2_intr_base);
+ }
+
+ return class2_intrs;
+}
+
+
+static int spufs_show_stat(struct seq_file *s, void *private)
+{
+ struct spu_context *ctx = s->private;
+
+ spu_acquire(ctx);
+ seq_printf(s, "%s %llu %llu %llu %llu "
+ "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+ ctx_state_names[ctx->stats.execution_state],
+ spufs_acct_time(ctx, SPUCTX_UTIL_USER),
+ spufs_acct_time(ctx, SPUCTX_UTIL_SYSTEM),
+ spufs_acct_time(ctx, SPUCTX_UTIL_IOWAIT),
+ spufs_acct_time(ctx, SPUCTX_UTIL_LOADED),
+ ctx->stats.vol_ctx_switch,
+ ctx->stats.invol_ctx_switch,
+ spufs_slb_flts(ctx),
+ ctx->stats.hash_flt,
+ ctx->stats.min_flt,
+ ctx->stats.maj_flt,
+ spufs_class2_intrs(ctx),
+ ctx->stats.libassist);
+ spu_release(ctx);
+ return 0;
+}
+
+static int spufs_stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
+}
+
+static const struct file_operations spufs_stat_fops = {
+ .open = spufs_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
struct tree_descr spufs_dir_contents[] = {
+ { "capabilities", &spufs_caps_fops, 0444, },
{ "mem", &spufs_mem_fops, 0666, },
{ "regs", &spufs_regs_fops, 0666, },
{ "mbox", &spufs_mbox_fops, 0444, },
@@ -2046,10 +2170,13 @@ struct tree_descr spufs_dir_contents[] = {
{ "wbox_info", &spufs_wbox_info_fops, 0444, },
{ "dma_info", &spufs_dma_info_fops, 0444, },
{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+ { "tid", &spufs_tid_fops, 0444, },
+ { "stat", &spufs_stat_fops, 0444, },
{},
};
struct tree_descr spufs_dir_nosched_contents[] = {
+ { "capabilities", &spufs_caps_fops, 0444, },
{ "mem", &spufs_mem_fops, 0666, },
{ "mbox", &spufs_mbox_fops, 0444, },
{ "ibox", &spufs_ibox_fops, 0444, },
@@ -2068,6 +2195,8 @@ struct tree_descr spufs_dir_nosched_contents[] = {
{ "psmap", &spufs_psmap_fops, 0666, },
{ "phys-id", &spufs_id_ops, 0666, },
{ "object-id", &spufs_object_id_ops, 0666, },
+ { "tid", &spufs_tid_fops, 0444, },
+ { "stat", &spufs_stat_fops, 0444, },
{},
};
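
Note: on the per-context side, each spufs context directory gains three new read-only files: "capabilities", "tid" and "stat". A hedged sketch of inspecting them from userspace follows; "/spu/myctx" is an assumed spufs mount point and context directory name, not something this patch defines.

#include <stdio.h>

static void dump_file(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		printf("%s: %s", path, line);
	fclose(f);
}

int main(void)
{
	/* assumed spufs mount point and context directory */
	dump_file("/spu/myctx/capabilities");	/* "sched" and/or "step" */
	dump_file("/spu/myctx/tid");		/* pid of the controlling thread */
	dump_file("/spu/myctx/stat");		/* 13 fields; first is user/system/iowait/loaded */
	return 0;
}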
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 9807206e0219..f37460e5bfd2 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -232,10 +232,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
return dcache_dir_close(inode, file);
}
-const struct inode_operations spufs_dir_inode_operations = {
- .lookup = simple_lookup,
-};
-
const struct file_operations spufs_context_fops = {
.open = dcache_dir_open,
.release = spufs_dir_close,
@@ -269,7 +265,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
goto out_iput;
ctx->flags = flags;
- inode->i_op = &spufs_dir_inode_operations;
+ inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
@@ -386,7 +382,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
if (!gang)
goto out_iput;
- inode->i_op = &spufs_dir_inode_operations;
+ inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
@@ -593,7 +589,7 @@ spufs_create_root(struct super_block *sb, void *data)
if (!inode)
goto out;
- inode->i_op = &spufs_dir_inode_operations;
+ inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
SPUFS_I(inode)->i_ctx = NULL;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 57626600b1a4..58ae13b7de84 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -29,7 +29,8 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
spu = ctx->spu;
pte_fault = spu->dsisr &
(MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
- return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+ return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+ 1 : 0;
}
static int spu_setup_isolated(struct spu_context *ctx)
@@ -142,8 +143,11 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
runcntl = SPU_RUNCNTL_RUNNABLE;
ctx->ops->runcntl_write(ctx, runcntl);
} else {
- spu_start_tick(ctx);
+ unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
ctx->ops->npc_write(ctx, *npc);
+ if (test_thread_flag(TIF_SINGLESTEP))
+ mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
+ out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
}
@@ -155,7 +159,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 * npc,
{
int ret = 0;
- spu_stop_tick(ctx);
*status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx);
spu_release(ctx);
@@ -298,9 +301,22 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
ctx->ops->master_start(ctx);
ctx->event_return = 0;
- ret = spu_acquire_runnable(ctx, 0);
- if (ret)
- return ret;
+ spu_acquire(ctx);
+ if (ctx->state == SPU_STATE_SAVED) {
+ __spu_update_sched_info(ctx);
+
+ ret = spu_activate(ctx, 0);
+ if (ret) {
+ spu_release(ctx);
+ goto out;
+ }
+ } else {
+ /*
+ * We have to update the scheduling priority under active_mutex
+ * to protect against find_victim().
+ */
+ spu_update_sched_info(ctx);
+ }
ret = spu_run_init(ctx, npc);
if (ret) {
@@ -325,16 +341,20 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status);
- if (ret) {
- spu_stop_tick(ctx);
+ if (ret)
goto out2;
- }
continue;
}
ret = spu_process_events(ctx);
} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
- SPU_STATUS_STOPPED_BY_HALT)));
+ SPU_STATUS_STOPPED_BY_HALT |
+ SPU_STATUS_SINGLE_STEP)));
+
+ if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+ (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
+ (ctx->state == SPU_STATE_RUNNABLE))
+ ctx->stats.libassist++;
ctx->ops->master_stop(ctx);
ret = spu_run_fini(ctx, npc, &status);
@@ -344,10 +364,15 @@ out2:
if ((ret == 0) ||
((ret == -ERESTARTSYS) &&
((status & SPU_STATUS_STOPPED_BY_HALT) ||
+ (status & SPU_STATUS_SINGLE_STEP) ||
((status & SPU_STATUS_STOPPED_BY_STOP) &&
(status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
ret = status;
+ /* Note: we don't need to force_sig SIGTRAP on single-step
+ * since we have TIF_SINGLESTEP set, thus the kernel will do
+ * it upon return from the syscall anyway
+ */
if ((status & SPU_STATUS_STOPPED_BY_STOP)
&& (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
force_sig(SIGTRAP, current);
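
Note: the libassist accounting added to spufs_run_spu() above counts SPU stop-and-signal codes in the 0x2100-0x21ff range as library-assisted calls. A small standalone sketch of the same test; the helper name and the mask/shift values passed in main() are illustrative assumptions, standing in for SPU_STATUS_STOPPED_BY_STOP and SPU_STOP_STATUS_SHIFT from <asm/spu.h>.

#include <stdio.h>

static int spu_stop_is_libassist(unsigned int status,
				 unsigned int stopped_by_stop,
				 unsigned int stop_status_shift)
{
	if (!(status & stopped_by_stop))
		return 0;
	/* library-assisted calls use stop-and-signal codes 0x2100..0x21ff */
	return ((status >> stop_status_shift) & 0x3f00) == 0x2100;
}

int main(void)
{
	/* assumed mask and shift, purely for demonstration */
	unsigned int stopped_by_stop = 0x2, shift = 16;

	printf("%d\n", spu_stop_is_libassist(0x21050002, stopped_by_stop, shift));
	return 0;
}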
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3b831e07f1ed..e5b4dd1db286 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -35,6 +35,10 @@
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -43,54 +47,126 @@
#include <asm/spu_priv1.h>
#include "spufs.h"
-#define SPU_TIMESLICE (HZ)
-
struct spu_prio_array {
DECLARE_BITMAP(bitmap, MAX_PRIO);
struct list_head runq[MAX_PRIO];
spinlock_t runq_lock;
struct list_head active_list[MAX_NUMNODES];
struct mutex active_mutex[MAX_NUMNODES];
+ int nr_active[MAX_NUMNODES];
+ int nr_waiting;
};
+static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
-static struct workqueue_struct *spu_sched_wq;
+static struct task_struct *spusched_task;
+static struct timer_list spusched_timer;
+
+/*
+ * Priority of a normal, non-rt, non-niced process (aka nice level 0).
+ */
+#define NORMAL_PRIO 120
+
+/*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+#define SPUSCHED_TICK (10)
-static inline int node_allowed(int node)
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
+ * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ */
+#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
+#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
+
+#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
+#define SCALE_PRIO(x, prio) \
+ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+
+/*
+ * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
+ * [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
+ */
+void spu_set_timeslice(struct spu_context *ctx)
{
- cpumask_t mask;
+ if (ctx->prio < NORMAL_PRIO)
+ ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
+ else
+ ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
+}
- if (!nr_cpus_node(node))
- return 0;
- mask = node_to_cpumask(node);
- if (!cpus_intersects(mask, current->cpus_allowed))
- return 0;
- return 1;
+/*
+ * Update scheduling information from the owning thread.
+ */
+void __spu_update_sched_info(struct spu_context *ctx)
+{
+ /*
+ * 32-bit assignments are atomic on powerpc, and we don't care about
+ * memory ordering here because retrieving the controlling thread is
+ * by definition racy.
+ */
+ ctx->tid = current->pid;
+
+ /*
+ * We do our own priority calculations, so we normally want
+ * ->static_prio to start with. Unfortunately this field
+ * contains junk for threads with a realtime scheduling
+ * policy so we have to look at ->prio in this case.
+ */
+ if (rt_prio(current->prio))
+ ctx->prio = current->prio;
+ else
+ ctx->prio = current->static_prio;
+ ctx->policy = current->policy;
+
+ /*
+ * A lot of places that don't hold active_mutex poke into
+ * cpus_allowed, including grab_runnable_context which
+ * already holds the runq_lock. So abuse runq_lock
+ * to protect this field as well.
+ */
+ spin_lock(&spu_prio->runq_lock);
+ ctx->cpus_allowed = current->cpus_allowed;
+ spin_unlock(&spu_prio->runq_lock);
}
-void spu_start_tick(struct spu_context *ctx)
+void spu_update_sched_info(struct spu_context *ctx)
{
- if (ctx->policy == SCHED_RR) {
- /*
- * Make sure the exiting bit is cleared.
- */
- clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
- mb();
- queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
- }
+ int node = ctx->spu->node;
+
+ mutex_lock(&spu_prio->active_mutex[node]);
+ __spu_update_sched_info(ctx);
+ mutex_unlock(&spu_prio->active_mutex[node]);
}
-void spu_stop_tick(struct spu_context *ctx)
+static int __node_allowed(struct spu_context *ctx, int node)
{
- if (ctx->policy == SCHED_RR) {
- /*
- * While the work can be rearming normally setting this flag
- * makes sure it does not rearm itself anymore.
- */
- set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
- mb();
- cancel_delayed_work(&ctx->sched_work);
+ if (nr_cpus_node(node)) {
+ cpumask_t mask = node_to_cpumask(node);
+
+ if (cpus_intersects(mask, ctx->cpus_allowed))
+ return 1;
}
+
+ return 0;
+}
+
+static int node_allowed(struct spu_context *ctx, int node)
+{
+ int rval;
+
+ spin_lock(&spu_prio->runq_lock);
+ rval = __node_allowed(ctx, node);
+ spin_unlock(&spu_prio->runq_lock);
+
+ return rval;
}
/**
@@ -99,9 +175,18 @@ void spu_stop_tick(struct spu_context *ctx)
*/
static void spu_add_to_active_list(struct spu *spu)
{
- mutex_lock(&spu_prio->active_mutex[spu->node]);
- list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
- mutex_unlock(&spu_prio->active_mutex[spu->node]);
+ int node = spu->node;
+
+ mutex_lock(&spu_prio->active_mutex[node]);
+ spu_prio->nr_active[node]++;
+ list_add_tail(&spu->list, &spu_prio->active_list[node]);
+ mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
+static void __spu_remove_from_active_list(struct spu *spu)
+{
+ list_del_init(&spu->list);
+ spu_prio->nr_active[spu->node]--;
}
/**
@@ -113,7 +198,7 @@ static void spu_remove_from_active_list(struct spu *spu)
int node = spu->node;
mutex_lock(&spu_prio->active_mutex[node]);
- list_del_init(&spu->list);
+ __spu_remove_from_active_list(spu);
mutex_unlock(&spu_prio->active_mutex[node]);
}
@@ -144,6 +229,10 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
spu->number, spu->node);
+
+ ctx->stats.slb_flt_base = spu->stats.slb_flt;
+ ctx->stats.class2_intr_base = spu->stats.class2_intr;
+
spu->ctx = ctx;
spu->flags = 0;
ctx->spu = spu;
@@ -161,8 +250,8 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu->timestamp = jiffies;
spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, ctx);
- spu_add_to_active_list(spu);
ctx->state = SPU_STATE_RUNNABLE;
+ spu_switch_state(spu, SPU_UTIL_SYSTEM);
}
/**
@@ -175,7 +264,8 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
spu->pid, spu->number, spu->node);
- spu_remove_from_active_list(spu);
+ spu_switch_state(spu, SPU_UTIL_IDLE);
+
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
@@ -192,6 +282,11 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
ctx->spu = NULL;
spu->flags = 0;
spu->ctx = NULL;
+
+ ctx->stats.slb_flt +=
+ (spu->stats.slb_flt - ctx->stats.slb_flt_base);
+ ctx->stats.class2_intr +=
+ (spu->stats.class2_intr - ctx->stats.class2_intr_base);
}
/**
@@ -200,20 +295,39 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
*/
static void __spu_add_to_rq(struct spu_context *ctx)
{
- int prio = ctx->prio;
-
- list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
- set_bit(prio, spu_prio->bitmap);
+ /*
+ * Unfortunately this code path can be called from multiple threads
+ * on behalf of a single context due to the way the problem state
+ * mmap support works.
+ *
+ * Fortunately we need to wake up all these threads at the same time
+ * and can simply skip the runqueue addition for every but the first
+ * thread getting into this codepath.
+ *
+ * It's still quite hacky, and long-term we should proxy all other
+ * threads through the owner thread so that spu_run is in control
+ * of all the scheduling activity for a given context.
+ */
+ if (list_empty(&ctx->rq)) {
+ list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+ set_bit(ctx->prio, spu_prio->bitmap);
+ if (!spu_prio->nr_waiting++)
+ __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ }
}
static void __spu_del_from_rq(struct spu_context *ctx)
{
int prio = ctx->prio;
- if (!list_empty(&ctx->rq))
+ if (!list_empty(&ctx->rq)) {
+ if (!--spu_prio->nr_waiting)
+ del_timer(&spusched_timer);
list_del_init(&ctx->rq);
- if (list_empty(&spu_prio->runq[prio]))
- clear_bit(prio, spu_prio->bitmap);
+
+ if (list_empty(&spu_prio->runq[prio]))
+ clear_bit(prio, spu_prio->bitmap);
+ }
}
static void spu_prio_wait(struct spu_context *ctx)
@@ -244,7 +358,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
- if (!node_allowed(node))
+ if (!node_allowed(ctx, node))
continue;
spu = spu_alloc_node(node);
if (spu)
@@ -276,15 +390,15 @@ static struct spu *find_victim(struct spu_context *ctx)
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
- if (!node_allowed(node))
+ if (!node_allowed(ctx, node))
continue;
mutex_lock(&spu_prio->active_mutex[node]);
list_for_each_entry(spu, &spu_prio->active_list[node], list) {
struct spu_context *tmp = spu->ctx;
- if (tmp->rt_priority < ctx->rt_priority &&
- (!victim || tmp->rt_priority < victim->rt_priority))
+ if (tmp->prio > ctx->prio &&
+ (!victim || tmp->prio > victim->prio))
victim = spu->ctx;
}
mutex_unlock(&spu_prio->active_mutex[node]);
@@ -312,7 +426,10 @@ static struct spu *find_victim(struct spu_context *ctx)
victim = NULL;
goto restart;
}
+ spu_remove_from_active_list(spu);
spu_unbind_context(spu, victim);
+ victim->stats.invol_ctx_switch++;
+ spu->stats.invol_ctx_switch++;
mutex_unlock(&victim->state_mutex);
/*
* We need to break out of the wait loop in spu_run
@@ -338,22 +455,30 @@ static struct spu *find_victim(struct spu_context *ctx)
*/
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
-
- if (ctx->spu)
- return 0;
+ spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
do {
struct spu *spu;
+ /*
+ * If there are multiple threads waiting for a single context
+ * only one actually binds the context while the others will
+ * only be able to acquire the state_mutex once the context
+ * already is in runnable state.
+ */
+ if (ctx->spu)
+ return 0;
+
spu = spu_get_idle(ctx);
/*
* If this is a realtime thread we try to get it running by
* preempting a lower priority thread.
*/
- if (!spu && ctx->rt_priority)
+ if (!spu && rt_prio(ctx->prio))
spu = find_victim(ctx);
if (spu) {
spu_bind_context(spu, ctx);
+ spu_add_to_active_list(spu);
return 0;
}
@@ -369,23 +494,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
* Remove the highest priority context on the runqueue and return it
* to the caller. Returns %NULL if no runnable context was found.
*/
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
{
- struct spu_context *ctx = NULL;
+ struct spu_context *ctx;
int best;
spin_lock(&spu_prio->runq_lock);
best = sched_find_first_bit(spu_prio->bitmap);
- if (best < prio) {
+ while (best < prio) {
struct list_head *rq = &spu_prio->runq[best];
- BUG_ON(list_empty(rq));
-
- ctx = list_entry(rq->next, struct spu_context, rq);
- __spu_del_from_rq(ctx);
+ list_for_each_entry(ctx, rq, rq) {
+ /* XXX(hch): check for affinity here as well */
+ if (__node_allowed(ctx, node)) {
+ __spu_del_from_rq(ctx);
+ goto found;
+ }
+ }
+ best++;
}
+ ctx = NULL;
+ found:
spin_unlock(&spu_prio->runq_lock);
-
return ctx;
}
@@ -395,9 +525,12 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
struct spu_context *new = NULL;
if (spu) {
- new = grab_runnable_context(max_prio);
+ new = grab_runnable_context(max_prio, spu->node);
if (new || force) {
+ spu_remove_from_active_list(spu);
spu_unbind_context(spu, ctx);
+ ctx->stats.vol_ctx_switch++;
+ spu->stats.vol_ctx_switch++;
spu_free(spu);
if (new)
wake_up(&new->stop_wq);
@@ -417,7 +550,17 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
*/
void spu_deactivate(struct spu_context *ctx)
{
+ /*
+ * We must never reach this for a nosched context,
+ * but handle the case gracefully instead of panicking.
+ */
+ if (ctx->flags & SPU_CREATE_NOSCHED) {
+ WARN_ON(1);
+ return;
+ }
+
__spu_deactivate(ctx, 1, MAX_PRIO);
+ spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
}
/**
@@ -432,56 +575,178 @@ void spu_yield(struct spu_context *ctx)
{
if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
mutex_lock(&ctx->state_mutex);
- __spu_deactivate(ctx, 0, MAX_PRIO);
+ if (__spu_deactivate(ctx, 0, MAX_PRIO))
+ spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
+ else {
+ spuctx_switch_state(ctx, SPUCTX_UTIL_LOADED);
+ spu_switch_state(ctx->spu, SPU_UTIL_USER);
+ }
mutex_unlock(&ctx->state_mutex);
}
}
-void spu_sched_tick(struct work_struct *work)
+static void spusched_tick(struct spu_context *ctx)
{
- struct spu_context *ctx =
- container_of(work, struct spu_context, sched_work.work);
- int preempted;
+ if (ctx->flags & SPU_CREATE_NOSCHED)
+ return;
+ if (ctx->policy == SCHED_FIFO)
+ return;
+
+ if (--ctx->time_slice)
+ return;
/*
- * If this context is being stopped avoid rescheduling from the
- * scheduler tick because we would block on the state_mutex.
- * The caller will yield the spu later on anyway.
+ * Unfortunately active_mutex ranks outside of state_mutex, so
+ * we have to trylock here. If we fail give the context another
+ * tick and try again.
*/
- if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
- return;
+ if (mutex_trylock(&ctx->state_mutex)) {
+ struct spu *spu = ctx->spu;
+ struct spu_context *new;
- mutex_lock(&ctx->state_mutex);
- preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
- mutex_unlock(&ctx->state_mutex);
+ new = grab_runnable_context(ctx->prio + 1, spu->node);
+ if (new) {
- if (preempted) {
- /*
- * We need to break out of the wait loop in spu_run manually
- * to ensure this context gets put on the runqueue again
- * ASAP.
- */
- wake_up(&ctx->stop_wq);
+ __spu_remove_from_active_list(spu);
+ spu_unbind_context(spu, ctx);
+ ctx->stats.invol_ctx_switch++;
+ spu->stats.invol_ctx_switch++;
+ spu_free(spu);
+ wake_up(&new->stop_wq);
+ /*
+ * We need to break out of the wait loop in
+ * spu_run manually to ensure this context
+ * gets put on the runqueue again ASAP.
+ */
+ wake_up(&ctx->stop_wq);
+ }
+ spu_set_timeslice(ctx);
+ mutex_unlock(&ctx->state_mutex);
} else {
- spu_start_tick(ctx);
+ ctx->time_slice++;
}
}
-int __init spu_sched_init(void)
+/**
+ * count_active_contexts - count nr of active tasks
+ *
+ * Return the number of tasks currently running or waiting to run.
+ *
+ * Note that we don't take runq_lock / active_mutex here. Reading
+ * a single 32bit value is atomic on powerpc, and we don't care
+ * about memory ordering issues here.
+ */
+static unsigned long count_active_contexts(void)
{
- int i;
+ int nr_active = 0, node;
- spu_sched_wq = create_singlethread_workqueue("spusched");
- if (!spu_sched_wq)
- return 1;
+ for (node = 0; node < MAX_NUMNODES; node++)
+ nr_active += spu_prio->nr_active[node];
+ nr_active += spu_prio->nr_waiting;
- spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
- if (!spu_prio) {
- printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
- __FUNCTION__);
- destroy_workqueue(spu_sched_wq);
- return 1;
+ return nr_active;
+}
+
+/**
+ * spu_calc_load - given tick count, update the avenrun load estimates.
+ * @tick: tick count
+ *
+ * No locking against reading these values from userspace, as for
+ * the CPU loadavg code.
+ */
+static void spu_calc_load(unsigned long ticks)
+{
+ unsigned long active_tasks; /* fixed-point */
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+
+ if (unlikely(count < 0)) {
+ active_tasks = count_active_contexts() * FIXED_1;
+ do {
+ CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+ CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+ CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+ count += LOAD_FREQ;
+ } while (count < 0);
}
+}
+
+static void spusched_wake(unsigned long data)
+{
+ mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ wake_up_process(spusched_task);
+ spu_calc_load(SPUSCHED_TICK);
+}
+
+static int spusched_thread(void *unused)
+{
+ struct spu *spu, *next;
+ int node;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ mutex_lock(&spu_prio->active_mutex[node]);
+ list_for_each_entry_safe(spu, next,
+ &spu_prio->active_list[node],
+ list)
+ spusched_tick(spu->ctx);
+ mutex_unlock(&spu_prio->active_mutex[node]);
+ }
+ }
+
+ return 0;
+}
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+static int show_spu_loadavg(struct seq_file *s, void *private)
+{
+ int a, b, c;
+
+ a = spu_avenrun[0] + (FIXED_1/200);
+ b = spu_avenrun[1] + (FIXED_1/200);
+ c = spu_avenrun[2] + (FIXED_1/200);
+
+ /*
+ * Note that last_pid doesn't really make much sense for the
+ * SPU loadavg (it even seems very odd on the CPU side..),
+ * but we include it here to have a 100% compatible interface.
+ */
+ seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+ LOAD_INT(a), LOAD_FRAC(a),
+ LOAD_INT(b), LOAD_FRAC(b),
+ LOAD_INT(c), LOAD_FRAC(c),
+ count_active_contexts(),
+ atomic_read(&nr_spu_contexts),
+ current->nsproxy->pid_ns->last_pid);
+ return 0;
+}
+
+static int spu_loadavg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_spu_loadavg, NULL);
+}
+
+static const struct file_operations spu_loadavg_fops = {
+ .open = spu_loadavg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init spu_sched_init(void)
+{
+ struct proc_dir_entry *entry;
+ int err = -ENOMEM, i;
+
+ spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
+ if (!spu_prio)
+ goto out;
+
for (i = 0; i < MAX_PRIO; i++) {
INIT_LIST_HEAD(&spu_prio->runq[i]);
__clear_bit(i, spu_prio->bitmap);
@@ -492,7 +757,30 @@ int __init spu_sched_init(void)
INIT_LIST_HEAD(&spu_prio->active_list[i]);
}
spin_lock_init(&spu_prio->runq_lock);
+
+ setup_timer(&spusched_timer, spusched_wake, 0);
+
+ spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+ if (IS_ERR(spusched_task)) {
+ err = PTR_ERR(spusched_task);
+ goto out_free_spu_prio;
+ }
+
+ entry = create_proc_entry("spu_loadavg", 0, NULL);
+ if (!entry)
+ goto out_stop_kthread;
+ entry->proc_fops = &spu_loadavg_fops;
+
+ pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
+ SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
return 0;
+
+ out_stop_kthread:
+ kthread_stop(spusched_task);
+ out_free_spu_prio:
+ kfree(spu_prio);
+ out:
+ return err;
}
void __exit spu_sched_exit(void)
@@ -500,6 +788,11 @@ void __exit spu_sched_exit(void)
struct spu *spu, *tmp;
int node;
+ remove_proc_entry("spu_loadavg", NULL);
+
+ del_timer_sync(&spusched_timer);
+ kthread_stop(spusched_task);
+
for (node = 0; node < MAX_NUMNODES; node++) {
mutex_lock(&spu_prio->active_mutex[node]);
list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
@@ -510,5 +803,4 @@ void __exit spu_sched_exit(void)
mutex_unlock(&spu_prio->active_mutex[node]);
}
kfree(spu_prio);
- destroy_workqueue(spu_sched_wq);
}
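
Note: the scheduler rework also publishes a load average at /proc/spu_loadavg, in the same layout as /proc/loadavg. A minimal sketch that parses the output of show_spu_loadavg() above; the running/total pair corresponds to count_active_contexts() and nr_spu_contexts.

#include <stdio.h>

int main(void)
{
	double avg1, avg5, avg15;
	long running;
	int total, last_pid;
	FILE *f = fopen("/proc/spu_loadavg", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lf %lf %lf %ld/%d %d",
		   &avg1, &avg5, &avg15, &running, &total, &last_pid) == 6)
		printf("spu load: %.2f %.2f %.2f, %ld running/waiting of %d contexts\n",
		       avg1, avg5, avg15, running, total);
	fclose(f);
	return 0;
}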
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
index 0bf723dcd677..4e19ed7a0756 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -296,7 +296,7 @@ static inline void restore_complete(void)
* This code deviates from the documented sequence in the
* following aspects:
*
- * 1. The EA for LSCSA is passed from PPE in the
+ * 1. The EA for LSCSA is passed from PPE in the
* signal notification channels.
* 2. The register spill area is pulled by SPU
* into LS, rather than pushed by PPE.
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c
index 196033b8a579..ae95cc1701e9 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_save.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_save.c
@@ -44,7 +44,7 @@ static inline void save_event_mask(void)
* Read the SPU_RdEventMsk channel and save to the LSCSA.
*/
offset = LSCSA_QW_OFFSET(event_mask);
- regs_spill[offset].slot[0] = spu_readch(SPU_RdEventStatMask);
+ regs_spill[offset].slot[0] = spu_readch(SPU_RdEventMask);
}
static inline void save_tag_mask(void)
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 47617e8014a5..08b3530288ac 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
+#include <linux/cpumask.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
@@ -39,9 +40,17 @@ enum {
struct spu_context_ops;
struct spu_gang;
-/* ctx->sched_flags */
-enum {
- SPU_SCHED_EXITING = 0,
+/*
+ * This is the state for spu utilization reporting to userspace.
+ * Because this state is visible to userspace it must never change and needs
+ * to be kept strictly separate from any internal state kept by the kernel.
+ */
+enum spuctx_execution_state {
+ SPUCTX_UTIL_USER = 0,
+ SPUCTX_UTIL_SYSTEM,
+ SPUCTX_UTIL_IOWAIT,
+ SPUCTX_UTIL_LOADED,
+ SPUCTX_UTIL_MAX
};
struct spu_context {
@@ -81,13 +90,34 @@ struct spu_context {
struct list_head gang_list;
struct spu_gang *gang;
+ /* owner thread */
+ pid_t tid;
+
/* scheduler fields */
- struct list_head rq;
- struct delayed_work sched_work;
+ struct list_head rq;
+ unsigned int time_slice;
unsigned long sched_flags;
- unsigned long rt_priority;
+ cpumask_t cpus_allowed;
int policy;
int prio;
+
+ /* statistics */
+ struct {
+ /* updates protected by ctx->state_mutex */
+ enum spuctx_execution_state execution_state;
+ unsigned long tstamp; /* time of last ctx switch */
+ unsigned long times[SPUCTX_UTIL_MAX];
+ unsigned long long vol_ctx_switch;
+ unsigned long long invol_ctx_switch;
+ unsigned long long min_flt;
+ unsigned long long maj_flt;
+ unsigned long long hash_flt;
+ unsigned long long slb_flt;
+ unsigned long long slb_flt_base; /* # at last ctx switch */
+ unsigned long long class2_intr;
+ unsigned long long class2_intr_base; /* # at last ctx switch */
+ unsigned long long libassist;
+ } stats;
};
struct spu_gang {
@@ -177,6 +207,7 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
int spufs_handle_class1(struct spu_context *ctx);
/* context management */
+extern atomic_t nr_spu_contexts;
static inline void spu_acquire(struct spu_context *ctx)
{
mutex_lock(&ctx->state_mutex);
@@ -200,9 +231,9 @@ void spu_acquire_saved(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
-void spu_start_tick(struct spu_context *ctx);
-void spu_stop_tick(struct spu_context *ctx);
-void spu_sched_tick(struct work_struct *work);
+void spu_set_timeslice(struct spu_context *ctx);
+void spu_update_sched_info(struct spu_context *ctx);
+void __spu_update_sched_info(struct spu_context *ctx);
int __init spu_sched_init(void);
void __exit spu_sched_exit(void);
@@ -210,7 +241,7 @@ extern char *isolated_loader;
/*
* spufs_wait
- * Same as wait_event_interruptible(), except that here
+ * Same as wait_event_interruptible(), except that here
* we need to call spu_release(ctx) before sleeping, and
* then spu_acquire(ctx) when awoken.
*/
@@ -256,4 +287,37 @@ struct spufs_coredump_reader {
extern struct spufs_coredump_reader spufs_coredump_read[];
extern int spufs_coredump_num_notes;
+/*
+ * This function is a little bit too large for an inline, but
+ * as fault.c is built into the kernel we can't move it out of
+ * line.
+ */
+static inline void spuctx_switch_state(struct spu_context *ctx,
+ enum spuctx_execution_state new_state)
+{
+ WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+
+ if (ctx->stats.execution_state != new_state) {
+ unsigned long curtime = jiffies;
+
+ ctx->stats.times[ctx->stats.execution_state] +=
+ curtime - ctx->stats.tstamp;
+ ctx->stats.tstamp = curtime;
+ ctx->stats.execution_state = new_state;
+ }
+}
+
+static inline void spu_switch_state(struct spu *spu,
+ enum spuctx_execution_state new_state)
+{
+ if (spu->stats.utilization_state != new_state) {
+ unsigned long curtime = jiffies;
+
+ spu->stats.times[spu->stats.utilization_state] +=
+ curtime - spu->stats.tstamp;
+ spu->stats.tstamp = curtime;
+ spu->stats.utilization_state = new_state;
+ }
+}
+
#endif
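
Note: the two inline helpers above share one accounting pattern: on every state transition, the jiffies elapsed since the last transition are charged to the state being left. A standalone (non-kernel) sketch of that pattern, with a fake jiffies argument standing in for the kernel's counter:

#include <stdio.h>

enum { ST_USER, ST_SYSTEM, ST_IOWAIT, ST_LOADED, ST_MAX };

static unsigned long times[ST_MAX];
static unsigned long tstamp;
static int cur_state = ST_USER;

static void switch_state(int new_state, unsigned long fake_jiffies)
{
	if (cur_state != new_state) {
		times[cur_state] += fake_jiffies - tstamp;
		tstamp = fake_jiffies;
		cur_state = new_state;
	}
}

int main(void)
{
	switch_state(ST_SYSTEM, 10);	/* 10 ticks charged to user */
	switch_state(ST_LOADED, 25);	/* 15 ticks charged to system */
	switch_state(ST_USER, 100);	/* 75 ticks charged to loaded */
	printf("user=%lu system=%lu loaded=%lu\n",
	       times[ST_USER], times[ST_SYSTEM], times[ST_LOADED]);
	return 0;
}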
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 71a0b41adb8c..9c506ba08cdc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -70,7 +70,7 @@
}
#endif /* debug */
-#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
+#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
static inline void acquire_spu_lock(struct spu *spu)
{
@@ -387,6 +387,19 @@ static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}
+static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save the Prxy_TagStatus register in the CSA.
+ *
+ * It is unnecessary to restore dma_tagstatus_R, however,
+ * dma_tagstatus_R in the CSA is accessed via backing_ops, so
+ * we must save it.
+ */
+ csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
+}
+
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -1812,6 +1825,7 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
save_mfc_queues(prev, spu); /* Step 19. */
save_ppu_querymask(prev, spu); /* Step 20. */
save_ppu_querytype(prev, spu); /* Step 21. */
+ save_ppu_tagstatus(prev, spu); /* NEW. */
save_mfc_csr_tsq(prev, spu); /* Step 22. */
save_mfc_csr_cmd(prev, spu); /* Step 23. */
save_mfc_csr_ato(prev, spu); /* Step 24. */
@@ -1930,7 +1944,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
reset_spu_privcntl(prev, spu); /* Step 16. */
reset_spu_lslr(prev, spu); /* Step 17. */
setup_mfc_sr1(prev, spu); /* Step 18. */
- spu_invalidate_slbs(spu); /* Step 19. */
+ spu_invalidate_slbs(spu); /* Step 19. */
reset_ch_part1(prev, spu); /* Step 20. */
reset_ch_part2(prev, spu); /* Step 21. */
enable_interrupts(prev, spu); /* Step 22. */