Diffstat (limited to 'arch/powerpc/platforms/cell')

 arch/powerpc/platforms/cell/cpufreq_spudemand.c | 72
 arch/powerpc/platforms/cell/iommu.c             |  4
 arch/powerpc/platforms/cell/setup.c             |  7
 arch/powerpc/platforms/cell/spider-pic.c        |  2
 arch/powerpc/platforms/cell/spu_base.c          |  4
 arch/powerpc/platforms/cell/spu_manage.c        |  3
 arch/powerpc/platforms/cell/spufs/file.c        |  2
 arch/powerpc/platforms/cell/spufs/run.c         |  2
 arch/powerpc/platforms/cell/spufs/sched.c       |  2
 9 files changed, 45 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 82607d621aca..88301e53f085 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -85,61 +85,57 @@ static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
cancel_delayed_work_sync(&info->work);
}
-static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
+static int spu_gov_start(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
- struct spu_gov_info_struct *info, *affected_info;
+ struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+ struct spu_gov_info_struct *affected_info;
int i;
- int ret = 0;
- info = &per_cpu(spu_gov_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if (!cpu_online(cpu)) {
- printk(KERN_ERR "cpu %d is not online\n", cpu);
- ret = -EINVAL;
- break;
- }
+ if (!cpu_online(cpu)) {
+ printk(KERN_ERR "cpu %d is not online\n", cpu);
+ return -EINVAL;
+ }
- if (!policy->cur) {
- printk(KERN_ERR "no cpu specified in policy\n");
- ret = -EINVAL;
- break;
- }
+ if (!policy->cur) {
+ printk(KERN_ERR "no cpu specified in policy\n");
+ return -EINVAL;
+ }
- /* initialize spu_gov_info for all affected cpus */
- for_each_cpu(i, policy->cpus) {
- affected_info = &per_cpu(spu_gov_info, i);
- affected_info->policy = policy;
- }
+ /* initialize spu_gov_info for all affected cpus */
+ for_each_cpu(i, policy->cpus) {
+ affected_info = &per_cpu(spu_gov_info, i);
+ affected_info->policy = policy;
+ }
- info->poll_int = POLL_TIME;
+ info->poll_int = POLL_TIME;
- /* setup timer */
- spu_gov_init_work(info);
+ /* setup timer */
+ spu_gov_init_work(info);
- break;
+ return 0;
+}
- case CPUFREQ_GOV_STOP:
- /* cancel timer */
- spu_gov_cancel_work(info);
+static void spu_gov_stop(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+ int i;
- /* clean spu_gov_info for all affected cpus */
- for_each_cpu (i, policy->cpus) {
- info = &per_cpu(spu_gov_info, i);
- info->policy = NULL;
- }
+ /* cancel timer */
+ spu_gov_cancel_work(info);
- break;
+ /* clean spu_gov_info for all affected cpus */
+ for_each_cpu (i, policy->cpus) {
+ info = &per_cpu(spu_gov_info, i);
+ info->policy = NULL;
}
-
- return ret;
}
static struct cpufreq_governor spu_governor = {
.name = "spudemand",
- .governor = spu_gov_govern,
+ .start = spu_gov_start,
+ .stop = spu_gov_stop,
.owner = THIS_MODULE,
};
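
For reference, a minimal sketch of the governor interface this file is converted to: since the cpufreq governor rework, the core calls one dedicated callback per lifecycle event instead of a single .governor(policy, event) handler switching on CPUFREQ_GOV_START/STOP. The "demo" names below are hypothetical and not part of this patch; registration still goes through cpufreq_register_governor().

#include <linux/cpufreq.h>
#include <linux/module.h>

/* Hypothetical governor skeleton using the per-event callbacks. */
static int demo_gov_start(struct cpufreq_policy *policy)
{
	/* set up per-policy state, arm any polling work */
	return 0;
}

static void demo_gov_stop(struct cpufreq_policy *policy)
{
	/* cancel polling work and tear down what start() set up */
}

static struct cpufreq_governor demo_governor = {
	.name	= "demo",
	.start	= demo_gov_start,
	.stop	= demo_gov_stop,
	.owner	= THIS_MODULE,
};

static int __init demo_gov_init(void)
{
	return cpufreq_register_governor(&demo_governor);
}

static void __exit demo_gov_exit(void)
{
	cpufreq_unregister_governor(&demo_governor);
}

module_init(demo_gov_init);
module_exit(demo_gov_exit);
MODULE_LICENSE("GPL");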
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 14a582b21274..9027d7c48507 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -178,7 +178,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
* default for now.*/
#ifdef CELL_IOMMU_STRICT_PROTECTION
/* to avoid referencing a global, we use a trick here to setup the
- * protection bit. "prot" is setup to be 3 fields of 4 bits apprended
+ * protection bit. "prot" is setup to be 3 fields of 4 bits appended
* together for each of the 3 supported direction values. It is then
* shifted left so that the fields matching the desired direction
* lands on the appropriate bits, and other bits are masked out.
@@ -338,7 +338,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
start_seg = base >> IO_SEGMENT_SHIFT;
segments = size >> IO_SEGMENT_SHIFT;
pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
- /* PTEs for each segment must start on a 4K bounday */
+ /* PTEs for each segment must start on a 4K boundary */
pages_per_segment = max(pages_per_segment,
(1 << 12) / sizeof(unsigned long));
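
The "prot" trick mentioned in the first iommu.c hunk packs one 4-bit protection field per DMA direction into a single constant, so no global table is needed. A standalone userspace sketch, assuming the mainline value prot = 0xc48 and the IOPTE read/write permission bits at positions 62 and 63 (CBE_IOPTE_PP_R/_W); the demo itself is illustrative only:

#include <stdio.h>

/* Assumed to mirror CBE_IOPTE_PP_W/_R from the cell code. */
#define IOPTE_PP_W 0x8000000000000000ull	/* device may write */
#define IOPTE_PP_R 0x4000000000000000ull	/* device may read */

int main(void)
{
	/* three 4-bit fields: 0xc (RW), 0x4 (R), 0x8 (W) */
	const unsigned long long prot = 0xc48;
	const char *name[] = { "BIDIRECTIONAL", "TO_DEVICE", "FROM_DEVICE" };

	/* enum dma_data_direction: 0 = bidirectional, 1 = to device,
	 * 2 = from device */
	for (int dir = 0; dir < 3; dir++) {
		/* the shift lands this direction's field on bits 63..60;
		 * the mask keeps only the R/W bits, discarding the rest */
		unsigned long long pte = (prot << (52 + 4 * dir)) &
					 (IOPTE_PP_W | IOPTE_PP_R);

		printf("DMA_%-13s -> %c%c\n", name[dir],
		       pte & IOPTE_PP_W ? 'W' : '-',
		       pte & IOPTE_PP_R ? 'R' : '-');
	}
	return 0;
}

Running it prints RW for bidirectional mappings, R only for DMA_TO_DEVICE and W only for DMA_FROM_DEVICE, which is what the in-kernel shift-and-mask computes per PTE.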
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 36cff28d0293..d3543e68efe8 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -255,13 +255,10 @@ static void __init cell_setup_arch(void)
static int __init cell_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "IBM,CBEA") &&
- !of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ if (!of_machine_is_compatible("IBM,CBEA") &&
+ !of_machine_is_compatible("IBM,CPBW-1.0"))
return 0;
- hpte_init_native();
pm_power_off = rtas_power_off;
return 1;
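
The probe conversion here (and the matching one in spu_manage.c below) uses of_machine_is_compatible(), which checks the given string against the root device-tree node's "compatible" property and returns nonzero on a match, so the flat-DT root lookup is no longer needed. A hedged sketch of the pattern, with the compatible strings taken from this patch:

#include <linux/of.h>

static int __init demo_cell_probe(void)	/* hypothetical name */
{
	/* bail out early when not running on a matching machine */
	if (!of_machine_is_compatible("IBM,CBEA") &&
	    !of_machine_is_compatible("IBM,CPBW-1.0"))
		return 0;

	/* ...platform-specific setup goes here... */
	return 1;
}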
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 54ee5743cb72..d06dcac66fcb 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -217,7 +217,7 @@ static void spider_irq_cascade(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-/* For hooking up the cascace we have a problem. Our device-tree is
+/* For hooking up the cascade we have a problem. Our device-tree is
* crap and we don't know on which BE iic interrupt we are hooked on at
* least not the "standard" way. We can reconstitute it based on two
* informations though: which BE node we are connected to and whether
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 3cbe38fad609..bb4a8e07c229 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -69,7 +69,7 @@ static DEFINE_SPINLOCK(spu_lock);
* spu_full_list_lock and spu_full_list_mutex held, while iterating
* through it requires either of these locks.
*
- * In addition spu_full_list_lock protects all assignmens to
+ * In addition spu_full_list_lock protects all assignments to
* spu->mm.
*/
static LIST_HEAD(spu_full_list);
@@ -253,7 +253,7 @@ static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
* Setup the SPU kernel SLBs, in preparation for a context save/restore. We
* need to map both the context save area, and the save/restore code.
*
- * Because the lscsa and code may cross segment boundaires, we check to see
+ * Because the lscsa and code may cross segment boundaries, we check to see
* if mappings are required for the start and end of each range. We currently
* assume that the mappings are smaller that one segment - if not, something
* is seriously wrong.
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index c3327f3d8cf7..21b4bfb97200 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -535,8 +535,7 @@ static int __init init_affinity(void)
if (of_has_vicinity()) {
init_affinity_fw();
} else {
- long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ if (of_machine_is_compatible("IBM,CPBW-1.0"))
init_affinity_qs20_harcoded();
else
printk("No affinity configuration found\n");
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 2936a0044c04..06254467e4dd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -866,7 +866,7 @@ void spufs_wbox_callback(struct spu *spu)
* - end of the mapped area
*
* If the file is opened without O_NONBLOCK, we wait here until
- * space is availabyl, but return when we have been able to
+ * space is available, but return when we have been able to
* write something.
*/
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 9f79004e6d6f..cfacbee24d7b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -435,7 +435,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
/* Note: we don't need to force_sig SIGTRAP on single-step
* since we have TIF_SINGLESTEP set, thus the kernel will do
- * it upon return from the syscall anyawy
+ * it upon return from the syscall anyway.
*/
if (unlikely(status & SPU_STATUS_SINGLE_STEP))
ret = -ERESTARTSYS;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 998f632e7cce..460f5f31d5cb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -622,7 +622,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
/**
* find_victim - find a lower priority context to preempt
- * @ctx: canidate context for running
+ * @ctx: candidate context for running
*
* Returns the freed physical spu to run the new context on.
*/