Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig             2
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c          6
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c           173
-rw-r--r--  arch/powerpc/platforms/cell/setup.c             5
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c    17
-rw-r--r--  arch/powerpc/platforms/cell/spu_manage.c       10
-rw-r--r--  arch/powerpc/platforms/cell/spu_syscalls.c      1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c       21
8 files changed, 47 insertions, 188 deletions
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 4b2f114f3116..0f7c8241912b 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -27,7 +27,7 @@ config PPC_IBM_CELL_BLADE
depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN
select PPC_CELL_NATIVE
select PPC_OF_PLATFORM_PCI
- select PCI
+ select FORCE_PCI
select MMIO_NVRAM
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index b926438d73af..27ee65b89099 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -53,7 +53,7 @@ static struct cbe_regs_map *cbe_find_map(struct device_node *np)
int i;
struct device_node *tmp_np;
- if (strcasecmp(np->type, "spe")) {
+ if (!of_node_is_type(np, "spe")) {
for (i = 0; i < cbe_regs_map_count; i++)
if (cbe_regs_maps[i].cpu_node == np ||
cbe_regs_maps[i].be_node == np)
@@ -70,8 +70,8 @@ static struct cbe_regs_map *cbe_find_map(struct device_node *np)
tmp_np = tmp_np->parent;
/* on a correct devicetree we wont get up to root */
BUG_ON(!tmp_np);
- } while (strcasecmp(tmp_np->type, "cpu") &&
- strcasecmp(tmp_np->type, "be"));
+ } while (!of_node_is_type(tmp_np, "cpu") &&
+ !of_node_is_type(tmp_np, "be"));
np->data = cbe_find_map(tmp_np);
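
of_node_is_type() returns true on a match, the opposite sense of strcasecmp(), so each converted test gains a negation while the && joining them stays. A minimal sketch of the resulting walk, reusing the node types from cbe_find_map() above (the helper name is invented for the illustration):

#include <linux/of.h>
#include <linux/bug.h>

/* Walk up from an "spe" node to the enclosing "cpu" or "be" node. */
static struct device_node *cbe_cpu_or_be_parent(struct device_node *np)
{
	struct device_node *tmp_np = np;

	do {
		tmp_np = tmp_np->parent;
		/* on a correct device tree we won't get up to root */
		BUG_ON(!tmp_np);
	} while (!of_node_is_type(tmp_np, "cpu") &&
		 !of_node_is_type(tmp_np, "be"));

	return tmp_np;
}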
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 12352a58072a..54e012e1f720 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -544,9 +544,10 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
static unsigned long cell_dma_nommu_offset;
static unsigned long dma_iommu_fixed_base;
+static bool cell_iommu_enabled;
/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
-static int iommu_fixed_is_weak;
+bool iommu_fixed_is_weak;
static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
@@ -568,103 +569,19 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
return &window->table;
}
-/* A coherent allocation implies strong ordering */
-
-static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- if (iommu_fixed_is_weak)
- return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
- size, dma_handle,
- device_to_mask(dev), flag,
- dev_to_node(dev));
- else
- return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
- attrs);
-}
-
-static void dma_fixed_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- if (iommu_fixed_is_weak)
- iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
- dma_handle);
- else
- dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-}
-
-static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_nommu_ops.map_page(dev, page, offset, size,
- direction, attrs);
- else
- return iommu_map_page(dev, cell_get_iommu_table(dev), page,
- offset, size, device_to_mask(dev),
- direction, attrs);
-}
-
-static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
- attrs);
- else
- iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
- direction, attrs);
-}
-
-static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
- else
- return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
- nents, device_to_mask(dev),
- direction, attrs);
-}
-
-static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
- else
- ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
- direction, attrs);
-}
-
-static int dma_suported_and_switch(struct device *dev, u64 dma_mask);
-
-static const struct dma_map_ops dma_iommu_fixed_ops = {
- .alloc = dma_fixed_alloc_coherent,
- .free = dma_fixed_free_coherent,
- .map_sg = dma_fixed_map_sg,
- .unmap_sg = dma_fixed_unmap_sg,
- .dma_supported = dma_suported_and_switch,
- .map_page = dma_fixed_map_page,
- .unmap_page = dma_fixed_unmap_page,
- .mapping_error = dma_iommu_mapping_error,
-};
+static u64 cell_iommu_get_fixed_address(struct device *dev);
static void cell_dma_dev_setup(struct device *dev)
{
- if (get_pci_dma_ops() == &dma_iommu_ops)
+ if (cell_iommu_enabled) {
+ u64 addr = cell_iommu_get_fixed_address(dev);
+
+ if (addr != OF_BAD_ADDR)
+ dev->archdata.dma_offset = addr + dma_iommu_fixed_base;
set_iommu_table_base(dev, cell_get_iommu_table(dev));
- else if (get_pci_dma_ops() == &dma_nommu_ops)
- set_dma_offset(dev, cell_dma_nommu_offset);
- else
- BUG();
+ } else {
+ dev->archdata.dma_offset = cell_dma_nommu_offset;
+ }
}
static void cell_pci_dma_dev_setup(struct pci_dev *dev)
@@ -681,11 +598,9 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;
- /* We use the PCI DMA ops */
- dev->dma_ops = get_pci_dma_ops();
-
+ if (cell_iommu_enabled)
+ dev->dma_ops = &dma_iommu_ops;
cell_dma_dev_setup(dev);
-
return 0;
}
@@ -810,7 +725,6 @@ static int __init cell_iommu_init_disabled(void)
unsigned long base = 0, size;
/* When no iommu is present, we use direct DMA ops */
- set_pci_dma_ops(&dma_nommu_ops);
/* First make sure all IOC translation is turned off */
cell_disable_iommus();
@@ -895,7 +809,11 @@ static u64 cell_iommu_get_fixed_address(struct device *dev)
const u32 *ranges = NULL;
int i, len, best, naddr, nsize, pna, range_size;
+ /* We can be called for platform devices that have no of_node */
np = of_node_get(dev->of_node);
+ if (!np)
+ goto out;
+
while (1) {
naddr = of_n_addr_cells(np);
nsize = of_n_size_cells(np);
@@ -946,27 +864,10 @@ out:
return dev_addr;
}
-static int dma_suported_and_switch(struct device *dev, u64 dma_mask)
+static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
{
- if (dma_mask == DMA_BIT_MASK(64) &&
- cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
- u64 addr = cell_iommu_get_fixed_address(dev) +
- dma_iommu_fixed_base;
- dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
- dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
- set_dma_ops(dev, &dma_iommu_fixed_ops);
- set_dma_offset(dev, addr);
- return 1;
- }
-
- if (dma_iommu_dma_supported(dev, dma_mask)) {
- dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
- set_dma_ops(dev, get_pci_dma_ops());
- cell_dma_dev_setup(dev);
- return 1;
- }
-
- return 0;
+ return mask == DMA_BIT_MASK(64) &&
+ cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
}
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
@@ -1120,9 +1021,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
}
- dma_iommu_ops.dma_supported = dma_suported_and_switch;
- set_pci_dma_ops(&dma_iommu_ops);
-
+ cell_pci_controller_ops.iommu_bypass_supported =
+ cell_pci_iommu_bypass_supported;
return 0;
}
@@ -1143,7 +1043,7 @@ static int __init setup_iommu_fixed(char *str)
pciep = of_find_node_by_type(NULL, "pcie-endpoint");
if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
- iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
+ iommu_fixed_is_weak = true;
of_node_put(pciep);
@@ -1151,26 +1051,6 @@ static int __init setup_iommu_fixed(char *str)
}
__setup("iommu_fixed=", setup_iommu_fixed);
-static u64 cell_dma_get_required_mask(struct device *dev)
-{
- const struct dma_map_ops *dma_ops;
-
- if (!dev->dma_mask)
- return 0;
-
- if (!iommu_fixed_disabled &&
- cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
- return DMA_BIT_MASK(64);
-
- dma_ops = get_dma_ops(dev);
- if (dma_ops->get_required_mask)
- return dma_ops->get_required_mask(dev);
-
- WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
-
- return DMA_BIT_MASK(64);
-}
-
static int __init cell_iommu_init(void)
{
struct device_node *np;
@@ -1187,10 +1067,9 @@ static int __init cell_iommu_init(void)
/* Setup various callbacks */
cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
- ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
- goto bail;
+ goto done;
/* Create an iommu for each /axon node. */
for_each_node_by_name(np, "axon") {
@@ -1207,10 +1086,10 @@ static int __init cell_iommu_init(void)
continue;
cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
}
-
+ done:
/* Setup default PCI iommu ops */
set_pci_dma_ops(&dma_iommu_ops);
-
+ cell_iommu_enabled = true;
bail:
/* Register callbacks on OF platform device addition/removal
* to handle linking them to the right DMA operations
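
With the custom dma_iommu_fixed_ops gone, the platform only answers whether a device may bypass translation; the generic powerpc dma_iommu_ops makes the per-device decision. A simplified sketch of that decision, assuming the bool (*iommu_bypass_supported)(struct pci_dev *, u64) hook shape that cell_pci_iommu_bypass_supported() above is assigned to (the helper below and the exact call chain in the generic code are paraphrased, not quoted):

#include <linux/pci.h>
#include <asm/pci-bridge.h>

/* Illustration: ask the host bridge whether this device may use direct
 * (bypass) DMA for the requested mask. */
static bool bypass_ok(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	return phb->controller_ops.iommu_bypass_supported &&
	       phb->controller_ops.iommu_bypass_supported(pdev, dma_mask);
}

When this says yes, DMA goes through the direct path offset by the dev->archdata.dma_offset that cell_dma_dev_setup() derives from the fixed window base; otherwise the dynamic iommu table installed with set_iommu_table_base() is used.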
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 7d31b8d14661..e2e1371a71e2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -131,7 +131,7 @@ static int cell_setup_phb(struct pci_controller *phb)
np = phb->dn;
model = of_get_property(np, "model", NULL);
- if (model == NULL || strcmp(np->name, "pci"))
+ if (model == NULL || !of_node_name_eq(np, "pci"))
return 0;
/* Setup workarounds for spider */
@@ -168,8 +168,7 @@ static int __init cell_publish_devices(void)
* platform devices for the PCI host bridges
*/
for_each_child_of_node(root, np) {
- if (np->type == NULL || (strcmp(np->type, "pci") != 0 &&
- strcmp(np->type, "pciex") != 0))
+ if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex"))
continue;
of_platform_device_create(np, NULL, NULL);
}
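
Both of_node_name_eq() and of_node_is_type() treat a NULL node or a missing name/device_type property as a non-match, which is why the explicit np->type == NULL test above can simply go away. A short sketch of the filtering idiom, with an invented function name and a root node supplied by the caller:

#include <linux/of.h>
#include <linux/of_platform.h>

/* Create platform devices only for PCI/PCIe host bridge children. */
static void __init publish_pci_hosts(struct device_node *root)
{
	struct device_node *np;

	for_each_child_of_node(root, np) {
		if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex"))
			continue;
		of_platform_device_create(np, NULL, NULL);
	}
}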
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 8ae86200ef6c..b5f35cbe9e21 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -34,20 +34,9 @@
*/
static void *spu_syscall_table[] = {
-#define SYSCALL(func) sys_ni_syscall,
-#define COMPAT_SYS(func) sys_ni_syscall,
-#define PPC_SYS(func) sys_ni_syscall,
-#define OLDSYS(func) sys_ni_syscall,
-#define SYS32ONLY(func) sys_ni_syscall,
-#define PPC64ONLY(func) sys_ni_syscall,
-#define SYSX(f, f3264, f32) sys_ni_syscall,
-
-#define SYSCALL_SPU(func) sys_##func,
-#define COMPAT_SYS_SPU(func) sys_##func,
-#define COMPAT_SPU_NEW(func) sys_##func,
-#define SYSX_SPU(f, f3264, f32) f,
-
-#include <asm/systbl.h>
+#define __SYSCALL(nr, entry) entry,
+#include <asm/syscall_table_spu.h>
+#undef __SYSCALL
};
long spu_sys_callback(struct spu_syscall_block *s)
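
The generated <asm/syscall_table_spu.h> is just a list of __SYSCALL(nr, entry) invocations built from the syscall table entries marked for SPU use, so the including file picks the expansion instead of juggling the old SYSCALL/COMPAT_SYS macro variants. A hedged illustration of the protocol; the numbers and entry points below are placeholders, not the real generated contents:

/* Stub entry points standing in for the kernel's sys_* symbols. */
static long sys_ni_syscall(void) { return -38; /* -ENOSYS */ }
static long sys_spu_run_stub(void) { return 0; }

static void *example_spu_table[] = {
#define __SYSCALL(nr, entry)	entry,
	__SYSCALL(0, sys_ni_syscall)	/* a call blocked for SPUs */
	__SYSCALL(1, sys_spu_run_stub)	/* a call allowed for SPUs */
#undef __SYSCALL
};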
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index f7e36373f6e0..bed935c51ec2 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -458,7 +458,6 @@ static void init_affinity_node(int cbe)
struct device_node *vic_dn, *last_spu_dn;
phandle avoid_ph;
const phandle *vic_handles;
- const char *name;
int lenp, i, added;
last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
@@ -480,12 +479,7 @@ static void init_affinity_node(int cbe)
if (!vic_dn)
continue;
- /* a neighbour might be spe, mic-tm, or bif0 */
- name = of_get_property(vic_dn, "name", NULL);
- if (!name)
- continue;
-
- if (strcmp(name, "spe") == 0) {
+ if (of_node_name_eq(vic_dn, "spe")) {
spu = devnode_spu(cbe, vic_dn);
avoid_ph = last_spu_dn->phandle;
} else {
@@ -498,7 +492,7 @@ static void init_affinity_node(int cbe)
spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
if (!spu)
continue;
- if (!strcmp(name, "mic-tm")) {
+ if (of_node_name_eq(vic_dn, "mic-tm")) {
last_spu->has_mem_affinity = 1;
spu->has_mem_affinity = 1;
}
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 263413a34823..b95d6afc39b5 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -26,7 +26,6 @@
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/binfmts.h>
-#include <linux/syscalls.h>
#include <asm/spu.h>
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 43e7b93f27c7..48c2477e7e2a 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -609,7 +609,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
if (len < 4)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
udata = (void __user *)buf;
@@ -717,7 +717,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
if (len < 4)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
udata = (void __user *)buf;
@@ -856,7 +856,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
return -EINVAL;
udata = (void __user *)buf;
- if (!access_ok(VERIFY_READ, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
if (__get_user(wbox_data, udata))
@@ -1994,7 +1994,7 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
int ret;
struct spu_context *ctx = file->private_data;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
ret = spu_acquire_saved(ctx);
@@ -2034,7 +2034,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
struct spu_context *ctx = file->private_data;
int ret;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
ret = spu_acquire_saved(ctx);
@@ -2077,7 +2077,7 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
struct spu_context *ctx = file->private_data;
int ret;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
ret = spu_acquire_saved(ctx);
@@ -2129,7 +2129,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
struct spu_context *ctx = file->private_data;
int ret;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
ret = spu_acquire_saved(ctx);
@@ -2160,7 +2160,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
if (len < ret)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buf, len))
+ if (!access_ok(buf, len))
return -EFAULT;
info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
@@ -2338,9 +2338,8 @@ static int spufs_switch_log_open(struct inode *inode, struct file *file)
goto out;
}
- ctx->switch_log = kmalloc(sizeof(struct switch_log) +
- SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
- GFP_KERNEL);
+ ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log,
+ SWITCH_LOG_BUFSIZE), GFP_KERNEL);
if (!ctx->switch_log) {
rc = -ENOMEM;
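
The access_ok() changes are mechanical: the VERIFY_READ/VERIFY_WRITE argument was no longer consulted by the implementations, so it was dropped and only the pointer and length remain. The switch_log hunk is the more interesting one: struct_size(ctx->switch_log, log, SWITCH_LOG_BUFSIZE) computes sizeof(*ctx->switch_log) + SWITCH_LOG_BUFSIZE * sizeof(ctx->switch_log->log[0]), saturating instead of wrapping on overflow, so it is a drop-in for the open-coded sum it replaces. A self-contained sketch of the idiom on a made-up structure with a flexible array member:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_log {
	unsigned long head, tail;
	struct {
		u64 tstamp;
		u32 spu_id;
	} entry[];				/* flexible array member */
};

static struct demo_log *demo_log_alloc(size_t nr_entries)
{
	struct demo_log *log;

	/* sizeof(*log) + nr_entries * sizeof(log->entry[0]), with the
	 * multiply/add saturating to SIZE_MAX on overflow so kmalloc()
	 * fails cleanly rather than under-allocating. */
	log = kmalloc(struct_size(log, entry, nr_entries), GFP_KERNEL);
	return log;
}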