Diffstat (limited to 'arch/powerpc/platforms/pseries')
 arch/powerpc/platforms/pseries/hotplug-cpu.c | 19
 arch/powerpc/platforms/pseries/iommu.c       | 99
 arch/powerpc/platforms/pseries/lparcfg.c     |  1
 arch/powerpc/platforms/pseries/vio.c         | 95
 4 files changed, 88 insertions(+), 126 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 2f8e62163602..97feb6e79f1a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
return rc;
}
+int dlpar_cpu_readd(int cpu)
+{
+ struct device_node *dn;
+ struct device *dev;
+ u32 drc_index;
+ int rc;
+
+ dev = get_cpu_device(cpu);
+ dn = dev->of_node;
+
+ rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
+
+ rc = dlpar_cpu_remove_by_index(drc_index);
+ if (!rc)
+ rc = dlpar_cpu_add(drc_index);
+
+ return rc;
+}
+
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
u32 count, drc_index;
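The dlpar_cpu_readd() helper added above reuses the existing DLPAR paths: it reads the CPU's ibm,my-drc-index and, when the remove succeeds, adds the same index back so that the CPU's device-tree properties are re-fetched from firmware. A minimal, hypothetical caller (not part of this diff; for_each_online_cpu() and pr_warn() are standard kernel facilities) might re-add every online CPU:

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical sketch: re-add all online CPUs via the new helper. */
static void readd_online_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (dlpar_cpu_readd(cpu))
			pr_warn("re-add of CPU %d failed\n", cpu);
	}
}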
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 8fc8fe0b9848..36eb1ddbac69 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -978,7 +978,7 @@ static phys_addr_t ddw_memory_hotplug_max(void)
* pdn: the parent pe node with the ibm,dma_window property
* Future: also check if we can remap the base window for our base page size
*
- * returns the dma offset for use by dma_set_mask
+ * returns the dma offset for use by the direct mapped DMA code.
*/
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
@@ -1198,87 +1198,37 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
iommu_add_device(pci->table_group, &dev->dev);
}
-static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
+static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
- bool ddw_enabled = false;
- struct device_node *pdn, *dn;
- struct pci_dev *pdev;
+ struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
const __be32 *dma_window = NULL;
- u64 dma_offset;
-
- if (!dev->dma_mask)
- return -EIO;
-
- if (!dev_is_pci(dev))
- goto check_mask;
-
- pdev = to_pci_dev(dev);
/* only attempt to use a new window if 64-bit DMA is requested */
- if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
- dn = pci_device_to_OF_node(pdev);
- dev_dbg(dev, "node is %pOF\n", dn);
+ if (dma_mask < DMA_BIT_MASK(64))
+ return false;
- /*
- * the device tree might contain the dma-window properties
- * per-device and not necessarily for the bus. So we need to
- * search upwards in the tree until we either hit a dma-window
- * property, OR find a parent with a table already allocated.
- */
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
- pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window)
- break;
- }
- if (pdn && PCI_DN(pdn)) {
- dma_offset = enable_ddw(pdev, pdn);
- if (dma_offset != 0) {
- dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
- set_dma_offset(dev, dma_offset);
- set_dma_ops(dev, &dma_nommu_ops);
- ddw_enabled = true;
- }
- }
- }
+ dev_dbg(&pdev->dev, "node is %pOF\n", dn);
- /* fall back on iommu ops */
- if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
- dev_info(dev, "Restoring 32-bit DMA via iommu\n");
- set_dma_ops(dev, &dma_iommu_ops);
+ /*
+ * the device tree might contain the dma-window properties
+ * per-device and not necessarily for the bus. So we need to
+ * search upwards in the tree until we either hit a dma-window
+ * property, OR find a parent with a table already allocated.
+ */
+ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
+ pdn = pdn->parent) {
+ dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
+ if (dma_window)
+ break;
}
-check_mask:
- if (!dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
- return 0;
-}
-
-static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
-{
- if (!dev->dma_mask)
- return 0;
-
- if (!disable_ddw && dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
- struct device_node *dn;
-
- dn = pci_device_to_OF_node(pdev);
-
- /* search upwards for ibm,dma-window */
- for (; dn && PCI_DN(dn) && !PCI_DN(dn)->table_group;
- dn = dn->parent)
- if (of_get_property(dn, "ibm,dma-window", NULL))
- break;
- /* if there is a ibm,ddw-applicable property require 64 bits */
- if (dn && PCI_DN(dn) &&
- of_get_property(dn, "ibm,ddw-applicable", NULL))
- return DMA_BIT_MASK(64);
+ if (pdn && PCI_DN(pdn)) {
+ pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
+ if (pdev->dev.archdata.dma_offset)
+ return true;
}
- return dma_iommu_ops.get_required_mask(dev);
+ return false;
}
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
@@ -1373,8 +1323,9 @@ void iommu_init_early_pSeries(void)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
- ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
- ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
+ if (!disable_ddw)
+ pseries_pci_controller_ops.iommu_bypass_supported =
+ iommu_bypass_supported_pSeriesLP;
} else {
pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
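With this change pseries no longer overrides dma_set_mask(); it instead advertises an iommu_bypass_supported() hook on its pci_controller_ops, leaving the generic powerpc DMA code to choose between the direct-mapped path (using the offset stashed in dev.archdata.dma_offset) and the IOMMU fallback. A simplified sketch of how such a hook can be consulted (assumed shape; the actual decision is made in the common dma-iommu code, not here):

#include <linux/pci.h>
#include <asm/pci-bridge.h>

/* Sketch: ask the host bridge whether this device may bypass the IOMMU. */
static bool can_bypass_iommu(struct pci_dev *pdev, u64 dma_mask)
{
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	return phb->controller_ops.iommu_bypass_supported &&
	       phb->controller_ops.iommu_bypass_supported(pdev, dma_mask);
}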
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 794487313cc8..e73c7e30efe6 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -475,6 +475,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
splpar_dispatch_data(m);
seq_printf(m, "purr=%ld\n", get_purr());
+ seq_printf(m, "tbr=%ld\n", mftb());
} else { /* non SPLPAR case */
seq_printf(m, "system_active_processors=%d\n",
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 1fad4649735b..141795275ccb 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -492,7 +492,9 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
return NULL;
}
- ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
+ ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
+ dma_handle, dev->coherent_dma_mask, flag,
+ dev_to_node(dev));
if (unlikely(ret == NULL)) {
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
@@ -507,8 +509,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
{
struct vio_dev *viodev = to_vio_dev(dev);
- dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-
+ iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
@@ -518,22 +519,22 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
- struct iommu_table *tbl;
+ struct iommu_table *tbl = get_iommu_table_base(dev);
dma_addr_t ret = DMA_MAPPING_ERROR;
- tbl = get_iommu_table_base(dev);
- if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
- atomic_inc(&viodev->cmo.allocs_failed);
- return ret;
- }
-
- ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
- if (unlikely(dma_mapping_error(dev, ret))) {
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
- atomic_inc(&viodev->cmo.allocs_failed);
- }
-
+ if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
+ goto out_fail;
+ ret = iommu_map_page(dev, tbl, page, offset, size, device_to_mask(dev),
+ direction, attrs);
+ if (unlikely(ret == DMA_MAPPING_ERROR))
+ goto out_deallocate;
return ret;
+
+out_deallocate:
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
+out_fail:
+ atomic_inc(&viodev->cmo.allocs_failed);
+ return DMA_MAPPING_ERROR;
}
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
@@ -542,11 +543,9 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
- struct iommu_table *tbl;
-
- tbl = get_iommu_table_base(dev);
- dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
+ struct iommu_table *tbl = get_iommu_table_base(dev);
+ iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
@@ -555,34 +554,32 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
- struct iommu_table *tbl;
+ struct iommu_table *tbl = get_iommu_table_base(dev);
struct scatterlist *sgl;
int ret, count;
size_t alloc_size = 0;
- tbl = get_iommu_table_base(dev);
for_each_sg(sglist, sgl, nelems, count)
alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
- if (vio_cmo_alloc(viodev, alloc_size)) {
- atomic_inc(&viodev->cmo.allocs_failed);
- return 0;
- }
-
- ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
-
- if (unlikely(!ret)) {
- vio_cmo_dealloc(viodev, alloc_size);
- atomic_inc(&viodev->cmo.allocs_failed);
- return ret;
- }
+ if (vio_cmo_alloc(viodev, alloc_size))
+ goto out_fail;
+ ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, device_to_mask(dev),
+ direction, attrs);
+ if (unlikely(!ret))
+ goto out_deallocate;
for_each_sg(sglist, sgl, ret, count)
alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
if (alloc_size)
vio_cmo_dealloc(viodev, alloc_size);
-
return ret;
+
+out_deallocate:
+ vio_cmo_dealloc(viodev, alloc_size);
+out_fail:
+ atomic_inc(&viodev->cmo.allocs_failed);
+ return 0;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
@@ -591,40 +588,27 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
- struct iommu_table *tbl;
+ struct iommu_table *tbl = get_iommu_table_base(dev);
struct scatterlist *sgl;
size_t alloc_size = 0;
int count;
- tbl = get_iommu_table_base(dev);
for_each_sg(sglist, sgl, nelems, count)
alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
- dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
-
+ ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
vio_cmo_dealloc(viodev, alloc_size);
}
-static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
- return dma_iommu_ops.dma_supported(dev, mask);
-}
-
-static u64 vio_dma_get_required_mask(struct device *dev)
-{
- return dma_iommu_ops.get_required_mask(dev);
-}
-
static const struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
- .mmap = dma_nommu_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
- .dma_supported = vio_dma_iommu_dma_supported,
- .get_required_mask = vio_dma_get_required_mask,
+ .dma_supported = dma_iommu_dma_supported,
+ .get_required_mask = dma_iommu_get_required_mask,
};
/**
@@ -1715,3 +1699,10 @@ int vio_disable_interrupts(struct vio_dev *dev)
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */
+
+static int __init vio_init(void)
+{
+ dma_debug_add_bus(&vio_bus_type);
+ return 0;
+}
+fs_initcall(vio_init);
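Two things happen in this file: the map paths call the powerpc iommu API directly with their CMO accounting restructured around goto-based unwind (reserve entitlement, attempt the mapping, release on failure), and the new vio_init() initcall registers the VIO bus with the DMA-API debugging code. The unwind idiom in map_page()/map_sg() reduces to the following self-contained pattern (stand-in names; the real functions are vio_cmo_alloc(), iommu_map_page() and vio_cmo_dealloc()):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool reserve(size_t bytes)   { return true;  /* vio_cmo_alloc() stand-in */ }
static void unreserve(size_t bytes) {               /* vio_cmo_dealloc() stand-in */ }
static bool do_map(size_t bytes)    { return true;  /* iommu_map_page() stand-in */ }

/* Sketch of the reserve -> act -> unwind control flow used above. */
static int map_with_accounting(size_t bytes)
{
	if (!reserve(bytes))
		goto out_fail;
	if (!do_map(bytes))
		goto out_unreserve;
	return 0;

out_unreserve:
	unreserve(bytes);
out_fail:
	fprintf(stderr, "mapping of %zu bytes failed\n", bytes);
	return -1;
}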