Diffstat (limited to 'arch/s390/pci/pci_dma.c')
 arch/s390/pci/pci_dma.c | 246 ++++++++++++++++++++++++++++++--------------
 1 file changed, 170 insertions(+), 76 deletions(-)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7297fce9bf80..7350c8bc13a2 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -129,12 +129,11 @@ void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
entry_clr_protected(entry);
}
-static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
- dma_addr_t dma_addr, size_t size, int flags)
+static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
{
unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
- dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
unsigned long *entry;
int i, rc = 0;
@@ -145,7 +144,7 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
if (!zdev->dma_table) {
rc = -EINVAL;
- goto no_refresh;
+ goto out_unlock;
}
for (i = 0; i < nr_pages; i++) {
@@ -159,20 +158,6 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
dma_addr += PAGE_SIZE;
}
- /*
- * With zdev->tlb_refresh == 0, rpcit is not required to establish new
- * translations when previously invalid translation-table entries are
- * validated. With lazy unmap, it also is skipped for previously valid
- * entries, but a global rpcit is then required before any address can
- * be re-used, i.e. after each iommu bitmap wrap-around.
- */
- if (!zdev->tlb_refresh &&
- (!s390_iommu_strict ||
- ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
- goto no_refresh;
-
- rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
undo_cpu_trans:
if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
flags = ZPCI_PTE_INVALID;
@@ -185,12 +170,46 @@ undo_cpu_trans:
dma_update_cpu_trans(entry, page_addr, flags);
}
}
-
-no_refresh:
+out_unlock:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
return rc;
}
+static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
+ size_t size, int flags)
+{
+ /*
+ * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+ * translations when previously invalid translation-table entries are
+ * validated. With lazy unmap, it also is skipped for previously valid
+ * entries, but a global rpcit is then required before any address can
+ * be re-used, i.e. after each iommu bitmap wrap-around.
+ */
+ if (!zdev->tlb_refresh &&
+ (!s390_iommu_strict ||
+ ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
+ return 0;
+
+ return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+ PAGE_ALIGN(size));
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ int rc;
+
+ rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
+ if (rc)
+ return rc;
+
+ rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+ __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
+
+ return rc;
+}
+
void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
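Note: the split above separates the CPU translation-table update (__dma_update_trans) from the IOTLB flush (__dma_purge_tlb), so a caller that maps several ranges into one contiguous DMA area can batch the flush into a single RPCIT. A minimal sketch of that pattern, assuming page-multiple lengths and using a hypothetical helper name (this is not part of the patch; __s390_dma_map_sg below is the real user):

/* Hypothetical illustration: map n page-aligned ranges back to back,
 * then flush once. Error handling mirrors dma_update_trans(): after
 * a failure, the already validated entries are invalidated again.
 */
static int map_area_sketch(struct zpci_dev *zdev, unsigned long *pa,
			   size_t *len, int n, dma_addr_t base, int flags)
{
	dma_addr_t dma_addr = base;
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = __dma_update_trans(zdev, pa[i], dma_addr, len[i], flags);
		if (rc)
			goto undo;
		dma_addr += len[i];	/* len[i] assumed page aligned */
	}
	/* one RPCIT for the whole area instead of one per range */
	rc = __dma_purge_tlb(zdev, base, dma_addr - base, flags);
	if (!rc)
		return 0;
undo:
	dma_update_trans(zdev, 0, base, dma_addr - base, ZPCI_PTE_INVALID);
	return rc;
}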
@@ -230,45 +249,54 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
boundary_size, 0);
}
-static unsigned long dma_alloc_iommu(struct device *dev, int size)
+static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long offset, flags;
- int wrap = 0;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
+ if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ /* global flush before DMA addresses are reused */
+ if (zpci_refresh_global(zdev))
+ goto out_error;
+
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ }
/* wrap-around */
offset = __dma_alloc_iommu(dev, 0, size);
- wrap = 1;
+ if (offset == -1)
+ goto out_error;
}
+ zdev->next_bit = offset + size;
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- if (offset != -1) {
- zdev->next_bit = offset + size;
- if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
- /* global flush after wrap-around with lazy unmap */
- zpci_refresh_global(zdev);
- }
+ return zdev->start_dma + offset * PAGE_SIZE;
+
+out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return offset;
+ return DMA_ERROR_CODE;
}
-static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
+static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long flags;
+ unsigned long flags, offset;
+
+ offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
if (!zdev->iommu_bitmap)
goto out;
- bitmap_clear(zdev->iommu_bitmap, offset, size);
- /*
- * Lazy flush for unmap: need to move next_bit to avoid address re-use
- * until wrap-around.
- */
- if (!s390_iommu_strict && offset >= zdev->next_bit)
- zdev->next_bit = offset + size;
+
+ if (zdev->tlb_refresh || s390_iommu_strict)
+ bitmap_clear(zdev->iommu_bitmap, offset, size);
+ else
+ bitmap_set(zdev->lazy_bitmap, offset, size);
+
out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
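Note: with lazy unmap, dma_free_address no longer clears bits in iommu_bitmap; freed pages are only recorded in lazy_bitmap until the allocator wraps around and issues one global RPCIT (see dma_alloc_address above). A toy walk-through of the reconciliation step, with made-up bit values:

/* iommu_bitmap: 1 1 0 1 1  allocated, or lazily freed and flush pending
 * lazy_bitmap:  1 0 0 0 1  lazily freed, flush pending
 * andnot:       0 1 0 1 0  still live after the global flush
 */
bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
	      zdev->lazy_bitmap, zdev->iommu_pages);
bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);

After this, every lazily freed address is available for reuse again, and the IOTLB is guaranteed not to hold stale translations for it.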
@@ -289,16 +317,16 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long nr_pages, iommu_page_index;
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
+ unsigned long nr_pages;
dma_addr_t dma_addr;
int ret;
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
- iommu_page_index = dma_alloc_iommu(dev, nr_pages);
- if (iommu_page_index == -1) {
+ dma_addr = dma_alloc_address(dev, nr_pages);
+ if (dma_addr == DMA_ERROR_CODE) {
ret = -ENOSPC;
goto out_err;
}
@@ -306,12 +334,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
- dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
- if (dma_addr + size > zdev->end_dma) {
- ret = -ERANGE;
- goto out_free;
- }
-
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
@@ -323,7 +345,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
return dma_addr + (offset & ~PAGE_MASK);
out_free:
- dma_free_iommu(dev, iommu_page_index, nr_pages);
+ dma_free_address(dev, dma_addr, nr_pages);
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
@@ -335,7 +357,6 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long iommu_page_index;
int npages, ret;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -349,8 +370,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
}
atomic64_add(npages, &zdev->unmapped_pages);
- iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
- dma_free_iommu(dev, iommu_page_index, npages);
+ dma_free_address(dev, dma_addr, npages);
}
static void *s390_dma_alloc(struct device *dev, size_t size,
@@ -394,37 +414,98 @@ static void s390_dma_free(struct device *dev, size_t size,
free_pages((unsigned long) pa, get_order(size));
}
-static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nr_elements, enum dma_data_direction dir,
- unsigned long attrs)
+/* Map a segment into a contiguous dma address area */
+static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
{
- int mapped_elements = 0;
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ dma_addr_t dma_addr_base, dma_addr;
+ int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- int i;
+ unsigned long pa;
+ int ret;
- for_each_sg(sg, s, nr_elements, i) {
- struct page *page = sg_page(s);
- s->dma_address = s390_dma_map_pages(dev, page, s->offset,
- s->length, dir, 0);
- if (!dma_mapping_error(dev, s->dma_address)) {
- s->dma_length = s->length;
- mapped_elements++;
- } else
+ size = PAGE_ALIGN(size);
+ dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+ if (dma_addr_base == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ dma_addr = dma_addr_base;
+ if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
+ pa = page_to_phys(sg_page(s)) + s->offset;
+ ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+ if (ret)
goto unmap;
+
+ dma_addr += s->length;
}
-out:
- return mapped_elements;
+ ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
+ if (ret)
+ goto unmap;
+
+ *handle = dma_addr_base;
+ atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+
+ return ret;
unmap:
- for_each_sg(sg, s, mapped_elements, i) {
- if (s->dma_address)
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, 0);
- s->dma_address = 0;
+ dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
+ ZPCI_PTE_INVALID);
+ dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+ zpci_err("map error:\n");
+ zpci_err_dma(ret, pa);
+ return ret;
+}
+
+static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s = sg, *start = sg, *dma = sg;
+ unsigned int max = dma_get_max_seg_size(dev);
+ unsigned int size = s->offset + s->length;
+ unsigned int offset = s->offset;
+ int count = 0, i;
+
+ for (i = 1; i < nr_elements; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) ||
+ size + s->length > max) {
+ if (__s390_dma_map_sg(dev, start, size,
+ &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count++;
+ }
+ size += s->length;
}
- mapped_elements = 0;
- goto out;
+ if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count + 1;
+unmap:
+ for_each_sg(sg, s, count, i)
+ s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
+ dir, attrs);
+
+ return 0;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
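Note: the rewritten s390_dma_map_sg coalesces consecutive scatterlist elements into one contiguous DMA area and starts a new area only when an element would break contiguity. The decision is the compound test above; rewritten as a standalone predicate (hypothetical, for illustration only):

static bool must_start_new_area(struct scatterlist *s, unsigned int size,
				unsigned int max)
{
	/* an element that does not start at a page boundary, or a
	 * running size that does not end on one, cannot be appended
	 * without a hole; nor may the area exceed the device's max
	 * segment size */
	return s->offset || (size & ~PAGE_MASK) || size + s->length > max;
}

Worked example (4K pages, large max segment size): for three elements
  s0: offset 0,   length 8192
  s1: offset 0,   length 4096
  s2: offset 512, length 1024
s1 extends the first area (12288 bytes, page aligned), while s2's non-zero offset starts a second area. map_sg returns 2; the two DMA segments are written into the first two scatterlist entries via the dma cursor, and s2 is left with dma_length == 0.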
@@ -435,8 +516,9 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nr_elements, i) {
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
- 0);
+ if (s->dma_length)
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
+ dir, attrs);
s->dma_address = 0;
s->dma_length = 0;
}
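Note: because of the coalescing above, only the leading entries of the scatterlist carry a mapping after map_sg; the dma_length check added here skips the rest. Hypothetical state after the 3-element map from the example above:

/*   s0: dma_length = 12288  -> s390_dma_unmap_pages()
 *   s1: dma_length = 1024   -> s390_dma_unmap_pages()
 *   s2: dma_length = 0      -> skipped
 */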
@@ -482,7 +564,14 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
+ if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ if (!zdev->lazy_bitmap) {
+ rc = -ENOMEM;
+ goto free_bitmap;
+ }
+ }
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
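Note: like iommu_bitmap, lazy_bitmap holds one bit per IOMMU page, hence the iommu_pages / 8 byte allocation. Illustrative arithmetic, not from the patch:

/* hypothetical sizing, assuming a 4 GB DMA aperture and 4K pages:
 *   iommu_pages = (4UL << 30) >> PAGE_SHIFT = 1048576 pages
 *   bitmap size = iommu_pages / 8          = 131072 bytes (128 KB)
 * large enough that vzalloc(), as already used for iommu_bitmap,
 * is the natural choice over kzalloc()
 */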
@@ -492,6 +581,8 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
free_bitmap:
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
free_dma_table:
dma_free_cpu_table(zdev->dma_table);
zdev->dma_table = NULL;
@@ -513,6 +604,9 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
zdev->dma_table = NULL;
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
+
zdev->next_bit = 0;
}