author	Alexey Kardashevskiy <aik@ozlabs.ru>	2019-08-29 18:52:50 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-08-30 09:40:14 +1000
commit	650ab1e370cdb61ba5c7495006f4376e02374da0 (patch)
tree	99f4db55e278275d846cf9bff710114248ee9b81 /drivers/vfio
parent	KVM: PPC: Book3S: Invalidate multiple TCEs at once (diff)
vfio/spapr_tce: Invalidate multiple TCEs at once
Invalidating a TCE cache entry for each updated TCE is quite expensive. This makes use of the new iommu_table_ops::xchg_no_kill()/tce_kill() callbacks to bring down the time spent in mapping a huge guest DMA window.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190829085252.72370-4-aik@ozlabs.ru
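For readers unfamiliar with the pattern, below is a minimal userspace C sketch of the batching idea this patch applies: update every TCE first, then invalidate the cached translations once for the whole range instead of once per entry. tce_set_no_flush(), tce_flush_range(), the in-memory table and TABLE_SIZE are hypothetical stand-ins for the kernel's iommu_tce_xchg_no_kill()/iommu_tce_kill(); the real signatures, types and locking differ.

/*
 * Sketch only: tce_set_no_flush() and tce_flush_range() are invented
 * stand-ins for iommu_tce_xchg_no_kill() and iommu_tce_kill().
 */
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64	/* hypothetical table size, for illustration */

static unsigned long tce_table[TABLE_SIZE];
static unsigned long flush_calls;	/* counts cache invalidations */

/* Update one entry but do NOT invalidate the TCE cache yet. */
static int tce_set_no_flush(unsigned long entry, unsigned long tce)
{
	if (entry >= TABLE_SIZE)
		return -1;
	tce_table[entry] = tce;
	return 0;
}

/* Invalidate the cache once for a whole range of entries. */
static void tce_flush_range(unsigned long entry, unsigned long pages)
{
	++flush_calls;	/* a real implementation would poke the IOMMU here */
	printf("flush entries [%lu, %lu)\n", entry, entry + pages);
}

int main(void)
{
	unsigned long entry = 8, pages = 16, i;
	int ret = 0;

	/* Old scheme: one flush per updated entry, i.e. "pages" flushes. */
	/* New scheme: update every entry first... */
	for (i = 0; i < pages; i++) {
		ret = tce_set_no_flush(entry + i, 0x1000 * i);
		if (ret)
			break;
	}

	/* ...then invalidate the whole range with a single call. */
	if (!ret)
		tce_flush_range(entry, pages);

	printf("flush calls: %lu (was %lu per-entry)\n", flush_calls, pages);
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}

With "pages" updates per request this drops the invalidation count from pages to one, which is where the commit message's savings for a huge guest DMA window come from.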
Diffstat (limited to 'drivers/vfio')
 drivers/vfio/vfio_iommu_spapr_tce.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8ce9ad21129f..9809369e0ed3 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -435,7 +435,7 @@ static int tce_iommu_clear(struct tce_container *container,
 	unsigned long oldhpa;
 	long ret;
 	enum dma_data_direction direction;
-	unsigned long lastentry = entry + pages;
+	unsigned long lastentry = entry + pages, firstentry = entry;
 
 	for ( ; entry < lastentry; ++entry) {
 		if (tbl->it_indirect_levels && tbl->it_userspace) {
@@ -460,7 +460,7 @@ static int tce_iommu_clear(struct tce_container *container,
 		direction = DMA_NONE;
 		oldhpa = 0;
-		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
+		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
 				&direction);
 		if (ret)
 			continue;
@@ -476,6 +476,8 @@ static int tce_iommu_clear(struct tce_container *container,
 		tce_iommu_unuse_page(container, oldhpa);
 	}
 
+	iommu_tce_kill(tbl, firstentry, pages);
+
 	return 0;
 }
@@ -518,8 +520,8 @@ static long tce_iommu_build(struct tce_container *container,
 		hpa |= offset;
 		dirtmp = direction;
-		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-				&dirtmp);
+		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+				&hpa, &dirtmp);
 		if (ret) {
 			tce_iommu_unuse_page(container, hpa);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
@@ -536,6 +538,8 @@ static long tce_iommu_build(struct tce_container *container,
 	if (ret)
 		tce_iommu_clear(container, tbl, entry, i);
+	else
+		iommu_tce_kill(tbl, entry, pages);
 
 	return ret;
 }
@@ -572,8 +576,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		if (mm_iommu_mapped_inc(mem))
 			break;
 
-		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
-				&dirtmp);
+		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
+				&hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
 			tce_iommu_unuse_page_v2(container, tbl, entry + i);
@@ -593,6 +597,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 	if (ret)
 		tce_iommu_clear(container, tbl, entry, i);
+	else
+		iommu_tce_kill(tbl, entry, pages);
 
 	return ret;
 }
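A note on the error paths above: the explicit iommu_tce_kill() is only issued on success. If tce_iommu_build() or tce_iommu_build_v2() fails partway through, it calls tce_iommu_clear() on the entries built so far, and tce_iommu_clear() now ends with its own iommu_tce_kill(tbl, firstentry, pages), so a partially built range is still invalidated exactly once.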