path: root/include/linux/io-pgtable.h
author	Will Deacon <will@kernel.org>	2019-07-02 16:44:41 +0100
committer	Will Deacon <will@kernel.org>	2019-07-29 17:22:57 +0100
commit	abfd6fe0cd535d31ee83b668be6eb59ce6a8469d (patch)
tree	3434e458e2191d71cb8d6211209044d8e3560bcd /include/linux/io-pgtable.h
parent	iommu/io-pgtable-arm: Call ->tlb_flush_walk() and ->tlb_flush_leaf() (diff)
iommu/io-pgtable: Replace ->tlb_add_flush() with ->tlb_add_page()
The ->tlb_add_flush() callback in the io-pgtable API now looks a bit silly:

  - It takes a size and a granule, which are always the same
  - It takes a 'bool leaf', which is always true
  - It only ever flushes a single page

With that in mind, replace it with an optional ->tlb_add_page() callback
that drops the useless parameters.

Signed-off-by: Will Deacon <will@kernel.org>
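As a rough illustration of what the reworked callback interface looks like to a driver, here is a minimal sketch (not part of this patch); the my_dev_*() helpers are hypothetical stand-ins for a real IOMMU driver's invalidation routines, and other callbacks (e.g. ->tlb_flush_all()) are omitted for brevity:

/*
 * Hypothetical driver sketch, not from this patch.
 */
#include <linux/io-pgtable.h>

static void my_dev_tlb_flush_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	/* Synchronously invalidate all entries covering [iova, iova + size). */
}

static void my_dev_tlb_flush_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	/* Synchronously invalidate leaf entries covering [iova, iova + size). */
}

static void my_dev_tlb_add_page(unsigned long iova, size_t granule,
				void *cookie)
{
	/* Queue invalidation of the single leaf entry at 'iova'. */
}

static void my_dev_tlb_sync(void *cookie)
{
	/* Wait for any queued invalidations to take effect. */
}

static const struct iommu_flush_ops my_dev_flush_ops = {
	.tlb_flush_walk	= my_dev_tlb_flush_walk,
	.tlb_flush_leaf	= my_dev_tlb_flush_leaf,
	.tlb_add_page	= my_dev_tlb_add_page,	/* optional: may be left NULL */
	.tlb_sync	= my_dev_tlb_sync,
};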
Diffstat (limited to 'include/linux/io-pgtable.h')
-rw-r--r--	include/linux/io-pgtable.h	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 0618aac59e74..99e04bd2baa1 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -25,12 +25,11 @@ enum io_pgtable_fmt {
* address range.
* @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
* address range.
- * @tlb_add_flush: Optional callback to queue up leaf TLB invalidation for a
- * virtual address range. This function exists purely as an
- * optimisation for IOMMUs that cannot batch TLB invalidation
- * operations efficiently and are therefore better suited to
- * issuing them early rather than deferring them until
- * iommu_tlb_sync().
+ * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
+ * single page. This function exists purely as an optimisation
+ * for IOMMUs that cannot batch TLB invalidation operations
+ * efficiently and are therefore better suited to issuing them
+ * early rather than deferring them until iommu_tlb_sync().
* @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
* any corresponding page table updates are visible to the
* IOMMU.
@@ -44,8 +43,7 @@ struct iommu_flush_ops {
void *cookie);
void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
void *cookie);
- void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
+ void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
void (*tlb_sync)(void *cookie);
};

@@ -212,10 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
}

-static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
- unsigned long iova, size_t size, size_t granule, bool leaf)
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
+ size_t granule)
{
- iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+ if (iop->cfg.tlb->tlb_add_page)
+ iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
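For context, a hedged sketch of how a page-table walker might use the reworked helper on its unmap path; this is not taken from this patch, and example_unmap_page()/example_unmap_done() and their PTE handling are hypothetical:

/*
 * Hypothetical caller sketch: queue a per-page leaf invalidation after
 * clearing a PTE, then sync once at the end of the unmap operation.
 */
static void example_unmap_page(struct io_pgtable *iop, unsigned long iova,
			       size_t granule)
{
	/* ... clear the leaf PTE covering 'iova' here ... */

	/* No-op if the driver did not provide ->tlb_add_page(). */
	io_pgtable_tlb_add_page(iop, iova, granule);
}

static void example_unmap_done(struct io_pgtable *iop)
{
	/* Ensure queued invalidations are visible to the IOMMU. */
	io_pgtable_tlb_sync(iop);
}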