Diffstat (limited to 'include/linux/iommu.h')
-rw-r--r--  include/linux/iommu.h | 33 ++++++++++++++++++++++++++++++---
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e552ecfefcf7..6633040a13f9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -40,6 +40,7 @@ struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
+struct iommu_dma_cookie;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -60,6 +61,7 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_API  (1U << 1)  /* Domain for use in DMA-API
                                              implementation              */
#define __IOMMU_DOMAIN_PT       (1U << 2)  /* Domain is identity mapped   */
+#define __IOMMU_DOMAIN_DMA_FQ  (1U << 3)  /* DMA-API uses flush queue    */
/*
 * These are the possible domain-types
@@ -72,12 +74,17 @@ struct iommu_domain_geometry {
 *  IOMMU_DOMAIN_DMA    - Internally used for DMA-API implementations.
 *                        This flag allows IOMMU drivers to implement
 *                        certain optimizations for these domains
+ *  IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
+ *                        invalidation.
 */
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA        (__IOMMU_DOMAIN_PAGING |       \
                                 __IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ    (__IOMMU_DOMAIN_PAGING |       \
+                                __IOMMU_DOMAIN_DMA_API |      \
+                                __IOMMU_DOMAIN_DMA_FQ)
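
Since IOMMU_DOMAIN_DMA_FQ sets every bit that IOMMU_DOMAIN_DMA sets, plus
__IOMMU_DOMAIN_DMA_FQ, a single flag test distinguishes the flush-queue
variant. A minimal sketch (the helper name is hypothetical, not part of
this patch):

    /* Hypothetical helper: a DMA-FQ domain is a strict bitwise superset
     * of a DMA domain, so one flag test tells the two apart. */
    static inline bool example_domain_uses_flush_queue(struct iommu_domain *domain)
    {
            return domain->type & __IOMMU_DOMAIN_DMA_FQ;
    }
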
struct iommu_domain {
        unsigned type;
@@ -86,9 +93,14 @@ struct iommu_domain {
        iommu_fault_handler_t handler;
        void *handler_token;
        struct iommu_domain_geometry geometry;
-       void *iova_cookie;
+       struct iommu_dma_cookie *iova_cookie;
};
+static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
+{
+        return domain->type & __IOMMU_DOMAIN_DMA_API;
+}
+
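
Because both IOMMU_DOMAIN_DMA and IOMMU_DOMAIN_DMA_FQ carry
__IOMMU_DOMAIN_DMA_API, iommu_is_dma_domain() matches either variant. A
usage sketch, with a hypothetical driver function name:

    /* Hypothetical driver sketch: branch on whether the core DMA-API
     * layer owns the domain's IOVA space (true for DMA and DMA-FQ). */
    static void example_driver_finalise(struct iommu_domain *domain)
    {
            if (iommu_is_dma_domain(domain)) {
                    /* domain->iova_cookie is managed by dma-iommu */
            } else {
                    /* UNMANAGED/IDENTITY: the caller controls the space */
            }
    }
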
enum iommu_cap {
        IOMMU_CAP_CACHE_COHERENCY,      /* IOMMU can enforce cache coherent DMA
                                           transactions */
@@ -160,16 +172,22 @@ enum iommu_dev_features {
* @start: IOVA representing the start of the range to be flushed
* @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
*
* This structure is intended to be updated by multiple calls to the
* ->unmap() function in struct iommu_ops before eventually being passed
- * into ->iotlb_sync().
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
*/
struct iommu_iotlb_gather {
        unsigned long           start;
        unsigned long           end;
        size_t                  pgsize;
        struct page             *freelist;
+       bool                    queued;
};
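
For illustration, a driver's unmap path might chain page-table pages onto
@freelist so they are released only after ->iotlb_sync() (or
->iotlb_flush_all()) has run. The sketch below is hypothetical; chaining
through page->freelist mirrors what some drivers of this era did and is
not mandated by the API:

    /* Hypothetical sketch: defer freeing a page-table page until the
     * IOTLB holds no cached references to it. */
    static void example_defer_pgtable_free(struct iommu_iotlb_gather *gather,
                                           struct page *pg)
    {
            pg->freelist = gather->freelist; /* chain onto the gather list */
            gather->freelist = pg;           /* freed after invalidation */
    }
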
/**
@@ -486,7 +504,6 @@ int iommu_set_pgtable_quirks(struct iommu_domain *domain,
                             unsigned long quirks);
void iommu_set_dma_strict(void);
-bool iommu_get_dma_strict(struct iommu_domain *domain);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                              unsigned long iova, int flags);
@@ -577,6 +594,11 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
        iommu_iotlb_gather_add_range(gather, iova, size);
}
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+        return gather && gather->queued;
+}
+
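
When @queued is set, a later ->iotlb_flush_all() supersedes any
fine-grained ranges, so a driver can skip per-page TLB bookkeeping in its
unmap path. A hypothetical sketch of that pattern (the function name and
elided page-table step are placeholders):

    /* Hypothetical driver sketch: only record per-page invalidations
     * when the flush has not already been queued for a full flush. */
    static size_t example_unmap_page(struct iommu_domain *domain,
                                     unsigned long iova, size_t size,
                                     struct iommu_iotlb_gather *gather)
    {
            /* ... clear the page-table entries for [iova, iova + size) ... */

            if (!iommu_iotlb_gather_queued(gather))
                    iommu_iotlb_gather_add_page(domain, gather, iova, size);

            return size;
    }
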
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
@@ -925,6 +947,11 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
{
}
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+        return false;
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}