author    Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 16:15:51 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 16:15:51 -0800
commit    13eaa5bda0df8f5c1c4f2a4fb4a0bc20787dcc68
tree      de605f4423dc72b5bba5b2e753d314b17df3b583 /include
parent    Merge tag 'cxl-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
parent    Merge branches 'arm/smmu', 'virtio', 'x86/amd', 'x86/vt-d' and 'core' into next
Merge tag 'iommu-updates-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:

 - Identity domain support for virtio-iommu

 - Move flush queue code into iommu-dma

 - Some fixes for AMD IOMMU suspend/resume support when x2apic is used

 - Arm SMMU updates from Will Deacon:
     - Revert evtq and priq back to their former sizes
     - Return early on short-descriptor page-table allocation failure
     - Fix page fault reporting for Adreno GPU on SMMUv2
     - Make SMMUv3 MMU notifier ops 'const'
     - Numerous new compatible strings for Qualcomm SMMUv2 implementations

 - Various smaller fixes and cleanups

* tag 'iommu-updates-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (38 commits)
  iommu/iova: Temporarily include dma-mapping.h from iova.h
  iommu: Move flush queue data into iommu_dma_cookie
  iommu/iova: Move flush queue code to iommu-dma
  iommu/iova: Consolidate flush queue code
  iommu/vt-d: Use put_pages_list
  iommu/amd: Use put_pages_list
  iommu/amd: Simplify pagetable freeing
  iommu/iova: Squash flush_cb abstraction
  iommu/iova: Squash entry_dtor abstraction
  iommu/iova: Fix race between FQ timeout and teardown
  iommu/amd: Fix typo in *glues … together* in comment
  iommu/vt-d: Remove unused dma_to_mm_pfn function
  iommu/vt-d: Drop duplicate check in dma_pte_free_pagetable()
  iommu/vt-d: Use bitmap_zalloc() when applicable
  iommu/amd: Remove useless irq affinity notifier
  iommu/amd: X2apic mode: mask/unmask interrupts on suspend/resume
  iommu/amd: X2apic mode: setup the INTX registers on mask/unmask
  iommu/amd: X2apic mode: re-enable after resume
  iommu/amd: Restore GA log/tail pointer on host resume
  iommu/iova: Move fast alloc size roundup into alloc_iova_fast()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/intel-svm.h           6
-rw-r--r--  include/linux/iommu.h               3
-rw-r--r--  include/linux/iova.h               68
-rw-r--r--  include/trace/events/iommu.h       10
-rw-r--r--  include/uapi/linux/virtio_iommu.h   8
5 files changed, 16 insertions(+), 79 deletions(-)
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 57cceecbe37f..1b73bab7eeff 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -8,12 +8,6 @@
#ifndef __INTEL_SVM_H__
#define __INTEL_SVM_H__
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
/* Page Request Queue depth */
#define PRQ_ORDER 2
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d2f3435e7d17..de0c57a567c8 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -186,7 +186,7 @@ struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
- struct page *freelist;
+ struct list_head freelist;
bool queued;
};
@@ -399,6 +399,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
+ .freelist = LIST_HEAD_INIT(gather->freelist),
};
}
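
The freelist in struct iommu_iotlb_gather switches from a hand-rolled chain of struct page pointers to a regular list_head, so drivers can collect the page-table pages freed during unmap and release them with put_pages_list() once the IOTLB flush has finished (see the "Use put_pages_list" commits above). A minimal sketch of that pattern, with hypothetical example_* helpers rather than in-tree driver code:

#include <linux/iommu.h>
#include <linux/mm.h>

/* Illustrative only: defer freeing a page-table page until after the flush */
static void example_gather_freed_pt(struct iommu_iotlb_gather *gather,
                                    struct page *pt_page)
{
        list_add_tail(&pt_page->lru, &gather->freelist);
}

/* Illustrative only: once the invalidation completes, drop the whole list */
static void example_iotlb_sync_done(struct iommu_iotlb_gather *gather)
{
        put_pages_list(&gather->freelist);
}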
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 71d8a2de6635..cea79cb9f26c 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -35,35 +34,6 @@ struct iova_rcache {
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
-struct iova_domain;
-
-/* Call-Back from IOVA code into IOMMU drivers */
-typedef void (* iova_flush_cb)(struct iova_domain *domain);
-
-/* Destructor for per-entry data */
-typedef void (* iova_entry_dtor)(unsigned long data);
-
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE 256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT 10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
- unsigned long iova_pfn;
- unsigned long pages;
- unsigned long data;
- u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned head, tail;
- spinlock_t lock;
-};
-
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
@@ -74,27 +44,9 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct iova anchor; /* rbtree lookup anchor */
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
-
- iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
- TLBs */
- iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
- iova entry */
-
- struct timer_list fq_timer; /* Timer to regularily empty the
- flush-queues */
- atomic_t fq_timer_on; /* 1 when timer is active, 0
- when not */
+ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
struct hlist_node cpuhp_dead;
};
@@ -144,17 +96,12 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
@@ -189,12 +136,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
-static inline void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
-}
-
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
@@ -216,13 +157,6 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
-static inline int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb,
- iova_entry_dtor entry_dtor)
-{
- return -ENODEV;
-}
-
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
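
With queue_iova(), init_iova_flush_queue() and the flush_cb/entry_dtor callbacks removed, <linux/iova.h> is reduced to the core allocator; the flush-queue machinery now lives in iommu-dma. A rough sketch of the surviving interface, using only the prototypes visible in this hunk (the granule, PFN limits and the example_ name are assumptions):

#include <linux/iova.h>
#include <linux/sizes.h>

/* Hypothetical usage of the remaining IOVA allocator API */
static void example_iova_usage(struct iova_domain *iovad)
{
        unsigned long pfn;

        /* One-time setup: 4K granule, allocations start at PFN 1 */
        init_iova_domain(iovad, SZ_4K, 1);

        /* Per-CPU rcaches first, rbtree as fallback; the final 'true'
         * allows a retry after the rcaches are flushed on failure */
        pfn = alloc_iova_fast(iovad, 8, DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
        if (pfn)
                free_iova_fast(iovad, pfn, 8);

        put_iova_domain(iovad);
}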
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 72b4582322ff..29096fe12623 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -101,8 +101,9 @@ TRACE_EVENT(map,
__entry->size = size;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
- __entry->iova, __entry->paddr, __entry->size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
+ __entry->iova, __entry->iova + __entry->size, __entry->paddr,
+ __entry->size
)
);
@@ -124,8 +125,9 @@ TRACE_EVENT(unmap,
__entry->unmapped_size = unmapped_size;
),
- TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
- __entry->iova, __entry->size, __entry->unmapped_size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx size=%zu unmapped_size=%zu",
+ __entry->iova, __entry->iova + __entry->size,
+ __entry->size, __entry->unmapped_size
)
);
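
For reference, the widened format strings above make the end of the range visible directly in the trace; a map event would now render roughly as (placeholder values):

IOMMU: iova=0x0000000012340000 - 0x0000000012341000 paddr=0x0000000045670000 size=4096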
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index 237e36a280cb..1ff357f0d72e 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -16,6 +16,7 @@
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
#define VIRTIO_IOMMU_F_MMIO 5
+#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6
struct virtio_iommu_range_64 {
__le64 start;
@@ -36,6 +37,8 @@ struct virtio_iommu_config {
struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
__le32 probe_size;
+ __u8 bypass;
+ __u8 reserved[3];
};
/* Request types */
@@ -66,11 +69,14 @@ struct virtio_iommu_req_tail {
__u8 reserved[3];
};
+#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0)
+
struct virtio_iommu_req_attach {
struct virtio_iommu_req_head head;
__le32 domain;
__le32 endpoint;
- __u8 reserved[8];
+ __le32 flags;
+ __u8 reserved[4];
struct virtio_iommu_req_tail tail;
};
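
The flags word carved out of the previously reserved bytes lets an ATTACH request ask for a bypass (identity-mapped) domain via VIRTIO_IOMMU_ATTACH_F_BYPASS, alongside the new VIRTIO_IOMMU_F_BYPASS_CONFIG feature and the global bypass config field. A hedged sketch of how a driver might fill such a request; the helper name and parameters are assumptions, not the in-tree virtio-iommu code:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <uapi/linux/virtio_iommu.h>

/* Illustrative only: build an ATTACH request, optionally asking the
 * device to keep the endpoint's domain in bypass mode. */
static void example_build_attach(struct virtio_iommu_req_attach *req,
                                 u32 domain_id, u32 endpoint_id, bool bypass)
{
        memset(req, 0, sizeof(*req));
        req->head.type = VIRTIO_IOMMU_T_ATTACH;
        req->domain    = cpu_to_le32(domain_id);
        req->endpoint  = cpu_to_le32(endpoint_id);
        if (bypass)
                req->flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
}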