aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-04-16 12:45:18 -0700
committerDavid S. Miller <davem@davemloft.net>2015-04-16 12:45:18 -0700
commita83f5d6a11ced6f43ec467a95fe8cc76e1c96cfd (patch)
treed3bc9387c3d86187c870e967e887513c61d156b4 /include
parentMerge tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip (diff)
parentsparc: Make LDC use common iommu poll management functions (diff)
downloadlinux-dev-a83f5d6a11ced6f43ec467a95fe8cc76e1c96cfd.tar.xz
linux-dev-a83f5d6a11ced6f43ec467a95fe8cc76e1c96cfd.zip
Merge branch 'generic-iommu-allocator'
Sowmini Varadhan says: ==================== Generic IOMMU pooled allocator Investigation of network performance on Sparc shows a high degree of locking contention in the IOMMU allocator, and it was noticed that the PowerPC code has a better locking model. This patch series tries to extract the generic parts of the PowerPC code so that it can be shared across multiple PCI devices and architectures. v10: resend patch v9 without RFC tag, and a new mail Message-Id, (previous non-RFC attempt did not show up on the patchwork queue?) Full revision history below: v2 changes: - incorporate David Miller editorial comments: sparc specific fields moved from iommu-common into sparc's iommu_64.h - make the npools value an input parameter, for the case when the iommu map size is not very large - cookie_to_index mapping, and optimizations for span-boundary check, for use case such as LDC. v3: eliminate iommu_sparc, rearrange the ->demap indirection to be invoked under the pool lock. v4: David Miller review changes: - s/IOMMU_ERROR_CODE/DMA_ERROR_CODE - page_table_map_base and page_table_shift are unsigned long, not u32. v5: removed ->cookie_to_index and ->demap indirection from the iommu_tbl_ops The caller needs to call these functions as needed, before invoking the generic arena allocator functions. Added the "skip_span_boundary" argument to iommu_tbl_pool_init() for those callers like LDC which do not care about span boundary checks. v6: removed iommu_tbl_ops, and instead pass the ->flush_all as an indirection to iommu_tbl_pool_init(); only invoke ->flush_all when there is no large_pool, based on the assumption that large-pool usage is infrequently encountered v7: moved pool_hash initialization to lib/iommu-common.c and cleaned up code duplication from sun4v/sun4u/ldc. v8: Addresses BenH comments with one exception: I've left the IOMMU_POOL_HASH as is, so that powerpc can tailor it to their convenience.
Discard trylock for simple spin_lock to acquire pool v9: Addresses latest BenH comments: need_flush checks, add support for dma mask and align_order. v10: resend without RFC tag, and new mail Message-Id. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--include/linux/iommu-common.h55
1 file changed, 55 insertions, 0 deletions
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
new file mode 100644
index 000000000000..6be5c863f329
--- /dev/null
+++ b/include/linux/iommu-common.h
@@ -0,0 +1,55 @@
+#ifndef _LINUX_IOMMU_COMMON_H
+#define _LINUX_IOMMU_COMMON_H
+
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <asm/page.h>
+
+#define IOMMU_POOL_HASHBITS 4
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+};
+
+struct iommu_table;
+
+struct iommu_tbl_ops {
+ unsigned long (*cookie_to_index)(u64, void *);
+ void (*demap)(void *, unsigned long, unsigned long);
+ void (*reset)(struct iommu_table *);
+};
+
+struct iommu_table {
+ unsigned long page_table_map_base;
+ unsigned long page_table_shift;
+ unsigned long nr_pools;
+ const struct iommu_tbl_ops *iommu_tbl_ops;
+ unsigned long poolsize;
+ struct iommu_pool arena_pool[IOMMU_NR_POOLS];
+ u32 flags;
+#define IOMMU_HAS_LARGE_POOL 0x00000001
+ struct iommu_pool large_pool;
+ unsigned long *map;
+};
+
+extern void iommu_tbl_pool_init(struct iommu_table *iommu,
+ unsigned long num_entries,
+ u32 page_table_shift,
+ const struct iommu_tbl_ops *iommu_tbl_ops,
+ bool large_pool, u32 npools);
+
+extern unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned int pool_hash);
+
+extern void iommu_tbl_range_free(struct iommu_table *iommu,
+ u64 dma_addr, unsigned long npages,
+ bool do_demap, void *demap_arg);
+
+#endif