Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--  drivers/iommu/iova.c | 220
1 file changed, 88 insertions(+), 132 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 33edfa794ae9..466aaa8ba841 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,6 +24,9 @@
#include <linux/bitops.h>
#include <linux/cpu.h>
+/* The anchor node sits above the top of the usable address space */
+#define IOVA_ANCHOR ~0UL
+
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
@@ -37,7 +40,7 @@ static void fq_flush_timeout(unsigned long data);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
- unsigned long start_pfn, unsigned long pfn_32bit)
+ unsigned long start_pfn)
{
/*
* IOVA granularity will normally be equal to the smallest
@@ -48,12 +51,16 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
- iovad->cached32_node = NULL;
+ iovad->cached_node = &iovad->anchor.node;
+ iovad->cached32_node = &iovad->anchor.node;
iovad->granule = granule;
iovad->start_pfn = start_pfn;
- iovad->dma_32bit_pfn = pfn_32bit + 1;
+ iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->flush_cb = NULL;
iovad->fq = NULL;
+ iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
+ rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
+ rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
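Note: the anchor is a permanent sentinel entry whose pfn_lo/pfn_hi are both ~0UL, so it always sorts last in the rbtree; cached_node and cached32_node can therefore start out pointing at a real node and never need a NULL check, and free_iova_mem() below is taught to skip it so it is never returned to the slab. A minimal stand-alone illustration of the sentinel property (plain C, not kernel code; the values are arbitrary):

#include <assert.h>

#define IOVA_ANCHOR	~0UL

int main(void)
{
	unsigned long limit_pfn = 0x000fffff;	/* arbitrary caller limit */
	/* first step of the backwards walk: limit_pfn = min(limit_pfn, pfn_lo);
	 * against the anchor this never lowers the caller's limit */
	unsigned long clamped = limit_pfn < IOVA_ANCHOR ? limit_pfn : IOVA_ANCHOR;

	assert(clamped == limit_pfn);
	return 0;
}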
@@ -108,50 +115,36 @@ int init_iova_flush_queue(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(init_iova_flush_queue);
static struct rb_node *
-__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
+__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
- if ((*limit_pfn > iovad->dma_32bit_pfn) ||
- (iovad->cached32_node == NULL))
- return rb_last(&iovad->rbroot);
- else {
- struct rb_node *prev_node = rb_prev(iovad->cached32_node);
- struct iova *curr_iova =
- rb_entry(iovad->cached32_node, struct iova, node);
- *limit_pfn = curr_iova->pfn_lo;
- return prev_node;
- }
+ if (limit_pfn <= iovad->dma_32bit_pfn)
+ return iovad->cached32_node;
+
+ return iovad->cached_node;
}
static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
- unsigned long limit_pfn, struct iova *new)
+__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
- if (limit_pfn != iovad->dma_32bit_pfn)
- return;
- iovad->cached32_node = &new->node;
+ if (new->pfn_hi < iovad->dma_32bit_pfn)
+ iovad->cached32_node = &new->node;
+ else
+ iovad->cached_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
struct iova *cached_iova;
- struct rb_node *curr;
- if (!iovad->cached32_node)
- return;
- curr = iovad->cached32_node;
- cached_iova = rb_entry(curr, struct iova, node);
-
- if (free->pfn_lo >= cached_iova->pfn_lo) {
- struct rb_node *node = rb_next(&free->node);
- struct iova *iova = rb_entry(node, struct iova, node);
+ cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+ if (free->pfn_hi < iovad->dma_32bit_pfn &&
+ free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached32_node = rb_next(&free->node);
- /* only cache if it's below 32bit pfn */
- if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
- iovad->cached32_node = node;
- else
- iovad->cached32_node = NULL;
- }
+ cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+ if (free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached_node = rb_next(&free->node);
}
/* Insert the iova into domain rbtree by holding writer lock */
@@ -182,63 +175,43 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
rb_insert_color(&iova->node, root);
}
-/*
- * Computes the padding size required, to make the start address
- * naturally aligned on the power-of-two order of its size
- */
-static unsigned int
-iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
-{
- return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
-}
-
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
unsigned long size, unsigned long limit_pfn,
struct iova *new, bool size_aligned)
{
- struct rb_node *prev, *curr = NULL;
+ struct rb_node *curr, *prev;
+ struct iova *curr_iova;
unsigned long flags;
- unsigned long saved_pfn;
- unsigned int pad_size = 0;
+ unsigned long new_pfn;
+ unsigned long align_mask = ~0UL;
+
+ if (size_aligned)
+ align_mask <<= fls_long(size - 1);
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- saved_pfn = limit_pfn;
- curr = __get_cached_rbnode(iovad, &limit_pfn);
- prev = curr;
- while (curr) {
- struct iova *curr_iova = rb_entry(curr, struct iova, node);
-
- if (limit_pfn <= curr_iova->pfn_lo) {
- goto move_left;
- } else if (limit_pfn > curr_iova->pfn_hi) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
- break; /* found a free slot */
- }
- limit_pfn = curr_iova->pfn_lo;
-move_left:
+ curr = __get_cached_rbnode(iovad, limit_pfn);
+ curr_iova = rb_entry(curr, struct iova, node);
+ do {
+ limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+ new_pfn = (limit_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
- }
+ curr_iova = rb_entry(curr, struct iova, node);
+ } while (curr && new_pfn <= curr_iova->pfn_hi);
- if (!curr) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
+ if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
/* pfn_lo will point to size aligned address if size_aligned is set */
- new->pfn_lo = limit_pfn - (size + pad_size);
+ new->pfn_lo = new_pfn;
new->pfn_hi = new->pfn_lo + size - 1;
/* If we have 'prev', it's a valid place to start the insertion. */
iova_insert_rbtree(&iovad->rbroot, new, prev);
- __cached_rbnode_insert_update(iovad, saved_pfn, new);
+ __cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
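Note on the rewritten allocator: with size_aligned set, align_mask clears the low fls_long(size - 1) bits, so (limit_pfn - size) & align_mask is the highest start pfn that both fits below limit_pfn and is aligned to the power-of-two order of the size; this replaces the old iova_get_pad_size() padding. A stand-alone model of just that arithmetic (candidate_pfn() is an illustrative helper, fls_long() is emulated with a GCC builtin, and a 64-bit long is assumed):

#include <stdio.h>

static unsigned long candidate_pfn(unsigned long limit_pfn,
				   unsigned long size, int size_aligned)
{
	unsigned long align_mask = ~0UL;

	if (size_aligned && size > 1)
		align_mask <<= 64 - __builtin_clzl(size - 1);	/* fls_long(size - 1) */

	return (limit_pfn - size) & align_mask;
}

int main(void)
{
	/* size 6 rounds up to an order-8 alignment: the highest aligned
	 * start at or below 0x1000 - 6 is 0xff8 */
	printf("%#lx\n", candidate_pfn(0x1000, 6, 1));
	return 0;
}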
@@ -258,7 +231,8 @@ EXPORT_SYMBOL(alloc_iova_mem);
void free_iova_mem(struct iova *iova)
{
- kmem_cache_free(iova_cache, iova);
+ if (iova->pfn_lo != IOVA_ANCHOR)
+ kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);
@@ -342,15 +316,12 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
while (node) {
struct iova *iova = rb_entry(node, struct iova, node);
- /* If pfn falls within iova's range, return iova */
- if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- return iova;
- }
-
if (pfn < iova->pfn_lo)
node = node->rb_left;
- else if (pfn > iova->pfn_lo)
+ else if (pfn > iova->pfn_hi)
node = node->rb_right;
+ else
+ return iova; /* pfn falls within iova's range */
}
return NULL;
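Note: the lookup is now a plain three-way interval search, descending left below pfn_lo and right above pfn_hi, so any pfn inside a stored range matches in a single pass. The same comparison over a sorted array of disjoint ranges, as a stand-alone sketch (struct range and find_range() are illustrative only):

struct range { unsigned long lo, hi; };

static const struct range *find_range(const struct range *r, int lo, int hi,
				      unsigned long pfn)
{
	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (pfn < r[mid].lo)
			hi = mid - 1;
		else if (pfn > r[mid].hi)
			lo = mid + 1;
		else
			return &r[mid];	/* pfn falls within this range */
	}
	return NULL;
}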
@@ -424,18 +395,19 @@ EXPORT_SYMBOL_GPL(free_iova);
* @iovad: - iova domain in question
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
+ * @flush_rcache: - set to flush rcache on regular allocation failure
* This function tries to satisfy an iova allocation from the rcache,
- * and falls back to regular allocation on failure.
+ * and falls back to regular allocation on failure. If regular allocation
+ * fails too and the flush_rcache flag is set then the rcache will be flushed.
*/
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn, bool flush_rcache)
{
- bool flushed_rcache = false;
unsigned long iova_pfn;
struct iova *new_iova;
- iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+ iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
if (iova_pfn)
return iova_pfn;
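Note: the limit_pfn + 1 here pairs with the limit_pfn - size adjustment in iova_rcache_get() further down; reading the two together, the caller's limit_pfn is the highest usable pfn, so the highest cached pfn_lo that still fits is (limit_pfn + 1) - size. A small stand-alone check of that reading (illustrative values only):

#include <assert.h>

int main(void)
{
	unsigned long limit_pfn = 0xfffff;	/* highest usable pfn */
	unsigned long size = 0x10;		/* allocation size in pfns */
	unsigned long exclusive = limit_pfn + 1;	/* passed to iova_rcache_get() */
	unsigned long max_pfn_lo = exclusive - size;	/* passed to __iova_rcache_get() */

	/* an allocation starting at max_pfn_lo ends exactly on limit_pfn */
	assert(max_pfn_lo + size - 1 == limit_pfn);
	return 0;
}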
@@ -444,11 +416,11 @@ retry:
if (!new_iova) {
unsigned int cpu;
- if (flushed_rcache)
+ if (!flush_rcache)
return 0;
/* Try replenishing IOVAs by flushing rcache. */
- flushed_rcache = true;
+ flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
goto retry;
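Note: callers now choose whether a failed allocation may flush every CPU's cached IOVAs and retry, rather than that always happening. A hypothetical pair of callers showing the two modes (my_map_fast() and my_map_strict() are made-up names):

#include <linux/iova.h>

static unsigned long my_map_fast(struct iova_domain *iovad,
				 unsigned long nr_pages,
				 unsigned long limit_pfn)
{
	/* normal DMA mapping path: allow the expensive flush-and-retry */
	return alloc_iova_fast(iovad, nr_pages, limit_pfn, true);
}

static unsigned long my_map_strict(struct iova_domain *iovad,
				   unsigned long nr_pages,
				   unsigned long limit_pfn)
{
	/* prefer a fast failure over trashing every CPU's cache */
	return alloc_iova_fast(iovad, nr_pages, limit_pfn, false);
}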
@@ -570,7 +542,7 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data)
{
- struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+ struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
unsigned long flags;
unsigned idx;
@@ -600,8 +572,6 @@ void queue_iova(struct iova_domain *iovad,
if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
-
- put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);
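Note: switching from get_cpu_ptr() to raw_cpu_ptr() drops the implied preempt_disable()/preempt_enable() pair; that looks safe here because each per-CPU flush queue is protected by its own lock, so it does not matter if the task migrates after picking a queue. A generic sketch of that pattern (struct pcpu_queue and queue_on_local_cpu() are made up, not the iova code):

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct pcpu_queue {
	spinlock_t lock;
	/* ... queued entries ... */
};

static void queue_on_local_cpu(struct pcpu_queue __percpu *queues)
{
	/* no preemption disabling: worst case we land on another CPU's
	 * queue, which the lock below makes safe anyway */
	struct pcpu_queue *q = raw_cpu_ptr(queues);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* ... add an entry ... */
	spin_unlock_irqrestore(&q->lock, flags);
}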
@@ -612,21 +582,12 @@ EXPORT_SYMBOL_GPL(queue_iova);
*/
void put_iova_domain(struct iova_domain *iovad)
{
- struct rb_node *node;
- unsigned long flags;
+ struct iova *iova, *tmp;
free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = rb_first(&iovad->rbroot);
- while (node) {
- struct iova *iova = rb_entry(node, struct iova, node);
-
- rb_erase(node, &iovad->rbroot);
+ rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
- node = rb_first(&iovad->rbroot);
- }
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
@@ -695,6 +656,10 @@ reserve_iova(struct iova_domain *iovad,
struct iova *iova;
unsigned int overlap = 0;
+ /* Don't allow nonsensical pfns */
+ if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
+ return NULL;
+
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
@@ -738,6 +703,9 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
struct iova *iova = rb_entry(node, struct iova, node);
struct iova *new_iova;
+ if (iova->pfn_lo == IOVA_ANCHOR)
+ continue;
+
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
if (!new_iova)
printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
@@ -855,12 +823,21 @@ static bool iova_magazine_empty(struct iova_magazine *mag)
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
unsigned long limit_pfn)
{
+ int i;
+ unsigned long pfn;
+
BUG_ON(iova_magazine_empty(mag));
- if (mag->pfns[mag->size - 1] >= limit_pfn)
- return 0;
+ /* Only fall back to the rbtree if we have no suitable pfns at all */
+ for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
+ if (i == 0)
+ return 0;
- return mag->pfns[--mag->size];
+ /* Swap it to pop it */
+ pfn = mag->pfns[i];
+ mag->pfns[i] = mag->pfns[--mag->size];
+
+ return pfn;
}
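Note: the magazine is an unordered array, so instead of giving up whenever the top entry alone exceeds limit_pfn, the loop now looks for any suitable pfn and pops it by swapping the last entry into its slot, keeping removal O(1). The same idea on a plain array, as a stand-alone sketch (pop_below() is an illustrative stand-in, not the kernel helper):

static unsigned long pop_below(unsigned long *pfns, int *size,
			       unsigned long limit)
{
	for (int i = *size - 1; i >= 0; i--) {
		if (pfns[i] <= limit) {
			unsigned long pfn = pfns[i];

			pfns[i] = pfns[--(*size)];	/* swap the last entry in */
			return pfn;
		}
	}
	return 0;	/* nothing suitable: caller falls back to the rbtree */
}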
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
@@ -1011,27 +988,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0;
- return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
-}
-
-/*
- * Free a cpu's rcache.
- */
-static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
- struct iova_rcache *rcache)
-{
- struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
- unsigned long flags;
-
- spin_lock_irqsave(&cpu_rcache->lock, flags);
-
- iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
- iova_magazine_free(cpu_rcache->loaded);
-
- iova_magazine_free_pfns(cpu_rcache->prev, iovad);
- iova_magazine_free(cpu_rcache->prev);
-
- spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
/*
@@ -1040,21 +997,20 @@ static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
static void free_iova_rcaches(struct iova_domain *iovad)
{
struct iova_rcache *rcache;
- unsigned long flags;
+ struct iova_cpu_rcache *cpu_rcache;
unsigned int cpu;
int i, j;
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
- for_each_possible_cpu(cpu)
- free_cpu_iova_rcache(cpu, iovad, rcache);
- spin_lock_irqsave(&rcache->lock, flags);
+ for_each_possible_cpu(cpu) {
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ iova_magazine_free(cpu_rcache->loaded);
+ iova_magazine_free(cpu_rcache->prev);
+ }
free_percpu(rcache->cpu_rcaches);
- for (j = 0; j < rcache->depot_size; ++j) {
- iova_magazine_free_pfns(rcache->depot[j], iovad);
+ for (j = 0; j < rcache->depot_size; ++j)
iova_magazine_free(rcache->depot[j]);
- }
- spin_unlock_irqrestore(&rcache->lock, flags);
}
}