Commit a8e0e4af authored by Robin Murphy

iommu/iova: Keep count of 32-bit PFNs



Keep a count of how many PFNs below the 32-bit boundary are free, so
that when the 32-bit space is full, allocation attempts which cannot
possibly succeed fail fast, before walking the rbtree, to reduce
contention on the rbtree lock.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
parent 2124a2dd
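
To make the accounting concrete, here is a minimal userspace sketch of the
scheme this patch introduces, with the rbtree, locking and flush-queue
machinery stripped away. The struct and helper names (toy_domain, toy_insert,
toy_may_alloc) are hypothetical stand-ins for illustration, not the kernel API:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct iova_domain: only the fields that
 * matter for the 32-bit free-PFN accounting are modelled. */
struct toy_domain {
	unsigned long start_pfn;       /* lowest allocatable PFN */
	unsigned long dma_32bit_pfn;   /* first PFN above the 32-bit space */
	unsigned long free_32bit_pfns; /* PFNs still free below that boundary */
};

static void toy_init(struct toy_domain *d, unsigned long start_pfn,
		     unsigned long granule_shift)
{
	d->start_pfn = start_pfn;
	d->dma_32bit_pfn = 1UL << (32 - granule_shift);
	/* 32-bit space is [start_pfn, dma_32bit_pfn), hence this many PFNs */
	d->free_32bit_pfns = d->dma_32bit_pfn - start_pfn;
}

/* On insertion, charge only the portion of [pfn_lo, pfn_hi] that lies
 * below the 32-bit boundary against the free counter; freeing would do
 * the same in reverse. */
static void toy_insert(struct toy_domain *d, unsigned long pfn_lo,
		       unsigned long pfn_hi)
{
	unsigned long hi = pfn_hi < d->dma_32bit_pfn - 1 ?
			   pfn_hi : d->dma_32bit_pfn - 1;

	if (pfn_lo < d->dma_32bit_pfn)
		d->free_32bit_pfns -= hi - pfn_lo + 1;
}

/* The fail-fast test: a 32-bit-limited allocation of 'size' PFNs cannot
 * succeed when fewer than 'size' PFNs remain free below the boundary. */
static bool toy_may_alloc(struct toy_domain *d, unsigned long limit_pfn,
			  unsigned long size)
{
	return limit_pfn > d->dma_32bit_pfn || d->free_32bit_pfns >= size;
}

int main(void)
{
	struct toy_domain d;

	toy_init(&d, 1, 12);                /* 4K granule: boundary at 1 << 20 */
	toy_insert(&d, 1, (1UL << 20) - 2); /* use up all but one 32-bit PFN */

	assert(d.free_32bit_pfns == 1);
	assert(toy_may_alloc(&d, 1UL << 20, 1));  /* one PFN can still fit */
	assert(!toy_may_alloc(&d, 1UL << 20, 2)); /* two cannot: fail fast */
	assert(toy_may_alloc(&d, 1UL << 28, 2));  /* above 32 bits: unaffected */

	printf("free 32-bit PFNs: %lu\n", d.free_32bit_pfns);
	return 0;
}

Note that in the patch itself the counter is only read and updated while
iova_rbtree_lock is held, so the single counter needs no further
synchronisation.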
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+	iovad->free_32bit_pfns = iovad->dma_32bit_pfn - start_pfn;
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -114,6 +115,24 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 }
 EXPORT_SYMBOL_GPL(init_iova_flush_queue);
 
+static void insert_update_32bit_pfn(struct iova_domain *iovad, struct iova *new)
+{
+	unsigned long pfn_lo = new->pfn_lo;
+	unsigned long pfn_hi = min(new->pfn_hi, iovad->dma_32bit_pfn - 1);
+
+	if (pfn_lo < iovad->dma_32bit_pfn)
+		iovad->free_32bit_pfns -= pfn_hi - pfn_lo + 1;
+}
+
+static void delete_update_32bit_pfn(struct iova_domain *iovad, struct iova *free)
+{
+	unsigned long pfn_lo = free->pfn_lo;
+	unsigned long pfn_hi = min(free->pfn_hi, iovad->dma_32bit_pfn - 1);
+
+	if (pfn_lo < iovad->dma_32bit_pfn)
+		iovad->free_32bit_pfns += pfn_hi - pfn_lo + 1;
+}
+
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
 {
@@ -130,6 +149,8 @@ __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
 		iovad->cached32_node = &new->node;
 	else
 		iovad->cached_node = &new->node;
+
+	insert_update_32bit_pfn(iovad, new);
 }
 
 static void
@@ -145,6 +166,8 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
 		iovad->cached_node = rb_next(&free->node);
+
+	delete_update_32bit_pfn(iovad, free);
 }
 
 /* Insert the iova into domain rbtree by holding writer lock */
@@ -188,8 +211,13 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	if (size_aligned)
 		align_mask <<= fls_long(size - 1);
 
-	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+	/* Fail fast if a 32-bit allocation cannot possibly succeed */
+	if (limit_pfn <= iovad->dma_32bit_pfn && iovad->free_32bit_pfns < size)
+		goto no_mem;
+
+	/* Walk the tree backwards */
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
 	do {
@@ -200,10 +228,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		curr_iova = rb_entry(curr, struct iova, node);
 	} while (curr && new_pfn <= curr_iova->pfn_hi);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
-		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-		return -ENOMEM;
-	}
+	if (limit_pfn < size || new_pfn < iovad->start_pfn)
+		goto no_mem;
 
 	/* pfn_lo will point to size aligned address if size_aligned is set */
 	new->pfn_lo = new_pfn;
@@ -214,9 +240,11 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	__cached_rbnode_insert_update(iovad, new);
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return 0;
+
+no_mem:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return -ENOMEM;
 }
 
 static struct kmem_cache *iova_cache;
@@ -678,6 +706,8 @@ reserve_iova(struct iova_domain *iovad,
 	 * or need to insert remaining non overlap addr range
 	 */
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
+	if (iova)
+		insert_update_32bit_pfn(iovad, iova);
 finish:
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -77,6 +77,7 @@ struct iova_domain {
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
+	unsigned int	free_32bit_pfns;
 	struct iova	anchor;		/* rbtree lookup anchor */
 	struct iova_rcache	rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
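
A subtlety worth a worked example, under the same toy model as the sketch
above: an iova that straddles the 32-bit boundary must charge only its
sub-boundary portion to the counter, which is what the min() clamp in
insert_update_32bit_pfn()/delete_update_32bit_pfn() is for. The numbers
below are illustrative only:

#include <assert.h>

int main(void)
{
	/* With a 4K granule the 32-bit boundary sits at PFN 1 << 20 */
	unsigned long boundary = 1UL << 20;

	/* An iova straddling the boundary: 8 PFNs in total,
	 * only 4 of which lie below the boundary. */
	unsigned long pfn_lo = boundary - 4, pfn_hi = boundary + 3;

	/* Clamp pfn_hi as the helpers do, then count the charged PFNs */
	unsigned long hi = pfn_hi < boundary - 1 ? pfn_hi : boundary - 1;
	unsigned long charged = pfn_lo < boundary ? hi - pfn_lo + 1 : 0;

	assert(charged == 4);	/* not 8: only the 32-bit portion counts */
	return 0;
}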