iommu/iova: Squash entry_dtor abstraction
All flush queues are driven by iommu-dma now, so there is no need to
abstract entry_dtor or its data any more. Squash the now-canonical
implementation directly into the IOVA code to get it out of the way.

Reviewed-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/2260f8de00ab5e0f9d2a1cf8978e6ae7cd4f182c.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit d5c383f2c9
parent d7061627d7
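The change below removes the per-entry destructor callback (and its opaque
unsigned long cookie) from the flush-queue API: entries now carry the
struct page freelist directly, and the IOVA code frees it itself. As a
rough illustration of that pattern only, here is a minimal, self-contained
userspace sketch; the names (pagelist, fq_entry, fq_ring_free) are
illustrative stand-ins, not the kernel API.

/*
 * Hypothetical userspace sketch of "squash the dtor callback": instead of
 * storing an opaque cookie plus a caller-supplied destructor, the queue
 * stores the concrete type and frees it with one known routine.
 */
#include <stdio.h>
#include <stdlib.h>

struct pagelist {                  /* stand-in for the kernel's page freelist */
	struct pagelist *next;
	void *mem;
};

struct fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct pagelist *freelist; /* was: unsigned long data + dtor callback */
};

/* The one concrete destructor, now owned by the queue code itself. */
static void fq_entry_dtor(struct pagelist *freelist)
{
	while (freelist) {
		struct pagelist *next = freelist->next;

		free(freelist->mem);
		free(freelist);
		freelist = next;
	}
}

static void fq_ring_free(struct fq_entry *entries, int n)
{
	/* No more "if (dtor) dtor(entry->data)" indirection per entry. */
	for (int i = 0; i < n; i++)
		fq_entry_dtor(entries[i].freelist);
}

int main(void)
{
	struct pagelist *pl = calloc(1, sizeof(*pl));
	struct fq_entry e;

	pl->mem = malloc(64);
	e.iova_pfn = 0x1000;
	e.pages = 1;
	e.freelist = pl;

	fq_ring_free(&e, 1);
	printf("flush-queue entry drained, freelist released\n");
	return 0;
}

The design point is the same as in the diff: once every user of the queue
frees entries the same way, keeping a function pointer and an opaque cookie
per domain only adds indirection, so the single destructor is moved into
the queue implementation.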
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -64,18 +64,6 @@ static int __init iommu_dma_forcedac_setup(char *str)
 }
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
-static void iommu_dma_entry_dtor(unsigned long data)
-{
-	struct page *freelist = (struct page *)data;
-
-	while (freelist) {
-		unsigned long p = (unsigned long)page_address(freelist);
-
-		freelist = freelist->freelist;
-		free_page(p);
-	}
-}
-
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
 {
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -324,8 +312,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
 	if (cookie->fq_domain)
 		return 0;
 
-	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
-				    iommu_dma_entry_dtor);
+	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all);
 	if (ret) {
 		pr_warn("iova flush queue initialization failed\n");
 		return ret;
@@ -471,7 +458,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	else if (gather && gather->queued)
 		queue_iova(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad),
-				(unsigned long)gather->freelist);
+				gather->freelist);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -91,11 +91,9 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 
 	iovad->fq = NULL;
 	iovad->flush_cb = NULL;
-	iovad->entry_dtor = NULL;
 }
 
-int init_iova_flush_queue(struct iova_domain *iovad,
-			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
+int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
 {
 	struct iova_fq __percpu *queue;
 	int cpu;
@@ -108,7 +106,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		return -ENOMEM;
 
 	iovad->flush_cb = flush_cb;
-	iovad->entry_dtor = entry_dtor;
 
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq;
@@ -547,6 +544,16 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
 
+static void fq_entry_dtor(struct page *freelist)
+{
+	while (freelist) {
+		unsigned long p = (unsigned long)page_address(freelist);
+
+		freelist = freelist->freelist;
+		free_page(p);
+	}
+}
+
 #define fq_ring_for_each(i, fq) \
 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
 
@@ -579,9 +586,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 		if (fq->entries[idx].counter >= counter)
 			break;
 
-		if (iovad->entry_dtor)
-			iovad->entry_dtor(fq->entries[idx].data);
-
+		fq_entry_dtor(fq->entries[idx].freelist);
 		free_iova_fast(iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
@@ -606,15 +611,12 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
 	 * bother to free iovas, just call the entry_dtor on all remaining
 	 * entries.
 	 */
-	if (!iovad->entry_dtor)
-		return;
-
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
 		int idx;
 
 		fq_ring_for_each(idx, fq)
-			iovad->entry_dtor(fq->entries[idx].data);
+			fq_entry_dtor(fq->entries[idx].freelist);
 	}
 }
 
@@ -639,7 +641,7 @@ static void fq_flush_timeout(struct timer_list *t)
 
 void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
-		unsigned long data)
+		struct page *freelist)
 {
 	struct iova_fq *fq;
 	unsigned long flags;
@@ -673,7 +675,7 @@ void queue_iova(struct iova_domain *iovad,
 
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages = pages;
-	fq->entries[idx].data = data;
+	fq->entries[idx].freelist = freelist;
 	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -40,9 +40,6 @@ struct iova_domain;
 /* Call-Back from IOVA code into IOMMU drivers */
 typedef void (* iova_flush_cb)(struct iova_domain *domain);
 
-/* Destructor for per-entry data */
-typedef void (* iova_entry_dtor)(unsigned long data);
-
 /* Number of entries per Flush Queue */
 #define IOVA_FQ_SIZE	256
 
@@ -53,7 +50,7 @@ typedef void (* iova_entry_dtor)(unsigned long data);
 struct iova_fq_entry {
 	unsigned long iova_pfn;
 	unsigned long pages;
-	unsigned long data;
+	struct page *freelist;
 	u64 counter; /* Flush counter when this entrie was added */
 };
 
@@ -88,9 +85,6 @@ struct iova_domain {
 	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
 					   TLBs */
 
-	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
-					   iova entry */
-
 	struct timer_list fq_timer;		/* Timer to regularily empty the
 						   flush-queues */
 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
@@ -146,15 +140,14 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
 		    unsigned long size);
 void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
-		unsigned long data);
+		struct page *freelist);
 unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 			      unsigned long limit_pfn, bool flush_rcache);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad,
-			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
+int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 #else
@@ -189,12 +182,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
 {
 }
 
-static inline void queue_iova(struct iova_domain *iovad,
-			      unsigned long pfn, unsigned long pages,
-			      unsigned long data)
-{
-}
-
 static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
 					    unsigned long size,
 					    unsigned long limit_pfn,
@@ -216,13 +203,6 @@ static inline void init_iova_domain(struct iova_domain *iovad,
 {
 }
 
-static inline int init_iova_flush_queue(struct iova_domain *iovad,
-					 iova_flush_cb flush_cb,
-					 iova_entry_dtor entry_dtor)
-{
-	return -ENODEV;
-}
-
 static inline struct iova *find_iova(struct iova_domain *iovad,
 				     unsigned long pfn)
 {