iommu/iova: Add flush counters to Flush-Queue implementation
There are two counters:

	* fq_flush_start_cnt  - Increased when a TLB flush is started.
	* fq_flush_finish_cnt - Increased when a TLB flush is finished.

The fq_flush_start_cnt is assigned to every Flush-Queue entry on its
creation. When freeing entries from the Flush-Queue, the value in the
entry is compared to the fq_flush_finish_cnt. The entry can only be
freed when its value is less than the value of fq_flush_finish_cnt.

The reason for these counters is to take advantage of IOMMU TLB flushes
that happened on other CPUs: those flushes already invalidated the TLB
for Flush-Queue entries queued on other CPUs, so such entries can be
freed without flushing the TLB again.

This makes it less likely that the Flush-Queue is full and saves IOMMU
TLB flushes.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit fb418dab8a
parent 1928210107
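To make the counting scheme concrete before the diff, here is a minimal
user-space C model of the two counters. It is illustrative only and not
part of the patch: the names, the stand-in tlb_flush() and the
simplified types are invented for this sketch.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Model of the two domain-wide flush counters */
	static atomic_uint_fast64_t flush_start_cnt;
	static atomic_uint_fast64_t flush_finish_cnt;

	struct fq_entry {
		uint64_t counter;	/* flush_start_cnt when queued */
	};

	static void tlb_flush(void)
	{
		/* stands in for the driver's flush_cb() */
	}

	/* Bracket a full TLB flush with the two counters */
	static void domain_flush(void)
	{
		atomic_fetch_add(&flush_start_cnt, 1);
		tlb_flush();
		atomic_fetch_add(&flush_finish_cnt, 1);
	}

	/* Stamp a new entry with the current start count */
	static void queue_entry(struct fq_entry *e)
	{
		e->counter = atomic_load(&flush_start_cnt);
	}

	/*
	 * The entry is safe to free once a flush that started after the
	 * entry was queued has finished, i.e. its stamp is strictly
	 * below the finish count.
	 */
	static bool entry_flushed(const struct fq_entry *e)
	{
		return e->counter < atomic_load(&flush_finish_cnt);
	}

After queue_entry(&e) followed by one complete domain_flush(),
entry_flushed(&e) returns true, so the entry can be freed without
another flush; this is the case fq_ring_free() exploits below when the
flush happened on another CPU.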
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -75,6 +75,9 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 {
 	int cpu;
 
+	atomic64_set(&iovad->fq_flush_start_cnt,  0);
+	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
 	iovad->fq = alloc_percpu(struct iova_fq);
 	if (!iovad->fq)
 		return -ENOMEM;
@@ -482,20 +485,30 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 
 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 {
+	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
 	unsigned idx;
 
 	fq_ring_for_each(idx, fq) {
 
+		if (fq->entries[idx].counter >= counter)
+			break;
+
 		if (iovad->entry_dtor)
 			iovad->entry_dtor(fq->entries[idx].data);
 
 		free_iova_fast(iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
-	}
 
-	fq->head = 0;
-	fq->tail = 0;
+		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+	}
+}
+
+static void iova_domain_flush(struct iova_domain *iovad)
+{
+	atomic64_inc(&iovad->fq_flush_start_cnt);
+	iovad->flush_cb(iovad);
+	atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
 static void fq_destroy_all_entries(struct iova_domain *iovad)
@@ -526,8 +539,15 @@ void queue_iova(struct iova_domain *iovad,
 	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
 	unsigned idx;
 
+	/*
+	 * First remove all entries from the flush queue that have already been
+	 * flushed out on another CPU. This makes the fq_full() check below less
+	 * likely to be true.
+	 */
+	fq_ring_free(iovad, fq);
+
 	if (fq_full(fq)) {
-		iovad->flush_cb(iovad);
+		iova_domain_flush(iovad);
 		fq_ring_free(iovad, fq);
 	}
 
@@ -536,6 +556,7 @@ void queue_iova(struct iova_domain *iovad,
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages    = pages;
 	fq->entries[idx].data     = data;
+	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
 	put_cpu_ptr(iovad->fq);
 }
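Note the behavioural change in fq_ring_free() above: instead of
draining the whole ring and resetting head and tail to zero, the loop
now stops at the first entry whose stamp is not yet below the finish
count and advances head past each freed entry, so the ring can be freed
partially. A self-contained C sketch of just this ring logic follows;
FQ_SIZE, all names and the main() scenario are invented for
illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define FQ_SIZE 8

	struct fq_entry {
		uint64_t counter;	/* stamp taken at queue time */
	};

	struct fq {
		struct fq_entry entries[FQ_SIZE];
		unsigned head, tail;	/* ring is empty when head == tail */
	};

	#define fq_ring_for_each(i, fq) \
		for ((i) = (fq)->head; (i) != (fq)->tail; \
		     (i) = ((i) + 1) % FQ_SIZE)

	/* Free entries already covered by a finished flush; stop at the
	 * first entry that still needs one, leaving it queued. */
	static void ring_free(struct fq *fq, uint64_t finish_cnt)
	{
		unsigned idx;

		fq_ring_for_each(idx, fq) {
			if (fq->entries[idx].counter >= finish_cnt)
				break;
			/* entry_dtor() / free_iova_fast() would run here */
			fq->head = (fq->head + 1) % FQ_SIZE;
		}
	}

	int main(void)
	{
		struct fq fq = { .head = 0, .tail = 3 };

		fq.entries[0].counter = 1;	/* queued before flush #2 started */
		fq.entries[1].counter = 2;	/* queued after flush #2 started */
		fq.entries[2].counter = 2;

		ring_free(&fq, 2);		/* two flushes have finished */

		/* prints "head=1 tail=3": only entry 0 could be freed */
		printf("head=%u tail=%u\n", fq.head, fq.tail);
		return 0;
	}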
diff --git a/include/linux/iova.h b/include/linux/iova.h
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/rbtree.h>
+#include <linux/atomic.h>
 #include <linux/dma-mapping.h>
 
 /* iova structure */
@@ -52,6 +53,7 @@ struct iova_fq_entry {
 	unsigned long iova_pfn;
 	unsigned long pages;
 	unsigned long data;
+	u64 counter; /* Flush counter when this entry was added */
 };
 
 /* Per-CPU Flush Queue structure */
@@ -77,6 +79,12 @@ struct iova_domain {
 				   iova entry */
 
 	struct iova_fq __percpu *fq;	/* Flush Queue */
 
+	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
+						   have been started */
+
+	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
+						   have been finished */
+
 };
 
 static inline unsigned long iova_size(struct iova *iova)