iommu/amd: make sure TLB to be flushed before IOVA freed
Although the mapping has already been removed from the page table, it may
still exist in the TLB. If the freed IOVA is reused by someone else before the
flush operation has completed, the new user cannot correctly access its
memory.
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Fixes: b1516a1465
('iommu/amd: Implement flush queue')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
4674686d6c
commit
3c120143f5
|
@ -2407,9 +2407,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
|
|||
}
|
||||
|
||||
if (amd_iommu_unmap_flush) {
|
||||
dma_ops_free_iova(dma_dom, dma_addr, pages);
|
||||
domain_flush_tlb(&dma_dom->domain);
|
||||
domain_flush_complete(&dma_dom->domain);
|
||||
dma_ops_free_iova(dma_dom, dma_addr, pages);
|
||||
} else {
|
||||
pages = __roundup_pow_of_two(pages);
|
||||
queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
|
||||
|
|
Loading…
Reference in New Issue