AMD IOMMU: fix lazy IO/TLB flushing in unmap path
Lazy flushing also needs to cover the unmap path. This is not yet implemented, so unmapping can leave stale IO/TLB entries behind. This patch fixes that.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent ae9b940364
commit 80be308dfa
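For context on the lazy-flushing scheme the commit message refers to, here is a minimal, compilable sketch of the allocation side. All names here (toy_dom, toy_alloc, DOM_PAGES) are illustrative stand-ins, not the actual amd_iommu code: the allocator scans upward from a next_bit cursor and only flushes the IO/TLB when the search wraps around, on the assumption that freed addresses below the cursor are never handed out again before that flush.

#include <stdbool.h>
#include <stdio.h>

#define DOM_PAGES 16UL

/* Illustrative stand-in for a dma_ops_domain; not the real driver code. */
struct toy_dom {
	bool used[DOM_PAGES];   /* one flag per IO page address */
	unsigned long next_bit; /* allocation search cursor */
	bool need_flush;        /* an IO/TLB flush is pending */
};

/* Allocate one IO page address, scanning upward from next_bit.
 * Flushing is lazy: stale IO/TLB entries for freed addresses are
 * only purged when the search wraps back to the start. */
static long toy_alloc(struct toy_dom *dom)
{
	unsigned long i;

	for (i = dom->next_bit; i < DOM_PAGES; i++)
		if (!dom->used[i])
			goto found;

	/* Wraparound: addresses below the cursor may still have
	 * stale IO/TLB entries, so flush before reusing them. */
	printf("wraparound: flushing the domain's IO/TLB\n");
	dom->need_flush = false;
	for (i = 0; i < dom->next_bit; i++)
		if (!dom->used[i])
			goto found;
	return -1; /* address space exhausted */
found:
	dom->used[i] = true;
	dom->next_bit = i + 1;
	return (long)i;
}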
@@ -526,6 +526,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 {
 	address >>= PAGE_SHIFT;
 	iommu_area_free(dom->bitmap, address, pages);
+
+	if (address + pages >= dom->next_bit)
+		dom->need_flush = true;
 }
 
 /****************************************************************************
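The hunk above is the free side of that scheme: if a freed range reaches up to or beyond next_bit, the allocator can hand those addresses out again before the next wraparound flush, so the domain must be flagged. Continuing the hypothetical sketch, toy_free mirrors the address + pages >= dom->next_bit check with pages == 1:

/* Free one IO page address. If the freed page could be handed out
 * again before the next wraparound flush, flag the domain; this is
 * what the hunk above adds to dma_ops_free_addresses(). */
static void toy_free(struct toy_dom *dom, unsigned long addr)
{
	dom->used[addr] = false;
	if (addr + 1 >= dom->next_bit)
		dom->need_flush = true;
}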
@@ -981,8 +984,10 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	if (amd_iommu_unmap_flush)
+	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		dma_dom->need_flush = false;
+	}
 }
 
 /*
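The second hunk is the consuming side: on unmap, flush either when eager unmap flushing is enabled (amd_iommu_unmap_flush) or when freeing the range flagged the domain, then clear the flag. A matching toy unmap path plus a short usage demo, again purely illustrative:

static bool unmap_flush = false; /* stand-in for amd_iommu_unmap_flush */

/* Unmap: free the address, then flush if eager flushing is enabled
 * or the free flagged the domain (the case the hunk above adds). */
static void toy_unmap(struct toy_dom *dom, unsigned long addr)
{
	toy_free(dom, addr);
	if (unmap_flush || dom->need_flush) {
		printf("unmap: flushing IO/TLB for page %lu\n", addr);
		dom->need_flush = false;
	}
}

int main(void)
{
	struct toy_dom dom = { .next_bit = 0 };
	long a = toy_alloc(&dom); /* a == 0, next_bit becomes 1 */
	long b = toy_alloc(&dom); /* b == 1, next_bit becomes 2 */

	toy_unmap(&dom, (unsigned long)b); /* b + 1 >= next_bit: flushes */
	toy_unmap(&dom, (unsigned long)a); /* below the cursor: no flush */
	return 0;
}

Without the patch, only amd_iommu_unmap_flush is checked on unmap, so in the toy the b case would leave a stale IO/TLB entry that a later allocation of the same address could hit before the wraparound flush.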