iommu/tegra-smmu: Use __GFP_ZERO to allocate zeroed pages
Rather than explicitly zeroing pages allocated via alloc_page(), add
__GFP_ZERO to the gfp mask to ask the allocator for zeroed pages.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
parent 05a65f06f6
commit 707917cbc6
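The change at both call sites is the same mechanical transformation.
Condensed from the hunks below (identifiers such as SMMU_NUM_PTE and
__GFP_DMA come from the driver code shown in the diff), the
before/after pattern is:

	/* Before: allocate a page, then zero it by hand. */
	page = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!page)
		return NULL;

	pt = page_address(page);
	for (i = 0; i < SMMU_NUM_PTE; i++)
		pt[i] = 0;

	/* After: ask the allocator for a zero-filled page up front,
	 * which also lets the temporaries pt and i be dropped. */
	page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!page)
		return NULL;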
@@ -258,8 +258,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
 static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
 	struct tegra_smmu_as *as;
-	unsigned int i;
-	uint32_t *pd;
 
 	if (type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;
@@ -270,7 +268,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 
 	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
 	if (!as->pd) {
 		kfree(as);
 		return NULL;
@@ -291,12 +289,6 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 		return NULL;
 	}
 
-	/* clear PDEs */
-	pd = page_address(as->pd);
-
-	for (i = 0; i < SMMU_NUM_PDE; i++)
-		pd[i] = 0;
-
 	/* setup aperture */
 	as->domain.geometry.aperture_start = 0;
 	as->domain.geometry.aperture_end = 0xffffffff;
@@ -533,21 +525,15 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 	u32 *pd = page_address(as->pd), *pt;
 	unsigned int pde = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
-	unsigned int i;
 
 	if (!as->pts[pde]) {
 		struct page *page;
 		dma_addr_t dma;
 
-		page = alloc_page(GFP_KERNEL | __GFP_DMA);
+		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
 		if (!page)
 			return NULL;
 
-		pt = page_address(page);
-
-		for (i = 0; i < SMMU_NUM_PTE; i++)
-			pt[i] = 0;
-
 		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
 				   DMA_TO_DEVICE);
 		if (dma_mapping_error(smmu->dev, dma)) {
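For context, the allocation path in as_get_pte() after this change
reads roughly as follows. This is a sketch condensed from the hunk
above, not the full function: the diff is cut off after the
dma_mapping_error() check, so the unwind shown here (__free_page()
and a NULL return) is an assumption, not taken from the patch.

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		/* Zero-filled by the allocator; replaces the old
		 * "for (i = 0; i < SMMU_NUM_PTE; i++) pt[i] = 0;" loop. */
		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		/* Mapping with DMA_TO_DEVICE performs the cache maintenance
		 * needed for the SMMU to observe the zeroed PTEs. */
		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			/* Assumed unwind; truncated in the diff above. */
			__free_page(page);
			return NULL;
		}
		/* ... install the new page table (not shown) ... */
	}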