IOMMU Fixes for Linux v4.8-rc2
Including:

 * Some functions defined in a header file for the mediatek driver were
   not marked inline. Fix that oversight.

 * Fix a potential crash in the ARM64 dma-mapping code when freeing a
   partially initialized domain.

 * Another fix for ARM64 dma-mapping to respect IOMMU mapping constraints
   when allocating IOVA addresses.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABAgAGBQJXsZB3AAoJECvwRC2XARrjyRQP+gP9M0X7KKjs4OzxEL8oJEDr
nTcR+NSsSqFWUHYQv173cOpUTOLu5R45+s2hrDQpZVR8Lvv5vBdfgdS6XrK9gOIM
DVLn+p1J06gO/fJoHuiJVjNBfSdGOov65FSg9xJkRXhzbeiBmSfLOM3RaX9bYMOF
opH41nt8MTvH7+EG0G5zxwDndzhKw7fsMIEvs1899B+iLJHg3CUscM5W/2Fv5vK6
Z0V0KWi/Mu48b8wVkYssPNIYK+WjpLXuKkbxmtpzAbhUViy0tn6RWzXetAqrEfAb
+7jRKsu7i2iUH6bFj8RmQ+BF6bFR2EoDpw2I/seZydmR3uHP6w3KBY2V34svJG1Z
lXo6jKGxKJ/U8t0K2tRm8jXBZ7nVDRqiOXm+v0qvOcXXbe1JyYSnCv9mAXogz+x6
h1pWk8vY6WgsrNFbAK1Q+g5o0E0+eo1ItVaLU5TD6rgNHPDE4GoUSpjyM6WpEuBK
4rofezhain87rW1pOggi5KFX/ptZ/md1tdzPgujtsSgBW0WVqLxoBbwWd9eJVohb
XXeFnl0RqNJJi5CFk+yl+vm9hX/+iKL0Y8FzSwizwSXG5l/aF4dHdwwsxaAvJBkl
Tjuw4u81HJ3Pqmi24CpYmRJhF7EhsyRuQoEOzHRlKZNnCVU/kOvrZsmLOnqLCMY/
qX2tp9PLj0Sw2RUAFpI9
=49AB
-----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v4.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:

 - Some functions defined in a header file for the mediatek driver were
   not marked inline. Fix that oversight.

 - Fix a potential crash in the ARM64 dma-mapping code when freeing a
   partially initialized domain.

 - Another fix for ARM64 dma-mapping to respect IOMMU mapping constraints
   when allocating IOVA addresses.

* tag 'iommu-fixes-v4.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/dma: Respect IOMMU aperture when allocating
  iommu/dma: Don't put uninitialised IOVA domains
  iommu/mediatek: Mark static functions in headers inline
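The third fix reworks the IOVA allocator so it is handed the IOMMU domain and can clamp the caller's DMA limit to the domain's aperture. Below is a minimal standalone sketch of just that clamping step, assuming simplified stand-in types: clamp_to_aperture() and min_addr() are hypothetical helper names for illustration, and only force_aperture and aperture_end mirror the fields the real fix reads in the diff further down.

#include <stdbool.h>

typedef unsigned long long dma_addr_t;

/* Reduced stand-ins for the kernel structures; only the fields used here. */
struct iommu_domain_geometry {
	dma_addr_t aperture_start;
	dma_addr_t aperture_end;
	bool force_aperture;	/* addresses outside the aperture never translate */
};

struct iommu_domain {
	struct iommu_domain_geometry geometry;
};

static dma_addr_t min_addr(dma_addr_t a, dma_addr_t b)
{
	return a < b ? a : b;
}

/*
 * The core idea of the "respect IOMMU aperture" fix: before carving an
 * IOVA out of [0, dma_limit], clamp dma_limit to the highest address the
 * IOMMU can actually translate, if the hardware enforces an aperture.
 */
static dma_addr_t clamp_to_aperture(const struct iommu_domain *domain,
				    dma_addr_t dma_limit)
{
	if (domain->geometry.force_aperture)
		dma_limit = min_addr(dma_limit, domain->geometry.aperture_end);
	return dma_limit;
}

With the limit clamped up front, the rest of the top-down IOVA allocation logic needs no changes; it simply never hands out an address the IOMMU cannot map.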
commit 3684b03d8e
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,6 +68,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 	if (!iovad)
 		return;
 
-	put_iova_domain(iovad);
+	if (iovad->granule)
+		put_iova_domain(iovad);
 	kfree(iovad);
 	domain->iova_cookie = NULL;
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
 	}
 }
 
-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 		dma_addr_t dma_limit)
 {
+	struct iova_domain *iovad = domain->iova_cookie;
 	unsigned long shift = iova_shift(iovad);
 	unsigned long length = iova_align(iovad, size) >> shift;
 
+	if (domain->geometry.force_aperture)
+		dma_limit = min(dma_limit, domain->geometry.aperture_end);
 	/*
 	 * Enforce size-alignment to be safe - there could perhaps be an
 	 * attribute to control this per-device, or at least per-domain...
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 	if (!pages)
 		return NULL;
 
-	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
 	if (!iova)
 		goto out_free_pages;
 
@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
-	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
 
 	if (!iova)
 		return DMA_ERROR_CODE;
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
-	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
 	if (!iova)
 		goto out_restore_sg;
 
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -55,19 +55,19 @@ struct mtk_iommu_data {
 	bool			enable_4GB;
 };
 
-static int compare_of(struct device *dev, void *data)
+static inline int compare_of(struct device *dev, void *data)
 {
 	return dev->of_node == data;
 }
 
-static int mtk_iommu_bind(struct device *dev)
+static inline int mtk_iommu_bind(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
 	return component_bind_all(dev, &data->smi_imu);
 }
 
-static void mtk_iommu_unbind(struct device *dev)
+static inline void mtk_iommu_unbind(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
 
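Why the mediatek change matters: the helpers above are defined (not merely declared) in mtk_iommu.h, so every .c file that includes the header gets its own static copy. A plain static function that a given includer never calls triggers a "defined but not used" warning; marking it static inline silences that and lets the compiler discard unused copies. A minimal two-file illustration of the general pattern follows; the file names and the square() helper are made up for this sketch and are not taken from the kernel.

/* util.h -- hypothetical shared header */
#ifndef UTIL_H
#define UTIL_H

/*
 * Defined in the header, so each includer gets its own copy.
 * Without "inline", an includer that never calls it would see
 * -Wunused-function ("'square' defined but not used").
 */
static inline int square(int x)
{
	return x * x;
}

#endif /* UTIL_H */

/* user.c -- includes the header but never calls square(); builds warning-free */
#include "util.h"
#include <stdio.h>

int main(void)
{
	printf("hello\n");
	return 0;
}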