IOMMU Updates for Linux v4.11

The changes include:

 * KVM PCIe/MSI passthrough support on ARM/ARM64
 * Introduction of a core representation for individual hardware IOMMUs
 * Support for IOMMU privileged mappings as supported by some ARM IOMMUs
 * 16-bit SID support for ARM-SMMUv2
 * Stream table optimization for ARM-SMMUv3
 * Various fixes and other small improvements

Merge tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:

 - KVM PCIe/MSI passthrough support on ARM/ARM64

 - introduction of a core representation for individual hardware IOMMUs

 - support for IOMMU privileged mappings as supported by some ARM IOMMUs

 - 16-bit SID support for ARM-SMMUv2

 - stream table optimization for ARM-SMMUv3

 - various fixes and other small improvements

* tag 'iommu-updates-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (61 commits)
  vfio/type1: Fix error return code in vfio_iommu_type1_attach_group()
  iommu: Remove iommu_register_instance interface
  iommu/exynos: Make use of iommu_device_register interface
  iommu/mediatek: Make use of iommu_device_register interface
  iommu/msm: Make use of iommu_device_register interface
  iommu/arm-smmu: Make use of the iommu_register interface
  iommu: Add iommu_device_set_fwnode() interface
  iommu: Make iommu_device_link/unlink take a struct iommu_device
  iommu: Add sysfs bindings for struct iommu_device
  iommu: Introduce new 'struct iommu_device'
  iommu: Rename struct iommu_device
  iommu: Rename iommu_get_instance()
  iommu: Fix static checker warning in iommu_insert_device_resv_regions
  iommu: Avoid unnecessary assignment of dev->iommu_fwspec
  iommu/mediatek: Remove bogus 'select' statements
  iommu/dma: Remove bogus dma_supported() implementation
  iommu/ipmmu-vmsa: Restrict IOMMU Domain Geometry to 32-bit address space
  iommu/vt-d: Don't over-free page table directories
  iommu/vt-d: Tylersburg isoch identity map check is done too late.
  iommu/vt-d: Fix some macros that are incorrectly specified in intel-iommu
  ...
commit ebb4949eb3

@@ -12,3 +12,15 @@ Description:    /sys/kernel/iommu_groups/ contains a number of sub-
 		file if the IOMMU driver has chosen to register a more
 		common name for the group.
 Users:
+
+What:		/sys/kernel/iommu_groups/reserved_regions
+Date:		January 2017
+KernelVersion:	v4.11
+Contact:	Eric Auger <eric.auger@redhat.com>
+Description:    /sys/kernel/iommu_groups/reserved_regions list IOVA
+		regions that are reserved. Not necessarily all
+		reserved regions are listed. This is typically used to
+		output direct-mapped, MSI, non mappable regions. Each
+		region is described on a single line: the 1st field is
+		the base IOVA, the second is the end IOVA and the third
+		field describes the type of the region.

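A minimal userspace sketch of how the new attribute file can be consumed. It assumes the file appears underneath each numbered group directory and follows the "base-IOVA end-IOVA type" line format described in the ABI text above; the program itself is illustrative only and not part of the series.

/* dump reserved_regions for every IOMMU group (illustrative sketch) */
#include <stdio.h>
#include <dirent.h>

int main(void)
{
	const char *base = "/sys/kernel/iommu_groups";
	char path[256], line[256];
	struct dirent *de;
	DIR *dir = opendir(base);

	if (!dir) {
		perror(base);
		return 1;
	}

	while ((de = readdir(dir)) != NULL) {
		FILE *f;

		if (de->d_name[0] == '.')
			continue;

		snprintf(path, sizeof(path), "%s/%s/reserved_regions",
			 base, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;	/* older kernel, or no regions exposed */

		printf("group %s:\n", de->d_name);
		while (fgets(line, sizeof(line), f))
			printf("  %s", line);	/* "<start> <end> <type>" */
		fclose(f);
	}

	closedir(dir);
	return 0;
}
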
@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
 where allocation failures are not a problem, and shouldn't bother the logs.
 
 NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
+
+DMA_ATTR_PRIVILEGED
+------------------------------
+
+Some advanced peripherals such as remote processors and GPUs perform
+accesses to DMA buffers in both privileged "supervisor" and unprivileged
+"user" modes.  This attribute is used to indicate to the DMA-mapping
+subsystem that the buffer is fully accessible at the elevated privilege
+level (and ideally inaccessible or at least read-only at the
+lesser-privileged levels).

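A hypothetical driver fragment showing how a coherent buffer can be requested with the new attribute; the pl330 change later in this pull does exactly this for its microcode buffer. The function and variable names here are placeholders, not part of any in-tree driver.

#include <linux/dma-mapping.h>

/* Allocate a buffer that only the device's privileged transactions may use. */
static void *alloc_priv_buffer(struct device *dev, size_t size,
			       dma_addr_t *dma_handle)
{
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_PRIVILEGED);
}

static void free_priv_buffer(struct device *dev, size_t size,
			     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_PRIVILEGED);
}
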
@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
+static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
+{
+	int prot = 0;
+
+	if (attrs & DMA_ATTR_PRIVILEGED)
+		prot |= IOMMU_PRIV;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		return prot | IOMMU_READ | IOMMU_WRITE;
+	case DMA_TO_DEVICE:
+		return prot | IOMMU_READ;
+	case DMA_FROM_DEVICE:
+		return prot | IOMMU_WRITE;
+	default:
+		return prot;
+	}
+}
+
 /* IOMMU */
 
 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+		       unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
 		len = (j - i) << PAGE_SHIFT;
 		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE);
+				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
-				  dma_addr_t *handle, int coherent_flag)
+				  dma_addr_t *handle, int coherent_flag,
+				  unsigned long attrs)
 {
 	struct page *page;
 	void *addr;
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 	if (!addr)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	*handle = __iommu_create_mapping(dev, &page, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 
 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
 		return __iommu_alloc_simple(dev, size, gfp, handle,
-					    coherent_flag);
+					    coherent_flag, attrs);
 
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 					 GFP_KERNEL);
 }
 
-static int __dma_direction_to_prot(enum dma_data_direction dir)
-{
-	int prot;
-
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
-
-	return prot;
-}
-
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		prot = __dma_direction_to_prot(dir);
+		prot = __dma_info_to_prot(dir, attrs);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	prot = __dma_direction_to_prot(dir);
+	prot = __dma_info_to_prot(dir, attrs);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
 
 	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
 	if (ret < 0)

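For streaming mappings the attribute travels through the map_page/map_sg paths patched above, so a hypothetical caller could request a privileged mapping like this (the helper name and error convention are placeholders; with DMA_BIDIRECTIONAL and DMA_ATTR_PRIVILEGED the new helper yields IOMMU_PRIV | IOMMU_READ | IOMMU_WRITE):

#include <linux/dma-mapping.h>

/* Map an existing buffer for privileged device access (illustrative only). */
static dma_addr_t map_priv_streaming(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_BIDIRECTIONAL,
				    DMA_ATTR_PRIVILEGED);
}

static void unmap_priv_streaming(struct device *dev, dma_addr_t addr, size_t len)
{
	dma_unmap_single_attrs(dev, addr, len, DMA_BIDIRECTIONAL,
			       DMA_ATTR_PRIVILEGED);
}
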
@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 				 unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
-	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	size_t iosize = size;
 	void *addr;
 
@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
 				   unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
-	int prot = dma_direction_to_prot(dir, coherent);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
 	if (!iommu_dma_mapping_error(dev, dev_addr) &&
@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 
 	return iommu_dma_map_sg(dev, sgl, nelems,
-				dma_direction_to_prot(dir, coherent));
+				dma_info_to_prot(dir, coherent, attrs));
 }
 
 static void __iommu_unmap_sg_attrs(struct device *dev,
@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = {
 	.sync_sg_for_device = __iommu_sync_sg_for_device,
 	.map_resource = iommu_dma_map_resource,
 	.unmap_resource = iommu_dma_unmap_resource,
-	.dma_supported = iommu_dma_supported,
 	.mapping_error = iommu_dma_mapping_error,
 };
 

@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
 	if (!iort_fwnode)
 		return NULL;
 
-	ops = iommu_get_instance(iort_fwnode);
+	ops = iommu_ops_from_fwnode(iort_fwnode);
 	if (!ops)
 		return NULL;
 

@@ -1859,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
 	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
-	pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
+	pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
-				&pl330->mcode_bus, GFP_KERNEL);
+				&pl330->mcode_bus, GFP_KERNEL,
+				DMA_ATTR_PRIVILEGED);
 	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);

@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
 	select IOMMU_API
 	select MEMORY
 	select MTK_SMI
-	select COMMON_CLK_MT2701_MMSYS
-	select COMMON_CLK_MT2701_IMGSYS
-	select COMMON_CLK_MT2701_VDECSYS
 	help
 	  Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
 	  Multimedia Memory Managememt Unit. This option enables remapping of

@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static const struct iommu_ops amd_iommu_ops;
+const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
 static int iommu_init_device(struct device *dev)
 {
 	struct iommu_dev_data *dev_data;
+	struct amd_iommu *iommu;
 	int devid;
 
 	if (dev->archdata.iommu)
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
 	if (devid < 0)
 		return devid;
 
+	iommu = amd_iommu_rlookup_table[devid];
+
 	dev_data = find_dev_data(devid);
 	if (!dev_data)
 		return -ENOMEM;
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
 
 	dev->archdata.iommu = dev_data;
 
-	iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
-			  dev);
+	iommu_device_link(&iommu->iommu, dev);
 
 	return 0;
 }
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
-	int devid;
 	struct iommu_dev_data *dev_data;
+	struct amd_iommu *iommu;
+	int devid;
 
 	devid = get_device_id(dev);
 	if (devid < 0)
 		return;
 
+	iommu = amd_iommu_rlookup_table[devid];
+
 	dev_data = search_dev_data(devid);
 	if (!dev_data)
 		return;
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
 	if (dev_data->domain)
 		detach_device(dev);
 
-	iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
-			    dev);
+	iommu_device_unlink(&iommu->iommu, dev);
 
 	iommu_group_remove_device(dev);
 
@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 	return false;
 }
 
-static void amd_iommu_get_dm_regions(struct device *dev,
+static void amd_iommu_get_resv_regions(struct device *dev,
 				     struct list_head *head)
 {
+	struct iommu_resv_region *region;
 	struct unity_map_entry *entry;
 	int devid;
 
@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
 		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
-		struct iommu_dm_region *region;
+		size_t length;
+		int prot = 0;
 
 		if (devid < entry->devid_start || devid > entry->devid_end)
 			continue;
 
-		region = kzalloc(sizeof(*region), GFP_KERNEL);
+		length = entry->address_end - entry->address_start;
+		if (entry->prot & IOMMU_PROT_IR)
+			prot |= IOMMU_READ;
+		if (entry->prot & IOMMU_PROT_IW)
+			prot |= IOMMU_WRITE;
+
+		region = iommu_alloc_resv_region(entry->address_start,
						 length, prot,
						 IOMMU_RESV_DIRECT);
 		if (!region) {
			pr_err("Out of memory allocating dm-regions for %s\n",
				dev_name(dev));
			return;
		}
-
-		region->start = entry->address_start;
-		region->length = entry->address_end - entry->address_start;
-		if (entry->prot & IOMMU_PROT_IR)
-			region->prot |= IOMMU_READ;
-		if (entry->prot & IOMMU_PROT_IW)
-			region->prot |= IOMMU_WRITE;
-
 		list_add_tail(&region->list, head);
 	}
+
+	region = iommu_alloc_resv_region(MSI_RANGE_START,
					 MSI_RANGE_END - MSI_RANGE_START + 1,
					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
+
+	region = iommu_alloc_resv_region(HT_RANGE_START,
					 HT_RANGE_END - HT_RANGE_START + 1,
					 0, IOMMU_RESV_RESERVED);
+	if (!region)
+		return;
+	list_add_tail(&region->list, head);
 }
 
-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
				     struct list_head *head)
 {
-	struct iommu_dm_region *entry, *next;
+	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
 }
 
-static void amd_iommu_apply_dm_region(struct device *dev,
+static void amd_iommu_apply_resv_region(struct device *dev,
				      struct iommu_domain *domain,
-				      struct iommu_dm_region *region)
+				      struct iommu_resv_region *region)
 {
 	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
 	unsigned long start, end;
@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
 
-static const struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
 	.domain_free  = amd_iommu_domain_free,
@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
 	.device_group = amd_iommu_device_group,
-	.get_dm_regions = amd_iommu_get_dm_regions,
-	.put_dm_regions = amd_iommu_put_dm_regions,
-	.apply_dm_region = amd_iommu_apply_dm_region,
+	.get_resv_regions = amd_iommu_get_resv_regions,
+	.put_resv_regions = amd_iommu_put_resv_regions,
+	.apply_resv_region = amd_iommu_apply_resv_region,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 

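The reserved-region callbacks converted above are consumed through the IOMMU core rather than called directly. A hedged sketch of such a consumer follows; the core helpers iommu_get_resv_regions()/iommu_put_resv_regions() are assumed here (they are part of the same series but not shown in these hunks), and the function name is a placeholder.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

/* Walk and print the reserved regions a driver reports for a device. */
static void dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(regions);

	iommu_get_resv_regions(dev, &regions);

	list_for_each_entry(region, &regions, list)
		dev_info(dev, "resv region: start %pa length %zu type %d\n",
			 &region->start, region->length, region->type);

	iommu_put_resv_regions(dev, &regions);
}
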
@@ -94,6 +94,8 @@
  * out of it.
  */
 
+extern const struct iommu_ops amd_iommu_ops;
+
 /*
  * structure describing one IOMMU in the ACPI table. Typically followed by one
  * or more ivhd_entrys.
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 	amd_iommu_erratum_746_workaround(iommu);
 	amd_iommu_ats_write_check_workaround(iommu);
 
-	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
-					       amd_iommu_groups, "ivhd%d",
-					       iommu->index);
+	iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+			       amd_iommu_groups, "ivhd%d", iommu->index);
+	iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
+	iommu_device_register(&iommu->iommu);
 
 	return pci_enable_device(iommu->dev);
 }
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
 	 */
 	ret = check_ivrs_checksum(ivrs_base);
 	if (ret)
-		return ret;
+		goto out;
 
 	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
 	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

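A condensed sketch of the registration pattern that converted drivers now follow with the new core representation. The "my_iommu" structure and ops are hypothetical placeholders used only to show the sequence of calls; only the iommu_device_* interfaces themselves come from this series.

#include <linux/device.h>
#include <linux/iommu.h>

struct my_iommu {
	struct device		*dev;	/* the IOMMU's own device */
	struct iommu_device	iommu;	/* handle for the IOMMU core */
};

static const struct iommu_ops my_iommu_ops;	/* driver callbacks, elided */

static int my_iommu_register(struct my_iommu *mi)
{
	int ret;

	/* Create the sysfs representation of this hardware instance. */
	ret = iommu_device_sysfs_add(&mi->iommu, mi->dev, NULL,
				     "myiommu.%s", dev_name(mi->dev));
	if (ret)
		return ret;

	/* Attach ops and firmware node, then make it visible to the core. */
	iommu_device_set_ops(&mi->iommu, &my_iommu_ops);
	iommu_device_set_fwnode(&mi->iommu, mi->dev->fwnode);

	return iommu_device_register(&mi->iommu);
}
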
@@ -535,8 +535,8 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
-	/* IOMMU sysfs device */
-	struct device *iommu_dev;
+	/* Handle for IOMMU core code */
+	struct iommu_device iommu;
 
 	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we

@@ -269,9 +269,6 @@
 #define STRTAB_STE_1_SHCFG_INCOMING	1UL
 #define STRTAB_STE_1_SHCFG_SHIFT	44
 
-#define STRTAB_STE_1_PRIVCFG_UNPRIV	2UL
-#define STRTAB_STE_1_PRIVCFG_SHIFT	48
-
 #define STRTAB_STE_2_S2VMID_SHIFT	0
 #define STRTAB_STE_2_S2VMID_MASK	0xffffUL
 #define STRTAB_STE_2_VTCR_SHIFT		32
@@ -412,6 +409,9 @@
 /* High-level queue structures */
 #define ARM_SMMU_POLL_TIMEOUT_US	100
 
+#define MSI_IOVA_BASE			0x8000000
+#define MSI_IOVA_LENGTH			0x100000
+
 static bool disable_bypass;
 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
 MODULE_PARM_DESC(disable_bypass,
@@ -616,6 +616,9 @@ struct arm_smmu_device {
 	unsigned int			sid_bits;
 
 	struct arm_smmu_strtab_cfg	strtab_cfg;
+
+	/* IOMMU core code handle */
+	struct iommu_device		iommu;
 };
 
 /* SMMU private data for each master */
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		}
 	}
 
-	/* Nuke the existing Config, as we're going to rewrite it */
-	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
-	if (ste->valid)
-		val |= STRTAB_STE_0_V;
-	else
-		val &= ~STRTAB_STE_0_V;
+	/* Nuke the existing STE_0 value, as we're going to rewrite it */
+	val = ste->valid ? STRTAB_STE_0_V : 0;
 
 	if (ste->bypass) {
 		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 #ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
 #endif
-			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
-			 STRTAB_STE_1_PRIVCFG_UNPRIV <<
-			 STRTAB_STE_1_PRIVCFG_SHIFT);
+			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
 
 		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
 		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;
-
 	}
 
 	if (ste->s2_cfg) {
@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	switch (cap) {
 	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return true; /* MSIs are just memory writes */
 	case IOMMU_CAP_NOEXEC:
 		return true;
 	default:
@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
 	}
 
 	group = iommu_group_get_for_dev(dev);
-	if (!IS_ERR(group))
+	if (!IS_ERR(group)) {
 		iommu_group_put(group);
+		iommu_device_link(&smmu->iommu, dev);
+	}
 
 	return PTR_ERR_OR_ZERO(group);
 }
@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
 {
 	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 	struct arm_smmu_master_data *master;
+	struct arm_smmu_device *smmu;
 
 	if (!fwspec || fwspec->ops != &arm_smmu_ops)
 		return;
 
 	master = fwspec->iommu_priv;
+	smmu = master->smmu;
 	if (master && master->ste.valid)
 		arm_smmu_detach_dev(dev);
 	iommu_group_remove_device(dev);
+	iommu_device_unlink(&smmu->iommu, dev);
 	kfree(master);
 	iommu_fwspec_free(dev);
 }
@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static void arm_smmu_get_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *region;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+					 prot, IOMMU_RESV_MSI);
+	if (!region)
+		return;
+
+	list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+				      struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list)
+		kfree(entry);
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.capable		= arm_smmu_capable,
 	.domain_alloc		= arm_smmu_domain_alloc,
@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.get_resv_regions	= arm_smmu_get_resv_regions,
+	.put_resv_regions	= arm_smmu_put_resv_regions,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 	u32 size, l1size;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-	/*
-	 * If we can resolve everything with a single L2 table, then we
-	 * just need a single L1 descriptor. Otherwise, calculate the L1
-	 * size, capped to the SIDSIZE.
-	 */
-	if (smmu->sid_bits < STRTAB_SPLIT) {
-		size = 0;
-	} else {
+	/* Calculate the L1 size, capped to the SIDSIZE. */
 	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
 	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-	}
 	cfg->num_l1_ents = 1 << size;
 
 	size += STRTAB_SPLIT;
@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
 	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
 
+	/*
+	 * If the SMMU supports fewer bits than would fill a single L2 stream
+	 * table, use a linear table instead.
+	 */
+	if (smmu->sid_bits <= STRTAB_SPLIT)
+		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
 	/* IDR5 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
 
@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 {
 	int irq, ret;
 	struct resource *res;
+	resource_size_t ioaddr;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
 	bool bypass;
@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 		dev_err(dev, "MMIO region too small (%pr)\n", res);
 		return -EINVAL;
 	}
+	ioaddr = res->start;
 
 	smmu->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(smmu->base))
@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 		return ret;
 
 	/* And we're up. Go go go! */
-	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+				     "smmu3.%pa", &ioaddr);
+	if (ret)
+		return ret;
+
+	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+	ret = iommu_device_register(&smmu->iommu);
 
 #ifdef CONFIG_PCI
 	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {

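A back-of-the-envelope illustration of the stream-table optimization above. It assumes the driver's STRTAB_SPLIT value of 8 (an assumption of this sketch, not stated in the hunks); with it, an SMMU reporting sid_bits=6 now gets a 64-entry linear stream table instead of a two-level table whose single L2 span would mostly go unused.

#include <stdio.h>

int main(void)
{
	const unsigned int strtab_split = 8;	/* assumed driver constant */
	unsigned int sid_bits = 6;		/* example value read from IDR1 */

	if (sid_bits <= strtab_split)
		printf("linear stream table: %u STEs\n", 1U << sid_bits);
	else
		printf("2-level stream table, one L2 span: %u STEs\n",
		       1U << strtab_split);
	return 0;
}
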
@ -24,6 +24,7 @@
|
|||
* - v7/v8 long-descriptor format
|
||||
* - Non-secure access to the SMMU
|
||||
* - Context fault reporting
|
||||
* - Extended Stream ID (16 bit)
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "arm-smmu: " fmt
|
||||
|
@ -87,6 +88,7 @@
|
|||
#define sCR0_CLIENTPD (1 << 0)
|
||||
#define sCR0_GFRE (1 << 1)
|
||||
#define sCR0_GFIE (1 << 2)
|
||||
#define sCR0_EXIDENABLE (1 << 3)
|
||||
#define sCR0_GCFGFRE (1 << 4)
|
||||
#define sCR0_GCFGFIE (1 << 5)
|
||||
#define sCR0_USFCFG (1 << 10)
|
||||
|
@ -126,6 +128,7 @@
|
|||
#define ID0_NUMIRPT_MASK 0xff
|
||||
#define ID0_NUMSIDB_SHIFT 9
|
||||
#define ID0_NUMSIDB_MASK 0xf
|
||||
#define ID0_EXIDS (1 << 8)
|
||||
#define ID0_NUMSMRG_SHIFT 0
|
||||
#define ID0_NUMSMRG_MASK 0xff
|
||||
|
||||
|
@ -169,6 +172,7 @@
|
|||
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
|
||||
#define S2CR_CBNDX_SHIFT 0
|
||||
#define S2CR_CBNDX_MASK 0xff
|
||||
#define S2CR_EXIDVALID (1 << 10)
|
||||
#define S2CR_TYPE_SHIFT 16
|
||||
#define S2CR_TYPE_MASK 0x3
|
||||
enum arm_smmu_s2cr_type {
|
||||
|
@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
|
|||
|
||||
#define TTBCR2_SEP_SHIFT 15
|
||||
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
|
||||
#define TTBCR2_AS (1 << 4)
|
||||
|
||||
#define TTBRn_ASID_SHIFT 48
|
||||
|
||||
|
@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg {
|
|||
|
||||
#define FSYNR0_WNR (1 << 4)
|
||||
|
||||
#define MSI_IOVA_BASE 0x8000000
|
||||
#define MSI_IOVA_LENGTH 0x100000
|
||||
|
||||
static int force_stage;
|
||||
module_param(force_stage, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(force_stage,
|
||||
|
@ -351,6 +359,7 @@ struct arm_smmu_device {
|
|||
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
|
||||
#define ARM_SMMU_FEAT_EXIDS (1 << 12)
|
||||
u32 features;
|
||||
|
||||
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
|
||||
|
@ -380,6 +389,9 @@ struct arm_smmu_device {
|
|||
unsigned int *irqs;
|
||||
|
||||
u32 cavium_id_base; /* Specific to Cavium */
|
||||
|
||||
/* IOMMU core code handle */
|
||||
struct iommu_device iommu;
|
||||
};
|
||||
|
||||
enum arm_smmu_context_fmt {
|
||||
|
@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
|
|||
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
|
||||
reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
|
||||
reg2 |= TTBCR2_SEP_UPSTREAM;
|
||||
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
|
||||
reg2 |= TTBCR2_AS;
|
||||
}
|
||||
if (smmu->version > ARM_SMMU_V1)
|
||||
writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
|
||||
|
@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
|
|||
struct arm_smmu_smr *smr = smmu->smrs + idx;
|
||||
u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
|
||||
|
||||
if (smr->valid)
|
||||
if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
|
||||
reg |= SMR_VALID;
|
||||
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
|
||||
}
|
||||
|
@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
|
|||
(s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
|
||||
(s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
|
||||
|
||||
if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
|
||||
smmu->smrs[idx].valid)
|
||||
reg |= S2CR_EXIDVALID;
|
||||
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
|
||||
}
|
||||
|
||||
|
@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
|
|||
arm_smmu_write_smr(smmu, idx);
|
||||
}
|
||||
|
||||
/*
|
||||
* The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
|
||||
* should be called after sCR0 is written.
|
||||
*/
|
||||
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
|
||||
{
|
||||
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
|
||||
u32 smr;
|
||||
|
||||
if (!smmu->smrs)
|
||||
return;
|
||||
|
||||
/*
|
||||
* SMR.ID bits may not be preserved if the corresponding MASK
|
||||
* bits are set, so check each one separately. We can reject
|
||||
* masters later if they try to claim IDs outside these masks.
|
||||
*/
|
||||
smr = smmu->streamid_mask << SMR_ID_SHIFT;
|
||||
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
|
||||
|
||||
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
|
||||
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
|
||||
}
|
||||
|
||||
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
|
||||
{
|
||||
struct arm_smmu_smr *smrs = smmu->smrs;
|
||||
|
@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
|
|||
continue;
|
||||
|
||||
s2cr[idx].type = type;
|
||||
s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
|
||||
s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
|
||||
s2cr[idx].cbndx = cbndx;
|
||||
arm_smmu_write_s2cr(smmu, idx);
|
||||
}
|
||||
|
@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
|
|||
* requests.
|
||||
*/
|
||||
return true;
|
||||
case IOMMU_CAP_INTR_REMAP:
|
||||
return true; /* MSIs are just memory writes */
|
||||
case IOMMU_CAP_NOEXEC:
|
||||
return true;
|
||||
default:
|
||||
|
@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
|
|||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
iommu_device_link(&smmu->iommu, dev);
|
||||
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
|
@ -1456,10 +1501,17 @@ out_free:
|
|||
static void arm_smmu_remove_device(struct device *dev)
|
||||
{
|
||||
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
|
||||
struct arm_smmu_master_cfg *cfg;
|
||||
struct arm_smmu_device *smmu;
|
||||
|
||||
|
||||
if (!fwspec || fwspec->ops != &arm_smmu_ops)
|
||||
return;
|
||||
|
||||
cfg = fwspec->iommu_priv;
|
||||
smmu = cfg->smmu;
|
||||
|
||||
iommu_device_unlink(&smmu->iommu, dev);
|
||||
arm_smmu_master_free_smes(fwspec);
|
||||
iommu_group_remove_device(dev);
|
||||
kfree(fwspec->iommu_priv);
|
||||
|
@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
|
|||
return iommu_fwspec_add_ids(dev, &fwid, 1);
|
||||
}
|
||||
|
||||
static void arm_smmu_get_resv_regions(struct device *dev,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct iommu_resv_region *region;
|
||||
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
|
||||
|
||||
region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
|
||||
prot, IOMMU_RESV_MSI);
|
||||
if (!region)
|
||||
return;
|
||||
|
||||
list_add_tail(®ion->list, head);
|
||||
}
|
||||
|
||||
static void arm_smmu_put_resv_regions(struct device *dev,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct iommu_resv_region *entry, *next;
|
||||
|
||||
list_for_each_entry_safe(entry, next, head, list)
|
||||
kfree(entry);
|
||||
}
|
||||
|
||||
static struct iommu_ops arm_smmu_ops = {
|
||||
.capable = arm_smmu_capable,
|
||||
.domain_alloc = arm_smmu_domain_alloc,
|
||||
|
@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = {
|
|||
.domain_get_attr = arm_smmu_domain_get_attr,
|
||||
.domain_set_attr = arm_smmu_domain_set_attr,
|
||||
.of_xlate = arm_smmu_of_xlate,
|
||||
.get_resv_regions = arm_smmu_get_resv_regions,
|
||||
.put_resv_regions = arm_smmu_put_resv_regions,
|
||||
.pgsize_bitmap = -1UL, /* Restricted during device attach */
|
||||
};
|
||||
|
||||
|
@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
|||
if (smmu->features & ARM_SMMU_FEAT_VMID16)
|
||||
reg |= sCR0_VMID16EN;
|
||||
|
||||
if (smmu->features & ARM_SMMU_FEAT_EXIDS)
|
||||
reg |= sCR0_EXIDENABLE;
|
||||
|
||||
/* Push the button */
|
||||
__arm_smmu_tlb_sync(smmu);
|
||||
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
|
||||
|
@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
"\t(IDR0.CTTW overridden by FW configuration)\n");
|
||||
|
||||
/* Max. number of entries we have for stream matching/indexing */
|
||||
if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
|
||||
smmu->features |= ARM_SMMU_FEAT_EXIDS;
|
||||
size = 1 << 16;
|
||||
} else {
|
||||
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
|
||||
}
|
||||
smmu->streamid_mask = size - 1;
|
||||
if (id & ID0_SMS) {
|
||||
u32 smr;
|
||||
|
||||
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
|
||||
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
|
||||
if (size == 0) {
|
||||
|
@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
|
||||
* SMR.ID bits may not be preserved if the corresponding MASK
|
||||
* bits are set, so check each one separately. We can reject
|
||||
* masters later if they try to claim IDs outside these masks.
|
||||
*/
|
||||
smr = smmu->streamid_mask << SMR_ID_SHIFT;
|
||||
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smmu->streamid_mask = smr >> SMR_ID_SHIFT;
|
||||
|
||||
smr = smmu->streamid_mask << SMR_MASK_SHIFT;
|
||||
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
|
||||
smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
|
||||
|
||||
/* Zero-initialised to mark as invalid */
|
||||
smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
|
||||
GFP_KERNEL);
|
||||
|
@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
return -ENOMEM;
|
||||
|
||||
dev_notice(smmu->dev,
|
||||
"\tstream matching with %lu register groups, mask 0x%x",
|
||||
size, smmu->smr_mask_mask);
|
||||
"\tstream matching with %lu register groups", size);
|
||||
}
|
||||
/* s2cr->type == 0 means translation, so initialise explicitly */
|
||||
smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
|
||||
|
@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
|
|||
static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct resource *res;
|
||||
resource_size_t ioaddr;
|
||||
struct arm_smmu_device *smmu;
|
||||
struct device *dev = &pdev->dev;
|
||||
int num_irqs, i, err;
|
||||
|
@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
|||
return err;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
ioaddr = res->start;
|
||||
smmu->base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(smmu->base))
|
||||
return PTR_ERR(smmu->base);
|
||||
|
@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
|||
}
|
||||
}
|
||||
|
||||
iommu_register_instance(dev->fwnode, &arm_smmu_ops);
|
||||
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
|
||||
"smmu.%pa", &ioaddr);
|
||||
if (err) {
|
||||
dev_err(dev, "Failed to register iommu in sysfs\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
|
||||
iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
|
||||
|
||||
err = iommu_device_register(&smmu->iommu);
|
||||
if (err) {
|
||||
dev_err(dev, "Failed to register iommu\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, smmu);
|
||||
arm_smmu_device_reset(smmu);
|
||||
arm_smmu_test_smr_masks(smmu);
|
||||
|
||||
/* Oh, for a proper bus abstraction */
|
||||
if (!iommu_present(&platform_bus_type))
|
||||
|
|
|
@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
|
|||
phys_addr_t phys;
|
||||
};
|
||||
|
||||
enum iommu_dma_cookie_type {
|
||||
IOMMU_DMA_IOVA_COOKIE,
|
||||
IOMMU_DMA_MSI_COOKIE,
|
||||
};
|
||||
|
||||
struct iommu_dma_cookie {
|
||||
enum iommu_dma_cookie_type type;
|
||||
union {
|
||||
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
|
||||
struct iova_domain iovad;
|
||||
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
|
||||
dma_addr_t msi_iova;
|
||||
};
|
||||
struct list_head msi_page_list;
|
||||
spinlock_t msi_lock;
|
||||
};
|
||||
|
||||
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
|
||||
{
|
||||
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
|
||||
return cookie->iovad.granule;
|
||||
return PAGE_SIZE;
|
||||
}
|
||||
|
||||
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
|
||||
{
|
||||
return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
|
||||
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
|
||||
return &cookie->iovad;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
|
||||
{
|
||||
struct iommu_dma_cookie *cookie;
|
||||
|
||||
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
|
||||
if (cookie) {
|
||||
spin_lock_init(&cookie->msi_lock);
|
||||
INIT_LIST_HEAD(&cookie->msi_page_list);
|
||||
cookie->type = type;
|
||||
}
|
||||
return cookie;
|
||||
}
|
||||
|
||||
int iommu_dma_init(void)
|
||||
|
@ -62,25 +97,53 @@ int iommu_dma_init(void)
|
|||
*/
|
||||
int iommu_get_dma_cookie(struct iommu_domain *domain)
|
||||
{
|
||||
struct iommu_dma_cookie *cookie;
|
||||
|
||||
if (domain->iova_cookie)
|
||||
return -EEXIST;
|
||||
|
||||
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
|
||||
if (!cookie)
|
||||
domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
|
||||
if (!domain->iova_cookie)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&cookie->msi_lock);
|
||||
INIT_LIST_HEAD(&cookie->msi_page_list);
|
||||
domain->iova_cookie = cookie;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(iommu_get_dma_cookie);
|
||||
|
||||
/**
|
||||
* iommu_get_msi_cookie - Acquire just MSI remapping resources
|
||||
* @domain: IOMMU domain to prepare
|
||||
* @base: Start address of IOVA region for MSI mappings
|
||||
*
|
||||
* Users who manage their own IOVA allocation and do not want DMA API support,
|
||||
* but would still like to take advantage of automatic MSI remapping, can use
|
||||
* this to initialise their own domain appropriately. Users should reserve a
|
||||
* contiguous IOVA region, starting at @base, large enough to accommodate the
|
||||
* number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
|
||||
* used by the devices attached to @domain.
|
||||
*/
|
||||
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
|
||||
{
|
||||
struct iommu_dma_cookie *cookie;
|
||||
|
||||
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
|
||||
return -EINVAL;
|
||||
|
||||
if (domain->iova_cookie)
|
||||
return -EEXIST;
|
||||
|
||||
cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
|
||||
if (!cookie)
|
||||
return -ENOMEM;
|
||||
|
||||
cookie->msi_iova = base;
|
||||
domain->iova_cookie = cookie;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(iommu_get_msi_cookie);
|
||||
|
||||
/**
|
||||
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
|
||||
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
|
||||
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
|
||||
* iommu_get_msi_cookie()
|
||||
*
|
||||
* IOMMU drivers should normally call this from their domain_free callback.
|
||||
*/
|
||||
|
@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
|
|||
if (!cookie)
|
||||
return;
|
||||
|
||||
if (cookie->iovad.granule)
|
||||
if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
|
||||
put_iova_domain(&cookie->iovad);
|
||||
|
||||
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
|
||||
|
@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
|
|||
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
||||
u64 size, struct device *dev)
|
||||
{
|
||||
struct iova_domain *iovad = cookie_iovad(domain);
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
unsigned long order, base_pfn, end_pfn;
|
||||
bool pci = dev && dev_is_pci(dev);
|
||||
|
||||
if (!iovad)
|
||||
return -ENODEV;
|
||||
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
|
||||
return -EINVAL;
|
||||
|
||||
/* Use the smallest supported page size for IOVA granularity */
|
||||
order = __ffs(domain->pgsize_bitmap);
|
||||
|
@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
|||
end_pfn = min_t(unsigned long, end_pfn,
|
||||
domain->geometry.aperture_end >> order);
|
||||
}
|
||||
/*
|
||||
* PCI devices may have larger DMA masks, but still prefer allocating
|
||||
* within a 32-bit mask to avoid DAC addressing. Such limitations don't
|
||||
* apply to the typical platform device, so for those we may as well
|
||||
* leave the cache limit at the top of their range to save an rb_last()
|
||||
* traversal on every allocation.
|
||||
*/
|
||||
if (pci)
|
||||
end_pfn &= DMA_BIT_MASK(32) >> order;
|
||||
|
||||
/* All we can safely do with an existing domain is enlarge it */
|
||||
/* start_pfn is always nonzero for an already-initialised domain */
|
||||
if (iovad->start_pfn) {
|
||||
if (1UL << order != iovad->granule ||
|
||||
base_pfn != iovad->start_pfn ||
|
||||
end_pfn < iovad->dma_32bit_pfn) {
|
||||
base_pfn != iovad->start_pfn) {
|
||||
pr_warn("Incompatible range for DMA domain\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
iovad->dma_32bit_pfn = end_pfn;
|
||||
/*
|
||||
* If we have devices with different DMA masks, move the free
|
||||
* area cache limit down for the benefit of the smaller one.
|
||||
*/
|
||||
iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
|
||||
} else {
|
||||
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
|
||||
if (dev && dev_is_pci(dev))
|
||||
if (pci)
|
||||
iova_reserve_pci_windows(to_pci_dev(dev), iovad);
|
||||
}
|
||||
return 0;
|
||||
|
@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
|
|||
EXPORT_SYMBOL(iommu_dma_init_domain);
|
||||
|
||||
/**
|
||||
* dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
|
||||
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
|
||||
* page flags.
|
||||
* @dir: Direction of DMA transfer
|
||||
* @coherent: Is the DMA master cache-coherent?
|
||||
* @attrs: DMA attributes for the mapping
|
||||
*
|
||||
* Return: corresponding IOMMU API page protection flags
|
||||
*/
|
||||
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
|
||||
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
|
||||
unsigned long attrs)
|
||||
{
|
||||
int prot = coherent ? IOMMU_CACHE : 0;
|
||||
|
||||
if (attrs & DMA_ATTR_PRIVILEGED)
|
||||
prot |= IOMMU_PRIV;
|
||||
|
||||
switch (dir) {
|
||||
case DMA_BIDIRECTIONAL:
|
||||
return prot | IOMMU_READ | IOMMU_WRITE;
|
||||
|
@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
|
|||
}
|
||||
|
||||
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
|
||||
dma_addr_t dma_limit)
|
||||
dma_addr_t dma_limit, struct device *dev)
|
||||
{
|
||||
struct iova_domain *iovad = cookie_iovad(domain);
|
||||
unsigned long shift = iova_shift(iovad);
|
||||
unsigned long length = iova_align(iovad, size) >> shift;
|
||||
struct iova *iova = NULL;
|
||||
|
||||
if (domain->geometry.force_aperture)
|
||||
dma_limit = min(dma_limit, domain->geometry.aperture_end);
|
||||
|
||||
/* Try to get PCI devices a SAC address */
|
||||
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
|
||||
iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
|
||||
true);
|
||||
/*
|
||||
* Enforce size-alignment to be safe - there could perhaps be an
|
||||
* attribute to control this per-device, or at least per-domain...
|
||||
*/
|
||||
return alloc_iova(iovad, length, dma_limit >> shift, true);
|
||||
if (!iova)
|
||||
iova = alloc_iova(iovad, length, dma_limit >> shift, true);
|
||||
|
||||
return iova;
|
||||
}
|
||||
|
||||
/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
|
||||
|
@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
|
|||
if (!pages)
|
||||
return NULL;
|
||||
|
||||
iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
|
||||
iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
|
||||
if (!iova)
|
||||
goto out_free_pages;
|
||||
|
||||
|
@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
|
|||
struct iova_domain *iovad = cookie_iovad(domain);
|
||||
size_t iova_off = iova_offset(iovad, phys);
|
||||
size_t len = iova_align(iovad, size + iova_off);
|
||||
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
|
||||
struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
|
||||
|
||||
if (!iova)
|
||||
return DMA_ERROR_CODE;
|
||||
|
@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
prev = s;
|
||||
}
|
||||
|
||||
iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
|
||||
iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
|
||||
if (!iova)
|
||||
goto out_restore_sg;
|
||||
|
||||
|
@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
|||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return __iommu_dma_map(dev, phys, size,
|
||||
dma_direction_to_prot(dir, false) | IOMMU_MMIO);
|
||||
dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
|
||||
}
|
||||
|
||||
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
|
@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
|||
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
|
||||
}
|
||||
|
||||
int iommu_dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
/*
|
||||
* 'Special' IOMMUs which don't have the same addressing capability
|
||||
* as the CPU will have to wait until we have some way to query that
|
||||
* before they'll be able to use this framework.
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr == DMA_ERROR_CODE;
|
||||
|
@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
|
|||
{
|
||||
struct iommu_dma_cookie *cookie = domain->iova_cookie;
|
||||
struct iommu_dma_msi_page *msi_page;
|
||||
struct iova_domain *iovad = &cookie->iovad;
|
||||
struct iova_domain *iovad = cookie_iovad(domain);
|
||||
struct iova *iova;
|
||||
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
|
||||
size_t size = cookie_msi_granule(cookie);
|
||||
|
||||
msi_addr &= ~(phys_addr_t)iova_mask(iovad);
|
||||
msi_addr &= ~(phys_addr_t)(size - 1);
|
||||
list_for_each_entry(msi_page, &cookie->msi_page_list, list)
|
||||
if (msi_page->phys == msi_addr)
|
||||
return msi_page;
|
||||
|
@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
|
|||
if (!msi_page)
|
||||
return NULL;
|
||||
|
||||
iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
|
||||
msi_page->phys = msi_addr;
|
||||
if (iovad) {
|
||||
iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
|
||||
if (!iova)
|
||||
goto out_free_page;
|
||||
|
||||
msi_page->phys = msi_addr;
|
||||
msi_page->iova = iova_dma_addr(iovad, iova);
|
||||
if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
|
||||
} else {
|
||||
msi_page->iova = cookie->msi_iova;
|
||||
cookie->msi_iova += size;
|
||||
}
|
||||
|
||||
if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
|
||||
goto out_free_iova;
|
||||
|
||||
INIT_LIST_HEAD(&msi_page->list);
|
||||
|
@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
|
|||
return msi_page;
|
||||
|
||||
out_free_iova:
|
||||
if (iovad)
|
||||
__free_iova(iovad, iova);
|
||||
else
|
||||
cookie->msi_iova -= size;
|
||||
out_free_page:
|
||||
kfree(msi_page);
|
||||
return NULL;
|
||||
|
@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
|
|||
msg->data = ~0U;
|
||||
} else {
|
||||
msg->address_hi = upper_32_bits(msi_page->iova);
|
||||
msg->address_lo &= iova_mask(&cookie->iovad);
|
||||
msg->address_lo &= cookie_msi_granule(cookie) - 1;
|
||||
msg->address_lo += lower_32_bits(msi_page->iova);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
|
|||
static int alloc_iommu(struct dmar_drhd_unit *drhd);
|
||||
static void free_iommu(struct intel_iommu *iommu);
|
||||
|
||||
extern const struct iommu_ops intel_iommu_ops;
|
||||
|
||||
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
|
||||
{
|
||||
/*
|
||||
|
@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
|
|||
raw_spin_lock_init(&iommu->register_lock);
|
||||
|
||||
if (intel_iommu_enabled) {
|
||||
iommu->iommu_dev = iommu_device_create(NULL, iommu,
|
||||
err = iommu_device_sysfs_add(&iommu->iommu, NULL,
|
||||
intel_iommu_groups,
|
||||
"%s", iommu->name);
|
||||
|
||||
if (IS_ERR(iommu->iommu_dev)) {
|
||||
err = PTR_ERR(iommu->iommu_dev);
|
||||
if (err)
|
||||
goto err_unmap;
|
||||
|
||||
iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
|
||||
|
||||
err = iommu_device_register(&iommu->iommu);
|
||||
if (err)
|
||||
goto err_unmap;
|
||||
}
|
||||
}
|
||||
|
||||
drhd->iommu = iommu;
|
||||
|
@ -1103,7 +1108,8 @@ error:
|
|||
|
||||
static void free_iommu(struct intel_iommu *iommu)
|
||||
{
|
||||
iommu_device_destroy(iommu->iommu_dev);
|
||||
iommu_device_sysfs_remove(&iommu->iommu);
|
||||
iommu_device_unregister(&iommu->iommu);
|
||||
|
||||
if (iommu->irq) {
|
||||
if (iommu->pr_irq) {
|
||||
|
|
|
@ -276,6 +276,8 @@ struct sysmmu_drvdata {
|
|||
struct list_head owner_node; /* node for owner controllers list */
|
||||
phys_addr_t pgtable; /* assigned page table structure */
|
||||
unsigned int version; /* our version */
+    struct iommu_device iommu;  /* IOMMU core handle */
 };

 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)

@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
 {
     sysmmu_pte_t *ent;

-    dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
-        finfo->name, fault_addr, &data->pgtable);
+    dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
+        dev_name(data->master), finfo->name, fault_addr);
+    dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
     ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
-    dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
+    dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
     if (lv1ent_page(ent)) {
         ent = page_entry(ent, fault_addr);
-        dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
+        dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
     }
 }

@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
     data->sysmmu = dev;
     spin_lock_init(&data->lock);

+    ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+                     dev_name(data->sysmmu));
+    if (ret)
+        return ret;
+
+    iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
+    iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
+
+    ret = iommu_device_register(&data->iommu);
+    if (ret)
+        return ret;
+
     platform_set_drvdata(pdev, data);

     __sysmmu_get_version(data);

@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)

     pm_runtime_enable(dev);

-    of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
-
     return 0;
 }

@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
                 DMA_TO_DEVICE);
     /* For mapping page table entries we rely on dma == phys */
     BUG_ON(handle != virt_to_phys(domain->pgtable));
+    if (dma_mapping_error(dma_dev, handle))
+        goto err_lv2ent;

     spin_lock_init(&domain->lock);
     spin_lock_init(&domain->pgtablelock);

@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)

     return &domain->domain;

+err_lv2ent:
+    free_pages((unsigned long)domain->lv2entcnt, 1);
 err_counter:
     free_pages((unsigned long)domain->pgtable, 2);
 err_dma_cookie:

@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
     }

     if (lv1ent_fault(sent)) {
+        dma_addr_t handle;
         sysmmu_pte_t *pent;
         bool need_flush_flpd_cache = lv1ent_zero(sent);

@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
         update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
         kmemleak_ignore(pent);
         *pgcounter = NUM_LV2ENTRIES;
-        dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
+        handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
+                    DMA_TO_DEVICE);
+        if (dma_mapping_error(dma_dev, handle)) {
+            kmem_cache_free(lv2table_kmem_cache, pent);
+            return ERR_PTR(-EADDRINUSE);
+        }

         /*
          * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,

@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)

 static void exynos_iommu_remove_device(struct device *dev)
 {
+    struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
     if (!has_sysmmu(dev))
         return;

+    if (owner->domain) {
+        struct iommu_group *group = iommu_group_get(dev);
+
+        if (group) {
+            WARN_ON(owner->domain !=
+                iommu_group_default_domain(group));
+            exynos_iommu_detach_device(owner->domain, dev);
+            iommu_group_put(group);
+        }
+    }
     iommu_group_remove_device(dev);
 }

@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 {
     struct exynos_iommu_owner *owner = dev->archdata.iommu;
     struct platform_device *sysmmu = of_find_device_by_node(spec->np);
-    struct sysmmu_drvdata *data;
+    struct sysmmu_drvdata *data, *entry;

     if (!sysmmu)
         return -ENODEV;

@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
         dev->archdata.iommu = owner;
     }

+    list_for_each_entry(entry, &owner->controllers, owner_node)
+        if (entry == data)
+            return 0;
+
     list_add_tail(&data->owner_node, &owner->controllers);
     data->master = dev;

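The probe-time sequence above is the common pattern for the new core representation of a hardware IOMMU. Below is a minimal sketch of how a hypothetical driver would wire up its embedded struct iommu_device; only the iommu_device_* calls are from this series, while the example_* names and the driver-private structure are illustrative assumptions:

    struct example_iommu {
        void __iomem *base;
        struct iommu_device iommu;  /* core handle, as in the drivers above */
    };

    static int example_iommu_probe(struct platform_device *pdev)
    {
        struct example_iommu *data;
        int ret;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
            return -ENOMEM;

        /* expose the instance under the iommu class in sysfs */
        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                         dev_name(&pdev->dev));
        if (ret)
            return ret;

        /* tell the core which ops and firmware node belong to this instance */
        iommu_device_set_ops(&data->iommu, &example_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        /* add the instance to the core's list of hardware IOMMUs */
        return iommu_device_register(&data->iommu);
    }

(example_iommu_ops is assumed to be the driver's struct iommu_ops, defined elsewhere.)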
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
     u64 end_address;        /* reserved end address */
     struct dmar_dev_scope *devices; /* target devices */
     int devices_cnt;        /* target device count */
+    struct iommu_resv_region *resv; /* reserved region handle */
 };

 struct dmar_atsr_unit {

@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);

-static const struct iommu_ops intel_iommu_ops;
+const struct iommu_ops intel_iommu_ops;

 static bool translation_pre_enabled(struct intel_iommu *iommu)
 {

@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
         if (!dma_pte_present(pte) || dma_pte_superpage(pte))
             goto next;

-        level_pfn = pfn & level_mask(level - 1);
+        level_pfn = pfn & level_mask(level);
         level_pte = phys_to_virt(dma_pte_addr(pte));

         if (level > 2)

@@ -3325,13 +3326,14 @@ static int __init init_dmars(void)
     iommu_identity_mapping |= IDENTMAP_GFX;
 #endif

+    check_tylersburg_isoch();
+
     if (iommu_identity_mapping) {
         ret = si_domain_init(hw_pass_through);
         if (ret)
             goto free_iommu;
     }

-    check_tylersburg_isoch();
-
     /*
      * If we copied translations from a previous kernel in the kdump

@@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
     struct acpi_dmar_reserved_memory *rmrr;
+    int prot = DMA_PTE_READ|DMA_PTE_WRITE;
     struct dmar_rmrr_unit *rmrru;
+    size_t length;

     rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
     if (!rmrru)
-        return -ENOMEM;
+        goto out;
+
     rmrru->hdr = header;
     rmrr = (struct acpi_dmar_reserved_memory *)header;
     rmrru->base_address = rmrr->base_address;
     rmrru->end_address = rmrr->end_address;
+
+    length = rmrr->end_address - rmrr->base_address + 1;
+    rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+                          IOMMU_RESV_DIRECT);
+    if (!rmrru->resv)
+        goto free_rmrru;
+
     rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
                 ((void *)rmrr) + rmrr->header.length,
                 &rmrru->devices_cnt);
-    if (rmrru->devices_cnt && rmrru->devices == NULL) {
-        kfree(rmrru);
-        return -ENOMEM;
-    }
+    if (rmrru->devices_cnt && rmrru->devices == NULL)
+        goto free_all;

     list_add(&rmrru->list, &dmar_rmrr_units);

     return 0;
+
+free_all:
+    kfree(rmrru->resv);
+free_rmrru:
+    kfree(rmrru);
+out:
+    return -ENOMEM;
 }

 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)

@@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
     list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
         list_del(&rmrru->list);
         dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+        kfree(rmrru->resv);
         kfree(rmrru);
     }

@@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void)

     init_iommu_pm_ops();

-    for_each_active_iommu(iommu, drhd)
-        iommu->iommu_dev = iommu_device_create(NULL, iommu,
+    for_each_active_iommu(iommu, drhd) {
+        iommu_device_sysfs_add(&iommu->iommu, NULL,
                        intel_iommu_groups,
                        "%s", iommu->name);
+        iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+        iommu_device_register(&iommu->iommu);
+    }

     bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
     bus_register_notifier(&pci_bus_type, &device_nb);

@@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
     if (!iommu)
         return -ENODEV;

-    iommu_device_link(iommu->iommu_dev, dev);
+    iommu_device_link(&iommu->iommu, dev);

     group = iommu_group_get_for_dev(dev);

@@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev)

     iommu_group_remove_device(dev);

-    iommu_device_unlink(iommu->iommu_dev, dev);
+    iommu_device_unlink(&iommu->iommu, dev);
 }

+static void intel_iommu_get_resv_regions(struct device *device,
+                     struct list_head *head)
+{
+    struct iommu_resv_region *reg;
+    struct dmar_rmrr_unit *rmrr;
+    struct device *i_dev;
+    int i;
+
+    rcu_read_lock();
+    for_each_rmrr_units(rmrr) {
+        for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                      i, i_dev) {
+            if (i_dev != device)
+                continue;
+
+            list_add_tail(&rmrr->resv->list, head);
+        }
+    }
+    rcu_read_unlock();
+
+    reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+                      0, IOMMU_RESV_RESERVED);
+    if (!reg)
+        return;
+    list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+                     struct list_head *head)
+{
+    struct iommu_resv_region *entry, *next;
+
+    list_for_each_entry_safe(entry, next, head, list) {
+        if (entry->type == IOMMU_RESV_RESERVED)
+            kfree(entry);
+    }
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM

@@ -5332,7 +5390,7 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */

-static const struct iommu_ops intel_iommu_ops = {
+const struct iommu_ops intel_iommu_ops = {
     .capable        = intel_iommu_capable,
     .domain_alloc   = intel_iommu_domain_alloc,
     .domain_free    = intel_iommu_domain_free,

@@ -5344,6 +5402,8 @@ static const struct iommu_ops intel_iommu_ops = {
     .iova_to_phys   = intel_iommu_iova_to_phys,
     .add_device     = intel_iommu_add_device,
     .remove_device  = intel_iommu_remove_device,
+    .get_resv_regions = intel_iommu_get_resv_regions,
+    .put_resv_regions = intel_iommu_put_resv_regions,
     .device_group   = pci_device_group,
     .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
 };

@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
     if (!(prot & IOMMU_MMIO))
         pte |= ARM_V7S_ATTR_TEX(1);
     if (ap) {
-        pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+        pte |= ARM_V7S_PTE_AF;
+        if (!(prot & IOMMU_PRIV))
+            pte |= ARM_V7S_PTE_AP_UNPRIV;
         if (!(prot & IOMMU_WRITE))
             pte |= ARM_V7S_PTE_AP_RDONLY;
     }

@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)

     if (!(attr & ARM_V7S_PTE_AP_RDONLY))
         prot |= IOMMU_WRITE;
+    if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
+        prot |= IOMMU_PRIV;
     if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
         prot |= IOMMU_MMIO;
     else if (pte & ARM_V7S_ATTR_C)

@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,

     if (data->iop.fmt == ARM_64_LPAE_S1 ||
         data->iop.fmt == ARM_32_LPAE_S1) {
-        pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+        pte = ARM_LPAE_PTE_nG;

         if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
             pte |= ARM_LPAE_PTE_AP_RDONLY;

+        if (!(prot & IOMMU_PRIV))
+            pte |= ARM_LPAE_PTE_AP_UNPRIV;
+
         if (prot & IOMMU_MMIO)
             pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
                 << ARM_LPAE_PTE_ATTRINDX_SHIFT);

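These two page-table hunks are what make the new IOMMU_PRIV flag take effect: unless the caller asks for a privileged mapping, the unprivileged-access PTE bit is still set. A hedged sketch of how a DMA-layer helper could fold the DMA_ATTR_PRIVILEGED attribute (added later in this series) into the IOMMU_* prot bits consumed here; the real helper is the dma_info_to_prot() prototype seen further down, but its exact body is not shown in this pull, so treat the following as an assumption:

    static int example_dma_info_to_prot(enum dma_data_direction dir,
                        bool coherent, unsigned long attrs)
    {
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
            prot |= IOMMU_PRIV;    /* master-privileged-only mapping */

        switch (dir) {
        case DMA_BIDIRECTIONAL:
            return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
            return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
            return prot | IOMMU_WRITE;
        default:
            return 0;
        }
    }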
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
 postcore_initcall(iommu_dev_init);

 /*
- * Create an IOMMU device and return a pointer to it. IOMMU specific
- * attributes can be provided as an attribute group, allowing a unique
- * namespace per IOMMU type.
+ * Init the struct device for the IOMMU. IOMMU specific attributes can
+ * be provided as an attribute group, allowing a unique namespace per
+ * IOMMU type.
  */
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-                   const struct attribute_group **groups,
-                   const char *fmt, ...)
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+               struct device *parent,
+               const struct attribute_group **groups,
+               const char *fmt, ...)
 {
-    struct device *dev;
     va_list vargs;
     int ret;

-    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-    if (!dev)
-        return ERR_PTR(-ENOMEM);
-
-    device_initialize(dev);
+    device_initialize(&iommu->dev);

-    dev->class = &iommu_class;
-    dev->parent = parent;
-    dev->groups = groups;
-    dev_set_drvdata(dev, drvdata);
+    iommu->dev.class = &iommu_class;
+    iommu->dev.parent = parent;
+    iommu->dev.groups = groups;

     va_start(vargs, fmt);
-    ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+    ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
     va_end(vargs);
     if (ret)
         goto error;

-    ret = device_add(dev);
+    ret = device_add(&iommu->dev);
     if (ret)
         goto error;

-    return dev;
+    return 0;

 error:
-    put_device(dev);
-    return ERR_PTR(ret);
+    put_device(&iommu->dev);
+    return ret;
 }

-void iommu_device_destroy(struct device *dev)
+void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
-    if (!dev || IS_ERR(dev))
-        return;
-
-    device_unregister(dev);
+    device_unregister(&iommu->dev);
 }

 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface. A link to the device will be created in the "devices"
  * directory of the IOMMU device in sysfs and an "iommu" link will be
  * created under the linked device, pointing back at the IOMMU device.
  */
-int iommu_device_link(struct device *dev, struct device *link)
+int iommu_device_link(struct iommu_device *iommu, struct device *link)
 {
     int ret;

-    if (!dev || IS_ERR(dev))
+    if (!iommu || IS_ERR(iommu))
         return -ENODEV;

-    ret = sysfs_add_link_to_group(&dev->kobj, "devices",
+    ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
                       &link->kobj, dev_name(link));
     if (ret)
         return ret;

-    ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
+    ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
     if (ret)
-        sysfs_remove_link_from_group(&dev->kobj, "devices",
+        sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
                          dev_name(link));

     return ret;
 }

-void iommu_device_unlink(struct device *dev, struct device *link)
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
-    if (!dev || IS_ERR(dev))
+    if (!iommu || IS_ERR(iommu))
         return;

     sysfs_remove_link(&link->kobj, "iommu");
-    sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
+    sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
 }

@@ -55,7 +55,7 @@ struct iommu_group {
     struct iommu_domain *domain;
 };

-struct iommu_device {
+struct group_device {
     struct list_head list;
     struct device *dev;
     char *name;

@@ -68,6 +68,12 @@ struct iommu_group_attribute {
              const char *buf, size_t count);
 };

+static const char * const iommu_group_resv_type_string[] = {
+    [IOMMU_RESV_DIRECT]   = "direct",
+    [IOMMU_RESV_RESERVED] = "reserved",
+    [IOMMU_RESV_MSI]      = "msi",
+};
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)      \
 struct iommu_group_attribute iommu_group_attr_##_name =    \
     __ATTR(_name, _mode, _show, _store)

@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
 #define to_iommu_group(_kobj)       \
     container_of(_kobj, struct iommu_group, kobj)

+static LIST_HEAD(iommu_device_list);
+static DEFINE_SPINLOCK(iommu_device_lock);
+
+int iommu_device_register(struct iommu_device *iommu)
+{
+    spin_lock(&iommu_device_lock);
+    list_add_tail(&iommu->list, &iommu_device_list);
+    spin_unlock(&iommu_device_lock);
+
+    return 0;
+}
+
+void iommu_device_unregister(struct iommu_device *iommu)
+{
+    spin_lock(&iommu_device_lock);
+    list_del(&iommu->list);
+    spin_unlock(&iommu_device_lock);
+}
+
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                          unsigned type);
 static int __iommu_attach_device(struct iommu_domain *domain,

@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
     return sprintf(buf, "%s\n", group->name);
 }

+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of different type, regions are
+ * not merged.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+                    struct list_head *regions)
+{
+    struct iommu_resv_region *region;
+    phys_addr_t start = new->start;
+    phys_addr_t end = new->start + new->length - 1;
+    struct list_head *pos = regions->next;
+
+    while (pos != regions) {
+        struct iommu_resv_region *entry =
+            list_entry(pos, struct iommu_resv_region, list);
+        phys_addr_t a = entry->start;
+        phys_addr_t b = entry->start + entry->length - 1;
+        int type = entry->type;
+
+        if (end < a) {
+            goto insert;
+        } else if (start > b) {
+            pos = pos->next;
+        } else if ((start >= a) && (end <= b)) {
+            if (new->type == type)
+                goto done;
+            else
+                pos = pos->next;
+        } else {
+            if (new->type == type) {
+                phys_addr_t new_start = min(a, start);
+                phys_addr_t new_end = max(b, end);
+
+                list_del(&entry->list);
+                entry->start = new_start;
+                entry->length = new_end - new_start + 1;
+                iommu_insert_resv_region(entry, regions);
+            } else {
+                pos = pos->next;
+            }
+        }
+    }
+insert:
+    region = iommu_alloc_resv_region(new->start, new->length,
+                     new->prot, new->type);
+    if (!region)
+        return -ENOMEM;
+
+    list_add_tail(&region->list, pos);
+done:
+    return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+                 struct list_head *group_resv_regions)
+{
+    struct iommu_resv_region *entry;
+    int ret = 0;
+
+    list_for_each_entry(entry, dev_resv_regions, list) {
+        ret = iommu_insert_resv_region(entry, group_resv_regions);
+        if (ret)
+            break;
+    }
+    return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+                 struct list_head *head)
+{
+    struct group_device *device;
+    int ret = 0;
+
+    mutex_lock(&group->mutex);
+    list_for_each_entry(device, &group->devices, list) {
+        struct list_head dev_resv_regions;
+
+        INIT_LIST_HEAD(&dev_resv_regions);
+        iommu_get_resv_regions(device->dev, &dev_resv_regions);
+        ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+        iommu_put_resv_regions(device->dev, &dev_resv_regions);
+        if (ret)
+            break;
+    }
+    mutex_unlock(&group->mutex);
+    return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+                         char *buf)
+{
+    struct iommu_resv_region *region, *next;
+    struct list_head group_resv_regions;
+    char *str = buf;
+
+    INIT_LIST_HEAD(&group_resv_regions);
+    iommu_get_group_resv_regions(group, &group_resv_regions);
+
+    list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+        str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+                   (long long int)region->start,
+                   (long long int)(region->start +
+                            region->length - 1),
+                   iommu_group_resv_type_string[region->type]);
+        kfree(region);
+    }
+
+    return (str - buf);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+            iommu_group_show_resv_regions, NULL);
+
 static void iommu_group_release(struct kobject *kobj)
 {
     struct iommu_group *group = to_iommu_group(kobj);

@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void)
      */
     kobject_put(&group->kobj);

+    ret = iommu_group_create_file(group,
+                      &iommu_group_attr_reserved_regions);
+    if (ret)
+        return ERR_PTR(ret);
+
     pr_debug("Allocated group %d\n", group->id);

     return group;

@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
                           struct device *dev)
 {
     struct iommu_domain *domain = group->default_domain;
-    struct iommu_dm_region *entry;
+    struct iommu_resv_region *entry;
     struct list_head mappings;
     unsigned long pg_size;
     int ret = 0;

@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
     pg_size = 1UL << __ffs(domain->pgsize_bitmap);
     INIT_LIST_HEAD(&mappings);

-    iommu_get_dm_regions(dev, &mappings);
+    iommu_get_resv_regions(dev, &mappings);

     /* We need to consider overlapping regions for different devices */
     list_for_each_entry(entry, &mappings, list) {
         dma_addr_t start, end, addr;

-        if (domain->ops->apply_dm_region)
-            domain->ops->apply_dm_region(dev, domain, entry);
+        if (domain->ops->apply_resv_region)
+            domain->ops->apply_resv_region(dev, domain, entry);

         start = ALIGN(entry->start, pg_size);
         end   = ALIGN(entry->start + entry->length, pg_size);

+        if (entry->type != IOMMU_RESV_DIRECT)
+            continue;
+
         for (addr = start; addr < end; addr += pg_size) {
             phys_addr_t phys_addr;

@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
     }

 out:
-    iommu_put_dm_regions(dev, &mappings);
+    iommu_put_resv_regions(dev, &mappings);

     return ret;
 }

@@ -374,7 +530,7 @@ out:
 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 {
     int ret, i = 0;
-    struct iommu_device *device;
+    struct group_device *device;

     device = kzalloc(sizeof(*device), GFP_KERNEL);
     if (!device)

@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
     device->dev = dev;

     ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
-    if (ret) {
-        kfree(device);
-        return ret;
-    }
+    if (ret)
+        goto err_free_device;

     device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 rename:
     if (!device->name) {
-        sysfs_remove_link(&dev->kobj, "iommu_group");
-        kfree(device);
-        return -ENOMEM;
+        ret = -ENOMEM;
+        goto err_remove_link;
     }

     ret = sysfs_create_link_nowarn(group->devices_kobj,
                        &dev->kobj, device->name);
     if (ret) {
-        kfree(device->name);
         if (ret == -EEXIST && i >= 0) {
             /*
              * Account for the slim chance of collision
              * and append an instance to the name.
              */
+            kfree(device->name);
             device->name = kasprintf(GFP_KERNEL, "%s.%d",
                          kobject_name(&dev->kobj), i++);
             goto rename;
         }
-
-        sysfs_remove_link(&dev->kobj, "iommu_group");
-        kfree(device);
-        return ret;
+        goto err_free_name;
     }

     kobject_get(group->devices_kobj);

@@ -424,8 +574,10 @@ rename:
     mutex_lock(&group->mutex);
     list_add_tail(&device->list, &group->devices);
     if (group->domain)
-        __iommu_attach_device(group->domain, dev);
+        ret = __iommu_attach_device(group->domain, dev);
     mutex_unlock(&group->mutex);
+    if (ret)
+        goto err_put_group;

     /* Notify any listeners about change to group. */
     blocking_notifier_call_chain(&group->notifier,

@@ -436,6 +588,21 @@ rename:
     pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

     return 0;
+
+err_put_group:
+    mutex_lock(&group->mutex);
+    list_del(&device->list);
+    mutex_unlock(&group->mutex);
+    dev->iommu_group = NULL;
+    kobject_put(group->devices_kobj);
+err_free_name:
+    kfree(device->name);
+err_remove_link:
+    sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+    kfree(device);
+    pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+    return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);

@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
 void iommu_group_remove_device(struct device *dev)
 {
     struct iommu_group *group = dev->iommu_group;
-    struct iommu_device *tmp_device, *device = NULL;
+    struct group_device *tmp_device, *device = NULL;

     pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);

 static int iommu_group_device_count(struct iommu_group *group)
 {
-    struct iommu_device *entry;
+    struct group_device *entry;
     int ret = 0;

     list_for_each_entry(entry, &group->devices, list)

@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                       int (*fn)(struct device *, void *))
 {
-    struct iommu_device *device;
+    struct group_device *device;
     int ret = 0;

     list_for_each_entry(device, &group->devices, list) {

@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
     const struct iommu_ops *ops = dev->bus->iommu_ops;

-    if (ops && ops->get_dm_regions)
-        ops->get_dm_regions(dev, list);
+    if (ops && ops->get_resv_regions)
+        ops->get_resv_regions(dev, list);
 }

-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
     const struct iommu_ops *ops = dev->bus->iommu_ops;

-    if (ops && ops->put_dm_regions)
-        ops->put_dm_regions(dev, list);
+    if (ops && ops->put_resv_regions)
+        ops->put_resv_regions(dev, list);
 }

+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+                          size_t length,
+                          int prot, int type)
+{
+    struct iommu_resv_region *region;
+
+    region = kzalloc(sizeof(*region), GFP_KERNEL);
+    if (!region)
+        return NULL;
+
+    INIT_LIST_HEAD(&region->list);
+    region->start = start;
+    region->length = length;
+    region->prot = prot;
+    region->type = type;
+    return region;
+}
+
 /* Request that a device is direct mapped by the IOMMU */

@@ -1628,43 +1813,18 @@ out:
     return ret;
 }

-struct iommu_instance {
-    struct list_head list;
-    struct fwnode_handle *fwnode;
-    const struct iommu_ops *ops;
-};
-static LIST_HEAD(iommu_instance_list);
-static DEFINE_SPINLOCK(iommu_instance_lock);
-
-void iommu_register_instance(struct fwnode_handle *fwnode,
-                 const struct iommu_ops *ops)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
-    struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
-    if (WARN_ON(!iommu))
-        return;
-
-    of_node_get(to_of_node(fwnode));
-    INIT_LIST_HEAD(&iommu->list);
-    iommu->fwnode = fwnode;
-    iommu->ops = ops;
-    spin_lock(&iommu_instance_lock);
-    list_add_tail(&iommu->list, &iommu_instance_list);
-    spin_unlock(&iommu_instance_lock);
-}
-
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
-{
-    struct iommu_instance *instance;
     const struct iommu_ops *ops = NULL;
+    struct iommu_device *iommu;

-    spin_lock(&iommu_instance_lock);
-    list_for_each_entry(instance, &iommu_instance_list, list)
-        if (instance->fwnode == fwnode) {
-            ops = instance->ops;
+    spin_lock(&iommu_device_lock);
+    list_for_each_entry(iommu, &iommu_device_list, list)
+        if (iommu->fwnode == fwnode) {
+            ops = iommu->ops;
             break;
         }
-    spin_unlock(&iommu_instance_lock);
+    spin_unlock(&iommu_device_lock);
     return ops;
 }

@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
         fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
         if (!fwspec)
             return -ENOMEM;
+
+        dev->iommu_fwspec = fwspec;
     }

     for (i = 0; i < num_ids; i++)
         fwspec->ids[fwspec->num_ids + i] = ids[i];

     fwspec->num_ids += num_ids;
-    dev->iommu_fwspec = fwspec;
     return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

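The new hooks replace the old dm-region pair one for one; what changes is that drivers can now also advertise non-direct-mapped ranges such as MSI windows. A minimal sketch of a driver-side callback pair that only reports a software-managed MSI doorbell window follows; the example_* names, the 0x08000000 base and the 1 MiB size are illustrative assumptions, not values taken from this series:

    #define EXAMPLE_MSI_IOVA_BASE   0x08000000
    #define EXAMPLE_MSI_IOVA_LENGTH SZ_1M

    static void example_get_resv_regions(struct device *dev, struct list_head *head)
    {
        struct iommu_resv_region *region;

        /* writable, non-executable MMIO window reserved for MSI doorbells */
        region = iommu_alloc_resv_region(EXAMPLE_MSI_IOVA_BASE,
                         EXAMPLE_MSI_IOVA_LENGTH,
                         IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
                         IOMMU_RESV_MSI);
        if (region)
            list_add_tail(&region->list, head);
    }

    static void example_put_resv_regions(struct device *dev, struct list_head *head)
    {
        struct iommu_resv_region *entry, *next;

        /* regions were kzalloc'ed by iommu_alloc_resv_region(), so just free them */
        list_for_each_entry_safe(entry, next, head, list)
            kfree(entry);
    }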
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
     else {
         struct rb_node *prev_node = rb_prev(iovad->cached32_node);
         struct iova *curr_iova =
-            container_of(iovad->cached32_node, struct iova, node);
+            rb_entry(iovad->cached32_node, struct iova, node);
         *limit_pfn = curr_iova->pfn_lo - 1;
         return prev_node;
     }

@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
     if (!iovad->cached32_node)
         return;
     curr = iovad->cached32_node;
-    cached_iova = container_of(curr, struct iova, node);
+    cached_iova = rb_entry(curr, struct iova, node);

     if (free->pfn_lo >= cached_iova->pfn_lo) {
         struct rb_node *node = rb_next(&free->node);
-        struct iova *iova = container_of(node, struct iova, node);
+        struct iova *iova = rb_entry(node, struct iova, node);

         /* only cache if it's below 32bit pfn */
         if (node && iova->pfn_lo < iovad->dma_32bit_pfn)

@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
     curr = __get_cached_rbnode(iovad, &limit_pfn);
     prev = curr;
     while (curr) {
-        struct iova *curr_iova = container_of(curr, struct iova, node);
+        struct iova *curr_iova = rb_entry(curr, struct iova, node);

         if (limit_pfn < curr_iova->pfn_lo)
             goto move_left;

@@ -171,8 +171,7 @@ move_left:

     /* Figure out where to put new node */
     while (*entry) {
-        struct iova *this = container_of(*entry,
-                        struct iova, node);
+        struct iova *this = rb_entry(*entry, struct iova, node);
         parent = *entry;

         if (new->pfn_lo < this->pfn_lo)

@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
     struct rb_node **new = &(root->rb_node), *parent = NULL;
     /* Figure out where to put new node */
     while (*new) {
-        struct iova *this = container_of(*new, struct iova, node);
+        struct iova *this = rb_entry(*new, struct iova, node);

         parent = *new;

@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
     assert_spin_locked(&iovad->iova_rbtree_lock);

     while (node) {
-        struct iova *iova = container_of(node, struct iova, node);
+        struct iova *iova = rb_entry(node, struct iova, node);

         /* If pfn falls within iova's range, return iova */
         if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {

@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
     spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
     node = rb_first(&iovad->rbroot);
     while (node) {
-        struct iova *iova = container_of(node, struct iova, node);
+        struct iova *iova = rb_entry(node, struct iova, node);

         rb_erase(node, &iovad->rbroot);
         free_iova_mem(iova);

@@ -477,7 +476,7 @@ static int
 __is_range_overlap(struct rb_node *node,
     unsigned long pfn_lo, unsigned long pfn_hi)
 {
-    struct iova *iova = container_of(node, struct iova, node);
+    struct iova *iova = rb_entry(node, struct iova, node);

     if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
         return 1;

@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
     spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
     for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
         if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-            iova = container_of(node, struct iova, node);
+            iova = rb_entry(node, struct iova, node);
             __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
             if ((pfn_lo >= iova->pfn_lo) &&
                 (pfn_hi <= iova->pfn_hi))

@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)

     spin_lock_irqsave(&from->iova_rbtree_lock, flags);
     for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-        struct iova *iova = container_of(node, struct iova, node);
+        struct iova *iova = rb_entry(node, struct iova, node);
         struct iova *new_iova;

         new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);

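Every hunk in this file is the same mechanical substitution, so no behaviour changes: rb_entry() is simply the rbtree-specific spelling of container_of(), as its definition shows.

    /* include/linux/rbtree.h */
    #define rb_entry(ptr, type, member) container_of(ptr, type, member)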
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
     domain->cfg.ias = 32;
     domain->cfg.oas = 40;
     domain->cfg.tlb = &ipmmu_gather_ops;
+    domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+    domain->io_domain.geometry.force_aperture = true;
     /*
      * TODO: Add support for coherent walk through CCI with DVM and remove
      * cache handling. For now, delegate it to the io-pgtable code.

@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
     return 0;
 }

+/* Must be called under msm_iommu_lock */
+static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
+{
+    struct msm_iommu_dev *iommu, *ret = NULL;
+    struct msm_iommu_ctx_dev *master;
+
+    list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+        master = list_first_entry(&iommu->ctx_list,
+                      struct msm_iommu_ctx_dev,
+                      list);
+        if (master->of_node == dev->of_node) {
+            ret = iommu;
+            break;
+        }
+    }
+
+    return ret;
+}
+
+static int msm_iommu_add_device(struct device *dev)
+{
+    struct msm_iommu_dev *iommu;
+    unsigned long flags;
+    int ret = 0;
+
+    spin_lock_irqsave(&msm_iommu_lock, flags);
+
+    iommu = find_iommu_for_dev(dev);
+    if (iommu)
+        iommu_device_link(&iommu->iommu, dev);
+    else
+        ret = -ENODEV;
+
+    spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+    return ret;
+}
+
+static void msm_iommu_remove_device(struct device *dev)
+{
+    struct msm_iommu_dev *iommu;
+    unsigned long flags;
+
+    spin_lock_irqsave(&msm_iommu_lock, flags);
+
+    iommu = find_iommu_for_dev(dev);
+    if (iommu)
+        iommu_device_unlink(&iommu->iommu, dev);
+
+    spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
     int ret = 0;

@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
     .unmap = msm_iommu_unmap,
     .map_sg = default_iommu_map_sg,
     .iova_to_phys = msm_iommu_iova_to_phys,
+    .add_device = msm_iommu_add_device,
+    .remove_device = msm_iommu_remove_device,
     .pgsize_bitmap = MSM_IOMMU_PGSIZES,
     .of_xlate = qcom_iommu_of_xlate,
 };

@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
 static int msm_iommu_probe(struct platform_device *pdev)
 {
     struct resource *r;
+    resource_size_t ioaddr;
     struct msm_iommu_dev *iommu;
     int ret, par, val;

@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
         ret = PTR_ERR(iommu->base);
         goto fail;
     }
+    ioaddr = r->start;

     iommu->irq = platform_get_irq(pdev, 0);
     if (iommu->irq < 0) {

@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
     }

     list_add(&iommu->dev_node, &qcom_iommu_devices);
-    of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+    ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
+                     "msm-smmu.%pa", &ioaddr);
+    if (ret) {
+        pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
+        goto fail;
+    }
+
+    iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
+    iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+    ret = iommu_device_register(&iommu->iommu);
+    if (ret) {
+        pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
+        goto fail;
+    }

     pr_info("device mapped at %p, irq %d with %d ctx banks\n",
         iommu->base, iommu->irq, iommu->ncb);

@@ -19,6 +19,7 @@
 #define MSM_IOMMU_H

 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/clk.h>

 /* Sharability attributes of MSM IOMMU mappings */

@@ -68,6 +69,8 @@ struct msm_iommu_dev {
     struct list_head dom_node;
     struct list_head ctx_list;
     DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+
+    struct iommu_device iommu;
 };

 /**

@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,

 static int mtk_iommu_add_device(struct device *dev)
 {
+    struct mtk_iommu_data *data;
     struct iommu_group *group;

     if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
         return -ENODEV; /* Not a iommu client device */

+    data = dev->iommu_fwspec->iommu_priv;
+    iommu_device_link(&data->iommu, dev);
+
     group = iommu_group_get_for_dev(dev);
     if (IS_ERR(group))
         return PTR_ERR(group);

@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)

 static void mtk_iommu_remove_device(struct device *dev)
 {
+    struct mtk_iommu_data *data;
+
     if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
         return;

+    data = dev->iommu_fwspec->iommu_priv;
+    iommu_device_unlink(&data->iommu, dev);
+
     iommu_group_remove_device(dev);
     iommu_fwspec_free(dev);
 }

@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
     struct mtk_iommu_data   *data;
     struct device           *dev = &pdev->dev;
     struct resource         *res;
+    resource_size_t         ioaddr;
     struct component_match  *match = NULL;
     void                    *protect;
     int                     i, larb_nr, ret;

@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
     data->base = devm_ioremap_resource(dev, res);
     if (IS_ERR(data->base))
         return PTR_ERR(data->base);
+    ioaddr = res->start;

     data->irq = platform_get_irq(pdev, 0);
     if (data->irq < 0)

@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
     if (ret)
         return ret;

+    ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+                     "mtk-iommu.%pa", &ioaddr);
+    if (ret)
+        return ret;
+
+    iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+    iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
+
+    ret = iommu_device_register(&data->iommu);
+    if (ret)
+        return ret;
+
     if (!iommu_present(&platform_bus_type))
         bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
 {
     struct mtk_iommu_data *data = platform_get_drvdata(pdev);

+    iommu_device_sysfs_remove(&data->iommu);
+    iommu_device_unregister(&data->iommu);
+
     if (iommu_present(&platform_bus_type))
         bus_set_iommu(&platform_bus_type, NULL);

@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
         return ret;
     }

-    of_iommu_set_ops(np, &mtk_iommu_ops);
     return 0;
 }

@@ -47,6 +47,8 @@ struct mtk_iommu_data {
     struct iommu_group      *m4u_group;
     struct mtk_smi_iommu    smi_imu;      /* SMI larb iommu info */
     bool                    enable_4GB;
+
+    struct iommu_device     iommu;
 };

 static inline int compare_of(struct device *dev, void *data)

@@ -127,7 +127,7 @@ static const struct iommu_ops
              "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
         return NULL;

-    ops = of_iommu_get_ops(iommu_spec.np);
+    ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
     if (!ops || !ops->of_xlate ||
         iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
         ops->of_xlate(&pdev->dev, &iommu_spec))

@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        "#iommu-cells", idx,
                        &iommu_spec)) {
         np = iommu_spec.np;
-        ops = of_iommu_get_ops(np);
+        ops = iommu_ops_from_fwnode(&np->fwnode);

         if (!ops || !ops->of_xlate ||
             iommu_fwspec_init(dev, &np->fwnode, ops) ||

@@ -1646,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)

     inner_domain->parent = its_parent;
     inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+    inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
     info->ops = &its_msi_domain_ops;
     info->data = its;
     inner_domain->host_data = info;

@@ -38,6 +38,8 @@
 #include <linux/workqueue.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
+#include <linux/dma-iommu.h>
+#include <linux/irqdomain.h>

 #define DRIVER_VERSION  "0.2"
 #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"

@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
     return NULL;
 }

+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+                    phys_addr_t *base)
+{
+    struct list_head group_resv_regions;
+    struct iommu_resv_region *region, *next;
+    bool ret = false;
+
+    INIT_LIST_HEAD(&group_resv_regions);
+    iommu_get_group_resv_regions(group, &group_resv_regions);
+    list_for_each_entry(region, &group_resv_regions, list) {
+        if (region->type & IOMMU_RESV_MSI) {
+            *base = region->start;
+            ret = true;
+            goto out;
+        }
+    }
+out:
+    list_for_each_entry_safe(region, next, &group_resv_regions, list)
+        kfree(region);
+    return ret;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
                      struct iommu_group *iommu_group)
 {

@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
     struct vfio_domain *domain, *d;
     struct bus_type *bus = NULL, *mdev_bus;
     int ret;
+    bool resv_msi, msi_remap;
+    phys_addr_t resv_msi_base;

     mutex_lock(&iommu->lock);

@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
     if (ret)
         goto out_domain;

+    resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
     INIT_LIST_HEAD(&domain->group_list);
     list_add(&group->next, &domain->group_list);

-    if (!allow_unsafe_interrupts &&
-        !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
+    msi_remap = resv_msi ? irq_domain_check_msi_remap() :
+                iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+
+    if (!allow_unsafe_interrupts && !msi_remap) {
         pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
                __func__);
         ret = -EPERM;

@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
     if (ret)
         goto out_detach;

+    if (resv_msi) {
+        ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
+        if (ret)
+            goto out_detach;
+    }
+
     list_add(&domain->next, &iommu->domain_list);

     mutex_unlock(&iommu->lock);

@@ -27,6 +27,7 @@ int iommu_dma_init(void);

 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);

 /* Setup call for arch DMA mapping code */

@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
         u64 size, struct device *dev);

 /* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+             unsigned long attrs);

 /*
  * These implement the bulk of the relevant DMA mapping callbacks, but require

@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
         size_t size, enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
         size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

 /* The DMA API isn't _quite_ the whole story, though... */

@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
     return -ENODEV;
 }

+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+    return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }

@@ -62,6 +62,13 @@
  */
 #define DMA_ATTR_NO_WARN    (1UL << 8)

+/*
+ * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
+ * accessible at an elevated privilege level (and ideally inaccessible or
+ * at least read-only at lesser-privileged levels).
+ */
+#define DMA_ATTR_PRIVILEGED     (1UL << 9)
+
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
  * It can be given to a device to use as a DMA source or target. A CPU cannot

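A hedged sketch of how a driver would ask for such a buffer through the existing attrs-aware allocator; the device pointer and the 4 KiB size are placeholders, and whether the mapping really ends up privileged depends on the IOMMU and io-pgtable support added elsewhere in this series:

    dma_addr_t dma;
    void *cpu_addr;

    /* request a mapping intended to be accessible only to the privileged master context */
    cpu_addr = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
                   DMA_ATTR_PRIVILEGED);
    if (!cpu_addr)
        return -ENOMEM;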
@@ -29,6 +29,7 @@
 #include <linux/dma_remapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/list.h>
+#include <linux/iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>

@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
 #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
 #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
 #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)

@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)

 /* INVALID_DESC */
 #define DMA_CCMD_INVL_GRANU_OFFSET  61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
-#define DMA_ID_TLB_DSI_FLUSH    (((u64)2) << 3)
-#define DMA_ID_TLB_PSI_FLUSH    (((u64)3) << 3)
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH    (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH    (((u64)3) << 4)
 #define DMA_ID_TLB_READ_DRAIN   (((u64)1) << 7)
 #define DMA_ID_TLB_WRITE_DRAIN  (((u64)1) << 6)
 #define DMA_ID_TLB_DID(id)  (((u64)((id & 0xffff) << 16)))

@@ -316,8 +317,8 @@ enum {
 #define QI_DEV_EIOTLB_SIZE  (((u64)1) << 11)
 #define QI_DEV_EIOTLB_GLOB(g)   ((u64)g)
 #define QI_DEV_EIOTLB_PASID(p)  (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid)  ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_EIOTLB_QDEP(qd)  (((qd) & 0x1f) << 16)
+#define QI_DEV_EIOTLB_SID(sid)  ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd)  ((u64)((qd) & 0x1f) << 4)
 #define QI_DEV_EIOTLB_MAX_INVS  32

 #define QI_PGRP_IDX(idx)    (((u64)(idx)) << 55)

@@ -439,7 +440,7 @@ struct intel_iommu {
     struct irq_domain *ir_domain;
     struct irq_domain *ir_msi_domain;
 #endif
-    struct device   *iommu_dev; /* IOMMU-sysfs device */
+    struct iommu_device iommu;  /* IOMMU core code handle */
     int     node;
     u32     flags;      /* Software defined flags */
 };

@ -31,6 +31,13 @@
|
|||
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
|
||||
#define IOMMU_NOEXEC (1 << 3)
|
||||
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
|
||||
/*
|
||||
* This is to make the IOMMU API setup privileged
|
||||
* mapppings accessible by the master only at higher
|
||||
* privileged execution level and inaccessible at
|
||||
* less privileged levels.
|
||||
*/
|
||||
#define IOMMU_PRIV (1 << 5)
|
||||
|
||||
struct iommu_ops;
|
||||
struct iommu_group;
|
||||
|
@ -117,18 +124,25 @@ enum iommu_attr {
|
|||
DOMAIN_ATTR_MAX,
|
||||
};
|
||||
|
||||
/* These are the possible reserved region types */
|
||||
#define IOMMU_RESV_DIRECT (1 << 0)
|
||||
#define IOMMU_RESV_RESERVED (1 << 1)
|
||||
#define IOMMU_RESV_MSI (1 << 2)
|
||||
|
||||
/**
|
||||
* struct iommu_dm_region - descriptor for a direct mapped memory region
|
||||
* struct iommu_resv_region - descriptor for a reserved memory region
|
||||
* @list: Linked list pointers
|
||||
* @start: System physical start address of the region
|
||||
* @length: Length of the region in bytes
|
||||
* @prot: IOMMU Protection flags (READ/WRITE/...)
|
||||
* @type: Type of the reserved region
|
||||
*/
|
||||
struct iommu_dm_region {
|
||||
struct iommu_resv_region {
|
||||
struct list_head list;
|
||||
phys_addr_t start;
|
||||
size_t length;
|
||||
int prot;
|
||||
int type;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
|
@ -150,9 +164,9 @@ struct iommu_dm_region {
|
|||
* @device_group: find iommu group for a particular device
|
||||
* @domain_get_attr: Query domain attributes
|
||||
* @domain_set_attr: Change domain attributes
|
||||
* @get_dm_regions: Request list of direct mapping requirements for a device
|
||||
* @put_dm_regions: Free list of direct mapping requirements for a device
|
||||
* @apply_dm_region: Temporary helper call-back for iova reserved ranges
|
||||
* @get_resv_regions: Request list of reserved regions for a device
|
||||
* @put_resv_regions: Free list of reserved regions for a device
|
||||
* @apply_resv_region: Temporary helper call-back for iova reserved ranges
|
||||
* @domain_window_enable: Configure and enable a particular window for a domain
|
||||
* @domain_window_disable: Disable a particular window for a domain
|
||||
* @domain_set_windows: Set the number of windows for a domain
|
||||
|
@ -184,11 +198,12 @@ struct iommu_ops {
|
|||
int (*domain_set_attr)(struct iommu_domain *domain,
|
||||
enum iommu_attr attr, void *data);
|
||||
|
||||
/* Request/Free a list of direct mapping requirements for a device */
|
||||
void (*get_dm_regions)(struct device *dev, struct list_head *list);
|
||||
void (*put_dm_regions)(struct device *dev, struct list_head *list);
|
||||
void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
|
||||
struct iommu_dm_region *region);
|
||||
/* Request/Free a list of reserved regions for a device */
|
||||
void (*get_resv_regions)(struct device *dev, struct list_head *list);
|
||||
void (*put_resv_regions)(struct device *dev, struct list_head *list);
|
||||
void (*apply_resv_region)(struct device *dev,
|
||||
struct iommu_domain *domain,
|
||||
struct iommu_resv_region *region);
|
||||
|
||||
/* Window handling functions */
|
||||
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
|
||||
|
@ -204,6 +219,42 @@ struct iommu_ops {
|
|||
unsigned long pgsize_bitmap;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iommu_device - IOMMU core representation of one IOMMU hardware
|
||||
* instance
|
||||
* @list: Used by the iommu-core to keep a list of registered iommus
|
||||
* @ops: iommu-ops for talking to this iommu
|
||||
* @dev: struct device for sysfs handling
|
||||
*/
|
||||
struct iommu_device {
|
||||
struct list_head list;
|
||||
const struct iommu_ops *ops;
|
||||
struct fwnode_handle *fwnode;
|
||||
struct device dev;
|
||||
};
|
||||
|
||||
int iommu_device_register(struct iommu_device *iommu);
|
||||
void iommu_device_unregister(struct iommu_device *iommu);
|
||||
int iommu_device_sysfs_add(struct iommu_device *iommu,
|
||||
struct device *parent,
|
||||
const struct attribute_group **groups,
|
||||
const char *fmt, ...) __printf(4, 5);
|
||||
void iommu_device_sysfs_remove(struct iommu_device *iommu);
|
||||
int iommu_device_link(struct iommu_device *iommu, struct device *link);
|
||||
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
|
||||
|
||||
static inline void iommu_device_set_ops(struct iommu_device *iommu,
|
||||
const struct iommu_ops *ops)
|
||||
{
|
||||
iommu->ops = ops;
|
||||
}
|
||||
|
||||
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
|
||||
struct fwnode_handle *fwnode)
|
||||
{
|
||||
iommu->fwnode = fwnode;
|
||||
}
|
||||
|
||||
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
|
||||
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
|
||||
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
|
||||
|
@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
|
|||
extern void iommu_set_fault_handler(struct iommu_domain *domain,
|
||||
iommu_fault_handler_t handler, void *token);
|
||||
|
||||
extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
|
||||
extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
|
||||
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
|
||||
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
|
||||
extern int iommu_request_dm_for_dev(struct device *dev);
|
||||
extern struct iommu_resv_region *
|
||||
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
|
||||
extern int iommu_get_group_resv_regions(struct iommu_group *group,
|
||||
struct list_head *head);
|
||||
|
||||
extern int iommu_attach_group(struct iommu_domain *domain,
|
||||
struct iommu_group *group);
|
||||
|
@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
|
|||
void *data);
|
||||
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
|
||||
void *data);
|
||||
struct device *iommu_device_create(struct device *parent, void *drvdata,
|
||||
const struct attribute_group **groups,
|
||||
const char *fmt, ...) __printf(4, 5);
|
||||
void iommu_device_destroy(struct device *dev);
|
||||
int iommu_device_link(struct device *dev, struct device *link);
|
||||
void iommu_device_unlink(struct device *dev, struct device *link);
|
||||
|
||||
/* Window handling function prototypes */
|
||||
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
|
||||
|
@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
|
|||
const struct iommu_ops *ops);
|
||||
void iommu_fwspec_free(struct device *dev);
|
||||
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
|
||||
void iommu_register_instance(struct fwnode_handle *fwnode,
|
||||
const struct iommu_ops *ops);
|
||||
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
|
||||
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
|
||||
|
||||
#else /* CONFIG_IOMMU_API */
|
||||
|
||||
struct iommu_ops {};
|
||||
struct iommu_group {};
|
||||
struct iommu_fwspec {};
|
||||
struct iommu_device {};
|
||||
|
||||
static inline bool iommu_present(struct bus_type *bus)
|
||||
{
|
||||
|
@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
|
|||
{
|
||||
}
|
||||
|
||||
static inline void iommu_get_dm_regions(struct device *dev,
|
||||
static inline void iommu_get_resv_regions(struct device *dev,
|
||||
struct list_head *list)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void iommu_put_dm_regions(struct device *dev,
|
||||
static inline void iommu_put_resv_regions(struct device *dev,
|
||||
struct list_head *list)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int iommu_get_group_resv_regions(struct iommu_group *group,
|
||||
struct list_head *head)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int iommu_request_dm_for_dev(struct device *dev)
|
||||
{
|
||||
return -ENODEV;
|
||||
|
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
	return -EINVAL;
}

static inline struct device *iommu_device_create(struct device *parent,
						 void *drvdata,
static inline int iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return ERR_PTR(-ENODEV);
	return -ENODEV;
}

static inline void iommu_device_destroy(struct device *dev)
static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

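The stubs above mirror the new registration API for the core per-instance representation: a driver embeds a struct iommu_device in its private data, points it at its ops and firmware node, optionally exposes it in sysfs, and registers it. Here is a minimal sketch of a probe path; the foo_* names and the embedding layout are assumptions made for illustration, not code from the series.

	#include <linux/iommu.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	/* Hypothetical driver: one foo_iommu per hardware instance. */
	struct foo_iommu {
		void __iomem		*base;
		struct iommu_device	iommu;	/* core representation */
	};

	static const struct iommu_ops foo_iommu_ops;	/* filled in elsewhere */

	static int foo_iommu_probe(struct platform_device *pdev)
	{
		struct foo_iommu *foo;
		int ret;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		/* Optional: expose this instance under the iommu sysfs class. */
		ret = iommu_device_sysfs_add(&foo->iommu, &pdev->dev, NULL,
					     "foo-iommu.%s", dev_name(&pdev->dev));
		if (ret)
			return ret;

		/* Tie the core object to this driver's ops and firmware node
		 * (assumes a DT-probed device, so of_node is valid). */
		iommu_device_set_ops(&foo->iommu, &foo_iommu_ops);
		iommu_device_set_fwnode(&foo->iommu, &pdev->dev.of_node->fwnode);

		return iommu_device_register(&foo->iommu);
	}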
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
	return -ENODEV;
}

static inline void iommu_register_instance(struct fwnode_handle *fwnode,
					   const struct iommu_ops *ops)
{
}

static inline
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

@@ -183,6 +183,12 @@ enum {
	/* Irq domain is an IPI domain with single virq */
	IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),

	/* Irq domain implements MSIs */
	IRQ_DOMAIN_FLAG_MSI = (1 << 4),

	/* Irq domain implements MSI remapping */
	IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),

	/*
	 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
	 * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
						   enum irq_domain_bus_token bus_token);
extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
				  irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{
	return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
}

static inline bool irq_domain_is_msi(struct irq_domain *domain)
{
	return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}

static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
{
	return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
}

extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);

#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{
	return false;
}

static inline bool irq_domain_is_msi(struct irq_domain *domain)
{
	return false;
}

static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
{
	return false;
}

static inline bool
irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
	return false;
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */

#else /* CONFIG_IRQ_DOMAIN */

@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,

#endif /* CONFIG_OF_IOMMU */

static inline void of_iommu_set_ops(struct device_node *np,
				    const struct iommu_ops *ops)
{
	iommu_register_instance(&np->fwnode, ops);
}

static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
	return iommu_get_instance(&np->fwnode);
}

extern struct of_device_id __iommu_of_table;

typedef int (*of_iommu_init_fn)(struct device_node *);

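With the of_iommu_set_ops()/of_iommu_get_ops() wrappers gone, the fwnode becomes the lookup key: drivers publish their ops via iommu_device_set_fwnode() and iommu_device_register(), and bus code asks for them with iommu_ops_from_fwnode(). A small sketch of the lookup side, assuming an "iommus" reference has already been parsed into iommu_np; the foo_* helper and the -EPROBE_DEFER handling are illustrative.

	#include <linux/err.h>
	#include <linux/iommu.h>
	#include <linux/of.h>

	/* Illustrative: resolve the ops for an IOMMU node, defer if not probed yet. */
	static const struct iommu_ops *foo_get_iommu_ops(struct device_node *iommu_np)
	{
		const struct iommu_ops *ops;

		ops = iommu_ops_from_fwnode(&iommu_np->fwnode);
		if (!ops)
			return ERR_PTR(-EPROBE_DEFER);

		return ops;
	}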
@@ -277,6 +277,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);

/**
 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 * IRQ remapping
 *
 * Return: false if any MSI irq domain does not support IRQ remapping,
 * true otherwise (including if there is no MSI irq domain)
 */
bool irq_domain_check_msi_remap(void)
{
	struct irq_domain *h;
	bool ret = true;

	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (irq_domain_is_msi(h) &&
		    !irq_domain_hierarchical_is_msi_remap(h)) {
			ret = false;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
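irq_domain_check_msi_remap() gives callers that cannot name a specific device's MSI domain a global answer: only if every registered MSI domain sits on top of a remapping-capable parent can MSIs be assumed to be isolated. Below is a sketch of how a VFIO-style consumer might combine it with the existing IOMMU capability check; the foo_* naming and the allow_unsafe_interrupts knob are modeled on VFIO's behaviour, not copied from it.

	#include <linux/iommu.h>
	#include <linux/irqdomain.h>

	static bool allow_unsafe_interrupts;	/* illustrative opt-out knob */

	/* Decide whether MSI-capable devices on @bus may be handed to userspace. */
	static int foo_check_msi_isolation(struct bus_type *bus)
	{
		bool msi_remap;

		/*
		 * x86-style IOMMUs advertise interrupt remapping as an IOMMU
		 * capability; on ARM the equivalent guarantee comes from the
		 * MSI controller, which is what the irq_domain check reports.
		 */
		msi_remap = iommu_capable(bus, IOMMU_CAP_INTR_REMAP) ||
			    irq_domain_check_msi_remap();

		if (!allow_unsafe_interrupts && !msi_remap)
			return -EPERM;

		return 0;
	}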
@@ -1408,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
	if (domain->ops->alloc)
		domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
}

/**
 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
 * parent has MSI remapping support
 * @domain: domain pointer
 */
bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
	for (; domain; domain = domain->parent) {
		if (irq_domain_is_msi_remap(domain))
			return true;
	}
	return false;
}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
/**
 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain

@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
					   &msi_domain_ops, info);
	return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					   fwnode, &msi_domain_ops, info);
}

int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
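msi_create_irq_domain() now tags every MSI domain with IRQ_DOMAIN_FLAG_MSI, so irq_domain_check_msi_remap() can find them; the complementary IRQ_DOMAIN_FLAG_MSI_REMAP is set somewhere in the hierarchy by the interrupt controller that actually provides the isolation (the GICv3 ITS driver does this for its inner domain). The sketch below shows that producer side; the foo_its_* names and ops are placeholders, not taken from the ITS driver.

	#include <linux/irqdomain.h>

	static const struct irq_domain_ops foo_its_domain_ops;	/* placeholder ops */

	/* Create the inner domain of an MSI controller that remaps/isolates MSIs. */
	static struct irq_domain *foo_its_create_domain(struct fwnode_handle *fwnode,
							struct irq_domain *parent,
							void *host_data)
	{
		struct irq_domain *inner;

		inner = irq_domain_create_tree(fwnode, &foo_its_domain_ops, host_data);
		if (!inner)
			return NULL;

		inner->parent = parent;
		/*
		 * Advertise MSI remapping here; MSI domains stacked on top via
		 * msi_create_irq_domain() will then satisfy
		 * irq_domain_hierarchical_is_msi_remap().
		 */
		inner->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;

		return inner;
	}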