Merge branch 'topic/ppc-kvm' into next

Merge the topic branch we were sharing with kvm-ppc; Paul has also
merged it.
This commit is contained in:
Michael Ellerman 2017-04-28 20:19:37 +10:00
commit b13f6683ed
9 changed files with 142 additions and 21 deletions

View File

@ -64,6 +64,11 @@ struct iommu_table_ops {
long index, long index,
unsigned long *hpa, unsigned long *hpa,
enum dma_data_direction *direction); enum dma_data_direction *direction);
/* Real mode */
int (*exchange_rm)(struct iommu_table *tbl,
long index,
unsigned long *hpa,
enum dma_data_direction *direction);
#endif #endif
void (*clear)(struct iommu_table *tbl, void (*clear)(struct iommu_table *tbl,
long index, long npages); long index, long npages);
@ -114,6 +119,7 @@ struct iommu_table {
struct list_head it_group_list;/* List of iommu_table_group_link */ struct list_head it_group_list;/* List of iommu_table_group_link */
unsigned long *it_userspace; /* userspace view of the table */ unsigned long *it_userspace; /* userspace view of the table */
struct iommu_table_ops *it_ops; struct iommu_table_ops *it_ops;
struct kref it_kref;
}; };
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \ #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
@ -146,8 +152,8 @@ static inline void *get_iommu_table_base(struct device *dev)
extern int dma_iommu_dma_supported(struct device *dev, u64 mask); extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
/* Frees table for an individual device node */ extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern void iommu_free_table(struct iommu_table *tbl, const char *node_name); extern int iommu_tce_table_put(struct iommu_table *tbl);
/* Initializes an iommu_table based in values set in the passed-in /* Initializes an iommu_table based in values set in the passed-in
* structure * structure
@ -208,6 +214,8 @@ extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void); extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction); unsigned long *hpa, enum dma_data_direction *direction);
extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction);
#else #else
static inline void iommu_register_group(struct iommu_table_group *table_group, static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number, int pci_domain_number,

View File

@ -29,10 +29,14 @@ extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm); extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size); unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries); unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa); unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif #endif

View File

@ -711,13 +711,16 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
return tbl; return tbl;
} }
void iommu_free_table(struct iommu_table *tbl, const char *node_name) static void iommu_table_free(struct kref *kref)
{ {
unsigned long bitmap_sz; unsigned long bitmap_sz;
unsigned int order; unsigned int order;
struct iommu_table *tbl;
if (!tbl) tbl = container_of(kref, struct iommu_table, it_kref);
return;
if (tbl->it_ops->free)
tbl->it_ops->free(tbl);
if (!tbl->it_map) { if (!tbl->it_map) {
kfree(tbl); kfree(tbl);
@ -733,7 +736,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
/* verify that table contains no entries */ /* verify that table contains no entries */
if (!bitmap_empty(tbl->it_map, tbl->it_size)) if (!bitmap_empty(tbl->it_map, tbl->it_size))
pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name); pr_warn("%s: Unexpected TCEs\n", __func__);
/* calculate bitmap size in bytes */ /* calculate bitmap size in bytes */
bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
@ -746,6 +749,24 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
kfree(tbl); kfree(tbl);
} }
/*
 * Take a reference on an iommu_table unless it is already on its way
 * to being freed.  Returns @tbl on success, or NULL when the refcount
 * had already dropped to zero.
 */
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	return kref_get_unless_zero(&tbl->it_kref) ? tbl : NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);
/*
 * Drop a reference on an iommu_table; iommu_table_free() runs when the
 * last reference goes away.  A NULL @tbl is warned about and ignored.
 * Returns nonzero if this call released the final reference.
 */
int iommu_tce_table_put(struct iommu_table *tbl)
{
	int freed = 0;

	if (!WARN_ON(!tbl))
		freed = kref_put(&tbl->it_kref, iommu_table_free);

	return freed;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
/* Creates TCEs for a user provided buffer. The user buffer must be /* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here * contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t * comprises a page address and offset into that page. The dma_addr_t
@ -1004,6 +1025,31 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
} }
EXPORT_SYMBOL_GPL(iommu_tce_xchg); EXPORT_SYMBOL_GPL(iommu_tce_xchg);
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Real-mode counterpart of iommu_tce_xchg(): exchanges a TCE while the
 * CPU runs in KVM real mode, so only real-mode-safe helpers may be used.
 * On a successful exchange of an entry that allowed device writes
 * (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL), the backing page is marked
 * dirty.  If the struct page cannot be resolved in real mode, the
 * exchange is rolled back and -EFAULT is returned so the caller can
 * fall back to the virtual-mode path.
 */
long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction)
{
long ret;
/* exchange_rm() swaps; old hpa/direction come back through the pointers */
ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL))) {
/* NOTE(review): realmode_pfn_to_page() can fail for memory whose
 * struct page is not reachable in real mode — presumably huge-page
 * backed vmemmap; confirm against realmode_pfn_to_page(). */
struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
if (likely(pg)) {
SetPageDirty(pg);
} else {
/* Undo: swap the previous value (still in *hpa/*direction) back in */
tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
ret = -EFAULT;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
#endif
int iommu_take_ownership(struct iommu_table *tbl) int iommu_take_ownership(struct iommu_table *tbl)
{ {
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

View File

@ -314,6 +314,25 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
} }
EXPORT_SYMBOL_GPL(mm_iommu_lookup); EXPORT_SYMBOL_GPL(mm_iommu_lookup);
/*
 * Real-mode counterpart of mm_iommu_lookup(): find the preregistered
 * memory region that fully contains [ua, ua + size).  Walks the list
 * with the lockless iterator since no locks may be taken in real mode.
 * Returns the matching region descriptor, or NULL if none covers the
 * requested range.
 */
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		unsigned long mem_end = mem->ua + (mem->entries << PAGE_SHIFT);

		if (ua >= mem->ua && ua + size <= mem_end)
			return mem;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries) unsigned long ua, unsigned long entries)
{ {
@ -345,6 +364,26 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
} }
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
/*
 * Real-mode counterpart of mm_iommu_ua_to_hpa(): translate a userspace
 * address inside a preregistered region into a host physical address.
 *
 * @mem: preregistered region assumed to contain @ua (ua >= mem->ua —
 *       TODO confirm callers guarantee this; a smaller ua would make
 *       @entry negative and pass the check below)
 * @ua:  userspace address to translate
 * @hpa: out: host physical page address OR'ed with the in-page offset
 *
 * Returns 0 on success, -EFAULT when @ua is beyond the region or the
 * hpas[] slot cannot be resolved.  vmalloc_to_phys() walks the kernel
 * page table directly, which is why it is used on this real-mode path.
 *
 * Fix vs. original: validate @entry BEFORE forming &mem->hpas[entry];
 * computing an out-of-bounds element address is undefined behavior
 * (CERT ARR30-C), even if it is never dereferenced.
 */
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va;
	unsigned long *pa;

	/* Bounds-check the index before taking a pointer into hpas[] */
	if (entry >= mem->entries)
		return -EFAULT;

	va = &mem->hpas[entry];
	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{ {
if (atomic64_inc_not_zero(&mem->mapped)) if (atomic64_inc_not_zero(&mem->mapped))

View File

@ -1425,8 +1425,7 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
iommu_group_put(pe->table_group.group); iommu_group_put(pe->table_group.group);
BUG_ON(pe->table_group.group); BUG_ON(pe->table_group.group);
} }
pnv_pci_ioda2_table_free_pages(tbl); iommu_tce_table_put(tbl);
iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
} }
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
@ -1861,6 +1860,17 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
return ret; return ret;
} }
/*
 * Real-mode TCE exchange for IODA1 (P7IOC): swap the entry, then
 * invalidate the TCE cache for it using the real-mode flush path.
 */
static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long rc;

	rc = pnv_tce_xchg(tbl, index, hpa, direction);
	if (rc)
		return rc;

	pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
	return 0;
}
#endif #endif
static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index, static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
@ -1875,6 +1885,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
.set = pnv_ioda1_tce_build, .set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda1_tce_xchg, .exchange = pnv_ioda1_tce_xchg,
.exchange_rm = pnv_ioda1_tce_xchg_rm,
#endif #endif
.clear = pnv_ioda1_tce_free, .clear = pnv_ioda1_tce_free,
.get = pnv_tce_get, .get = pnv_tce_get,
@ -1949,7 +1960,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
{ {
struct iommu_table_group_link *tgl; struct iommu_table_group_link *tgl;
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) { list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
struct pnv_ioda_pe *pe = container_of(tgl->table_group, struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group); struct pnv_ioda_pe, table_group);
struct pnv_phb *phb = pe->phb; struct pnv_phb *phb = pe->phb;
@ -2005,6 +2016,17 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
return ret; return ret;
} }
/*
 * Real-mode TCE exchange for IODA2: swap the entry, then invalidate the
 * TCE cache for it using the real-mode flush path.
 */
static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long rc;

	rc = pnv_tce_xchg(tbl, index, hpa, direction);
	if (rc)
		return rc;

	pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
	return 0;
}
#endif #endif
static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
@ -2018,13 +2040,13 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
static void pnv_ioda2_table_free(struct iommu_table *tbl) static void pnv_ioda2_table_free(struct iommu_table *tbl)
{ {
pnv_pci_ioda2_table_free_pages(tbl); pnv_pci_ioda2_table_free_pages(tbl);
iommu_free_table(tbl, "pnv");
} }
static struct iommu_table_ops pnv_ioda2_iommu_ops = { static struct iommu_table_ops pnv_ioda2_iommu_ops = {
.set = pnv_ioda2_tce_build, .set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda2_tce_xchg, .exchange = pnv_ioda2_tce_xchg,
.exchange_rm = pnv_ioda2_tce_xchg_rm,
#endif #endif
.clear = pnv_ioda2_tce_free, .clear = pnv_ioda2_tce_free,
.get = pnv_tce_get, .get = pnv_tce_get,
@ -2204,7 +2226,7 @@ found:
__free_pages(tce_mem, get_order(tce32_segsz * segs)); __free_pages(tce_mem, get_order(tce32_segsz * segs));
if (tbl) { if (tbl) {
pnv_pci_unlink_table_and_group(tbl, &pe->table_group); pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
iommu_free_table(tbl, "pnv"); iommu_tce_table_put(tbl);
} }
} }
@ -2294,16 +2316,16 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
if (!tbl) if (!tbl)
return -ENOMEM; return -ENOMEM;
tbl->it_ops = &pnv_ioda2_iommu_ops;
ret = pnv_pci_ioda2_table_alloc_pages(nid, ret = pnv_pci_ioda2_table_alloc_pages(nid,
bus_offset, page_shift, window_size, bus_offset, page_shift, window_size,
levels, tbl); levels, tbl);
if (ret) { if (ret) {
iommu_free_table(tbl, "pnv"); iommu_tce_table_put(tbl);
return ret; return ret;
} }
tbl->it_ops = &pnv_ioda2_iommu_ops;
*ptbl = tbl; *ptbl = tbl;
return 0; return 0;
@ -2344,7 +2366,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (rc) { if (rc) {
pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
rc); rc);
pnv_ioda2_table_free(tbl); iommu_tce_table_put(tbl);
return rc; return rc;
} }
@ -2432,7 +2454,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_unset_window(&pe->table_group, 0); pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (pe->pbus) if (pe->pbus)
pnv_ioda_setup_bus_dma(pe, pe->pbus, false); pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
pnv_ioda2_table_free(tbl); iommu_tce_table_put(tbl);
} }
static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
@ -3405,7 +3427,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
} }
free_pages(tbl->it_base, get_order(tbl->it_size << 3)); free_pages(tbl->it_base, get_order(tbl->it_size << 3));
iommu_free_table(tbl, "pnv"); iommu_tce_table_put(tbl);
} }
static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
@ -3432,7 +3454,7 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
} }
pnv_pci_ioda2_table_free_pages(tbl); pnv_pci_ioda2_table_free_pages(tbl);
iommu_free_table(tbl, "pnv"); iommu_tce_table_put(tbl);
} }
static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe, static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,

View File

@ -767,6 +767,7 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid); tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
INIT_LIST_HEAD_RCU(&tbl->it_group_list); INIT_LIST_HEAD_RCU(&tbl->it_group_list);
kref_init(&tbl->it_kref);
return tbl; return tbl;
} }

View File

@ -74,6 +74,7 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
goto fail_exit; goto fail_exit;
INIT_LIST_HEAD_RCU(&tbl->it_group_list); INIT_LIST_HEAD_RCU(&tbl->it_group_list);
kref_init(&tbl->it_kref);
tgl->table_group = table_group; tgl->table_group = table_group;
list_add_rcu(&tgl->next, &tbl->it_group_list); list_add_rcu(&tgl->next, &tbl->it_group_list);
@ -115,7 +116,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
BUG_ON(table_group->group); BUG_ON(table_group->group);
} }
#endif #endif
iommu_free_table(tbl, node_name); iommu_tce_table_put(tbl);
kfree(table_group); kfree(table_group);
} }

View File

@ -1318,7 +1318,7 @@ static void vio_dev_release(struct device *dev)
struct iommu_table *tbl = get_iommu_table_base(dev); struct iommu_table *tbl = get_iommu_table_base(dev);
if (tbl) if (tbl)
iommu_free_table(tbl, of_node_full_name(dev->of_node)); iommu_tce_table_put(tbl);
of_node_put(dev->of_node); of_node_put(dev->of_node);
kfree(to_vio_dev(dev)); kfree(to_vio_dev(dev));
} }

View File

@ -680,7 +680,7 @@ static void tce_iommu_free_table(struct tce_container *container,
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
tce_iommu_userspace_view_free(tbl, container->mm); tce_iommu_userspace_view_free(tbl, container->mm);
tbl->it_ops->free(tbl); iommu_tce_table_put(tbl);
decrement_locked_vm(container->mm, pages); decrement_locked_vm(container->mm, pages);
} }