powerpc/iommu: Update constant names to reflect their hardcoded page size
The powerpc iommu uses a hardcoded page size of 4K. This patch changes
the name of the IOMMU_PAGE_* macros to reflect the hardcoded values. A
future patch will use the existing names to support dynamic page sizes.

Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit e589a4404f
parent fee26f6d5d
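As context for the rename, here is a minimal user-space sketch of what the
_4K constants compute. ASM_CONST() and _ALIGN_UP() are kernel macros; they
are reduced to plain C stand-ins here so the arithmetic can be run in
isolation, and this is illustrative only, not the kernel's own code:

#include <stdio.h>

/* Stand-ins for the kernel's hardcoded 4K IOMMU page constants. */
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K  (1UL << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K  (~((1UL << IOMMU_PAGE_SHIFT_4K) - 1))
/* _ALIGN_UP() equivalent: round up to the next 4K boundary. */
#define IOMMU_PAGE_ALIGN_4K(addr) \
        (((addr) + IOMMU_PAGE_SIZE_4K - 1) & IOMMU_PAGE_MASK_4K)

int main(void)
{
        unsigned long dma_addr = 0x12345;

        /* TCE entry number and offset within the 4K IOMMU page. */
        printf("entry:   %lu\n", dma_addr >> IOMMU_PAGE_SHIFT_4K);
        printf("offset:  0x%lx\n", dma_addr & ~IOMMU_PAGE_MASK_4K);

        /* 0x12345 rounds up to the next 4K boundary, 0x13000. */
        printf("aligned: 0x%lx\n", IOMMU_PAGE_ALIGN_4K(dma_addr));
        return 0;
}

The patch itself is purely mechanical: every IOMMU_PAGE_* user below is
switched to the *_4K spelling, freeing the unsuffixed names for the dynamic
page size support the commit message mentions.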
@@ -30,10 +30,10 @@
 #include <asm/machdep.h>
 #include <asm/types.h>
 
-#define IOMMU_PAGE_SHIFT 12
-#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
 
 /* Boot time flags */
 extern int iommu_is_off;
@@ -42,7 +42,7 @@ extern int iommu_force_on;
 /* Pure 2^n version of get_order */
 static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
 {
-        return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
+        return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT_4K) + 1;
 }
 
 
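For reference, get_iommu_order() above computes the allocation order in
IOMMU pages. A rough user-space rendering follows; ilog2_ul is a
hypothetical stand-in for the kernel's __ilog2(), whose result for 0 is
formally undefined, so the one-page case here is illustrative only:

#include <stdio.h>

#define IOMMU_PAGE_SHIFT_4K 12

/* Stand-in for the kernel's __ilog2(): index of the highest set bit. */
static int ilog2_ul(unsigned long v)
{
        int r = -1;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

/* Pure 2^n version of get_order, mirroring the hunk above. */
static int get_iommu_order(unsigned long size)
{
        return ilog2_ul((size - 1) >> IOMMU_PAGE_SHIFT_4K) + 1;
}

int main(void)
{
        /* 4K, 8K and 20K buffers need orders 0, 1 and 3 (1, 2 and 8 pages). */
        printf("%d %d %d\n", get_iommu_order(0x1000),
               get_iommu_order(0x2000), get_iommu_order(0x5000));
        return 0;
}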
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
                 return 0;
         }
 
-        if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+        if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT_4K)) {
                 dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
                 dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
-                                mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
+                                mask, tbl->it_offset << IOMMU_PAGE_SHIFT_4K);
                 return 0;
         } else
                 return 1;
@@ -251,14 +251,14 @@ again:
 
         if (dev)
                 boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-                                      1 << IOMMU_PAGE_SHIFT);
+                                      1 << IOMMU_PAGE_SHIFT_4K);
         else
-                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT_4K);
         /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
         n = iommu_area_alloc(tbl->it_map, limit, start, npages,
-                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
-                             align_mask);
+                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT_4K,
+                             align_mask);
         if (n == -1) {
                 if (likely(pass == 0)) {
                         /* First try the pool from the start */
@@ -320,12 +320,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                 return DMA_ERROR_CODE;
 
         entry += tbl->it_offset;        /* Offset into real TCE table */
-        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */
+        ret = entry << IOMMU_PAGE_SHIFT_4K;     /* Set the return dma address */
 
         /* Put the TCEs in the HW table */
         build_fail = ppc_md.tce_build(tbl, entry, npages,
-                                      (unsigned long)page & IOMMU_PAGE_MASK,
-                                      direction, attrs);
+                                      (unsigned long)page & IOMMU_PAGE_MASK_4K,
+                                      direction, attrs);
 
         /* ppc_md.tce_build() only returns non-zero for transient errors.
          * Clean up the table bitmap in this case and return
@@ -352,7 +352,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 {
         unsigned long entry, free_entry;
 
-        entry = dma_addr >> IOMMU_PAGE_SHIFT;
+        entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
         free_entry = entry - tbl->it_offset;
 
         if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +401,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
         unsigned long flags;
         struct iommu_pool *pool;
 
-        entry = dma_addr >> IOMMU_PAGE_SHIFT;
+        entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
         free_entry = entry - tbl->it_offset;
 
         pool = get_pool(tbl, free_entry);
@@ -468,13 +468,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 }
                 /* Allocate iommu entries for that segment */
                 vaddr = (unsigned long) sg_virt(s);
-                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
+                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE_4K);
                 align = 0;
-                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+                if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && slen >= PAGE_SIZE &&
                     (vaddr & ~PAGE_MASK) == 0)
-                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
                 entry = iommu_range_alloc(dev, tbl, npages, &handle,
-                                          mask >> IOMMU_PAGE_SHIFT, align);
+                                          mask >> IOMMU_PAGE_SHIFT_4K, align);
 
                 DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -489,16 +489,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
                 /* Convert entry to a dma_addr_t */
                 entry += tbl->it_offset;
-                dma_addr = entry << IOMMU_PAGE_SHIFT;
-                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
+                dma_addr = entry << IOMMU_PAGE_SHIFT_4K;
+                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK_4K);
 
                 DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
                             npages, entry, dma_addr);
 
                 /* Insert into HW table */
                 build_fail = ppc_md.tce_build(tbl, entry, npages,
-                                              vaddr & IOMMU_PAGE_MASK,
-                                              direction, attrs);
+                                              vaddr & IOMMU_PAGE_MASK_4K,
+                                              direction, attrs);
                 if(unlikely(build_fail))
                         goto failure;
 
@@ -559,9 +559,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 if (s->dma_length != 0) {
                         unsigned long vaddr, npages;
 
-                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
+                        vaddr = s->dma_address & IOMMU_PAGE_MASK_4K;
                         npages = iommu_num_pages(s->dma_address, s->dma_length,
-                                                 IOMMU_PAGE_SIZE);
+                                                 IOMMU_PAGE_SIZE_4K);
                         __iommu_free(tbl, vaddr, npages);
                         s->dma_address = DMA_ERROR_CODE;
                         s->dma_length = 0;
@@ -592,7 +592,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 if (sg->dma_length == 0)
                         break;
                 npages = iommu_num_pages(dma_handle, sg->dma_length,
-                                         IOMMU_PAGE_SIZE);
+                                         IOMMU_PAGE_SIZE_4K);
                 __iommu_free(tbl, dma_handle, npages);
                 sg = sg_next(sg);
         }
@@ -676,7 +676,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
         set_bit(0, tbl->it_map);
 
         /* We only split the IOMMU table if we have 1GB or more of space */
-        if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+        if ((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >= (1UL * 1024 * 1024 * 1024))
                 tbl->nr_pools = IOMMU_NR_POOLS;
         else
                 tbl->nr_pools = 1;
@@ -768,16 +768,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 
         vaddr = page_address(page) + offset;
         uaddr = (unsigned long)vaddr;
-        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
+        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE_4K);
 
         if (tbl) {
                 align = 0;
-                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+                if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && size >= PAGE_SIZE &&
                     ((unsigned long)vaddr & ~PAGE_MASK) == 0)
-                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
 
                 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-                                         mask >> IOMMU_PAGE_SHIFT, align,
+                                         mask >> IOMMU_PAGE_SHIFT_4K, align,
                                          attrs);
                 if (dma_handle == DMA_ERROR_CODE) {
                         if (printk_ratelimit()) {
@@ -786,7 +786,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                                 npages);
                         }
                 } else
-                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
+                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK_4K);
         }
 
         return dma_handle;
@@ -801,7 +801,7 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
         BUG_ON(direction == DMA_NONE);
 
         if (tbl) {
-                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
+                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE_4K);
                 iommu_free(tbl, dma_handle, npages);
         }
 }
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
         memset(ret, 0, size);
 
         /* Set up tces to cover the allocated range */
-        nio_pages = size >> IOMMU_PAGE_SHIFT;
+        nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
         io_order = get_iommu_order(size);
         mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-                              mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
+                              mask >> IOMMU_PAGE_SHIFT_4K, io_order, NULL);
         if (mapping == DMA_ERROR_CODE) {
                 free_pages((unsigned long)ret, order);
                 return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                 unsigned int nio_pages;
 
                 size = PAGE_ALIGN(size);
-                nio_pages = size >> IOMMU_PAGE_SHIFT;
+                nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
                 iommu_free(tbl, dma_handle, nio_pages);
                 size = PAGE_ALIGN(size);
                 free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
         if (tce_value)
                 return -EINVAL;
 
-        if (ioba & ~IOMMU_PAGE_MASK)
+        if (ioba & ~IOMMU_PAGE_MASK_4K)
                 return -EINVAL;
 
-        ioba >>= IOMMU_PAGE_SHIFT;
+        ioba >>= IOMMU_PAGE_SHIFT_4K;
         if (ioba < tbl->it_offset)
                 return -EINVAL;
 
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
         if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
                 return -EINVAL;
 
-        if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+        if (tce & ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ))
                 return -EINVAL;
 
-        if (ioba & ~IOMMU_PAGE_MASK)
+        if (ioba & ~IOMMU_PAGE_MASK_4K)
                 return -EINVAL;
 
-        ioba >>= IOMMU_PAGE_SHIFT;
+        ioba >>= IOMMU_PAGE_SHIFT_4K;
         if (ioba < tbl->it_offset)
                 return -EINVAL;
 
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
         /* if (unlikely(ret))
                 pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-                                __func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+                                __func__, hwaddr, entry << IOMMU_PAGE_SHIFT_4K,
                                 hwaddr, ret); */
 
         return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 {
         int ret;
         struct page *page = NULL;
-        unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+        unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK_4K & ~PAGE_MASK;
         enum dma_data_direction direction = iommu_tce_direction(tce);
 
         ret = get_user_pages_fast(tce & PAGE_MASK, 1,
                         direction != DMA_TO_DEVICE, &page);
         if (unlikely(ret != 1)) {
                 /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-                                tce, entry << IOMMU_PAGE_SHIFT, ret); */
+                                tce, entry << IOMMU_PAGE_SHIFT_4K, ret); */
                 return -EFAULT;
         }
         hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 
         if (ret < 0)
                 pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-                                __func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+                                __func__, entry << IOMMU_PAGE_SHIFT_4K, tce, ret);
 
         return ret;
 }
@@ -520,14 +520,14 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
         struct vio_dev *viodev = to_vio_dev(dev);
         dma_addr_t ret = DMA_ERROR_CODE;
 
-        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
                 atomic_inc(&viodev->cmo.allocs_failed);
                 return ret;
         }
 
         ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
         if (unlikely(dma_mapping_error(dev, ret))) {
-                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
                 atomic_inc(&viodev->cmo.allocs_failed);
         }
 
@@ -543,7 +543,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 
         dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
-        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
 }
 
 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -556,7 +556,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
         size_t alloc_size = 0;
 
         for (sgl = sglist; count < nelems; count++, sgl++)
-                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
+                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
 
         if (vio_cmo_alloc(viodev, alloc_size)) {
                 atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
         }
 
         for (sgl = sglist, count = 0; count < ret; count++, sgl++)
-                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
         if (alloc_size)
                 vio_cmo_dealloc(viodev, alloc_size);
 
@@ -590,7 +590,7 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
         int count = 0;
 
         for (sgl = sglist; count < nelems; count++, sgl++)
-                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
 
         dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
 
@@ -736,7 +736,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
                         return -EINVAL;
                 }
 
-                viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
+                viodev->cmo.desired =
+                        IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
                 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                         viodev->cmo.desired = VIO_CMO_MIN_ENT;
                 size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1177,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
                           &tbl->it_index, &offset, &size);
 
         /* TCE table size - measured in tce entries */
-        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+        tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
         /* offset for VIO should always be 0 */
-        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
         tbl->it_busno = 0;
         tbl->it_type = TCE_VB;
         tbl->it_blocksize = 16;
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
 
         io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
-        for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+        for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE_4K)
                 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
 
         mb();
@@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 {
         cell_iommu_setup_stab(iommu, base, size, 0, 0);
         iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
-                                            IOMMU_PAGE_SHIFT);
+                                            IOMMU_PAGE_SHIFT_4K);
         cell_iommu_enable_hardware(iommu);
 }
 
@@ -487,8 +487,8 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
         window->table.it_blocksize = 16;
         window->table.it_base = (unsigned long)iommu->ptab;
         window->table.it_index = iommu->nid;
-        window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
-        window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+        window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT_4K) + pte_offset;
+        window->table.it_size = size >> IOMMU_PAGE_SHIFT_4K;
 
         iommu_init_table(&window->table, iommu->nid);
 
@@ -773,7 +773,7 @@ static void __init cell_iommu_init_one(struct device_node *np,
 
         /* Setup the iommu_table */
         cell_iommu_setup_window(iommu, np, base, size,
-                                offset >> IOMMU_PAGE_SHIFT);
+                                offset >> IOMMU_PAGE_SHIFT_4K);
 }
 
 static void __init cell_disable_iommus(void)
@@ -1122,7 +1122,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
 
                 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
                 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
-                                                    IOMMU_PAGE_SHIFT);
+                                                    IOMMU_PAGE_SHIFT_4K);
                 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
                                             fbase, fsize);
                 cell_iommu_enable_hardware(iommu);
@@ -564,7 +564,7 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
 {
         tbl->it_blocksize = 16;
         tbl->it_base = (unsigned long)tce_mem;
-        tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
+        tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT_4K;
         tbl->it_index = 0;
         tbl->it_size = tce_size >> 3;
         tbl->it_busno = 0;
@@ -761,7 +761,7 @@ static struct notifier_block tce_iommu_bus_nb = {
 
 static int __init tce_iommu_bus_notifier_init(void)
 {
-        BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
+        BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE_4K);
 
         bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
         return 0;
@@ -488,7 +488,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
         tbl->it_busno = phb->bus->number;
 
         /* Units of tce entries */
-        tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
+        tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT_4K;
 
         /* Test if we are going over 2GB of DMA space */
         if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -499,7 +499,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
         phb->dma_window_base_cur += phb->dma_window_size;
 
         /* Set the tce table size - measured in entries */
-        tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
+        tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT_4K;
 
         tbl->it_index = 0;
         tbl->it_blocksize = 16;
@@ -540,8 +540,8 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
         tbl->it_base = 0;
         tbl->it_blocksize = 16;
         tbl->it_type = TCE_PCI;
-        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
-        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
+        tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
 }
 
 static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
@@ -72,7 +72,7 @@
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
-unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
 EXPORT_SYMBOL(CMO_PageSize);
 
 int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -569,7 +569,7 @@ void pSeries_cmo_feature_init(void)
 {
         char *ptr, *key, *value, *end;
         int call_status;
-        int page_order = IOMMU_PAGE_SHIFT;
+        int page_order = IOMMU_PAGE_SHIFT_4K;
 
         pr_debug(" -> fw_cmo_feature_init()\n");
         spin_lock(&rtas_data_buf_lock);
@@ -260,7 +260,7 @@ static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
                 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
 
                 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
-                          tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
+                          tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
 
                 uaddr += TCE_PAGE_SIZE;
                 index++;
@@ -381,8 +381,8 @@ static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
 
         /* Init bits and pieces */
         tbl->table.it_blocksize = 16;
-        tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
-        tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;
+        tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT_4K;
+        tbl->table.it_size = size >> IOMMU_PAGE_SHIFT_4K;
 
         /*
          * It's already blank but we clear it anyway.
@@ -449,8 +449,8 @@ static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
         if (table) {
                 pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
                         pci_name(pdev),
-                        table->table.it_offset << IOMMU_PAGE_SHIFT,
-                        (table->table.it_offset << IOMMU_PAGE_SHIFT)
+                        table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
+                        (table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
                         + phb->dma32_region_size - 1);
                 archdata->dma_data.iommu_table_base = &table->table;
                 return;
@@ -1282,24 +1282,25 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 
         /* netdev inits at probe time along with the structures we need below*/
         if (netdev == NULL)
-                return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+                return IOMMU_PAGE_ALIGN_4K(IBMVETH_IO_ENTITLEMENT_DEFAULT);
 
         adapter = netdev_priv(netdev);
 
         ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
-        ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+        ret += IOMMU_PAGE_ALIGN_4K(netdev->mtu);
 
         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                 /* add the size of the active receive buffers */
                 if (adapter->rx_buff_pool[i].active)
                         ret +=
                             adapter->rx_buff_pool[i].size *
-                            IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+                            IOMMU_PAGE_ALIGN_4K(adapter->rx_buff_pool[i].
                                     buff_size);
                 rxqentries += adapter->rx_buff_pool[i].size;
         }
         /* add the size of the receive queue entries */
-        ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+        ret += IOMMU_PAGE_ALIGN_4K(
+                rxqentries * sizeof(struct ibmveth_rx_q_entry));
 
         return ret;
 }
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
          * enforcing the limit based on the max that the guest can map.
          */
         down_write(&current->mm->mmap_sem);
-        npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+        npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
         locked = current->mm->locked_vm + npages;
         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
 
         down_write(&current->mm->mmap_sem);
         current->mm->locked_vm -= (container->tbl->it_size <<
-                        IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+                        IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
         up_write(&current->mm->mmap_sem);
 }
 
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
                 if (info.argsz < minsz)
                         return -EINVAL;
 
-                info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
-                info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+                info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+                info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
                 info.flags = 0;
 
                 if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
                                 VFIO_DMA_MAP_FLAG_WRITE))
                         return -EINVAL;
 
-                if ((param.size & ~IOMMU_PAGE_MASK) ||
-                                (param.vaddr & ~IOMMU_PAGE_MASK))
+                if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+                                (param.vaddr & ~IOMMU_PAGE_MASK_4K))
                         return -EINVAL;
 
                 /* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
                 if (ret)
                         return ret;
 
-                for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+                for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
                         ret = iommu_put_tce_user_mode(tbl,
-                                        (param.iova >> IOMMU_PAGE_SHIFT) + i,
+                                        (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
                                         tce);
                         if (ret)
                                 break;
-                        tce += IOMMU_PAGE_SIZE;
+                        tce += IOMMU_PAGE_SIZE_4K;
                 }
                 if (ret)
                         iommu_clear_tces_and_put_pages(tbl,
-                                        param.iova >> IOMMU_PAGE_SHIFT, i);
+                                        param.iova >> IOMMU_PAGE_SHIFT_4K, i);
 
                 iommu_flush_tce(tbl);
 
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
                 if (param.flags)
                         return -EINVAL;
 
-                if (param.size & ~IOMMU_PAGE_MASK)
+                if (param.size & ~IOMMU_PAGE_MASK_4K)
                         return -EINVAL;
 
                 ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
-                                param.size >> IOMMU_PAGE_SHIFT);
+                                param.size >> IOMMU_PAGE_SHIFT_4K);
                 if (ret)
                         return ret;
 
                 ret = iommu_clear_tces_and_put_pages(tbl,
-                                param.iova >> IOMMU_PAGE_SHIFT,
-                                param.size >> IOMMU_PAGE_SHIFT);
+                                param.iova >> IOMMU_PAGE_SHIFT_4K,
+                                param.size >> IOMMU_PAGE_SHIFT_4K);
                 iommu_flush_tce(tbl);
 
                 return ret;