PowerPC: adapt for dma_map_ops changes

Adapt core PowerPC architecture code for dma_map_ops changes: replace
alloc/free_coherent with generic alloc/free methods.

Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
[added missing changes to arch/powerpc/kernel/vio.c]
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
Andrzej Pietrasiewicz 2011-12-06 14:14:46 +01:00 committed by Marek Szyprowski
parent e8d51e54ab
commit bfbf7d6151
8 changed files with 60 additions and 41 deletions

View File

@@ -22,9 +22,11 @@
/* Some dma direct funcs must be visible for use in other dma_ops */ /* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag); dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs);
extern void dma_direct_free_coherent(struct device *dev, size_t size, extern void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle); void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -130,23 +132,29 @@ static inline int dma_supported(struct device *dev, u64 mask)
extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int dma_set_mask(struct device *dev, u64 dma_mask);
static inline void *dma_alloc_coherent(struct device *dev, size_t size, #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
dma_addr_t *dma_handle, gfp_t flag)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); struct dma_map_ops *dma_ops = get_dma_ops(dev);
void *cpu_addr; void *cpu_addr;
BUG_ON(!dma_ops); BUG_ON(!dma_ops);
cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag); cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr; return cpu_addr;
} }
static inline void dma_free_coherent(struct device *dev, size_t size, #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
void *cpu_addr, dma_addr_t dma_handle)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); struct dma_map_ops *dma_ops = get_dma_ops(dev);
@@ -154,7 +162,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
} }
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)

View File

@@ -17,7 +17,8 @@
* to the dma address (mapping) of the first page. * to the dma address (mapping) of the first page.
*/ */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag) dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag, dma_handle, dev->coherent_dma_mask, flag,
@@ -25,7 +26,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
} }
static void dma_iommu_free_coherent(struct device *dev, size_t size, static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle) void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{ {
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
} }
@@ -105,8 +107,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
} }
struct dma_map_ops dma_iommu_ops = { struct dma_map_ops dma_iommu_ops = {
.alloc_coherent = dma_iommu_alloc_coherent, .alloc = dma_iommu_alloc_coherent,
.free_coherent = dma_iommu_free_coherent, .free = dma_iommu_free_coherent,
.map_sg = dma_iommu_map_sg, .map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg, .unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported, .dma_supported = dma_iommu_dma_supported,

View File

@@ -47,8 +47,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* for everything else. * for everything else.
*/ */
struct dma_map_ops swiotlb_dma_ops = { struct dma_map_ops swiotlb_dma_ops = {
.alloc_coherent = dma_direct_alloc_coherent, .alloc = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent, .free = dma_direct_free_coherent,
.map_sg = swiotlb_map_sg_attrs, .map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs, .unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported, .dma_supported = swiotlb_dma_supported,

View File

@@ -26,7 +26,8 @@
void *dma_direct_alloc_coherent(struct device *dev, size_t size, void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag) dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
void *ret; void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -54,7 +55,8 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
} }
void dma_direct_free_coherent(struct device *dev, size_t size, void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle) void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{ {
#ifdef CONFIG_NOT_COHERENT_CACHE #ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr); __dma_free_coherent(size, vaddr);
@@ -150,8 +152,8 @@ static inline void dma_direct_sync_single(struct device *dev,
#endif #endif
struct dma_map_ops dma_direct_ops = { struct dma_map_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent, .alloc = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent, .free = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg, .map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg, .unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported, .dma_supported = dma_direct_dma_supported,

View File

@@ -65,7 +65,8 @@ static struct of_device_id __initdata ibmebus_matches[] = {
static void *ibmebus_alloc_coherent(struct device *dev, static void *ibmebus_alloc_coherent(struct device *dev,
size_t size, size_t size,
dma_addr_t *dma_handle, dma_addr_t *dma_handle,
gfp_t flag) gfp_t flag,
struct dma_attrs *attrs)
{ {
void *mem; void *mem;
@@ -77,7 +78,8 @@ static void *ibmebus_alloc_coherent(struct device *dev,
static void ibmebus_free_coherent(struct device *dev, static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr, size_t size, void *vaddr,
dma_addr_t dma_handle) dma_addr_t dma_handle,
struct dma_attrs *attrs)
{ {
kfree(vaddr); kfree(vaddr);
} }
@@ -136,8 +138,8 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev)
} }
static struct dma_map_ops ibmebus_dma_ops = { static struct dma_map_ops ibmebus_dma_ops = {
.alloc_coherent = ibmebus_alloc_coherent, .alloc = ibmebus_alloc_coherent,
.free_coherent = ibmebus_free_coherent, .free = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg, .map_sg = ibmebus_map_sg,
.unmap_sg = ibmebus_unmap_sg, .unmap_sg = ibmebus_unmap_sg,
.dma_supported = ibmebus_dma_supported, .dma_supported = ibmebus_dma_supported,

View File

@@ -487,7 +487,8 @@ static void vio_cmo_balance(struct work_struct *work)
} }
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag) dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
struct vio_dev *viodev = to_vio_dev(dev); struct vio_dev *viodev = to_vio_dev(dev);
void *ret; void *ret;
@ -497,7 +498,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
return NULL; return NULL;
} }
ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag); ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
if (unlikely(ret == NULL)) { if (unlikely(ret == NULL)) {
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed); atomic_inc(&viodev->cmo.allocs_failed);
@@ -507,11 +508,12 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
} }
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle) void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{ {
struct vio_dev *viodev = to_vio_dev(dev); struct vio_dev *viodev = to_vio_dev(dev);
dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle); dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
} }
@@ -612,8 +614,8 @@ static u64 vio_dma_get_required_mask(struct device *dev)
} }
struct dma_map_ops vio_dma_mapping_ops = { struct dma_map_ops vio_dma_mapping_ops = {
.alloc_coherent = vio_dma_iommu_alloc_coherent, .alloc = vio_dma_iommu_alloc_coherent,
.free_coherent = vio_dma_iommu_free_coherent, .free = vio_dma_iommu_free_coherent,
.map_sg = vio_dma_iommu_map_sg, .map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg, .unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page, .map_page = vio_dma_iommu_map_page,

View File

@@ -564,7 +564,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
/* A coherent allocation implies strong ordering */ /* A coherent allocation implies strong ordering */
static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag) dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
if (iommu_fixed_is_weak) if (iommu_fixed_is_weak)
return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
@@ -572,18 +573,19 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
device_to_mask(dev), flag, device_to_mask(dev), flag,
dev_to_node(dev)); dev_to_node(dev));
else else
return dma_direct_ops.alloc_coherent(dev, size, dma_handle, return dma_direct_ops.alloc(dev, size, dma_handle, flag,
flag); attrs);
} }
static void dma_fixed_free_coherent(struct device *dev, size_t size, static void dma_fixed_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle) void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{ {
if (iommu_fixed_is_weak) if (iommu_fixed_is_weak)
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
dma_handle); dma_handle);
else else
dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle); dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
} }
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -642,8 +644,8 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
struct dma_map_ops dma_iommu_fixed_ops = { struct dma_map_ops dma_iommu_fixed_ops = {
.alloc_coherent = dma_fixed_alloc_coherent, .alloc = dma_fixed_alloc_coherent,
.free_coherent = dma_fixed_free_coherent, .free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg, .map_sg = dma_fixed_map_sg,
.unmap_sg = dma_fixed_unmap_sg, .unmap_sg = dma_fixed_unmap_sg,
.dma_supported = dma_fixed_dma_supported, .dma_supported = dma_fixed_dma_supported,

View File

@@ -515,7 +515,8 @@ core_initcall(ps3_system_bus_init);
* to the dma address (mapping) of the first page. * to the dma address (mapping) of the first page.
*/ */
static void * ps3_alloc_coherent(struct device *_dev, size_t size, static void * ps3_alloc_coherent(struct device *_dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag) dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{ {
int result; int result;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -552,7 +553,7 @@ clean_none:
} }
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
dma_addr_t dma_handle) dma_addr_t dma_handle, struct dma_attrs *attrs)
{ {
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -701,8 +702,8 @@ static u64 ps3_dma_get_required_mask(struct device *_dev)
} }
static struct dma_map_ops ps3_sb_dma_ops = { static struct dma_map_ops ps3_sb_dma_ops = {
.alloc_coherent = ps3_alloc_coherent, .alloc = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent, .free = ps3_free_coherent,
.map_sg = ps3_sb_map_sg, .map_sg = ps3_sb_map_sg,
.unmap_sg = ps3_sb_unmap_sg, .unmap_sg = ps3_sb_unmap_sg,
.dma_supported = ps3_dma_supported, .dma_supported = ps3_dma_supported,
@@ -712,8 +713,8 @@ static struct dma_map_ops ps3_sb_dma_ops = {
}; };
static struct dma_map_ops ps3_ioc0_dma_ops = { static struct dma_map_ops ps3_ioc0_dma_ops = {
.alloc_coherent = ps3_alloc_coherent, .alloc = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent, .free = ps3_free_coherent,
.map_sg = ps3_ioc0_map_sg, .map_sg = ps3_ioc0_map_sg,
.unmap_sg = ps3_ioc0_unmap_sg, .unmap_sg = ps3_ioc0_unmap_sg,
.dma_supported = ps3_dma_supported, .dma_supported = ps3_dma_supported,