powerpc/dma: use the generic direct mapping bypass
Now that we've switched all the powerpc nommu and swiotlb methods to use the generic dma_direct_* calls, we can remove these ops vectors entirely and rely on the common direct mapping bypass, which avoids indirect function calls altogether. This also lets us remove a whole lot of boilerplate code for setting up these operations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 68005b67d1 (parent 461db2bdbf)
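For context on what "direct mapping bypass" means here: in the DMA core of this era, a device whose dev->dma_ops is NULL is handled by calling the dma_direct_* helpers straight from the generic mapping code, with no indirect call through a dma_map_ops vector. The sketch below is a paraphrase of that dispatch for the map_page path, not a verbatim copy of kernel/dma/mapping.c; map_page_sketch is an illustrative name only.

```c
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>

/*
 * Illustrative paraphrase of the generic bypass: with no dma_map_ops
 * installed (ops == NULL), the core takes the dma_direct_* path and
 * skips the indirect ops->map_page call entirely.
 */
static dma_addr_t map_page_sketch(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (likely(!ops))	/* NULL ops means "use the direct mapping" */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}
```

With this patch, powerpc devices that used to be given dma_nommu_ops or powerpc_swiotlb_dma_ops simply keep dev->dma_ops NULL (the NPU devices get dma_dummy_ops instead, as the diff below shows), so their dma_map_*/dma_alloc_* calls take that direct path; swiotlb bouncing and the noncoherent-cache allocator (now arch_dma_alloc/arch_dma_free) are reached through the generic direct code rather than per-arch ops.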
@@ -18,14 +18,6 @@
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
-/* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag,
-					unsigned long attrs);
-extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_handle,
-				      unsigned long attrs);
-
 static inline unsigned long device_to_mask(struct device *dev)
 {
 	if (dev->dma_mask && *dev->dma_mask)
@@ -40,7 +32,6 @@ static inline unsigned long device_to_mask(struct device *dev)
 #ifdef CONFIG_PPC64
 extern const struct dma_map_ops dma_iommu_ops;
 #endif
-extern const struct dma_map_ops dma_nommu_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
@@ -13,10 +13,7 @@
 
 #include <linux/swiotlb.h>
 
-extern const struct dma_map_ops powerpc_swiotlb_dma_ops;
-
 extern unsigned int ppc_swiotlb_enable;
-int __init swiotlb_setup_bus_notifier(void);
 
 #ifdef CONFIG_SWIOTLB
 void swiotlb_detect_4g(void);
@@ -36,7 +36,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
 				   process.o systbl.o idle.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o dma.o misc_$(BITS).o \
+				   udbg.o misc.o io.o misc_$(BITS).o \
 				   of_platform.o prom_parse.o
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
@@ -10,70 +10,12 @@
  * option) any later version.
  *
  */
 
-#include <linux/dma-direct.h>
 #include <linux/memblock.h>
-#include <linux/pfn.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
-
 #include <asm/machdep.h>
 #include <asm/swiotlb.h>
-#include <asm/dma.h>
 
 unsigned int ppc_swiotlb_enable;
-
-/*
- * At the moment, all platforms that use this code only require
- * swiotlb to be used if we're operating on HIGHMEM. Since
- * we don't ever call anything other than map_sg, unmap_sg,
- * map_page, and unmap_page on highmem, use normal dma_ops
- * for everything else.
- */
-const struct dma_map_ops powerpc_swiotlb_dma_ops = {
-	.alloc = dma_direct_alloc,
-	.free = dma_direct_free,
-	.map_sg = dma_direct_map_sg,
-	.unmap_sg = dma_direct_unmap_sg,
-	.dma_supported = dma_direct_supported,
-	.map_page = dma_direct_map_page,
-	.unmap_page = dma_direct_unmap_page,
-	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
-	.sync_single_for_device = dma_direct_sync_single_for_device,
-	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
-	.sync_sg_for_device = dma_direct_sync_sg_for_device,
-	.get_required_mask = dma_direct_get_required_mask,
-};
-
-static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
-				  unsigned long action, void *data)
-{
-	struct device *dev = data;
-
-	/* We are only intereted in device addition */
-	if (action != BUS_NOTIFY_ADD_DEVICE)
-		return 0;
-
-	/* May need to bounce if the device can't address all of DRAM */
-	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
-		set_dma_ops(dev, &powerpc_swiotlb_dma_ops);
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
-	.notifier_call = ppc_swiotlb_bus_notify,
-	.priority = 0,
-};
-
-int __init swiotlb_setup_bus_notifier(void)
-{
-	bus_register_notifier(&platform_bus_type,
-			      &ppc_swiotlb_plat_bus_notifier);
-	return 0;
-}
-
 void __init swiotlb_detect_4g(void)
 {
 	if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
- *
- * Provide default implementations of the DMA mapping callbacks for
- * directly mapped busses.
- */
-
-#include <linux/device.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-debug.h>
-#include <linux/gfp.h>
-#include <linux/memblock.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/bug.h>
-#include <asm/machdep.h>
-#include <asm/swiotlb.h>
-#include <asm/iommu.h>
-
-/*
- * Generic direct DMA implementation
- *
- * This implementation supports a per-device offset that can be applied if
- * the address at which memory is visible to devices is not 0. Platform code
- * can set archdata.dma_data to an unsigned long holding the offset. By
- * default the offset is PCI_DRAM_OFFSET.
- */
-
-const struct dma_map_ops dma_nommu_ops = {
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	.alloc = __dma_nommu_alloc_coherent,
-	.free = __dma_nommu_free_coherent,
-#else
-	.alloc = dma_direct_alloc,
-	.free = dma_direct_free,
-#endif
-	.map_sg = dma_direct_map_sg,
-	.dma_supported = dma_direct_supported,
-	.map_page = dma_direct_map_page,
-	.get_required_mask = dma_direct_get_required_mask,
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	.unmap_sg = dma_direct_unmap_sg,
-	.unmap_page = dma_direct_unmap_page,
-	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
-	.sync_single_for_device = dma_direct_sync_single_for_device,
-	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
-	.sync_sg_for_device = dma_direct_sync_sg_for_device,
-#endif
-};
-EXPORT_SYMBOL(dma_nommu_ops);
-
-static int __init dma_init(void)
-{
-#ifdef CONFIG_IBMVIO
-	dma_debug_add_bus(&vio_bus_type);
-#endif
-
-	return 0;
-}
-fs_initcall(dma_init);
@@ -62,7 +62,7 @@ resource_size_t isa_mem_base;
 EXPORT_SYMBOL(isa_mem_base);
 
 
-static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops;
+static const struct dma_map_ops *pci_dma_ops;
 
 void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
 {
@@ -791,7 +791,6 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
 {
 	pdev->archdata.dma_mask = DMA_BIT_MASK(32);
 	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
-	set_dma_ops(&pdev->dev, &dma_nommu_ops);
 }
 
 static __init void print_system_info(void)
@@ -152,8 +152,8 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	struct page *page;
 	struct ppc_vm_region *c;
@@ -254,7 +254,7 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 /*
  * free a page as defined by the above mapping.
  */
-void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct ppc_vm_region *c;
@@ -47,7 +47,7 @@ static int __init warp_probe(void)
 	if (!of_machine_is_compatible("pika,warp"))
 		return 0;
 
-	/* For __dma_nommu_alloc_coherent */
+	/* For arch_dma_alloc */
 	ISA_DMA_THRESHOLD = ~0L;
 
 	return 1;
@@ -223,7 +223,3 @@ define_machine(corenet_generic) {
 };
 
 machine_arch_initcall(corenet_generic, corenet_gen_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(corenet_generic, swiotlb_setup_bus_notifier);
-#endif

@@ -202,8 +202,6 @@ static int __init ge_imp3a_probe(void)
 
 machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
 
-machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
-
 define_machine(ge_imp3a) {
 	.name		= "GE_IMP3A",
 	.probe		= ge_imp3a_probe,

@@ -57,8 +57,6 @@ static void __init mpc8536_ds_setup_arch(void)
 
 machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
 
-machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
-
 /*
  * Called very early, device-tree isn't unflattened
  */

@@ -174,10 +174,6 @@ machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
 machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
 machine_arch_initcall(p2020_ds, mpc85xx_common_publish_devices);
 
-machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
-machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier);
-machine_arch_initcall(p2020_ds, swiotlb_setup_bus_notifier);
-
 /*
  * Called very early, device-tree isn't unflattened
  */

@@ -367,10 +367,6 @@ machine_arch_initcall(mpc8568_mds, mpc85xx_publish_devices);
 machine_arch_initcall(mpc8569_mds, mpc85xx_publish_devices);
 machine_arch_initcall(p1021_mds, mpc85xx_common_publish_devices);
 
-machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
-machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
-machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier);
-
 static void __init mpc85xx_mds_pic_init(void)
 {
 	struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |

@@ -55,7 +55,6 @@ static void __init p1010_rdb_setup_arch(void)
 }
 
 machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices);
-machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
 
 /*
  * Called very early, device-tree isn't unflattened

@@ -548,8 +548,6 @@ static void __init p1022_ds_setup_arch(void)
 
 machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices);
 
-machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
-
 /*
  * Called very early, device-tree isn't unflattened
  */

@@ -128,8 +128,6 @@ static void __init p1022_rdk_setup_arch(void)
 
 machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices);
 
-machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier);
-
 /*
  * Called very early, device-tree isn't unflattened
  */

@@ -121,7 +121,6 @@ static int __init declare_of_platform_devices(void)
 	return 0;
 }
 machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices);
-machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier);
 
 define_machine(mpc86xx_hpcn) {
 	.name		= "MPC86xx HPCN",
@@ -600,8 +600,6 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
 
 	if (cell_iommu_enabled)
 		dev->dma_ops = &dma_iommu_ops;
-	else
-		dev->dma_ops = &dma_nommu_ops;
 	cell_dma_dev_setup(dev);
 	return 0;
 }
@@ -727,7 +725,6 @@ static int __init cell_iommu_init_disabled(void)
 	unsigned long base = 0, size;
 
 	/* When no iommu is present, we use direct DMA ops */
-	set_pci_dma_ops(&dma_nommu_ops);
 
 	/* First make sure all IOC translation is turned off */
 	cell_disable_iommus();
@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
 	 */
 	if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
	    !firmware_has_feature(FW_FEATURE_LPAR)) {
-		dev->dev.dma_ops = &dma_nommu_ops;
+		dev->dev.dma_ops = NULL;
 		/*
 		 * Set the coherent DMA mask to prevent the iommu
 		 * being used unnecessarily
@@ -411,55 +411,6 @@ out:
 	return !!(srr1 & 0x2);
 }
 
-#ifdef CONFIG_PCMCIA
-static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
-			 void *data)
-{
-	struct device *dev = data;
-	struct device *parent;
-	struct pcmcia_device *pdev = to_pcmcia_dev(dev);
-
-	/* We are only intereted in device addition */
-	if (action != BUS_NOTIFY_ADD_DEVICE)
-		return 0;
-
-	parent = pdev->socket->dev.parent;
-
-	/* We know electra_cf devices will always have of_node set, since
-	 * electra_cf is an of_platform driver.
-	 */
-	if (!parent->of_node)
-		return 0;
-
-	if (!of_device_is_compatible(parent->of_node, "electra-cf"))
-		return 0;
-
-	/* We use the direct ops for localbus */
-	dev->dma_ops = &dma_nommu_ops;
-
-	return 0;
-}
-
-static struct notifier_block pcmcia_notifier = {
-	.notifier_call = pcmcia_notify,
-};
-
-static inline void pasemi_pcmcia_init(void)
-{
-	extern struct bus_type pcmcia_bus_type;
-
-	bus_register_notifier(&pcmcia_bus_type, &pcmcia_notifier);
-}
-
-#else
-
-static inline void pasemi_pcmcia_init(void)
-{
-}
-
-#endif
-
-
 static const struct of_device_id pasemi_bus_ids[] = {
 	/* Unfortunately needed for legacy firmwares */
 	{ .type = "localbus", },
@@ -472,8 +423,6 @@ static const struct of_device_id pasemi_bus_ids[] = {
 
 static int __init pasemi_publish_devices(void)
 {
-	pasemi_pcmcia_init();
-
 	/* Publish OF platform devices for SDC and other non-PCI devices */
 	of_platform_bus_probe(NULL, pasemi_bus_ids, NULL);
 
@@ -220,7 +220,7 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
 	 * their parent device so drivers shouldn't be doing DMA
 	 * operations directly on these devices.
 	 */
-	set_dma_ops(&npe->pdev->dev, NULL);
+	set_dma_ops(&npe->pdev->dev, &dma_dummy_ops);
 }
 
 /*
@@ -1699,3 +1699,10 @@ int vio_disable_interrupts(struct vio_dev *dev)
 }
 EXPORT_SYMBOL(vio_disable_interrupts);
 #endif /* CONFIG_PPC_PSERIES */
+
+static int __init vio_init(void)
+{
+	dma_debug_add_bus(&vio_bus_type);
+	return 0;
+}
+fs_initcall(vio_init);
@@ -124,10 +124,8 @@ static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
 
 static void setup_swiotlb_ops(struct pci_controller *hose)
 {
-	if (ppc_swiotlb_enable) {
+	if (ppc_swiotlb_enable)
 		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
-		set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
-	}
 }
 #else
 static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
@@ -141,7 +139,6 @@ static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
 	 */
 	if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
 		dev->bus_dma_mask = 0;
-		set_dma_ops(dev, &dma_nommu_ops);
 		set_dma_offset(dev, pci64_dma_offset);
 	}
 }
@@ -43,7 +43,6 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
 		return false;
 	}
 
-	set_dma_ops(&dev->dev, &dma_nommu_ops);
 	set_dma_offset(&dev->dev, PAGE_OFFSET);
 
 	/*