/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 *
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
|
2009-04-24 08:44:38 +08:00
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/scatterlist.h>
|
2010-10-02 04:27:34 +08:00
|
|
|
#include <linux/bootmem.h>
|
2011-07-29 06:46:31 +08:00
|
|
|
#include <linux/export.h>
|
2010-10-02 04:27:34 +08:00
|
|
|
#include <linux/swiotlb.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/mm.h>
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
#include <asm/bootinfo.h>
|
2009-04-24 08:44:38 +08:00
|
|
|
|
|
|
|
#include <asm/octeon/octeon.h>
|
2010-10-02 04:27:34 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_PCI
|
|
|
|
#include <asm/octeon/pci-octeon.h>
|
2009-04-24 08:44:38 +08:00
|
|
|
#include <asm/octeon/cvmx-npi-defs.h>
|
|
|
|
#include <asm/octeon/cvmx-pci-defs.h>
|
2009-01-09 08:46:40 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
|
|
|
|
{
|
|
|
|
if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
|
|
|
|
return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
|
|
|
|
else
|
|
|
|
return paddr;
|
|
|
|
}
|
2009-01-09 08:46:40 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
|
|
|
|
return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
|
|
|
|
else
|
|
|
|
return daddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
|
|
|
|
{
|
|
|
|
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
|
|
|
|
paddr -= 0x400000000ull;
|
|
|
|
return octeon_hole_phys_to_dma(paddr);
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
daddr = octeon_hole_dma_to_phys(daddr);
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
|
|
|
|
daddr += 0x400000000ull;
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
return daddr;
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2011-11-22 22:47:04 +08:00
|
|
|
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
|
|
|
|
{
|
|
|
|
return octeon_hole_phys_to_dma(paddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
return octeon_hole_dma_to_phys(daddr);
|
|
|
|
}
|
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
|
2009-01-09 08:46:40 +08:00
|
|
|
{
|
2010-10-02 04:27:34 +08:00
|
|
|
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
|
|
|
|
paddr -= 0x400000000ull;
|
|
|
|
|
|
|
|
/* Anything in the BAR1 hole or above goes via BAR2 */
|
|
|
|
if (paddr >= 0xf0000000ull)
|
|
|
|
paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
|
|
|
|
|
|
|
|
return paddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
|
|
|
|
daddr -= OCTEON_BAR2_PCI_ADDRESS;
|
|
|
|
|
|
|
|
if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
|
|
|
|
daddr += 0x400000000ull;
|
|
|
|
return daddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
|
|
|
|
phys_addr_t paddr)
|
|
|
|
{
|
|
|
|
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
|
|
|
|
paddr -= 0x400000000ull;
|
|
|
|
|
|
|
|
/* Anything not in the BAR1 range goes via BAR2 */
|
|
|
|
if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
|
|
|
|
paddr = paddr - octeon_bar1_pci_phys;
|
|
|
|
else
|
|
|
|
paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
|
|
|
|
|
|
|
|
return paddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
|
|
|
|
dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
|
|
|
|
daddr -= OCTEON_BAR2_PCI_ADDRESS;
|
|
|
|
else
|
|
|
|
daddr += octeon_bar1_pci_phys;
|
|
|
|
|
|
|
|
if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
|
|
|
|
daddr += 0x400000000ull;
|
|
|
|
return daddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_PCI */
|
|
|
|
|
|
|
|
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
|
|
|
|
unsigned long offset, size_t size, enum dma_data_direction direction,
|
2016-08-04 04:46:00 +08:00
|
|
|
unsigned long attrs)
|
2010-10-02 04:27:34 +08:00
|
|
|
{
|
|
|
|
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
|
|
|
|
direction, attrs);
|
2009-01-09 08:46:40 +08:00
|
|
|
mb();
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
return daddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
2016-08-04 04:46:00 +08:00
|
|
|
int nents, enum dma_data_direction direction, unsigned long attrs)
|
2010-10-02 04:27:34 +08:00
|
|
|
{
|
|
|
|
int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
|
2009-04-24 08:44:38 +08:00
|
|
|
mb();
|
2010-10-02 04:27:34 +08:00
|
|
|
return r;
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static void octeon_dma_sync_single_for_device(struct device *dev,
|
|
|
|
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
|
|
|
|
{
|
|
|
|
swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
|
|
|
|
mb();
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static void octeon_dma_sync_sg_for_device(struct device *dev,
|
|
|
|
struct scatterlist *sg, int nelems, enum dma_data_direction direction)
|
|
|
|
{
|
|
|
|
swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
|
|
|
|
mb();
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
|
2016-08-04 04:46:00 +08:00
|
|
|
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
|
2010-10-02 04:27:34 +08:00
|
|
|
{
|
2017-12-24 20:37:55 +08:00
|
|
|
void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
mb();
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
|
2009-01-09 08:46:40 +08:00
|
|
|
{
|
2010-10-02 04:27:34 +08:00
|
|
|
return paddr;
|
|
|
|
}
|
2009-04-24 08:44:38 +08:00
|
|
|
|
2010-10-02 04:27:34 +08:00
|
|
|
static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
return daddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct octeon_dma_map_ops {
|
2017-01-21 05:04:01 +08:00
|
|
|
const struct dma_map_ops dma_map_ops;
|
2010-10-02 04:27:34 +08:00
|
|
|
dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
|
|
|
|
phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
|
|
|
|
};
|
|
|
|
|
|
|
|
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
|
|
|
{
|
|
|
|
struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
|
|
|
|
struct octeon_dma_map_ops,
|
|
|
|
dma_map_ops);
|
|
|
|
|
|
|
|
return ops->phys_to_dma(dev, paddr);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(phys_to_dma);
|
|
|
|
|
|
|
|
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|
|
|
{
|
|
|
|
struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
|
|
|
|
struct octeon_dma_map_ops,
|
|
|
|
dma_map_ops);
|
|
|
|
|
|
|
|
return ops->dma_to_phys(dev, daddr);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dma_to_phys);
|
|
|
|
|
|
|
|
static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
|
|
|
|
.dma_map_ops = {
|
2012-03-27 20:32:21 +08:00
|
|
|
.alloc = octeon_dma_alloc_coherent,
|
2017-12-24 20:37:55 +08:00
|
|
|
.free = swiotlb_free,
|
2010-10-02 04:27:34 +08:00
|
|
|
.map_page = octeon_dma_map_page,
|
|
|
|
.unmap_page = swiotlb_unmap_page,
|
|
|
|
.map_sg = octeon_dma_map_sg,
|
|
|
|
.unmap_sg = swiotlb_unmap_sg_attrs,
|
|
|
|
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
|
|
|
|
.sync_single_for_device = octeon_dma_sync_single_for_device,
|
|
|
|
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
|
|
|
|
.sync_sg_for_device = octeon_dma_sync_sg_for_device,
|
|
|
|
.mapping_error = swiotlb_dma_mapping_error,
|
|
|
|
.dma_supported = swiotlb_dma_supported
|
|
|
|
},
|
|
|
|
.phys_to_dma = octeon_unity_phys_to_dma,
|
|
|
|
.dma_to_phys = octeon_unity_dma_to_phys
|
|
|
|
};
|
|
|
|
|
|
|
|
char *octeon_swiotlb;
|
|
|
|
|
|
|
|
void __init plat_swiotlb_setup(void)
|
|
|
|
{
|
|
|
|
int i;
|
2014-11-22 07:22:09 +08:00
|
|
|
phys_addr_t max_addr;
|
|
|
|
phys_addr_t addr_size;
|
2010-10-02 04:27:34 +08:00
|
|
|
size_t swiotlbsize;
|
|
|
|
unsigned long swiotlb_nslabs;
|
|
|
|
|
|
|
|
max_addr = 0;
|
|
|
|
addr_size = 0;
|
|
|
|
|
|
|
|
for (i = 0 ; i < boot_mem_map.nr_map; i++) {
|
|
|
|
struct boot_mem_map_entry *e = &boot_mem_map.map[i];
|
2011-11-22 22:47:04 +08:00
|
|
|
if (e->type != BOOT_MEM_RAM && e->type != BOOT_MEM_INIT_RAM)
|
2010-10-02 04:27:34 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/* These addresses map low for PCI. */
|
2015-01-15 21:11:14 +08:00
|
|
|
if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
|
2010-10-02 04:27:34 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
addr_size += e->size;
|
|
|
|
|
|
|
|
if (max_addr < e->addr + e->size)
|
|
|
|
max_addr = e->addr + e->size;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
swiotlbsize = PAGE_SIZE;
|
|
|
|
|
|
|
|
#ifdef CONFIG_PCI
|
2009-04-24 08:44:38 +08:00
|
|
|
/*
|
2010-10-02 04:27:34 +08:00
|
|
|
* For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
|
|
|
|
* size to a maximum of 64MB
|
2009-04-24 08:44:38 +08:00
|
|
|
*/
|
2010-10-02 04:27:34 +08:00
|
|
|
if (OCTEON_IS_MODEL(OCTEON_CN31XX)
|
|
|
|
|| OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
|
|
|
|
swiotlbsize = addr_size / 4;
|
|
|
|
if (swiotlbsize > 64 * (1<<20))
|
|
|
|
swiotlbsize = 64 * (1<<20);
|
|
|
|
} else if (max_addr > 0xf0000000ul) {
|
|
|
|
/*
|
|
|
|
* Otherwise only allocate a big iotlb if there is
|
|
|
|
* memory past the BAR1 hole.
|
|
|
|
*/
|
|
|
|
swiotlbsize = 64 * (1<<20);
|
|
|
|
}
|
2011-11-22 22:47:04 +08:00
|
|
|
#endif
|
2015-03-05 05:08:49 +08:00
|
|
|
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
|
2011-11-22 22:47:04 +08:00
|
|
|
/* OCTEON II ohci is only 32-bit. */
|
2015-01-15 21:11:14 +08:00
|
|
|
if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
|
2011-11-22 22:47:04 +08:00
|
|
|
swiotlbsize = 64 * (1<<20);
|
2010-10-02 04:27:34 +08:00
|
|
|
#endif
|
|
|
|
swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
|
|
|
|
swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
|
|
|
|
swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
|
|
|
|
|
|
|
|
octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
|
2009-04-24 08:44:38 +08:00
|
|
|
|
x86: Don't panic if can not alloc buffer for swiotlb
Normal boot path on system with iommu support:
swiotlb buffer will be allocated early at first and then try to initialize
iommu, if iommu for intel or AMD could setup properly, swiotlb buffer
will be freed.
The early allocating is with bootmem, and could panic when we try to use
kdump with buffer above 4G only, or with memmap to limit mem under 4G.
for example: memmap=4095M$1M to remove memory under 4G.
According to Eric, add _nopanic version and no_iotlb_memory to fail
map single later if swiotlb is still needed.
-v2: don't pass nopanic, and use -ENOMEM return value according to Eric.
panic early instead of using swiotlb_full to panic...according to Eric/Konrad.
-v3: make swiotlb_init to be notpanic, but will affect:
arm64, ia64, powerpc, tile, unicore32, x86.
-v4: cleanup swiotlb_init by removing swiotlb_init_with_default_size.
Suggested-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-36-git-send-email-yinghai@kernel.org
Reviewed-and-tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Cc: linux-mips@linux-mips.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: Shuah Khan <shuahkhan@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2013-01-25 04:20:16 +08:00
|
|
|
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
|
|
|
|
panic("Cannot allocate SWIOTLB buffer");
|
2010-10-02 04:27:34 +08:00
|
|
|
|
|
|
|
mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_PCI
|
|
|
|
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
|
|
|
|
.dma_map_ops = {
|
2012-03-27 20:32:21 +08:00
|
|
|
.alloc = octeon_dma_alloc_coherent,
|
2017-12-24 20:37:55 +08:00
|
|
|
.free = swiotlb_free,
|
2010-10-02 04:27:34 +08:00
|
|
|
.map_page = octeon_dma_map_page,
|
|
|
|
.unmap_page = swiotlb_unmap_page,
|
|
|
|
.map_sg = octeon_dma_map_sg,
|
|
|
|
.unmap_sg = swiotlb_unmap_sg_attrs,
|
|
|
|
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
|
|
|
|
.sync_single_for_device = octeon_dma_sync_single_for_device,
|
|
|
|
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
|
|
|
|
.sync_sg_for_device = octeon_dma_sync_sg_for_device,
|
|
|
|
.mapping_error = swiotlb_dma_mapping_error,
|
|
|
|
.dma_supported = swiotlb_dma_supported
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2017-01-21 05:04:01 +08:00
|
|
|
const struct dma_map_ops *octeon_pci_dma_map_ops;
|
2010-10-02 04:27:34 +08:00
|
|
|
|
|
|
|
void __init octeon_pci_dma_init(void)
|
|
|
|
{
|
2009-04-24 08:44:38 +08:00
|
|
|
switch (octeon_dma_bar_type) {
|
2011-11-22 22:47:04 +08:00
|
|
|
case OCTEON_DMA_BAR_TYPE_PCIE2:
|
|
|
|
_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
|
|
|
|
_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
|
|
|
|
break;
|
2009-04-24 08:44:38 +08:00
|
|
|
case OCTEON_DMA_BAR_TYPE_PCIE:
|
2010-10-02 04:27:34 +08:00
|
|
|
_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
|
|
|
|
_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
|
|
|
|
break;
|
2009-04-24 08:44:38 +08:00
|
|
|
case OCTEON_DMA_BAR_TYPE_BIG:
|
2010-10-02 04:27:34 +08:00
|
|
|
_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
|
|
|
|
_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
|
2009-04-24 08:44:38 +08:00
|
|
|
break;
|
|
|
|
case OCTEON_DMA_BAR_TYPE_SMALL:
|
2010-10-02 04:27:34 +08:00
|
|
|
_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
|
|
|
|
_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
|
2009-04-24 08:44:38 +08:00
|
|
|
break;
|
|
|
|
default:
|
2010-10-02 04:27:34 +08:00
|
|
|
BUG();
|
2009-04-24 08:44:38 +08:00
|
|
|
}
|
2010-10-02 04:27:34 +08:00
|
|
|
octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
|
2009-01-09 08:46:40 +08:00
|
|
|
}
|
2010-10-02 04:27:34 +08:00
|
|
|
#endif /* CONFIG_PCI */
|