#ifndef __ASM_POWERPC_PCI_H
#define __ASM_POWERPC_PCI_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/machdep.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>

#include <asm-generic/pci-dma-compat.h>

/* Return values for ppc_md.pci_probe_mode function */
#define PCI_PROBE_NONE		-1	/* Don't look at this bus at all */
#define PCI_PROBE_NORMAL	0	/* Do normal PCI probing */
#define PCI_PROBE_DEVTREE	1	/* Instantiate from device tree */
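/*
 * Minimal sketch (illustrative, not part of this header): a platform's
 * ppc_md.pci_probe_mode hook returns one of the PCI_PROBE_* values above
 * to tell the PCI layer how to enumerate a given bus.  The hook name and
 * the bus test below are hypothetical.
 *
 *	static int myplat_pci_probe_mode(struct pci_bus *bus)
 *	{
 *		if (bus->self == NULL)		// root bus: use firmware info
 *			return PCI_PROBE_DEVTREE;
 *		return PCI_PROBE_NORMAL;	// everything else: config probing
 *	}
 */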
#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

struct pci_dev;

/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4
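/*
 * Minimal sketch (illustrative, not part of this header): user space
 * passes one of the IOBASE_* selectors to the pciconfig_iobase syscall
 * to look up a host bridge's base addresses.  Assumes the C library
 * exposes __NR_pciconfig_iobase on this platform; bus and devfn are
 * placeholder values.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned long bus = 0, devfn = 0;
 *	long io_base = syscall(__NR_pciconfig_iobase, IOBASE_IO,
 *			       bus, devfn);
 */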
/*
 * Set this to 1 if you want the kernel to re-assign all PCI
 * bus numbers (don't do that on ppc64 yet !)
 */
#define pcibios_assign_all_busses() \
	(ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))

static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	if (ppc_md.pci_get_legacy_ide_irq)
		return ppc_md.pci_get_legacy_ide_irq(dev, channel);
	return channel ? 15 : 14;
}

#ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
extern struct dma_map_ops *get_pci_dma_ops(void);
#else	/* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops()	NULL
#endif
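/*
 * Minimal sketch (illustrative, not part of this header): platform setup
 * code installs the DMA operations that newly probed PCI devices will
 * inherit.  The setup function name and the iommu_present flag are
 * hypothetical; dma_direct_ops is assumed to be this platform's
 * non-IOMMU dma_map_ops instance.
 *
 *	void __init myplat_setup_arch(void)
 *	{
 *		if (!iommu_present)
 *			set_pci_dma_ops(&dma_direct_ops);
 *	}
 */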
#ifdef CONFIG_PPC64

/*
 * We want to avoid touching the cacheline size or MWI bit.
 * pSeries firmware sets the cacheline size (which is not the cpu cacheline
 * size in all cases) and hardware treats MWI the same as memory write.
 */
#define PCI_DISABLE_MWI

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif

#else /* 32-bit */

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}
#endif
#endif /* CONFIG_PPC64 */

extern int pci_domain_nr(struct pci_bus *bus);

/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);

/* MSI arch hooks */
#define arch_setup_msi_irqs arch_setup_msi_irqs
#define arch_teardown_msi_irqs arch_teardown_msi_irqs
#define arch_msi_check_device arch_msi_check_device
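/*
 * Note (illustrative, not part of this header): defining the macros
 * above tells the generic MSI code that this architecture supplies its
 * own hook implementations; their expected prototypes are roughly the
 * ones below (see linux/msi.h for the authoritative declarations).
 *
 *	int arch_msi_check_device(struct pci_dev *dev, int nvec, int type);
 *	int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 *	void arch_teardown_msi_irqs(struct pci_dev *dev);
 */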
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine);

/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP	1
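/*
 * Minimal sketch (illustrative, not part of this header): a character
 * device mmap handler exporting a device's memory BAR to user space.
 * The handler name and the use of file->private_data are placeholders;
 * the in-tree callers live under drivers/pci.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct pci_dev *pdev = file->private_data;
 *
 *		return pci_mmap_page_range(pdev, vma, pci_mmap_mem, 0);
 *	}
 */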
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
			   size_t count);
extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
			    size_t count);
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
				      struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state);

#define HAVE_PCI_LEGACY	1

#ifdef CONFIG_PPC64

/* The PCI address space does not equal the physical memory address
 * space (we have an IOMMU).  The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(0)
#else /* 32-bit */

/* The PCI address space does equal the physical memory
 * address space (no IOMMU).  The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(1)

#endif /* CONFIG_PPC64 */
extern void pcibios_resource_to_bus(struct pci_dev *dev,
			struct pci_bus_region *region,
			struct resource *res);
extern void pcibios_bus_to_resource(struct pci_dev *dev,
			struct resource *res,
			struct pci_bus_region *region);
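/*
 * Minimal sketch (illustrative, not part of this header): converting a
 * BAR's CPU-side resource into the address range the device sees on the
 * bus, and back again.  "dev" is a placeholder pci_dev pointer.
 *
 *	struct pci_bus_region region;
 *
 *	pcibios_resource_to_bus(dev, &region, &dev->resource[0]);
 *	// region.start/region.end now hold BAR 0's bus addresses
 *	pcibios_bus_to_resource(dev, &dev->resource[0], &region);
 */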
extern void pcibios_claim_one_bus(struct pci_bus *b);

extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);

extern void pcibios_resource_survey(void);

extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
extern int remove_phb_dynamic(struct pci_controller *phb);

extern struct pci_dev *of_create_pci_dev(struct device_node *node,
					 struct pci_bus *bus, int devfn);

extern void of_scan_pci_bridge(struct device_node *node,
			       struct pci_dev *dev);

extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);

extern int pci_read_irq_line(struct pci_dev *dev);

struct file;
extern pgprot_t	pci_phys_mem_access_prot(struct file *file,
					 unsigned long pfn,
					 unsigned long size,
					 pgprot_t prot);

#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end);

extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata);

#endif	/* __KERNEL__ */
#endif	/* __ASM_POWERPC_PCI_H */