[IA64] SN specific version of dma_get_required_mask()
Create a platform specific version of dma_get_required_mask() for ia64 SN
Altix.  All SN Altix platforms support 64 bit DMA addressing regardless of
the size of system memory.  Create an ia64 machvec for dma_get_required_mask,
with the SN version unconditionally returning DMA_64BIT_MASK.

Signed-off-by: John Keller <jpk@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
commit 175add1981
parent a6a3bb5c88
@@ -170,16 +170,15 @@ Returns: 0 if successful and a negative error if not.
 u64
 dma_get_required_mask(struct device *dev)
 
-After setting the mask with dma_set_mask(), this API returns the
-actual mask (within that already set) that the platform actually
-requires to operate efficiently. Usually this means the returned mask
+This API returns the mask that the platform requires to
+operate efficiently. Usually this means the returned mask
 is the minimum required to cover all of memory. Examining the
 required mask gives drivers with variable descriptor sizes the
 opportunity to use smaller descriptors as necessary.
 
 Requesting the required mask does not alter the current mask. If you
-wish to take advantage of it, you should issue another dma_set_mask()
-call to lower the mask again.
+wish to take advantage of it, you should issue a dma_set_mask()
+call to set the mask to the value returned.
 
 
 Part Id - Streaming DMA mappings
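
The documentation above spells out the intended driver usage: query the
required mask, then set the device mask accordingly. A minimal sketch of that
pattern follows (illustrative only, not part of this patch; example_setup_dma()
and the use_64bit_desc flag are hypothetical, and <linux/dma-mapping.h> plus
<linux/errno.h> are assumed to be included):

	static int example_setup_dma(struct device *dev, int *use_64bit_desc)
	{
		u64 required = dma_get_required_mask(dev);

		/* Only pay for 64-bit descriptors when the platform can
		 * actually hand the device addresses above 4 GB. */
		if (required > DMA_32BIT_MASK &&
		    !dma_set_mask(dev, DMA_64BIT_MASK)) {
			*use_64bit_desc = 1;
			return 0;
		}
		if (!dma_set_mask(dev, DMA_32BIT_MASK)) {
			*use_64bit_desc = 0;
			return 0;
		}
		return -EIO;		/* no usable DMA mask */
	}

On SN Altix the returned mask is always DMA_64BIT_MASK, so such a driver would
opt into its 64-bit descriptor format there.
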
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 struct dma_mapping_ops {
 	int (*mapping_error)(struct device *dev,
 			     dma_addr_t dma_addr);
@@ -62,6 +62,7 @@ typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t
 typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
 typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -159,6 +160,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
 # define platform_dma_mapping_error ia64_mv.dma_mapping_error
 # define platform_dma_supported ia64_mv.dma_supported
+# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
 # define platform_irq_to_vector ia64_mv.irq_to_vector
 # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
 # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
@@ -213,6 +215,7 @@ struct ia64_machine_vector {
 	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
 	ia64_mv_dma_mapping_error *dma_mapping_error;
 	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_dma_get_required_mask *dma_get_required_mask;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -263,6 +266,7 @@ struct ia64_machine_vector {
 	platform_dma_sync_sg_for_device,	\
 	platform_dma_mapping_error,		\
 	platform_dma_supported,			\
+	platform_dma_get_required_mask,		\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -366,6 +370,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 #ifndef platform_dma_supported
 # define platform_dma_supported swiotlb_dma_supported
 #endif
+#ifndef platform_dma_get_required_mask
+# define platform_dma_get_required_mask ia64_dma_get_required_mask
+#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector __ia64_irq_to_vector
 #endif
@@ -3,6 +3,7 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
@@ -67,6 +67,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
 extern ia64_mv_dma_supported sn_dma_supported;
+extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
 extern ia64_mv_migrate_t sn_migrate;
 extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
@@ -123,6 +124,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error sn_dma_mapping_error
 #define platform_dma_supported sn_dma_supported
+#define platform_dma_get_required_mask sn_dma_get_required_mask
 #define platform_migrate sn_migrate
 #define platform_kernel_launch_event sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
@@ -19,6 +19,7 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/bootmem.h>
 
 #include <asm/machvec.h>
 #include <asm/page.h>
@@ -748,6 +749,32 @@ static void __init set_pci_cacheline_size(void)
 	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
+u64 ia64_dma_get_required_mask(struct device *dev)
+{
+	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+	u64 mask;
+
+	if (!high_totalram) {
+		/* convert to mask just covering totalram */
+		low_totalram = (1 << (fls(low_totalram) - 1));
+		low_totalram += low_totalram - 1;
+		mask = low_totalram;
+	} else {
+		high_totalram = (1 << (fls(high_totalram) - 1));
+		high_totalram += high_totalram - 1;
+		mask = (((u64)high_totalram) << 32) + 0xffffffff;
+	}
+	return mask;
+}
+EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
+
+u64 dma_get_required_mask(struct device *dev)
+{
+	return platform_dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+
 static int __init pcibios_init(void)
 {
 	set_pci_cacheline_size();
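
The generic ia64 fallback added above derives the required mask from max_pfn.
A couple of worked illustrations, assuming the default 16 KB ia64 pages
(PAGE_SHIFT = 14) and memory contiguous from 0; these figures are examples
only, not part of the patch:

	/*
	 * 2 GB of RAM:  max_pfn = 0x20000, high_totalram = 0,
	 *               low_totalram rounds to 0x7fffffff, so the
	 *               required mask is 0x7fffffff (31 bits).
	 *
	 * 8 GB of RAM:  max_pfn = 0x80000, high_totalram = 1,
	 *               so the required mask is 0x1ffffffff (33 bits).
	 *
	 * On SN Altix this fallback is never reached: the machvec routes
	 * dma_get_required_mask() to sn_dma_get_required_mask(), which
	 * returns DMA_64BIT_MASK unconditionally.
	 */
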
@@ -356,6 +356,12 @@ int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 EXPORT_SYMBOL(sn_dma_mapping_error);
 
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+	return DMA_64BIT_MASK;
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
+
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
 	if (!SN_PCIBUS_BUSSOFT(bus))