Miscellaneous ia64 fixes from Christoph
-----BEGIN PGP SIGNATURE-----

iIoEABYIADIWIQQW3WBGcnu5yJnSXn0kTJLX0iGMLAUCW84v0RQcdG9ueS5sdWNr
QGludGVsLmNvbQAKCRAkTJLX0iGMLGJGAP9fUhp7O4ef6PHxGtvmKHRqkTX6a4b5
/oASkd4qIetgzAEA7hwUopUllbq13IRqc+1Z93wymj4vGjT+jV+2unI0ZAc=
=3Yoq
-----END PGP SIGNATURE-----

Merge tag 'please-pull-next' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 updates from Tony Luck:

 "Miscellaneous ia64 fixes from Christoph"

* tag 'please-pull-next' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  intel-iommu: mark intel_dma_ops static
  ia64: remove machvec_dma_sync_{single,sg}
  ia64/sn2: remove no-ops dma sync methods
  ia64: remove the unused iommu_dma_init function
  ia64: remove the unused pci_iommu_shutdown function
  ia64: remove the unused bad_dma_address symbol
  ia64: remove iommu_dma_supported
  ia64: remove the dead iommu_sac_force variable
  ia64: remove the kern_mem_attribute export
commit 70408a9987
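The per-file diff headers are not shown in this view; the file names given below are inferred from the symbols in each hunk, not taken from the page. First, going by sba_dma_ops, this hunk is the SBA IOMMU code (arch/ia64/hp/common/sba_iommu.c); the four machvec sync callbacks are dropped from its dma_map_ops table: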
@@ -2207,10 +2207,6 @@ const struct dma_map_ops sba_dma_ops = {
 	.unmap_page = sba_unmap_page,
 	.map_sg = sba_map_sg_attrs,
 	.unmap_sg = sba_unmap_sg_attrs,
-	.sync_single_for_cpu = machvec_dma_sync_single,
-	.sync_sg_for_cpu = machvec_dma_sync_sg,
-	.sync_single_for_device = machvec_dma_sync_single,
-	.sync_sg_for_device = machvec_dma_sync_sg,
 	.dma_supported = sba_dma_supported,
 	.mapping_error = sba_dma_mapping_error,
 };
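Next, the extern declarations of the two machvec sync helpers go away, apparently from arch/ia64/include/asm/dma-mapping.h, judging by the surrounding get_arch_dma_ops() definition: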
@@ -14,11 +14,6 @@ extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
-extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return platform_dma_get_ops(NULL);
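Two hunks in what looks like arch/ia64/include/asm/iommu.h then drop the declarations of pci_iommu_shutdown() and iommu_dma_init(), whose empty stub bodies are deleted further down: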
@@ -5,7 +5,6 @@
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
 
-extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
 #ifdef CONFIG_INTEL_IOMMU
 extern int force_iommu, no_iommu;
@@ -16,7 +15,6 @@ extern int iommu_detected;
 #define no_iommu (1)
 #define iommu_detected (0)
 #endif
-extern void iommu_dma_init(void);
 extern void machvec_init(const char *name);
 
 #endif
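kern_mem_attribute() itself survives; only its EXPORT_SYMBOL is removed. By the function name this is arch/ia64/kernel/efi.c: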
@@ -842,7 +842,6 @@ kern_mem_attribute (unsigned long phys_addr, unsigned long size)
 	} while (md);
 	return 0;	/* never reached */
 }
-EXPORT_SYMBOL(kern_mem_attribute);
 
 int
 valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
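The machvec sync implementations unhooked above are deleted here, seemingly from arch/ia64/kernel/machvec.c; each body was nothing but an mb() memory barrier: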
@@ -73,19 +73,3 @@ machvec_timer_interrupt (int irq, void *dev_id)
 {
 }
 EXPORT_SYMBOL(machvec_timer_interrupt);
-
-void
-machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction dir)
-{
-	mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_single);
-
-void
-machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
-		enum dma_data_direction dir)
-{
-	mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_sg);
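The bulk of the removals land in what is evidently arch/ia64/kernel/pci-dma.c: the unused bad_dma_address symbol, the dead iommu_sac_force variable (and with it iommu_dma_supported(), its only reader), the empty pci_iommu_shutdown() and iommu_dma_init() stubs, the extern declaration of intel_dma_ops, and the runtime patching of intel_dma_ops in pci_iommu_alloc(). The dma_ops assignment removed here was redundant in any case, since intel_iommu_init() installs intel_dma_ops as the global dma_ops itself: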
@@ -15,11 +15,6 @@
 #include <linux/kernel.h>
 #include <asm/page.h>
 
-dma_addr_t bad_dma_address __read_mostly;
-EXPORT_SYMBOL(bad_dma_address);
-
-static int iommu_sac_force __read_mostly;
-
 int no_iommu __read_mostly;
 #ifdef CONFIG_IOMMU_DEBUG
 int force_iommu __read_mostly = 1;
@@ -29,8 +24,6 @@ int force_iommu __read_mostly;
 
 int iommu_pass_through;
 
-extern struct dma_map_ops intel_dma_ops;
-
 static int __init pci_iommu_init(void)
 {
 	if (iommu_detected)
@@ -42,56 +35,8 @@ static int __init pci_iommu_init(void)
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
 
-void pci_iommu_shutdown(void)
-{
-	return;
-}
-
-void __init
-iommu_dma_init(void)
-{
-	return;
-}
-
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
-	/* Copied from i386. Doesn't make much sense, because it will
-	   only work for pci_alloc_coherent.
-	   The caller just has to use GFP_DMA in this case. */
-	if (mask < DMA_BIT_MASK(24))
-		return 0;
-
-	/* Tell the device to use SAC when IOMMU force is on.  This
-	   allows the driver to use cheaper accesses in some cases.
-
-	   Problem with this is that if we overflow the IOMMU area and
-	   return DAC as fallback address the device may not handle it
-	   correctly.
-
-	   As a special case some controllers have a 39bit address
-	   mode that is as efficient as 32bit (aic79xx). Don't force
-	   SAC for these.  Assume all masks <= 40 bits are of this
-	   type. Normally this doesn't make any difference, but gives
-	   more gentle handling of IOMMU overflow. */
-	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
-		dev_info(dev, "Force SAC with mask %llx\n", mask);
-		return 0;
-	}
-
-	return 1;
-}
-EXPORT_SYMBOL(iommu_dma_supported);
-
 void __init pci_iommu_alloc(void)
 {
-	dma_ops = &intel_dma_ops;
-
-	intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
-	intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
-	intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
-	intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
-	intel_dma_ops.dma_supported = iommu_dma_supported;
-
 	/*
 	 * The order of these functions is important for
 	 * fall-back/fail-over reasons
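The sn2 hunks (arch/ia64/sn/pci/pci_dma.c, going by the sn_dma_* names) delete four sync methods whose only content was a dev_is_pci() assertion. Dropping them outright is safe because the dma-mapping core only calls a sync method when the ops table actually provides one. As a rough sketch, paraphrased from the include/linux/dma-mapping.h wrapper of that era (not part of this diff; details elided):

	static inline void dma_sync_single_for_cpu(struct device *dev,
			dma_addr_t addr, size_t size, enum dma_data_direction dir)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!valid_dma_direction(dir));
		if (ops->sync_single_for_cpu)	/* NULL hook: nothing to do */
			ops->sync_single_for_cpu(dev, addr, size, dir);
		debug_dma_sync_single_for_cpu(dev, addr, size, dir);
	}

With the methods gone, the sn_dma_ops initializer loses the corresponding entries: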
@@ -314,31 +314,6 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nhwentries;
 }
 
-static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-		size_t size,
-		enum dma_data_direction dir)
-{
-	BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir)
-{
-	BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir)
-{
-	BUG_ON(!dev_is_pci(dev));
-}
-
 static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
@@ -466,10 +441,6 @@ static struct dma_map_ops sn_dma_ops = {
 	.unmap_page = sn_dma_unmap_page,
 	.map_sg = sn_dma_map_sg,
 	.unmap_sg = sn_dma_unmap_sg,
-	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
-	.sync_single_for_device = sn_dma_sync_single_for_device,
-	.sync_sg_for_device = sn_dma_sync_sg_for_device,
 	.mapping_error = sn_dma_mapping_error,
 	.dma_supported = sn_dma_supported,
 	.get_required_mask = sn_dma_get_required_mask,
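Finally, in drivers/iommu/intel-iommu.c (inferred from intel_dma_ops), the ops table gains internal linkage now that no external code modifies it, and the CONFIG_X86 guard around .dma_supported is dropped so that dma_direct_supported() is used on ia64 as well: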
@@ -3895,7 +3895,7 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return !dma_addr;
 }
 
-const struct dma_map_ops intel_dma_ops = {
+static const struct dma_map_ops intel_dma_ops = {
 	.alloc = intel_alloc_coherent,
 	.free = intel_free_coherent,
 	.map_sg = intel_map_sg,
@@ -3903,9 +3903,7 @@ const struct dma_map_ops intel_dma_ops = {
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
 	.mapping_error = intel_mapping_error,
-#ifdef CONFIG_X86
 	.dma_supported = dma_direct_supported,
-#endif
 };
 
 static inline int iommu_domain_cache_init(void)