DMA mapping updates for Linux 4.21

Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping

Pull DMA mapping updates from Christoph Hellwig:
 "A huge update this time, but a lot of that is just consolidating or
  removing code:

   - provide a common DMA_MAPPING_ERROR definition and avoid indirect
     calls for dma_map_* error checking

   - use direct calls for the DMA direct mapping case, avoiding huge
     retpoline overhead for high performance workloads

   - merge the swiotlb dma_map_ops into dma-direct

   - provide a generic remapping DMA consistent allocator for
     architectures that have devices that perform DMA that is not cache
     coherent. Based on the existing arm64 implementation and also used
     for csky now.

   - improve the dma-debug infrastructure, including dynamic allocation
     of entries (Robin Murphy)

   - default to providing chaining scatterlist everywhere, with opt-outs
     for the few architectures (alpha, parisc, most arm32 variants) that
     can't cope with it

   - misc sparc32 dma-related cleanups

   - remove the dma_mark_clean arch hook used by swiotlb on ia64 and
     replace it with the generic noncoherent infrastructure

   - fix the return type of dma_set_max_seg_size (Niklas Söderlund)

   - move the dummy dma ops for not DMA capable devices from arm64 to
     common code (Robin Murphy)

   - ensure dma_alloc_coherent returns zeroed memory to avoid kernel
     data leaks through userspace. We already did this for most common
     architectures, but this ensures we do it everywhere.
     dma_zalloc_coherent has been deprecated and can hopefully be
     removed after -rc1 with a coccinelle script"

* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
  dma-mapping: fix inverted logic in dma_supported
  dma-mapping: deprecate dma_zalloc_coherent
  dma-mapping: zero memory returned from dma_alloc_*
  sparc/iommu: fix ->map_sg return value
  sparc/io-unit: fix ->map_sg return value
  arm64: default to the direct mapping in get_arch_dma_ops
  PCI: Remove unused attr variable in pci_dma_configure
  ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
  dma-mapping: bypass indirect calls for dma-direct
  vmd: use the proper dma_* APIs instead of direct methods calls
  dma-direct: merge swiotlb_dma_ops into the dma_direct code
  dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
  dma-direct: improve addressability error reporting
  swiotlb: remove dma_mark_clean
  swiotlb: remove SWIOTLB_MAP_ERROR
  ACPI / scan: Refactor _CCA enforcement
  dma-mapping: factor out dummy DMA ops
  dma-mapping: always build the direct mapping code
  dma-mapping: move dma_cache_sync out of line
  dma-mapping: move various slow path functions out of line
  ...
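The driver-facing contract is unchanged by the DMA_MAPPING_ERROR consolidation: drivers still test streaming mappings with dma_mapping_error() rather than comparing against any particular sentinel, only the per-ops ->mapping_error indirect call goes away. A minimal sketch of that pattern follows; the function and buffer names are illustrative, not taken from this series.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Map a driver-owned buffer for DMA to the device and validate the
 * result.  dma_mapping_error() remains the only supported way to test
 * for failure; the common DMA_MAPPING_ERROR value is an implementation
 * detail of the mapping layer and should not be open-coded in drivers.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *dma_out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *dma_out = addr;
        return 0;
}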
commit af7ddd8a62
@@ -58,15 +58,6 @@ specify the ``GFP_`` flags (see kmalloc()) for the allocation (the
 implementation may choose to ignore flags that affect the location of
 the returned memory, like GFP_DMA).
 
-::
-
-	void *
-	dma_zalloc_coherent(struct device *dev, size_t size,
-			dma_addr_t *dma_handle, gfp_t flag)
-
-Wraps dma_alloc_coherent() and also zeroes the returned memory if the
-allocation attempt succeeded.
-
 ::
 
 	void
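Since dma_alloc_coherent() is now guaranteed to return zeroed memory on every architecture, the zeroing wrapper documented above adds nothing, which is why its section is dropped and the function deprecated. A hedged sketch of the mechanical conversion drivers are expected to make; the ring-buffer allocation shown is a made-up example, not code from this series.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Previously a driver would have written:
 *
 *	desc = dma_zalloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *
 * With dma_alloc_coherent() zeroing its return value everywhere, the
 * plain allocator is sufficient and the _z variant can be dropped.
 */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
                                dma_addr_t *ring_dma)
{
        return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}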
@@ -717,12 +708,15 @@ dma-api/num_errors		The number in this file shows how many
 dma-api/min_free_entries	This read-only file can be read to get the
 				minimum number of free dma_debug_entries the
 				allocator has ever seen. If this value goes
-				down to zero the code will disable itself
-				because it is not longer reliable.
+				down to zero the code will attempt to increase
+				nr_total_entries to compensate.
 
 dma-api/num_free_entries	The current number of free dma_debug_entries
 				in the allocator.
 
+dma-api/nr_total_entries	The total number of dma_debug_entries in the
+				allocator, both free and used.
+
 dma-api/driver-filter		You can write a name of a driver into this file
 				to limit the debug output to requests from that
 				particular driver. Write an empty string to
@@ -742,10 +736,15 @@ driver filter at boot time. The debug code will only print errors for that
 driver afterwards. This filter can be disabled or changed later using debugfs.
 
 When the code disables itself at runtime this is most likely because it ran
-out of dma_debug_entries. These entries are preallocated at boot. The number
-of preallocated entries is defined per architecture. If it is too low for you
-boot with 'dma_debug_entries=<your_desired_number>' to overwrite the
-architectural default.
+out of dma_debug_entries and was unable to allocate more on-demand. 65536
+entries are preallocated at boot - if this is too low for you boot with
+'dma_debug_entries=<your_desired_number>' to overwrite the default. Note
+that the code allocates entries in batches, so the exact number of
+preallocated entries may be greater than the actual number requested. The
+code will print to the kernel log each time it has dynamically allocated
+as many entries as were initially preallocated. This is to indicate that a
+larger preallocation size may be appropriate, or if it happens continually
+that a driver may be leaking mappings.
 
 ::
 
@@ -1,33 +0,0 @@
-#
-# Feature name:          sg-chain
-#         Kconfig:       ARCH_HAS_SG_CHAIN
-#         description:   arch supports chained scatter-gather lists
-#
-    -----------------------
-    |         arch |status|
-    -----------------------
-    |       alpha: | TODO |
-    |         arc: |  ok  |
-    |         arm: |  ok  |
-    |       arm64: |  ok  |
-    |         c6x: | TODO |
-    |       h8300: | TODO |
-    |     hexagon: | TODO |
-    |        ia64: |  ok  |
-    |        m68k: | TODO |
-    |  microblaze: | TODO |
-    |        mips: | TODO |
-    |       nds32: | TODO |
-    |       nios2: | TODO |
-    |    openrisc: | TODO |
-    |      parisc: | TODO |
-    |     powerpc: |  ok  |
-    |       riscv: | TODO |
-    |        s390: |  ok  |
-    |          sh: | TODO |
-    |       sparc: |  ok  |
-    |          um: | TODO |
-    |   unicore32: | TODO |
-    |         x86: |  ok  |
-    |      xtensa: | TODO |
-    -----------------------
@@ -209,7 +209,7 @@ IOMMU (input/output memory management unit)
 		mapping with memory protection, etc.
 		Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
 
-  iommu=[<size>][,noagp][,off][,force][,noforce][,leak[=<nr_of_leak_pages>]
+  iommu=[<size>][,noagp][,off][,force][,noforce]
 	[,memaper[=<order>]][,merge][,fullflush][,nomerge]
 	[,noaperture][,calgary]
 
@@ -228,9 +228,6 @@ IOMMU (input/output memory management unit)
   allowed		Overwrite iommu off workarounds for specific chipsets.
   fullflush		Flush IOMMU on each allocation (default).
   nofullflush		Don't use IOMMU fullflush.
-  leak			Turn on simple iommu leak tracing (only when
-			CONFIG_IOMMU_LEAK is on). Default number of leak pages
-			is 20.
   memaper[=<order>]	Allocate an own aperture over RAM with size 32MB<<order.
 			(default: order=1, i.e. 64MB)
   merge			Do scatter-gather (SG) merging. Implies "force"
@@ -5,6 +5,7 @@ config ALPHA
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_NO_PREEMPT
+	select ARCH_NO_SG_CHAIN
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_AOUT
 	select HAVE_IDE
@@ -202,7 +203,6 @@ config ALPHA_EIGER
 config ALPHA_JENSEN
 	bool "Jensen"
 	depends on BROKEN
-	select DMA_DIRECT_OPS
 	help
 	  DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
 	  of the first-generation Alpha systems. A number of these systems
@@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops;
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_ALPHA_JENSEN
-	return &dma_direct_ops;
+	return NULL;
 #else
 	return &alpha_pci_ops;
 #endif
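The alpha hunk above swaps &dma_direct_ops for NULL: returning NULL from get_arch_dma_ops() is the new way of saying "use the direct mapping", and the core then calls the dma-direct helpers without an indirect ops call. A simplified, hedged sketch of that dispatch follows; it is a paraphrase for illustration, not the literal kernel implementation, though get_dma_ops() and dma_direct_map_page() are real helpers in this series.

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>

/*
 * Simplified illustration of the new fast path: when no dma_map_ops
 * are set, the core calls the dma-direct code directly instead of
 * going through a retpoline-expensive indirect ops->map_page call.
 */
static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)       /* NULL ops now means "direct mapping" */
                return dma_direct_map_page(dev, page, offset, size, dir, attrs);

        return ops->map_page(dev, page, offset, size, dir, attrs);
}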
@ -291,7 +291,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
|
|||
use direct_map above, it now must be considered an error. */
|
||||
if (! alpha_mv.mv_pci_tbi) {
|
||||
printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
|
||||
return 0;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
arena = hose->sg_pci;
|
||||
|
@ -307,7 +307,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
|
|||
if (dma_ofs < 0) {
|
||||
printk(KERN_WARNING "pci_map_single failed: "
|
||||
"could not allocate dma page tables\n");
|
||||
return 0;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
paddr &= PAGE_MASK;
|
||||
|
@ -443,7 +443,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
|
|||
gfp &= ~GFP_DMA;
|
||||
|
||||
try_again:
|
||||
cpu_addr = (void *)__get_free_pages(gfp, order);
|
||||
cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
|
||||
if (! cpu_addr) {
|
||||
printk(KERN_INFO "pci_alloc_consistent: "
|
||||
"get_free_pages failed from %pf\n",
|
||||
|
@ -455,7 +455,7 @@ try_again:
|
|||
memset(cpu_addr, 0, size);
|
||||
|
||||
*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
|
||||
if (*dma_addrp == 0) {
|
||||
if (*dma_addrp == DMA_MAPPING_ERROR) {
|
||||
free_pages((unsigned long)cpu_addr, order);
|
||||
if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
|
||||
return NULL;
|
||||
|
@ -671,7 +671,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
sg->dma_address
|
||||
= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
|
||||
sg->length, dac_allowed);
|
||||
return sg->dma_address != 0;
|
||||
return sg->dma_address != DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
start = sg;
|
||||
|
@ -935,11 +935,6 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr == 0;
|
||||
}
|
||||
|
||||
const struct dma_map_ops alpha_pci_ops = {
|
||||
.alloc = alpha_pci_alloc_coherent,
|
||||
.free = alpha_pci_free_coherent,
|
||||
|
@ -947,7 +942,6 @@ const struct dma_map_ops alpha_pci_ops = {
|
|||
.unmap_page = alpha_pci_unmap_page,
|
||||
.map_sg = alpha_pci_map_sg,
|
||||
.unmap_sg = alpha_pci_unmap_sg,
|
||||
.mapping_error = alpha_pci_mapping_error,
|
||||
.dma_supported = alpha_pci_supported,
|
||||
};
|
||||
EXPORT_SYMBOL(alpha_pci_ops);
|
||||
|
|
|
@ -13,12 +13,10 @@ config ARC
|
|||
select ARCH_HAS_PTE_SPECIAL
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS
|
||||
select COMMON_CLK
|
||||
select DMA_DIRECT_OPS
|
||||
select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_FIND_FIRST_BIT
|
||||
|
|
|
@ -1294,7 +1294,7 @@ void __init arc_cache_init_master(void)
|
|||
/*
|
||||
* In case of IOC (say IOC+SLC case), pointers above could still be set
|
||||
* but end up not being relevant as the first function in chain is not
|
||||
* called at all for @dma_direct_ops
|
||||
* called at all for devices using coherent DMA.
|
||||
* arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
|
||||
*/
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
|||
*/
|
||||
BUG_ON(gfp & __GFP_HIGHMEM);
|
||||
|
||||
page = alloc_pages(gfp, order);
|
||||
page = alloc_pages(gfp | __GFP_ZERO, order);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ config ARM
|
|||
select ARCH_HAVE_CUSTOM_GPIO_H
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
select ARCH_MIGHT_HAVE_PC_PARPORT
|
||||
select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
|
||||
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
|
||||
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
|
@ -29,7 +30,7 @@ config ARM
|
|||
select CLONE_BACKWARDS
|
||||
select CPU_PM if (SUSPEND || CPU_IDLE)
|
||||
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
select DMA_DIRECT_OPS if !MMU
|
||||
select DMA_REMAP if MMU
|
||||
select EDAC_SUPPORT
|
||||
select EDAC_ATOMIC_SCRUB
|
||||
select GENERIC_ALLOCATOR
|
||||
|
@ -118,7 +119,6 @@ config ARM
|
|||
<http://www.arm.linux.org.uk/>.
|
||||
|
||||
config ARM_HAS_SG_CHAIN
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
bool
|
||||
|
||||
config ARM_DMA_USE_IOMMU
|
||||
|
|
|
@ -257,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
|
|||
if (buf == NULL) {
|
||||
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
|
||||
__func__, ptr);
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
|
||||
|
@ -327,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
|
|||
|
||||
ret = needs_bounce(dev, dma_addr, size);
|
||||
if (ret < 0)
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
if (ret == 0) {
|
||||
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
|
||||
|
@ -336,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
|
|||
|
||||
if (PageHighMem(page)) {
|
||||
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
return map_single(dev, page_address(page) + offset, size, dir, attrs);
|
||||
|
@ -453,11 +453,6 @@ static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
|
|||
return arm_dma_ops.dma_supported(dev, dma_mask);
|
||||
}
|
||||
|
||||
static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return arm_dma_ops.mapping_error(dev, dma_addr);
|
||||
}
|
||||
|
||||
static const struct dma_map_ops dmabounce_ops = {
|
||||
.alloc = arm_dma_alloc,
|
||||
.free = arm_dma_free,
|
||||
|
@ -472,7 +467,6 @@ static const struct dma_map_ops dmabounce_ops = {
|
|||
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = arm_dma_sync_sg_for_device,
|
||||
.dma_supported = dmabounce_dma_supported,
|
||||
.mapping_error = dmabounce_mapping_error,
|
||||
};
|
||||
|
||||
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
|
||||
|
|
|
@ -9,8 +9,6 @@
|
|||
#include <linux/dma-debug.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
|
||||
|
||||
struct dma_iommu_mapping {
|
||||
/* iommu specific data */
|
||||
struct iommu_domain *domain;
|
||||
|
|
|
@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
|
|||
|
||||
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
|
||||
return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
|
||||
}
|
||||
|
||||
#ifdef __arch_page_to_dma
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
#include "dma.h"
|
||||
|
||||
/*
|
||||
* dma_direct_ops is used if
|
||||
* The generic direct mapping code is used if
|
||||
* - MMU/MPU is off
|
||||
* - cpu is v7m w/o cache support
|
||||
* - device is coherent
|
||||
|
@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = {
|
|||
};
|
||||
EXPORT_SYMBOL(arm_nommu_dma_ops);
|
||||
|
||||
static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
|
||||
{
|
||||
return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
|
||||
}
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
const struct dma_map_ops *dma_ops;
|
||||
|
||||
if (IS_ENABLED(CONFIG_CPU_V7M)) {
|
||||
/*
|
||||
* Cache support for v7m is optional, so can be treated as
|
||||
|
@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
|||
dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
|
||||
}
|
||||
|
||||
dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
|
||||
|
||||
set_dma_ops(dev, dma_ops);
|
||||
if (!dev->archdata.dma_coherent)
|
||||
set_dma_ops(dev, &arm_nommu_dma_ops);
|
||||
}
|
||||
|
|
|
@ -179,11 +179,6 @@ static void arm_dma_sync_single_for_device(struct device *dev,
|
|||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
}
|
||||
|
||||
static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr == ARM_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
const struct dma_map_ops arm_dma_ops = {
|
||||
.alloc = arm_dma_alloc,
|
||||
.free = arm_dma_free,
|
||||
|
@ -197,7 +192,6 @@ const struct dma_map_ops arm_dma_ops = {
|
|||
.sync_single_for_device = arm_dma_sync_single_for_device,
|
||||
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = arm_dma_sync_sg_for_device,
|
||||
.mapping_error = arm_dma_mapping_error,
|
||||
.dma_supported = arm_dma_supported,
|
||||
};
|
||||
EXPORT_SYMBOL(arm_dma_ops);
|
||||
|
@ -217,7 +211,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
|
|||
.get_sgtable = arm_dma_get_sgtable,
|
||||
.map_page = arm_coherent_dma_map_page,
|
||||
.map_sg = arm_dma_map_sg,
|
||||
.mapping_error = arm_dma_mapping_error,
|
||||
.dma_supported = arm_dma_supported,
|
||||
};
|
||||
EXPORT_SYMBOL(arm_coherent_dma_ops);
|
||||
|
@ -774,7 +767,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
|||
gfp &= ~(__GFP_COMP);
|
||||
args.gfp = gfp;
|
||||
|
||||
*handle = ARM_MAPPING_ERROR;
|
||||
*handle = DMA_MAPPING_ERROR;
|
||||
allowblock = gfpflags_allow_blocking(gfp);
|
||||
cma = allowblock ? dev_get_cma_area(dev) : false;
|
||||
|
||||
|
@ -1217,7 +1210,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
|
|||
if (i == mapping->nr_bitmaps) {
|
||||
if (extend_iommu_mapping(mapping)) {
|
||||
spin_unlock_irqrestore(&mapping->lock, flags);
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
|
||||
|
@ -1225,7 +1218,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
|
|||
|
||||
if (start > mapping->bits) {
|
||||
spin_unlock_irqrestore(&mapping->lock, flags);
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
bitmap_set(mapping->bitmaps[i], start, count);
|
||||
|
@ -1409,7 +1402,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
|
|||
int i;
|
||||
|
||||
dma_addr = __alloc_iova(mapping, size);
|
||||
if (dma_addr == ARM_MAPPING_ERROR)
|
||||
if (dma_addr == DMA_MAPPING_ERROR)
|
||||
return dma_addr;
|
||||
|
||||
iova = dma_addr;
|
||||
|
@ -1436,7 +1429,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
|
|||
fail:
|
||||
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
|
||||
__free_iova(mapping, dma_addr, size);
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
|
||||
|
@ -1497,7 +1490,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
|
|||
return NULL;
|
||||
|
||||
*handle = __iommu_create_mapping(dev, &page, size, attrs);
|
||||
if (*handle == ARM_MAPPING_ERROR)
|
||||
if (*handle == DMA_MAPPING_ERROR)
|
||||
goto err_mapping;
|
||||
|
||||
return addr;
|
||||
|
@ -1525,7 +1518,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
struct page **pages;
|
||||
void *addr = NULL;
|
||||
|
||||
*handle = ARM_MAPPING_ERROR;
|
||||
*handle = DMA_MAPPING_ERROR;
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
|
||||
|
@ -1546,7 +1539,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
return NULL;
|
||||
|
||||
*handle = __iommu_create_mapping(dev, pages, size, attrs);
|
||||
if (*handle == ARM_MAPPING_ERROR)
|
||||
if (*handle == DMA_MAPPING_ERROR)
|
||||
goto err_buffer;
|
||||
|
||||
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
|
||||
|
@ -1696,10 +1689,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
int prot;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
*handle = ARM_MAPPING_ERROR;
|
||||
*handle = DMA_MAPPING_ERROR;
|
||||
|
||||
iova_base = iova = __alloc_iova(mapping, size);
|
||||
if (iova == ARM_MAPPING_ERROR)
|
||||
if (iova == DMA_MAPPING_ERROR)
|
||||
return -ENOMEM;
|
||||
|
||||
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
|
||||
|
@ -1739,7 +1732,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|||
for (i = 1; i < nents; i++) {
|
||||
s = sg_next(s);
|
||||
|
||||
s->dma_address = ARM_MAPPING_ERROR;
|
||||
s->dma_address = DMA_MAPPING_ERROR;
|
||||
s->dma_length = 0;
|
||||
|
||||
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
|
||||
|
@ -1914,7 +1907,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
|
|||
int ret, prot, len = PAGE_ALIGN(size + offset);
|
||||
|
||||
dma_addr = __alloc_iova(mapping, len);
|
||||
if (dma_addr == ARM_MAPPING_ERROR)
|
||||
if (dma_addr == DMA_MAPPING_ERROR)
|
||||
return dma_addr;
|
||||
|
||||
prot = __dma_info_to_prot(dir, attrs);
|
||||
|
@ -1926,7 +1919,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
|
|||
return dma_addr + offset;
|
||||
fail:
|
||||
__free_iova(mapping, dma_addr, len);
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2020,7 +2013,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
|
|||
size_t len = PAGE_ALIGN(size + offset);
|
||||
|
||||
dma_addr = __alloc_iova(mapping, len);
|
||||
if (dma_addr == ARM_MAPPING_ERROR)
|
||||
if (dma_addr == DMA_MAPPING_ERROR)
|
||||
return dma_addr;
|
||||
|
||||
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
|
||||
|
@ -2032,7 +2025,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
|
|||
return dma_addr + offset;
|
||||
fail:
|
||||
__free_iova(mapping, dma_addr, len);
|
||||
return ARM_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2105,7 +2098,6 @@ const struct dma_map_ops iommu_ops = {
|
|||
.map_resource = arm_iommu_map_resource,
|
||||
.unmap_resource = arm_iommu_unmap_resource,
|
||||
|
||||
.mapping_error = arm_dma_mapping_error,
|
||||
.dma_supported = arm_dma_supported,
|
||||
};
|
||||
|
||||
|
@ -2124,7 +2116,6 @@ const struct dma_map_ops iommu_coherent_ops = {
|
|||
.map_resource = arm_iommu_map_resource,
|
||||
.unmap_resource = arm_iommu_unmap_resource,
|
||||
|
||||
.mapping_error = arm_dma_mapping_error,
|
||||
.dma_supported = arm_dma_supported,
|
||||
};
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ config ARM64
|
|||
select ARCH_HAS_MEMBARRIER_SYNC_CORE
|
||||
select ARCH_HAS_PTE_SPECIAL
|
||||
select ARCH_HAS_SET_MEMORY
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_HAS_STRICT_KERNEL_RWX
|
||||
select ARCH_HAS_STRICT_MODULE_RWX
|
||||
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
|
||||
|
@ -81,7 +80,7 @@ config ARM64
|
|||
select CPU_PM if (SUSPEND || CPU_IDLE)
|
||||
select CRC32
|
||||
select DCACHE_WORD_ACCESS
|
||||
select DMA_DIRECT_OPS
|
||||
select DMA_DIRECT_REMAP
|
||||
select EDAC_SUPPORT
|
||||
select FRAME_POINTER
|
||||
select GENERIC_ALLOCATOR
|
||||
|
|
|
@ -24,15 +24,9 @@
|
|||
#include <xen/xen.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
|
||||
extern const struct dma_map_ops dummy_dma_ops;
|
||||
|
||||
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
|
||||
{
|
||||
/*
|
||||
* We expect no ISA devices, and all other DMA masters are expected to
|
||||
* have someone call arch_setup_dma_ops at device creation time.
|
||||
*/
|
||||
return &dummy_dma_ops;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
|
|
|
@ -33,113 +33,6 @@
|
|||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
static struct gen_pool *atomic_pool __ro_after_init;
|
||||
|
||||
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
|
||||
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
|
||||
|
||||
static int __init early_coherent_pool(char *p)
|
||||
{
|
||||
atomic_pool_size = memparse(p, &p);
|
||||
return 0;
|
||||
}
|
||||
early_param("coherent_pool", early_coherent_pool);
|
||||
|
||||
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
|
||||
{
|
||||
unsigned long val;
|
||||
void *ptr = NULL;
|
||||
|
||||
if (!atomic_pool) {
|
||||
WARN(1, "coherent pool not initialised!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
val = gen_pool_alloc(atomic_pool, size);
|
||||
if (val) {
|
||||
phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
|
||||
|
||||
*ret_page = phys_to_page(phys);
|
||||
ptr = (void *)val;
|
||||
memset(ptr, 0, size);
|
||||
}
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static bool __in_atomic_pool(void *start, size_t size)
|
||||
{
|
||||
return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
|
||||
}
|
||||
|
||||
static int __free_from_pool(void *start, size_t size)
|
||||
{
|
||||
if (!__in_atomic_pool(start, size))
|
||||
return 0;
|
||||
|
||||
gen_pool_free(atomic_pool, (unsigned long)start, size);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t flags, unsigned long attrs)
|
||||
{
|
||||
struct page *page;
|
||||
void *ptr, *coherent_ptr;
|
||||
pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
if (!gfpflags_allow_blocking(flags)) {
|
||||
struct page *page = NULL;
|
||||
void *addr = __alloc_from_pool(size, &page, flags);
|
||||
|
||||
if (addr)
|
||||
*dma_handle = phys_to_dma(dev, page_to_phys(page));
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
|
||||
if (!ptr)
|
||||
goto no_mem;
|
||||
|
||||
/* remove any dirty cache lines on the kernel alias */
|
||||
__dma_flush_area(ptr, size);
|
||||
|
||||
/* create a coherent mapping */
|
||||
page = virt_to_page(ptr);
|
||||
coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
|
||||
prot, __builtin_return_address(0));
|
||||
if (!coherent_ptr)
|
||||
goto no_map;
|
||||
|
||||
return coherent_ptr;
|
||||
|
||||
no_map:
|
||||
dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
|
||||
no_mem:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
|
||||
void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
|
||||
|
||||
vunmap(vaddr);
|
||||
dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
|
||||
}
|
||||
}
|
||||
|
||||
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
|
||||
dma_addr_t dma_addr)
|
||||
{
|
||||
return __phys_to_pfn(dma_to_phys(dev, dma_addr));
|
||||
}
|
||||
|
||||
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
|
||||
unsigned long attrs)
|
||||
{
|
||||
|
@ -160,6 +53,11 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
|
|||
__dma_unmap_area(phys_to_virt(paddr), size, dir);
|
||||
}
|
||||
|
||||
void arch_dma_prep_coherent(struct page *page, size_t size)
|
||||
{
|
||||
__dma_flush_area(page_address(page), size);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IOMMU_DMA
|
||||
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
|
||||
struct page *page, size_t size)
|
||||
|
@ -191,167 +89,13 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
|
|||
}
|
||||
#endif /* CONFIG_IOMMU_DMA */
|
||||
|
||||
static int __init atomic_pool_init(void)
|
||||
{
|
||||
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
|
||||
unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
|
||||
struct page *page;
|
||||
void *addr;
|
||||
unsigned int pool_size_order = get_order(atomic_pool_size);
|
||||
|
||||
if (dev_get_cma_area(NULL))
|
||||
page = dma_alloc_from_contiguous(NULL, nr_pages,
|
||||
pool_size_order, false);
|
||||
else
|
||||
page = alloc_pages(GFP_DMA32, pool_size_order);
|
||||
|
||||
if (page) {
|
||||
int ret;
|
||||
void *page_addr = page_address(page);
|
||||
|
||||
memset(page_addr, 0, atomic_pool_size);
|
||||
__dma_flush_area(page_addr, atomic_pool_size);
|
||||
|
||||
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
|
||||
if (!atomic_pool)
|
||||
goto free_page;
|
||||
|
||||
addr = dma_common_contiguous_remap(page, atomic_pool_size,
|
||||
VM_USERMAP, prot, atomic_pool_init);
|
||||
|
||||
if (!addr)
|
||||
goto destroy_genpool;
|
||||
|
||||
ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
|
||||
page_to_phys(page),
|
||||
atomic_pool_size, -1);
|
||||
if (ret)
|
||||
goto remove_mapping;
|
||||
|
||||
gen_pool_set_algo(atomic_pool,
|
||||
gen_pool_first_fit_order_align,
|
||||
NULL);
|
||||
|
||||
pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
|
||||
atomic_pool_size / 1024);
|
||||
return 0;
|
||||
}
|
||||
goto out;
|
||||
|
||||
remove_mapping:
|
||||
dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
|
||||
destroy_genpool:
|
||||
gen_pool_destroy(atomic_pool);
|
||||
atomic_pool = NULL;
|
||||
free_page:
|
||||
if (!dma_release_from_contiguous(NULL, page, nr_pages))
|
||||
__free_pages(page, pool_size_order);
|
||||
out:
|
||||
pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
|
||||
atomic_pool_size / 1024);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/********************************************
|
||||
* The following APIs are for dummy DMA ops *
|
||||
********************************************/
|
||||
|
||||
static void *__dummy_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t flags,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void __dummy_free(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
static int __dummy_mmap(struct device *dev,
|
||||
struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nelems, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __dummy_unmap_sg(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
|
||||
static void __dummy_sync_single(struct device *dev,
|
||||
dma_addr_t dev_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
}
|
||||
|
||||
static void __dummy_sync_sg(struct device *dev,
|
||||
struct scatterlist *sgl, int nelems,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
}
|
||||
|
||||
static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int __dummy_dma_supported(struct device *hwdev, u64 mask)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct dma_map_ops dummy_dma_ops = {
|
||||
.alloc = __dummy_alloc,
|
||||
.free = __dummy_free,
|
||||
.mmap = __dummy_mmap,
|
||||
.map_page = __dummy_map_page,
|
||||
.unmap_page = __dummy_unmap_page,
|
||||
.map_sg = __dummy_map_sg,
|
||||
.unmap_sg = __dummy_unmap_sg,
|
||||
.sync_single_for_cpu = __dummy_sync_single,
|
||||
.sync_single_for_device = __dummy_sync_single,
|
||||
.sync_sg_for_cpu = __dummy_sync_sg,
|
||||
.sync_sg_for_device = __dummy_sync_sg,
|
||||
.mapping_error = __dummy_mapping_error,
|
||||
.dma_supported = __dummy_dma_supported,
|
||||
};
|
||||
EXPORT_SYMBOL(dummy_dma_ops);
|
||||
|
||||
static int __init arm64_dma_init(void)
|
||||
{
|
||||
WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
|
||||
TAINT_CPU_OUT_OF_SPEC,
|
||||
"ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
|
||||
ARCH_DMA_MINALIGN, cache_line_size());
|
||||
|
||||
return atomic_pool_init();
|
||||
return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
|
||||
}
|
||||
arch_initcall(arm64_dma_init);
|
||||
|
||||
|
@ -397,17 +141,17 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
page = alloc_pages(gfp, get_order(size));
|
||||
addr = page ? page_address(page) : NULL;
|
||||
} else {
|
||||
addr = __alloc_from_pool(size, &page, gfp);
|
||||
addr = dma_alloc_from_pool(size, &page, gfp);
|
||||
}
|
||||
if (!addr)
|
||||
return NULL;
|
||||
|
||||
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
|
||||
if (iommu_dma_mapping_error(dev, *handle)) {
|
||||
if (*handle == DMA_MAPPING_ERROR) {
|
||||
if (coherent)
|
||||
__free_pages(page, get_order(size));
|
||||
else
|
||||
__free_from_pool(addr, size);
|
||||
dma_free_from_pool(addr, size);
|
||||
addr = NULL;
|
||||
}
|
||||
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
|
||||
|
@ -420,7 +164,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
return NULL;
|
||||
|
||||
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
|
||||
if (iommu_dma_mapping_error(dev, *handle)) {
|
||||
if (*handle == DMA_MAPPING_ERROR) {
|
||||
dma_release_from_contiguous(dev, page,
|
||||
size >> PAGE_SHIFT);
|
||||
return NULL;
|
||||
|
@ -471,9 +215,9 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
|||
* coherent devices.
|
||||
* Hence how dodgy the below logic looks...
|
||||
*/
|
||||
if (__in_atomic_pool(cpu_addr, size)) {
|
||||
if (dma_in_atomic_pool(cpu_addr, size)) {
|
||||
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
|
||||
__free_from_pool(cpu_addr, size);
|
||||
dma_free_from_pool(cpu_addr, size);
|
||||
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
|
||||
struct page *page = vmalloc_to_page(cpu_addr);
|
||||
|
||||
|
@ -580,7 +324,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
|
|||
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
|
||||
|
||||
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
|
||||
!iommu_dma_mapping_error(dev, dev_addr))
|
||||
dev_addr != DMA_MAPPING_ERROR)
|
||||
__dma_map_area(page_address(page) + offset, size, dir);
|
||||
|
||||
return dev_addr;
|
||||
|
@ -663,7 +407,6 @@ static const struct dma_map_ops iommu_dma_ops = {
|
|||
.sync_sg_for_device = __iommu_sync_sg_for_device,
|
||||
.map_resource = iommu_dma_map_resource,
|
||||
.unmap_resource = iommu_dma_unmap_resource,
|
||||
.mapping_error = iommu_dma_mapping_error,
|
||||
};
|
||||
|
||||
static int __init __iommu_dma_init(void)
|
||||
|
@ -719,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
|||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
if (!dev->dma_ops)
|
||||
dev->dma_ops = &swiotlb_dma_ops;
|
||||
|
||||
dev->dma_coherent = coherent;
|
||||
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
|
||||
|
||||
|
|
|
@ -9,7 +9,6 @@ config C6X
|
|||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
|
||||
select CLKDEV_LOOKUP
|
||||
select DMA_DIRECT_OPS
|
||||
select GENERIC_ATOMIC64
|
||||
select GENERIC_IRQ_SHOW
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
|
|
|
@ -78,6 +78,7 @@ static void __free_dma_pages(u32 addr, int order)
|
|||
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
||||
gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
void *ret;
|
||||
u32 paddr;
|
||||
int order;
|
||||
|
||||
|
@ -94,7 +95,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
|||
if (!paddr)
|
||||
return NULL;
|
||||
|
||||
return phys_to_virt(paddr);
|
||||
ret = phys_to_virt(paddr);
|
||||
memset(ret, 0, 1 << order);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -7,8 +7,7 @@ config CSKY
|
|||
select COMMON_CLK
|
||||
select CLKSRC_MMIO
|
||||
select CLKSRC_OF
|
||||
select DMA_DIRECT_OPS
|
||||
select DMA_NONCOHERENT_OPS
|
||||
select DMA_DIRECT_REMAP
|
||||
select IRQ_DOMAIN
|
||||
select HANDLE_DOMAIN_IRQ
|
||||
select DW_APB_TIMER_OF
|
||||
|
|
|
@ -14,73 +14,13 @@
|
|||
#include <linux/version.h>
|
||||
#include <asm/cache.h>
|
||||
|
||||
static struct gen_pool *atomic_pool;
|
||||
static size_t atomic_pool_size __initdata = SZ_256K;
|
||||
|
||||
static int __init early_coherent_pool(char *p)
|
||||
{
|
||||
atomic_pool_size = memparse(p, &p);
|
||||
return 0;
|
||||
}
|
||||
early_param("coherent_pool", early_coherent_pool);
|
||||
|
||||
static int __init atomic_pool_init(void)
|
||||
{
|
||||
struct page *page;
|
||||
size_t size = atomic_pool_size;
|
||||
void *ptr;
|
||||
int ret;
|
||||
|
||||
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
|
||||
if (!atomic_pool)
|
||||
BUG();
|
||||
|
||||
page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
|
||||
if (!page)
|
||||
BUG();
|
||||
|
||||
ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
|
||||
pgprot_noncached(PAGE_KERNEL),
|
||||
__builtin_return_address(0));
|
||||
if (!ptr)
|
||||
BUG();
|
||||
|
||||
ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
|
||||
page_to_phys(page), atomic_pool_size, -1);
|
||||
if (ret)
|
||||
BUG();
|
||||
|
||||
gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
|
||||
|
||||
pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
|
||||
atomic_pool_size / 1024);
|
||||
|
||||
pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
|
||||
page_to_phys(page));
|
||||
|
||||
return 0;
|
||||
return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
|
||||
}
|
||||
postcore_initcall(atomic_pool_init);
|
||||
|
||||
static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle)
|
||||
{
|
||||
unsigned long addr;
|
||||
|
||||
addr = gen_pool_alloc(atomic_pool, size);
|
||||
if (addr)
|
||||
*dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
|
||||
|
||||
return (void *)addr;
|
||||
}
|
||||
|
||||
static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
|
||||
}
|
||||
|
||||
static void __dma_clear_buffer(struct page *page, size_t size)
|
||||
void arch_dma_prep_coherent(struct page *page, size_t size)
|
||||
{
|
||||
if (PageHighMem(page)) {
|
||||
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
|
@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
|
|||
}
|
||||
}
|
||||
|
||||
static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp,
|
||||
unsigned long attrs)
|
||||
{
|
||||
void *vaddr;
|
||||
struct page *page;
|
||||
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
|
||||
if (DMA_ATTR_NON_CONSISTENT & attrs) {
|
||||
pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_CMA))
|
||||
page = dma_alloc_from_contiguous(dev, count, get_order(size),
|
||||
gfp);
|
||||
else
|
||||
page = alloc_pages(gfp, get_order(size));
|
||||
|
||||
if (!page) {
|
||||
pr_err("csky %s no more free pages.\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*dma_handle = page_to_phys(page);
|
||||
|
||||
__dma_clear_buffer(page, size);
|
||||
|
||||
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
|
||||
return page;
|
||||
|
||||
vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
|
||||
pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
|
||||
if (!vaddr)
|
||||
BUG();
|
||||
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
static void csky_dma_free_nonatomic(
|
||||
struct device *dev,
|
||||
size_t size,
|
||||
void *vaddr,
|
||||
dma_addr_t dma_handle,
|
||||
unsigned long attrs
|
||||
)
|
||||
{
|
||||
struct page *page = phys_to_page(dma_handle);
|
||||
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
|
||||
if ((unsigned int)vaddr >= VMALLOC_START)
|
||||
dma_common_free_remap(vaddr, size, VM_USERMAP);
|
||||
|
||||
if (IS_ENABLED(CONFIG_DMA_CMA))
|
||||
dma_release_from_contiguous(dev, page, count);
|
||||
else
|
||||
__free_pages(page, get_order(size));
|
||||
}
|
||||
|
||||
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
if (gfpflags_allow_blocking(gfp))
|
||||
return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
|
||||
attrs);
|
||||
else
|
||||
return csky_dma_alloc_atomic(dev, size, dma_handle);
|
||||
}
|
||||
|
||||
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
|
||||
csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
|
||||
else
|
||||
csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
|
||||
}
|
||||
|
||||
static inline void cache_op(phys_addr_t paddr, size_t size,
|
||||
void (*fn)(unsigned long start, unsigned long end))
|
||||
{
|
||||
|
|
|
@ -22,7 +22,6 @@ config H8300
|
|||
select HAVE_ARCH_KGDB
|
||||
select HAVE_ARCH_HASH
|
||||
select CPU_NO_EFFICIENT_FFS
|
||||
select DMA_DIRECT_OPS
|
||||
|
||||
config CPU_BIG_ENDIAN
|
||||
def_bool y
|
||||
|
|
|
@ -31,7 +31,6 @@ config HEXAGON
|
|||
select GENERIC_CLOCKEVENTS_BROADCAST
|
||||
select MODULES_USE_ELF_RELA
|
||||
select GENERIC_CPU_DEVICES
|
||||
select DMA_DIRECT_OPS
|
||||
---help---
|
||||
Qualcomm Hexagon is a processor architecture designed for high
|
||||
performance and low power across a wide variety of applications.
|
||||
|
|
|
@ -28,8 +28,8 @@ config IA64
|
|||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_MEMBLOCK_NODE_MAP
|
||||
select HAVE_VIRT_CPU_ACCOUNTING
|
||||
select ARCH_HAS_DMA_MARK_CLEAN
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
select VIRT_TO_BUS
|
||||
select ARCH_DISCARD_MEMBLOCK
|
||||
select GENERIC_IRQ_PROBE
|
||||
|
|
|
@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev)
|
|||
const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
|
||||
{
|
||||
if (use_swiotlb(dev))
|
||||
return &swiotlb_dma_ops;
|
||||
return NULL;
|
||||
return &sba_dma_ops;
|
||||
}
|
||||
EXPORT_SYMBOL(hwsw_dma_get_ops);
|
||||
|
|
|
@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
|
|||
}
|
||||
|
||||
/**
|
||||
* sba_map_single_attrs - map one buffer and return IOVA for DMA
|
||||
* sba_map_page - map one buffer and return IOVA for DMA
|
||||
* @dev: instance of PCI owned by the driver that's asking.
|
||||
* @addr: driver buffer to map.
|
||||
* @size: number of bytes to map in driver buffer.
|
||||
* @dir: R/W or both.
|
||||
* @page: page to map
|
||||
* @poff: offset into page
|
||||
* @size: number of bytes to map
|
||||
* @dir: dma direction
|
||||
* @attrs: optional dma attributes
|
||||
*
|
||||
* See Documentation/DMA-API-HOWTO.txt
|
||||
|
@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
|
|||
** Device is bit capable of DMA'ing to the buffer...
|
||||
** just return the PCI address of ptr
|
||||
*/
|
||||
DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
|
||||
DBG_BYPASS("sba_map_page() bypass mask/addr: "
|
||||
"0x%lx/0x%lx\n",
|
||||
to_pci_dev(dev)->dma_mask, pci_addr);
|
||||
return pci_addr;
|
||||
|
@ -966,14 +967,14 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
|
|||
|
||||
#ifdef ASSERT_PDIR_SANITY
|
||||
spin_lock_irqsave(&ioc->res_lock, flags);
|
||||
if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
|
||||
if (sba_check_pdir(ioc,"Check before sba_map_page()"))
|
||||
panic("Sanity check failed");
|
||||
spin_unlock_irqrestore(&ioc->res_lock, flags);
|
||||
#endif
|
||||
|
||||
pide = sba_alloc_range(ioc, dev, size);
|
||||
if (pide < 0)
|
||||
return 0;
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
iovp = (dma_addr_t) pide << iovp_shift;
|
||||
|
||||
|
@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
|
|||
/* form complete address */
|
||||
#ifdef ASSERT_PDIR_SANITY
|
||||
spin_lock_irqsave(&ioc->res_lock, flags);
|
||||
sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
|
||||
sba_check_pdir(ioc,"Check after sba_map_page()");
|
||||
spin_unlock_irqrestore(&ioc->res_lock, flags);
|
||||
#endif
|
||||
return SBA_IOVA(ioc, iovp, offset);
|
||||
}
|
||||
|
||||
static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return sba_map_page(dev, virt_to_page(addr),
|
||||
(unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
|
||||
}
|
||||
|
||||
#ifdef ENABLE_MARK_CLEAN
|
||||
static SBA_INLINE void
|
||||
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
|
||||
|
@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
|
|||
#endif
|
||||
|
||||
/**
|
||||
* sba_unmap_single_attrs - unmap one IOVA and free resources
|
||||
* sba_unmap_page - unmap one IOVA and free resources
|
||||
* @dev: instance of PCI owned by the driver that's asking.
|
||||
* @iova: IOVA of driver buffer previously mapped.
|
||||
* @size: number of bytes mapped in driver buffer.
|
||||
|
@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
|
|||
/*
|
||||
** Address does not fall w/in IOVA, must be bypassing
|
||||
*/
|
||||
DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
|
||||
DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
|
||||
iova);
|
||||
|
||||
#ifdef ENABLE_MARK_CLEAN
|
||||
|
@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
|
|||
#endif /* DELAYED_RESOURCE_CNT == 0 */
|
||||
}
|
||||
|
||||
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
sba_unmap_page(dev, iova, size, dir, attrs);
|
||||
}
|
||||
|
||||
/**
|
||||
* sba_alloc_coherent - allocate/map shared mem for DMA
|
||||
* @dev: instance of PCI owned by the driver that's asking.
|
||||
|
@ -1132,30 +1119,24 @@ static void *
|
|||
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t flags, unsigned long attrs)
|
||||
{
|
||||
struct page *page;
|
||||
struct ioc *ioc;
|
||||
int node = -1;
|
||||
void *addr;
|
||||
|
||||
ioc = GET_IOC(dev);
|
||||
ASSERT(ioc);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
page = alloc_pages_node(ioc->node, flags, get_order(size));
|
||||
if (unlikely(!page))
|
||||
return NULL;
|
||||
|
||||
addr = page_address(page);
|
||||
}
|
||||
#else
|
||||
addr = (void *) __get_free_pages(flags, get_order(size));
|
||||
node = ioc->node;
|
||||
#endif
|
||||
if (unlikely(!addr))
|
||||
|
||||
page = alloc_pages_node(node, flags, get_order(size));
|
||||
if (unlikely(!page))
|
||||
return NULL;
|
||||
|
||||
addr = page_address(page);
|
||||
memset(addr, 0, size);
|
||||
*dma_handle = virt_to_phys(addr);
|
||||
*dma_handle = page_to_phys(page);
|
||||
|
||||
#ifdef ALLOW_IOV_BYPASS
|
||||
ASSERT(dev->coherent_dma_mask);
|
||||
|
@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
|||
* If device can't bypass or bypass is disabled, pass the 32bit fake
|
||||
* device to map single to get an iova mapping.
|
||||
*/
|
||||
*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
|
||||
size, 0, 0);
|
||||
|
||||
*dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
|
||||
DMA_BIDIRECTIONAL, 0);
|
||||
if (dma_mapping_error(dev, *dma_handle))
|
||||
return NULL;
|
||||
return addr;
|
||||
}
|
||||
|
||||
|
@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
|||
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
|
||||
dma_addr_t dma_handle, unsigned long attrs)
|
||||
{
|
||||
sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
|
||||
sba_unmap_page(dev, dma_handle, size, 0, 0);
|
||||
free_pages((unsigned long) vaddr, get_order(size));
|
||||
}
|
||||
|
||||
|
@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
|||
/* Fast path single entry scatterlists. */
|
||||
if (nents == 1) {
|
||||
sglist->dma_length = sglist->length;
|
||||
sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
|
||||
sglist->dma_address = sba_map_page(dev, sg_page(sglist),
|
||||
sglist->offset, sglist->length, dir, attrs);
|
||||
if (dma_mapping_error(dev, sglist->dma_address))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
|
|||
|
||||
while (nents && sglist->dma_length) {
|
||||
|
||||
sba_unmap_single_attrs(dev, sglist->dma_address,
|
||||
sglist->dma_length, dir, attrs);
|
||||
sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
|
||||
dir, attrs);
|
||||
sglist = sg_next(sglist);
|
||||
nents--;
|
||||
}
|
||||
|
@ -2080,8 +2065,6 @@ static int __init acpi_sba_ioc_init_acpi(void)
|
|||
/* This has to run before acpi_scan_init(). */
|
||||
arch_initcall(acpi_sba_ioc_init_acpi);
|
||||
|
||||
extern const struct dma_map_ops swiotlb_dma_ops;
|
||||
|
||||
static int __init
|
||||
sba_init(void)
|
||||
{
|
||||
|
@ -2095,7 +2078,7 @@ sba_init(void)
|
|||
* a successful kdump kernel boot is to use the swiotlb.
|
||||
*/
|
||||
if (is_kdump_kernel()) {
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
dma_ops = NULL;
|
||||
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
|
||||
panic("Unable to initialize software I/O TLB:"
|
||||
" Try machvec=dig boot option");
|
||||
|
@ -2117,7 +2100,7 @@ sba_init(void)
|
|||
* If we didn't find something sba_iommu can claim, we
|
||||
* need to setup the swiotlb and switch to the dig machvec.
|
||||
*/
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
dma_ops = NULL;
|
||||
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
|
||||
panic("Unable to find SBA IOMMU or initialize "
|
||||
"software I/O TLB: Try machvec=dig boot option");
|
||||
|
@ -2170,11 +2153,6 @@ static int sba_dma_supported (struct device *dev, u64 mask)
|
|||
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
|
||||
}
|
||||
|
||||
static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
__setup("nosbagart", nosbagart);
|
||||
|
||||
static int __init
|
||||
|
@ -2208,7 +2186,6 @@ const struct dma_map_ops sba_dma_ops = {
|
|||
.map_sg = sba_map_sg_attrs,
|
||||
.unmap_sg = sba_unmap_sg_attrs,
|
||||
.dma_supported = sba_dma_supported,
|
||||
.mapping_error = sba_dma_mapping_error,
|
||||
};
|
||||
|
||||
void sba_dma_init(void)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/swiotlb.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
|
@ -16,9 +16,26 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
|
|||
EXPORT_SYMBOL(dma_get_ops);
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
void *arch_dma_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
|
||||
}
|
||||
|
||||
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_addr, unsigned long attrs)
|
||||
{
|
||||
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
|
||||
}
|
||||
|
||||
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
|
||||
dma_addr_t dma_addr)
|
||||
{
|
||||
return page_to_pfn(virt_to_page(cpu_addr));
|
||||
}
|
||||
|
||||
void __init swiotlb_dma_init(void)
|
||||
{
|
||||
dma_ops = &swiotlb_dma_ops;
|
||||
swiotlb_init(1);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <linux/dma-noncoherent.h>
|
||||
#include <linux/efi.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/memblock.h>
|
||||
|
@ -71,18 +72,14 @@ __ia64_sync_icache_dcache (pte_t pte)
|
|||
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
|
||||
* flush them when they get mapped into an executable vm-area.
|
||||
*/
|
||||
void
|
||||
dma_mark_clean(void *addr, size_t size)
|
||||
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
unsigned long pg_addr, end;
|
||||
unsigned long pfn = PHYS_PFN(paddr);
|
||||
|
||||
pg_addr = PAGE_ALIGN((unsigned long) addr);
|
||||
end = (unsigned long) addr + size;
|
||||
while (pg_addr + PAGE_SIZE <= end) {
|
||||
struct page *page = virt_to_page(pg_addr);
|
||||
set_bit(PG_arch_1, &page->flags);
|
||||
pg_addr += PAGE_SIZE;
|
||||
}
|
||||
do {
|
||||
set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
|
||||
} while (++pfn <= PHYS_PFN(paddr + size - 1));
|
||||
}
|
||||
|
||||
inline void
|
||||
|
|
|

@@ -196,7 +196,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,

if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
return 0;
return DMA_MAPPING_ERROR;
}
return dma_addr;
}

@@ -314,11 +314,6 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
return nhwentries;
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}

static u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);

@@ -441,7 +436,6 @@ static struct dma_map_ops sn_dma_ops = {
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
.mapping_error = sn_dma_mapping_error,
.dma_supported = sn_dma_supported,
.get_required_mask = sn_dma_get_required_mask,
};
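
The hunks above drop the ia64/sn ->mapping_error callback and hand back the common DMA_MAPPING_ERROR sentinel instead. As a rough illustration of what that means for callers (the helper name and error handling below are assumptions for the example, not code from this series):

#include <linux/dma-mapping.h>

/* Illustrative only: map one buffer and report failure using the common
 * DMA_MAPPING_ERROR convention. */
static int example_map_one(struct device *dev, void *buf, size_t len,
			   dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* dma_mapping_error() now compares against DMA_MAPPING_ERROR rather
	 * than making an indirect ->mapping_error() call into the ops. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}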

@@ -26,7 +26,6 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select DMA_DIRECT_OPS if HAS_DMA
select ARCH_DISCARD_MEMBLOCK

config CPU_BIG_ENDIAN

@@ -32,7 +32,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
size = PAGE_ALIGN(size);
order = get_order(size);

page = alloc_pages(flag, order);
page = alloc_pages(flag | __GFP_ZERO, order);
if (!page)
return NULL;
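
The alloc_pages() change above is one of the per-architecture tweaks that make dma_alloc_* hand back zeroed memory everywhere. A sketch of the driver-side consequence (the helper name and the descriptor-ring framing are assumptions for the example):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: the returned memory is already cleared, so there is no
 * need for dma_zalloc_coherent() or a manual memset() any more. */
static void *example_alloc_desc_ring(struct device *dev, size_t size,
				     dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}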

@@ -12,7 +12,6 @@ config MICROBLAZE
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES

@@ -81,7 +81,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
size = PAGE_ALIGN(size);
order = get_order(size);

vaddr = __get_free_pages(gfp, order);
vaddr = __get_free_pages(gfp | __GFP_ZERO, order);
if (!vaddr)
return NULL;

@@ -18,7 +18,6 @@ config MIPS
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
select CPU_PM if CPU_IDLE
select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE

@@ -10,10 +10,8 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#if defined(CONFIG_MACH_JAZZ)
return &jazz_dma_ops;
#elif defined(CONFIG_SWIOTLB)
return &swiotlb_dma_ops;
#else
return &dma_direct_ops;
return NULL;
#endif
}
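
In the dma-mapping.h hunk above, returning NULL from get_arch_dma_ops() takes the place of the removed dma_direct_ops. A simplified sketch of the lookup this relies on (an assumption about the idea, not the exact in-tree helper):

#include <linux/dma-mapping.h>

/* Simplified sketch: a NULL dma_map_ops pointer now means "use the direct
 * mapping", so no indirect call is needed for the common case. */
static const struct dma_map_ops *example_get_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;		/* per-device override, e.g. an IOMMU */
	return get_arch_dma_ops(dev->bus);	/* may be NULL == dma-direct */
}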

@@ -39,12 +39,6 @@ extern int vdma_get_enable(int channel);
#define VDMA_PAGE(a) ((unsigned int)(a) >> 12)
#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))

/*
* error code returned by vdma_alloc()
* (See also arch/mips/kernel/jazzdma.c)
*/
#define VDMA_ERROR 0xffffffff

/*
* VDMA pagetable entry description
*/

@@ -104,12 +104,12 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
if (vdma_debug)
printk("vdma_alloc: Invalid physical address: %08lx\n",
paddr);
return VDMA_ERROR; /* invalid physical address */
return DMA_MAPPING_ERROR; /* invalid physical address */
}
if (size > 0x400000 || size == 0) {
if (vdma_debug)
printk("vdma_alloc: Invalid size: %08lx\n", size);
return VDMA_ERROR; /* invalid physical address */
return DMA_MAPPING_ERROR; /* invalid physical address */
}

spin_lock_irqsave(&vdma_lock, flags);

@@ -123,7 +123,7 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
first < VDMA_PGTBL_ENTRIES) first++;
if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */
spin_unlock_irqrestore(&vdma_lock, flags);
return VDMA_ERROR;
return DMA_MAPPING_ERROR;
}

last = first + 1;

@@ -569,7 +569,7 @@ static void *jazz_dma_alloc(struct device *dev, size_t size,
return NULL;

*dma_handle = vdma_alloc(virt_to_phys(ret), size);
if (*dma_handle == VDMA_ERROR) {
if (*dma_handle == DMA_MAPPING_ERROR) {
dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
return NULL;
}

@@ -620,7 +620,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
if (sg->dma_address == VDMA_ERROR)
if (sg->dma_address == DMA_MAPPING_ERROR)
return 0;
sg_dma_len(sg) = sg->length;
}

@@ -674,11 +674,6 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == VDMA_ERROR;
}

const struct dma_map_ops jazz_dma_ops = {
.alloc = jazz_dma_alloc,
.free = jazz_dma_free,

@@ -692,6 +687,5 @@ const struct dma_map_ops jazz_dma_ops = {
.sync_sg_for_device = jazz_dma_sync_sg_for_device,
.dma_supported = dma_direct_supported,
.cache_sync = arch_dma_cache_sync,
.mapping_error = jazz_dma_mapping_error,
};
EXPORT_SYMBOL(jazz_dma_ops);

@@ -11,7 +11,6 @@ config NDS32
select CLKSRC_MMIO
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS

@@ -4,7 +4,6 @@ config NIOS2
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_SWAP
select DMA_DIRECT_OPS
select TIMER_OF
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS

@@ -7,7 +7,6 @@
config OPENRISC
def_bool y
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_DIRECT_OPS
select OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN

@@ -89,7 +89,7 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
.mm = &init_mm
};

page = alloc_pages_exact(size, gfp);
page = alloc_pages_exact(size, gfp | __GFP_ZERO);
if (!page)
return NULL;

@@ -11,6 +11,7 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_MEMORY_FAILURE
select RTC_CLASS
select RTC_DRV_GENERIC

@@ -184,7 +185,6 @@ config PA11
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_DIRECT_OPS
select DMA_NONCOHERENT_CACHE_SYNC

config PREFETCH

@@ -404,7 +404,7 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
vaddr = pcxl_alloc_range(size);
paddr = __get_free_pages(flag, order);
paddr = __get_free_pages(flag | __GFP_ZERO, order);
flush_kernel_dcache_range(paddr, size);
paddr = __pa(paddr);
map_uncached_pages(vaddr, size, paddr);

@@ -429,7 +429,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
return NULL;

addr = (void *)__get_free_pages(flag, get_order(size));
addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
if (addr)
*dma_handle = (dma_addr_t)virt_to_phys(addr);
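
The pcx_dma_alloc() hunk above only serves DMA_ATTR_NON_CONSISTENT requests, matching the DMA_NONCOHERENT_CACHE_SYNC select earlier in the parisc Kconfig hunk. A rough driver-side usage sketch (buffer purpose, size and direction are assumptions for the example):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: ask for possibly cached "non consistent" memory and
 * sync it explicitly before the device reads it. */
static void *example_alloc_noncoherent(struct device *dev, size_t size,
				       dma_addr_t *handle)
{
	void *buf = dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
				    DMA_ATTR_NON_CONSISTENT);

	if (buf)
		dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
	return buf;
}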

@@ -99,10 +99,6 @@ void __init dma_ops_init(void)

case pcxl2:
pa7300lc_init();
case pcxl: /* falls through */
case pcxs:
case pcxt:
hppa_dma_ops = &dma_direct_ops;
break;
default:
break;

@@ -139,7 +139,6 @@ config PPC
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64

@@ -103,7 +103,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
}

#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);

extern u64 __dma_get_required_mask(struct device *dev);

@@ -143,8 +143,6 @@ struct scatterlist;

#ifdef CONFIG_PPC64

#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0)

static inline void set_iommu_table_base(struct device *dev,
struct iommu_table *base)
{

@@ -239,8 +237,6 @@ static inline void iommu_del_device(struct device *dev)
}
#endif /* !CONFIG_IOMMU_API */

int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);

#else

static inline void *get_iommu_table_base(struct device *dev)

@@ -105,11 +105,6 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
return mask;
}

int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == IOMMU_MAPPING_ERROR;
}

struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,

@@ -120,5 +115,4 @@ struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
.mapping_error = dma_iommu_mapping_error,
};

@@ -50,16 +50,15 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
.alloc = __dma_nommu_alloc_coherent,
.free = __dma_nommu_free_coherent,
.mmap = dma_nommu_mmap_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = swiotlb_dma_supported,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.mapping_error = dma_direct_mapping_error,
.map_page = dma_direct_map_page,
.unmap_page = dma_direct_unmap_page,
.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
.sync_single_for_device = dma_direct_sync_single_for_device,
.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
.sync_sg_for_device = dma_direct_sync_sg_for_device,
.get_required_mask = swiotlb_powerpc_get_required,
};
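
The powerpc_swiotlb_dma_ops hunk above replaces the swiotlb entry points with the dma_direct_* helpers, since swiotlb is now reached through dma-direct. A simplified sketch of the related "bypass indirect calls" idea on the generic side (the helper name is made up and this is not the exact kernel code):

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>

/* Simplified sketch: with no dma_map_ops installed, the core calls the
 * dma-direct helpers directly instead of through a function pointer. */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}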
@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
|
|||
if (unlikely(npages == 0)) {
|
||||
if (printk_ratelimit())
|
||||
WARN_ON(1);
|
||||
return IOMMU_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
if (should_fail_iommu(dev))
|
||||
return IOMMU_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
/*
|
||||
* We don't need to disable preemption here because any CPU can
|
||||
|
@ -278,7 +278,7 @@ again:
|
|||
} else {
|
||||
/* Give up */
|
||||
spin_unlock_irqrestore(&(pool->lock), flags);
|
||||
return IOMMU_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
|
|||
unsigned long attrs)
|
||||
{
|
||||
unsigned long entry;
|
||||
dma_addr_t ret = IOMMU_MAPPING_ERROR;
|
||||
dma_addr_t ret = DMA_MAPPING_ERROR;
|
||||
int build_fail;
|
||||
|
||||
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
|
||||
|
||||
if (unlikely(entry == IOMMU_MAPPING_ERROR))
|
||||
return IOMMU_MAPPING_ERROR;
|
||||
if (unlikely(entry == DMA_MAPPING_ERROR))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
entry += tbl->it_offset; /* Offset into real TCE table */
|
||||
ret = entry << tbl->it_page_shift; /* Set the return dma address */
|
||||
|
@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
|
|||
|
||||
/* tbl->it_ops->set() only returns non-zero for transient errors.
|
||||
* Clean up the table bitmap in this case and return
|
||||
* IOMMU_MAPPING_ERROR. For all other errors the functionality is
|
||||
* DMA_MAPPING_ERROR. For all other errors the functionality is
|
||||
* not altered.
|
||||
*/
|
||||
if (unlikely(build_fail)) {
|
||||
__iommu_free(tbl, ret, npages);
|
||||
return IOMMU_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
/* Flush/invalidate TLB caches if necessary */
|
||||
|
@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
|
|||
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
|
||||
|
||||
/* Handle failure */
|
||||
if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
|
||||
if (unlikely(entry == DMA_MAPPING_ERROR)) {
|
||||
if (!(attrs & DMA_ATTR_NO_WARN) &&
|
||||
printk_ratelimit())
|
||||
dev_info(dev, "iommu_alloc failed, tbl %p "
|
||||
|
@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
|
|||
*/
|
||||
if (outcount < incount) {
|
||||
outs = sg_next(outs);
|
||||
outs->dma_address = IOMMU_MAPPING_ERROR;
|
||||
outs->dma_address = DMA_MAPPING_ERROR;
|
||||
outs->dma_length = 0;
|
||||
}
|
||||
|
||||
|
@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
|
|||
npages = iommu_num_pages(s->dma_address, s->dma_length,
|
||||
IOMMU_PAGE_SIZE(tbl));
|
||||
__iommu_free(tbl, vaddr, npages);
|
||||
s->dma_address = IOMMU_MAPPING_ERROR;
|
||||
s->dma_address = DMA_MAPPING_ERROR;
|
||||
s->dma_length = 0;
|
||||
}
|
||||
if (s == outs)
|
||||
|
@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
|||
unsigned long mask, enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
{
|
||||
dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
|
||||
dma_addr_t dma_handle = DMA_MAPPING_ERROR;
|
||||
void *vaddr;
|
||||
unsigned long uaddr;
|
||||
unsigned int npages, align;
|
||||
|
@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
|||
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
|
||||
mask >> tbl->it_page_shift, align,
|
||||
attrs);
|
||||
if (dma_handle == IOMMU_MAPPING_ERROR) {
|
||||
if (dma_handle == DMA_MAPPING_ERROR) {
|
||||
if (!(attrs & DMA_ATTR_NO_WARN) &&
|
||||
printk_ratelimit()) {
|
||||
dev_info(dev, "iommu_alloc failed, tbl %p "
|
||||
|
@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
|
|||
io_order = get_iommu_order(size, tbl);
|
||||
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
|
||||
mask >> tbl->it_page_shift, io_order, 0);
|
||||
if (mapping == IOMMU_MAPPING_ERROR) {
|
||||
if (mapping == DMA_MAPPING_ERROR) {
|
||||
free_pages((unsigned long)ret, order);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -654,7 +654,6 @@ static const struct dma_map_ops dma_iommu_fixed_ops = {
|
|||
.dma_supported = dma_suported_and_switch,
|
||||
.map_page = dma_fixed_map_page,
|
||||
.unmap_page = dma_fixed_unmap_page,
|
||||
.mapping_error = dma_iommu_mapping_error,
|
||||
};
|
||||
|
||||
static void cell_dma_dev_setup(struct device *dev)
|
||||
|
|
|
@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
|
|||
{
|
||||
struct vio_dev *viodev = to_vio_dev(dev);
|
||||
struct iommu_table *tbl;
|
||||
dma_addr_t ret = IOMMU_MAPPING_ERROR;
|
||||
dma_addr_t ret = DMA_MAPPING_ERROR;
|
||||
|
||||
tbl = get_iommu_table_base(dev);
|
||||
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
|
||||
|
@ -625,7 +625,6 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
|
|||
.unmap_page = vio_dma_iommu_unmap_page,
|
||||
.dma_supported = vio_dma_iommu_dma_supported,
|
||||
.get_required_mask = vio_dma_get_required_mask,
|
||||
.mapping_error = dma_iommu_mapping_error,
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -19,7 +19,6 @@ config RISCV
|
|||
select ARCH_WANT_FRAME_POINTERS
|
||||
select CLONE_BACKWARDS
|
||||
select COMMON_CLK
|
||||
select DMA_DIRECT_OPS
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_CPU_DEVICES
|
||||
select GENERIC_IRQ_SHOW
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#ifndef _RISCV_ASM_DMA_MAPPING_H
|
||||
#define _RISCV_ASM_DMA_MAPPING_H 1
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
#include <linux/swiotlb.h>
|
||||
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
|
||||
{
|
||||
return &swiotlb_dma_ops;
|
||||
}
|
||||
#else
|
||||
#include <asm-generic/dma-mapping.h>
|
||||
#endif /* CONFIG_SWIOTLB */
|
||||
|
||||
#endif /* _RISCV_ASM_DMA_MAPPING_H */
|
|
@ -73,7 +73,6 @@ config S390
|
|||
select ARCH_HAS_KCOV
|
||||
select ARCH_HAS_PTE_SPECIAL
|
||||
select ARCH_HAS_SET_MEMORY
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_HAS_STRICT_KERNEL_RWX
|
||||
select ARCH_HAS_STRICT_MODULE_RWX
|
||||
select ARCH_HAS_UBSAN_SANITIZE_ALL
|
||||
|
@ -140,7 +139,6 @@ config S390
|
|||
select HAVE_COPY_THREAD_TLS
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DMA_CONTIGUOUS
|
||||
select DMA_DIRECT_OPS
|
||||
select HAVE_DYNAMIC_FTRACE
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
|
|
|
@ -15,8 +15,6 @@
|
|||
#include <linux/pci.h>
|
||||
#include <asm/pci_dma.h>
|
||||
|
||||
#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0)
|
||||
|
||||
static struct kmem_cache *dma_region_table_cache;
|
||||
static struct kmem_cache *dma_page_table_cache;
|
||||
static int s390_iommu_strict;
|
||||
|
@ -301,7 +299,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
|
|||
|
||||
out_error:
|
||||
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
|
||||
return S390_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
|
||||
|
@ -349,7 +347,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
|
|||
/* This rounds up number of pages based on size and offset */
|
||||
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
|
||||
dma_addr = dma_alloc_address(dev, nr_pages);
|
||||
if (dma_addr == S390_MAPPING_ERROR) {
|
||||
if (dma_addr == DMA_MAPPING_ERROR) {
|
||||
ret = -ENOSPC;
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -372,7 +370,7 @@ out_free:
|
|||
out_err:
|
||||
zpci_err("map error:\n");
|
||||
zpci_err_dma(ret, pa);
|
||||
return S390_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
|
||||
|
@ -406,7 +404,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
|
|||
dma_addr_t map;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
page = alloc_pages(flag, get_order(size));
|
||||
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
|
@ -449,7 +447,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
int ret;
|
||||
|
||||
dma_addr_base = dma_alloc_address(dev, nr_pages);
|
||||
if (dma_addr_base == S390_MAPPING_ERROR)
|
||||
if (dma_addr_base == DMA_MAPPING_ERROR)
|
||||
return -ENOMEM;
|
||||
|
||||
dma_addr = dma_addr_base;
|
||||
|
@ -496,7 +494,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
for (i = 1; i < nr_elements; i++) {
|
||||
s = sg_next(s);
|
||||
|
||||
s->dma_address = S390_MAPPING_ERROR;
|
||||
s->dma_address = DMA_MAPPING_ERROR;
|
||||
s->dma_length = 0;
|
||||
|
||||
if (s->offset || (size & ~PAGE_MASK) ||
|
||||
|
@ -546,11 +544,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
|||
}
|
||||
}
|
||||
|
||||
static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr == S390_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
int zpci_dma_init_device(struct zpci_dev *zdev)
|
||||
{
|
||||
int rc;
|
||||
|
@ -675,7 +668,6 @@ const struct dma_map_ops s390_pci_dma_ops = {
|
|||
.unmap_sg = s390_dma_unmap_sg,
|
||||
.map_page = s390_dma_map_pages,
|
||||
.unmap_page = s390_dma_unmap_pages,
|
||||
.mapping_error = s390_mapping_error,
|
||||
/* dma_supported is unconditionally true without a callback */
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
|
||||
|
|
|
@ -7,7 +7,6 @@ config SUPERH
|
|||
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
|
||||
select HAVE_PATA_PLATFORM
|
||||
select CLKDEV_LOOKUP
|
||||
select DMA_DIRECT_OPS
|
||||
select HAVE_IDE if HAS_IOPORT_MAP
|
||||
select HAVE_MEMBLOCK_NODE_MAP
|
||||
select ARCH_DISCARD_MEMBLOCK
|
||||
|
|
|
@ -40,7 +40,6 @@ config SPARC
|
|||
select MODULES_USE_ELF_RELA
|
||||
select ODD_RT_SIGACTION
|
||||
select OLD_SIGSUSPEND
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select CPU_NO_EFFICIENT_FFS
|
||||
select LOCKDEP_SMALL if LOCKDEP
|
||||
select NEED_DMA_MAP_STATE
|
||||
|
@ -49,7 +48,6 @@ config SPARC
|
|||
config SPARC32
|
||||
def_bool !64BIT
|
||||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||
select DMA_DIRECT_OPS
|
||||
select GENERIC_ATOMIC64
|
||||
select CLZ_TAB
|
||||
select HAVE_UID16
|
||||
|
|
|

@@ -2,9 +2,7 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>
#include <asm/cpu_type.h>

extern const struct dma_map_ops *dma_ops;

@@ -14,11 +12,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
return &dma_direct_ops;
return NULL;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
return &dma_direct_ops;
return NULL;
#endif
return dma_ops;
}

@ -91,54 +91,10 @@ extern int isa_dma_bridge_buggy;
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_SPARC32
|
||||
|
||||
/* Routines for data transfer buffers. */
|
||||
struct device;
|
||||
struct scatterlist;
|
||||
|
||||
struct sparc32_dma_ops {
|
||||
__u32 (*get_scsi_one)(struct device *, char *, unsigned long);
|
||||
void (*get_scsi_sgl)(struct device *, struct scatterlist *, int);
|
||||
void (*release_scsi_one)(struct device *, __u32, unsigned long);
|
||||
void (*release_scsi_sgl)(struct device *, struct scatterlist *,int);
|
||||
#ifdef CONFIG_SBUS
|
||||
int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int);
|
||||
void (*unmap_dma_area)(struct device *, unsigned long, int);
|
||||
#endif
|
||||
};
|
||||
extern const struct sparc32_dma_ops *sparc32_dma_ops;
|
||||
|
||||
#define mmu_get_scsi_one(dev,vaddr,len) \
|
||||
sparc32_dma_ops->get_scsi_one(dev, vaddr, len)
|
||||
#define mmu_get_scsi_sgl(dev,sg,sz) \
|
||||
sparc32_dma_ops->get_scsi_sgl(dev, sg, sz)
|
||||
#define mmu_release_scsi_one(dev,vaddr,len) \
|
||||
sparc32_dma_ops->release_scsi_one(dev, vaddr,len)
|
||||
#define mmu_release_scsi_sgl(dev,sg,sz) \
|
||||
sparc32_dma_ops->release_scsi_sgl(dev, sg, sz)
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
/*
|
||||
* mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
|
||||
*
|
||||
* The mmu_map_dma_area establishes two mappings in one go.
|
||||
* These mappings point to pages normally mapped at 'va' (linear address).
|
||||
* First mapping is for CPU visible address at 'a', uncached.
|
||||
* This is an alias, but it works because it is an uncached mapping.
|
||||
* Second mapping is for device visible address, or "bus" address.
|
||||
* The bus address is returned at '*pba'.
|
||||
*
|
||||
* These functions seem distinct, but are hard to split.
|
||||
* On sun4m, page attributes depend on the CPU type, so we have to
|
||||
* know if we are mapping RAM or I/O, so it has to be an additional argument
|
||||
* to a separate mapping function for CPU visible mappings.
|
||||
*/
|
||||
#define sbus_map_dma_area(dev,pba,va,a,len) \
|
||||
sparc32_dma_ops->map_dma_area(dev, pba, va, a, len)
|
||||
#define sbus_unmap_dma_area(dev,ba,len) \
|
||||
sparc32_dma_ops->unmap_dma_area(dev, ba, len)
|
||||
#endif /* CONFIG_SBUS */
|
||||
|
||||
unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len);
|
||||
bool sparc_dma_free_resource(void *cpu_addr, size_t size);
|
||||
#endif
|
||||
|
||||
#endif /* !(_ASM_SPARC_DMA_H) */
|
||||
|
|
|
@ -254,4 +254,13 @@ extern int leon_ipi_irq;
|
|||
#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
|
||||
#define _SRMMU_PTE_PMASK_LEON 0xffffffff
|
||||
|
||||
/*
|
||||
* On LEON PCI Memory space is mapped 1:1 with physical address space.
|
||||
*
|
||||
* I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses
|
||||
* are converted into CPU addresses to virtual addresses that are mapped with
|
||||
* MMU to the PCI Host PCI I/O space window which are translated to the low
|
||||
* 64Kbytes by the Host controller.
|
||||
*/
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,9 +1,54 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef ___ASM_SPARC_PCI_H
|
||||
#define ___ASM_SPARC_PCI_H
|
||||
#if defined(__sparc__) && defined(__arch64__)
|
||||
#include <asm/pci_64.h>
|
||||
|
||||
|
||||
/* Can be used to override the logic in pci_scan_bus for skipping
|
||||
* already-configured bus numbers - to be used for buggy BIOSes
|
||||
* or architectures with incomplete PCI setup by the loader.
|
||||
*/
|
||||
#define pcibios_assign_all_busses() 0
|
||||
|
||||
#define PCIBIOS_MIN_IO 0UL
|
||||
#define PCIBIOS_MIN_MEM 0UL
|
||||
|
||||
#define PCI_IRQ_NONE 0xffffffff
|
||||
|
||||
|
||||
#ifdef CONFIG_SPARC64
|
||||
|
||||
/* PCI IOMMU mapping bypass support. */
|
||||
|
||||
/* PCI 64-bit addressing works for all slots on all controller
|
||||
* types on sparc64. However, it requires that the device
|
||||
* can drive enough of the 64 bits.
|
||||
*/
|
||||
#define PCI64_REQUIRED_MASK (~(u64)0)
|
||||
#define PCI64_ADDR_BASE 0xfffc000000000000UL
|
||||
|
||||
/* Return the index of the PCI controller for device PDEV. */
|
||||
int pci_domain_nr(struct pci_bus *bus);
|
||||
static inline int pci_proc_domain(struct pci_bus *bus)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
|
||||
#define HAVE_PCI_MMAP
|
||||
#define arch_can_pci_mmap_io() 1
|
||||
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
|
||||
#define get_pci_unmapped_area get_fb_unmapped_area
|
||||
|
||||
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
|
||||
#endif /* CONFIG_SPARC64 */
|
||||
|
||||
#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI)
|
||||
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
|
||||
{
|
||||
return PCI_IRQ_NONE;
|
||||
}
|
||||
#else
|
||||
#include <asm/pci_32.h>
|
||||
#endif
|
||||
#include <asm-generic/pci.h>
|
||||
#endif
|
||||
|
||||
#endif /* ___ASM_SPARC_PCI_H */
|
||||
|
|
|
@ -1,41 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __SPARC_PCI_H
|
||||
#define __SPARC_PCI_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
/* Can be used to override the logic in pci_scan_bus for skipping
|
||||
* already-configured bus numbers - to be used for buggy BIOSes
|
||||
* or architectures with incomplete PCI setup by the loader.
|
||||
*/
|
||||
#define pcibios_assign_all_busses() 0
|
||||
|
||||
#define PCIBIOS_MIN_IO 0UL
|
||||
#define PCIBIOS_MIN_MEM 0UL
|
||||
|
||||
#define PCI_IRQ_NONE 0xffffffff
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#ifndef CONFIG_LEON_PCI
|
||||
/* generic pci stuff */
|
||||
#include <asm-generic/pci.h>
|
||||
#else
|
||||
/*
|
||||
* On LEON PCI Memory space is mapped 1:1 with physical address space.
|
||||
*
|
||||
* I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses
|
||||
* are converted into CPU addresses to virtual addresses that are mapped with
|
||||
* MMU to the PCI Host PCI I/O space window which are translated to the low
|
||||
* 64Kbytes by the Host controller.
|
||||
*/
|
||||
|
||||
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
|
||||
{
|
||||
return PCI_IRQ_NONE;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __SPARC_PCI_H */
|
|
@ -1,52 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __SPARC64_PCI_H
|
||||
#define __SPARC64_PCI_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
/* Can be used to override the logic in pci_scan_bus for skipping
|
||||
* already-configured bus numbers - to be used for buggy BIOSes
|
||||
* or architectures with incomplete PCI setup by the loader.
|
||||
*/
|
||||
#define pcibios_assign_all_busses() 0
|
||||
|
||||
#define PCIBIOS_MIN_IO 0UL
|
||||
#define PCIBIOS_MIN_MEM 0UL
|
||||
|
||||
#define PCI_IRQ_NONE 0xffffffff
|
||||
|
||||
/* PCI IOMMU mapping bypass support. */
|
||||
|
||||
/* PCI 64-bit addressing works for all slots on all controller
|
||||
* types on sparc64. However, it requires that the device
|
||||
* can drive enough of the 64 bits.
|
||||
*/
|
||||
#define PCI64_REQUIRED_MASK (~(u64)0)
|
||||
#define PCI64_ADDR_BASE 0xfffc000000000000UL
|
||||
|
||||
/* Return the index of the PCI controller for device PDEV. */
|
||||
|
||||
int pci_domain_nr(struct pci_bus *bus);
|
||||
static inline int pci_proc_domain(struct pci_bus *bus)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
|
||||
|
||||
#define HAVE_PCI_MMAP
|
||||
#define arch_can_pci_mmap_io() 1
|
||||
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
|
||||
#define get_pci_unmapped_area get_fb_unmapped_area
|
||||
|
||||
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
|
||||
{
|
||||
return PCI_IRQ_NONE;
|
||||
}
|
||||
|
||||
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __SPARC64_PCI_H */
|
|
@ -314,7 +314,7 @@ bad:
|
|||
bad_no_ctx:
|
||||
if (printk_ratelimit())
|
||||
WARN_ON(1);
|
||||
return SPARC_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
|
||||
|
@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
|
|||
|
||||
if (outcount < incount) {
|
||||
outs = sg_next(outs);
|
||||
outs->dma_address = SPARC_MAPPING_ERROR;
|
||||
outs->dma_address = DMA_MAPPING_ERROR;
|
||||
outs->dma_length = 0;
|
||||
}
|
||||
|
||||
|
@ -573,7 +573,7 @@ iommu_map_failed:
|
|||
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
|
||||
IOMMU_ERROR_CODE);
|
||||
|
||||
s->dma_address = SPARC_MAPPING_ERROR;
|
||||
s->dma_address = DMA_MAPPING_ERROR;
|
||||
s->dma_length = 0;
|
||||
}
|
||||
if (s == outs)
|
||||
|
@ -741,11 +741,6 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
|
|||
spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
}
|
||||
|
||||
static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr == SPARC_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static int dma_4u_supported(struct device *dev, u64 device_mask)
|
||||
{
|
||||
struct iommu *iommu = dev->archdata.iommu;
|
||||
|
@ -771,7 +766,6 @@ static const struct dma_map_ops sun4u_dma_ops = {
|
|||
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
|
||||
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
|
||||
.dma_supported = dma_4u_supported,
|
||||
.mapping_error = dma_4u_mapping_error,
|
||||
};
|
||||
|
||||
const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
|
||||
|
|
|

@@ -48,6 +48,4 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}

#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0)

#endif /* _IOMMU_COMMON_H */

@ -52,8 +52,6 @@
|
|||
#include <asm/io-unit.h>
|
||||
#include <asm/leon.h>
|
||||
|
||||
const struct sparc32_dma_ops *sparc32_dma_ops;
|
||||
|
||||
/* This function must make sure that caches and memory are coherent after DMA
|
||||
* On LEON systems without cache snooping it flushes the entire D-CACHE.
|
||||
*/
|
||||
|
@ -247,6 +245,53 @@ static void _sparc_free_io(struct resource *res)
|
|||
release_resource(res);
|
||||
}
|
||||
|
||||
unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
|
||||
{
|
||||
struct resource *res;
|
||||
|
||||
res = kzalloc(sizeof(*res), GFP_KERNEL);
|
||||
if (!res)
|
||||
return 0;
|
||||
res->name = dev->of_node->full_name;
|
||||
|
||||
if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
|
||||
_sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
|
||||
printk("%s: cannot occupy 0x%zx", __func__, len);
|
||||
kfree(res);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return res->start;
|
||||
}
|
||||
|
||||
bool sparc_dma_free_resource(void *cpu_addr, size_t size)
|
||||
{
|
||||
unsigned long addr = (unsigned long)cpu_addr;
|
||||
struct resource *res;
|
||||
|
||||
res = lookup_resource(&_sparc_dvma, addr);
|
||||
if (!res) {
|
||||
printk("%s: cannot free %p\n", __func__, cpu_addr);
|
||||
return false;
|
||||
}
|
||||
|
||||
if ((addr & (PAGE_SIZE - 1)) != 0) {
|
||||
printk("%s: unaligned va %p\n", __func__, cpu_addr);
|
||||
return false;
|
||||
}
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
if (resource_size(res) != size) {
|
||||
printk("%s: region 0x%lx asked 0x%zx\n",
|
||||
__func__, (long)resource_size(res), size);
|
||||
return false;
|
||||
}
|
||||
|
||||
release_resource(res);
|
||||
kfree(res);
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
|
||||
void sbus_set_sbus64(struct device *dev, int x)
|
||||
|
@ -255,171 +300,6 @@ void sbus_set_sbus64(struct device *dev, int x)
|
|||
}
|
||||
EXPORT_SYMBOL(sbus_set_sbus64);
|
||||
|
||||
/*
|
||||
* Allocate a chunk of memory suitable for DMA.
|
||||
* Typically devices use them for control blocks.
|
||||
* CPU may access them without any explicit flushing.
|
||||
*/
|
||||
static void *sbus_alloc_coherent(struct device *dev, size_t len,
|
||||
dma_addr_t *dma_addrp, gfp_t gfp,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct platform_device *op = to_platform_device(dev);
|
||||
unsigned long len_total = PAGE_ALIGN(len);
|
||||
unsigned long va;
|
||||
struct resource *res;
|
||||
int order;
|
||||
|
||||
/* XXX why are some lengths signed, others unsigned? */
|
||||
if (len <= 0) {
|
||||
return NULL;
|
||||
}
|
||||
/* XXX So what is maxphys for us and how do drivers know it? */
|
||||
if (len > 256*1024) { /* __get_free_pages() limit */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
order = get_order(len_total);
|
||||
va = __get_free_pages(gfp, order);
|
||||
if (va == 0)
|
||||
goto err_nopages;
|
||||
|
||||
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
|
||||
goto err_nomem;
|
||||
|
||||
if (allocate_resource(&_sparc_dvma, res, len_total,
|
||||
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
|
||||
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
|
||||
goto err_nova;
|
||||
}
|
||||
|
||||
// XXX The sbus_map_dma_area does this for us below, see comments.
|
||||
// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
|
||||
/*
|
||||
* XXX That's where sdev would be used. Currently we load
|
||||
* all iommu tables with the same translations.
|
||||
*/
|
||||
if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
|
||||
goto err_noiommu;
|
||||
|
||||
res->name = op->dev.of_node->full_name;
|
||||
|
||||
return (void *)(unsigned long)res->start;
|
||||
|
||||
err_noiommu:
|
||||
release_resource(res);
|
||||
err_nova:
|
||||
kfree(res);
|
||||
err_nomem:
|
||||
free_pages(va, order);
|
||||
err_nopages:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
|
||||
dma_addr_t ba, unsigned long attrs)
|
||||
{
|
||||
struct resource *res;
|
||||
struct page *pgv;
|
||||
|
||||
if ((res = lookup_resource(&_sparc_dvma,
|
||||
(unsigned long)p)) == NULL) {
|
||||
printk("sbus_free_consistent: cannot free %p\n", p);
|
||||
return;
|
||||
}
|
||||
|
||||
if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
|
||||
printk("sbus_free_consistent: unaligned va %p\n", p);
|
||||
return;
|
||||
}
|
||||
|
||||
n = PAGE_ALIGN(n);
|
||||
if (resource_size(res) != n) {
|
||||
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
|
||||
(long)resource_size(res), n);
|
||||
return;
|
||||
}
|
||||
|
||||
release_resource(res);
|
||||
kfree(res);
|
||||
|
||||
pgv = virt_to_page(p);
|
||||
sbus_unmap_dma_area(dev, ba, n);
|
||||
|
||||
__free_pages(pgv, get_order(n));
|
||||
}
|
||||
|
||||
/*
|
||||
* Map a chunk of memory so that devices can see it.
|
||||
* CPU view of this memory may be inconsistent with
|
||||
* a device view and explicit flushing is necessary.
|
||||
*/
|
||||
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t len,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
void *va = page_address(page) + offset;
|
||||
|
||||
/* XXX why are some lengths signed, others unsigned? */
|
||||
if (len <= 0) {
|
||||
return 0;
|
||||
}
|
||||
/* XXX So what is maxphys for us and how do drivers know it? */
|
||||
if (len > 256*1024) { /* __get_free_pages() limit */
|
||||
return 0;
|
||||
}
|
||||
return mmu_get_scsi_one(dev, va, len);
|
||||
}
|
||||
|
||||
static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
mmu_release_scsi_one(dev, ba, n);
|
||||
}
|
||||
|
||||
static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
mmu_get_scsi_sgl(dev, sg, n);
|
||||
return n;
|
||||
}
|
||||
|
||||
static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
mmu_release_scsi_sgl(dev, sg, n);
|
||||
}
|
||||
|
||||
static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||
int n, enum dma_data_direction dir)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
int n, enum dma_data_direction dir)
|
||||
{
|
||||
BUG();
|
||||
}
|
||||
|
||||
static int sbus_dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct dma_map_ops sbus_dma_ops = {
|
||||
.alloc = sbus_alloc_coherent,
|
||||
.free = sbus_free_coherent,
|
||||
.map_page = sbus_map_page,
|
||||
.unmap_page = sbus_unmap_page,
|
||||
.map_sg = sbus_map_sg,
|
||||
.unmap_sg = sbus_unmap_sg,
|
||||
.sync_sg_for_cpu = sbus_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = sbus_sync_sg_for_device,
|
||||
.dma_supported = sbus_dma_supported,
|
||||
};
|
||||
|
||||
static int __init sparc_register_ioport(void)
|
||||
{
|
||||
register_proc_sparc_ioport();
|
||||
|
@ -438,45 +318,30 @@ arch_initcall(sparc_register_ioport);
|
|||
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
unsigned long len_total = PAGE_ALIGN(size);
|
||||
unsigned long addr;
|
||||
void *va;
|
||||
struct resource *res;
|
||||
int order;
|
||||
|
||||
if (size == 0) {
|
||||
if (!size || size > 256 * 1024) /* __get_free_pages() limit */
|
||||
return NULL;
|
||||
}
|
||||
if (size > 256*1024) { /* __get_free_pages() limit */
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
|
||||
if (!va) {
|
||||
printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
order = get_order(len_total);
|
||||
va = (void *) __get_free_pages(gfp, order);
|
||||
if (va == NULL) {
|
||||
printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT);
|
||||
goto err_nopages;
|
||||
}
|
||||
|
||||
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
|
||||
printk("%s: no core\n", __func__);
|
||||
addr = sparc_dma_alloc_resource(dev, size);
|
||||
if (!addr)
|
||||
goto err_nomem;
|
||||
}
|
||||
|
||||
if (allocate_resource(&_sparc_dvma, res, len_total,
|
||||
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
|
||||
printk("%s: cannot occupy 0x%lx", __func__, len_total);
|
||||
goto err_nova;
|
||||
}
|
||||
srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
|
||||
srmmu_mapiorange(0, virt_to_phys(va), addr, size);
|
||||
|
||||
*dma_handle = virt_to_phys(va);
|
||||
return (void *) res->start;
|
||||
return (void *)addr;
|
||||
|
||||
err_nova:
|
||||
kfree(res);
|
||||
err_nomem:
|
||||
free_pages((unsigned long)va, order);
|
||||
err_nopages:
|
||||
free_pages((unsigned long)va, get_order(size));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -491,31 +356,11 @@ err_nopages:
|
|||
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_addr, unsigned long attrs)
|
||||
{
|
||||
struct resource *res;
|
||||
|
||||
if ((res = lookup_resource(&_sparc_dvma,
|
||||
(unsigned long)cpu_addr)) == NULL) {
|
||||
printk("%s: cannot free %p\n", __func__, cpu_addr);
|
||||
if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
|
||||
return;
|
||||
}
|
||||
|
||||
if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) {
|
||||
printk("%s: unaligned va %p\n", __func__, cpu_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
if (resource_size(res) != size) {
|
||||
printk("%s: region 0x%lx asked 0x%zx\n", __func__,
|
||||
(long)resource_size(res), size);
|
||||
return;
|
||||
}
|
||||
|
||||
dma_make_coherent(dma_addr, size);
|
||||
srmmu_unmapiorange((unsigned long)cpu_addr, size);
|
||||
|
||||
release_resource(res);
|
||||
kfree(res);
|
||||
free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
|
||||
}
|
||||
|
||||
|
@ -528,7 +373,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
|
|||
dma_make_coherent(paddr, PAGE_ALIGN(size));
|
||||
}
|
||||
|
||||
const struct dma_map_ops *dma_ops = &sbus_dma_ops;
|
||||
const struct dma_map_ops *dma_ops;
|
||||
EXPORT_SYMBOL(dma_ops);
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
|
|
@ -414,12 +414,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
|
|||
bad:
|
||||
if (printk_ratelimit())
|
||||
WARN_ON(1);
|
||||
return SPARC_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
iommu_map_fail:
|
||||
local_irq_restore(flags);
|
||||
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
|
||||
return SPARC_MAPPING_ERROR;
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
|
||||
|
@ -592,7 +592,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
|
|||
|
||||
if (outcount < incount) {
|
||||
outs = sg_next(outs);
|
||||
outs->dma_address = SPARC_MAPPING_ERROR;
|
||||
outs->dma_address = DMA_MAPPING_ERROR;
|
||||
outs->dma_length = 0;
|
||||
}
|
||||
|
||||
|
@ -609,7 +609,7 @@ iommu_map_failed:
|
|||
iommu_tbl_range_free(tbl, vaddr, npages,
|
||||
IOMMU_ERROR_CODE);
|
||||
/* XXX demap? XXX */
|
||||
s->dma_address = SPARC_MAPPING_ERROR;
|
||||
s->dma_address = DMA_MAPPING_ERROR;
|
||||
s->dma_length = 0;
|
||||
}
|
||||
if (s == outs)
|
||||
|
@ -688,11 +688,6 @@ static int dma_4v_supported(struct device *dev, u64 device_mask)
|
|||
return pci64_dma_supported(to_pci_dev(dev), device_mask);
|
||||
}
|
||||
|
||||
static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return dma_addr == SPARC_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
static const struct dma_map_ops sun4v_dma_ops = {
|
||||
.alloc = dma_4v_alloc_coherent,
|
||||
.free = dma_4v_free_coherent,
|
||||
|
@ -701,7 +696,6 @@ static const struct dma_map_ops sun4v_dma_ops = {
|
|||
.map_sg = dma_4v_map_sg,
|
||||
.unmap_sg = dma_4v_unmap_sg,
|
||||
.dma_supported = dma_4v_supported,
|
||||
.mapping_error = dma_4v_mapping_error,
|
||||
};
|
||||
|
||||
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
|
@ -140,34 +140,44 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
|
|||
return vaddr;
|
||||
}
|
||||
|
||||
static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
|
||||
static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t len, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
void *vaddr = page_address(page) + offset;
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long ret, flags;
|
||||
|
||||
/* XXX So what is maxphys for us and how do drivers know it? */
|
||||
if (!len || len > 256 * 1024)
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
|
||||
static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
struct scatterlist *sg;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
|
||||
sg->dma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
return nents;
|
||||
}
|
||||
|
||||
static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
|
||||
static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long flags;
|
||||
|
@ -181,34 +191,47 @@ static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned lo
|
|||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
|
||||
static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long flags;
|
||||
unsigned long vaddr, len;
|
||||
unsigned long flags, vaddr, len;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&iounit->lock, flags);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
|
||||
vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
|
||||
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
|
||||
for (len += vaddr; vaddr < len; vaddr++)
|
||||
clear_bit(vaddr, iounit->bmap);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
spin_unlock_irqrestore(&iounit->lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SBUS
|
||||
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
|
||||
static void *iounit_alloc(struct device *dev, size_t len,
|
||||
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
struct iounit_struct *iounit = dev->archdata.iommu;
|
||||
unsigned long page, end;
|
||||
unsigned long va, addr, page, end, ret;
|
||||
pgprot_t dvma_prot;
|
||||
iopte_t __iomem *iopte;
|
||||
|
||||
*pba = addr;
|
||||
/* XXX So what is maxphys for us and how do drivers know it? */
|
||||
if (!len || len > 256 * 1024)
|
||||
return NULL;
|
||||
|
||||
len = PAGE_ALIGN(len);
|
||||
va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
|
||||
if (!va)
|
||||
return NULL;
|
||||
|
||||
addr = ret = sparc_dma_alloc_resource(dev, len);
|
||||
if (!addr)
|
||||
goto out_free_pages;
|
||||
*dma_handle = addr;
|
||||
|
||||
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
|
||||
end = PAGE_ALIGN((addr + len));
|
||||
|
@ -237,27 +260,32 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
|
|||
flush_cache_all();
|
||||
flush_tlb_all();
|
||||
|
||||
return 0;
|
||||
return (void *)ret;
|
||||
|
||||
out_free_pages:
|
||||
free_pages(va, get_order(len));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
|
||||
static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_addr, unsigned long attrs)
|
||||
{
|
||||
/* XXX Somebody please fill this in */
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct sparc32_dma_ops iounit_dma_ops = {
|
||||
.get_scsi_one = iounit_get_scsi_one,
|
||||
.get_scsi_sgl = iounit_get_scsi_sgl,
|
||||
.release_scsi_one = iounit_release_scsi_one,
|
||||
.release_scsi_sgl = iounit_release_scsi_sgl,
|
||||
static const struct dma_map_ops iounit_dma_ops = {
|
||||
#ifdef CONFIG_SBUS
|
||||
.map_dma_area = iounit_map_dma_area,
|
||||
.unmap_dma_area = iounit_unmap_dma_area,
|
||||
.alloc = iounit_alloc,
|
||||
.free = iounit_free,
|
||||
#endif
|
||||
.map_page = iounit_map_page,
|
||||
.unmap_page = iounit_unmap_page,
|
||||
.map_sg = iounit_map_sg,
|
||||
.unmap_sg = iounit_unmap_sg,
|
||||
};
|
||||
|
||||
void __init ld_mmu_iounit(void)
|
||||
{
|
||||
sparc32_dma_ops = &iounit_dma_ops;
|
||||
dma_ops = &iounit_dma_ops;
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
|
@ -205,59 +205,67 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
|
|||
return busa0;
|
||||
}
|
||||
|
||||
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
|
||||
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t len)
|
||||
{
|
||||
unsigned long off;
|
||||
int npages;
|
||||
struct page *page;
|
||||
u32 busa;
|
||||
|
||||
off = (unsigned long)vaddr & ~PAGE_MASK;
|
||||
npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
|
||||
busa = iommu_get_one(dev, page, npages);
|
||||
return busa + off;
|
||||
void *vaddr = page_address(page) + offset;
|
||||
unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
|
||||
unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
/* XXX So what is maxphys for us and how do drivers know it? */
|
||||
if (!len || len > 256 * 1024)
|
||||
return DMA_MAPPING_ERROR;
|
||||
return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
|
||||
}
|
||||
|
||||
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
|
||||
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
|
||||
struct page *page, unsigned long offset, size_t len,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
flush_page_for_dma(0);
|
||||
return iommu_get_scsi_one(dev, vaddr, len);
|
||||
return __sbus_iommu_map_page(dev, page, offset, len);
|
||||
}
|
||||
|
||||
static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
|
||||
static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
|
||||
struct page *page, unsigned long offset, size_t len,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
|
||||
void *vaddr = page_address(page) + offset;
|
||||
unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
|
||||
|
||||
while(page < ((unsigned long)(vaddr + len))) {
|
||||
flush_page_for_dma(page);
|
||||
page += PAGE_SIZE;
|
||||
while (p < (unsigned long)vaddr + len) {
|
||||
flush_page_for_dma(p);
|
||||
p += PAGE_SIZE;
|
||||
}
|
||||
return iommu_get_scsi_one(dev, vaddr, len);
|
||||
|
||||
return __sbus_iommu_map_page(dev, page, offset, len);
|
||||
}
|
||||
|
||||
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
|
||||
static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
int n;
|
||||
struct scatterlist *sg;
|
||||
int i, n;
|
||||
|
||||
flush_page_for_dma(0);
|
||||
while (sz != 0) {
|
||||
--sz;
|
||||
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
|
||||
sg->dma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
return nents;
|
||||
}
|
||||
|
||||
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
|
||||
static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
unsigned long page, oldpage = 0;
|
||||
int n, i;
|
||||
|
||||
while(sz != 0) {
|
||||
--sz;
|
||||
struct scatterlist *sg;
|
||||
int i, j, n;
|
||||
|
||||
for_each_sg(sgl, sg, nents, j) {
|
||||
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
|
||||
|
||||
/*
|
||||
|
@ -277,8 +285,9 @@ static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg
|
|||
|
||||
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
|
||||
sg->dma_length = sg->length;
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
||||
return nents;
|
||||
}
|
||||
|
||||
static void iommu_release_one(struct device *dev, u32 busa, int npages)
|
||||
|
@@ -297,40 +306,52 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long off;
	unsigned long off = dma_addr & ~PAGE_MASK;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int n;

	while(sz != 0) {
		--sz;
	struct scatterlist *sg;
	int i, n;

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
		unsigned long addr, int len)
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);
@@ -385,16 +406,25 @@ static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);
@@ -408,38 +438,40 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one = iommu_get_scsi_one_gflush,
	.get_scsi_sgl = iommu_get_scsi_sgl_gflush,
	.release_scsi_one = iommu_release_scsi_one,
	.release_scsi_sgl = iommu_release_scsi_sgl,
static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.map_dma_area = iommu_map_dma_area,
	.unmap_dma_area = iommu_unmap_dma_area,
	.alloc = sbus_iommu_alloc,
	.free = sbus_iommu_free,
#endif
	.map_page = sbus_iommu_map_page_gflush,
	.unmap_page = sbus_iommu_unmap_page,
	.map_sg = sbus_iommu_map_sg_gflush,
	.unmap_sg = sbus_iommu_unmap_sg,
};

static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one = iommu_get_scsi_one_pflush,
	.get_scsi_sgl = iommu_get_scsi_sgl_pflush,
	.release_scsi_one = iommu_release_scsi_one,
	.release_scsi_sgl = iommu_release_scsi_sgl,
static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.map_dma_area = iommu_map_dma_area,
	.unmap_dma_area = iommu_unmap_dma_area,
	.alloc = sbus_iommu_alloc,
	.free = sbus_iommu_free,
#endif
	.map_page = sbus_iommu_map_page_pflush,
	.unmap_page = sbus_iommu_unmap_page,
	.map_sg = sbus_iommu_map_sg_pflush,
	.unmap_sg = sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter of what page is it */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
@@ -4,7 +4,6 @@ config UNICORE32
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_MIGHT_HAVE_PC_PARPORT
	select ARCH_MIGHT_HAVE_PC_SERIO
	select DMA_DIRECT_OPS
	select HAVE_GENERIC_DMA_COHERENT
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_BZIP2
@@ -66,7 +66,6 @@ config X86
	select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
	select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_SG_CHAIN
	select ARCH_HAS_STRICT_KERNEL_RWX
	select ARCH_HAS_STRICT_MODULE_RWX
	select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE

@@ -90,7 +89,6 @@ config X86
	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
	select CLOCKSOURCE_WATCHDOG
	select DCACHE_WORD_ACCESS
	select DMA_DIRECT_OPS
	select EDAC_ATOMIC_SCRUB
	select EDAC_SUPPORT
	select GENERIC_CLOCKEVENTS
@@ -50,8 +50,6 @@ static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when an mapping is reused. With it true the GART is

@@ -74,8 +72,6 @@ static u32 gart_unmapped_entry;
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else

@@ -155,9 +151,6 @@ static void flush_gart(void)

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

@@ -184,14 +177,6 @@ static void iommu_full(struct device *dev, size_t size, int dir)
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
@@ -220,7 +205,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {

@@ -229,7 +214,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {

@@ -271,7 +256,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	if (dma_addr == DMA_MAPPING_ERROR ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

@@ -315,7 +300,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;

@@ -471,7 +456,7 @@ error:

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
		s->dma_address = DMA_MAPPING_ERROR;
	return 0;
}

@@ -490,7 +475,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == bad_dma_addr))
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:

@@ -507,11 +492,6 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -695,7 +675,6 @@ static const struct dma_map_ops gart_dma_ops = {
	.unmap_page = gart_unmap_page,
	.alloc = gart_alloc_coherent,
	.free = gart_free_coherent,
	.mapping_error = gart_mapping_error,
	.dma_supported = dma_direct_supported,
};

@@ -730,7 +709,6 @@ int __init gart_iommu_init(void)
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

@@ -774,29 +752,12 @@ int __init gart_iommu_init(void)
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved = iommu_size;
	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_addr = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*

@@ -838,8 +799,6 @@ int __init gart_iommu_init(void)
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;

@@ -853,16 +812,6 @@ void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
@@ -51,8 +51,6 @@
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

#define CALGARY_MAPPING_ERROR 0

#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else

@@ -157,8 +155,6 @@ static const unsigned long phb_debug_offsets[] = {

#define PHB_DEBUG_STUFF_OFFSET 0x0020

#define EMERGENCY_PAGES 32 /* = 128KB */

unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
static int translate_empty_slots __read_mostly = 0;
static int calgary_detected __read_mostly = 0;

@@ -255,7 +251,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
		if (panic_on_overflow)
			panic("Calgary: fix the allocator.\n");
		else
			return CALGARY_MAPPING_ERROR;
			return DMA_MAPPING_ERROR;
		}
	}

@@ -274,11 +270,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
	dma_addr_t ret;

	entry = iommu_range_alloc(dev, tbl, npages);

	if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
	if (unlikely(entry == DMA_MAPPING_ERROR)) {
		pr_warn("failed to allocate %u pages in iommu %p\n",
			npages, tbl);
		return CALGARY_MAPPING_ERROR;
		return DMA_MAPPING_ERROR;
	}

	/* set the return dma address */

@@ -294,12 +289,10 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
	unsigned int npages)
{
	unsigned long entry;
	unsigned long badend;
	unsigned long flags;

	/* were we called with bad_dma_address? */
	badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
	if (unlikely(dma_addr < badend)) {
	if (unlikely(dma_addr == DMA_MAPPING_ERROR)) {
		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
		       "address 0x%Lx\n", dma_addr);
		return;
@@ -383,7 +376,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
		npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);

		entry = iommu_range_alloc(dev, tbl, npages);
		if (entry == CALGARY_MAPPING_ERROR) {
		if (entry == DMA_MAPPING_ERROR) {
			/* makes sure unmap knows to stop */
			s->dma_length = 0;
			goto error;

@@ -401,7 +394,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
	calgary_unmap_sg(dev, sg, nelems, dir, 0);
	for_each_sg(sg, s, nelems, i) {
		sg->dma_address = CALGARY_MAPPING_ERROR;
		sg->dma_address = DMA_MAPPING_ERROR;
		sg->dma_length = 0;
	}
	return 0;

@@ -454,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,

	/* set up tces to cover the allocated range */
	mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
	if (mapping == CALGARY_MAPPING_ERROR)
	if (mapping == DMA_MAPPING_ERROR)
		goto free;
	*dma_handle = mapping;
	return ret;

@@ -479,11 +472,6 @@ static void calgary_free_coherent(struct device *dev, size_t size,
	free_pages((unsigned long)vaddr, get_order(size));
}

static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == CALGARY_MAPPING_ERROR;
}

static const struct dma_map_ops calgary_dma_ops = {
	.alloc = calgary_alloc_coherent,
	.free = calgary_free_coherent,

@@ -491,7 +479,6 @@ static const struct dma_map_ops calgary_dma_ops = {
	.unmap_sg = calgary_unmap_sg,
	.map_page = calgary_map_page,
	.unmap_page = calgary_unmap_page,
	.mapping_error = calgary_mapping_error,
	.dma_supported = dma_direct_supported,
};

@@ -739,9 +726,6 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
	u64 start;
	struct iommu_table *tbl = pci_iommu(dev->bus);

	/* reserve EMERGENCY_PAGES from bad_dma_address and up */
	iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);

	/* avoid the BIOS/VGA first 640KB-1MB region */
	/* for CalIOC2 - avoid the entire first MB */
	if (is_calgary(dev->device)) {
@@ -17,7 +17,7 @@

static bool disable_dac_quirk __read_mostly;

const struct dma_map_ops *dma_ops = &dma_direct_ops;
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
@@ -62,10 +62,8 @@ IOMMU_INIT(pci_swiotlb_detect_4gb,

void __init pci_swiotlb_init(void)
{
	if (swiotlb) {
	if (swiotlb)
		swiotlb_init(0);
		dma_ops = &swiotlb_dma_ops;
	}
}

void __init pci_swiotlb_late_init(void)
@@ -380,13 +380,6 @@ void __init mem_encrypt_init(void)
	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, DMA operations cannot use encryption, we need to use
	 * SWIOTLB to bounce buffer DMA operation.
	 */
	if (sev_active())
		dma_ops = &swiotlb_dma_ops;

	/*
	 * With SEV, we need to unroll the rep string I/O instructions.
	 */
@@ -168,7 +168,6 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev)
		return;
	pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
	pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
	pdev->dev.dma_ops = &swiotlb_dma_ops;
	pdev->dev.archdata.is_sta2x11 = true;

	/* We must enable all devices as master, for audio DMA to work */
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config XTENSA
	def_bool y
	select ARCH_HAS_SG_CHAIN
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_NO_COHERENT_DMA_MMAP if !MMU

@@ -10,7 +9,7 @@ config XTENSA
	select BUILDTIME_EXTABLE_SORT
	select CLONE_BACKWARDS
	select COMMON_CLK
	select DMA_DIRECT_OPS
	select DMA_REMAP if MMU
	select GENERIC_ATOMIC64
	select GENERIC_CLOCKEVENTS
	select GENERIC_IRQ_SHOW
@@ -160,7 +160,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
					flag & __GFP_NOWARN);

	if (!page)
		page = alloc_pages(flag, get_order(size));
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));

	if (!page)
		return NULL;
@@ -1456,6 +1456,11 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
	const struct iommu_ops *iommu;
	u64 dma_addr = 0, size = 0;

	if (attr == DEV_DMA_NOT_SUPPORTED) {
		set_dma_ops(dev, &dma_dummy_ops);
		return 0;
	}

	iort_dma_setup(dev, &dma_addr, &size);

	iommu = iort_iommu_configure(dev);
@@ -1137,8 +1137,7 @@ int platform_dma_configure(struct device *dev)
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
		ret = acpi_dma_configure(dev, attr);
	}

	return ret;

@@ -1178,37 +1177,6 @@ int __init platform_bus_init(void)
	return error;
}

#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 dma_default_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->get_required_mask)
		return ops->get_required_mask(dev);
	return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif

static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);
@@ -583,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
	if (dma_ops && dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
@@ -55,8 +55,6 @@
#include "amd_iommu_types.h"
#include "irq_remapping.h"

#define AMD_IOMMU_MAPPING_ERROR 0

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT 100000

@@ -2186,7 +2184,7 @@ static int amd_iommu_add_device(struct device *dev)
			dev_name(dev));

		iommu_ignore_device(dev);
		dev->dma_ops = &dma_direct_ops;
		dev->dma_ops = NULL;
		goto out;
	}
	init_iommu_group(dev);

@@ -2339,7 +2337,7 @@ static dma_addr_t __map_single(struct device *dev,
	paddr &= PAGE_MASK;

	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
	if (address == AMD_IOMMU_MAPPING_ERROR)
	if (!address)
		goto out;

	prot = dir2prot(direction);

@@ -2376,7 +2374,7 @@ out_unmap:

	dma_ops_free_iova(dma_dom, address, pages);

	return AMD_IOMMU_MAPPING_ERROR;
	return DMA_MAPPING_ERROR;
}

/*

@@ -2427,7 +2425,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
	if (PTR_ERR(domain) == -EINVAL)
		return (dma_addr_t)paddr;
	else if (IS_ERR(domain))
		return AMD_IOMMU_MAPPING_ERROR;
		return DMA_MAPPING_ERROR;

	dma_mask = *dev->dma_mask;
	dma_dom = to_dma_ops_domain(domain);

@@ -2504,7 +2502,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
	npages = sg_num_pages(dev, sglist, nelems);

	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
	if (address == AMD_IOMMU_MAPPING_ERROR)
	if (address == DMA_MAPPING_ERROR)
		goto out_err;

	prot = dir2prot(direction);

@@ -2627,7 +2625,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
				 size, DMA_BIDIRECTIONAL, dma_mask);

	if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
	if (*dma_addr == DMA_MAPPING_ERROR)
		goto out_free;

	return page_address(page);

@@ -2678,11 +2676,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
	return check_device(dev);
}

static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == AMD_IOMMU_MAPPING_ERROR;
}

static const struct dma_map_ops amd_iommu_dma_ops = {
	.alloc = alloc_coherent,
	.free = free_coherent,

@@ -2691,7 +2684,6 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
	.mapping_error = amd_iommu_mapping_error,
};

static int init_reserved_iova_ranges(void)

@@ -2778,17 +2770,6 @@ int __init amd_iommu_init_dma_ops(void)
	swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
	iommu_detected = 1;

	/*
	 * In case we don't initialize SWIOTLB (actually the common case
	 * when AMD IOMMU is enabled and SME is not active), make sure there
	 * are global dma_ops set as a fall-back for devices not handled by
	 * this driver (for example non-PCI devices). When SME is active,
	 * make sure that swiotlb variable remains set so the global dma_ops
	 * continue to be SWIOTLB.
	 */
	if (!swiotlb)
		dma_ops = &dma_direct_ops;

	if (amd_iommu_unmap_flush)
		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
@@ -32,8 +32,6 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR 0

struct iommu_dma_msi_page {
	struct list_head list;
	dma_addr_t iova;

@@ -523,7 +521,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
	*handle = DMA_MAPPING_ERROR;
}

/**

@@ -556,7 +554,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;
	*handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {

@@ -649,11 +647,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

@@ -694,7 +692,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*

@@ -737,11 +735,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

@@ -858,11 +856,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{

@@ -882,7 +875,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iommu_dma_mapping_error(dev, iova))
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
	return 0;
}

static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
		size_t size, int dir, u64 dma_mask)
static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int dir,
		u64 dma_mask)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	unsigned long iova_pfn;

@@ -3615,7 +3617,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,

	domain = get_valid_domain_for_dev(dev);
	if (!domain)
		return 0;
		return DMA_MAPPING_ERROR;

	iommu = domain_get_iommu(domain);
	size = aligned_nrpages(paddr, size);

@@ -3653,7 +3655,7 @@ error:
	free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
		dev_name(dev), size, (unsigned long long)paddr, dir);
	return 0;
	return DMA_MAPPING_ERROR;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,

@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
			 enum dma_data_direction dir,
			 unsigned long attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
			dir, *dev->dma_mask);
	return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
}

static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)

@@ -3753,10 +3754,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
		return NULL;
	memset(page_address(page), 0, size);

	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
			DMA_BIDIRECTIONAL,
			dev->coherent_dma_mask);
	if (*dma_handle)
	*dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
			dev->coherent_dma_mask);
	if (*dma_handle != DMA_MAPPING_ERROR)
		return page_address(page);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);

@@ -3865,11 +3865,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

static const struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,

@@ -3877,7 +3872,6 @@ static const struct dma_map_ops intel_dma_ops = {
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
	.dma_supported = dma_direct_supported,
};
@@ -149,7 +149,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
	struct scif_hw_dev *scdev = dev_get_drvdata(dev);
	struct mic_device *mdev = scdev_to_mdev(scdev);
	dma_addr_t tmp;
	void *va = kmalloc(size, gfp);
	void *va = kmalloc(size, gfp | __GFP_ZERO);

	if (va) {
		tmp = mic_map_single(mdev, va, size);
@@ -110,8 +110,6 @@
#define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
#define CMD_TLB_PURGE        33 /* IO_COMMAND to Purge I/O TLB entry */

#define CCIO_MAPPING_ERROR    (~(dma_addr_t)0)

struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t unused1[12];

@@ -740,7 +738,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return CCIO_MAPPING_ERROR;
		return DMA_MAPPING_ERROR;

	BUG_ON(size <= 0);

@@ -1021,11 +1019,6 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static int ccio_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == CCIO_MAPPING_ERROR;
}

static const struct dma_map_ops ccio_ops = {
	.dma_supported = ccio_dma_supported,
	.alloc = ccio_alloc,

@@ -1034,7 +1027,6 @@ static const struct dma_map_ops ccio_ops = {
	.unmap_page = ccio_unmap_page,
	.map_sg = ccio_map_sg,
	.unmap_sg = ccio_unmap_sg,
	.mapping_error = ccio_mapping_error,
};

#ifdef CONFIG_PROC_FS
@@ -93,8 +93,6 @@

#define DEFAULT_DMA_HINT_REG 0

#define SBA_MAPPING_ERROR    (~(dma_addr_t)0)

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

@@ -725,7 +723,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,

	ioc = GET_IOC(dev);
	if (!ioc)
		return SBA_MAPPING_ERROR;
		return DMA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

@@ -1080,11 +1078,6 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,

}

static int sba_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == SBA_MAPPING_ERROR;
}

static const struct dma_map_ops sba_ops = {
	.dma_supported = sba_dma_supported,
	.alloc = sba_alloc,

@@ -1093,7 +1086,6 @@ static const struct dma_map_ops sba_ops = {
	.unmap_page = sba_unmap_page,
	.map_sg = sba_map_sg,
	.unmap_sg = sba_unmap_sg,
	.mapping_error = sba_mapping_error,
};
@@ -307,39 +307,32 @@ static struct device *to_vmd_dev(struct device *dev)
	return &vmd->dev->dev;
}

static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
	return get_dma_ops(to_vmd_dev(dev));
}

static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
		gfp_t flag, unsigned long attrs)
{
	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
			attrs);
	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
}

static void vmd_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t addr, unsigned long attrs)
{
	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
			attrs);
	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
}

static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t addr, size_t size,
		unsigned long attrs)
{
	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
			size, attrs);
	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
			attrs);
}

static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t addr, size_t size,
		unsigned long attrs)
{
	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
			addr, size, attrs);
	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
			attrs);
}

static dma_addr_t vmd_map_page(struct device *dev, struct page *page,

@@ -347,66 +340,60 @@ static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
			dir, attrs);
	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
			attrs);
}

static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
}

static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
}

static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
			dir);
	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
}

static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
}

static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir)
{
	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
{
	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
}

static int vmd_dma_supported(struct device *dev, u64 mask)
{
	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
	return dma_supported(to_vmd_dev(dev), mask);
}

static u64 vmd_get_required_mask(struct device *dev)
{
	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
	return dma_get_required_mask(to_vmd_dev(dev));
}

static void vmd_teardown_dma_ops(struct vmd_dev *vmd)

@@ -446,7 +433,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
	add_dma_domain(domain);
@@ -1600,10 +1600,8 @@ static int pci_dma_configure(struct device *dev)
		ret = of_dma_configure(dev, bridge->parent->of_node, true);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
	}

	pci_put_host_bridge_device(bridge);
@@ -53,8 +53,6 @@
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*

@@ -405,8 +403,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
			attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return XEN_SWIOTLB_ERROR_CODE;
	if (map == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),

@@ -421,7 +419,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
		swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

		return XEN_SWIOTLB_ERROR_CODE;
		return DMA_MAPPING_ERROR;
	}

	/*

@@ -443,21 +441,8 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with hihgmem page but we could
	 * call dma_mark_clean() with hihgmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,

@@ -495,11 +480,6 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void

@@ -574,7 +554,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
					sg_phys(sg),
					sg->length,
					dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
			if (map == DMA_MAPPING_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */

@@ -700,11 +680,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}

static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,

@@ -719,5 +694,4 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
	.mapping_error = xen_swiotlb_mapping_error,
};
@@ -4,7 +4,7 @@

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &dma_direct_ops;
	return NULL;
}

#endif /* _ASM_GENERIC_DMA_MAPPING_H */
@@ -30,8 +30,6 @@ struct bus_type;

extern void dma_debug_add_bus(struct bus_type *bus);

extern int dma_debug_resize_entries(u32 num_entries);

extern void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);

@@ -72,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle,
		size_t size, int direction);

extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle,
		unsigned long offset,
		size_t size,
		int direction);

extern void debug_dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle,
		unsigned long offset,
		size_t size, int direction);

extern void debug_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg,
		int nelems, int direction);

@@ -101,11 +88,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
{
}

static inline int dma_debug_resize_entries(u32 num_entries)
{
	return 0;
}

static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{

@@ -174,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
{
}

static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle,
		unsigned long offset,
		size_t size,
		int direction)
{
}

static inline void debug_dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle,
		unsigned long offset,
		size_t size,
		int direction)
{
}

static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg,
		int nelems, int direction)
@@ -5,8 +5,6 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>

#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else

@@ -50,14 +48,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
	return __sme_clr(__dma_to_phys(dev, daddr));
}

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void dma_mark_clean(void *addr, size_t size);
#else
static inline void dma_mark_clean(void *addr, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);

@@ -67,11 +57,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
@@ -69,7 +69,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
@ -128,13 +128,14 @@ struct dma_map_ops {
|
|||
enum dma_data_direction dir);
|
||||
void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
|
||||
enum dma_data_direction direction);
|
||||
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
|
||||
int (*dma_supported)(struct device *dev, u64 mask);
|
||||
u64 (*get_required_mask)(struct device *dev);
|
||||
};
|
||||
|
||||
extern const struct dma_map_ops dma_direct_ops;
|
||||
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
|
||||
|
||||
extern const struct dma_map_ops dma_virt_ops;
|
||||
extern const struct dma_map_ops dma_dummy_ops;
|
||||
|
||||
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
|
||||
|
||||
|
@ -220,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline bool dma_is_direct(const struct dma_map_ops *ops)
|
||||
{
|
||||
return likely(!ops);
|
||||
}
|
||||
|
||||
/*
|
||||
* All the dma_direct_* declarations are here just for the indirect call bypass,
|
||||
* and must not be used directly drivers!
|
||||
*/
|
||||
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs);
|
||||
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
|
||||
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
|
||||
defined(CONFIG_SWIOTLB)
|
||||
void dma_direct_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t addr, size_t size, enum dma_data_direction dir);
|
||||
void dma_direct_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sgl, int nents, enum dma_data_direction dir);
|
||||
#else
|
||||
static inline void dma_direct_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t addr, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
}
|
||||
static inline void dma_direct_sync_sg_for_device(struct device *dev,
|
||||
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
|
||||
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
|
||||
defined(CONFIG_SWIOTLB)
|
||||
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs);
|
||||
void dma_direct_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t addr, size_t size, enum dma_data_direction dir);
|
||||
void dma_direct_sync_sg_for_cpu(struct device *dev,
|
||||
struct scatterlist *sgl, int nents, enum dma_data_direction dir);
|
||||
#else
|
||||
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
}
|
||||
static inline void dma_direct_unmap_sg(struct device *dev,
|
||||
struct scatterlist *sgl, int nents, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
}
|
||||
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t addr, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
}
|
||||
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
|
||||
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
|
||||
size_t size,
|
||||
enum dma_data_direction dir,
|
||||
|
@ -230,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
|
|||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
debug_dma_map_single(dev, ptr, size);
|
||||
addr = ops->map_page(dev, virt_to_page(ptr),
|
||||
offset_in_page(ptr), size,
|
||||
dir, attrs);
|
||||
if (dma_is_direct(ops))
|
||||
addr = dma_direct_map_page(dev, virt_to_page(ptr),
|
||||
offset_in_page(ptr), size, dir, attrs);
|
||||
else
|
||||
addr = ops->map_page(dev, virt_to_page(ptr),
|
||||
offset_in_page(ptr), size, dir, attrs);
|
||||
debug_dma_map_page(dev, virt_to_page(ptr),
|
||||
offset_in_page(ptr), size,
|
||||
dir, addr, true);
|
||||
|
@ -247,11 +314,19 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
|
|||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_page)
|
||||
if (dma_is_direct(ops))
|
||||
dma_direct_unmap_page(dev, addr, size, dir, attrs);
|
||||
else if (ops->unmap_page)
|
||||
ops->unmap_page(dev, addr, size, dir, attrs);
|
||||
debug_dma_unmap_page(dev, addr, size, dir, true);
|
||||
}
|
||||
|
||||
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
|
||||
}
|
||||
|
||||
/*
|
||||
* dma_maps_sg_attrs returns 0 on error and > 0 on success.
|
||||
* It should never return a value < 0.
|
||||
|
@ -264,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
|||
int ents;
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
ents = ops->map_sg(dev, sg, nents, dir, attrs);
|
||||
if (dma_is_direct(ops))
|
||||
ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
|
||||
else
|
||||
ents = ops->map_sg(dev, sg, nents, dir, attrs);
|
||||
BUG_ON(ents < 0);
|
||||
debug_dma_map_sg(dev, sg, nents, ents, dir);
|
||||
|
||||
|
@ -279,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
|
|||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
debug_dma_unmap_sg(dev, sg, nents, dir);
|
||||
if (ops->unmap_sg)
|
||||
if (dma_is_direct(ops))
|
||||
dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
|
||||
else if (ops->unmap_sg)
|
||||
ops->unmap_sg(dev, sg, nents, dir, attrs);
|
||||
}
|
||||
|
||||
|
@ -293,25 +373,15 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
|
|||
dma_addr_t addr;
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
addr = ops->map_page(dev, page, offset, size, dir, attrs);
|
||||
if (dma_is_direct(ops))
|
||||
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
|
||||
else
|
||||
addr = ops->map_page(dev, page, offset, size, dir, attrs);
|
||||
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static inline void dma_unmap_page_attrs(struct device *dev,
|
||||
dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_page)
|
||||
ops->unmap_page(dev, addr, size, dir, attrs);
|
||||
debug_dma_unmap_page(dev, addr, size, dir, false);
|
||||
}
|
||||
|
||||
static inline dma_addr_t dma_map_resource(struct device *dev,
|
||||
phys_addr_t phys_addr,
|
||||
size_t size,
|
||||
|
@ -327,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
|
|||
BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
|
||||
|
||||
addr = phys_addr;
|
||||
if (ops->map_resource)
|
||||
if (ops && ops->map_resource)
|
||||
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
|
||||
|
||||
debug_dma_map_resource(dev, phys_addr, size, dir, addr);
|
||||
|
@ -342,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
|
|||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_resource)
|
||||
if (ops && ops->unmap_resource)
|
||||
ops->unmap_resource(dev, addr, size, dir, attrs);
|
||||
debug_dma_unmap_resource(dev, addr, size, dir);
|
||||
}
|
||||
|
@ -354,11 +424,20 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
|
|||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->sync_single_for_cpu)
|
||||
if (dma_is_direct(ops))
|
||||
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
|
||||
else if (ops->sync_single_for_cpu)
|
||||
ops->sync_single_for_cpu(dev, addr, size, dir);
|
||||
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
|
||||
}
|
||||
|
||||
static inline void dma_sync_single_range_for_cpu(struct device *dev,
|
||||
dma_addr_t addr, unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
|
||||
}
|
||||
|
||||
static inline void dma_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
|
@@ -366,37 +445,18 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t addr,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
 static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t addr,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+	return dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
@@ -406,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
@@ -418,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 
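The same dma_is_direct() bypass is applied to the dma_sync_single_* and dma_sync_sg_* helpers above. These are the ownership-transfer calls used with streaming mappings on non-cache-coherent hardware; a minimal sketch of the usual receive-side pattern follows (placeholder names, assuming the buffer was mapped once with dma_map_single(..., DMA_FROM_DEVICE)):

#include <linux/dma-mapping.h>

/* Hypothetical RX completion handler for a reused, pre-mapped buffer. */
static void example_rx_complete(struct device *dev, dma_addr_t dma,
				void *buf, size_t len)
{
	/* Make the device's writes visible to the CPU before reading buf. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... process the received data in buf ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}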
@@ -431,16 +495,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->cache_sync)
-		ops->cache_sync(dev, vaddr, size, dir);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir);
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
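dma_cache_sync() above turns from an inline ops->cache_sync dispatch into a regular out-of-line function ("dma-mapping: move dma_cache_sync out of line" in the commit list). Callers are unchanged; the sketch below shows its usual pairing with a DMA_ATTR_NON_CONSISTENT allocation and is a hypothetical example, not code from this series:

#include <linux/dma-mapping.h>

/*
 * Hypothetical descriptor publish: 'desc_ring' is assumed to come from
 * dma_alloc_attrs(..., DMA_ATTR_NON_CONSISTENT), so CPU writes must be
 * pushed out explicitly before the device reads the ring.
 */
static void example_publish_descriptors(struct device *dev, void *desc_ring,
					size_t ring_size)
{
	dma_cache_sync(dev, desc_ring, ring_size, DMA_TO_DEVICE);
}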
@@ -455,107 +511,29 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 			const void *caller);
 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
 
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->mmap)
-		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
+int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
+bool dma_in_atomic_pool(void *start, size_t size);
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(void *start, size_t size);
+
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size, unsigned long attrs);
 
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-		      dma_addr_t dma_addr, size_t size,
-		      unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->get_sgtable)
-		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-					attrs);
-	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
-				      attrs);
-}
-
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev)	(true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	void *cpu_addr;
-
-	BUG_ON(!ops);
-	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
-
-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
-		return cpu_addr;
-
-	/* let the implementation decide on the zone to allocate from: */
-	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-	if (!arch_dma_alloc_attrs(&dev))
-		return NULL;
-	if (!ops->alloc)
-		return NULL;
-
-	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-	return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!ops);
-
-	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
-		return;
-	/*
-	 * On non-coherent platforms which implement DMA-coherent buffers via
-	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
-	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
-	 * sleep on some machines, and b) an indication that the driver is
-	 * probably misusing the coherent API anyway.
-	 */
-	WARN_ON(irqs_disabled());
-
-	if (!ops->free || !cpu_addr)
-		return;
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
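This hunk carries most of the "move various slow path functions out of line" change for this header: dma_mmap_attrs(), dma_get_sgtable_attrs(), dma_alloc_attrs() and dma_free_attrs() become plain declarations backed by the common kernel/dma code, and the remap/atomic-pool helpers gain prototypes. The prototypes seen by drivers are unchanged; a hedged example of the common dma_mmap_coherent() use (placeholder driver state, not from this patch):

#include <linux/dma-mapping.h>
#include <linux/mm_types.h>

/*
 * Hypothetical .mmap backend: expose a buffer previously obtained from
 * dma_alloc_coherent() to user space.
 */
static int example_mmap_coherent_buf(struct device *dev,
				     struct vm_area_struct *vma,
				     void *cpu_addr, dma_addr_t dma_addr,
				     size_t size)
{
	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
}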
@@ -573,43 +551,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
 	debug_dma_mapping_error(dev, dma_addr);
-	if (ops->mapping_error)
-		return ops->mapping_error(dev, dma_addr);
+
+	if (dma_addr == DMA_MAPPING_ERROR)
+		return -ENOMEM;
 	return 0;
 }
 
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (!ops)
-		return 0;
-	if (!ops->dma_supported)
-		return 1;
-	return ops->dma_supported(dev, mask);
-}
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	dma_check_mask(dev, mask);
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-#endif
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 static inline u64 dma_get_mask(struct device *dev)
 {
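With the per-ops ->mapping_error method gone, dma_mapping_error() above only compares against the common DMA_MAPPING_ERROR sentinel, and dma_supported()/dma_set_mask()/dma_set_coherent_mask() move out of line. Driver usage stays the same; a typical probe-time sketch (hypothetical function name, standard APIs):

#include <linux/dma-mapping.h>

/* Hypothetical probe fragment: ask for 64-bit DMA, fall back to 32-bit. */
static int example_setup_dma_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}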
@@ -618,21 +569,6 @@ static inline u64 dma_get_mask(struct device *dev)
 	return DMA_BIT_MASK(32);
 }
 
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
-	if (!dma_supported(dev, mask))
-		return -EIO;
-
-	dma_check_mask(dev, mask);
-
-	dev->coherent_dma_mask = mask;
-	return 0;
-}
-#endif
-
 /*
  * Set both the DMA mask and the coherent DMA mask to the same thing.
  * Note that we don't check the return value from dma_set_coherent_mask()
@@ -676,8 +612,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
 	return SZ_64K;
 }
 
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
-						unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
 {
 	if (dev->dma_parms) {
 		dev->dma_parms->max_segment_size = size;
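dma_set_max_seg_size() above now returns int instead of unsigned int, matching the "fix the return type of dma_set_max_seg_size" item in the summary, so the result can be error-checked (it fails when the device has no dma_parms). A hedged caller sketch with placeholder names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical probe fragment: cap DMA segments at 64K and check the result. */
static int example_limit_seg_size(struct device *dev)
{
	int ret = dma_set_max_seg_size(dev, SZ_64K);

	if (ret)
		dev_warn(dev, "failed to set DMA max segment size\n");
	return ret;
}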
@@ -709,12 +644,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 }
 #endif
 
+/*
+ * Please always use dma_alloc_coherent instead as it already zeroes the memory!
+ */
 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flag)
 {
-	void *ret = dma_alloc_coherent(dev, size, dma_handle,
-				       flag | __GFP_ZERO);
-	return ret;
+	return dma_alloc_coherent(dev, size, dma_handle, flag);
 }
 
 static inline int dma_get_cache_alignment(void)
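Because dma_alloc_coherent() now always returns zeroed memory, dma_zalloc_coherent() above degenerates into a plain wrapper and is deprecated, as the new comment in the hunk states. Converting a caller is mechanical; a hypothetical before/after sketch:

#include <linux/dma-mapping.h>

/* Hypothetical ring allocation helper. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	/* Before: return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL); */
	/* After: dma_alloc_coherent() already zeroes the allocation. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}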
@@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction);
 #else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+		size_t size, enum dma_data_direction direction)
+{
+}
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -69,4 +72,6 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
 
+void arch_dma_prep_coherent(struct page *page, size_t size);
+
 #endif /* _LINUX_DMA_NONCOHERENT_H */
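arch_dma_prep_coherent() above is the hook an architecture supplies when it opts into the generic remapping DMA allocator mentioned in the summary: it must leave a freshly allocated page in a state that is safe to remap uncacheable, typically by zeroing it and writing it back out of the CPU caches. A very rough sketch only; arch_wb_inval_dcache_range() is a made-up stand-in for the architecture's own cache-maintenance primitive:

#include <linux/dma-noncoherent.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical arch-private cache op, assumed to exist for this sketch. */
void arch_wb_inval_dcache_range(void *addr, size_t size);

/*
 * Hypothetical architecture implementation: zero the buffer and flush the
 * cacheable linear-map alias before the core code remaps it uncached.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *addr = page_address(page);

	memset(addr, 0, size);
	arch_wb_inval_dcache_range(addr, size);
}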
@@ -324,10 +324,10 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
  * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
  * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
  */
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS	2048
-#else
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
 #define SG_MAX_SEGMENTS	SG_CHUNK_SIZE
+#else
+#define SG_MAX_SEGMENTS	2048
 #endif
 
 #ifdef CONFIG_SG_POOL