dma-mapping fixes for 5.3-rc
- fix the handling of the bus_dma_mask in dma_get_required_mask, which
  caused a regression in this merge window (Lucas Stach)
- fix a regression in the handling of DMA_ATTR_NO_KERNEL_MAPPING (me)
- fix dma_mmap_coherent to not cause page attribute mismatches on
  coherent architectures like x86 (me)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl1UFhILHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOjexAAjPKLo4WGBGO1nd0btwXcI9A7jQTQlXrokmorDVzx
5++GmTUBeEgvUJath5D3qpQTRZXo9Wb9oGMdS5U6bWJB+SbWtErM304t905TJoDM
Cs7xcB1ZQeG/5OrQ+qGPgQCo6WO1dOl9FpaIptjNm4dn+OYhyO/YA+dgrJDwgkiA
140RYUWa+Zhq3df4YqP4M4EnezLN1c4uE80wUxVQKDcq59sxCJek0QT0pUAMbdmQ
/cUd2XSU113o1llmIRUh0Oj6VSEhWKHb+bdb8JfGndLzxvDcXZKl60tikWe6xpy2
Ue0kkHRk6OPVRIxWkRjt8D+mlrCyNqN6HWx6eBmVnRKHxZ4ia2hYOFuYN9FFLLK+
kCUlu5P/HUabBedKIxk4rbWITUqcRSviPD2WdnH2RWblvXNSDoSAufYuJ/9IGSoL
P6a43DVKFesVF/MxeH9Ko8bnxMUO9Zn97GHcQIUplRwaqrnrCEPlvLVf/teswSQG
C13rTnouZ0FA4z/uV96G6HfGIj87MLe/RovmLCMTeiSKrDpbcO7szP037Km73M+V
UBmatoYCioVLxBjw3NkxCRc9UpDPdRUu31uVHrAarh4tutUASEWLrb6s9vFlGyED
zis9IHWtIAYP3VfFtkXdZ7oDlqC/3KdEErHZuT+z4PK3Wj/QtQVfQ8SB79xFMneD
V2E=
=Jzmo
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.3-4' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix the handling of the bus_dma_mask in dma_get_required_mask, which
   caused a regression in this merge window (Lucas Stach)

 - fix a regression in the handling of DMA_ATTR_NO_KERNEL_MAPPING (me)

 - fix dma_mmap_coherent to not cause page attribute mismatches on
   coherent architectures like x86 (me)

* tag 'dma-mapping-5.3-4' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: fix page attributes for dma_mmap_*
  dma-direct: don't truncate dma_required_mask to bus addressing capabilities
  dma-direct: fix DMA_ATTR_NO_KERNEL_MAPPING
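For context, the userspace-mapping path touched by the dma_mmap_coherent fix looks roughly like this from a driver. This is a minimal sketch, not code from this merge: the struct and function names are hypothetical, while dma_alloc_coherent()/dma_mmap_coherent() are the real kernel APIs. dma_mmap_coherent() lands in dma_common_mmap() below, which after this fix derives the page attributes through dma_pgprot().

/* Hypothetical driver snippet showing the dma_mmap_* path (sketch only). */
#include <linux/dma-mapping.h>

struct my_priv {			/* hypothetical driver state */
	struct device	*dev;
	void		*cpu_addr;	/* from dma_alloc_coherent() */
	dma_addr_t	dma_handle;
	size_t		size;
};

static int my_mmap(struct my_priv *p, struct vm_area_struct *vma)
{
	/*
	 * dma_mmap_coherent() -> dma_common_mmap() picks
	 * vma->vm_page_prot; before this fix it always went through
	 * arch_dma_mmap_pgprot() and could hand userspace an uncached
	 * mapping of memory the kernel maps cached, a page attribute
	 * mismatch on coherent architectures such as x86.
	 */
	return dma_mmap_coherent(p->dev, vma, p->cpu_addr, p->dma_handle,
				 p->size);
}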
commit e83b009c5c
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
-		return __get_dma_pgprot(attrs, prot);
-	return prot;
+	return __get_dma_pgprot(attrs, prot);
 }
 
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
-		return pgprot_writecombine(prot);
-	return prot;
+	return pgprot_writecombine(prot);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,7 +121,6 @@ config PPC
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_MMAP_PGPROT
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o misc_$(BITS).o \
-				   of_platform.o prom_parse.o \
-				   dma-common.o
+				   of_platform.o prom_parse.o
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
--- a/arch/powerpc/kernel/dma-common.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (!dev_is_dma_coherent(dev))
-		return pgprot_noncached(prot);
-	return prot;
-}
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -572,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
@@ -973,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 				VM_USERMAP, prot, __builtin_return_address(0));
@@ -1033,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn, off = vma->vm_pgoff;
 	int ret;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
-	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
-		max_dma = dev->bus_dma_mask;
-
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
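To make the removed clamp concrete, here is a standalone userspace restatement of the rounding above, with assumed example numbers (not kernel code; fls64() is reimplemented to mirror the kernel helper's 1-based semantics):

/* Sketch of dma_direct_get_required_mask()'s rounding, example values assumed. */
#include <stdio.h>
#include <stdint.h>

static int fls64(uint64_t x)	/* 1-based index of the highest set bit */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	/* DMA address of the last page when RAM ends at 16 GiB (assumed). */
	uint64_t max_dma = (1ULL << 34) - 4096;
	uint64_t required = (1ULL << (fls64(max_dma) - 1)) * 2 - 1;

	/* Prints 0x3ffffffff: a full 34-bit mask is required to reach every
	 * page.  Before this fix, a 32-bit bus_dma_mask clamped max_dma
	 * first and the function reported 0xffffffff, hiding the
	 * platform's real requirement from dma_get_required_mask() users. */
	printf("required mask = %#jx\n", (uintmax_t)required);
	return 0;
}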
@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
 		return page;
 	}
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		__dma_direct_free_pages(dev, size, cpu_addr);
 		return;
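The force_dma_unencrypted() check added in both hunks matters because, with memory encryption active (for example AMD SEV), the buffer must be remapped unencrypted, so dma-direct cannot hand back a bare struct page cookie; in that case the attribute is ignored and allocation falls through to the normal path. For reference, a hedged sketch of how a driver consumes this attribute — the driver names are hypothetical, dma_alloc_attrs()/dma_free_attrs() and DMA_ATTR_NO_KERNEL_MAPPING are the real API, and the cookie must never be dereferenced, only passed back at free time:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

struct my_fw_buf {			/* hypothetical driver state */
	void		*cookie;	/* opaque cookie, NOT a kernel address */
	dma_addr_t	dma;
	size_t		size;
};

static int my_alloc_fw_buf(struct device *dev, struct my_fw_buf *buf)
{
	buf->size = SZ_1M;
	/* Device-only buffer the CPU never touches, so no kernel mapping
	 * is needed; the fix above also makes this path fill in buf->dma. */
	buf->cookie = dma_alloc_attrs(dev, buf->size, &buf->dma, GFP_KERNEL,
				      DMA_ATTR_NO_KERNEL_MAPPING);
	return buf->cookie ? 0 : -ENOMEM;
}

static void my_free_fw_buf(struct device *dev, struct my_fw_buf *buf)
{
	dma_free_attrs(dev, buf->size, buf->cookie, buf->dma,
		       DMA_ATTR_NO_KERNEL_MAPPING);
}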
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+	if (dev_is_dma_coherent(dev) ||
+	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+	     (attrs & DMA_ATTR_NON_CONSISTENT)))
+		return prot;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+		return arch_dma_mmap_pgprot(dev, prot, attrs);
+	return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn;
 	int ret = -ENXIO;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
-			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
 		__dma_direct_free_pages(dev, size, page);