diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 22d7b5127184..9dc68a12a5bf 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5999,8 +5999,11 @@
 			it if 0 is given (See Documentation/admin-guide/cgroup-v1/memory.rst)
 
 	swiotlb=	[ARM,IA-64,PPC,MIPS,X86]
-			Format: { <int> | force | noforce }
+			Format: { <int> [,<int>] | force | noforce }
 			<int> -- Number of I/O TLB slabs
+			<int> -- Second integer after comma. Number of swiotlb
+				 areas with their own lock. Will be rounded up
+				 to a power of 2.
 			force -- force using of bounce buffers even if they
 				 wouldn't be automatically used by the kernel
 			noforce -- Never use bounce buffers (for debugging)
diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
index 6d6d0edd2d27..829f20a193ca 100644
--- a/Documentation/core-api/dma-api.rst
+++ b/Documentation/core-api/dma-api.rst
@@ -204,6 +204,20 @@
 Returns the maximum size of a mapping for the device. The size parameter
 of the mapping functions like dma_map_single(), dma_map_page() and
 others should not be larger than the returned value.
 
+::
+
+	size_t
+	dma_opt_mapping_size(struct device *dev);
+
+Returns the maximum optimal size of a mapping for the device.
+
+Mapping larger buffers may take much longer in certain scenarios. In
+addition, for high-rate short-lived streaming mappings, the upfront time
+spent on the mapping may account for an appreciable part of the total
+request lifetime. As such, if splitting larger requests incurs no
+significant performance penalty, then device drivers are advised to
+limit total DMA streaming mappings length to the returned value.
+
 ::
 
 	bool
diff --git a/Documentation/x86/x86_64/boot-options.rst b/Documentation/x86/x86_64/boot-options.rst
index 03ec9cf01181..cbd14124a667 100644
--- a/Documentation/x86/x86_64/boot-options.rst
+++ b/Documentation/x86/x86_64/boot-options.rst
@@ -287,11 +287,13 @@ iommu options only relevant to the AMD GART hardware IOMMU:
 iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
 implementation:
 
-  swiotlb=<int>[,force]
-
-  Prereserve that many 128K pages for the software IO bounce buffering.
+  swiotlb=<int>[,force,noforce]
+
+  Prereserve that many 2K slots for the software IO bounce buffering.
   force
     Force all IO through the software TLB.
+  noforce
+    Do not initialize the software TLB.
 Miscellaneous
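Taken together, the documentation changes above describe one tunable and one new helper. On the command line, something like swiotlb=65536,4 would reserve 65536 slabs and split them into four independently locked areas (the area count is rounded up to a power of two). For dma_opt_mapping_size(), a plausible consumer looks like the sketch below; the SCSI host pointer "shost" and DMA device "dev" are hypothetical stand-ins for illustration, not part of this patch::

	/*
	 * Sketch: cap the largest request the block layer will build so
	 * that each high-rate streaming DMA mapping stays within the
	 * optimal mapping size reported for this device.
	 */
	shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				   dma_opt_mapping_size(dev) >> SECTOR_SHIFT);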
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 53e6a1da9af5..87badeae3181 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,13 +15,12 @@ config ARM
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
-	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_SETUP_DMA_OPS
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
-	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index c8e198631d41..d2fdb1796f48 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,11 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 config SA1111
 	bool
-	select DMABOUNCE if !ARCH_PXA
-
-config DMABOUNCE
-	bool
-	select ZONE_DMA
+	select ZONE_DMA if ARCH_SA1100
 
 config KRAIT_L2_ACCESSORS
 	bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 8cd574be94cf..7bae8cbaafe7 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -6,7 +6,6 @@
 obj-y				+= firmware.o
 
 obj-$(CONFIG_SA1111)		+= sa1111.o
-obj-$(CONFIG_DMABOUNCE)		+= dmabounce.o
 obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
 obj-$(CONFIG_SHARP_LOCOMO)	+= locomo.o
 obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
deleted file mode 100644
index 7996c04393d5..000000000000
--- a/arch/arm/common/dmabounce.c
+++ /dev/null
@@ -1,582 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/common/dmabounce.c
- *
- * Special dma_{map/unmap/dma_sync}_* routines for systems that have
- * limited DMA windows. These functions utilize bounce buffers to
- * copy data to/from buffers located outside the DMA region. This
- * only works for systems in which DMA memory is at the bottom of
- * RAM, the remainder of memory is at the top and the DMA memory
- * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
- * DMA windows will require custom implementations that reserve memory
- * areas at early bootup.
- *
- * Original version by Brad Parker (brad@heeltoe.com)
- * Re-written by Christopher Hoover <ch@murgatroid.com>
- * Made generic by Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright (C) 2002 Hewlett Packard Company.
- * Copyright (C) 2004 MontaVista Software, Inc.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#undef STATS
-
-#ifdef STATS
-#define DO_STATS(X) do { X ; } while (0)
-#else
-#define DO_STATS(X) do { } while (0)
-#endif
-
-/* ************************************************** */
-
-struct safe_buffer {
-	struct list_head node;
-
-	/* original request */
-	void *ptr;
-	size_t size;
-	int direction;
-
-	/* safe buffer info */
-	struct dmabounce_pool *pool;
-	void *safe;
-	dma_addr_t safe_dma_addr;
-};
-
-struct dmabounce_pool {
-	unsigned long size;
-	struct dma_pool *pool;
-#ifdef STATS
-	unsigned long allocs;
-#endif
-};
-
-struct dmabounce_device_info {
-	struct device *dev;
-	struct list_head safe_buffers;
-#ifdef STATS
-	unsigned long total_allocs;
-	unsigned long map_op_count;
-	unsigned long bounce_count;
-	int attr_res;
-#endif
-	struct dmabounce_pool small;
-	struct dmabounce_pool large;
-
-	rwlock_t lock;
-
-	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
-};
-
-#ifdef STATS
-static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
-			      char *buf)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
-		device_info->small.allocs,
-		device_info->large.allocs,
-		device_info->total_allocs - device_info->small.allocs -
-			device_info->large.allocs,
-		device_info->total_allocs,
-		device_info->map_op_count,
-		device_info->bounce_count);
-}
-
-static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
-#endif
-
-
-/* allocate a 'safe' buffer and keep track of it */
-static inline struct safe_buffer *
-alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
-		  size_t size, enum dma_data_direction dir)
-{
-	struct safe_buffer *buf;
-	struct dmabounce_pool *pool;
-	struct device *dev = device_info->dev;
-	unsigned long flags;
-
-	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
-		__func__, ptr, size, dir);
-
-	if (size <= device_info->small.size) {
-		pool = &device_info->small;
-	} else if (size <= device_info->large.size) {
-		pool = &device_info->large;
-	} else {
-		pool = NULL;
-	}
-
-	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
-	if (buf == NULL) {
-		dev_warn(dev, "%s: kmalloc failed\n", __func__);
-		return NULL;
-	}
-
-	buf->ptr = ptr;
-	buf->size = size;
-	buf->direction = dir;
-	buf->pool = pool;
-
-	if (pool) {
-		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
-					   &buf->safe_dma_addr);
-	} else {
-		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
-					       GFP_ATOMIC);
-	}
-
-	if (buf->safe == NULL) {
-		dev_warn(dev,
-			 "%s: could not alloc dma memory (size=%d)\n",
-			 __func__, size);
-		kfree(buf);
-		return NULL;
-	}
-
-#ifdef STATS
-	if (pool)
-		pool->allocs++;
-	device_info->total_allocs++;
-#endif
-
-	write_lock_irqsave(&device_info->lock, flags);
-	list_add(&buf->node, &device_info->safe_buffers);
-	write_unlock_irqrestore(&device_info->lock, flags);
-
-	return buf;
-}
-
-/* determine if a buffer is from our "safe" pool */
-static inline struct safe_buffer *
-find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
-{
-	struct safe_buffer *b, *rb = NULL;
-	unsigned long flags;
-
-	read_lock_irqsave(&device_info->lock, flags);
-
-	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr <= safe_dma_addr &&
-		    b->safe_dma_addr + b->size > safe_dma_addr) {
-			rb = b;
-			break;
-		}
-
-	read_unlock_irqrestore(&device_info->lock, flags);
-	return rb;
-}
-
-static inline void
-free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
-{
-	unsigned long flags;
-
-	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
-
-	write_lock_irqsave(&device_info->lock, flags);
-
-	list_del(&buf->node);
-
-	write_unlock_irqrestore(&device_info->lock, flags);
-
-	if (buf->pool)
-		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
-	else
-		dma_free_coherent(device_info->dev, buf->size, buf->safe,
-				  buf->safe_dma_addr);
-
-	kfree(buf);
-}
-
-/* ************************************************** */
-
-static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
-		dma_addr_t dma_addr, const char *where)
-{
-	if (!dev || !dev->archdata.dmabounce)
-		return NULL;
-	if (dma_mapping_error(dev, dma_addr)) {
-		dev_err(dev, "Trying to %s invalid mapping\n", where);
-		return NULL;
-	}
-	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
-}
-
-static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	if (!dev || !dev->archdata.dmabounce)
-		return 0;
-
-	if (dev->dma_mask) {
-		unsigned long limit, mask = *dev->dma_mask;
-
-		limit = (mask + 1) & ~mask;
-		if (limit && size > limit) {
-			dev_err(dev, "DMA mapping too big (requested %#x "
-				"mask %#Lx)\n", size, *dev->dma_mask);
-			return -E2BIG;
-		}
-
-		/* Figure out if we need to bounce from the DMA mask. */
-		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
-			return 1;
-	}
-
-	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
-}
-
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-				    enum dma_data_direction dir,
-				    unsigned long attrs)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	struct safe_buffer *buf;
-
-	if (device_info)
-		DO_STATS ( device_info->map_op_count++ );
-
-	buf = alloc_safe_buffer(device_info, ptr, size, dir);
-	if (buf == NULL) {
-		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-			__func__, ptr);
-		return DMA_MAPPING_ERROR;
-	}
-
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-		buf->safe, buf->safe_dma_addr);
-
-	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
-	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-			__func__, ptr, buf->safe, size);
-		memcpy(buf->safe, ptr, size);
-	}
-
-	return buf->safe_dma_addr;
-}
-
-static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
-				size_t size, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	BUG_ON(buf->size != size);
-	BUG_ON(buf->direction != dir);
-
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-		buf->safe, buf->safe_dma_addr);
-
-	DO_STATS(dev->archdata.dmabounce->bounce_count++);
-
-	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
-	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		void *ptr = buf->ptr;
-
-		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
-			__func__, buf->safe, ptr, size);
-		memcpy(ptr, buf->safe, size);
-
-		/*
-		 * Since we may have written to a page cache page,
-		 * we need to ensure that the data will be coherent
-		 * with user mappings.
-		 */
-		__cpuc_flush_dcache_area(ptr, size);
-	}
-	free_safe_buffer(dev->archdata.dmabounce, buf);
-}
-
-/* ************************************************** */
-
-/*
- * see if a buffer address is in an 'unsafe' range. if it is
- * allocate a 'safe' buffer and copy the unsafe buffer into it.
- * substitute the safe buffer for the unsafe one.
- * (basically move the buffer from an unsafe area to a safe one)
- */
-static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t dma_addr;
-	int ret;
-
-	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
-		__func__, page, offset, size, dir);
-
-	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
-
-	ret = needs_bounce(dev, dma_addr, size);
-	if (ret < 0)
-		return DMA_MAPPING_ERROR;
-
-	if (ret == 0) {
-		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
-		return dma_addr;
-	}
-
-	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-		return DMA_MAPPING_ERROR;
-	}
-
-	return map_single(dev, page_address(page) + offset, size, dir, attrs);
-}
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer. (basically return things back to the way they
- * should be)
- */
-static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	struct safe_buffer *buf;
-
-	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
-		__func__, dma_addr, size, dir);
-
-	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
-	if (!buf) {
-		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
-		return;
-	}
-
-	unmap_single(dev, buf, size, dir, attrs);
-}
-
-static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
-{
-	struct safe_buffer *buf;
-	unsigned long off;
-
-	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
-		__func__, addr, sz, dir);
-
-	buf = find_safe_buffer_dev(dev, addr, __func__);
-	if (!buf)
-		return 1;
-
-	off = addr - buf->safe_dma_addr;
-
-	BUG_ON(buf->direction != dir);
-
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
-		buf->safe, buf->safe_dma_addr);
-
-	DO_STATS(dev->archdata.dmabounce->bounce_count++);
-
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
-			__func__, buf->safe + off, buf->ptr + off, sz);
-		memcpy(buf->ptr + off, buf->safe + off, sz);
-	}
-	return 0;
-}
-
-static void dmabounce_sync_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
-	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
-}
-
-static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
-{
-	struct safe_buffer *buf;
-	unsigned long off;
-
-	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
-		__func__, addr, sz, dir);
-
-	buf = find_safe_buffer_dev(dev, addr, __func__);
-	if (!buf)
-		return 1;
-
-	off = addr - buf->safe_dma_addr;
-
-	BUG_ON(buf->direction != dir);
-
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
-		buf->safe, buf->safe_dma_addr);
-
-	DO_STATS(dev->archdata.dmabounce->bounce_count++);
-
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
-		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
-			__func__,buf->ptr + off, buf->safe + off, sz);
-		memcpy(buf->safe + off, buf->ptr + off, sz);
-	}
-	return 0;
-}
-
-static void dmabounce_sync_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
-	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
-}
-
-static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
-{
-	if (dev->archdata.dmabounce)
-		return 0;
-
-	return arm_dma_ops.dma_supported(dev, dma_mask);
-}
-
-static const struct dma_map_ops dmabounce_ops = {
-	.alloc			= arm_dma_alloc,
-	.free			= arm_dma_free,
-	.mmap			= arm_dma_mmap,
-	.get_sgtable		= arm_dma_get_sgtable,
-	.map_page		= dmabounce_map_page,
-	.unmap_page		= dmabounce_unmap_page,
-	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
-	.sync_single_for_device	= dmabounce_sync_for_device,
-	.map_sg			= arm_dma_map_sg,
-	.unmap_sg		= arm_dma_unmap_sg,
-	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
-	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
-	.dma_supported		= dmabounce_dma_supported,
-};
-
-static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
-		const char *name, unsigned long size)
-{
-	pool->size = size;
-	DO_STATS(pool->allocs = 0);
-	pool->pool = dma_pool_create(name, dev, size,
-				     0 /* byte alignment */,
-				     0 /* no page-crossing issues */);
-
-	return pool->pool ? 0 : -ENOMEM;
-}
-
-int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size,
-		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
-{
-	struct dmabounce_device_info *device_info;
-	int ret;
-
-	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
-	if (!device_info) {
-		dev_err(dev,
-			"Could not allocated dmabounce_device_info\n");
-		return -ENOMEM;
-	}
-
-	ret = dmabounce_init_pool(&device_info->small, dev,
-				  "small_dmabounce_pool", small_buffer_size);
-	if (ret) {
-		dev_err(dev,
-			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
-			small_buffer_size);
-		goto err_free;
-	}
-
-	if (large_buffer_size) {
-		ret = dmabounce_init_pool(&device_info->large, dev,
-					  "large_dmabounce_pool",
-					  large_buffer_size);
-		if (ret) {
-			dev_err(dev,
-				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
-				large_buffer_size);
-			goto err_destroy;
-		}
-	}
-
-	device_info->dev = dev;
-	INIT_LIST_HEAD(&device_info->safe_buffers);
-	rwlock_init(&device_info->lock);
-	device_info->needs_bounce = needs_bounce_fn;
-
-#ifdef STATS
-	device_info->total_allocs = 0;
-	device_info->map_op_count = 0;
-	device_info->bounce_count = 0;
-	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
-#endif
-
-	dev->archdata.dmabounce = device_info;
-	set_dma_ops(dev, &dmabounce_ops);
-
-	dev_info(dev, "dmabounce: registered device\n");
-
-	return 0;
-
- err_destroy:
-	dma_pool_destroy(device_info->small.pool);
- err_free:
-	kfree(device_info);
-	return ret;
-}
-EXPORT_SYMBOL(dmabounce_register_dev);
-
-void dmabounce_unregister_dev(struct device *dev)
-{
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-
-	dev->archdata.dmabounce = NULL;
-	set_dma_ops(dev, NULL);
-
-	if (!device_info) {
-		dev_warn(dev,
-			 "Never registered with dmabounce but attempting"
-			 "to unregister!\n");
-		return;
-	}
-
-	if (!list_empty(&device_info->safe_buffers)) {
-		dev_err(dev,
-			"Removing from dmabounce with pending buffers!\n");
-		BUG();
-	}
-
-	if (device_info->small.pool)
-		dma_pool_destroy(device_info->small.pool);
-	if (device_info->large.pool)
-		dma_pool_destroy(device_info->large.pool);
-
-#ifdef STATS
-	if (device_info->attr_res == 0)
-		device_remove_file(dev, &dev_attr_dmabounce_stats);
-#endif
-
-	kfree(device_info);
-
-	dev_info(dev, "dmabounce: device unregistered\n");
-}
-EXPORT_SYMBOL(dmabounce_unregister_dev);
-
-MODULE_AUTHOR("Christopher Hoover <ch@murgatroid.com>, Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 2343e2b6214d..f5e6990b8856 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1389,70 +1389,9 @@ void sa1111_driver_unregister(struct sa1111_driver *driver)
 }
 EXPORT_SYMBOL(sa1111_driver_unregister);
 
-#ifdef CONFIG_DMABOUNCE
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller. If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset). The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return (machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
-}
-
-static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
-	void *data)
-{
-	struct sa1111_dev *dev = to_sa1111_device(data);
-
-	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
-		if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) {
-			int ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
-					sa1111_needs_bounce);
-			if (ret)
-				dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret);
-		}
-		break;
-
-	case BUS_NOTIFY_DEL_DEVICE:
-		if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL)
-			dmabounce_unregister_dev(&dev->dev);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block sa1111_bus_notifier = {
-	.notifier_call = sa1111_notifier_call,
-};
-#endif
-
 static int __init sa1111_init(void)
 {
 	int ret = bus_register(&sa1111_bus_type);
-#ifdef CONFIG_DMABOUNCE
-	if (ret == 0)
-		bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
-#endif
 	if (ret == 0)
 		platform_driver_register(&sa1111_device_driver);
 	return ret;
@@ -1461,9 +1400,6 @@ static int __init sa1111_init(void)
 static void __exit sa1111_exit(void)
 {
 	platform_driver_unregister(&sa1111_device_driver);
-#ifdef CONFIG_DMABOUNCE
-	bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
-#endif
 	bus_unregister(&sa1111_bus_type);
 }
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index be666f58bf7a..8754c0f5fc90 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -6,9 +6,6 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_DMABOUNCE
-	struct dmabounce_device_info *dmabounce;
-#endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct dma_iommu_mapping *mapping;
 #endif
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
index 77fcb7ee5ec9..4f7bcde03abb 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -1,48 +1 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_ARM_DMA_DIRECT_H
-#define ASM_ARM_DMA_DIRECT_H 1
-
-#include
-
-/*
- * dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private
- * functions used internally by the DMA-mapping API to provide DMA
- * addresses. They must not be used by drivers.
- */
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	if (dev && dev->dma_range_map)
-		pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn)));
-	return (dma_addr_t)__pfn_to_bus(pfn);
-}
-
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
-{
-	unsigned long pfn = __bus_to_pfn(addr);
-
-	if (dev && dev->dma_range_map)
-		pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn)));
-	return pfn;
-}
-
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
-{
-	if (dev)
-		return pfn_to_dma(dev, virt_to_pfn(addr));
-
-	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	unsigned int offset = paddr & ~PAGE_MASK;
-	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
-	unsigned int offset = dev_addr & ~PAGE_MASK;
-	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-#endif /* ASM_ARM_DMA_DIRECT_H */
+#include
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
deleted file mode 100644
index 77082246a5e1..000000000000
--- a/arch/arm/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_DMA_MAPPING_H
-#define ASMARM_DMA_MAPPING_H
-
-#ifdef __KERNEL__
-
-#include
-#include
-
-#include
-#include
-
-extern const struct dma_map_ops arm_dma_ops;
-extern const struct dma_map_ops arm_coherent_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
-		return &arm_dma_ops;
-	return NULL;
-}
-
-/**
- * arm_dma_alloc - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- * @attrs: optinal attributes that specific mapping properties
- *
- * Allocate some memory for a device for performing DMA. This function
- * allocates pages, and will return the CPU-viewed address, and sets @handle
- * to be the device-viewed address.
- */
-extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			   gfp_t gfp, unsigned long attrs);
-
-/**
- * arm_dma_free - free memory allocated by arm_dma_alloc
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * arm_dma_alloc().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			 dma_addr_t handle, unsigned long attrs);
-
-/**
- * arm_dma_mmap - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- * @size: size of memory originally requested in dma_alloc_coherent
- * @attrs: optinal attributes that specific mapping properties
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			unsigned long attrs);
-
-/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- * @needs_bounce_fn: called to determine whether buffer needs bouncing
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- */
-extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
- */
-extern void dmabounce_unregister_dev(struct device *);
-
-
-
-/*
- * The scatter list versions of the above methods.
- */
-extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction, unsigned long attrs);
-extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs);
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index f673e13e0f94..a55a9038abc8 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -378,8 +378,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus	__virt_to_phys
 #define __bus_to_virt	__phys_to_virt
-#define __pfn_to_bus(x)	__pfn_to_phys(x)
-#define __bus_to_pfn(x)	__phys_to_pfn(x)
 #endif
 
 /*
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index bcd4e4ca34f7..acc10b1caa69 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -61,6 +61,7 @@ endmenu
 
 # Footbridge support
 config FOOTBRIDGE
+	select ARCH_HAS_PHYS_TO_DMA
 	bool
 
 # Footbridge in host mode
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 322495df271d..5020eb96b025 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
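With FOOTBRIDGE now selecting ARCH_HAS_PHYS_TO_DMA, dma-direct expects the platform to supply its own phys_to_dma()/dma_to_phys() pair in place of the deleted asm/dma-direct.h helpers. A minimal sketch of the shape such overrides take, assuming footbridge's existing fb_bus_sdram_offset() helper; the exact arithmetic is illustrative rather than the patch's verbatim addition::

	dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
	{
		/* CPU physical address -> bus address as seen by devices */
		return fb_bus_sdram_offset() + (paddr - PHYS_OFFSET);
	}

	phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
	{
		/* inverse translation: bus address -> CPU physical address */
		return (dev_addr - fb_bus_sdram_offset()) + PHYS_OFFSET;
	}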