mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap
These two functions have never been used since they were added.

Link: https://lore.kernel.org/r/20191113134528.21187-1-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 93f4e735b6 (parent d28c2c9a48)
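Since nothing in-tree ever used these helpers, a driver that wants the combined fault-and-map behaviour can open-code it from the primitives the helpers wrapped: hmm_range_fault() followed by dma_map_page() per backing page. A minimal sketch of that pattern, distilled from the hmm_range_dma_map() body removed below; the function name my_fault_and_map() is hypothetical, and range setup, notifier retry checks, and unmapping on error are left to the caller:

#include <linux/hmm.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical open-coded replacement for the removed helper:
 * fault the range, then DMA-map each page that was populated.
 */
static long my_fault_and_map(struct hmm_range *range, struct device *dev,
			     dma_addr_t *daddrs, unsigned int flags)
{
	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
	long ret;

	ret = hmm_range_fault(range, flags);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		daddrs[i] = 0;
		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (!page)
			continue;

		/* Writable entries need a bidirectional mapping. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, daddrs[i]))
			return -EFAULT;	/* caller unmaps pages mapped so far */
	}
	return npages;
}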
include/linux/hmm.h
@@ -230,34 +230,11 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-
-long hmm_range_dma_map(struct hmm_range *range,
-		       struct device *device,
-		       dma_addr_t *daddrs,
-		       unsigned int flags);
-long hmm_range_dma_unmap(struct hmm_range *range,
-			 struct device *device,
-			 dma_addr_t *daddrs,
-			 bool dirty);
 #else
 static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
 	return -EOPNOTSUPP;
 }
-
-static inline long hmm_range_dma_map(struct hmm_range *range,
-				     struct device *device, dma_addr_t *daddrs,
-				     unsigned int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline long hmm_range_dma_unmap(struct hmm_range *range,
-				       struct device *device,
-				       dma_addr_t *daddrs, bool dirty)
-{
-	return -EOPNOTSUPP;
-}
 #endif
 
 /*
mm/hmm.c (147 deletions)
@@ -691,150 +691,3 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(hmm_range_fault);
-
-/**
- * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
- * @range: range being faulted
- * @device: device to map page to
- * @daddrs: array of dma addresses for the mapped pages
- * @flags: HMM_FAULT_*
- *
- * Return: the number of pages mapped on success (including zero), or any
- * status return from hmm_range_fault() otherwise.
- */
-long hmm_range_dma_map(struct hmm_range *range, struct device *device,
-		dma_addr_t *daddrs, unsigned int flags)
-{
-	unsigned long i, npages, mapped;
-	long ret;
-
-	ret = hmm_range_fault(range, flags);
-	if (ret <= 0)
-		return ret ? ret : -EBUSY;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0, mapped = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		/*
-		 * FIXME need to update DMA API to provide invalid DMA address
-		 * value instead of a function to test dma address value. This
-		 * would remove lot of dumb code duplicated accross many arch.
-		 *
-		 * For now setting it to 0 here is good enough as the pfns[]
-		 * value is what is use to check what is valid and what isn't.
-		 */
-		daddrs[i] = 0;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		/* Check if range is being invalidated */
-		if (mmu_interval_check_retry(range->notifier,
-					     range->notifier_seq)) {
-			ret = -EBUSY;
-			goto unmap;
-		}
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
-			dir = DMA_BIDIRECTIONAL;
-
-		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
-		if (dma_mapping_error(device, daddrs[i])) {
-			ret = -EFAULT;
-			goto unmap;
-		}
-
-		mapped++;
-	}
-
-	return mapped;
-
-unmap:
-	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		if (dma_mapping_error(device, daddrs[i]))
-			continue;
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
-			dir = DMA_BIDIRECTIONAL;
-
-		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
-		mapped--;
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(hmm_range_dma_map);
-
-/**
- * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
- * @range: range being unmapped
- * @device: device against which dma map was done
- * @daddrs: dma address of mapped pages
- * @dirty: dirty page if it had the write flag set
- * Return: number of page unmapped on success, -EINVAL otherwise
- *
- * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
- * to the sync_cpu_device_pagetables() callback so that it is safe here to
- * call set_page_dirty(). Caller must also take appropriate locks to avoid
- * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
- */
-long hmm_range_dma_unmap(struct hmm_range *range,
-		struct device *device,
-		dma_addr_t *daddrs,
-		bool dirty)
-{
-	unsigned long i, npages;
-	long cpages = 0;
-
-	/* Sanity check. */
-	if (range->end <= range->start)
-		return -EINVAL;
-	if (!daddrs)
-		return -EINVAL;
-	if (!range->pfns)
-		return -EINVAL;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_TO_DEVICE;
-		struct page *page;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
-			dir = DMA_BIDIRECTIONAL;
-
-			/*
-			 * See comments in function description on why it is
-			 * safe here to call set_page_dirty()
-			 */
-			if (dirty)
-				set_page_dirty(page);
-		}
-
-		/* Unmap and clear pfns/dma address */
-		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
-		range->pfns[i] = range->values[HMM_PFN_NONE];
-		/* FIXME see comments in hmm_vma_dma_map() */
-		daddrs[i] = 0;
-		cpages++;
-	}
-
-	return cpages;
-}
-EXPORT_SYMBOL(hmm_range_dma_unmap);