dma-mapping: simplify the dma_sync_single_range_for_{cpu,device} implementation

We can just call the regular calls after adding the offset to the address
instead of reimplementing them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
This commit is contained in:
Christoph Hellwig 2018-12-03 14:58:59 +01:00
parent 20b105feda
commit 8d59b5f2a4
3 changed files with 10 additions and 93 deletions

View File

@ -70,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, dma_addr_t dma_handle,
size_t size, int direction); size_t size, int direction);
extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction);
extern void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size, int direction);
extern void debug_dma_sync_sg_for_cpu(struct device *dev, extern void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, struct scatterlist *sg,
int nelems, int direction); int nelems, int direction);
@ -167,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
{ {
} }
static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction)
{
}
static inline void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction)
{
}
static inline void debug_dma_sync_sg_for_cpu(struct device *dev, static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, struct scatterlist *sg,
int nelems, int direction) int nelems, int direction)

View File

@ -360,6 +360,13 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
debug_dma_sync_single_for_cpu(dev, addr, size, dir); debug_dma_sync_single_for_cpu(dev, addr, size, dir);
} }
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t addr, unsigned long offset, size_t size,
enum dma_data_direction dir)
{
return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev, static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, dma_addr_t addr, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
@ -372,32 +379,11 @@ static inline void dma_sync_single_for_device(struct device *dev,
debug_dma_sync_single_for_device(dev, addr, size, dir); debug_dma_sync_single_for_device(dev, addr, size, dir);
} }
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t addr,
unsigned long offset,
size_t size,
enum dma_data_direction dir)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr + offset, size, dir);
debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev, static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t addr, dma_addr_t addr, unsigned long offset, size_t size,
unsigned long offset, enum dma_data_direction dir)
size_t size,
enum dma_data_direction dir)
{ {
const struct dma_map_ops *ops = get_dma_ops(dev); return dma_sync_single_for_device(dev, addr + offset, size, dir);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr + offset, size, dir);
debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
} }
static inline void static inline void

View File

@ -1633,48 +1633,6 @@ void debug_dma_sync_single_for_device(struct device *dev,
} }
EXPORT_SYMBOL(debug_dma_sync_single_for_device); EXPORT_SYMBOL(debug_dma_sync_single_for_device);
void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset, size_t size,
int direction)
{
struct dma_debug_entry ref;
if (unlikely(dma_debug_disabled()))
return;
ref.type = dma_debug_single;
ref.dev = dev;
ref.dev_addr = dma_handle;
ref.size = offset + size;
ref.direction = direction;
ref.sg_call_ents = 0;
check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size, int direction)
{
struct dma_debug_entry ref;
if (unlikely(dma_debug_disabled()))
return;
ref.type = dma_debug_single;
ref.dev = dev;
ref.dev_addr = dma_handle;
ref.size = offset + size;
ref.direction = direction;
ref.sg_call_ents = 0;
check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, int direction) int nelems, int direction)
{ {