dma-mapping: simplify the dma_sync_single_range_for_{cpu,device} implementation
We can just call the regular sync calls after adding the offset to the address, instead of reimplementing them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
commit 8d59b5f2a4
parent 20b105feda
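For context, a minimal caller sketch (hypothetical driver code, not part of this patch; the function name and parameters are invented) showing the pattern these helpers serve: a driver that mapped a large receive buffer once and syncs only the sub-range the device just wrote. After this patch, each range call below is exactly dma_sync_single_for_{cpu,device}(dev, handle + offset, len, dir).

#include <linux/dma-mapping.h>

/* Hypothetical example: sync only one received frame that lives at
 * 'offset' bytes into a long-lived streaming DMA mapping 'handle'.
 */
static void example_handle_rx_frame(struct device *dev, dma_addr_t handle,
				    unsigned long offset, size_t len)
{
	/* Make the device's writes to this frame visible to the CPU. */
	dma_sync_single_range_for_cpu(dev, handle, offset, len,
				      DMA_FROM_DEVICE);

	/* ... CPU reads/processes the frame here ... */

	/* Give ownership of the frame back to the device for reuse. */
	dma_sync_single_range_for_device(dev, handle, offset, len,
					 DMA_FROM_DEVICE);
}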
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -70,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
 					     dma_addr_t dma_handle,
 					     size_t size, int direction);
 
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
-						dma_addr_t dma_handle,
-						unsigned long offset,
-						size_t size,
-						int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
-						   dma_addr_t dma_handle,
-						   unsigned long offset,
-						   size_t size, int direction);
-
 extern void debug_dma_sync_sg_for_cpu(struct device *dev,
 				       struct scatterlist *sg,
 				       int nelems, int direction);
@@ -167,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
 {
 }
 
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
-							dma_addr_t dma_handle,
-							unsigned long offset,
-							size_t size,
-							int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
-							   dma_addr_t dma_handle,
-							   unsigned long offset,
-							   size_t size,
-							   int direction)
-{
-}
-
 static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
 					     struct scatterlist *sg,
 					     int nelems, int direction)
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -360,6 +360,13 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
 
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
+{
+	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
 static inline void dma_sync_single_for_device(struct device *dev,
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
@@ -372,32 +379,11 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t addr,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
 static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t addr,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr + offset, size, dir);
-	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+	return dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
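A note on the dma-debug side (the helpers removed below): the dedicated range helpers built a reference with ref.dev_addr = dma_handle and ref.size = offset + size, i.e. they checked the mapping from its handle up to the end of the synced sub-range, while going through dma_sync_single_for_{cpu,device} instead makes the regular debug_dma_sync_single_for_{cpu,device} check size bytes starting at addr + offset. Both checks end at the same byte, so a sync that overruns the mapped area should still be caught; this is my reading of the diff, not a claim made in the commit message.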
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1633,48 +1633,6 @@ void debug_dma_sync_single_for_device(struct device *dev,
 }
 EXPORT_SYMBOL(debug_dma_sync_single_for_device);
 
-void debug_dma_sync_single_range_for_cpu(struct device *dev,
-					 dma_addr_t dma_handle,
-					 unsigned long offset, size_t size,
-					 int direction)
-{
-	struct dma_debug_entry ref;
-
-	if (unlikely(dma_debug_disabled()))
-		return;
-
-	ref.type = dma_debug_single;
-	ref.dev = dev;
-	ref.dev_addr = dma_handle;
-	ref.size = offset + size;
-	ref.direction = direction;
-	ref.sg_call_ents = 0;
-
-	check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
-
-void debug_dma_sync_single_range_for_device(struct device *dev,
-					    dma_addr_t dma_handle,
-					    unsigned long offset,
-					    size_t size, int direction)
-{
-	struct dma_debug_entry ref;
-
-	if (unlikely(dma_debug_disabled()))
-		return;
-
-	ref.type = dma_debug_single;
-	ref.dev = dev;
-	ref.dev_addr = dma_handle;
-	ref.size = offset + size;
-	ref.direction = direction;
-	ref.sg_call_ents = 0;
-
-	check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
-
 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			       int nelems, int direction)
 {